source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
pydualsense.py |
# needed for python > 3.8
import os, sys
from sys import platform
if platform.startswith('Windows') and sys.version_info >= (3,8):
os.add_dll_directory(os.getcwd())
import hidapi
from .enums import (LedOptions, PlayerID, PulseOptions, TriggerModes, Brightness, ConnectionType) # type: ignore
import threading
class pydualsense:
    """High level interface to a Sony DualSense controller connected over HID.

    Construct the object, then call ``init()`` to open the device and start
    the background report thread; call ``close()`` to stop it again.
    """

    def __init__(self, verbose: bool = False) -> None:
        """Prepare the object without touching the hardware yet.

        Args:
            verbose (bool): print every raw input/output report for debugging
        """
        # TODO: maybe add a init function to not automatically allocate controller when class is declared
        self.verbose = verbose
        # rumble motor intensities (0..255), written out by prepareReport()
        self.leftMotor = 0
        self.rightMotor = 0

    def init(self):
        """initialize module and device states
        """
        self.device: hidapi.Device = self.__find_device()
        self.light = DSLight()  # control led light of ds
        self.audio = DSAudio()  # ds audio setting
        self.triggerL = DSTrigger()  # left trigger
        self.triggerR = DSTrigger()  # right trigger
        self.state = DSState()  # controller states

        # FIX: sys.platform is 'win32' on Windows, so the original check for
        # 'Windows' never matched and the connection type was never probed.
        if platform.startswith('win'):
            self.conType = self.determineConnectionType()  # determine USB or BT connection
        else:
            # set for usb manually
            self.input_report_length = 64
            self.output_report_length = 64

        # background thread for receiving and sending reports
        self.ds_thread = True
        self.report_thread = threading.Thread(target=self.sendReport)
        self.report_thread.start()

    def determineConnectionType(self) -> ConnectionType:
        """Derive USB/Bluetooth connection from the HID input report length.

        Returns:
            ConnectionType: ``USB`` for 64 byte reports, ``BT`` for 78 byte ones

        Raises:
            Exception: the report length matches neither known transport
        """
        report_length = self.device._device.input_report_length
        if report_length == 64:
            self.input_report_length = 64
            self.output_report_length = 64
            return ConnectionType.USB
        elif report_length == 78:
            self.input_report_length = 78
            self.output_report_length = 78
            return ConnectionType.BT
        # FIX: previously this fell through and returned None, which caused a
        # confusing AttributeError later; fail fast with a clear error instead.
        raise Exception('Unknown input report length: {}'.format(report_length))

    def close(self):
        """
        Stops the report thread and closes the HID device
        """
        self.ds_thread = False
        self.report_thread.join()
        self.device.close()

    def __find_device(self) -> hidapi.Device:
        """
        find HID device and open it

        Raises:
            Exception: HIDGuardian detected
            Exception: No device detected

        Returns:
            hidapi.Device: returns opened controller device
        """
        # TODO: detect connection mode, bluetooth has a bigger write buffer
        # TODO: implement multiple controllers working
        if sys.platform.startswith('win32'):
            import pydualsense.hidguardian as hidguardian
            if hidguardian.check_hide():
                raise Exception('HIDGuardian detected. Delete the controller from HIDGuardian and restart PC to connect to controller')

        detected_device: hidapi.Device = None
        devices = hidapi.enumerate(vendor_id=0x054c)
        for device in devices:
            # Sony vendor id, DualSense product id
            if device.vendor_id == 0x054c and device.product_id == 0x0CE6:
                detected_device = device

        # FIX: identity comparison against None instead of ==
        if detected_device is None:
            raise Exception('No device detected')

        # FIX: debug prints no longer spam stdout unless verbose was requested
        if self.verbose:
            print("device_id: {0}".format(detected_device.vendor_id))
            print("product_id: {0}".format(detected_device.product_id))

        dual_sense = hidapi.Device(vendor_id=detected_device.vendor_id, product_id=detected_device.product_id)
        return dual_sense

    def setLeftMotor(self, intensity: int):
        """
        set left motor rumble

        Args:
            intensity (int): rumble intensity 0..255

        Raises:
            TypeError: intensity false type
            Exception: intensity out of bounds 0..255
        """
        if not isinstance(intensity, int):
            raise TypeError('left motor intensity needs to be an int')
        if intensity > 255 or intensity < 0:
            raise Exception('maximum intensity is 255')
        self.leftMotor = intensity

    def setRightMotor(self, intensity: int):
        """
        set right motor rumble

        Args:
            intensity (int): rumble intensity 0..255

        Raises:
            TypeError: intensity false type
            Exception: intensity out of bounds 0..255
        """
        if not isinstance(intensity, int):
            raise TypeError('right motor intensity needs to be an int')
        if intensity > 255 or intensity < 0:
            raise Exception('maximum intensity is 255')
        self.rightMotor = intensity

    def sendReport(self):
        """background thread handling the reading of the device and updating its states
        """
        while self.ds_thread:
            # read data from the input report of the controller
            inReport = self.device.read(self.input_report_length)
            if self.verbose:
                print(inReport)
            # decrypt the packet and bind the inputs
            self.readInput(inReport)
            # prepare new report for device
            outReport = self.prepareReport()
            # write the report to the device
            self.writeReport(outReport)

    def readInput(self, inReport):
        """
        read the input from the controller and assign the states

        Args:
            inReport (bytearray): read bytearray containing the state of the whole controller
        """
        states = list(inReport)  # convert bytes to list
        # states 0 is always 1
        # sticks are centered at ~127; stored as signed offsets
        self.state.LX = states[1] - 127
        self.state.LY = states[2] - 127
        self.state.RX = states[3] - 127
        self.state.RY = states[4] - 127
        self.state.L2 = states[5]
        self.state.R2 = states[6]
        # state 7 always increments -> not used anywhere
        buttonState = states[8]
        self.state.triangle = (buttonState & (1 << 7)) != 0
        self.state.circle = (buttonState & (1 << 6)) != 0
        self.state.cross = (buttonState & (1 << 5)) != 0
        self.state.square = (buttonState & (1 << 4)) != 0
        # dpad is a 4 bit hat-switch value in the low nibble
        dpad_state = buttonState & 0x0F
        self.state.setDPadState(dpad_state)
        misc = states[9]
        self.state.R3 = (misc & (1 << 7)) != 0
        self.state.L3 = (misc & (1 << 6)) != 0
        self.state.options = (misc & (1 << 5)) != 0
        self.state.share = (misc & (1 << 4)) != 0
        self.state.R2Btn = (misc & (1 << 3)) != 0
        self.state.L2Btn = (misc & (1 << 2)) != 0
        self.state.R1 = (misc & (1 << 1)) != 0
        self.state.L1 = (misc & (1 << 0)) != 0
        misc2 = states[10]
        self.state.ps = (misc2 & (1 << 0)) != 0
        self.state.touchBtn = (misc2 & 0x02) != 0
        self.state.micBtn = (misc2 & 0x04) != 0
        # trackpad touch point 0
        # NOTE(review): offsets 33..40 assume the USB report layout — confirm
        # they are still valid for the longer Bluetooth report.
        self.state.trackPadTouch0.ID = inReport[33] & 0x7F
        self.state.trackPadTouch0.isActive = (inReport[33] & 0x80) == 0
        self.state.trackPadTouch0.X = ((inReport[35] & 0x0f) << 8) | (inReport[34])
        self.state.trackPadTouch0.Y = ((inReport[36]) << 4) | ((inReport[35] & 0xf0) >> 4)
        # trackpad touch point 1
        self.state.trackPadTouch1.ID = inReport[37] & 0x7F
        self.state.trackPadTouch1.isActive = (inReport[37] & 0x80) == 0
        self.state.trackPadTouch1.X = ((inReport[39] & 0x0f) << 8) | (inReport[38])
        self.state.trackPadTouch1.Y = ((inReport[40]) << 4) | ((inReport[39] & 0xf0) >> 4)
        # TODO: implement gyrometer and accelerometer
        # TODO: control mouse with touchpad for fun as DS4Windows

    def writeReport(self, outReport):
        """
        write the report to the device

        Args:
            outReport (list): report to be written to device
        """
        self.device.write(bytes(outReport))

    def prepareReport(self):
        """
        prepare the output to be send to the controller

        Returns:
            list: report to send to controller
        """
        outReport = [0] * self.output_report_length  # create empty list with range of output report
        # packet type
        outReport[0] = 0x2
        # flags determing what changes this packet will perform
        # 0x01 set the main motors (also requires flag 0x02)
        # 0x02 set the main motors (also requires flag 0x01)
        # 0x04 set the right trigger motor
        # 0x08 set the left trigger motor
        # 0x10 modification of audio volume
        # 0x20 toggling of internal speaker while headset is connected
        # 0x40 modification of microphone volume
        outReport[1] = 0xff
        # further flags determining what changes this packet will perform
        # 0x01 toggling microphone LED
        # 0x02 toggling audio/mic mute
        # 0x04 toggling LED strips on the sides of the touchpad
        # 0x08 will actively turn all LEDs off? Convenience flag?
        # 0x10 toggling white player indicator LEDs below touchpad
        # 0x40 adjustment of overall motor/effect power (index 37)
        outReport[2] = 0x1 | 0x2 | 0x4 | 0x10 | 0x40
        outReport[3] = self.leftMotor  # left low freq motor 0-255
        outReport[4] = self.rightMotor  # right low freq motor 0-255
        # outReport[5] - outReport[8] audio related
        # set microphone LED; setting doesn't affect microphone settings
        outReport[9] = self.audio.microphone_led
        outReport[10] = 0x10 if self.audio.microphone_mute == True else 0x00
        # add right trigger mode + parameters to packet
        outReport[11] = self.triggerR.mode.value
        outReport[12] = self.triggerR.forces[0]
        outReport[13] = self.triggerR.forces[1]
        outReport[14] = self.triggerR.forces[2]
        outReport[15] = self.triggerR.forces[3]
        outReport[16] = self.triggerR.forces[4]
        outReport[17] = self.triggerR.forces[5]
        outReport[20] = self.triggerR.forces[6]
        # add left trigger mode + parameters to packet
        outReport[22] = self.triggerL.mode.value
        outReport[23] = self.triggerL.forces[0]
        outReport[24] = self.triggerL.forces[1]
        outReport[25] = self.triggerL.forces[2]
        outReport[26] = self.triggerL.forces[3]
        outReport[27] = self.triggerL.forces[4]
        outReport[28] = self.triggerL.forces[5]
        outReport[31] = self.triggerL.forces[6]
        # LED and player indicator settings
        outReport[39] = self.light.ledOption.value
        outReport[42] = self.light.pulseOptions.value
        outReport[43] = self.light.brightness.value
        outReport[44] = self.light.playerNumber.value
        outReport[45] = self.light.TouchpadColor[0]
        outReport[46] = self.light.TouchpadColor[1]
        outReport[47] = self.light.TouchpadColor[2]
        if self.verbose:
            print(outReport)
        return outReport
class DSTouchpad:
    """State of one touch point on the DualSense touchpad."""

    def __init__(self) -> None:
        # no finger registered yet; coordinates default to the origin
        self.isActive = False
        self.ID = self.X = self.Y = 0
class DSState:
    """Snapshot of every button, stick, trigger and touchpad state."""

    # hat-switch nibble -> (up, down, left, right)
    # 0=N, 1=NE, 2=E, 3=SE, 4=S, 5=SW, 6=W, 7=NW; any other value = released
    _DPAD_LOOKUP = {
        0: (True, False, False, False),
        1: (True, False, False, True),
        2: (False, False, False, True),
        3: (False, True, False, True),
        4: (False, True, False, False),
        5: (False, True, True, False),  # FIX: SW previously failed to set DpadLeft
        6: (False, False, True, False),
        7: (True, False, True, False),
    }

    def __init__(self) -> None:
        self.packerC = 0
        self.square, self.triangle, self.circle, self.cross = False, False, False, False
        self.DpadUp, self.DpadDown, self.DpadLeft, self.DpadRight = False, False, False, False
        self.L1, self.L2, self.L3, self.R1, self.R2, self.R3, self.R2Btn, self.L2Btn = False, False, False, False, False, False, False, False
        self.share, self.options, self.ps, self.touch1, self.touch2, self.touchBtn, self.touchRight, self.touchLeft = False, False, False, False, False, False, False, False
        self.touchFinger1, self.touchFinger2 = False, False
        # stick axes; readInput() stores signed offsets from centre
        self.RX, self.RY, self.LX, self.LY = 128, 128, 128, 128
        self.trackPadTouch0, self.trackPadTouch1 = DSTouchpad(), DSTouchpad()

    def setDPadState(self, dpad_state):
        """Decode the 4-bit hat-switch value into the four DPad booleans.

        Args:
            dpad_state (int): hat value 0..7; anything else releases the pad
        """
        self.DpadUp, self.DpadDown, self.DpadLeft, self.DpadRight = \
            self._DPAD_LOOKUP.get(dpad_state, (False, False, False, False))
class DSLight:
    """
    Represents all features of lights on the controller
    """

    def __init__(self) -> None:
        # defaults: dim LEDs, player-1 indicator, both strips, no pulsing,
        # blue touchpad ring
        self.brightness: Brightness = Brightness.low
        self.playerNumber: PlayerID = PlayerID.player1
        self.ledOption: LedOptions = LedOptions.Both
        self.pulseOptions: PulseOptions = PulseOptions.Off
        self.TouchpadColor = (0, 0, 255)

    def setLEDOption(self, option: LedOptions):
        """Select which LED strips are active.

        Args:
            option (LedOptions): Led option

        Raises:
            TypeError: option is not a LedOptions value
        """
        if not isinstance(option, LedOptions):
            raise TypeError('Need LEDOption type')
        self.ledOption = option

    def setPulseOption(self, option: PulseOptions):
        """Select the pulse behaviour of the LEDs.

        Args:
            option (PulseOptions): pulse option of the LEDs

        Raises:
            TypeError: option is not a PulseOptions value
        """
        if not isinstance(option, PulseOptions):
            raise TypeError('Need PulseOption type')
        self.pulseOptions = option

    def setBrightness(self, brightness: Brightness):
        """Set the brightness of the player LEDs.

        Args:
            brightness (Brightness): brightness of LEDs

        Raises:
            TypeError: brightness is not a Brightness value
        """
        if not isinstance(brightness, Brightness):
            raise TypeError('Need Brightness type')
        self.brightness = brightness

    def setPlayerID(self, player: PlayerID):
        """Set the player indicator LEDs (four player states available).

        Args:
            player (PlayerID): chosen PlayerID for the controller

        Raises:
            TypeError: player is not a PlayerID value
        """
        if not isinstance(player, PlayerID):
            raise TypeError('Need PlayerID type')
        self.playerNumber = player

    def setColorI(self, r: int, g: int, b: int) -> None:
        """Set the touchpad ring color from separate integer channels.

        Args:
            r (int): red channel
            g (int): green channel
            b (int): blue channel

        Raises:
            TypeError: a channel is not an int
            Exception: a channel is outside 0..255
        """
        channels = (r, g, b)
        if not all(isinstance(c, int) for c in channels):
            raise TypeError('Color parameter need to be int')
        if any(c < 0 or c > 255 for c in channels):
            raise Exception('colors have values from 0 to 255 only')
        self.TouchpadColor = channels

    def setColorT(self, color: tuple) -> None:
        """Set the touchpad ring color from an (r, g, b) tuple.

        Args:
            color (tuple): color as tuple

        Raises:
            TypeError: color is not a tuple
            Exception: a channel is outside 0..255
        """
        if not isinstance(color, tuple):
            raise TypeError('Color type is tuple')
        # unpack for the out-of-bounds check
        r, g, b = map(int, color)
        if any(c < 0 or c > 255 for c in (r, g, b)):
            raise Exception('colors have values from 0 to 255 only')
        self.TouchpadColor = (r, g, b)
class DSAudio:
    """Microphone related settings: mute state and the mute LED."""

    def __init__(self) -> None:
        # FIX: the setters store and require bools, so initialize with real
        # booleans instead of the 0/1 ints the original used
        self.microphone_mute = False
        self.microphone_led = False

    def setMicrophoneLED(self, value):
        """
        Activates or disables the microphone LED.
        This doesn't mute/unmute the microphone itself.

        Args:
            value (bool): on or off microphone LED

        Raises:
            TypeError: value is not a bool
        """
        if not isinstance(value, bool):
            raise TypeError('MicrophoneLED can only be a bool')
        self.microphone_led = value

    def setMicrophoneMute(self, state):
        """
        Mutes or unmutes the microphone and mirrors the state on the LED.

        Args:
            state (bool): requested mute state

        Raises:
            TypeError: state is not a bool
        """
        if not isinstance(state, bool):
            raise TypeError('state needs to be bool')
        self.setMicrophoneLED(state)  # set led accordingly
        self.microphone_mute = state
class DSTrigger:
    """Adaptive trigger settings: effect mode plus seven force parameters."""

    def __init__(self) -> None:
        # effect mode applied to this trigger
        self.mode: TriggerModes = TriggerModes.Off
        # force parameters for the trigger effect
        self.forces = [0] * 7

    def setForce(self, forceID: int = 0, force: int = 0):
        """Set one of the seven force parameters of the trigger effect.

        Args:
            forceID (int, optional): force parameter index. Defaults to 0.
            force (int, optional): force applied to that parameter. Defaults to 0.

        Raises:
            TypeError: forceID or force is not an int
            Exception: forceID is outside 0..6
        """
        if not isinstance(forceID, int) or not isinstance(force, int):
            raise TypeError('forceID and force needs to be type int')
        if not 0 <= forceID <= 6:
            raise Exception('only 7 parameters available')
        self.forces[forceID] = force

    def setMode(self, mode: TriggerModes):
        """Set the effect mode for the trigger.

        Args:
            mode (TriggerModes): trigger mode

        Raises:
            TypeError: mode is not a TriggerModes value
        """
        if not isinstance(mode, TriggerModes):
            raise TypeError('Trigger mode parameter needs to be of type `TriggerModes`')
        self.mode = mode
|
maderad.py |
import logging
import os
import platform
import subprocess
import sys
import tempfile
import time
import random
import string
import multiprocessing
import traceback
import jinja2
import click
from madera.consumer.topic_watcher import launch
class Process(multiprocessing.Process):
    """multiprocessing.Process that forwards child exceptions to the parent.

    The child end of a pipe receives ``None`` on success, or a
    ``(RuntimeError, traceback_string)`` pair on failure; the parent reads it
    through the ``exception`` property.
    """

    def __init__(self, *args, **kwargs):
        multiprocessing.Process.__init__(self, *args, **kwargs)
        self._pconn, self._cconn = multiprocessing.Pipe()
        self._exception = None

    def run(self):
        try:
            super().run()
            self._cconn.send(None)
        except Exception as exc:  # pylint: disable=broad-except
            # wrap in RuntimeError so it is picklable regardless of the
            # original exception type, and ship the formatted traceback along
            self._cconn.send((RuntimeError(str(exc)), traceback.format_exc()))

    @property
    def exception(self):
        """Last value sent by the child: None, or (error, traceback)."""
        if self._pconn.poll():
            self._exception = self._pconn.recv()
        return self._exception
@click.command()
@click.option('--port', default='9091', help='The port to launch the madera kafka broker on')
@click.option('--zookeeper-port', default='2181', help='The port to launch the kafka zookeeper instance on')
@click.option('--consumers-only')
@click.option('--data-dir', default=None, help='The root data directory')
def main(**kwargs):  # pylint: disable=too-many-locals,too-many-statements,too-many-branches
    """Launch zookeeper, a Kafka broker and the madera announce consumer.

    Renders the bundled jinja templates into temp config files, starts the
    servers as shell subprocesses, then blocks until the consumer exits.
    Cleanup of temp files and subprocesses happens in the ``finally`` block.
    """
    logging.basicConfig(level=logging.DEBUG)

    # Check server compatibility
    # NOTE(review): platform.java_ver is a function object and is therefore
    # always truthy, so this check can never fail. It was presumably meant to
    # probe for an installed Java runtime — confirm and fix separately.
    if not platform.java_ver:
        logging.error('Unable to launch kafka brokers: Please install a version of Java > 8.0')
        sys.exit(1)

    data_dir = kwargs['data_dir'] if kwargs['data_dir'] is not None else os.getcwd()

    # Track running subprocesses so the finally block can clean them up
    kafka_processes = []
    zookeeper_processes = []

    # Launch the zookeeper broker on the correct port
    # ./zookeeper-server-start.sh ../config/zookeeper.properties
    kafka_bin_path = os.path.join(os.path.dirname(__file__), '..', 'kafka', 'kafka_2.12-2.3.0', 'bin')
    local_bin_path = os.path.dirname(__file__)

    logging.info('Configuring zookeeper')
    with open(os.path.join(local_bin_path, 'templates', 'zookeeper.jinja'), 'r') as jf:
        zookeeper_template = jinja2.Template(jf.read())

    try:
        zookeeper_temp = tempfile.NamedTemporaryFile('w+')

        # Validate the zookeeper config options
        try:
            port = int(kwargs['zookeeper_port'])
        except ValueError:
            logging.error('Unable to launch zookeeper: Invalid port %s', kwargs['zookeeper_port'])
            sys.exit(1)

        # Write the zookeeper properties configuration file
        zookeeper_temp.write(zookeeper_template.render(client_port=kwargs['zookeeper_port']))
        zookeeper_temp.flush()
        zookeeper_temp.file.close()

        # Launch the zookeeper instance (shell string: output is redirected to
        # zookeeper.log inside the command itself)
        logging.info('Launching zookeeper instance on port %s', port)
        cmd = 'exec '
        cmd += str(os.path.join(kafka_bin_path, 'zookeeper-server-start.sh'))
        cmd += ' {}'.format(zookeeper_temp.name)
        cmd += ' > {} 2>&1'.format(os.path.join(data_dir, 'zookeeper.log'))
        process = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
        zookeeper_processes.append(process)

        # Wait for zookeeper to start up — fixed 10 s grace period, no health check
        print('Waiting for zookeeper to start', end='')
        sys.stdout.flush()
        for _ in range(10):
            print('.', end='')
            sys.stdout.flush()
            time.sleep(1)
        print()

        # Write the Kafka JAAS file with a freshly generated API key
        # NOTE(review): random is not cryptographically secure — consider the
        # secrets module if this key is security sensitive.
        logging.info('Creating Kafka JAAS file')
        api_key = ''.join(random.choice(string.ascii_lowercase) for _ in range(16))
        with open(os.path.join(local_bin_path, 'templates', 'kafka_server_jaas.jinja'), 'r') as jf:
            jaas_template = jinja2.Template(jf.read())
        jaas_temp = tempfile.NamedTemporaryFile('w+')
        jaas_temp.write(jaas_template.render(user_password=api_key))
        jaas_temp.flush()
        jaas_temp.file.close()

        # Write the kafka-server-start.sh wrapper script
        logging.info('Creating Kafka server start script')
        with open(os.path.join(local_bin_path, 'templates', 'kafka-server-start.sh.jinja'), 'r') as jf:
            kss_template = jinja2.Template(jf.read())
        kss_temp = tempfile.NamedTemporaryFile('w+')
        kss_temp.write(kss_template.render(kafka_jaas=jaas_temp.name, kafka_bindir=kafka_bin_path))
        kss_temp.flush()
        kss_temp.file.close()

        # Launch the Kafka broker
        # Write the kafka server properties
        logging.info('Configuring Kafka')
        with open(os.path.join(local_bin_path, 'templates', 'server.jinja'), 'r') as jf:
            kafka_template = jinja2.Template(jf.read())
        kafka_temp = tempfile.NamedTemporaryFile('w+')

        # Validate the kafka config options
        try:
            port = int(kwargs['port'])
        except ValueError:
            logging.error('Unable to launch Kafka broker: Invalid port %s', kwargs['port'])
            sys.exit(1)

        # Write the kafka properties configuration file
        kafka_temp.write(kafka_template.render(zookeeper_port=kwargs['zookeeper_port'], kafka_port=kwargs['port']))
        kafka_temp.flush()
        kafka_temp.file.close()

        # setsid puts the broker in its own process group so signals aimed at
        # this launcher do not hit it directly
        logging.info('Launching kafka instance on port %s', port)
        cmd = 'chmod +x {} && exec '.format(kss_temp.name)
        cmd += kss_temp.name
        cmd += ' {}'.format(kafka_temp.name)
        cmd += ' > {} 2>&1'.format(os.path.join(data_dir, 'kafka.log'))
        process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, preexec_fn=os.setsid)  # pylint:disable=subprocess-popen-preexec-fn
        kafka_processes.append(process)

        # Launch the madera topic watcher in a child process that reports
        # exceptions back through Process.exception
        logging.info('Launching madera announce consumer')
        madera_announce = Process(target=launch, args=(api_key, port, data_dir,))
        madera_announce.start()

        print('Finished launch! Running on port: {}'.format(port))
        print('API Key: {}'.format(api_key))

        # Wait for the announce process to finish; surface its traceback if it died
        madera_announce.join()
        if madera_announce.exception:
            _, tb = madera_announce.exception
            print(tb)
            sys.exit(1)

        # Wait for and clean up returned processes
        for process in kafka_processes + zookeeper_processes:
            process.wait()
            if process.returncode != 0:
                raise subprocess.CalledProcessError(returncode=process.returncode,
                                                    cmd=cmd)
    finally:
        print('\nCleaning up Kafka servers...')

        # Close the temp files; the NameError guards cover the case where an
        # early failure happened before a given file was created
        try:
            zookeeper_temp.close()
        except NameError:
            pass
        try:
            jaas_temp.close()
        except NameError:
            pass
        try:
            kss_temp.close()
        except NameError:
            pass
        try:
            kafka_temp.close()
        except NameError:
            pass

        # Kill the Kafka processes cleanly
        for p in kafka_processes:
            p.kill()
        print('Finished cleaning up Kafka.')

        # Kill the zookeeper processes cleanly — give Kafka a moment to die first
        print('Waiting one moment before cleaning up zookeeper...')
        time.sleep(5)
        for p in zookeeper_processes:
            p.kill()
        print('Finished cleaning up zookeeper.')


if __name__ == '__main__':
    main()
|
wait.py | #
# Copyright 2016-2021 Razorbill Instruments Ltd.
# This file is part of the Razorbill Lab Python library which is
# available under the MIT licence - see the LICENCE file for more.
#
"""
Various functions for waiting for some condition to be true before continuing
the experiment. If the calling code is running in a measurment.Sequence it
can be paused or aborted during one of these `Wait`s.
"""
import time
import numpy
from . import _logger
import threading
import ctypes
from measurement.sequences import Sequence
class _Wait():
    """ This is a superclass for the other Wait classes.

    Subclasses must implement ``test()`` returning True when the wait is over,
    then call ``run()`` (typically from their constructor) to block.
    """

    def __init__(self, period, timeout):
        # period: seconds between polls of test(); timeout: max total seconds
        self.start_time = time.time()
        self.period = period
        self.timeout = timeout
        self.sequence = None
        # If this code runs inside a measurement Sequence thread, keep a
        # handle to it so the wait honours pause/abort requests.
        if isinstance(threading.current_thread(), Sequence):
            self.sequence = threading.current_thread()

    def run(self):
        """Poll ``self.test()`` every ``period`` seconds until it passes,
        the owning sequence stops us, or ``timeout`` elapses.
        """
        done = False
        paused = False
        has_timed_out = False
        # keep looping while paused even if test() already passed, so a
        # paused sequence does not fall through mid-pause
        while ((not done) or paused) and not has_timed_out:
            # sleep first: test() is never called more often than `period`
            time.sleep(self.period)
            if self.sequence is not None:
                self.sequence._check_stop()
                paused = self.sequence._check_pause()
            done = self.test()
            if (time.time() - self.start_time) > self.timeout:
                _logger.warning("Wait timed out")
                has_timed_out = True
class In_Band(_Wait):
    def __init__(self, quantity, low, high, target_time, period=1,
                 quantity_index=0, timeout=numpy.inf):
        """
        Blocks until `quantity` is between `high` and `low` for `target_time`

        This is used to delay an experiment until some measurement quantity
        is in a certain band. For example, to wait until a cryostat settles
        at a target temperature.

        Construction
        ------------
        quantity : a measurement.Quantity
            The wait will end or not depending on the value of this quantity
        low : number
            minimum value of quantity (exclusive)
        high : number
            maximum value of quantity (exclusive)
        target_time : number
            contiguous time quantity must be in band, in seconds
        period : number
            how often to check the Quantity, in seconds (default is 1)
        quantity_index : integer
            If quantity is a list Quantity, watch this item
        timeout : integer
            If set, the function will timeout if it is still not in band after
            this many seconds
        """
        super().__init__(period, timeout)
        _logger.info(f"Waiting for {quantity.name} to be in [{low}, {high}] for {target_time}s...")
        # number of consecutive in-band polls required before the wait ends
        self.target_count = target_time / period
        self.stable_count = 0
        self.quantity = quantity
        self.quantity_index = quantity_index
        self.low = low
        self.high = high
        # blocks here until test() passes or the timeout expires
        self.run()
        _logger.info(f"Done waiting for {quantity.name}")

    def test(self):
        # A list `name` is used as the marker that the Quantity holds a list
        # of values — presumably list Quantities carry list names; TODO
        # confirm against the Quantity class.
        if type(self.quantity.name) is list:
            meas = self.quantity.value[self.quantity_index]
        else:
            meas = self.quantity.value
        # strictly inside the band (both bounds exclusive)
        if meas > self.low and meas < self.high:
            self.stable_count += 1
        else:
            self.stable_count = 0
        return self.stable_count > self.target_count
class Is_Equal(_Wait):
    def __init__(self, quantity, target, target_time, period=1,
                 quantity_index=0, timeout=numpy.inf):
        """
        Blocks until `quantity` equals `target` continuously for `target_time`.

        Use this to delay an experiment until some measured quantity reaches
        an exact value — for example a state flag changing. For continuous
        variables use In_Band instead.

        Construction
        ------------
        quantity : a measurement.Quantity
            The wait watches the value of this quantity
        target : number
            the value the quantity must reach. Can be an IntEnum.
        target_time : number
            contiguous time the quantity must equal target, in seconds
        period : number
            how often to check the Quantity, in seconds (default is 1)
        quantity_index : integer
            If quantity is a list Quantity, watch this item
        timeout : integer
            If set, give up after this many seconds
        """
        super().__init__(period, timeout)
        _logger.info(f"Waiting for {quantity.name} to be {target} for {target_time}s...")
        self.quantity = quantity
        self.quantity_index = quantity_index
        self.target = target
        self.stable_count = 0
        # consecutive matching polls needed before the wait ends
        self.target_count = target_time / period
        self.run()
        _logger.info(f"Done waiting for {quantity.name}")

    def test(self):
        """True once the value has matched target for long enough."""
        # a list `name` marks a list-valued Quantity
        if type(self.quantity.name) is list:
            current = self.quantity.value[self.quantity_index]
        else:
            current = self.quantity.value
        self.stable_count = self.stable_count + 1 if current == self.target else 0
        return self.stable_count > self.target_count
class Is_Stable(_Wait):
    def __init__(self, quantity, variation, test_time, period=1,
                 quantity_index=0, timeout=numpy.inf):
        """Blocks until `quantity` has an rms deviation < `variation`

        This is used to delay an experiment until some measurement quantity
        stops changing. For example until a cryostat has reached base
        temperature, but that temperature is not known in advance.

        quantity : measurement.Quantity
            The wait will end or not depending on the value of this quantity
        variation : number
            Permissible rms, as a fraction of the measured value
        test_time : number
            time over which rms is measured, in seconds
        period : number
            how often to check the Quantity, in seconds (default 1)
        quantity_index : integer
            Where quantity is a list Quantity, watch this item
        timeout: integer
            If set, the function will timeout if it is still not stable after
            this many seconds
        """
        super().__init__(period, timeout)
        _logger.info(f"Waiting for {quantity.name} to be stable to {variation} for {test_time}s...")
        self.num_values = int(test_time / period)
        self.quantity = quantity
        # FIX: quantity_index was never stored, so test() raised
        # AttributeError for list-valued Quantities
        self.quantity_index = quantity_index
        self.variation = variation
        # ring buffer pre-filled with NaN: the metric stays NaN (and the wait
        # cannot end) until a full window of real samples has been collected
        self.data = numpy.ones(self.num_values) * numpy.nan
        self.pointer = 0
        self.run()
        _logger.info(f"Done waiting for {quantity.name}")

    def test(self):
        # a list `name` marks a list-valued Quantity — presumably list
        # Quantities carry list names; TODO confirm against Quantity class
        if type(self.quantity.name) is list:
            meas = self.quantity.value[self.quantity_index]
        else:
            meas = self.quantity.value
        # overwrite the oldest sample in the circular buffer
        self.data[self.pointer] = meas
        self.pointer = (self.pointer + 1) % self.num_values
        mean = numpy.mean(self.data)
        rms_dev = numpy.sqrt(numpy.mean(numpy.square(self.data - mean)))
        # relative rms deviation of the window
        metric = rms_dev / numpy.abs(mean)
        return metric < self.variation
class For_Seconds(_Wait):
    def __init__(self, number):
        """Blocks for a fixed time, in seconds. Use instead of time.sleep."""
        super().__init__(1, numpy.inf)
        self.number = number
        # only log waits long enough to be worth mentioning
        log_it = number > 60
        if log_it:
            _logger.debug(f"Waiting for {number} seconds...")
        self.run()
        if log_it:
            _logger.debug("...done waiting")

    def test(self):
        """True once the requested number of seconds has elapsed."""
        return (time.time() - self.start_time) > self.number
class For_Click(_Wait):
    def __init__(self, msg='', title="Script Waiting", period=1, timeout=numpy.inf):
        """Pops up a message box and waits until it is dismissed.

        Windows only: uses ctypes.windll, which does not exist on other
        platforms.
        """
        super().__init__(period, timeout)
        thread_name = threading.current_thread().name
        msg = msg + f"\n\nThread '{thread_name}' is paused.\n"
        msg = msg + "Press 'OK' to continue."
        _logger.debug("Waiting for message box click...")

        def thread_target():
            # Show the box from a worker thread so run() can keep polling.
            # 0x10141 packs MessageBoxW style flags (presumably OK button,
            # icon, system-modal/set-foreground — TODO confirm the intent);
            # loop until the return code is 1, i.e. 'OK' was pressed.
            resp = 0
            while resp != 1:
                resp = ctypes.windll.user32.MessageBoxW(0, msg, title, 0x10141)

        self.popup_thread = threading.Thread(target=thread_target, name="Wait For Click")
        self.popup_thread.start()
        self.run()
        _logger.debug("...done waiting")

    def test(self):
        # the wait is over once the message-box thread has exited
        return not self.popup_thread.is_alive()
def in_band(quantity, low, high, target_time, period=1, quantity_index=0, timeout=numpy.inf):
    """Deprecated functional wrapper around :class:`In_Band`.

    Kept for backward compatibility; prefer constructing In_Band directly.
    """
    # FIX: corrected the misspelling 'depreciated' in the warning message
    _logger.warning("function in_band is deprecated. Use class In_Band instead")
    In_Band(quantity, low, high, target_time, period, quantity_index, timeout)
def is_stable(quantity, variation, test_time, period=1, quantity_index=0, timeout=numpy.inf):
    """Deprecated functional wrapper around :class:`Is_Stable`.

    Kept for backward compatibility; prefer constructing Is_Stable directly.
    """
    # FIX: corrected the misspelling 'depreciated' in the warning message
    _logger.warning("function is_stable is deprecated. Use class Is_Stable instead")
    Is_Stable(quantity, variation, test_time, period, quantity_index, timeout)
|
test_client.py | import asyncio
import base64
import contextlib
import json
import os
import signal
import string
import subprocess
import tempfile
import threading
import time
import unittest.mock
import urllib.parse
import grpclib
import pytest
import tenacity
import aetcd3.exceptions
import aetcd3.rpc as rpc
import aetcd3.utils as utils
etcd_version = os.environ.get('TEST_ETCD_VERSION', 'v3.2.8')
os.environ['ETCDCTL_API'] = '3'
def etcdctl(*args):
    """Run etcdctl with JSON output and return the parsed result."""
    cmd = list(args)
    # point at the test cluster endpoint when the env var is set
    endpoint = os.environ.get('PYTHON_ETCD_HTTP_URL')
    if endpoint:
        cmd = ['--endpoints', endpoint] + cmd
    cmd = ['etcdctl', '-w', 'json'] + cmd
    print(' '.join(cmd))
    return json.loads(subprocess.check_output(cmd).decode('utf-8'))
@contextlib.contextmanager
def _out_quorum():
    """Temporarily SIGSTOP the pifpaf etcd members so the cluster loses quorum."""
    raw = subprocess.check_output(['pgrep', '-f', '--', '--name pifpaf[12]'])
    member_pids = [int(line.strip()) for line in raw.splitlines()]
    try:
        for pid in member_pids:
            os.kill(pid, signal.SIGSTOP)
        yield
    finally:
        # always resume the members, even if the body raised
        for pid in member_pids:
            os.kill(pid, signal.SIGCONT)
class TestEtcd3:
class MockedException(grpclib.exceptions.GRPCError):
def __init__(self, status):
self.status = status
@pytest.fixture
async def etcd(self):
endpoint = os.environ.get('PYTHON_ETCD_HTTP_URL')
timeout = 5
if endpoint:
url = urllib.parse.urlparse(endpoint)
with aetcd3.client(
host=url.hostname,
port=url.port,
timeout=timeout,
) as client:
yield client
else:
async with aetcd3.client() as client:
yield client
@tenacity.retry(wait=tenacity.wait_fixed(2), stop=tenacity.stop_after_attempt(3))
def delete_keys_definitely():
# clean up after fixture goes out of scope
etcdctl('del', '--prefix', '/')
out = etcdctl('get', '--prefix', '/')
assert 'kvs' not in out
delete_keys_definitely()
@pytest.mark.asyncio
async def test_get_unknown_key(self, etcd):
value, meta = await etcd.get('probably-invalid-key')
assert value is None
assert meta is None
@pytest.mark.asyncio
async def test_get_key(self, etcd, string='xxx'):
etcdctl('put', '/doot/a_key', string)
returned, _ = await etcd.get('/doot/a_key')
assert returned == string.encode('utf-8')
@pytest.mark.asyncio
async def test_get_random_key(self, etcd, string='xxxx'):
etcdctl('put', '/doot/' + string, 'dootdoot')
returned, _ = await etcd.get('/doot/' + string)
assert returned == b'dootdoot'
@pytest.mark.asyncio
async def test_get_have_cluster_revision(self, etcd, string='xxx'):
etcdctl('put', '/doot/' + string, 'dootdoot')
_, md = await etcd.get('/doot/' + string)
assert md.response_header.revision > 0
    # @given(characters(blacklist_categories=['Cs', 'Cc']))
    @pytest.mark.asyncio
    async def test_put_key(self, etcd, string='xxx'):
        # Write via the client, verify via etcdctl (values come back base64).
        await etcd.put('/doot/put_1', string)
        out = etcdctl('get', '/doot/put_1')
        assert base64.b64decode(out['kvs'][0]['value']) == \
            string.encode('utf-8')

    # @given(
    #     characters(blacklist_categories=['Cs', 'Cc']),
    #     characters(blacklist_categories=['Cs', 'Cc']),
    # )
    @pytest.mark.asyncio
    async def test_get_key_serializable(self, etcd, key='foo', string='xxx'):
        # Serializable reads must succeed even while quorum is unavailable.
        etcdctl('put', '/doot/' + key, string)
        with _out_quorum():
            returned, _ = await etcd.get('/doot/' + key, serializable=True)
        assert returned == string.encode('utf-8')

    # @given(characters(blacklist_categories=['Cs', 'Cc']))
    @pytest.mark.asyncio
    async def test_put_has_cluster_revision(self, etcd, string='xxx'):
        response = await etcd.put('/doot/put_1', string)
        assert response.header.revision > 0

    # @given(characters(blacklist_categories=['Cs', 'Cc']))
    @pytest.mark.asyncio
    async def test_put_has_prev_kv(self, etcd, string='xxxx'):
        # prev_kv=True returns the value that was overwritten.
        etcdctl('put', '/doot/put_1', 'old_value')
        response = await etcd.put('/doot/put_1', string, prev_kv=True)
        assert response.prev_kv.value == b'old_value'
    @pytest.mark.asyncio
    async def test_delete_key(self, etcd):
        etcdctl('put', '/doot/delete_this', 'delete pls')
        v, _ = await etcd.get('/doot/delete_this')
        assert v == b'delete pls'
        # First delete succeeds; repeated or unknown deletes report False.
        deleted = await etcd.delete('/doot/delete_this')
        assert deleted is True
        deleted = await etcd.delete('/doot/delete_this')
        assert deleted is False
        deleted = await etcd.delete('/doot/not_here_dude')
        assert deleted is False
        v, _ = await etcd.get('/doot/delete_this')
        assert v is None

    @pytest.mark.asyncio
    async def test_delete_has_cluster_revision(self, etcd):
        response = await etcd.delete('/doot/delete_this', return_response=True)
        assert response.header.revision > 0

    @pytest.mark.asyncio
    async def test_delete_has_prev_kv(self, etcd):
        # prev_kv=True returns the deleted value in prev_kvs.
        etcdctl('put', '/doot/delete_this', 'old_value')
        response = await etcd.delete('/doot/delete_this',
                                     prev_kv=True,
                                     return_response=True)
        assert response.prev_kvs[0].value == b'old_value'

    @pytest.mark.asyncio
    async def test_delete_keys_with_prefix(self, etcd):
        etcdctl('put', '/foo/1', 'bar')
        etcdctl('put', '/foo/2', 'baz')
        v, _ = await etcd.get('/foo/1')
        assert v == b'bar'
        v, _ = await etcd.get('/foo/2')
        assert v == b'baz'
        # delete_prefix reports how many keys it removed.
        response = await etcd.delete_prefix('/foo')
        assert response.deleted == 2
        v, _ = await etcd.get('/foo/1')
        assert v is None
        v, _ = await etcd.get('/foo/2')
        assert v is None
    @pytest.mark.asyncio
    async def test_watch_key(self, etcd):
        """Watch a single key; cancel after the third event arrives."""
        def update_etcd(v):
            etcdctl('put', '/doot/watch', v)
            out = etcdctl('get', '/doot/watch')
            assert base64.b64decode(out['kvs'][0]['value']) == \
                utils.to_bytes(v)

        def update_key():
            # sleep to make watch can get the event
            time.sleep(3)
            update_etcd('0')
            time.sleep(1)
            update_etcd('1')
            time.sleep(1)
            update_etcd('2')
            time.sleep(1)
            update_etcd('3')
            time.sleep(1)

        # Writer runs in a background thread while we consume events here.
        t = threading.Thread(name='update_key', target=update_key)
        t.start()

        change_count = 0
        events_iterator, cancel = await etcd.watch(b'/doot/watch')
        async for event in events_iterator:
            assert event.key == b'/doot/watch'
            assert event.value == utils.to_bytes(str(change_count))
            # if cancel worked, we should not receive event 3
            assert event.value != utils.to_bytes('3')
            change_count += 1
            if change_count > 2:
                # if cancel not work, we will block in this for-loop forever
                await cancel()
        t.join()
    @pytest.mark.asyncio
    async def test_watch_key_with_revision_compacted(self, etcd):
        """Watching from a compacted revision raises RevisionCompactedError
        carrying the revision from which to resume."""
        etcdctl('put', '/watchcompation', '0')  # Some data to compact
        value, meta = await etcd.get('/watchcompation')
        revision = meta.mod_revision

        # Compact etcd and test watcher
        await etcd.compact(revision)

        def update_etcd(v):
            etcdctl('put', '/watchcompation', v)
            out = etcdctl('get', '/watchcompation')
            assert base64.b64decode(out['kvs'][0]['value']) == \
                utils.to_bytes(v)

        def update_key():
            update_etcd('1')
            update_etcd('2')
            update_etcd('3')

        t = threading.Thread(name='update_key', target=update_key)
        t.start()

        async def watch_compacted_revision_test():
            # Watching one revision before the compaction point must fail.
            events_iterator, cancel = await etcd.watch(
                b'/watchcompation', start_revision=(revision - 1))

            error_raised = False
            compacted_revision = 0
            try:
                await events_iterator.__anext__()
            except Exception as err:
                error_raised = True
                assert isinstance(err,
                                  aetcd3.exceptions.RevisionCompactedError)
                compacted_revision = err.compacted_revision

            assert error_raised is True
            assert compacted_revision == revision

            # Re-watching from the reported compacted revision succeeds.
            change_count = 0
            events_iterator, cancel = await etcd.watch(
                b'/watchcompation', start_revision=compacted_revision)
            async for event in events_iterator:
                assert event.key == b'/watchcompation'
                assert event.value == \
                    utils.to_bytes(str(change_count))
                # if cancel worked, we should not receive event 3
                assert event.value != utils.to_bytes('3')
                change_count += 1
                if change_count > 2:
                    await cancel()

        await watch_compacted_revision_test()
        t.join()
    @pytest.mark.asyncio
    async def test_watch_exception_during_watch(self, etcd):
        """A gRPC UNAVAILABLE error delivered by the watcher propagates to
        the events iterator as ConnectionFailedError."""
        await etcd.open()

        async def pass_exception_to_callback(callback):
            await asyncio.sleep(1)
            ex = self.MockedException(grpclib.const.Status.UNAVAILABLE)
            await callback(ex)

        task = None

        async def add_callback_mock(*args, **kwargs):
            # Mimic Watcher.add_callback: schedule delivery of the error to
            # the registered callback, return a watch id.
            nonlocal task
            callback = args[1]
            task = asyncio.get_event_loop().create_task(
                pass_exception_to_callback(callback))
            return 1

        watcher_mock = unittest.mock.MagicMock()
        watcher_mock.add_callback = add_callback_mock
        etcd.watcher = watcher_mock

        events_iterator, cancel = await etcd.watch('foo')
        with pytest.raises(aetcd3.exceptions.ConnectionFailedError):
            async for _ in events_iterator:
                _
        await task

    @pytest.mark.asyncio
    async def test_watch_timeout_on_establishment(self):
        """If the watch stream cannot be established within the client
        timeout, WatchTimedOut is raised."""
        async with aetcd3.client(timeout=3) as foo_etcd:
            @contextlib.asynccontextmanager
            async def slow_watch_mock(*args, **kwargs):
                # Far longer than the 3 s client timeout.
                await asyncio.sleep(40)
                yield 'foo'

            foo_etcd.watcher._watch_stub.Watch.open = slow_watch_mock  # noqa

            with pytest.raises(aetcd3.exceptions.WatchTimedOut):
                events_iterator, cancel = await foo_etcd.watch('foo')
                async for _ in events_iterator:
                    pass
    @pytest.mark.asyncio
    async def test_watch_prefix(self, etcd):
        """Watch a key prefix; events carry the concrete child keys."""
        def update_etcd(v):
            etcdctl('put', '/doot/watch/prefix/' + v, v)
            out = etcdctl('get', '/doot/watch/prefix/' + v)
            assert base64.b64decode(out['kvs'][0]['value']) == \
                utils.to_bytes(v)

        def update_key():
            # sleep to make watch can get the event
            time.sleep(3)
            update_etcd('0')
            time.sleep(1)
            update_etcd('1')
            time.sleep(1)
            update_etcd('2')
            time.sleep(1)
            update_etcd('3')
            time.sleep(1)

        t = threading.Thread(name='update_key_prefix', target=update_key)
        t.start()

        change_count = 0
        events_iterator, cancel = await etcd.watch_prefix(
            '/doot/watch/prefix/')
        async for event in events_iterator:
            assert event.key == \
                utils.to_bytes('/doot/watch/prefix/{}'.format(change_count))
            assert event.value == \
                utils.to_bytes(str(change_count))
            # if cancel worked, we should not receive event 3
            assert event.value != utils.to_bytes('3')
            change_count += 1
            if change_count > 2:
                # if cancel not work, we will block in this for-loop forever
                await cancel()
        t.join()
@pytest.mark.asyncio
async def test_sequential_watch_prefix_once(self, etcd):
try:
await etcd.watch_prefix_once('/doot/', 1)
except aetcd3.exceptions.WatchTimedOut:
print('timeout1')
pass
try:
await etcd.watch_prefix_once('/doot/', 1)
except aetcd3.exceptions.WatchTimedOut:
print('timeout2')
pass
try:
await etcd.watch_prefix_once('/doot/', 1)
except aetcd3.exceptions.WatchTimedOut:
print('timeout3')
pass
    @pytest.mark.asyncio
    async def test_transaction_success(self, etcd):
        # Compare matches -> the success op runs.
        etcdctl('put', '/doot/txn', 'dootdoot')
        await etcd.transaction(
            compare=[etcd.transactions.value('/doot/txn') == 'dootdoot'],
            success=[etcd.transactions.put('/doot/txn', 'success')],
            failure=[etcd.transactions.put('/doot/txn', 'failure')],
        )
        out = etcdctl('get', '/doot/txn')
        assert base64.b64decode(out['kvs'][0]['value']) == b'success'

    @pytest.mark.asyncio
    async def test_transaction_failure(self, etcd):
        # Compare fails -> the failure op runs.
        etcdctl('put', '/doot/txn', 'notdootdoot')
        await etcd.transaction(
            compare=[etcd.transactions.value('/doot/txn') == 'dootdoot'],
            success=[etcd.transactions.put('/doot/txn', 'success')],
            failure=[etcd.transactions.put('/doot/txn', 'failure')],
        )
        out = etcdctl('get', '/doot/txn')
        assert base64.b64decode(out['kvs'][0]['value']) == b'failure'

    def test_ops_to_requests(self, etcd):
        # Invalid operation objects and non-list input are rejected.
        with pytest.raises(Exception):
            etcd._ops_to_requests(['not_transaction_type'])
        with pytest.raises(TypeError):
            etcd._ops_to_requests(0)
    @pytest.mark.skipif(etcd_version < 'v3.3',
                        reason='requires etcd v3.3 or higher')
    @pytest.mark.asyncio
    async def test_nested_transactions(self, etcd):
        # A txn op inside the success branch must also be applied.
        await etcd.transaction(
            compare=[],
            success=[etcd.transactions.put('/doot/txn1', '1'),
                     etcd.transactions.txn(
                         compare=[],
                         success=[etcd.transactions.put('/doot/txn2', '2')],
                         failure=[])],
            failure=[],
        )
        value, _ = await etcd.get('/doot/txn1')
        assert value == b'1'
        value, _ = await etcd.get('/doot/txn2')
        assert value == b'2'

    @pytest.mark.asyncio
    async def test_replace_success(self, etcd):
        # replace() swaps the value only when the current value matches.
        await etcd.put('/doot/thing', 'toot')
        status = await etcd.replace('/doot/thing', 'toot', 'doot')
        v, _ = await etcd.get('/doot/thing')
        assert v == b'doot'
        assert status is True

    @pytest.mark.asyncio
    async def test_replace_fail(self, etcd):
        await etcd.put('/doot/thing', 'boot')
        status = await etcd.replace('/doot/thing', 'toot', 'doot')
        v, _ = await etcd.get('/doot/thing')
        assert v == b'boot'
        assert status is False
    @pytest.mark.asyncio
    async def test_get_prefix(self, etcd):
        for i in range(20):
            etcdctl('put', '/doot/range{}'.format(i), 'i am a range')
        for i in range(5):
            etcdctl('put', '/doot/notrange{}'.format(i), 'i am a not range')
        # Only the 20 keys under the requested prefix are returned.
        values = [p async for p in etcd.get_prefix('/doot/range')]
        assert len(values) == 20
        for value, _ in values:
            assert value == b'i am a range'

    @pytest.mark.asyncio
    async def test_get_prefix_keys_only(self, etcd):
        for i in range(20):
            etcdctl('put', '/doot/range{}'.format(i), 'i am a range')
        for i in range(5):
            etcdctl('put', '/doot/notrange{}'.format(i), 'i am a not range')
        # keys_only drops values but still returns metadata with the keys.
        values = [p async for p in etcd.get_prefix('/doot/range',
                                                   keys_only=True)]
        assert len(values) == 20
        for value, meta in values:
            assert meta.key.startswith(b'/doot/range')
            assert not value

    @pytest.mark.asyncio
    async def test_get_range(self, etcd):
        # Range end is exclusive: /doot/a .. /doot/o in, /doot/p and up out.
        for char in string.ascii_lowercase:
            if char < 'p':
                etcdctl('put', '/doot/' + char, 'i am in range')
            else:
                etcdctl('put', '/doot/' + char, 'i am not in range')
        values = [v async for v in etcd.get_range('/doot/a', '/doot/p')]
        assert len(values) == 15
        for value, _ in values:
            assert value == b'i am in range'

    @pytest.mark.asyncio
    async def test_all_not_found_error(self, etcd):
        # An empty keyspace yields an empty iterator, not an error.
        result = [x async for x in etcd.get_all()]
        assert not result

    @pytest.mark.asyncio
    async def test_range_not_found_error(self, etcd):
        for i in range(5):
            etcdctl('put', '/doot/notrange{}'.format(i), 'i am a not range')
        result = [p async for p in etcd.get_prefix('/doot/range')]
        assert not result

    @pytest.mark.asyncio
    async def test_get_all(self, etcd):
        for i in range(20):
            etcdctl('put', '/doot/range{}'.format(i), 'i am in all')
        for i in range(5):
            etcdctl('put', '/doot/notrange{}'.format(i), 'i am in all')
        values = [x async for x in etcd.get_all()]
        assert len(values) == 25
        for value, _ in values:
            assert value == b'i am in all'
@pytest.mark.asyncio
async def test_sort_order(self, etcd):
def remove_prefix(string, prefix):
return string[len(prefix):]
initial_keys = 'abcde'
initial_values = 'qwert'
for k, v in zip(initial_keys, initial_values):
etcdctl('put', '/doot/{}'.format(k), v)
keys = ''
async for value, meta in etcd.get_prefix('/doot', sort_order='ascend'):
keys += remove_prefix(meta.key.decode('utf-8'), '/doot/')
assert keys == initial_keys
reverse_keys = ''
async for value, meta in etcd.get_prefix('/doot',
sort_order='descend'):
reverse_keys += remove_prefix(meta.key.decode('utf-8'), '/doot/')
assert reverse_keys == ''.join(reversed(initial_keys))
    @pytest.mark.asyncio
    async def test_lease_grant(self, etcd):
        lease = await etcd.lease(1)
        assert isinstance(lease.ttl, int)
        assert isinstance(lease.id, int)

    @pytest.mark.asyncio
    async def test_lease_revoke(self, etcd):
        lease = await etcd.lease(1)
        await lease.revoke()

    @pytest.mark.skipif(etcd_version.startswith('v3.0'),
                        reason='requires etcd v3.1 or higher')
    @pytest.mark.asyncio
    async def test_lease_keys_empty(self, etcd):
        # A fresh lease has no attached keys.
        lease = await etcd.lease(1)
        assert (await lease.keys()) == []

    @pytest.mark.skipif(etcd_version.startswith('v3.0'),
                        reason='requires etcd v3.1 or higher')
    @pytest.mark.asyncio
    async def test_lease_single_key(self, etcd):
        lease = await etcd.lease(1)
        await etcd.put('/doot/lease_test', 'this is a lease', lease=lease)
        assert (await lease.keys()) == [b'/doot/lease_test']

    @pytest.mark.skipif(etcd_version.startswith('v3.0'),
                        reason='requires etcd v3.1 or higher')
    @pytest.mark.asyncio
    async def test_lease_expire(self, etcd):
        """A key attached to a lease disappears once the lease expires."""
        key = '/doot/lease_test_expire'
        lease = await etcd.lease(1)
        await etcd.put(key, 'this is a lease', lease=lease)
        assert (await lease.keys()) == [utils.to_bytes(key)]
        v, _ = await etcd.get(key)
        assert v == b'this is a lease'
        assert (await lease.remaining_ttl()) <= (await lease.granted_ttl())

        # wait for the lease to expire
        await asyncio.sleep((await lease.granted_ttl()) + 2)
        v, _ = await etcd.get(key)
        assert v is None
    @pytest.mark.asyncio
    async def test_member_list(self, etcd):
        # The test harness runs a 3-member cluster named pifpaf* — the
        # counts/prefixes below reflect that fixture, not etcd itself.
        assert len([m async for m in etcd.members()]) == 3
        async for member in etcd.members():
            assert member.name.startswith('pifpaf')
            for peer_url in member.peer_urls:
                assert peer_url.startswith('http://')
            for client_url in member.client_urls:
                assert client_url.startswith('http://')
            assert isinstance(member.id, int) is True
    @pytest.mark.asyncio
    async def test_lock_acquire(self, etcd):
        # A held lock cannot be re-acquired within the timeout.
        lock = etcd.lock('lock-1', ttl=10)
        assert (await lock.acquire()) is True
        assert (await etcd.get(lock.key))[0] is not None
        assert (await lock.acquire(timeout=0)) is False
        assert (await lock.acquire(timeout=1)) is False

    @pytest.mark.asyncio
    async def test_lock_release(self, etcd):
        # Releasing removes the lock key and allows re-acquisition.
        lock = etcd.lock('lock-2', ttl=10)
        assert (await lock.acquire()) is True
        assert (await etcd.get(lock.key))[0] is not None
        assert (await lock.release()) is True
        v, _ = await etcd.get(lock.key)
        assert v is None
        assert (await lock.acquire()) is True
        assert (await lock.release()) is True
        assert (await lock.acquire(timeout=None)) is True

    @pytest.mark.asyncio
    async def test_lock_expire(self, etcd):
        lock = etcd.lock('lock-3', ttl=3)
        assert (await lock.acquire()) is True
        assert (await etcd.get(lock.key))[0] is not None
        # wait for the lease to expire
        await asyncio.sleep(9)
        v, _ = await etcd.get(lock.key)
        assert v is None

    @pytest.mark.asyncio
    async def test_lock_refresh(self, etcd):
        lock = etcd.lock('lock-4', ttl=3)
        assert (await lock.acquire()) is True
        assert (await etcd.get(lock.key))[0] is not None
        # sleep for the same total time as test_lock_expire, but refresh each
        # second
        for _ in range(9):
            await asyncio.sleep(1)
            await lock.refresh()
        assert (await etcd.get(lock.key))[0] is not None

    @pytest.mark.asyncio
    async def test_lock_is_acquired(self, etcd):
        # Ownership tracking: only the current holder reports acquired.
        lock1 = etcd.lock('lock-5', ttl=2)
        assert (await lock1.is_acquired()) is False
        lock2 = etcd.lock('lock-5', ttl=2)
        await lock2.acquire()
        assert (await lock2.is_acquired()) is True
        await lock2.release()
        lock3 = etcd.lock('lock-5', ttl=2)
        await lock3.acquire()
        assert (await lock3.is_acquired()) is True
        assert (await lock2.is_acquired()) is False

    @pytest.mark.asyncio
    async def test_lock_context_manager(self, etcd):
        async with etcd.lock('lock-6', ttl=2) as lock:
            assert (await lock.is_acquired()) is True
        # Released automatically on context exit.
        assert (await lock.is_acquired()) is False

    @pytest.mark.asyncio
    async def test_lock_contended(self, etcd):
        # Acquiring a contended lock waits out the previous holder's TTL.
        lock1 = etcd.lock('lock-7', ttl=2)
        await lock1.acquire()
        lock2 = etcd.lock('lock-7', ttl=2)
        await lock2.acquire()
        assert (await lock1.is_acquired()) is False
        assert (await lock2.is_acquired()) is True

    @pytest.mark.asyncio
    async def test_lock_double_acquire_release(self, etcd):
        lock = etcd.lock('lock-8', ttl=10)
        assert (await lock.acquire(0)) is True
        assert (await lock.acquire(0)) is False
        assert (await lock.release()) is True

    @pytest.mark.asyncio
    async def test_lock_acquire_none(self, etcd):
        lock = etcd.lock('lock-9', ttl=10)
        assert (await lock.acquire(None)) is True
        # This will succeed after 10 seconds since the TTL will expire and the
        # lock is not refreshed
        assert (await lock.acquire(None)) is True
    @pytest.mark.asyncio
    async def test_internal_exception_on_internal_error(self, etcd):
        # gRPC INTERNAL maps to InternalServerError.
        await etcd.open()
        exception = self.MockedException(grpclib.const.Status.INTERNAL)
        kv_mock = unittest.mock.MagicMock()
        kv_mock.Range.side_effect = exception
        etcd.kvstub = kv_mock
        with pytest.raises(aetcd3.exceptions.InternalServerError):
            await etcd.get('foo')

    @pytest.mark.asyncio
    async def test_connection_failure_exception_on_connection_failure(self, etcd):
        # gRPC UNAVAILABLE maps to ConnectionFailedError.
        await etcd.open()
        exception = self.MockedException(grpclib.const.Status.UNAVAILABLE)
        kv_mock = unittest.mock.MagicMock()
        kv_mock.Range.side_effect = exception
        etcd.kvstub = kv_mock
        with pytest.raises(aetcd3.exceptions.ConnectionFailedError):
            await etcd.get('foo')

    @pytest.mark.asyncio
    async def test_connection_timeout_exception_on_connection_timeout(self, etcd):
        # gRPC DEADLINE_EXCEEDED maps to ConnectionTimeoutError.
        ex = self.MockedException(grpclib.const.Status.DEADLINE_EXCEEDED)

        class MockKvstub:
            async def Range(self, *args, **kwargs):  # noqa: N802
                raise ex

        etcd.kvstub = MockKvstub()
        with pytest.raises(aetcd3.exceptions.ConnectionTimeoutError):
            await etcd.get('foo')

    @pytest.mark.asyncio
    async def test_grpc_exception_on_unknown_code(self, etcd):
        # Status codes without a dedicated mapping propagate as GRPCError.
        exception = self.MockedException(grpclib.const.Status.DATA_LOSS)
        kv_mock = unittest.mock.MagicMock()
        kv_mock.Range.side_effect = exception
        etcd.kvstub = kv_mock
        try:
            await etcd.get('foo')
        except grpclib.exceptions.GRPCError:
            pass
        else:
            raise RuntimeError
    @pytest.mark.asyncio
    async def test_status_member(self, etcd):
        # status() must name a leader that is one of the cluster members.
        status = await etcd.status()
        assert isinstance(status.leader, aetcd3.members.Member) is True
        assert status.leader.id in [m.id async for m in etcd.members()]

    @pytest.mark.asyncio
    async def test_hash(self, etcd):
        assert isinstance((await etcd.hash()), int)

    @pytest.mark.asyncio
    async def test_snapshot(self, etcd):
        # A snapshot written by the client must be readable by etcdctl.
        with tempfile.NamedTemporaryFile() as f:
            await etcd.snapshot(f)
            f.flush()
            etcdctl('snapshot', 'status', f.name)
class TestAlarms(object):
    """Integration tests for etcd alarm management."""

    @pytest.fixture
    async def etcd(self):
        etcd = aetcd3.client()
        yield etcd
        # Disarm the cluster-wide alarm plus any per-member alarms the
        # test may have armed.
        await etcd.disarm_alarm()
        async for m in etcd.members():
            if m.active_alarms:
                await etcd.disarm_alarm(m.id)

    @pytest.mark.asyncio
    async def test_create_alarm_all_members(self, etcd):
        # member_id 0 means the alarm applies cluster-wide.
        alarms = await etcd.create_alarm()
        assert len(alarms) == 1
        assert alarms[0].member_id == 0
        assert alarms[0].alarm_type == rpc.NOSPACE

    @pytest.mark.asyncio
    async def test_create_alarm_specific_member(self, etcd):
        members = [m async for m in etcd.members()]
        a_member = members[0]
        alarms = await etcd.create_alarm(member_id=a_member.id)
        assert len(alarms) == 1
        assert alarms[0].member_id == a_member.id
        assert alarms[0].alarm_type == rpc.NOSPACE

    @pytest.mark.asyncio
    async def test_list_alarms(self, etcd):
        members = [m async for m in etcd.members()]
        a_member = members[0]
        await etcd.create_alarm()
        await etcd.create_alarm(member_id=a_member.id)
        possible_member_ids = [0, a_member.id]
        alarms = [a async for a in etcd.list_alarms()]
        assert len(alarms) == 2
        for alarm in alarms:
            # Each expected member id must appear exactly once.
            possible_member_ids.remove(alarm.member_id)
            assert alarm.alarm_type == rpc.NOSPACE
        assert possible_member_ids == []

    @pytest.mark.asyncio
    async def test_disarm_alarm(self, etcd):
        await etcd.create_alarm()
        assert len([a async for a in etcd.list_alarms()]) == 1
        await etcd.disarm_alarm()
        assert len([a async for a in etcd.list_alarms()]) == 0
class TestUtils(object):
    """Unit tests for aetcd3.utils helpers."""

    def test_prefix_range_end(self):
        # The range end for a prefix is the prefix with its last byte bumped.
        assert aetcd3.utils.prefix_range_end(b'foo') == b'fop'

    def test_to_bytes(self):
        # Both bytes and str inputs normalize to the same bytes value.
        for given in (b'doot', 'doot'):
            converted = aetcd3.utils.to_bytes(given)
            assert isinstance(converted, bytes) is True
            assert converted == b'doot'
class TestClient(object):
    """Unit tests for request building on an unconnected client."""

    @pytest.fixture
    def etcd(self):
        yield aetcd3.client()

    def test_sort_target(self, etcd):
        """Every supported sort_target string maps to its rpc enum."""
        key = 'key'.encode('utf-8')
        sort_target = {
            None: rpc.RangeRequest.KEY,
            'key': rpc.RangeRequest.KEY,
            'version': rpc.RangeRequest.VERSION,
            'create': rpc.RangeRequest.CREATE,
            'mod': rpc.RangeRequest.MOD,
            'value': rpc.RangeRequest.VALUE,
        }
        # Loop variable renamed from `input`, which shadowed the builtin.
        for given, expected in sort_target.items():
            range_request = etcd._build_get_range_request(key,
                                                          sort_target=given)
            assert range_request.sort_target == expected
        with pytest.raises(ValueError):
            etcd._build_get_range_request(key, sort_target='feelsbadman')

    def test_sort_order(self, etcd):
        """Every supported sort_order string maps to its rpc enum."""
        key = 'key'.encode('utf-8')
        sort_orders = {
            None: rpc.RangeRequest.NONE,
            'ascend': rpc.RangeRequest.ASCEND,
            'descend': rpc.RangeRequest.DESCEND,
        }
        for given, expected in sort_orders.items():
            range_request = etcd._build_get_range_request(key,
                                                          sort_order=given)
            assert range_request.sort_order == expected
        with pytest.raises(ValueError):
            etcd._build_get_range_request(key, sort_order='feelsbadman')
    @pytest.mark.asyncio
    async def test_secure_channel(self):
        # Full mTLS configuration: CA plus client cert and key.
        client = aetcd3.client(
            ca_cert='tests/ca.crt',
            cert_key='tests/client.key',
            cert_cert='tests/client.crt',
        )
        await client.open()
        assert client.uses_secure_channel is True

    @pytest.mark.asyncio
    async def test_secure_channel_ca_cert_only(self):
        # A single PEM bundle (cert + CA + key) passed as ca_cert also
        # enables TLS.
        with tempfile.NamedTemporaryFile() as certfile_bundle:
            for fname in ('client.crt', 'ca.crt', 'client.key'):
                with open(f'tests/{fname}', 'r+b') as f:
                    certfile_bundle.write(f.read())
            certfile_bundle.flush()
            client = aetcd3.client(
                ca_cert=certfile_bundle.name,
                cert_key=None,
                cert_cert=None,
            )
            await client.open()
            assert client.uses_secure_channel is True

    def test_secure_channel_ca_cert_and_key_raise_exception(self):
        # Supplying only one of cert_key / cert_cert is rejected.
        with pytest.raises(ValueError):
            aetcd3.client(
                ca_cert='tests/ca.crt',
                cert_key='tests/client.crt',
                cert_cert=None,
            )
        with pytest.raises(ValueError):
            aetcd3.client(
                ca_cert='tests/ca.crt',
                cert_key=None,
                cert_cert='tests/client.crt',
            )
    @pytest.mark.asyncio
    async def test_compact(self, etcd):
        # Compacting the same revision twice is a gRPC error.
        await etcd.put('/foo', 'x')
        _, meta = await etcd.get('/foo')
        revision = meta.mod_revision
        await etcd.compact(revision)
        with pytest.raises(grpclib.exceptions.GRPCError):
            await etcd.compact(revision)

    @pytest.mark.asyncio
    async def test_channel_with_no_cert(self):
        client = aetcd3.client(
            ca_cert=None,
            cert_key=None,
            cert_cert=None,
        )
        await client.open()
        assert client.uses_secure_channel is False

    @pytest.mark.asyncio
    async def test_user_pwd_auth(self):
        with self._enabled_auth_in_etcd():
            # Create a client using username and password auth
            client = aetcd3.client(
                user='root',
                password='pwd',
            )
            await client.get('probably-invalid-key')

    def test_user_or_pwd_auth_raises_exception(self):
        # user and password must be supplied together.
        with pytest.raises(Exception, match='both user and password'):
            aetcd3.client(user='usr')
        with pytest.raises(Exception, match='both user and password'):
            aetcd3.client(password='pwd')

    @staticmethod
    @contextlib.contextmanager
    def _enabled_auth_in_etcd():
        """Temporarily enable etcd auth with a root:pwd user; always
        restore the unauthenticated state on exit."""
        subprocess.call(['etcdctl', '-w', 'json', 'user', 'add', 'root:pwd'])
        subprocess.call(['etcdctl', 'auth', 'enable'])
        try:
            yield
        finally:
            subprocess.call(['etcdctl',
                             '-w', 'json', '--user', 'root:pwd',
                             'auth', 'disable'])
            subprocess.call(['etcdctl', 'user', 'delete', 'root'])
            subprocess.call(['etcdctl', 'role', 'delete', 'root'])
class TestCompares(object):
    """Verify comparison operators on transaction targets map to rpc ops."""

    @staticmethod
    def _check_ops(build, operands):
        """Apply ==, !=, <, > with *operands*; return the final compare."""
        eq_val, ne_val, lt_val, gt_val = operands
        assert (build() == eq_val).op == rpc.Compare.EQUAL
        assert (build() != ne_val).op == rpc.Compare.NOT_EQUAL
        assert (build() < lt_val).op == rpc.Compare.LESS
        compare = build() > gt_val
        assert compare.op == rpc.Compare.GREATER
        return compare

    def test_compare_version(self):
        tx = aetcd3.Transactions()
        compare = self._check_ops(lambda: tx.version('key'), (1, 2, 91, 92))
        assert compare.build_message().target == \
            rpc.Compare.VERSION

    def test_compare_value(self):
        tx = aetcd3.Transactions()
        compare = self._check_ops(lambda: tx.value('key'), ('b', 'b', 'b', 'b'))
        assert compare.build_message().target == rpc.Compare.VALUE

    def test_compare_mod(self):
        tx = aetcd3.Transactions()
        compare = self._check_ops(lambda: tx.mod('key'), (-100, -100, 19, 21))
        assert compare.build_message().target == rpc.Compare.MOD

    def test_compare_create(self):
        tx = aetcd3.Transactions()
        compare = self._check_ops(lambda: tx.create('key'), (10, 10, 155, -12))
        assert compare.build_message().target == rpc.Compare.CREATE
|
tree.py | import os, threading
import tkinter.ttk as ttk
import tkinter as tk
class Tree(ttk.Treeview):
    """File-explorer tree widget listing directories and files.

    Directories are populated lazily: each unexpanded directory gets a
    'dummy' child so Tk renders an expand arrow; the real children are
    filled in when <<TreeviewOpen>> fires.
    """

    def __init__(self, master, double_click=None, single_click=None, startpath=None, *args, **kwargs):
        super().__init__(master, *args, **kwargs)
        self.master = master
        # assumes master exposes a `base` application object — TODO confirm
        self.base = master.base
        self.double_click = double_click    # callback bound to double-click
        self.single_click = single_click    # callback invoked for file selection
        self.path = startpath               # root directory currently shown

        # Inline base64 PNG icons so no external image files are needed.
        self.file_icn = tk.PhotoImage(data="""
iVBORw0KGgoAAAANSUhEUgAAAA0AAAARCAYAAAAG/yacAAAACXBIWXMAAA7DAAAOwwHHb6hkAAAAGXRFWHRTb2Z0d2FyZQB
3d3cuaW5rc2NhcGUub3Jnm+48GgAAAUNJREFUKJHVzzFLQmEUxvH/uaipF0EbW1rEsWyLJCirKShszIKMoqnFrU9Re+YSak
MtTU3XhnCLhj5GIEJeu5re06KScpXWnunlvM+P876SuvmIBEPhK1UyQIzRdPSbleqR+fp7aASC4UtVjj0AQED81DYr9tIIE
sgAqMuulTXFypoCmgNQoQj4XFfvtir23BABs0Cnemg+jq8R9EWVU5B4zxUr/fA1P0AArsfTAKgemEWEM9AEjr6vlpsLxqQy
gKrEAax9syDKOWjEr1LzeZUFw1EUgYt02d5G6SogIh1VNT1Rr9N+MvyBGsIyyuLwwnVdRPBEz7lYA0iNzzdK9huQnPqnSfk
nSP5S1n7fAOrAzPqtvTMNrJWaSSAB1CVdsq+Bk/7CT9CuhxEg2j8XfG2nlQ+GwoYqe6BRDzBIA7hvO638D0khbw04aabsAA
AAAElFTkSuQmCC""")
        self.folder_icn = tk.PhotoImage(data="""
iVBORw0KGgoAAAANSUhEUgAAABAAAAAMCAYAAABr5z2BAAAACXBIWXMAAA7DAAAOwwHHb6hkAAAAGXRFWHRTb2Z0d2FyZQB
3d3cuaW5rc2NhcGUub3Jnm+48GgAAAJBJREFUKJHdzTEKwkAUhOF/loCFRbAVr+IhLAWLCPaW3sFGPIOm1Bt4hxSSEwRs7Z
UdayErmnROO++bp93htJK0BUa8pxEq1ovZhQ/R/ni+G/LWEjW2y4Stx4NnmUU7l9R6YTxBbFLfb49sGlL4m9ieh84aAA17D
sCfDLiHdwDqrlpwDTHGAqiA+IONQIW0fAFkySdEGFdeCgAAAABJRU5ErkJggg==""")

        # Hidden columns hold each item's absolute path and its kind
        # ('file' | 'directory'); only the tree column is displayed.
        self.config(
            show="tree", columns=("fullpath", "type"), displaycolumns='')

        if startpath:
            self.open_directory(startpath)
        else:
            self.insert('', 0, text='You have not yet opened a folder.')

        self.bind('<Double-Button-1>', self.double_click)
        self.bind("<<TreeviewSelect>>", self.check_single_click)
        self.bind("<<TreeviewOpen>>", self.update_tree)

    def check_single_click(self, _):
        # Files fire the single-click callback; directories toggle open/closed.
        if self.item_type(self.focus()) == 'file':
            if self.single_click:
                self.single_click(self.item_fullpath(self.focus()))
        else:
            self.toggle_node(self.focus())

    def is_open(self, node):
        # Tk 'open' state of the node (truthy when expanded).
        return self.item(node, 'open')

    def toggle_node(self, node):
        if self.item_type(node) == 'directory':
            if self.is_open(node):
                self.item(node, open=False)
            else:
                self.item(node, open=True)
                # Refresh contents when expanding.
                self.update_node(node)

    def clear_node(self, node):
        # Remove all children of the given node.
        self.delete(*self.get_children(node))

    def clear_tree(self):
        self.clear_node('')

    def fill_node(self, node, path):
        """Replace node's children with the listing of `path`:
        directories first, then files, each group sorted by name."""
        self.clear_node(node)
        items = [os.path.join(path, p) for p in os.listdir(path)]
        directories = sorted([p for p in items if os.path.isdir(p)])
        files = sorted([p for p in items if os.path.isfile(p)])
        for p in directories:
            name = os.path.split(p)[1]
            oid = self.insert(node, tk.END, text=f" {name}", values=[p, 'directory'], image=self.folder_icn)
            # Dummy child makes Tk show an expand arrow before real fill.
            self.insert(oid, 0, text='dummy')
        for p in files:
            if os.path.isfile(p):
                name = os.path.split(p)[1]
                oid = self.insert(node, tk.END, text=f" {name}", values=[p, 'file'], image=self.file_icn)

    def update_node(self, node):
        # Only directories get (re)filled.
        if self.set(node, "type") != 'directory':
            return
        path = self.set(node, "fullpath")
        self.fill_node(node, path)

    def update_tree(self, *_):
        self.update_node(self.focus())

    def create_root(self, path):
        self.clear_tree()
        self.fill_node('', path)

    def item_type(self, item):
        return self.set(item, "type")

    def item_fullpath(self, item):
        return self.set(item, "fullpath")

    def open_directory(self, path):
        self.path = os.path.abspath(path)
        # NOTE(review): this calls Treeview methods from a worker thread;
        # Tkinter is generally not thread-safe — confirm this is intended.
        threading.Thread(target=self.create_root, args=[self.path]).start()

    def refresh_tree(self):
        self.open_directory(self.path)

    def collapse_all(self):
        for node in self.get_children():
            self.item(node, open=False)

    # def add_node(self):
    #     name = enterbox("Enter file name")
    #     selected = self.focus() or ''
    #     # parent = self.parent(selected)
    #     # if parent == '':
    #     #     parent = self.path
    #     path = os.path.join(self.item_fullpath(selected), name)
    #     # fullpath = os.path.join(parent_path, name)
    #     with open(path, 'w') as f:
    #         f.write("")
    #     self.update_node(selected)
|
test_InfluxDBClient.py | import http.server
import json
import os
import threading
import unittest
from influxdb_client import InfluxDBClient, Point
from influxdb_client.client.write_api import SYNCHRONOUS, ASYNCHRONOUS, WriteOptions, WriteType
class InfluxDBClientTest(unittest.TestCase):
def tearDown(self) -> None:
if self.client:
self.client.close()
if hasattr(self, 'httpd'):
self.httpd.shutdown()
if hasattr(self, 'httpd_thread'):
self.httpd_thread.join()
    def test_default_conf(self):
        # A default connection pool size must always be configured.
        self.client = InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org")
        self.assertIsNotNone(self.client.api_client.configuration.connection_pool_maxsize)

    def test_TrailingSlashInUrl(self):
        # A trailing slash in the URL is normalized away.
        self.client = InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org")
        self.assertEqual('http://localhost:8086', self.client.api_client.configuration.host)

        self.client = InfluxDBClient(url="http://localhost:8086/", token="my-token", org="my-org")
        self.assertEqual('http://localhost:8086', self.client.api_client.configuration.host)

    def test_ConnectToSelfSignedServer(self):
        # verify_ssl=False allows connecting to the self-signed test server.
        self._start_http_server()

        self.client = InfluxDBClient(f"https://localhost:{self.httpd.server_address[1]}",
                                     token="my-token", verify_ssl=False)
        health = self.client.health()

        self.assertEqual(health.message, 'ready for queries and writes')
        self.assertEqual(health.status, "pass")
        self.assertEqual(health.name, "influxdb")

    def test_certificate_file(self):
        # Trusting the server's own PEM also validates the TLS connection.
        self._start_http_server()

        self.client = InfluxDBClient(f"https://localhost:{self.httpd.server_address[1]}",
                                     token="my-token", verify_ssl=True,
                                     ssl_ca_cert=f'{os.path.dirname(__file__)}/server.pem')
        health = self.client.health()

        self.assertEqual(health.message, 'ready for queries and writes')
        self.assertEqual(health.status, "pass")
        self.assertEqual(health.name, "influxdb")
    def test_init_from_ini_file(self):
        self.client = InfluxDBClient.from_config_file(f'{os.path.dirname(__file__)}/config.ini')
        self.assertConfig()

    def test_init_from_toml_file(self):
        self.client = InfluxDBClient.from_config_file(f'{os.path.dirname(__file__)}/config.toml')
        self.assertConfig()

    def assertConfig(self):
        """Shared assertions for clients built from the test config files."""
        self.assertEqual("http://localhost:8086", self.client.url)
        self.assertEqual("my-org", self.client.org)
        self.assertEqual("my-token", self.client.token)
        self.assertEqual(6000, self.client.api_client.configuration.timeout)
        self.assertEqual(3, len(self.client.default_tags))
        self.assertEqual("132-987-655", self.client.default_tags["id"])
        self.assertEqual("California Miner", self.client.default_tags["customer"])
        self.assertEqual("${env.data_center}", self.client.default_tags["data_center"])
        self.assertEqual(55, self.client.api_client.configuration.connection_pool_maxsize)
        self.assertEqual(False, self.client.api_client.configuration.auth_basic)
        self.assertEqual(["query", "operator"], self.client.profilers)

    def test_init_from_file_proxy(self):
        self.client = InfluxDBClient.from_config_file(f'{os.path.dirname(__file__)}/config-enabled-proxy.ini')
        self.assertConfig()
        self.assertEqual("http://proxy.domain.org:8080", self.client.api_client.configuration.proxy)

    def test_init_from_file_ssl_default(self):
        # verify_ssl defaults to True when the config does not set it.
        self.client = InfluxDBClient.from_config_file(f'{os.path.dirname(__file__)}/config.ini')
        self.assertTrue(self.client.api_client.configuration.verify_ssl)

    def test_init_from_file_ssl(self):
        self.client = InfluxDBClient.from_config_file(f'{os.path.dirname(__file__)}/config-disabled-ssl.ini')
        self.assertFalse(self.client.api_client.configuration.verify_ssl)
def test_init_from_env_ssl_default(self):
if os.getenv("INFLUXDB_V2_VERIFY_SSL"):
del os.environ["INFLUXDB_V2_VERIFY_SSL"]
self.client = InfluxDBClient.from_env_properties()
self.assertTrue(self.client.api_client.configuration.verify_ssl)
def test_init_from_env_ssl(self):
os.environ["INFLUXDB_V2_SSL_CA_CERT"] = "/my/custom/path"
self.client = InfluxDBClient.from_env_properties()
self.assertEqual("/my/custom/path", self.client.api_client.configuration.ssl_ca_cert)
def test_init_from_file_ssl_ca_cert_default(self):
self.client = InfluxDBClient.from_config_file(f'{os.path.dirname(__file__)}/config.ini')
self.assertIsNone(self.client.api_client.configuration.ssl_ca_cert)
def test_init_from_file_ssl_ca_cert(self):
self.client = InfluxDBClient.from_config_file(f'{os.path.dirname(__file__)}/config-ssl-ca-cert.ini')
self.assertEqual("/path/to/my/cert", self.client.api_client.configuration.ssl_ca_cert)
def test_init_from_env_ssl_ca_cert_default(self):
if os.getenv("INFLUXDB_V2_SSL_CA_CERT"):
del os.environ["INFLUXDB_V2_SSL_CA_CERT"]
self.client = InfluxDBClient.from_env_properties()
self.assertIsNone(self.client.api_client.configuration.ssl_ca_cert)
def test_init_from_env_ssl_ca_cert(self):
os.environ["INFLUXDB_V2_SSL_CA_CERT"] = "/my/custom/path/to/cert"
self.client = InfluxDBClient.from_env_properties()
self.assertEqual("/my/custom/path/to/cert", self.client.api_client.configuration.ssl_ca_cert)
def test_init_from_env_connection_pool_maxsize(self):
os.environ["INFLUXDB_V2_CONNECTION_POOL_MAXSIZE"] = "29"
self.client = InfluxDBClient.from_env_properties()
self.assertEqual(29, self.client.api_client.configuration.connection_pool_maxsize)
def _start_http_server(self):
    """Start a local HTTPS server (self-signed cert) on an ephemeral port,
    serving in a background thread. Sets ``self.httpd`` and ``self.httpd_thread``.
    """
    import http.server
    import ssl
    # Disable unverified HTTPS request warnings from the test's client side.
    import urllib3
    urllib3.disable_warnings()
    # Configure HTTP server; port 0 lets the OS pick a free port.
    self.httpd = http.server.HTTPServer(('localhost', 0), ServerWithSelfSingedSSL)
    # ``ssl.wrap_socket`` was deprecated in 3.7 and removed in Python 3.12;
    # use an explicit server-side SSLContext instead.
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    context.load_cert_chain(certfile=f'{os.path.dirname(__file__)}/server.pem')
    self.httpd.socket = context.wrap_socket(self.httpd.socket, server_side=True)
    # Start server in the background so the test can issue requests against it.
    self.httpd_thread = threading.Thread(target=self.httpd.serve_forever)
    self.httpd_thread.start()
def test_write_context_manager(self):
    """Leaving the context managers must dispose the write API and close the client."""
    with InfluxDBClient.from_env_properties(self.debug) as self.client:
        api_client = self.client.api_client
        with self.client.write_api(write_options=WriteOptions(write_type=WriteType.batching)) as write_api:
            write_api_test = write_api
            write_api.write(bucket="my-bucket",
                            record=Point("h2o_feet")
                            .tag("location", "coyote_creek")
                            .field("level water_level", 5.0))
            # while the write_api context is open its batching internals exist
            self.assertIsNotNone(write_api._subject)
            self.assertIsNotNone(write_api._disposable)

        # exiting the write_api context disposes the subject/disposable
        self.assertIsNone(write_api_test._subject)
        self.assertIsNone(write_api_test._disposable)

        self.assertIsNotNone(self.client.api_client)
        self.assertIsNotNone(self.client.api_client.rest_client.pool_manager)

    # exiting the client context releases the pool and drops the api client
    self.assertIsNone(api_client._pool)
    self.assertIsNone(self.client.api_client)
class ServerWithSelfSingedSSL(http.server.SimpleHTTPRequestHandler):
    """Minimal request handler that mimics an InfluxDB ``/health`` response."""

    def _set_headers(self):
        # Reply 200 with a JSON content type.
        self.send_response(200)
        self.send_header('Content-type', 'application/json')
        self.end_headers()

    def do_GET(self):
        """Answer every GET with a canned healthy-status JSON body."""
        self._set_headers()
        payload = dict(name="influxdb", message="ready for queries and writes", status="pass", checks=[], version="2.0.0",
                       commit="abcdefgh")
        self.wfile.write(json.dumps(payload).encode('utf-8'))
|
"""Run the client on N processes."""
import os
from multiprocessing import Process

import single_client
from utils.helpers import str_to_int

# Frame-id range every worker processes (LAST_FRAME_ID itself is excluded).
FF_ID = str_to_int(os.environ['FIRST_FRAME_ID'])
LF_ID = str_to_int(os.environ['LAST_FRAME_ID'])
F_IDS = list(range(FF_ID, LF_ID))

PROCESSES_COUNT = str_to_int(os.environ['PROCESSES_COUNT'])
assert PROCESSES_COUNT > 0, f"The processes count must be > 0. Received {PROCESSES_COUNT=}. " \
                            f"Check value provided in .env file."

if __name__ == "__main__":
    # Create all workers first, then start them.
    processes = [Process(target=single_client.run, args=(F_IDS, ))
                 for _ in range(PROCESSES_COUNT)]
    for proc in processes:
        proc.start()
    # Bug fix: the original never joined the workers, so the parent could
    # exit while children were still running.
    for proc in processes:
        proc.join()
cli.py | # -*- coding: utf-8 -*-
"""
flask.cli
~~~~~~~~~
A simple command line application to run flask apps.
:copyright: © 2010 by the Pallets team.
:license: BSD, see LICENSE for more details.
"""
from __future__ import print_function
import ast
import inspect
import os
import platform
import re
import ssl
import sys
import traceback
from functools import update_wrapper
from operator import attrgetter
from threading import Lock, Thread
import click
from werkzeug.utils import import_string
from . import __version__
from ._compat import getargspec, itervalues, reraise, text_type
from .globals import current_app
from .helpers import get_debug_flag, get_env, get_load_dotenv
try:
import dotenv
except ImportError:
dotenv = None
class NoAppException(click.UsageError):
    """Raised if an application cannot be found or loaded."""
    # Subclasses click.UsageError so the CLI prints it as a usage message.
def find_best_app(script_info, module):
    """Given a module instance this tries to find the best possible
    application in the module or raises an exception.

    :param script_info: ``ScriptInfo`` instance forwarded to app factories.
    :param module: the imported module to search for an app in.
    :raises NoAppException: if no app (or more than one) could be found.
    """
    from . import Flask

    # Search for the most common names first.
    for attr_name in ("app", "application"):
        app = getattr(module, attr_name, None)

        if isinstance(app, Flask):
            return app

    # Otherwise find the only object that is a Flask instance.
    matches = [v for v in itervalues(module.__dict__) if isinstance(v, Flask)]

    if len(matches) == 1:
        return matches[0]
    elif len(matches) > 1:
        raise NoAppException(
            'Detected multiple Flask applications in module "{module}". Use '
            '"FLASK_APP={module}:name" to specify the correct '
            "one.".format(module=module.__name__)
        )

    # Search for app factory functions.
    for attr_name in ("create_app", "make_app"):
        app_factory = getattr(module, attr_name, None)

        if inspect.isfunction(app_factory):
            try:
                app = call_factory(script_info, app_factory)

                if isinstance(app, Flask):
                    return app
            except TypeError:
                # only swallow the TypeError if the *call* failed, not if the
                # factory itself raised one from inside
                if not _called_with_wrong_args(app_factory):
                    raise

                raise NoAppException(
                    'Detected factory "{factory}" in module "{module}", but '
                    "could not call it without arguments. Use "
                    "\"FLASK_APP='{module}:{factory}(args)'\" to specify "
                    "arguments.".format(factory=attr_name, module=module.__name__)
                )

    raise NoAppException(
        'Failed to find Flask application or factory in module "{module}". '
        # bug fix: the closing quote after ":name" was missing
        'Use "FLASK_APP={module}:name" to specify one.'.format(module=module.__name__)
    )
def call_factory(script_info, app_factory, arguments=()):
    """Takes an app factory, a ``script_info`` object and optionally a tuple
    of arguments. Checks for the existence of a script_info argument and calls
    the app_factory depending on that and the arguments provided.

    :param script_info: ``ScriptInfo`` object to pass to factories that accept it.
    :param app_factory: the callable that creates the application.
    :param arguments: extra positional arguments for the factory.
    """
    args_spec = getargspec(app_factory)
    arg_names = args_spec.args
    arg_defaults = args_spec.defaults

    # factory explicitly declares a ``script_info`` parameter: pass it by keyword
    if "script_info" in arg_names:
        return app_factory(*arguments, script_info=script_info)
    elif arguments:
        return app_factory(*arguments)
    elif not arguments and len(arg_names) == 1 and arg_defaults is None:
        # exactly one required positional argument: assume it wants script_info
        return app_factory(script_info)

    return app_factory()
def _called_with_wrong_args(factory):
"""Check whether calling a function raised a ``TypeError`` because
the call failed or because something in the factory raised the
error.
:param factory: the factory function that was called
:return: true if the call failed
"""
tb = sys.exc_info()[2]
try:
while tb is not None:
if tb.tb_frame.f_code is factory.__code__:
# in the factory, it was called successfully
return False
tb = tb.tb_next
# didn't reach the factory
return True
finally:
del tb
def find_app_by_string(script_info, module, app_name):
    """Checks if the given string is a variable name or a function. If it is a
    function, it checks for specified arguments and whether it takes a
    ``script_info`` argument and calls the function with the appropriate
    arguments.

    :param app_name: e.g. ``"app"`` or ``"create_app('dev')"``.
    :raises NoAppException: if the name is invalid, missing, or does not
        yield a Flask application.
    """
    from flask import Flask

    # name, optionally followed by a parenthesized argument list
    match = re.match(r"^ *([^ ()]+) *(?:\((.*?) *,? *\))? *$", app_name)

    if not match:
        raise NoAppException(
            '"{name}" is not a valid variable name or function '
            "expression.".format(name=app_name)
        )

    name, args = match.groups()

    try:
        attr = getattr(module, name)
    except AttributeError as e:
        raise NoAppException(e.args[0])

    if inspect.isfunction(attr):
        if args:
            try:
                # parse the literal arguments as a Python tuple
                args = ast.literal_eval("({args},)".format(args=args))
            except (ValueError, SyntaxError) as e:
                raise NoAppException(
                    "Could not parse the arguments in "
                    '"{app_name}".'.format(e=e, app_name=app_name)
                )
        else:
            args = ()

        try:
            app = call_factory(script_info, attr, args)
        except TypeError as e:
            # only translate the TypeError when the *call* failed
            if not _called_with_wrong_args(attr):
                raise

            raise NoAppException(
                '{e}\nThe factory "{app_name}" in module "{module}" could not '
                "be called with the specified arguments.".format(
                    e=e, app_name=app_name, module=module.__name__
                )
            )
    else:
        app = attr

    if isinstance(app, Flask):
        return app

    raise NoAppException(
        "A valid Flask application was not obtained from "
        '"{module}:{app_name}".'.format(module=module.__name__, app_name=app_name)
    )
def prepare_import(path):
    """Given a filename this will try to calculate the python path, add it
    to the search path and return the actual module name that is expected.

    :param path: path to a Python file or package directory.
    :return: the dotted module name to import.
    """
    path = os.path.realpath(path)

    fname, ext = os.path.splitext(path)
    if ext == ".py":
        path = fname

    if os.path.basename(path) == "__init__":
        path = os.path.dirname(path)

    module_name = []

    # move up until outside package structure (no __init__.py)
    while True:
        path, name = os.path.split(path)
        module_name.append(name)

        if not os.path.exists(os.path.join(path, "__init__.py")):
            break

    if sys.path[0] != path:
        sys.path.insert(0, path)

    # Bug fix: names were collected innermost-first while walking up, so the
    # list must be reversed; ``[::]`` only copied it, producing e.g.
    # "app.pkg" instead of "pkg.app" for packaged modules.
    return ".".join(module_name[::-1])
def locate_app(script_info, module_name, app_name, raise_if_not_found=True):
    """Import ``module_name`` and locate an application in it.

    :param app_name: explicit app expression, or None to auto-detect.
    :param raise_if_not_found: when False, silently return None if the
        module itself cannot be imported.
    """
    __traceback_hide__ = True  # hide this frame in werkzeug debugger tracebacks

    try:
        __import__(module_name)
    except ImportError:
        # Reraise the ImportError if it occurred within the imported module.
        # Determine this by checking whether the trace has a depth > 1.
        if sys.exc_info()[-1].tb_next:
            raise NoAppException(
                'While importing "{name}", an ImportError was raised:'
                "\n\n{tb}".format(name=module_name, tb=traceback.format_exc())
            )
        elif raise_if_not_found:
            raise NoAppException('Could not import "{name}".'.format(name=module_name))
        else:
            return

    module = sys.modules[module_name]

    if app_name is None:
        return find_best_app(script_info, module)
    else:
        return find_app_by_string(script_info, module, app_name)
def get_version(ctx, param, value):
    """Click callback for ``--version``: print version info and exit.

    Does nothing during resilient parsing (e.g. completion) or when the
    flag was not passed.
    """
    if not value or ctx.resilient_parsing:
        return

    import werkzeug

    message = "Python %(python)s\n" "Flask %(flask)s\n" "Werkzeug %(werkzeug)s"
    click.echo(
        message
        % {
            "python": platform.python_version(),
            "flask": __version__,
            # Bug fix: Werkzeug exposes its version as the ``__version__``
            # string attribute; ``werkzeug.version()`` would raise.
            "werkzeug": werkzeug.__version__,
        },
        color=ctx.color,
    )
    ctx.exit()
# Reusable eager ``--version`` option attached to the top-level CLI group;
# eager so it runs before any command resolution.
version_option = click.Option(
    ["--version"],
    help="Show the flask version",
    expose_value=False,
    callback=get_version,
    is_flag=True,
    is_eager=True,
)
class DispatchingApp(object):
    """Special application that dispatches to a Flask application which
    is imported by name in a background thread. If an error happens
    it is recorded and shown as part of the WSGI handling which in case
    of the Werkzeug debugger means that it shows up in the browser.
    """

    def __init__(self, loader, use_eager_loading=False):
        #: callable that imports and returns the real application
        self.loader = loader
        self._app = None
        self._lock = Lock()
        #: sys.exc_info() tuple captured by the background loader, if any
        self._bg_loading_exc_info = None
        if use_eager_loading:
            self._load_unlocked()
        else:
            self._load_in_background()

    def _load_in_background(self):
        # Import the app on a worker thread so the server can start serving
        # (and show loader errors in the browser) immediately.
        def _load_app():
            __traceback_hide__ = True
            with self._lock:
                try:
                    self._load_unlocked()
                except Exception:
                    # remember the failure; it is re-raised on the next request
                    self._bg_loading_exc_info = sys.exc_info()

        t = Thread(target=_load_app, args=())
        t.start()

    def _flush_bg_loading_exception(self):
        # Re-raise (once) an exception captured by the background loader.
        __traceback_hide__ = True
        exc_info = self._bg_loading_exc_info
        if exc_info is not None:
            self._bg_loading_exc_info = None
            reraise(*exc_info)

    def _load_unlocked(self):
        # Caller must hold self._lock (or be single-threaded eager loading).
        __traceback_hide__ = True
        self._app = rv = self.loader()
        self._bg_loading_exc_info = None
        return rv

    def __call__(self, environ, start_response):
        """WSGI entry point: dispatch to the loaded app, loading it on
        first use if the background load has not finished yet."""
        __traceback_hide__ = True
        if self._app is not None:
            return self._app(environ, start_response)
        self._flush_bg_loading_exception()
        with self._lock:
            if self._app is not None:
                rv = self._app
            else:
                rv = self._load_unlocked()
        return rv(environ, start_response)
class ScriptInfo(object):
    """Helper object to deal with Flask applications. This is usually not
    necessary to interface with as it's used internally in the dispatching
    to click. In future versions of Flask this object will most likely play
    a bigger role. Typically it's created automatically by the
    :class:`FlaskGroup` but you can also manually create it and pass it
    onwards as click object.
    """

    def __init__(self, app_import_path=None, create_app=None, set_debug_flag=True):
        #: Optionally the import path for the Flask application.
        self.app_import_path = app_import_path or os.environ.get("FLASK_APP")
        #: Optionally a function that is passed the script info to create
        #: the instance of the application.
        self.create_app = create_app
        #: A dictionary with arbitrary data that can be associated with
        #: this script info.
        self.data = {}
        self.set_debug_flag = set_debug_flag
        # cache of the loaded application (see load_app)
        self._loaded_app = None

    def load_app(self):
        """Loads the Flask app (if not yet loaded) and returns it. Calling
        this multiple times will just result in the already loaded app to
        be returned.

        :raises NoAppException: if no application could be located.
        """
        __traceback_hide__ = True

        if self._loaded_app is not None:
            return self._loaded_app

        app = None

        if self.create_app is not None:
            # explicit factory wins over import-path discovery
            app = call_factory(self, self.create_app)
        else:
            if self.app_import_path:
                # split "module:name", ignoring ":" that is part of a path
                path, name = (
                    re.split(r":(?![\\/])", self.app_import_path, 1) + [None]
                )[:2]
                import_name = prepare_import(path)
                app = locate_app(self, import_name, name)
            else:
                # fall back to conventional filenames in the current directory
                for path in ("wsgi.py", "app.py"):
                    import_name = prepare_import(path)
                    app = locate_app(self, import_name, None, raise_if_not_found=False)

                    if app:
                        break

        if not app:
            raise NoAppException(
                "Could not locate a Flask application. You did not provide "
                'the "FLASK_APP" environment variable, and a "wsgi.py" or '
                '"app.py" module was not found in the current directory.'
            )

        if self.set_debug_flag:
            # Update the app's debug flag through the descriptor so that
            # other values repopulate as well.
            app.debug = get_debug_flag()

        self._loaded_app = app
        return app
# Decorator that injects the shared ScriptInfo instance into command callbacks.
pass_script_info = click.make_pass_decorator(ScriptInfo, ensure=True)
def with_appcontext(f):
    """Wraps a callback so that it's guaranteed to be executed with the
    script's application context. If callbacks are registered directly
    to the ``app.cli`` object then they are wrapped with this function
    by default unless it's disabled.

    :param f: the click command callback to wrap.
    """

    @click.pass_context
    def decorator(__ctx, *args, **kwargs):
        # load the app lazily and push its application context for the call
        with __ctx.ensure_object(ScriptInfo).load_app().app_context():
            return __ctx.invoke(f, *args, **kwargs)

    return update_wrapper(decorator, f)
class AppGroup(click.Group):
    """This works similar to a regular click :class:`~click.Group` but it
    changes the behavior of the :meth:`command` decorator so that it
    automatically wraps the functions in :func:`with_appcontext`.

    Not to be confused with :class:`FlaskGroup`.
    """

    def command(self, *args, **kwargs):
        """This works exactly like the method of the same name on a regular
        :class:`click.Group` but it wraps callbacks in :func:`with_appcontext`
        unless it's disabled by passing ``with_appcontext=False``.
        """
        wrap_for_ctx = kwargs.pop("with_appcontext", True)

        def decorator(f):
            if wrap_for_ctx:
                f = with_appcontext(f)
            return click.Group.command(self, *args, **kwargs)(f)

        return decorator

    def group(self, *args, **kwargs):
        """This works exactly like the method of the same name on a regular
        :class:`click.Group` but it defaults the group class to
        :class:`AppGroup`.
        """
        kwargs.setdefault("cls", AppGroup)
        return click.Group.group(self, *args, **kwargs)
class FlaskGroup(AppGroup):
    """Special subclass of the :class:`AppGroup` group that supports
    loading more commands from the configured Flask app. Normally a
    developer does not have to interface with this class but there are
    some very advanced use cases for which it makes sense to create an
    instance of this.

    For information as of why this is useful see :ref:`custom-scripts`.

    :param add_default_commands: if this is True then the default run and
        shell commands will be added.
    :param add_version_option: adds the ``--version`` option.
    :param create_app: an optional callback that is passed the script info and
        returns the loaded app.
    :param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv`
        files to set environment variables. Will also change the working
        directory to the directory containing the first file found.
    :param set_debug_flag: Set the app's debug flag based on the active
        environment

    .. versionchanged:: 1.0
        If installed, python-dotenv will be used to load environment variables
        from :file:`.env` and :file:`.flaskenv` files.
    """

    def __init__(
        self,
        add_default_commands=True,
        create_app=None,
        add_version_option=True,
        load_dotenv=True,
        set_debug_flag=True,
        **extra
    ):
        params = list(extra.pop("params", None) or ())

        if add_version_option:
            params.append(version_option)

        AppGroup.__init__(self, params=params, **extra)
        self.create_app = create_app
        self.load_dotenv = load_dotenv
        self.set_debug_flag = set_debug_flag

        if add_default_commands:
            self.add_command(run_command)
            self.add_command(shell_command)
            self.add_command(routes_command)

        self._loaded_plugin_commands = False

    def _load_plugin_commands(self):
        # Register commands published via the "flask.commands" entry point,
        # at most once; skip silently when setuptools is unavailable.
        if self._loaded_plugin_commands:
            return
        try:
            import pkg_resources
        except ImportError:
            self._loaded_plugin_commands = True
            return

        for ep in pkg_resources.iter_entry_points("flask.commands"):
            self.add_command(ep.load(), ep.name)
        self._loaded_plugin_commands = True

    def get_command(self, ctx, name):
        """Resolve ``name`` first among built-in/plugin commands, then
        among the loaded application's own CLI commands."""
        self._load_plugin_commands()

        # We load built-in commands first as these should always be the
        # same no matter what the app does. If the app does want to
        # override this it needs to make a custom instance of this group
        # and not attach the default commands.
        #
        # This also means that the script stays functional in case the
        # application completely fails.
        rv = AppGroup.get_command(self, ctx, name)

        if rv is not None:
            return rv

        info = ctx.ensure_object(ScriptInfo)

        try:
            rv = info.load_app().cli.get_command(ctx, name)

            if rv is not None:
                return rv
        except NoAppException:
            # no app available: fall through and return None (unknown command)
            pass

    def list_commands(self, ctx):
        """List built-in, plugin and (when loadable) application commands."""
        self._load_plugin_commands()

        # The commands available is the list of both the application (if
        # available) plus the builtin commands.
        rv = set(click.Group.list_commands(self, ctx))
        info = ctx.ensure_object(ScriptInfo)

        try:
            rv.update(info.load_app().cli.list_commands(ctx))
        except Exception:
            # Here we intentionally swallow all exceptions as we don't
            # want the help page to break if the app does not exist.
            # If someone attempts to use the command we try to create
            # the app again and this will give us the error.
            # However, we will not do so silently because that would confuse
            # users.
            traceback.print_exc()
        return sorted(rv)

    def main(self, *args, **kwargs):
        """Entry point: set up environment (dotenv, CLI flag) and delegate
        to click's group main."""
        # Set a global flag that indicates that we were invoked from the
        # command line interface. This is detected by Flask.run to make the
        # call into a no-op. This is necessary to avoid ugly errors when the
        # script that is loaded here also attempts to start a server.
        os.environ["FLASK_RUN_FROM_CLI"] = "true"

        if get_load_dotenv(self.load_dotenv):
            load_dotenv()

        obj = kwargs.get("obj")

        if obj is None:
            obj = ScriptInfo(
                create_app=self.create_app, set_debug_flag=self.set_debug_flag
            )

        kwargs["obj"] = obj
        kwargs.setdefault("auto_envvar_prefix", "FLASK")
        return super(FlaskGroup, self).main(*args, **kwargs)
def _path_is_ancestor(path, other):
"""Take ``other`` and remove the length of ``path`` from it. Then join it
to ``path``. If it is the original value, ``path`` is an ancestor of
``other``."""
return os.path.join(path, other[len(path) :].lstrip(os.sep)) == other
def load_dotenv(path=None):
    """Load "dotenv" files in order of precedence to set environment variables.

    If an env var is already set it is not overwritten, so earlier files in the
    list are preferred over later files.

    Changes the current working directory to the location of the first file
    found, with the assumption that it is in the top level project directory
    and will be where the Python path should import local packages from.

    This is a no-op if `python-dotenv`_ is not installed.

    .. _python-dotenv: https://github.com/theskumar/python-dotenv#readme

    :param path: Load the file at this location instead of searching.
    :return: ``True`` if a file was loaded.

    .. versionchanged:: 1.1.0
        Returns ``False`` when python-dotenv is not installed, or when
        the given path isn't a file.

    .. versionadded:: 1.0
    """
    if dotenv is None:
        # dotenv not installed: just hint at it if files are present
        if path or os.path.isfile(".env") or os.path.isfile(".flaskenv"):
            click.secho(
                " * Tip: There are .env or .flaskenv files present."
                ' Do "pip install python-dotenv" to use them.',
                fg="yellow",
            )
        return False

    # if the given path specifies the actual file then return True,
    # else False
    if path is not None:
        if os.path.isfile(path):
            return dotenv.load_dotenv(path)

        return False

    new_dir = None

    # .env takes precedence over .flaskenv because it is loaded first
    for name in (".env", ".flaskenv"):
        path = dotenv.find_dotenv(name, usecwd=True)

        if not path:
            continue

        if new_dir is None:
            new_dir = os.path.dirname(path)

        dotenv.load_dotenv(path)

    if new_dir and os.getcwd() != new_dir:
        os.chdir(new_dir)

    return new_dir is not None  # at least one file was located and loaded
def show_server_banner(env, debug, app_import_path, eager_loading):
    """Show extra startup messages the first time the server is run,
    ignoring the reloader.

    :param env: value of FLASK_ENV to display.
    :param debug: debug flag (or None to omit the line).
    :param app_import_path: dotted path being served (or None to omit).
    :param eager_loading: whether the app was imported eagerly.
    """
    # the reloader child process sets WERKZEUG_RUN_MAIN; print only once
    if os.environ.get("WERKZEUG_RUN_MAIN") == "true":
        return

    if app_import_path is not None:
        message = ' * Serving Flask app "{0}"'.format(app_import_path)

        if not eager_loading:
            message += " (lazy loading)"

        click.echo(message)

    click.echo(" * Environment: {0}".format(env))

    if env == "production":
        click.secho(
            '   WARNING: This is a development server. '
            'Do not use it in a production deployment.', fg='red')
        click.secho('   Use a production WSGI server instead.', dim=True)

    if debug is not None:
        click.echo(" * Debug mode: {0}".format("on" if debug else "off"))
class CertParamType(click.ParamType):
    """Click option type for the ``--cert`` option. Allows either an
    existing file, the string ``'adhoc'``, or an import for a
    :class:`~ssl.SSLContext` object.
    """

    name = "path"

    def __init__(self):
        self.path_type = click.Path(exists=True, dir_okay=False, resolve_path=True)

    def convert(self, value, param, ctx):
        """Convert ``value`` to a path, ``'adhoc'`` or an SSLContext.

        :raises click.BadParameter: when none of the accepted forms match.
        """
        try:
            # first interpretation: an existing certificate file
            return self.path_type(value, param, ctx)
        except click.BadParameter:
            value = click.STRING(value, param, ctx).lower()

            if value == "adhoc":
                try:
                    import OpenSSL
                except ImportError:
                    raise click.BadParameter(
                        "Using ad-hoc certificates requires pyOpenSSL.", ctx, param
                    )

                return value

            # last interpretation: an importable SSLContext object
            obj = import_string(value, silent=True)

            if sys.version_info < (2, 7, 9):
                # ssl.SSLContext does not exist on very old Pythons; accept
                # any importable object there.
                if obj:
                    return obj
            else:
                if isinstance(obj, ssl.SSLContext):
                    return obj

            # nothing matched: re-raise the original BadParameter
            raise
def _validate_key(ctx, param, value):
    """The ``--key`` option must be specified when ``--cert`` is a file.
    Modifies the ``cert`` param to be a ``(cert, key)`` pair if needed.

    :raises click.BadParameter: on an inconsistent cert/key combination.
    """
    cert = ctx.params.get("cert")
    is_adhoc = cert == "adhoc"

    if sys.version_info < (2, 7, 9):
        # ssl.SSLContext is unavailable; treat any non-string cert as a context
        is_context = cert and not isinstance(cert, (text_type, bytes))
    else:
        is_context = isinstance(cert, ssl.SSLContext)

    if value is not None:
        if is_adhoc:
            raise click.BadParameter(
                'When "--cert" is "adhoc", "--key" is not used.', ctx, param
            )

        if is_context:
            raise click.BadParameter(
                # bug fix: the closing quote after --key was missing
                'When "--cert" is an SSLContext object, "--key" is not used.',
                ctx, param
            )

        if not cert:
            raise click.BadParameter('"--cert" must also be specified.', ctx, param)

        ctx.params["cert"] = cert, value
    else:
        if cert and not (is_adhoc or is_context):
            raise click.BadParameter('Required when using "--cert".', ctx, param)

    return value
class SeparatedPathType(click.Path):
    """Click option type that accepts a list of values separated by the
    OS's path separator (``:``, ``;`` on Windows). Each value is
    validated as a :class:`click.Path` type.
    """

    def convert(self, value, param, ctx):
        # split on os.pathsep, then validate each piece as a Path
        items = self.split_envvar_value(value)
        super_convert = super(SeparatedPathType, self).convert
        return [super_convert(item, param, ctx) for item in items]
@click.command("run", short_help="Run a development server.")
@click.option("--host", "-h", default="127.0.0.1", help="The interface to bind to.")
@click.option("--port", "-p", default=5000, help="The port to bind to.")
@click.option(
    "--cert", type=CertParamType(), help="Specify a certificate file to use HTTPS."
)
@click.option(
    "--key",
    type=click.Path(exists=True, dir_okay=False, resolve_path=True),
    callback=_validate_key,
    expose_value=False,
    help="The key file to use when specifying a certificate.",
)
@click.option(
    "--reload/--no-reload",
    default=None,
    help="Enable or disable the reloader. By default the reloader "
    "is active if debug is enabled.",
)
@click.option(
    "--debugger/--no-debugger",
    default=None,
    help="Enable or disable the debugger. By default the debugger "
    "is active if debug is enabled.",
)
@click.option(
    "--eager-loading/--lazy-loader",
    default=None,
    help="Enable or disable eager loading. By default eager "
    "loading is enabled if the reloader is disabled.",
)
@click.option(
    "--with-threads/--without-threads",
    default=True,
    help="Enable or disable multithreading.",
)
@click.option(
    "--extra-files",
    default=None,
    type=SeparatedPathType(),
    help=(
        "Extra files that trigger a reload on change. Multiple paths"
        " are separated by '{}'.".format(os.path.pathsep)
    ),
)
@pass_script_info
def run_command(
    info, host, port, reload, debugger, eager_loading, with_threads, cert, extra_files
):
    """Run a local development server.

    This server is for development purposes only. It does not provide
    the stability, security, or performance of production WSGI servers.

    The reloader and debugger are enabled by default if
    FLASK_ENV=development or FLASK_DEBUG=1.
    """
    debug = get_debug_flag()

    # unset options default to the debug flag; eager loading defaults to
    # the opposite of the reloader so the reloader child can lazy-load
    if reload is None:
        reload = debug

    if debugger is None:
        debugger = debug

    if eager_loading is None:
        eager_loading = not reload

    show_server_banner(get_env(), debug, info.app_import_path, eager_loading)
    app = DispatchingApp(info.load_app, use_eager_loading=eager_loading)

    from werkzeug.serving import run_simple

    run_simple(
        host,
        port,
        app,
        use_reloader=reload,
        use_debugger=debugger,
        threaded=with_threads,
        ssl_context=cert,
        extra_files=extra_files,
    )
@click.command("shell", short_help="Run a shell in the app context.")
@with_appcontext
def shell_command():
    """Run an interactive Python shell in the context of a given
    Flask application. The application will populate the default
    namespace of this shell according to its configuration.

    This is useful for executing small snippets of management code
    without having to manually configure the application.
    """
    import code
    from flask.globals import _app_ctx_stack

    app = _app_ctx_stack.top.app
    banner = "Python %s on %s\nApp: %s [%s]\nInstance: %s" % (
        sys.version,
        sys.platform,
        app.import_name,
        app.env,
        app.instance_path,
    )
    ctx = {}

    # Support the regular Python interpreter startup script if someone
    # is using it.
    startup = os.environ.get("PYTHONSTARTUP")
    if startup and os.path.isfile(startup):
        with open(startup, "r") as f:
            eval(compile(f.read(), startup, "exec"), ctx)

    # shell namespace comes from the app's registered shell context processors
    ctx.update(app.make_shell_context())

    code.interact(banner=banner, local=ctx)
@click.command("routes", short_help="Show the routes for the app.")
@click.option(
    "--sort",
    "-s",
    type=click.Choice(("endpoint", "methods", "rule", "match")),
    default="endpoint",
    help=(
        'Method to sort routes by. "match" is the order that Flask will match '
        "routes when dispatching a request."
    ),
)
@click.option("--all-methods", is_flag=True, help="Show HEAD and OPTIONS methods.")
@with_appcontext
def routes_command(sort, all_methods):
    """Show all registered routes with endpoints and methods."""
    rules = list(current_app.url_map.iter_rules())

    if not rules:
        click.echo("No routes were registered.")
        return

    # HEAD/OPTIONS are implicit on most routes; hide them unless requested
    ignored_methods = set(() if all_methods else ("HEAD", "OPTIONS"))

    # "match" keeps the url_map's own order; other keys sort explicitly
    if sort in ("endpoint", "rule"):
        rules = sorted(rules, key=attrgetter(sort))
    elif sort == "methods":
        rules = sorted(rules, key=lambda rule: sorted(rule.methods))

    rule_methods = [", ".join(sorted(rule.methods - ignored_methods)) for rule in rules]

    headers = ("Endpoint", "Methods", "Rule")
    # column widths: the widest of header and any value in that column
    widths = (
        max(len(rule.endpoint) for rule in rules),
        max(len(methods) for methods in rule_methods),
        max(len(rule.rule) for rule in rules),
    )
    widths = [max(len(h), w) for h, w in zip(headers, widths)]
    row = "{{0:<{0}}}  {{1:<{1}}}  {{2:<{2}}}".format(*widths)

    click.echo(row.format(*headers).strip())
    click.echo(row.format(*("-" * width for width in widths)))

    for rule, methods in zip(rules, rule_methods):
        click.echo(row.format(rule.endpoint, methods, rule.rule).rstrip())
# The top-level ``flask`` command group; the help text uses the platform's
# syntax for setting environment variables (export vs set).
cli = FlaskGroup(
    help="""\
A general utility script for Flask applications.
Provides commands from Flask, extensions, and the application. Loads the
application defined in the FLASK_APP environment variable, or from a wsgi.py
file. Setting the FLASK_ENV environment variable to 'development' will enable
debug mode.
\b
{prefix}{cmd} FLASK_APP=hello.py
{prefix}{cmd} FLASK_ENV=development
{prefix}flask run
""".format(
        cmd="export" if os.name == "posix" else "set",
        prefix="$ " if os.name == "posix" else "> ",
    )
)
def main(as_module=False):
    # Run the Flask CLI; fix up the program name when invoked via ``-m flask``.
    cli.main(prog_name="python -m flask" if as_module else None)


if __name__ == "__main__":
    main(as_module=True)
|
login.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Dusan Klinec, ph4r05, 2018
import requests
import argparse
import time
import re
import os
import logging
import coloredlogs
import threading
from requests.auth import HTTPBasicAuth
from fadmin.trace_logger import Tracelogger
from fadmin import shell_ping
logger = logging.getLogger(__name__)
coloredlogs.CHROOT_FILES = []
coloredlogs.install(level=logging.INFO, use_chroot=False)
URL = 'https://fadmin.fi.muni.cz/auth/sit/wireless/login2.mpl'
FADMIN_HOST = 'fadmin.fi.muni.cz'
MUNI_HOST = 'muni.cz'
class Login(object):
def __init__(self):
    """Initialize connectivity-monitoring state and worker-thread slots."""
    self.args = None  # parsed CLI arguments (set elsewhere)
    self.password = None
    self.sess = None  # HTTP session used for authentication
    self.running = True
    self.stop_event = threading.Event()  # signals worker loops to stop
    self.trace_logger = Tracelogger(logger)
    self.auth_attempts = []  # records of auth attempts; element [0] is a timestamp
    self.ip_thread = None
    self.last_ip_check = None
    self.ip_results = []  # recent external-IP lookups; None marks a failed lookup

    self.ping_thread = [None, None]
    self.last_ping_time = [None, None]
    self.result_window = [[], []]  # per-host sliding windows of ping RTTs (None = lost)
    self.window_length = 100  # max samples kept per window
    self.monitored = [FADMIN_HOST, MUNI_HOST]  # index 0 = FI host, 1 = world host
    self.last_connectivity_state = None
    self.last_check = 0
def ip_main(self):
    """
    IP check thread body: resolve the external IP roughly every 2 seconds
    until the stop event is set.
    :return:
    """
    logger.info('IP thread started %s %s %s'
                % (os.getpid(), os.getppid(), threading.current_thread()))
    try:
        while not self.stop_event.is_set():
            try:
                cur_time = time.time()
                self.ip_job()
                self.last_ip_check = cur_time
                time.sleep(2)

            except Exception as e:
                # keep the loop alive on transient errors, back off longer
                logger.error('Exception in IP job: %s' % e)
                self.trace_logger.log(e)
                time.sleep(5)

    except Exception as e:
        logger.error('Exception: %s' % e)
        self.trace_logger.log(e)

    logger.info('IP loop terminated')
def ip_job(self):
    """
    Resolve the current external IP address once and record the result.

    Appends the resolved IP (or ``None`` on failure) to ``self.ip_results``
    and trims the history to the configured window length.
    :return:
    """
    try:
        res = requests.get('https://api.ipify.org?format=json', timeout=10)
        res.raise_for_status()
        js = res.json()
        self.ip_results.append(js['ip'])

    except Exception as e:
        logger.info('IP exception: %s' % e)
        self.trace_logger.log(e)
        self.ip_results.append(None)

    # keep history bounded; reuse the shared window size instead of the
    # previously duplicated magic constant 100
    self.ip_results = self.ip_results[-self.window_length:]
def ping_main(self, monitored_host):
    """
    Main ping diagnostic thread: ping the given host roughly once per second
    until the stop event is set.

    :param monitored_host: index into ``self.monitored`` (0 = FI, 1 = world)
    :return:
    """
    logger.info('Ping thread started %s %s %s %s'
                % (monitored_host, os.getpid(), os.getppid(), threading.current_thread()))
    try:
        while not self.stop_event.is_set():
            try:
                cur_time = time.time()
                self.ping_job(monitored_host)
                self.last_ping_time[monitored_host] = cur_time
                time.sleep(1)

            except Exception as e:
                # keep the loop alive on transient errors, back off longer
                logger.error('Exception in ping job: %s' % e)
                self.trace_logger.log(e)
                time.sleep(5)

    except Exception as e:
        logger.error('Exception: %s' % e)
        self.trace_logger.log(e)

    logger.info('Ping loop %s terminated' % monitored_host)
def ping_job(self, monitored_host, attempts=1, timeout=1):
    """
    Ping diagnosis: ping the monitored host and record the outcome in the
    host's sliding result window.

    :param monitored_host: index into ``self.monitored`` (0 = FI, 1 = world)
    :param attempts: number of ping attempts to send
    :param timeout: currently unused; kept for interface compatibility
    :return:
    """
    r = shell_ping.ping(self.monitored[monitored_host], attempts)
    ip, time_min, time_avg, time_max, lost = r
    logger.info('Pinging %s res: %s' % (self.monitored[monitored_host], r))

    # record the minimal RTT on success, None when all packets were lost
    # (removed a stale commented-out special case for host 0)
    succ_rtt = None if lost >= 100 else time_min

    self.result_window[monitored_host].append(succ_rtt)
    if len(self.result_window[monitored_host]) > self.window_length:
        self.result_window[monitored_host].pop(0)
def has_enough_data(self):
return len(self.result_window[0]) > 4
def is_on_fi(self):
suffix = self.result_window[0][-4:]
return sum([1 for x in suffix if x is not None]) >= 2
def is_world_pingable(self):
suffix = self.result_window[1][-4:]
return sum([1 for x in suffix if x is not None]) >= 2
def is_on_fi_ip(self):
if len(self.ip_results) == 0 or self.ip_results[-1] is None:
return None
last = self.ip_results[-1]
if not last.startswith('147.251.'):
return False
parts = last.split('.')
return parts[2] in ['42', '43', '44']
def prune_attempts(self, state_changed=False):
if state_changed:
self.auth_attempts = self.auth_attempts[-10:]
else:
self.auth_attempts = self.auth_attempts[-100:] # keep last 100 records
def last_auths_in(self, num, span):
last = self.auth_attempts[-num:]
if len(last) < num:
return False
return sum([1 for x in last if x[0] > span]) == num
def curr_connectivity_state(self):
state = 0
if self.is_on_fi():
state |= 1
if self.is_world_pingable():
state |= 2
return state
    def reauth_target(self):
        """
        Compute the earliest timestamp at which the next re-auth should run.

        Returns 0 (auth immediately) when no attempt has been made yet;
        otherwise a back-off schedule based on how recently and how often
        recent attempts fired, and on current connectivity.
        """
        if len(self.auth_attempts) == 0:
            return 0
        last = self.auth_attempts[-1]
        cur_time = time.time()
        # Rate limit: never re-evaluate more than roughly once per second.
        if cur_time - last[0] < 1.:
            return cur_time + 10
        if cur_time - self.last_check < 1.:
            return cur_time + 10
        cur_state = self.curr_connectivity_state()
        if cur_state != self.last_connectivity_state:
            # Connectivity flipped: shorten the history so old attempts
            # don't dominate the back-off decisions below.
            self.prune_attempts(True)
        logger.debug('Last: %s diff %s, cur state: %s, last state: %s'
                     % (last, cur_time-last[0], cur_state, self.last_connectivity_state))
        self.last_connectivity_state = cur_state
        self.last_check = cur_time
        # World is not pingable, get time to start auth.
        if not self.is_world_pingable():
            # If last 20 auths were within 5 minute, slow down to 5 minutes
            if self.last_auths_in(20, cur_time - 3 * 60):
                logger.debug('C1')
                return cur_time + 3 * 60
            # If last 10 auths were within 1 minute, slow down to 30 sec
            if self.last_auths_in(10, cur_time - 60):
                logger.debug('C2')
                return cur_time + 30
            # If last 2 reauths in 5 seconds:
            if self.last_auths_in(2, cur_time - 5):
                logger.debug('C3')
                return cur_time + 2
            logger.debug('C4')
            return last[0] + 1
        # World is pingable
        if last[1]:  # last[1] is the success flag appended in entry()
            logger.debug('C5')
            return last[0] + self.args.timeout  # last auth OK
        else:
            if self.last_auths_in(20, cur_time - 3 * 60):
                logger.debug('C6')
                return cur_time + 3 * 60
            if self.last_auths_in(10, cur_time - 30):
                logger.debug('C7')
                return cur_time + 30
            if self.last_auths_in(2, cur_time - 5):
                logger.debug('C8')
                return cur_time + 2
            logger.debug('C9')
            return last[0] + 5  # last auth failed with exception
def do_auth(self):
res = self.sess.get(URL, auth=HTTPBasicAuth(self.args.user, self.password), timeout=30)
logger.debug(res.status_code)
matches = re.findall(r'"(https://.+?)"', res.text)
for m in matches:
if 'thetis' not in m:
continue
if self.args.no_ipv6 and 'ip6' in m:
continue
try:
r = self.sess.get(m, timeout=30)
logger.debug('Resp %s to %s' % (r.status_code, m))
except Exception as e:
logger.warning('Exc: %s' % e)
def process_args(self):
if self.args.user is None:
raise ValueError('Please use --user')
if self.args.key_file:
with open(self.args.key_file) as fh:
self.password = fh.read().strip()
elif self.args.key_ring:
import keyring
keyring.get_keyring()
self.password = keyring.get_password(URL, self.args.user)
else:
raise ValueError('Password not specified')
def boot(self):
self.ping_thread[0] = threading.Thread(target=self.ping_main, args=(0, ))
self.ping_thread[0].setDaemon(True)
self.ping_thread[0].start()
self.ping_thread[1] = threading.Thread(target=self.ping_main, args=(1, ))
self.ping_thread[1].setDaemon(True)
self.ping_thread[1].start()
self.ip_thread = threading.Thread(target=self.ip_main, args=())
self.ip_thread.setDaemon(True)
self.ip_thread.start()
self.sess = requests.Session()
    def entry(self, args):
        """Main entry: validate args, start diagnostics, run the re-auth loop."""
        self.args = args
        self.process_args()
        self.boot()
        logger.info('Starting authentication loop')
        while True:
            # Check whether we have enough running information to decide what to do
            if not self.has_enough_data():
                time.sleep(0.2)
                continue
            # If not on FI network, put into rest
            if not self.is_on_fi():
                time.sleep(0.5)
                continue
            # If has non-FI IP continue
            # NOTE: is_on_fi_ip() is tri-state (None = unknown), so the
            # explicit `== False` comparison is deliberate — unknown falls
            # through to the auth logic below.
            if self.is_on_fi_ip() == False:
                time.sleep(0.2)
                continue
            # Main reauth loop.
            # Timing depends on the current state, whether world is pingable, whether
            # the last auth was successful.
            res = False
            auth_called = False
            try:
                if time.time() >= self.reauth_target():
                    auth_called = True
                    self.do_auth()
                    res = True
            except Exception as e:
                logger.error('Exception: %s' % e)
            finally:
                if auth_called:
                    # Record (timestamp, success) so reauth_target can back off.
                    self.auth_attempts.append((time.time(), res))
                    self.prune_attempts()
            time.sleep(0.2)
def main(self):
parser = argparse.ArgumentParser(description="FI MUNI Agent utils")
# fmt: off
parser.add_argument("--sign", dest="sign", default=None,
help="Sign the unsigned file")
parser.add_argument("--user", dest="user", default=None,
help="Username to login",)
parser.add_argument("--key-file", dest="key_file", default=None,
help="password file",)
parser.add_argument("--timeout", dest="timeout", default=60*30, type=int,
help="Request timeout",)
parser.add_argument("--debug", dest="debug", default=False, action="store_const", const=True,
help="Debugging output",)
parser.add_argument("--no-ipv6", dest="no_ipv6", default=False, action="store_const", const=True,
help="Debugging output",)
parser.add_argument("--key-ring", dest="key_ring", default=False, action="store_const", const=True,
help="Use Key ring to obtain credentials",)
parser.add_argument("--ping-check", dest="ping_check", default=False, action="store_const", const=True,
help="Ping network periodically, if problem is detected tries to reauth",)
# fmt: on
args = parser.parse_args()
if args.debug:
coloredlogs.install(level=logging.DEBUG, use_chroot=False)
self.entry(args)
def main():
    """Script entry point: build the Login agent and run it."""
    Login().main()
# Run the agent only when executed as a script, not on import.
if __name__ == "__main__":
    main()
|
darknet2.py | #!python3
'''
##############################
### Receive Video stream #####
### from Android client #######
### Use yolo to do detect ####
## (return a message to the mobile device) ##
##############################
'''
from ctypes import *
import math
import random
import os
import socket
import time
import cv2
import numpy as np
from PIL import Image
import sys
import pickle
import struct
import timeit
import time
import threading
import ctypes
# generate different colors for different classes
# One random BGR color per class (80 = COCO class count) for drawing boxes.
COLORS = np.random.uniform(0, 255, size=(80,3))
def sample(probs):
    """Draw a random index weighted by *probs* (normalized internally).

    Falls back to the last index if rounding error leaves the cursor
    positive after the full pass.
    """
    total = sum(probs)
    normalized = [p / total for p in probs]
    cursor = random.uniform(0, 1)
    for idx, weight in enumerate(normalized):
        cursor -= weight
        if cursor <= 0:
            return idx
    return len(normalized) - 1
def c_array(ctype, values):
    """Allocate a C array of *ctype* initialized with *values*."""
    return (ctype * len(values))(*values)
class BOX(Structure):
    """ctypes mirror of darknet's `box` struct: centre x/y plus width/height."""
    _fields_ = [("x", c_float),
                ("y", c_float),
                ("w", c_float),
                ("h", c_float)]
class DETECTION(Structure):
    """ctypes mirror of darknet's `detection` struct: one candidate box
    with per-class probabilities."""
    _fields_ = [("bbox", BOX),
                ("classes", c_int),
                ("prob", POINTER(c_float)),
                ("mask", POINTER(c_float)),
                ("objectness", c_float),
                ("sort_class", c_int)]
class IMAGE(Structure):
    """ctypes mirror of darknet's `image` struct: CHW float buffer."""
    _fields_ = [("w", c_int),
                ("h", c_int),
                ("c", c_int),
                ("data", POINTER(c_float))]
class METADATA(Structure):
    """ctypes mirror of darknet's `metadata` struct: class count + names."""
    _fields_ = [("classes", c_int),
                ("names", POINTER(c_char_p))]
# Load the compiled darknet shared library and declare the C signature of
# every entry point used below (ctypes carries no header information).
# NOTE(review): hard-coded absolute path to the shared library — adjust
# for the local install.
lib = CDLL("/home/vYOLO/libdarknet.so", RTLD_GLOBAL)
lib.network_width.argtypes = [c_void_p]
lib.network_width.restype = c_int
lib.network_height.argtypes = [c_void_p]
lib.network_height.restype = c_int
predict = lib.network_predict
predict.argtypes = [c_void_p, POINTER(c_float)]
predict.restype = POINTER(c_float)
set_gpu = lib.cuda_set_device
set_gpu.argtypes = [c_int]
make_image = lib.make_image
make_image.argtypes = [c_int, c_int, c_int]
make_image.restype = IMAGE
get_network_boxes = lib.get_network_boxes
get_network_boxes.argtypes = [c_void_p, c_int, c_int, c_float, c_float, POINTER(c_int), c_int, POINTER(c_int)]
get_network_boxes.restype = POINTER(DETECTION)
make_network_boxes = lib.make_network_boxes
make_network_boxes.argtypes = [c_void_p]
make_network_boxes.restype = POINTER(DETECTION)
free_detections = lib.free_detections
free_detections.argtypes = [POINTER(DETECTION), c_int]
free_ptrs = lib.free_ptrs
free_ptrs.argtypes = [POINTER(c_void_p), c_int]
network_predict = lib.network_predict
network_predict.argtypes = [c_void_p, POINTER(c_float)]
reset_rnn = lib.reset_rnn
reset_rnn.argtypes = [c_void_p]
load_net = lib.load_network
load_net.argtypes = [c_char_p, c_char_p, c_int]
load_net.restype = c_void_p
do_nms_obj = lib.do_nms_obj
do_nms_obj.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]
do_nms_sort = lib.do_nms_sort
do_nms_sort.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]
free_image = lib.free_image
free_image.argtypes = [IMAGE]
letterbox_image = lib.letterbox_image
letterbox_image.argtypes = [IMAGE, c_int, c_int]
letterbox_image.restype = IMAGE
load_meta = lib.get_metadata
lib.get_metadata.argtypes = [c_char_p]
lib.get_metadata.restype = METADATA
load_image = lib.load_image_color
load_image.argtypes = [c_char_p, c_int, c_int]
load_image.restype = IMAGE
rgbgr_image = lib.rgbgr_image
rgbgr_image.argtypes = [IMAGE]
predict_image = lib.network_predict_image
# NOTE(review): stock darknet's network_predict_image takes (net, image);
# this build apparently adds a third int (the GPU quota passed from
# detect()) — confirm against the local libdarknet source.
predict_image.argtypes = [c_void_p, IMAGE, c_int]
predict_image.restype = POINTER(c_float)
#def classify(net, meta, im):
# out = predict_image(net, im)
# res = []
# for i in range(meta.classes):
# res.append((meta.names[i], out[i]))
# res = sorted(res, key=lambda x: -x[1])
# return res
### modified ###
# Server configuration shared by the socket threads below.
HOST=''            # bind on all interfaces
USER_PORT=9002     # video-stream clients (mobile devices)
CTL_PORT=11112     # controller channel (GPU quota updates)
BUFFER_SIZE = 256  # doubles exchanged per controller message
QUATO = 100        # GPU virtual resource quota, updated by the controller
num_points = 2     # latency samples required before reporting back
wait_time = 0.01   # poll interval while waiting for samples
Latency = []       # per-frame processing latencies (shared across threads)
Count = 0
def threading_controller(controller):
    """
    Serve one controller connection.

    Each message is BUFFER_SIZE doubles whose first element is the new
    GPU quota; after applying it, wait for num_points fresh latency
    samples and reply with their mean.  Runs until the peer disconnects.
    """
    global QUATO
    global Latency
    print ("entered controller threading.", controller)
    while True:
        recv_data = controller.recv(ctypes.sizeof(ctypes.c_double)*BUFFER_SIZE)
        if len(recv_data) <= 0:
            break
        # np.fromstring on binary input is deprecated (removed in newer
        # numpy); frombuffer reads the same doubles without a copy.
        data = np.frombuffer(recv_data, dtype=np.double)
        QUATO = int(data[0])
        print('GPU virtual resource is ' + str(QUATO))
        # Reset and busy-wait (with small sleeps) until the detect loop
        # has produced enough samples under the new quota.
        Latency = []
        while len(Latency) < num_points:
            time.sleep(wait_time)
        assert len(Latency) >= num_points  # make sure there has data in the latency
        send_data = np.mean(Latency[1:]) * np.ones(BUFFER_SIZE, dtype=np.double)
        # try to send data, if error break
        controller.sendall(send_data)
    # if controller drop, then close and re-accept
    controller.close()
def connect_controller():
    """Accept controller connections on CTL_PORT forever, spawning one
    threading_controller worker per client."""
    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    listener.bind((HOST, CTL_PORT))
    listener.listen(10)
    print('Controller Socket now listening')
    while True:
        controller, ctl_addr = listener.accept()
        print("Get new controller socket" + str(ctl_addr))
        worker = threading.Thread(target=threading_controller, args=(controller,))
        worker.start()
def recv_image_from_socket(client):
    """
    Receive one length-prefixed JPEG frame from *client* and decode it.

    Wire format: 4-byte big-endian signed length, then the JPEG payload.
    Returns the decoded BGR image, or False on disconnect/socket error.
    """
    header = b''
    while len(header) < 4:
        try:
            buf = client.recv(4 - len(header))
        except Exception:
            return False
        if not buf:
            # Peer closed the connection.  Without this check an empty
            # recv() would loop forever (the original bug).
            return False
        header += buf
    size, = struct.unpack('!i', header)
    recv_data = b''
    while len(recv_data) < size:
        try:
            data = client.recv(1024)
        except Exception:
            return False
        if not data:
            return False
        recv_data += data
    frame_data = recv_data[:size]
    # fromstring is deprecated for binary input; frombuffer is the
    # zero-copy replacement.
    imgdata = np.frombuffer(frame_data, dtype='uint8')
    decimg = cv2.imdecode(imgdata, 1)
    return decimg
def detect(net, meta, image, quato, thresh=.5, hier_thresh=.5, nms=.45):
    """
    Run darknet detection on a BGR numpy image.

    Returns a list of (class_name, prob, (cx, cy, w, h), class_id) tuples
    sorted by descending probability.  `quato` is forwarded to
    predict_image as the GPU resource quota.
    """
    # GET C,H,W, and DATA values
    img = image.transpose(2, 0, 1)  # HWC -> CHW, darknet's layout
    c, h, w = img.shape[0], img.shape[1], img.shape[2]
    nump_data = img.ravel() / 255.0  # normalize pixels to [0, 1]
    nump_data = np.ascontiguousarray(nump_data, dtype=np.float32)
    # make c_type pointer to numpy array
    ptr_data = nump_data.ctypes.data_as(POINTER(c_float))
    # make IMAGE data type (wraps the numpy-owned buffer, no copy)
    im = IMAGE(w=w, h=h, c=c, data=ptr_data)
    num = c_int(0)
    pnum = pointer(num)
    predict_image(net, im, quato)
    dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, None, 0, pnum)
    num = pnum[0]
    if (nms): do_nms_obj(dets, num, meta.classes, nms);
    res = []
    for j in range(num):
        for i in range(meta.classes):
            if dets[j].prob[i] > 0:
                b = dets[j].bbox
                classid = i
                calssnamess = meta.names[i].decode('UTF-8')
                res.append((calssnamess, dets[j].prob[i], (b.x, b.y, b.w, b.h),classid))
    res = sorted(res, key=lambda x: -x[1])
    #free_image(im)  # not needed: `im` wraps memory owned by nump_data
    free_detections(dets, num)
    return res
# display the pic after detecting
def showPicResult(r, im):
    """
    Draw the detections *r* on image *im* and display them in a window.

    Each entry of *r* is (class_name, prob, (cx, cy, w, h), class_id) as
    produced by detect(); boxes are centre + size.
    """
    # cv2.CV_AA was removed in OpenCV 3; fall back through the known
    # spellings (both equal 16).
    line_aa = getattr(cv2, 'LINE_AA', getattr(cv2, 'CV_AA', 16))
    for name, prob, (cx, cy, w, h), class_id in r:
        x1 = cx - w / 2
        y1 = cy - h / 2
        x2 = cx + w / 2
        y2 = cy + h / 2
        color = COLORS[class_id]
        cv2.rectangle(im, (int(x1), int(y1)), (int(x2), int(y2)), color, 2)
        # Label anchor: just inside the top-left corner of the box.
        x3 = int(x1 + 5)
        y3 = int(y1 - 10)
        font = cv2.FONT_HERSHEY_SIMPLEX
        text = "{}: {:.4f}".format(str(name), float(prob))
        # Bug fix: the horizontal bound must be the image width
        # (shape[1]); the original compared against shape[0] (height).
        if x3 <= im.shape[1] and y3 >= 0:
            cv2.putText(im, text, (x3, y3), font, 0.5, color, 1, line_aa)
        else:
            # Label would fall outside the image; draw it inside the box.
            cv2.putText(im, text, (int(x1), int(y1 + 6)), font, 0.5, color, 1, line_aa)
    cv2.imshow('Detection Window', im)
    cv2.waitKey(0)
#cv2.destroyAllWindows()
if __name__ == "__main__":
    # Controller channel runs in the background; daemon so it dies with
    # the main thread.  setDaemon() is deprecated since Python 3.10.
    t1 = threading.Thread(target = connect_controller)
    t1.daemon = True
    t1.start()
    # detect_net = load_net(b"./cfg/yolov3-tiny.cfg", b"yolov3-tiny.weights", 0)
    detect_net = load_net(b"./cfg/yolov3-416.cfg", b"yolov3.weights", 0)
    # detect_net = load_net(b"./cfg/yolov3-608.cfg", b"yolov3.weights", 0)
    detect_meta = load_meta(b"cfg/coco.data")
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind((HOST, USER_PORT))
    s.listen(10)
    while True:
        client, addr = s.accept()
        print ("Get new user socket")
        StartTime = time.time()
        while True:
            decimg = recv_image_from_socket(client)
            if decimg is False:
                print("client droped, break, waiting other clients")
                break
            result = detect(detect_net, detect_meta, decimg, QUATO, thresh=0.7)
            Latency.append(time.time() - StartTime)
            print(str(time.time() - StartTime))
            # Ack each processed frame so the mobile client sends the next.
            str1 = '0'+'\n'
            client.sendall(str1.encode())
            StartTime = time.time()
        # if client drop, then close and re-accept
        client.close()
|
PC_Miner.py | #!/usr/bin/env python3
"""
Duino-Coin Official PC Miner 2.75 © MIT licensed
https://duinocoin.com
https://github.com/revoxhere/duino-coin
Duino-Coin Team & Community 2019-2021
"""
from time import time, sleep, strptime, ctime
from hashlib import sha1
from socket import socket
from multiprocessing import Lock as thread_lock
from multiprocessing import cpu_count, current_process
from multiprocessing import Process, Manager
from threading import Thread
from datetime import datetime
from random import randint
from os import execl, mkdir, _exit
from subprocess import DEVNULL, Popen, check_call
import pip
import sys
import os
import json
import requests
from pathlib import Path
from re import sub
from random import choice
from platform import machine as osprocessor
from signal import SIGINT, signal
from locale import LC_ALL, getdefaultlocale, getlocale, setlocale
from configparser import ConfigParser
# Shared parser instance for Settings.cfg (note: the name shadows the
# stdlib `configparser` module).
configparser = ConfigParser()
def handler(signal_received, frame):
    """
    Nicely handle CTRL+C exit
    """
    # Worker processes inherit the handler but must not print the banner
    # or call _exit; only the parent says goodbye.
    if current_process().name != "MainProcess":
        return
    pretty_print(
        get_string("sigint_detected")
        + Style.NORMAL
        + Fore.RESET
        + get_string("goodbye"),
        "warning")
    _exit(0)
def install(package):
    """
    Automatically installs python pip package and restarts the program
    """
    try:
        # Older pip exposes pip.main(); newer releases removed it.
        pip.main(["install", package])
    except AttributeError:
        check_call([sys.executable, '-m', 'pip', 'install', package])
    # Re-exec the miner so the freshly installed package can be imported.
    execl(sys.executable, sys.executable, *sys.argv)
# Optional third-party deps: try to import each, auto-install on failure
# (install() re-execs the miner after a successful pip run).
try:
    from colorama import Back, Fore, Style, init
    init(autoreset=True)
except ModuleNotFoundError:
    print("Colorama is not installed. "
          + "Miner will try to automatically install it "
          + "If it fails, please manually execute "
          + "python3 -m pip install colorama")
    install("colorama")
try:
    import cpuinfo
except ModuleNotFoundError:
    print("Cpuinfo is not installed. "
          + "Miner will try to automatically install it "
          + "If it fails, please manually execute "
          + "python3 -m pip install py-cpuinfo")
    install("py-cpuinfo")
try:
    from pypresence import Presence
except ModuleNotFoundError:
    print("Pypresence is not installed. "
          + "Miner will try to automatically install it "
          + "If it fails, please manually execute "
          + "python3 -m pip install pypresence")
    install("pypresence")
class Settings:
    """
    Class containing default miner and server settings
    """
    ENCODING = "UTF8"
    SEPARATOR = ","          # field separator of the pool wire protocol
    VER = 2.75
    DATA_DIR = "Duino-Coin PC Miner " + str(VER)
    TRANSLATIONS = ("https://raw.githubusercontent.com/"
                    + "revoxhere/"
                    + "duino-coin/master/Resources/"
                    + "PC_Miner_langs.json")
    TRANSLATIONS_FILE = "/Translations.json"
    SETTINGS_FILE = "/Settings.cfg"
    SOC_TIMEOUT = 15         # pool socket timeout in seconds
    REPORT_TIME = 120        # seconds between periodic mining reports
    DONATE_LVL = 0
    BLOCK = " ‖ "
    PICK = ""
    COG = " @"
    if (os.name != "nt"
            or bool(os.name == "nt"
                    and os.environ.get("WT_SESSION"))):
        # Windows' cmd does not support emojis, shame!
        PICK = " ⛏"
        COG = " ⚙"
class Algorithms:
    """
    Class containing algorithms used by the miner
    For more info about the implementation refer to the Duino whitepaper:
    https://github.com/revoxhere/duino-coin/blob/gh-pages/assets/whitepaper.pdf
    """
    def DUCOS1(last_h: str, exp_h: str, diff: int, eff: int):
        """
        Brute-force the DUCO-S1 share: find the nonce in
        [0, 100 * diff] with sha1(last_h + str(nonce)) == exp_h.

        Returns [nonce, hashrate] on success, [0, 0] when no nonce in
        range matches.  eff > 0 throttles the search every 5000 nonces
        (higher intensity setting -> lower eff -> less sleeping).
        """
        time_start = time()
        # Hash the prefix once; copy() per nonce avoids re-hashing it.
        base_hash = sha1(last_h.encode('ascii'))
        for nonce in range(100 * diff + 1):
            temp_h = base_hash.copy()
            temp_h.update(str(nonce).encode('ascii'))
            d_res = temp_h.hexdigest()
            if eff != 0:
                if nonce % 5000 == 0:
                    sleep(eff / 100)
            if d_res == exp_h:
                # Guard against a zero elapsed time on instant hits,
                # which previously raised ZeroDivisionError.
                time_elapsed = max(time() - time_start, 1e-9)
                hashrate = nonce / time_elapsed
                return [nonce, hashrate]
        return [0, 0]
class Client:
    """
    Class helping to organize socket connections

    All methods operate on a single module-global socket `s`, so one
    Client connection exists per process.
    """
    def connect(pool: tuple):
        """Open the global socket to (ip, port) with the default timeout."""
        global s
        s = socket()
        s.settimeout(Settings.SOC_TIMEOUT)
        s.connect(pool)
    def send(msg: str):
        """Send msg to the pool; sendall raises on failure, so reaching
        the return means the whole payload was written."""
        # Dropped the unused `sent =` binding: sendall always returns None.
        s.sendall(str(msg).encode(Settings.ENCODING))
        return True
    def recv(limit: int = 128):
        """Receive up to `limit` bytes, decoded and newline-stripped."""
        data = s.recv(limit).decode(Settings.ENCODING).rstrip("\n")
        return data
    def fetch_pool():
        """
        Fetches the best pool from the /getPool API endpoint

        Loops with a 15 s back-off until a pool is obtained.
        """
        while True:
            pretty_print(" " + get_string("connection_search"),
                         "warning", "net0")
            try:
                response = requests.get(
                    "https://server.duinocoin.com/getPool",
                    timeout=5).json()
                if response["success"] == True:
                    NODE_ADDRESS = response["ip"]
                    NODE_PORT = response["port"]
                    return (NODE_ADDRESS, NODE_PORT)
                elif "message" in response:
                    pretty_print(f"Warning: {response['message']}"
                                 + ", retrying in 15s", "warning", "net0")
                    sleep(15)
                else:
                    raise Exception(
                        "no response - IP ban or connection error")
            except Exception as e:
                pretty_print(f"Error fetching mining node: {e}"
                             + ", retrying in 15s", "error", "net0")
                sleep(15)
class Donate:
    """Downloads and runs the optional XMG donation miner."""
    def load(donation_level):
        # Fetch the platform-specific donation executable on first use.
        if donation_level > 0:
            if os.name == 'nt':
                if not Path(
                        f"{Settings.DATA_DIR}/Donate.exe").is_file():
                    url = ('https://server.duinocoin.com/'
                           + 'donations/DonateExecutableWindows.exe')
                    r = requests.get(url, timeout=10)
                    with open(f"{Settings.DATA_DIR}/Donate.exe",
                              'wb') as f:
                        f.write(r.content)
            elif os.name == "posix":
                # Pick the binary matching the CPU architecture.
                if osprocessor() == "aarch64":
                    url = ('https://server.duinocoin.com/'
                           + 'donations/DonateExecutableAARCH64')
                elif osprocessor() == "armv7l":
                    url = ('https://server.duinocoin.com/'
                           + 'donations/DonateExecutableAARCH32')
                else:
                    url = ('https://server.duinocoin.com/'
                           + 'donations/DonateExecutableLinux')
                if not Path(
                        f"{Settings.DATA_DIR}/Donate").is_file():
                    r = requests.get(url, timeout=10)
                    with open(f"{Settings.DATA_DIR}/Donate",
                              "wb") as f:
                        f.write(r.content)
    def start(donation_level):
        # Build the OS-specific shell command, then either launch it in
        # the background or print the donation plea when disabled.
        if os.name == 'nt':
            cmd = (f'cd "{Settings.DATA_DIR}" & Donate.exe '
                   + '-o stratum+tcp://xmg.minerclaim.net:3333 '
                   + f'-u revox.donate -p x -s 4 -e {donation_level*10}')
        elif os.name == 'posix':
            cmd = (f'cd "{Settings.DATA_DIR}" && chmod +x Donate '
                   + '&& nice -20 ./Donate -o '
                   + 'stratum+tcp://xmg.minerclaim.net:3333 '
                   + f'-u revox.donate -p x -s 4 -e {donation_level*10}')
        if donation_level <= 0:
            pretty_print(
                Fore.YELLOW + get_string('free_network_warning').lstrip()
                + get_string('donate_warning').replace("\n", "\n\t\t")
                + Fore.GREEN + 'https://duinocoin.com/donate'
                + Fore.YELLOW + get_string('learn_more_donate'),
                'warning', 'sys0')
            sleep(5)
        if donation_level > 0:
            donateExecutable = Popen(cmd, shell=True, stderr=DEVNULL)
            pretty_print(get_string('thanks_donation').replace("\n", "\n\t\t"),
                         'error', 'sys0')
def get_prefix(symbol: str,
               val: float,
               accuracy: int):
    """
    H/s, 1000 => 1 kH/s

    Scales *val* into T/G/M/k units rounded to *accuracy* decimal places
    and appends *symbol*.
    """
    if val >= 1_000_000_000_000:  # Really?
        val = str(round((val / 1_000_000_000_000), accuracy)) + " T"
    elif val >= 1_000_000_000:
        val = str(round((val / 1_000_000_000), accuracy)) + " G"
    elif val >= 1_000_000:
        val = str(round((val / 1_000_000), accuracy)) + " M"
    elif val >= 1_000:
        # Bug fix: this branch dropped `accuracy`, so e.g. 1234 H/s
        # displayed as "1 kH/s" instead of "1.23 kH/s".
        val = str(round((val / 1_000), accuracy)) + " k"
    else:
        val = str(round(val)) + " "
    return val + symbol
def periodic_report(start_time, end_time,
                    shares, hashrate, uptime):
    """
    Displays nicely formated uptime stats

    Reports the period length, shares found (with share rate), average
    hashrate and estimated total hashes for the period.
    """
    seconds = round(end_time - start_time)
    report = (get_string("periodic_mining_report")
              + Fore.RESET + Style.NORMAL
              + get_string("report_period")
              + str(seconds) + get_string("report_time")
              + get_string("report_body1")
              + str(shares) + get_string("report_body2")
              + str(round(shares / seconds, 1))
              + get_string("report_body3")
              + get_string("report_body4")
              + str(get_prefix("H/s", hashrate, 2))
              + get_string("report_body5")
              + str(int(hashrate * seconds))
              + get_string("report_body6")
              + get_string("total_mining_time")
              + str(uptime))
    pretty_print(report, "success")
def calculate_uptime(start_time):
    """
    Returns seconds, minutes or hours passed since timestamp

    Picks singular/plural wording for exactly one minute / one hour.
    """
    uptime = time() - start_time
    if uptime <= 59:
        return str(round(uptime)) + get_string("uptime_seconds")
    if uptime == 60:
        return str(round(uptime // 60)) + get_string("uptime_minute")
    if uptime < 3600:
        return str(round(uptime // 60)) + get_string("uptime_minutes")
    if uptime == 3600:
        return str(round(uptime // 3600)) + get_string("uptime_hour")
    # Bug fix: the hour branches were unreachable behind the former
    # `uptime >= 60` test, so uptimes over an hour printed as minutes.
    return str(round(uptime // 3600)) + get_string("uptime_hours")
def pretty_print(msg: str = None,
                 state: str = "success",
                 sender: str = "sys0"):
    """
    Produces nicely formatted CLI output for messages:
    HH:MM:S |sender| msg

    sender prefix picks the badge color (net/cpu/sys); state picks the
    message color (success/error/other=warning).
    """
    if sender.startswith("net"):
        bg_color = Back.BLUE
    elif sender.startswith("cpu"):
        bg_color = Back.YELLOW
    else:
        # Bug fix: only "sys" used to set a color, so any other sender
        # hit an unbound bg_color (NameError); default to the sys color.
        bg_color = Back.GREEN
    if state == "success":
        fg_color = Fore.GREEN
    elif state == "error":
        fg_color = Fore.RED
    else:
        fg_color = Fore.YELLOW
    with thread_lock():
        print(Fore.WHITE + datetime.now().strftime(Style.DIM + "%H:%M:%S ")
              + Style.BRIGHT + bg_color + " " + sender + " "
              + Back.RESET + " " + fg_color + msg.strip())
def share_print(id, type,
                accept, reject,
                hashrate, total_hashrate,
                computetime, diff, ping,
                back_color):
    """
    Produces nicely formatted CLI output for shares:
    HH:MM:S |cpuN| ⛏ Accepted 0/0 (100%) ∙ 0.0s ∙ 0 kH/s ⚙ diff 0 k ∙ ping 0ms

    `type` is "accept", "block" or anything else (printed as rejected);
    accept/reject are the running share counters for thread *id*.
    """
    total_hashrate = get_prefix("H/s", total_hashrate, 2)
    diff = get_prefix("", int(diff), 0)
    if type == "accept":
        share_str = get_string("accepted")
        fg_color = Fore.GREEN
    elif type == "block":
        share_str = get_string("block_found")
        fg_color = Fore.YELLOW
    else:
        share_str = get_string("rejected")
        fg_color = Fore.RED
    # Lock so parallel mining processes don't interleave their output.
    with thread_lock():
        print(Fore.WHITE + datetime.now().strftime(Style.DIM + "%H:%M:%S ")
              + Fore.WHITE + Style.BRIGHT + back_color + Fore.RESET
              + " cpu" + str(id) + " " + Back.RESET
              + fg_color + Settings.PICK + share_str + Fore.RESET
              + str(accept) + "/" + str(accept + reject) + Fore.YELLOW
              + " (" + str(round(accept / (accept + reject) * 100)) + "%)"
              + Style.NORMAL + Fore.RESET
              + " ∙ " + str("%04.1f" % float(computetime)) + "s"
              + Style.NORMAL + " ∙ " + Fore.BLUE + Style.BRIGHT
              + str(total_hashrate) + Fore.RESET + Style.NORMAL
              + Settings.COG + f" diff {diff} ∙ " + Fore.CYAN
              + f"ping {(int(ping))}ms")
def get_string(string_name):
    """
    Gets a string from the language file

    Falls back to the English table, then to a diagnostic placeholder.
    """
    translations = lang_file[lang]
    if string_name in translations:
        return translations[string_name]
    english = lang_file["english"]
    if string_name in english:
        return english[string_name]
    return "String not found: " + string_name
class Miner:
    def greeting():
        """Print the startup banner: version, CPU, donation level,
        algorithm/difficulty and a time-of-day greeting."""
        diff_str = get_string("net_diff_short")
        if user_settings["start_diff"] == "LOW":
            diff_str = get_string("low_diff_short")
        elif user_settings["start_diff"] == "MEDIUM":
            diff_str = get_string("medium_diff_short")
        # Pick a greeting matching the local time of day.
        current_hour = strptime(ctime(time())).tm_hour
        greeting = get_string("greeting_back")
        if current_hour < 12:
            greeting = get_string("greeting_morning")
        elif current_hour == 12:
            greeting = get_string("greeting_noon")
        elif current_hour > 12 and current_hour < 18:
            greeting = get_string("greeting_afternoon")
        elif current_hour >= 18:
            greeting = get_string("greeting_evening")
        print("\n" + Style.DIM + Fore.YELLOW + Settings.BLOCK + Fore.YELLOW
              + Style.BRIGHT + get_string("banner") + Style.RESET_ALL
              + Fore.MAGENTA + " (" + str(Settings.VER) + ") "
              + Fore.RESET + "2019-2021")
        print(Style.DIM + Fore.YELLOW + Settings.BLOCK + Style.NORMAL
              + Fore.YELLOW + "https://github.com/revoxhere/duino-coin")
        if lang != "english":
            print(Style.DIM + Fore.YELLOW + Settings.BLOCK
                  + Style.NORMAL + Fore.RESET + lang.capitalize()
                  + " translation: " + Fore.YELLOW
                  + get_string("translation_autor"))
        try:
            print(Style.DIM + Fore.YELLOW + Settings.BLOCK
                  + Style.NORMAL + Fore.RESET + "CPU: " + Style.BRIGHT
                  + Fore.YELLOW + str(user_settings["threads"])
                  + "x " + str(cpu["brand_raw"]))
        except:
            # cpuinfo may not expose brand_raw on every platform.
            print(Style.DIM + Fore.YELLOW + Settings.BLOCK
                  + Style.NORMAL + Fore.RESET + "CPU: " + Style.BRIGHT
                  + Fore.YELLOW + str(user_settings["threads"])
                  + "x threads")
        if os.name == "nt" or os.name == "posix":
            print(Style.DIM + Fore.YELLOW
                  + Settings.BLOCK + Style.NORMAL + Fore.RESET
                  + get_string("donation_level") + Style.BRIGHT
                  + Fore.YELLOW + str(user_settings["donate"]))
        print(Style.DIM + Fore.YELLOW + Settings.BLOCK
              + Style.NORMAL + Fore.RESET + get_string("algorithm")
              + Style.BRIGHT + Fore.YELLOW + user_settings["algorithm"]
              + Settings.COG + " " + diff_str)
        if user_settings["identifier"] != "None":
            print(Style.DIM + Fore.YELLOW + Settings.BLOCK
                  + Style.NORMAL + Fore.RESET + get_string("rig_identifier")
                  + Style.BRIGHT + Fore.YELLOW + user_settings["identifier"])
        print(Style.DIM + Fore.YELLOW + Settings.BLOCK
              + Style.NORMAL + Fore.RESET + str(greeting)
              + ", " + Style.BRIGHT + Fore.YELLOW
              + str(user_settings["username"]) + "!\n")
def preload():
"""
Creates needed directories and files for the miner
"""
global lang_file
global lang
if not Path(Settings.DATA_DIR).is_dir():
mkdir(Settings.DATA_DIR)
if not Path(Settings.DATA_DIR + Settings.TRANSLATIONS_FILE).is_file():
with open(Settings.DATA_DIR + Settings.TRANSLATIONS_FILE,
"wb") as f:
f.write(requests.get(Settings.TRANSLATIONS,
timeout=5).content)
with open(Settings.DATA_DIR + Settings.TRANSLATIONS_FILE, "r",
encoding=Settings.ENCODING) as file:
lang_file = json.load(file)
try:
if not Path(Settings.DATA_DIR + Settings.SETTINGS_FILE).is_file():
locale = getdefaultlocale()[0]
if locale.startswith("es"):
lang = "spanish"
elif locale.startswith("pl"):
lang = "polish"
elif locale.startswith("fr"):
lang = "french"
elif locale.startswith("fa"):
lang = "farsi"
elif locale.startswith("mt"):
lang = "maltese"
elif locale.startswith("ru"):
lang = "russian"
elif locale.startswith("de"):
lang = "german"
elif locale.startswith("tr"):
lang = "turkish"
elif locale.startswith("pr"):
lang = "portuguese"
elif locale.startswith("it"):
lang = "italian"
elif locale.startswith("sk"):
lang = "slovak"
elif locale.startswith("zh"):
lang = "chinese_simplified"
elif locale.startswith("th"):
lang = "thai"
elif locale.startswith("ko"):
lang = "korean"
else:
lang = "english"
else:
try:
configparser.read(Settings.DATA_DIR
+ Settings.SETTINGS_FILE)
lang = configparser["PC Miner"]["language"]
except Exception:
lang = "english"
except Exception as e:
print("Error with lang file, falling back to english: " + str(e))
lang = "english"
    def load_cfg():
        """
        Loads miner settings file or starts the config tool

        On first run walks the user through an interactive setup and
        persists it; afterwards returns the "PC Miner" config section.
        """
        if not Path(Settings.DATA_DIR + Settings.SETTINGS_FILE).is_file():
            print(get_string("basic_config_tool")
                  + Settings.DATA_DIR
                  + get_string("edit_config_file_warning")
                  + "\n"
                  + get_string("dont_have_account")
                  + Fore.YELLOW
                  + get_string("wallet")
                  + Fore.RESET
                  + get_string("register_warning"))
            # NOTE(review): username is hard-coded here; upstream asks for
            # it via input(). Confirm this local change is intentional.
            username = "pr9jamdar"
            if not username:
                username = choice(["revox", "Bilaboz", "JoyBed", "Connor2"])
            algorithm = "DUCO-S1"
            intensity = None
            # Keep only the digits typed, then clamp to the 1-100 range.
            intensity = sub(r"\D", "",
                            input(Style.NORMAL +
                                  get_string("ask_intensity") +
                                  Style.BRIGHT))
            if not intensity:
                intensity = 95
            elif float(intensity) > 100:
                intensity = 100
            elif float(intensity) < 1:
                intensity = 1
            threads = sub(r"\D", "",
                          input(Style.NORMAL + get_string("ask_threads")
                                + str(cpu_count()) + "): " + Style.BRIGHT))
            if not threads:
                threads = cpu_count()
            if int(threads) > 8:
                threads = 8
                pretty_print(
                    Style.BRIGHT
                    + get_string("max_threads_notice"))
            elif int(threads) < 1:
                threads = 1
            print(Style.BRIGHT
                  + "1" + Style.NORMAL + " - " + get_string("low_diff")
                  + "\n" + Style.BRIGHT
                  + "2" + Style.NORMAL + " - " + get_string("medium_diff")
                  + "\n" + Style.BRIGHT
                  + "3" + Style.NORMAL + " - " + get_string("net_diff"))
            start_diff = sub(r"\D", "",
                             input(Style.NORMAL + get_string("ask_difficulty")
                                   + Style.BRIGHT))
            if start_diff == "1":
                start_diff = "LOW"
            elif start_diff == "3":
                start_diff = "NET"
            else:
                start_diff = "MEDIUM"
            rig_id = input(Style.NORMAL + get_string("ask_rig_identifier")
                           + Style.BRIGHT)
            if rig_id.lower() == "y":
                rig_id = str(input(Style.NORMAL + get_string("ask_rig_name")
                                   + Style.BRIGHT))
            else:
                rig_id = "None"
            donation_level = '0'
            if os.name == 'nt' or os.name == 'posix':
                donation_level = input(Style.NORMAL
                                       + get_string('ask_donation_level')
                                       + Style.BRIGHT)
                donation_level = sub(r'\D', '', donation_level)
            if donation_level == '':
                donation_level = 1
            if float(donation_level) > int(5):
                donation_level = 5
            if float(donation_level) < int(0):
                donation_level = 0
            # Persist the answers for subsequent runs.
            configparser["PC Miner"] = {
                "username": username,
                "intensity": intensity,
                "threads": threads,
                "start_diff": start_diff,
                "donate": int(donation_level),
                "identifier": rig_id,
                "algorithm": algorithm,
                "language": lang,
                "soc_timeout": Settings.SOC_TIMEOUT,
                "report_sec": Settings.REPORT_TIME,
                "discord_rp": "y"}
            with open(Settings.DATA_DIR + Settings.SETTINGS_FILE,
                      "w") as configfile:
                configparser.write(configfile)
            print(Style.RESET_ALL + get_string("config_saved"))
        configparser.read(Settings.DATA_DIR
                          + Settings.SETTINGS_FILE)
        return configparser["PC Miner"]
def m_connect(id, pool):
retry_counter = 0
while True:
try:
if retry_counter > 3:
pool = Client.fetch_pool()
retry_counter = 0
socket_connection = Client.connect(pool)
POOL_VER = Client.recv(5)
if id == 0:
Client.send("MOTD")
motd = Client.recv(512).replace("\n", "\n\t\t")
pretty_print("MOTD: " + Fore.RESET + Style.NORMAL
+ str(motd), "success", "net" + str(id))
if float(POOL_VER) <= Settings.VER:
pretty_print(get_string("connected") + Fore.RESET
+ Style.NORMAL +
get_string("connected_server")
+ str(POOL_VER) + ", " + pool[0] + ":"
+ str(pool[1]) + ")", "success",
"net" + str(id))
else:
pretty_print(get_string("outdated_miner")
+ str(Settings.VER) + ") -"
+ get_string("server_is_on_version")
+ str(POOL_VER) + Style.NORMAL
+ Fore.RESET +
get_string("update_warning"),
"warning", "net" + str(id))
sleep(5)
break
except:
pretty_print(get_string('connecting_error')
+ Style.NORMAL + f' (connection err: {e})',
'error', 'net0')
retry_counter += 1
sleep(10)
def mine(id: int, user_settings: list,
         pool: tuple,
         accept: int, reject: int,
         hashrate: list,
         single_miner_id: str):
    """
    Main section that executes the functionalities from the sections above.

    Per-thread mining loop: connect to the pool, request jobs, solve them
    with Algorithms.DUCOS1 and submit results, printing share feedback.
    NOTE(review): the annotations are inherited from the original and look
    inaccurate - user_settings behaves like a config mapping and
    accept/reject like multiprocessing Value proxies; confirm before relying
    on them.
    """
    using_algo = get_string("using_algo")

    pretty_print(get_string("mining_thread") + str(id)
                 + get_string("mining_thread_starting")
                 + Style.NORMAL + Fore.RESET + using_algo + Fore.YELLOW
                 + str(user_settings["intensity"])
                 + "% " + get_string("efficiency"),
                 "success", "sys"+str(id))

    # share-report bookkeeping (thread 0 emits periodic reports)
    last_report = time()
    r_shares, last_shares = 0, 0

    while True:  # outer loop: (re)connect to the pool
        try:
            Miner.m_connect(id, pool)
            while True:  # one iteration per job
                try:
                    while True:  # request jobs until a well-formed one arrives
                        job_req = "JOB"
                        Client.send(job_req
                                    + Settings.SEPARATOR
                                    + str(user_settings["username"])
                                    + Settings.SEPARATOR
                                    + str(user_settings["start_diff"]))
                        job = Client.recv().split(Settings.SEPARATOR)
                        if len(job) == 3:
                            # valid job: last block hash, expected hash, difficulty
                            break
                        else:
                            pretty_print(
                                "Node message: " + str(job[1]),
                                "warning")
                            sleep(3)

                    while True:
                        time_start = time()
                        back_color = Back.YELLOW

                        # map intensity percentage to a sleep factor for DUCOS1
                        eff = 0
                        eff_setting = int(user_settings["intensity"])
                        if 99 > eff_setting >= 90:
                            eff = 0.005
                        elif 90 > eff_setting >= 70:
                            eff = 0.1
                        elif 70 > eff_setting >= 50:
                            eff = 0.8
                        elif 50 > eff_setting >= 30:
                            eff = 1.8
                        elif 30 > eff_setting >= 1:
                            eff = 3

                        result = Algorithms.DUCOS1(
                            job[0], job[1], int(job[2]), eff)
                        computetime = time() - time_start
                        hashrate[id] = result[1]
                        total_hashrate = sum(hashrate.values())

                        while True:  # submit the result and read feedback
                            Client.send(f"{result[0]}"
                                        + Settings.SEPARATOR
                                        + f"{result[1]}"
                                        + Settings.SEPARATOR
                                        + "Official PC Miner"
                                        + f" {Settings.VER}"
                                        + Settings.SEPARATOR
                                        + f"{user_settings['identifier']}"
                                        + Settings.SEPARATOR
                                        + Settings.SEPARATOR
                                        + f"{single_miner_id}")

                            time_start = time()
                            feedback = Client.recv(
                            ).split(Settings.SEPARATOR)
                            ping = (time() - time_start) * 1000

                            if feedback[0] == "GOOD":
                                accept.value += 1
                                share_print(id, "accept",
                                            accept.value, reject.value,
                                            result[1], total_hashrate,
                                            computetime, job[2], ping,
                                            back_color)
                            elif feedback[0] == "BLOCK":
                                # found a full block, still counted in reject
                                reject.value += 1
                                share_print(id, "block",
                                            accept.value, reject.value,
                                            result[1], total_hashrate,
                                            computetime, job[2], ping,
                                            back_color)
                            elif feedback[0] == "BAD":
                                reject.value += 1
                                share_print(id, "reject",
                                            accept.value, reject.value,
                                            result[1], total_hashrate,
                                            computetime, job[2], ping,
                                            back_color)

                            if id == 0:
                                # thread 0 prints the periodic report
                                end_time = time()
                                elapsed_time = end_time - last_report
                                if elapsed_time >= Settings.REPORT_TIME:
                                    r_shares = accept.value - last_shares
                                    uptime = calculate_uptime(
                                        mining_start_time)
                                    periodic_report(last_report, end_time,
                                                    r_shares,
                                                    sum(hashrate.values()),
                                                    uptime)
                                    last_report = time()
                                    last_shares = accept.value
                            break
                        break
                except Exception as e:
                    pretty_print(get_string("error_while_mining")
                                 + " " + str(e), "error", "net" + str(id))
                    sleep(5)
                    break
        except Exception as e:
            # any failure here falls through to a full reconnect
            pass
class Discord_rp:
    """Discord Rich Presence integration: shows hashrate and share counts.

    Best-effort only - every error is swallowed so mining never stops
    because Discord is absent or unreachable.
    """

    def connect():
        # creates the global RPC client and starts the update thread
        global RPC
        try:
            RPC = Presence(808045598447632384)
            RPC.connect()
            Thread(target=Discord_rp.update).start()
        except Exception as e:
            # Discord not running / RPC unavailable - silently skip
            #print("Error launching Discord RPC thread: " + str(e))
            pass

    def update():
        # refresh the presence card every 15 seconds, forever
        while True:
            try:
                total_hashrate = get_prefix("H/s", sum(hashrate.values()), 2)
                RPC.update(details="Hashrate: " + str(total_hashrate),
                           start=mining_start_time,
                           state=str(accept.value) + "/"
                           + str(reject.value + accept.value)
                           + " accepted shares",
                           large_image="ducol",
                           large_text="Duino-Coin, "
                           + "a coin that can be mined with almost everything"
                           + ", including AVR boards",
                           buttons=[{"label": "Visit duinocoin.com",
                                     "url": "https://duinocoin.com"},
                                    {"label": "Join the Discord",
                                     "url": "https://discord.gg/k48Ht5y"}])
            except Exception as e:
                # transient RPC failure - try again on the next tick
                #print("Error updating Discord RPC thread: " + str(e))
                pass
            sleep(15)
# Module-level bootstrap: warm up language/translation data, prepare the
# list of worker processes and record when mining started (used for
# uptime reports and the Discord presence timer).
Miner.preload()
p_list = []
mining_start_time = time()
# Entry point: load config, then spawn one mining process per thread.
if __name__ == "__main__":
    from multiprocessing import freeze_support
    freeze_support()  # required for frozen (PyInstaller) Windows builds

    cpu = cpuinfo.get_cpu_info()
    # shared counters/dict so all mining processes report into one place
    accept = Manager().Value("i", 0)
    reject = Manager().Value("i", 0)
    hashrate = Manager().dict()

    signal(SIGINT, handler)  # graceful Ctrl+C shutdown
    user_settings = Miner.load_cfg()
    Miner.greeting()
    fastest_pool = Client.fetch_pool()
    Donate.load(int(user_settings["donate"]))
    Donate.start(int(user_settings["donate"]))
    """
    Generate a random number that's used only to
    make the wallets display one miner with many threads
    instead of many separate miners clogging it up
    (like it was before release 2.7.3)
    """
    single_miner_id = randint(0, 2811)

    # cap worker count at 8 processes
    threads = int(user_settings["threads"])
    if threads > 8:
        threads = 8
        pretty_print(Style.BRIGHT
                     + get_string("max_threads_notice"))

    for i in range(threads):
        p = Process(target=Miner.mine,
                    args=[i, user_settings,
                          fastest_pool, accept, reject,
                          hashrate, single_miner_id])
        p_list.append(p)
        p.start()
        sleep(0.05)  # stagger startup slightly

    if user_settings["discord_rp"] == 'y':
        Discord_rp.connect()

    for p in p_list:
        p.join()
|
main.py | import warnings
warnings.simplefilter('ignore')
import serial
import sys
from multiprocessing import Value, Process
from run_webcam import openpose_start
from mapping import mapping

# Process 1 ... joint-position capture (OpenPose webcam loop)
# Process 2 ... mapping & serial communication to the robot
# Simple gate before starting the hardware processes.
password = str(input("Password : "))
if password != "Hello, Mr.CoBOT.":
    print("INVALID INPUT")
    sys.exit()

# shared integer flag used by both processes to coordinate state
stats = Value('i',0)
p1 = Process(target=openpose_start, args=[stats])
p2 = Process(target=mapping, args=[stats])
p1.start()
p2.start()
p1.join()
p2.join()
|
coinbaseproWebSocket.py | import cbpro
from decimal import *
import logging
import sys
import dateutil.parser
import time
from multiprocessing import Process, Event
# Init logger: DEBUG-level stream handler to stdout with a verbose format
# that includes the source location of each record.
logger = logging.getLogger('coinbaseproTestLogger')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter(
    '%(asctime)s - %(levelname)s - %(message)s - [%(filename)s:%(funcName)s:%(lineno)s]',
    datefmt="%Y-%m-%d %H:%M:%S"
)
ch.setFormatter(formatter)
logger.addHandler(ch)
class CryptoArbOrderBook(cbpro.OrderBook):
    """cbpro order book that consolidates price levels and rate-limits
    how often the book is re-processed.

    :param maxEntryCount: max number of consolidated levels kept per side
    :param timeLimiterSeconds: minimum seconds between two consolidations
    """

    def __init__(self, maxEntryCount=10, timeLimiterSeconds=0.05, product_id='BTC-USD', log_to=None):
        self.maxEntryCount = maxEntryCount
        # previous consolidated books, used to detect changes
        self.asksConsolidatedOld = []
        self.bidsConsolidatedOld = []
        self.timeLastRun = time.time()
        self.timeLimiterSeconds = timeLimiterSeconds
        return super().__init__(product_id=product_id, log_to=log_to)

    def on_message(self, message):
        """Handle a websocket message; at most once per timeLimiterSeconds,
        consolidate both sides and build a payload when the book changed."""
        super().on_message(message)
        if (time.time() - self.timeLastRun) > self.timeLimiterSeconds:
            try:
                book = self.get_current_book()
                if len(book["asks"]) > 0 and len(book["bids"]) > 0:
                    asksConsolidated = self.getConsolidatedOrderbook(book["asks"], reverse=False)
                    bidsConsolidated = self.getConsolidatedOrderbook(book["bids"], reverse=True)
                    print("asks:" + str(asksConsolidated) + ", bids:" + str(bidsConsolidated))
                    if self.asksConsolidatedOld != asksConsolidated or self.bidsConsolidatedOld != bidsConsolidated:
                        payload = {}
                        payload['exchange'] = "coinbasepro"
                        payload['symbol'] = message['product_id'].replace('-', '/')
                        payload['data'] = {}
                        payload['data']['asks'] = asksConsolidated
                        payload['data']['bids'] = bidsConsolidated
                        payload['timestamp'] = time.mktime(dateutil.parser.parse(message['time']).timetuple())
                        logger.info("Received " + payload['symbol'] + " prices from coinbasepro")
                        self.asksConsolidatedOld = asksConsolidated
                        self.bidsConsolidatedOld = bidsConsolidated
                    # sanity check on a crossed book
                    if book["asks"][0][0] <= book["bids"][-1][0]:
                        logger.error("Bid higher than ask")
            except Exception as err:
                logger.error("Error during message processing:" + str(err))
            finally:
                self.timeLastRun = time.time()

    def getConsolidatedOrderbook(self, entries, reverse=False):
        """Collapse raw (price, size, ...) entries into at most
        maxEntryCount [price, size] levels, summing sizes at equal prices.

        :param entries: book side sorted by price (asks ascending);
                        pass reverse=True for bids so best bids come first
        :return: list of [float(price), float(size)] best-first
        """
        orderbook = []
        if not entries:
            # BUG FIX: the original raised IndexError on an empty side
            return orderbook
        size = Decimal(0)
        if reverse:
            price = entries[-1][0]
            entries = reversed(entries)
        else:
            price = entries[0][0]
        for entry in entries:
            if entry[0] == price:
                size += entry[1]
            else:
                orderbook.append([float(price), float(size)])
                if len(orderbook) >= self.maxEntryCount:
                    return orderbook
                price = entry[0]
                size = entry[1]
        # BUG FIX: flush the final accumulated price level; the original
        # dropped it whenever the loop finished without hitting the
        # maxEntryCount break, losing the last level of a short book.
        orderbook.append([float(price), float(size)])
        return orderbook
def CryptoArbOrderBookProcess(pair, stopProcessesEvent):
    """Run one order-book subscription for *pair* until the stop event fires."""
    product = pair.replace('/', '-')
    book = CryptoArbOrderBook(product_id=product)
    book.start()
    # block this process until the main process signals shutdown
    stopProcessesEvent.wait()
    book.close()
# Pairs to subscribe to. BUG FIX: 'BTC/EUR' appeared twice in the original
# list, spawning two identical subscription processes; deduplicated here
# while keeping the original order.
pairs = ['BCH/BTC', 'BTC/EUR', 'LTC/EUR', 'BTC/USD', 'ETH/USD',
         'ETH/EUR', 'BCH/EUR', 'ETH/BTC', 'BCH/USD']
stopProcessesEvent = Event()
processes = [Process(target=CryptoArbOrderBookProcess, args=(pair, stopProcessesEvent)) for pair in pairs]
# start order books; daemon so they die with the main process
for process in processes:
    process.daemon = True
    process.start()
input("Press Enter to stop")
stopProcessesEvent.set()
for process in processes:
    process.join()
logger.info("coinbaseproWebsocket exited normally. Bye.")
|
coap.py | import logging.config
import random
import socket
import struct
import threading
import xml.etree.ElementTree as ElementTree
import os
import re
from coapthon import defines
from coapthon.client.helperclient import HelperClient
from coapthon.layers.blocklayer import BlockLayer
from coapthon.layers.cachelayer import CacheLayer
from coapthon.layers.forwardLayer import ForwardLayer
from coapthon.layers.messagelayer import MessageLayer
from coapthon.layers.observelayer import ObserveLayer
from coapthon.layers.resourcelayer import ResourceLayer
from coapthon.messages.message import Message
from coapthon.messages.request import Request
from coapthon.resources.remoteResource import RemoteResource
from coapthon.resources.resource import Resource
from coapthon.serializer import Serializer
from coapthon.utils import Tree, create_logging
__author__ = 'Giacomo Tanganelli'
if not os.path.isfile("logging.conf"):
create_logging()
logger = logging.getLogger(__name__)
logging.config.fileConfig("logging.conf", disable_existing_loggers=False)
class CoAP(object):
"""
Implementation of the Reverse Proxy
"""
def __init__(self, server_address, xml_file, multicast=False, starting_mid=None, cache=False, sock=None):
    """
    Initialize the Reverse Proxy.

    :param server_address: Server address for incoming connections
    :param xml_file: the xml file that describe remote servers
    :param multicast: if the ip is a multicast address
    :param starting_mid: used for testing purposes
    :param cache: if a cache must be used
    :param sock: if a socket has been created externally, it can be used directly
    """
    self.stopped = threading.Event()
    self.stopped.clear()
    # events of in-flight retransmission threads; all set() on close()
    self.to_be_stopped = []
    # NOTE(review): this rebinds self.purge from the bound method to the
    # Thread object. It works because target=self.purge captured the
    # method first, but the method is no longer reachable through the
    # instance afterwards - consider renaming the attribute.
    self.purge = threading.Thread(target=self.purge)
    self.purge.start()
    self._messageLayer = MessageLayer(starting_mid)
    self._blockLayer = BlockLayer()
    self._observeLayer = ObserveLayer()
    self._forwardLayer = ForwardLayer(self)
    self.resourceLayer = ResourceLayer(self)
    self.cache_enable = cache
    if self.cache_enable:
        self._cacheLayer = CacheLayer(defines.REVERSE_PROXY)
    else:
        self._cacheLayer = None
    # Resource directory
    root = Resource('root', self, visible=False, observable=False, allow_children=True)
    root.path = '/'
    self.root = Tree()
    self.root["/"] = root
    self._serializer = None
    self.server_address = server_address
    self.multicast = multicast
    self.file_xml = xml_file
    # remote server name -> (host, port), filled by discovery
    self._mapping = {}
    addrinfo = socket.getaddrinfo(self.server_address[0], None)[0]
    if sock is not None:
        # Use given socket, could be a DTLS socket
        self._socket = sock
    elif self.multicast:  # pragma: no cover
        # Create a socket
        # self._socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_TTL, 255)
        # self._socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_LOOP, 1)
        # Join group
        if addrinfo[0] == socket.AF_INET:  # IPv4
            self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
            # Allow multiple copies of this program on one machine
            # (not strictly needed)
            self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            self._socket.bind((defines.ALL_COAP_NODES, self.server_address[1]))
            mreq = struct.pack("4sl", socket.inet_aton(defines.ALL_COAP_NODES), socket.INADDR_ANY)
            self._socket.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
            # separate unicast socket for direct responses
            self._unicast_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            self._unicast_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            self._unicast_socket.bind(self.server_address)
        else:
            self._socket = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
            # Allow multiple copies of this program on one machine
            # (not strictly needed)
            self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            self._socket.bind((defines.ALL_COAP_NODES_IPV6, self.server_address[1]))
            addrinfo_multicast = socket.getaddrinfo(defines.ALL_COAP_NODES_IPV6, 5683)[0]
            group_bin = socket.inet_pton(socket.AF_INET6, addrinfo_multicast[4][0])
            mreq = group_bin + struct.pack('@I', 0)
            self._socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_JOIN_GROUP, mreq)
            self._unicast_socket = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
            self._unicast_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            self._unicast_socket.bind(self.server_address)
    else:
        # plain unicast UDP socket (IPv4 or IPv6 based on the address)
        if addrinfo[0] == socket.AF_INET:  # IPv4
            self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        else:
            self._socket = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
            self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self._socket.bind(self.server_address)
    # read the remote-server list and run discovery against each one
    self.parse_config()
def parse_config(self):
"""
Parse the xml file with remote servers and discover resources on each found server.
"""
tree = ElementTree.parse(self.file_xml)
root = tree.getroot()
for server in root.findall('server'):
destination = server.text
name = server.get("name")
self.discover_remote(destination, name)
def discover_remote(self, destination, name):
    """
    Discover resources on remote servers.

    :param destination: the remote server as "host:port" or "[v6addr]:port"
    :type destination: String
    :param name: the name of the remote server
    :type name: String
    """
    assert (isinstance(destination, str))
    # split "[v6addr]:port" (bracketed IPv6) or "host:port"
    if destination.startswith("["):
        host_part, _, port_part = destination.partition("]")
        host = host_part[1:]
        port = int(port_part[1:])
    else:
        host, _, port_part = destination.partition(":")
        port = int(port_part)
    server = (host, port)
    # run a CoAP discovery (GET /.well-known/core) against the server
    client = HelperClient(server)
    response = client.discover()
    client.stop()
    self.discover_remote_results(response, name)
def discover_remote_results(self, response, name):
    """
    Create a new remote server resource for each valid discover response.

    :param response: the response to the discovery request
    :param name: the server name
    """
    host, port = response.source
    if response.code == defines.Codes.CONTENT.number:
        # register the remote server as a visible container resource
        resource = Resource('server', self, visible=True, observable=False, allow_children=True)
        self.add_resource(name, resource)
        self._mapping[name] = (host, port)
        self.parse_core_link_format(response.payload, name, (host, port))
    else:
        # BUG FIX: response.source is a (host, port) tuple; the original
        # concatenated it directly with a str, raising TypeError instead
        # of logging the error.
        logger.error("Server: " + str(response.source) + " isn't valid.")
def parse_core_link_format(self, link_format, base_path, remote_server):
    """
    Parse discovery results.

    Consumes the CoRE Link Format payload ("</path>;att=val,...") one
    entry at a time, creating a RemoteResource under base_path for each.

    :param link_format: the payload of the response to the discovery request
    :param base_path: the base path used to create child resources discovered on the remote server
    :param remote_server: the (ip, port) of the remote server
    """
    while len(link_format) > 0:
        # extract the "<...>" target; keep only the first path segment
        pattern = "<([^>]*)>;"
        result = re.match(pattern, link_format)
        path = result.group(1)
        path = path.split("/")
        path = path[1:][0]
        link_format = link_format[result.end(1) + 2:]
        # extract the attribute list up to the next entry
        pattern = "([^<,])*"
        result = re.match(pattern, link_format)
        attributes = result.group(0)
        dict_att = {}
        if len(attributes) > 0:
            attributes = attributes.split(";")
            for att in attributes:
                a = att.split("=")
                if len(a) > 1:
                    dict_att[a[0]] = a[1]
                else:
                    # valueless attribute: store the name as its own value
                    dict_att[a[0]] = a[0]
        # advance past the consumed attributes and the "," separator
        link_format = link_format[result.end(0) + 1:]
        # TODO handle observing
        resource = RemoteResource('server', remote_server, path, coap_server=self, visible=True, observable=False,
                                  allow_children=True)
        resource.attributes = dict_att
        self.add_resource(base_path + "/" + path, resource)
    logger.info(self.root.dump())
def purge(self):
    """
    Clean old transactions

    Background loop (started from __init__) that wakes up every
    EXCHANGE_LIFETIME seconds and purges completed message-layer state,
    until close() sets the stopped event.
    """
    while not self.stopped.isSet():
        self.stopped.wait(timeout=defines.EXCHANGE_LIFETIME)
        self._messageLayer.purge()
def listen(self, timeout=10):
    """
    Listen for incoming messages. Timeout is used to check if the server must be switched off.

    :param timeout: Socket Timeout in seconds
    """
    self._socket.settimeout(float(timeout))
    while not self.stopped.isSet():
        try:
            data, client_address = self._socket.recvfrom(4096)
        except socket.timeout:
            # periodic wake-up just to re-check the stopped flag
            continue
        try:
            self.receive_datagram((data, client_address))
        except RuntimeError:
            # keep serving even if one datagram blows up
            logger.exception("Exception with Executor")
    self._socket.close()
def close(self):
    """
    Stop the server.

    Signals the main loop, wakes every pending retransmission thread,
    then closes the listening socket.
    """
    logger.info("Stop server")
    self.stopped.set()
    for pending_event in self.to_be_stopped:
        pending_event.set()
    self._socket.close()
def receive_datagram(self, args):
    """
    Handle messages coming from the udp socket.

    Deserializes the datagram and pushes it through the layer pipeline
    (message -> block -> observe -> cache/forward), finally sending the
    response produced by the layers.

    :param args: (data, client_address)
    """
    data, client_address = args

    serializer = Serializer()
    message = serializer.deserialize(data, client_address)
    if isinstance(message, int):
        # deserialization failed: the int is the error code; reply RST
        logger.error("receive_datagram - BAD REQUEST")

        rst = Message()
        rst.destination = client_address
        rst.type = defines.Types["RST"]
        rst.code = message
        self.send_datagram(rst)
        return

    logger.debug("receive_datagram - " + str(message))
    if isinstance(message, Request):

        transaction = self._messageLayer.receive_request(message)

        if transaction.request.duplicated and transaction.completed:
            # duplicate of a finished exchange: replay the stored response
            logger.debug("message duplicated,transaction completed")
            transaction = self._observeLayer.send_response(transaction)
            transaction = self._blockLayer.send_response(transaction)
            transaction = self._messageLayer.send_response(transaction)
            self.send_datagram(transaction.response)
            return
        elif transaction.request.duplicated and not transaction.completed:
            # duplicate still in progress: just re-ACK it
            logger.debug("message duplicated,transaction NOT completed")
            self._send_ack(transaction)
            return

        # arm the separate-response timer before the slow layers run
        transaction.separate_timer = self._start_separate_timer(transaction)

        transaction = self._blockLayer.receive_request(transaction)

        if transaction.block_transfer:
            # mid block-wise transfer: answer immediately, no forwarding
            self._stop_separate_timer(transaction.separate_timer)
            transaction = self._messageLayer.send_response(transaction)
            self.send_datagram(transaction.response)
            return

        transaction = self._observeLayer.receive_request(transaction)

        """
        call to the cache layer to check if there's a cached response for the request
        if not, call the forward layer
        """
        if self._cacheLayer is not None:
            transaction = self._cacheLayer.receive_request(transaction)
            if transaction.cacheHit is False:
                logger.debug(transaction.request)
                transaction = self._forwardLayer.receive_request_reverse(transaction)
                logger.debug(transaction.response)
            transaction = self._observeLayer.send_response(transaction)
            transaction = self._blockLayer.send_response(transaction)
            transaction = self._cacheLayer.send_response(transaction)
        else:
            transaction = self._forwardLayer.receive_request_reverse(transaction)
            transaction = self._observeLayer.send_response(transaction)
            transaction = self._blockLayer.send_response(transaction)

        self._stop_separate_timer(transaction.separate_timer)

        transaction = self._messageLayer.send_response(transaction)

        if transaction.response is not None:
            if transaction.response.type == defines.Types["CON"]:
                # confirmable response: schedule retransmissions
                self._start_retrasmission(transaction, transaction.response)
            self.send_datagram(transaction.response)

    elif isinstance(message, Message):
        # empty/ACK/RST message: update message, block and observe state
        transaction = self._messageLayer.receive_empty(message)
        if transaction is not None:
            transaction = self._blockLayer.receive_empty(message, transaction)
            self._observeLayer.receive_empty(message, transaction)

    else:  # pragma: no cover
        logger.error("Received response from %s", message.source)
def send_datagram(self, message):
    """
    Send a message through the udp socket.

    No-op once the server has been stopped.

    :type message: Message
    :param message: the message to send
    """
    if self.stopped.isSet():
        return
    host, port = message.destination
    logger.debug("send_datagram - " + str(message))
    raw = Serializer().serialize(message)
    self._socket.sendto(raw, (host, port))
def add_resource(self, path, resource):
    """
    Helper function to add resources to the resource directory during server initialization.

    Walks the path segment by segment; attaches *resource* at the leaf and
    refuses (returns False) when an intermediate ancestor is missing.

    :param path: the path for the new created resource
    :type resource: Resource
    :param resource: the resource to be added
    """
    assert isinstance(resource, Resource)
    segments = path.strip("/").split("/")
    walked = ""
    for depth, segment in enumerate(segments, start=1):
        walked += "/" + segment
        try:
            existing = self.root[walked]
        except KeyError:
            existing = None
        if existing is None:
            if depth != len(segments):
                # an intermediate ancestor does not exist - refuse to attach
                return False
            resource.path = walked
            self.root[walked] = resource
    return True
def _start_retrasmission(self, transaction, message):
    """
    Start the retransmission task.

    Only confirmable (CON) messages are retransmitted; the first attempt
    is scheduled after a randomized ACK timeout per the CoAP spec.

    :type transaction: Transaction
    :param transaction: the transaction that owns the message that needs retransmission
    :type message: Message
    :param message: the message that needs the retransmission task
    """
    with transaction:
        if message.type == defines.Types['CON']:
            # randomized initial timeout in [ACK_TIMEOUT, ACK_TIMEOUT*FACTOR]
            future_time = random.uniform(defines.ACK_TIMEOUT, (defines.ACK_TIMEOUT * defines.ACK_RANDOM_FACTOR))
            transaction.retransmit_thread = threading.Thread(target=self._retransmit,
                                                             args=(transaction, message, future_time, 0))
            transaction.retransmit_stop = threading.Event()
            # registered so close() can wake this thread up early
            self.to_be_stopped.append(transaction.retransmit_stop)
            transaction.retransmit_thread.start()
def _retransmit(self, transaction, message, future_time, retransmit_count):
    """
    Thread function to retransmit the message in the future

    Re-sends with exponential backoff until the message is acknowledged,
    rejected, MAX_RETRANSMIT is reached, or the server stops.

    :param transaction: the transaction that owns the message that needs retransmission
    :param message: the message that needs the retransmission task
    :param future_time: the amount of time to wait before a new attempt
    :param retransmit_count: the number of retransmissions
    """
    with transaction:
        while retransmit_count < defines.MAX_RETRANSMIT and (not message.acknowledged and not message.rejected) \
                and not self.stopped.isSet():
            # wait() returns early if retransmit_stop is set by close()
            transaction.retransmit_stop.wait(timeout=future_time)
            if not message.acknowledged and not message.rejected and not self.stopped.isSet():
                retransmit_count += 1
                future_time *= 2  # exponential backoff
                self.send_datagram(message)

        if message.acknowledged or message.rejected:
            message.timeouted = False
        else:
            # exhausted all retries: give up and drop any observer
            logger.warning("Give up on message {message}".format(message=message.line_print))
            message.timeouted = True
            if message.observe is not None:
                self._observeLayer.remove_subscriber(message)

        try:
            self.to_be_stopped.remove(transaction.retransmit_stop)
        except ValueError:
            # already removed (e.g. concurrent close()) - nothing to do
            pass
        transaction.retransmit_stop = None
        transaction.retransmit_thread = None
def _start_separate_timer(self, transaction):
    """
    Start a thread to handle separate mode.

    Arms a one-shot timer that sends an empty ACK if processing the
    request takes longer than ACK_TIMEOUT.

    :type transaction: Transaction
    :param transaction: the transaction that is in processing
    :rtype : the Timer object
    """
    timer = threading.Timer(defines.ACK_TIMEOUT, self._send_ack, (transaction,))
    timer.start()
    return timer
@staticmethod
def _stop_separate_timer(timer):
"""
Stop the separate Thread if an answer has been already provided to the client.
:param timer: The Timer object
"""
timer.cancel()
def _send_ack(self, transaction):
    """
    Sends an ACK message for the request.

    Called by the separate-mode timer; only sends if the request has not
    already been acknowledged piggy-backed with a response.

    :param transaction: the transaction that owns the request
    """
    ack = Message()
    ack.type = defines.Types['ACK']

    if not transaction.request.acknowledged:
        ack = self._messageLayer.send_empty(transaction, transaction.request, ack)
        self.send_datagram(ack)
|
athenad.py | #!/usr/bin/env python3
import base64
import hashlib
import io
import json
import os
import queue
import random
import select
import socket
import threading
import time
from collections import namedtuple
from functools import partial
from typing import Any
import requests
from jsonrpc import JSONRPCResponseManager, dispatcher
from websocket import ABNF, WebSocketTimeoutException, create_connection
import cereal.messaging as messaging
from cereal.services import service_list
from common import android
from common.api import Api
from common.basedir import PERSIST
from common.params import Params
from common.realtime import sec_since_boot
from selfdrive.loggerd.config import ROOT
from selfdrive.swaglog import cloudlog
ATHENA_HOST = os.getenv('ATHENA_HOST', 'wss://athena.comma.ai')
# BUG FIX: os.getenv returns a str when the variable is set in the
# environment; the original passed it straight to range() in
# handle_long_poll, which would raise TypeError. Coerce to int here.
HANDLER_THREADS = int(os.getenv('HANDLER_THREADS', 4))
LOCAL_PORT_WHITELIST = set([8022])  # only ssh may be proxied locally

dispatcher["echo"] = lambda s: s
# queues shared between the websocket threads and the rpc/upload workers
payload_queue: Any = queue.Queue()
response_queue: Any = queue.Queue()
upload_queue: Any = queue.Queue()
cancelled_uploads: Any = set()
UploadItem = namedtuple('UploadItem', ['path', 'url', 'headers', 'created_at', 'id'])
def handle_long_poll(ws):
    """Run one websocket session: spawn recv/send/upload/jsonrpc worker
    threads and block until any of them sets end_event, then join all."""
    end_event = threading.Event()

    threads = [
        threading.Thread(target=ws_recv, args=(ws, end_event)),
        threading.Thread(target=ws_send, args=(ws, end_event)),
        threading.Thread(target=upload_handler, args=(end_event,))
    ] + [
        threading.Thread(target=jsonrpc_handler, args=(end_event,))
        for x in range(HANDLER_THREADS)
    ]

    for thread in threads:
        thread.start()
    try:
        # poll instead of joining so KeyboardInterrupt is handled promptly
        while not end_event.is_set():
            time.sleep(0.1)
    except (KeyboardInterrupt, SystemExit):
        end_event.set()
        raise
    finally:
        for i, thread in enumerate(threads):
            thread.join()
def jsonrpc_handler(end_event):
    """Worker loop: pop JSON-RPC payloads from payload_queue, dispatch them
    and push responses to response_queue until end_event is set."""
    # startLocalProxy needs the session end_event, so bind it per-session
    dispatcher["startLocalProxy"] = partial(startLocalProxy, end_event)
    while not end_event.is_set():
        try:
            data = payload_queue.get(timeout=1)
            response = JSONRPCResponseManager.handle(data, dispatcher)
            response_queue.put_nowait(response)
        except queue.Empty:
            # timeout just lets us re-check end_event
            pass
        except Exception as e:
            cloudlog.exception("athena jsonrpc handler failed")
            response_queue.put_nowait(json.dumps({"error": str(e)}))
def upload_handler(end_event):
    """Worker loop: drain upload_queue, skipping items cancelled through
    cancelUpload, until end_event is set."""
    while not end_event.is_set():
        try:
            item = upload_queue.get(timeout=1)
            if item.id in cancelled_uploads:
                cancelled_uploads.remove(item.id)
                continue
            _do_upload(item)
        except queue.Empty:
            pass
        except Exception:
            # log and keep draining; one failed upload must not kill the loop
            cloudlog.exception("athena.upload_handler.exception")
def _do_upload(upload_item):
    """HTTP PUT the file at upload_item.path to upload_item.url, streaming
    the file object and setting an explicit Content-Length header."""
    with open(upload_item.path, "rb") as f:
        content_length = os.fstat(f.fileno()).st_size
        headers = {**upload_item.headers, 'Content-Length': str(content_length)}
        return requests.put(upload_item.url, data=f, headers=headers, timeout=10)
# security: user should be able to request any message from their car
@dispatcher.add_method
def getMessage(service=None, timeout=1000):
    """Return one message from *service* as a dict.

    Raises Exception for unknown services and TimeoutError when nothing
    is received within *timeout* milliseconds.
    """
    if service is None or service not in service_list:
        raise Exception("invalid service")
    # renamed from `socket`, which shadowed the imported socket module
    sub_sock = messaging.sub_sock(service, timeout=timeout)
    ret = messaging.recv_one(sub_sock)
    if ret is None:
        raise TimeoutError
    return ret.to_dict()
@dispatcher.add_method
def listDataDirectory():
    """Recursively list every file under ROOT, with paths relative to ROOT."""
    found = []
    for dirpath, _, filenames in os.walk(ROOT):
        for name in filenames:
            found.append(os.path.relpath(os.path.join(dirpath, name), ROOT))
    return found
@dispatcher.add_method
def reboot():
    """Reboot the device, refusing while the car is on (thermal.started)."""
    thermal_sock = messaging.sub_sock("thermal", timeout=1000)
    ret = messaging.recv_one(thermal_sock)
    if ret is None or ret.thermal.started:
        raise Exception("Reboot unavailable")

    def do_reboot():
        # small delay so the RPC response can be delivered before rebooting
        time.sleep(2)
        android.reboot()

    threading.Thread(target=do_reboot).start()

    return {"success": 1}
@dispatcher.add_method
def uploadFileToUrl(fn, url, headers):
    """Queue a file under ROOT for upload.

    Returns 500 for unsafe paths, 404 for missing files, otherwise a dict
    describing the enqueued item.
    """
    # reject empty names, absolute paths and traversal attempts
    if len(fn) == 0 or fn[0] == '/' or '..' in fn:
        return 500
    path = os.path.join(ROOT, fn)
    if not os.path.exists(path):
        return 404

    item = UploadItem(path=path, url=url, headers=headers,
                      created_at=int(time.time() * 1000), id=None)
    # id is the hash of the item itself (computed with id=None)
    upload_id = hashlib.sha1(str(item).encode()).hexdigest()
    item = item._replace(id=upload_id)

    upload_queue.put_nowait(item)
    return {"enqueued": 1, "item": item._asdict()}
@dispatcher.add_method
def listUploadQueue():
    """Snapshot the pending upload queue as JSON-friendly dicts."""
    pending = list(upload_queue.queue)
    return [entry._asdict() for entry in pending]
@dispatcher.add_method
def cancelUpload(upload_id):
    """Mark a queued upload as cancelled; returns 404 if it is not queued."""
    queued_ids = {entry.id for entry in list(upload_queue.queue)}
    if upload_id not in queued_ids:
        return 404
    cancelled_uploads.add(upload_id)
    return {"success": 1}
def startLocalProxy(global_end_event, remote_ws_uri, local_port):
    """Bridge a remote websocket to a whitelisted local TCP port (ssh 8022).

    Spawns one recv and one send proxy thread; raises for non-whitelisted
    ports or connection failures, otherwise returns {"success": 1}.
    """
    try:
        if local_port not in LOCAL_PORT_WHITELIST:
            raise Exception("Requested local port not whitelisted")

        params = Params()
        dongle_id = params.get("DongleId").decode('utf8')
        identity_token = Api(dongle_id).get_token()
        ws = create_connection(remote_ws_uri,
                               cookie="jwt=" + identity_token,
                               enable_multithread=True)

        # socketpair lets ws_proxy_recv signal ws_proxy_send to shut down
        ssock, csock = socket.socketpair()
        local_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        local_sock.connect(('127.0.0.1', local_port))
        local_sock.setblocking(0)

        proxy_end_event = threading.Event()
        threads = [
            threading.Thread(target=ws_proxy_recv, args=(ws, local_sock, ssock, proxy_end_event, global_end_event)),
            threading.Thread(target=ws_proxy_send, args=(ws, local_sock, csock, proxy_end_event))
        ]
        for thread in threads:
            thread.start()

        return {"success": 1}
    except Exception as e:
        cloudlog.exception("athenad.startLocalProxy.exception")
        raise e
@dispatcher.add_method
def getPublicKey():
    """Return the device's SSH public key text, or None when absent."""
    key_path = PERSIST + '/comma/id_rsa.pub'
    if not os.path.isfile(key_path):
        return None
    with open(key_path, 'r') as f:
        return f.read()
@dispatcher.add_method
def getSshAuthorizedKeys():
    """Return the GithubSshKeys param as text, or '' when unset."""
    keys = Params().get("GithubSshKeys", encoding='utf8')
    return keys or ''
@dispatcher.add_method
def getSimInfo():
    """Collect SIM / cellular status via Android properties and service calls."""
    sim_state = android.getprop("gsm.sim.state").split(",")
    network_type = android.getprop("gsm.network.type").split(',')
    mcc_mnc = android.getprop("gsm.sim.operator.numeric") or None

    # iphonesubinfo 11 -> ICCID; phone 46 -> data connection state
    sim_id = android.parse_service_call_string(android.service_call(['iphonesubinfo', '11']))
    cell_data_state = android.parse_service_call_unpack(android.service_call(['phone', '46']), ">q")
    cell_data_connected = (cell_data_state == 2)

    return {
        'sim_id': sim_id,
        'mcc_mnc': mcc_mnc,
        'network_type': network_type,
        'sim_state': sim_state,
        'data_connected': cell_data_connected
    }
@dispatcher.add_method
def takeSnapshot():
    """Take front/back camera snapshots and return them base64-JPEG encoded.

    Only works while camerad is not running; raises otherwise.
    """
    from selfdrive.camerad.snapshot.snapshot import snapshot, jpeg_write
    ret = snapshot()
    if ret is not None:
        def b64jpeg(x):
            # encode one frame to base64 JPEG, or None if the camera was off
            if x is not None:
                f = io.BytesIO()
                jpeg_write(f, x)
                return base64.b64encode(f.getvalue()).decode("utf-8")
            else:
                return None
        return {'jpegBack': b64jpeg(ret[0]),
                'jpegFront': b64jpeg(ret[1])}
    else:
        raise Exception("not available while camerad is started")
def ws_proxy_recv(ws, local_sock, ssock, end_event, global_end_event):
    """Pump bytes from the remote websocket into the local TCP socket until
    either the proxy session or the whole athena session ends."""
    while not (end_event.is_set() or global_end_event.is_set()):
        try:
            data = ws.recv()
            local_sock.sendall(data)
        except WebSocketTimeoutException:
            pass
        except Exception:
            cloudlog.exception("athenad.ws_proxy_recv.exception")
            break

    # closing ssock wakes ws_proxy_send's select() so it can exit too
    ssock.close()
    local_sock.close()
    end_event.set()
def ws_proxy_send(ws, local_sock, signal_sock, end_event):
    """Pump bytes from the local TCP socket to the remote websocket,
    exiting when the peer thread signals through signal_sock."""
    while not end_event.is_set():
        try:
            r, _, _ = select.select((local_sock, signal_sock), (), ())
            if r:
                if r[0].fileno() == signal_sock.fileno():
                    # got end signal from ws_proxy_recv
                    end_event.set()
                    break

                data = local_sock.recv(4096)
                if not data:
                    # local_sock is dead
                    end_event.set()
                    break

                ws.send(data, ABNF.OPCODE_BINARY)
        except Exception:
            cloudlog.exception("athenad.ws_proxy_send.exception")
            end_event.set()
def ws_recv(ws, end_event):
    """Receive frames from the athena websocket: text/binary frames go to
    payload_queue; pings update the LastAthenaPingTime param."""
    while not end_event.is_set():
        try:
            opcode, data = ws.recv_data(control_frame=True)
            if opcode in (ABNF.OPCODE_TEXT, ABNF.OPCODE_BINARY):
                if opcode == ABNF.OPCODE_TEXT:
                    data = data.decode("utf-8")
                payload_queue.put_nowait(data)
            elif opcode == ABNF.OPCODE_PING:
                # record ping time in nanoseconds for connectivity monitoring
                Params().put("LastAthenaPingTime", str(int(sec_since_boot()*1e9)))
        except WebSocketTimeoutException:
            pass
        except Exception:
            cloudlog.exception("athenad.ws_recv.exception")
            end_event.set()
def ws_send(ws, end_event):
    """Send queued JSON-RPC responses over the websocket until end_event."""
    while not end_event.is_set():
        try:
            response = response_queue.get(timeout=1)
            ws.send(response.json)
        except queue.Empty:
            # timeout just lets us re-check end_event
            pass
        except Exception:
            cloudlog.exception("athenad.ws_send.exception")
            end_event.set()
def backoff(retries):
    """Exponential backoff with jitter: uniform in [0, min(128, 2**retries))."""
    ceiling = min(128, 2 ** retries)
    return random.randrange(ceiling)
def main():
    """Connect to the athena websocket and service it forever, reconnecting
    with exponential backoff on failure."""
    params = Params()
    dongle_id = params.get("DongleId").decode('utf-8')
    ws_uri = ATHENA_HOST + "/ws/v2/" + dongle_id

    api = Api(dongle_id)

    conn_retries = 0
    while 1:
        try:
            ws = create_connection(ws_uri,
                                   cookie="jwt=" + api.get_token(),
                                   enable_multithread=True)
            cloudlog.event("athenad.main.connected_ws", ws_uri=ws_uri)
            ws.settimeout(1)
            conn_retries = 0
            handle_long_poll(ws)
        except (KeyboardInterrupt, SystemExit):
            break
        except Exception:
            cloudlog.exception("athenad.main.exception")
            conn_retries += 1
            # clear the ping marker so connectivity watchdogs notice the gap
            params.delete("LastAthenaPingTime")

        time.sleep(backoff(conn_retries))
# Entry point: run the athena daemon when executed directly.
if __name__ == "__main__":
    main()
|
data_utils.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-import-not-at-top
"""Utilities for file download and caching."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import abstractmethod
from contextlib import closing
import hashlib
import multiprocessing
from multiprocessing.pool import ThreadPool
import os
import random
import shutil
import sys
import tarfile
import threading
import time
import traceback
import zipfile
import numpy as np
import six
from six.moves.urllib.error import HTTPError
from six.moves.urllib.error import URLError
from six.moves.urllib.request import urlopen
from tensorflow.python.keras.utils.generic_utils import Progbar
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
try:
import queue
except ImportError:
import Queue as queue
if sys.version_info[0] == 2:

  def urlretrieve(url, filename, reporthook=None, data=None):
    """Replacement for `urlretrieve` for Python 2.
    Under Python 2, `urlretrieve` relies on `FancyURLopener` from legacy
    `urllib` module, known to have issues with proxy management.
    Arguments:
        url: url to retrieve.
        filename: where to store the retrieved data locally.
        reporthook: a hook function that will be called once
            on establishment of the network connection and once
            after each block read thereafter.
            The hook will be passed three arguments;
            a count of blocks transferred so far,
            a block size in bytes, and the total size of the file.
        data: `data` argument passed to `urlopen`.
    """

    def chunk_read(response, chunk_size=8192, reporthook=None):
      # Content-Length may be absent; total_size stays -1 meaning "unknown".
      content_type = response.info().get('Content-Length')
      total_size = -1
      if content_type is not None:
        total_size = int(content_type.strip())
      count = 0
      while True:
        chunk = response.read(chunk_size)
        count += 1
        if reporthook is not None:
          reporthook(count, chunk_size, total_size)
        if chunk:
          yield chunk
        else:
          break

    response = urlopen(url, data)
    with open(filename, 'wb') as fd:
      for chunk in chunk_read(response, reporthook=reporthook):
        fd.write(chunk)
else:
  # Python 3: the stdlib implementation is fine.
  from six.moves.urllib.request import urlretrieve
def is_generator_or_sequence(x):
  """Check if `x` is a Keras generator type."""
  if tf_inspect.isgenerator(x):
    return True
  return isinstance(x, Sequence)
def _extract_archive(file_path, path='.', archive_format='auto'):
"""Extracts an archive if it matches tar, tar.gz, tar.bz, or zip formats.
Arguments:
file_path: path to the archive file
path: path to extract the archive file
archive_format: Archive format to try for extracting the file.
Options are 'auto', 'tar', 'zip', and None.
'tar' includes tar, tar.gz, and tar.bz files.
The default 'auto' is ['tar', 'zip'].
None or an empty list will return no matches found.
Returns:
True if a match was found and an archive extraction was completed,
False otherwise.
"""
if archive_format is None:
return False
if archive_format is 'auto':
archive_format = ['tar', 'zip']
if isinstance(archive_format, six.string_types):
archive_format = [archive_format]
for archive_type in archive_format:
if archive_type is 'tar':
open_fn = tarfile.open
is_match_fn = tarfile.is_tarfile
if archive_type is 'zip':
open_fn = zipfile.ZipFile
is_match_fn = zipfile.is_zipfile
if is_match_fn(file_path):
with open_fn(file_path) as archive:
try:
archive.extractall(path)
except (tarfile.TarError, RuntimeError, KeyboardInterrupt):
if os.path.exists(path):
if os.path.isfile(path):
os.remove(path)
else:
shutil.rmtree(path)
raise
return True
return False
@tf_export('keras.utils.get_file')
def get_file(fname,
             origin,
             untar=False,
             md5_hash=None,
             file_hash=None,
             cache_subdir='datasets',
             hash_algorithm='auto',
             extract=False,
             archive_format='auto',
             cache_dir=None):
  """Downloads a file from a URL if it is not already in the cache.

  By default the file at the url `origin` is downloaded to the
  cache_dir `~/.keras`, placed in the cache_subdir `datasets`,
  and given the filename `fname`. The final location of a file
  `example.txt` would therefore be `~/.keras/datasets/example.txt`.

  Files in tar, tar.gz, tar.bz, and zip formats can also be extracted.
  Passing a hash will verify the file after download. The command line
  programs `shasum` and `sha256sum` can compute the hash.

  Arguments:
      fname: Name of the file. If an absolute path `/path/to/file.txt` is
          specified the file will be saved at that location.
      origin: Original URL of the file.
      untar: Deprecated in favor of 'extract'.
          boolean, whether the file should be decompressed
      md5_hash: Deprecated in favor of 'file_hash'.
          md5 hash of the file for verification
      file_hash: The expected hash string of the file after download.
          The sha256 and md5 hash algorithms are both supported.
      cache_subdir: Subdirectory under the Keras cache dir where the file is
          saved. If an absolute path `/path/to/folder` is
          specified the file will be saved at that location.
      hash_algorithm: Select the hash algorithm to verify the file.
          options are 'md5', 'sha256', and 'auto'.
          The default 'auto' detects the hash algorithm in use.
      extract: True tries extracting the file as an Archive, like tar or zip.
      archive_format: Archive format to try for extracting the file.
          Options are 'auto', 'tar', 'zip', and None.
          'tar' includes tar, tar.gz, and tar.bz files.
          The default 'auto' is ['tar', 'zip'].
          None or an empty list will return no matches found.
      cache_dir: Location to store cached files, when None it
          defaults to the [Keras
          Directory](/faq/#where-is-the-keras-configuration-filed-stored).

  Returns:
      Path to the downloaded file
  """
  if cache_dir is None:
    cache_dir = os.path.join(os.path.expanduser('~'), '.keras')
  # Legacy md5_hash argument maps onto the generic file_hash machinery.
  if md5_hash is not None and file_hash is None:
    file_hash = md5_hash
    hash_algorithm = 'md5'
  datadir_base = os.path.expanduser(cache_dir)
  if not os.access(datadir_base, os.W_OK):
    # Fall back to a world-writable location when the cache dir is read-only.
    datadir_base = os.path.join('/tmp', '.keras')
  datadir = os.path.join(datadir_base, cache_subdir)
  if not os.path.exists(datadir):
    os.makedirs(datadir)
  if untar:
    untar_fpath = os.path.join(datadir, fname)
    fpath = untar_fpath + '.tar.gz'
  else:
    fpath = os.path.join(datadir, fname)
  download = False
  if os.path.exists(fpath):
    # File found; verify integrity if a hash was provided.
    if file_hash is not None:
      if not validate_file(fpath, file_hash, algorithm=hash_algorithm):
        print('A local file was found, but it seems to be '
              'incomplete or outdated because the ' + hash_algorithm +
              ' file hash does not match the original value of ' + file_hash +
              ' so we will re-download the data.')
        download = True
  else:
    download = True
  if download:
    print('Downloading data from', origin)

    class ProgressTracker(object):
      # Maintain progbar for the lifetime of download.
      # This design was chosen for Python 2.7 compatibility.
      progbar = None

    def dl_progress(count, block_size, total_size):
      if ProgressTracker.progbar is None:
        # Bug fix: was `total_size is -1`, an identity comparison against an
        # int literal whose result is implementation-defined.
        if total_size == -1:
          total_size = None
        ProgressTracker.progbar = Progbar(total_size)
      else:
        ProgressTracker.progbar.update(count * block_size)

    error_msg = 'URL fetch failure on {}: {} -- {}'
    try:
      try:
        urlretrieve(origin, fpath, dl_progress)
      except URLError as e:
        raise Exception(error_msg.format(origin, e.errno, e.reason))
      except HTTPError as e:
        raise Exception(error_msg.format(origin, e.code, e.msg))
    except (Exception, KeyboardInterrupt):
      # Never leave a partial download behind in the cache.
      if os.path.exists(fpath):
        os.remove(fpath)
      raise
    ProgressTracker.progbar = None
  if untar:
    if not os.path.exists(untar_fpath):
      _extract_archive(fpath, datadir, archive_format='tar')
    return untar_fpath
  if extract:
    _extract_archive(fpath, datadir, archive_format)
  return fpath
def _hash_file(fpath, algorithm='sha256', chunk_size=65535):
"""Calculates a file sha256 or md5 hash.
Example:
```python
>>> from keras.data_utils import _hash_file
>>> _hash_file('/path/to/file.zip')
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
```
Arguments:
fpath: path to the file being validated
algorithm: hash algorithm, one of 'auto', 'sha256', or 'md5'.
The default 'auto' detects the hash algorithm in use.
chunk_size: Bytes to read at a time, important for large files.
Returns:
The file hash
"""
if (algorithm is 'sha256') or (algorithm is 'auto' and len(hash) is 64):
hasher = hashlib.sha256()
else:
hasher = hashlib.md5()
with open(fpath, 'rb') as fpath_file:
for chunk in iter(lambda: fpath_file.read(chunk_size), b''):
hasher.update(chunk)
return hasher.hexdigest()
def validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535):
  """Validates a file against a sha256 or md5 hash.

  Arguments:
      fpath: path to the file being validated
      file_hash: The expected hash string of the file.
          The sha256 and md5 hash algorithms are both supported.
      algorithm: Hash algorithm, one of 'auto', 'sha256', or 'md5'.
          The default 'auto' detects the hash algorithm in use.
      chunk_size: Bytes to read at a time, important for large files.

  Returns:
      Whether the file is valid
  """
  # Bug fix: used `is` identity comparisons against the literals 'sha256'
  # and 64; replaced with equality checks. 'auto' assumes sha256 when the
  # expected hash is 64 hex chars long (sha256 digest length), md5 otherwise.
  if (algorithm == 'sha256') or (algorithm == 'auto' and len(file_hash) == 64):
    hasher = 'sha256'
  else:
    hasher = 'md5'
  return str(_hash_file(fpath, hasher, chunk_size)) == str(file_hash)
@tf_export('keras.utils.Sequence')
class Sequence(object):
  """Base object for fitting to a sequence of data, such as a dataset.

  Subclasses must implement `__getitem__` (which returns a complete batch)
  and `__len__`; override `on_epoch_end` to modify the dataset between
  epochs.

  Notes:
      Compared to plain generators, a `Sequence` is a safer way to do
      multiprocessing: this structure guarantees the network trains only
      once on each sample per epoch.

  Examples:

  ```python
  from skimage.io import imread
  from skimage.transform import resize
  import numpy as np
  import math

  # Here, `x_set` is list of path to the images
  # and `y_set` are the associated classes.

  class CIFAR10Sequence(Sequence):

      def __init__(self, x_set, y_set, batch_size):
          self.x, self.y = x_set, y_set
          self.batch_size = batch_size

      def __len__(self):
          return math.ceil(len(self.x) / self.batch_size)

      def __getitem__(self, idx):
          lo = idx * self.batch_size
          hi = lo + self.batch_size
          batch_x = self.x[lo:hi]
          batch_y = self.y[lo:hi]
          return np.array([
              resize(imread(file_name), (200, 200))
              for file_name in batch_x]), np.array(batch_y)
  ```
  """

  @abstractmethod
  def __getitem__(self, index):
    """Return the complete batch at position `index` of the Sequence."""
    raise NotImplementedError

  @abstractmethod
  def __len__(self):
    """Return the number of batches in the Sequence."""
    raise NotImplementedError

  def on_epoch_end(self):
    """Hook invoked at the end of every epoch; no-op by default."""
    pass

  def __iter__(self):
    """Iterate over the batches indefinitely, epoch after epoch.

    Yields:
        Sequence items.
    """
    while True:
      for idx in range(len(self)):
        yield self[idx]
# Global variables to be shared across processes.
_SHARED_SEQUENCES = {}
# A multiprocessing.Value (or a plain int fallback) handing out unique ids.
_SEQUENCE_COUNTER = None


def init_pool(seqs):
  """Pool initializer: install the uid -> Sequence mapping in this worker."""
  global _SHARED_SEQUENCES
  _SHARED_SEQUENCES = seqs


def get_index(uid, i):
  """Return element `i` of the shared Sequence registered under `uid`.

  Keying by `uid` lets several Sequences (e.g. training and validation) be
  served through the same worker pool without one overwriting the other.

  Arguments:
      uid: int, Sequence identifier
      i: index

  Returns:
      The value at index `i`.
  """
  return _SHARED_SEQUENCES[uid][i]
@tf_export('keras.utils.SequenceEnqueuer')
class SequenceEnqueuer(object):
  """Abstract base class for queue-backed input prefetchers.

  An Enqueuer uses parallelism (processes or threads) to speed up input
  preprocessing.

  Examples:

  ```python
  enqueuer = SequenceEnqueuer(...)
  enqueuer.start()
  datas = enqueuer.get()
  for data in datas:
      # Use the inputs; training, evaluating, predicting.
      # ... stop sometime.
  enqueuer.close()
  ```

  `enqueuer.get()` should be an infinite stream of datas.
  """

  @abstractmethod
  def is_running(self):
    raise NotImplementedError

  @abstractmethod
  def start(self, workers=1, max_queue_size=10):
    """Start the handler's workers.

    Arguments:
        workers: number of worker threads
        max_queue_size: queue size
            (when full, threads could block on `put()`).
    """
    raise NotImplementedError

  @abstractmethod
  def stop(self, timeout=None):
    """Stop running threads and wait for them to exit, if necessary.

    Must be called by the same thread that called start().

    Arguments:
        timeout: maximum time to wait on thread.join()
    """
    raise NotImplementedError

  @abstractmethod
  def get(self):
    """Create a generator extracting data from the queue, skipping `None`s.

    Returns:
        Generator yielding tuples `(inputs, targets)`
        or `(inputs, targets, sample_weights)`.
    """
    raise NotImplementedError
@tf_export('keras.utils.OrderedEnqueuer')
class OrderedEnqueuer(SequenceEnqueuer):
  """Builds a Enqueuer from a Sequence.

  Prefetches batches in order via a worker pool (threads or processes).
  Used in `fit_generator`, `evaluate_generator`, `predict_generator`.

  Arguments:
      sequence: A `keras.utils.data_utils.Sequence` object.
      use_multiprocessing: use multiprocessing if True, otherwise threading
      shuffle: whether to shuffle the data at the beginning of each epoch
  """

  def __init__(self, sequence, use_multiprocessing=False, shuffle=False):
    self.sequence = sequence
    self.use_multiprocessing = use_multiprocessing
    # Lazily create the process-wide counter that hands out unique uids.
    global _SEQUENCE_COUNTER
    if _SEQUENCE_COUNTER is None:
      try:
        _SEQUENCE_COUNTER = multiprocessing.Value('i', 0)
      except OSError:
        # In this case the OS does not allow us to use
        # multiprocessing. We resort to an int
        # for enqueuer indexing.
        _SEQUENCE_COUNTER = 0
    if isinstance(_SEQUENCE_COUNTER, int):
      self.uid = _SEQUENCE_COUNTER
      _SEQUENCE_COUNTER += 1
    else:
      # Doing Multiprocessing.Value += x is not process-safe.
      with _SEQUENCE_COUNTER.get_lock():
        self.uid = _SEQUENCE_COUNTER.value
        _SEQUENCE_COUNTER.value += 1
    self.shuffle = shuffle
    self.workers = 0
    # Callable building the executor; bound in start().
    self.executor_fn = None
    self.queue = None
    self.run_thread = None
    self.stop_signal = None

  def is_running(self):
    # Running means start() was called and stop() has not fired yet.
    return self.stop_signal is not None and not self.stop_signal.is_set()

  def start(self, workers=1, max_queue_size=10):
    """Start the handler's workers.

    Arguments:
        workers: number of worker threads
        max_queue_size: queue size
            (when full, workers could block on `put()`)
    """
    if self.use_multiprocessing:
      self.executor_fn = lambda seqs: multiprocessing.Pool(  # pylint: disable=g-long-lambda
          workers, initializer=init_pool, initargs=(seqs,))
    else:
      # We do not need the init since it's threads.
      self.executor_fn = lambda _: ThreadPool(workers)
    self.workers = workers
    self.queue = queue.Queue(max_queue_size)
    self.stop_signal = threading.Event()
    self.run_thread = threading.Thread(target=self._run)
    self.run_thread.daemon = True
    self.run_thread.start()

  def _wait_queue(self):
    """Wait for the queue to be empty."""
    # Polls rather than blocking so stop_signal is honored promptly.
    while True:
      time.sleep(0.1)
      if self.queue.unfinished_tasks == 0 or self.stop_signal.is_set():
        return

  def _run(self):
    """Submits request to the executor and queue the `Future` objects."""
    sequence = list(range(len(self.sequence)))
    self._send_sequence()  # Share the initial sequence
    while True:
      if self.shuffle:
        random.shuffle(sequence)
      # A fresh executor per epoch so workers pick up the updated Sequence.
      with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
        for i in sequence:
          if self.stop_signal.is_set():
            return
          # AsyncResult objects are enqueued in order; get() resolves them
          # in the same order, which preserves batch ordering.
          self.queue.put(
              executor.apply_async(get_index, (self.uid, i)), block=True)
        # Done with the current epoch, waiting for the final batches
        self._wait_queue()
        if self.stop_signal.is_set():
          # We're done
          return
      # Call the internal on epoch end.
      self.sequence.on_epoch_end()
      self._send_sequence()  # Update the pool

  def get(self):
    """Creates a generator to extract data from the queue.

    Skip the data if it is `None`.

    Yields:
        The next element in the queue, i.e. a tuple
        `(inputs, targets)` or
        `(inputs, targets, sample_weights)`.
    """
    try:
      while self.is_running():
        # Outer get(): next AsyncResult; inner get(): its computed value.
        inputs = self.queue.get(block=True).get()
        self.queue.task_done()
        if inputs is not None:
          yield inputs
    except Exception as e:  # pylint: disable=broad-except
      self.stop()
      six.raise_from(StopIteration(e), e)

  def _send_sequence(self):
    """Send current Sequence to all workers."""
    # For new processes that may spawn
    _SHARED_SEQUENCES[self.uid] = self.sequence

  def stop(self, timeout=None):
    """Stops running threads and wait for them to exit, if necessary.

    Should be called by the same thread which called `start()`.

    Arguments:
        timeout: maximum time to wait on `thread.join()`
    """
    self.stop_signal.set()
    # NOTE(review): this reaches into queue.Queue internals (mutex,
    # unfinished_tasks, not_full) to flush pending work so _run() unblocks;
    # it relies on CPython's Queue implementation details.
    with self.queue.mutex:
      self.queue.queue.clear()
      self.queue.unfinished_tasks = 0
      self.queue.not_full.notify()
    self.run_thread.join(timeout)
    _SHARED_SEQUENCES[self.uid] = None
@tf_export('keras.utils.GeneratorEnqueuer')
class GeneratorEnqueuer(SequenceEnqueuer):
  """Builds a queue out of a data generator.

  The provided generator can be finite in which case the class will stop
  iteration once it is exhausted.
  Used in `fit_generator`, `evaluate_generator`, `predict_generator`.

  Arguments:
      generator: a generator function which yields data
      use_multiprocessing: use multiprocessing if True, otherwise threading
      wait_time: time to sleep in-between calls to `put()`
      seed: Initial seed for workers,
          will be incremented by one for each worker.
  """

  def __init__(self,
               generator,
               use_multiprocessing=False,
               wait_time=0.05,
               seed=None):
    self.wait_time = wait_time
    self._generator = generator
    # Bug fix: this was `os.name is 'nt' and use_multiprocessing is True`,
    # identity comparisons against literals that only worked by accident of
    # CPython interning; use equality / truthiness.
    if os.name == 'nt' and use_multiprocessing:
      # On Windows, avoid **SYSTEMATIC** error in `multiprocessing`:
      # `TypeError: can't pickle generator objects`
      # => Suggest multithreading instead of multiprocessing on Windows
      raise ValueError('Using a generator with `use_multiprocessing=True`'
                       ' is not supported on Windows (no marshalling of'
                       ' generators across process boundaries). Instead,'
                       ' use single thread/process or multithreading.')
    else:
      self._use_multiprocessing = use_multiprocessing
    self._threads = []
    self._stop_event = None
    self._manager = None
    self.queue = None
    self.seed = seed

  def _data_generator_task(self):
    """Worker loop: pull items from the generator and enqueue them."""
    # Bug fix: was `self._use_multiprocessing is False` (identity test).
    if not self._use_multiprocessing:
      while not self._stop_event.is_set():
        with self.genlock:
          try:
            if (self.queue is not None and
                self.queue.qsize() < self.max_queue_size):
              # On all OSes, avoid **SYSTEMATIC** error
              # in multithreading mode:
              # `ValueError: generator already executing`
              # => Serialize calls to
              # infinite iterator/generator's next() function
              generator_output = next(self._generator)
              self.queue.put((True, generator_output))
            else:
              time.sleep(self.wait_time)
          except StopIteration:
            break
          except Exception as e:  # pylint: disable=broad-except
            # Can't pickle tracebacks.
            # As a compromise, print the traceback and pickle None instead.
            if not hasattr(e, '__traceback__'):
              setattr(e, '__traceback__', sys.exc_info()[2])
            self.queue.put((False, e))
            self._stop_event.set()
            break
    else:
      while not self._stop_event.is_set():
        try:
          if (self.queue is not None and
              self.queue.qsize() < self.max_queue_size):
            generator_output = next(self._generator)
            self.queue.put((True, generator_output))
          else:
            time.sleep(self.wait_time)
        except StopIteration:
          break
        except Exception as e:  # pylint: disable=broad-except
          # Can't pickle tracebacks.
          # As a compromise, print the traceback and pickle None instead.
          traceback.print_exc()
          setattr(e, '__traceback__', None)
          self.queue.put((False, e))
          self._stop_event.set()
          break

  def start(self, workers=1, max_queue_size=10):
    """Kicks off threads which add data from the generator into the queue.

    Arguments:
        workers: number of worker threads
        max_queue_size: queue size
            (when full, threads could block on `put()`)
    """
    try:
      self.max_queue_size = max_queue_size
      if self._use_multiprocessing:
        self._manager = multiprocessing.Manager()
        self.queue = self._manager.Queue(maxsize=max_queue_size)
        self._stop_event = multiprocessing.Event()
      else:
        # On all OSes, avoid **SYSTEMATIC** error in multithreading mode:
        # `ValueError: generator already executing`
        # => Serialize calls to infinite iterator/generator's next() function
        self.genlock = threading.Lock()
        self.queue = queue.Queue(maxsize=max_queue_size)
        self._stop_event = threading.Event()
      for _ in range(workers):
        if self._use_multiprocessing:
          # Reset random seed else all children processes
          # share the same seed
          np.random.seed(self.seed)
          thread = multiprocessing.Process(target=self._data_generator_task)
          thread.daemon = True
          if self.seed is not None:
            self.seed += 1
        else:
          thread = threading.Thread(target=self._data_generator_task)
        self._threads.append(thread)
        thread.start()
    except:
      self.stop()
      raise

  def is_running(self):
    return self._stop_event is not None and not self._stop_event.is_set()

  def stop(self, timeout=None):
    """Stops running threads and wait for them to exit, if necessary.

    Should be called by the same thread which called `start()`.

    Arguments:
        timeout: maximum time to wait on `thread.join()`.
    """
    if self.is_running():
      self._stop_event.set()
    for thread in self._threads:
      if self._use_multiprocessing:
        if thread.is_alive():
          thread.terminate()
      else:
        # The thread.is_alive() test is subject to a race condition:
        # the thread could terminate right after the test and before the
        # join, rendering this test meaningless -> Call thread.join()
        # always, which is ok no matter what the status of the thread.
        thread.join(timeout)
    if self._manager:
      self._manager.shutdown()
    self._threads = []
    self._stop_event = None
    self.queue = None

  def get(self):
    """Creates a generator to extract data from the queue.

    Skip the data if it is `None`.

    Yields:
        The next element in the queue, i.e. a tuple
        `(inputs, targets)` or
        `(inputs, targets, sample_weights)`.
    """
    while self.is_running():
      if not self.queue.empty():
        success, value = self.queue.get()
        # Rethrow any exceptions found in the queue
        if not success:
          six.reraise(value.__class__, value, value.__traceback__)
        # Yield regular values
        if value is not None:
          yield value
      else:
        all_finished = all(not thread.is_alive() for thread in self._threads)
        if all_finished and self.queue.empty():
          # Bug fix: this used `raise StopIteration()`. Under PEP 479
          # (default since Python 3.7) a StopIteration escaping a generator
          # frame becomes a RuntimeError; a plain return ends iteration
          # cleanly with identical semantics for callers.
          return
        else:
          time.sleep(self.wait_time)
    # Make sure to rethrow the first exception in the queue, if any
    while not self.queue.empty():
      success, value = self.queue.get()
      if not success:
        six.reraise(value.__class__, value, value.__traceback__)
|
better_pics_from_google.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys, os, re, io
import requests, queue, threading
from PIL import Image
# Using google script seeks better quality copies of given images.
# Parses given dir with images, seeks similar with google and if there is a similar enough pic with bigger width, script replaces/adds it.
# Requires installed findimagedupes and imagemagick
# Uploader was copied from Rast1234
# https://github.com/Rast1234/imagesearch/blob/master/imagesearch.py
##################################################
# --- user-tunable configuration ---
# number of threads; google might block your IP and redirect to captcha-checks if thread_count exceeds 5 or so
thread_count = 3
# timeout for connection to image's url in sec.
dtimeout = 10
# number of images to get from google; google sorts them by size, so 5 is enough
depth = 10
# similarity for findimagedupes; in percentage
similarity = 98
# True, if you want to replace old images with found ones
# otherwise found images will be named [filename]_new.[extension] and placed alongside
move = 1
# dir with images
imdir = os.path.expanduser('~/d/pic/')
# processed images will be moved to this dir; slash at the end
imdir_done = os.path.expanduser('~/d/pic2/')
# dir for temporary images; will be created with mkdir -p; slash at the end
# !!! It will be removed at the end of execution
tmpdir = '/tmp/dev/gimgs/'
def fs(line):
    """Run *line* through the shell and return its stripped stdout+stderr.

    NOTE(review): callers build shell commands by concatenating file names
    into the string, so hostile filenames allow shell injection; kept
    because the entire script is structured around shell strings.
    """
    import subprocess
    # The original passed `shell=isinstance('', str)` -- an obfuscated
    # spelling of shell=True -- and read Popen's pipe without waiting.
    result = subprocess.run(
        line,
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        close_fds=True,
    )
    return result.stdout.decode('utf-8').strip()
def tow(filename, data):
    """Write text *data* to *filename*, truncating any existing content."""
    # Context manager guarantees the handle is closed even if write() fails.
    with open(filename, 'w') as fh:
        fh.write(data)
def towb(filename, data):
    """Write binary *data* to *filename*, truncating any existing content."""
    # Context manager guarantees the handle is closed even if write() fails.
    with open(filename, 'wb') as fh:
        fh.write(data)
def worker(dir_num):
    """Worker thread: pull image paths off the global queue, reverse-search
    each on Google Images, and keep the best larger near-duplicate found.

    Reads module globals: q, aicount, grail, postUrl, headers, imdir,
    imdir_done, tmpdir, depth, dtimeout, similarity, move.
    """
    # Per-thread scratch directory for downloaded candidate images.
    cur_tmpdir = tmpdir + str(dir_num) + '/'
    fs('mkdir -p "' + cur_tmpdir + '"')
    while 1:
        try:
            # Progress line: initial total / items left / live worker threads.
            print(str(aicount) + '/' + str(q.qsize()) + '/' + str(threading.activeCount() - 1))
            image = q.get(False)
            ######################################
            # required input name and file name
            if not os.path.isfile(image):
                print(image)
                continue
            fileDict = {'encoded_image': (image, open(image, 'rb'))}
            # submit file via multipart/form-data, other fields not required
            r = requests.post(postUrl, files=fileDict, cookies=grail.cookies, headers=headers)
            # get the last redirect url, thank you Wireshark!
            result = r.history[-1].url
            #print(result)
            ########################################
            # getting page with link to google-images
            dd = requests.get(result, headers={'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.111 Safari/537.36'}).text
            #tow('ddc' + str(dir_num) + '.htm', dd)
            # "Все размеры" is the Russian-locale "All sizes" results link.
            ll = re.findall('<span class="gl"><a href="(.*?)">Все размеры</a>', dd)
            #print(ll)
            try:
                # NOTE(review): replace('&', '&') is a no-op -- almost
                # certainly mangled from replace('&amp;', '&'); verify.
                ll = ll[0].replace('&', '&')
            except:
                # IndexError: no "all sizes" link on the page.
                if 'Изображения других размеров не найдены' in dd:
                    # "No other sizes found": archive the original as done.
                    fs('mv "' + image + '" "' + image.replace(imdir, imdir_done) + '"')
                    continue
                elif 'This page appears when Google automatically detects requests coming from your computer network which appear to be in violation of the' in dd:
                    print('CAPTCHA!!! exiting...')
                    break
                else:
                    #tow('dd' + str(dir_num) + '.htm', dd)
                    print('pushing image back')
                    q.put(image)
                    # NOTE(review): falls through with `ll` still a list, so
                    # the request below raises and is swallowed by the outer
                    # bare except -- presumably unintended; confirm.
            # getting page with links to images
            dd = requests.get('https://www.google.ru' + ll, headers={'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.111 Safari/537.36'}).text
            #tow('ddc' + str(dir_num) + '.htm', dd)
            links = re.findall('"ou":"(.*?)"', dd)
            #print(links)
            # download images
            cur_n = 1
            for url in links:
                try:
                    dd = requests.get(url, timeout = dtimeout).content
                    # verify() rejects truncated / non-image downloads.
                    Image.open(io.BytesIO(dd)).verify()
                    towb(cur_tmpdir + url.split('/')[-1], dd)
                    if depth > cur_n:
                        cur_n += 1
                    else:
                        break
                except:
                    pass
            # Pick the widest candidate findimagedupes considers a duplicate.
            best_size = 0
            for img in fs('ls ' + cur_tmpdir).split('\n'):
                s_image = fs('identify -format "%w" "' + image + '"')
                #print(s_image)
                dupe = fs('findimagedupes -q -t ' + str(similarity) + '% -i \'VIEW(){ identify -format "%w" "$1"; echo ---$1; identify -format "%w" "$2"; echo ---$2; }\' -- "' + image + '" "' + cur_tmpdir + img + '"').replace(s_image + '---' + image, '').strip().split('---')
                if len(dupe) == 2:
                    if int(dupe[0]) > best_size:
                        best_size = int(dupe[0])
                        best_image = dupe[1]
            # Require at least 5% more width before replacing the original.
            # NOTE(review): if no candidates were downloaded, `s_image` is
            # unbound here; the outer bare except masks the NameError.
            if best_size > int(s_image)*1.05:
                print(s_image + ' -> ' + str(best_size) + ' ' + image.replace(imdir, ''))
                if move:
                    if not os.path.isfile(image.replace(imdir, imdir_done)):
                        fs('mv "' + best_image + '" "' + image.replace(imdir, imdir_done) + '"')
                        fs('rm "' + image + '"')
                else:
                    if not os.path.isfile(image.replace(imdir, imdir_done).rsplit('.',1)[0] + '_new.' + image.rsplit('.',1)[1]):
                        fs('mv "' + best_image + '" "' + image.replace(imdir, imdir_done).rsplit('.',1)[0] + '_new.' + image.rsplit('.',1)[1] + '"')
                    fs('mv "' + image + '" "' + image.replace(imdir, imdir_done) + '"')
            else:
                if not os.path.isfile(image.replace(imdir, imdir_done)):
                    fs('mv "' + image + '" "' + image.replace(imdir, imdir_done) + '"')
            # Clear per-thread scratch before the next queue item.
            fs('rm ' + cur_tmpdir + '*')
        except queue.Empty:
            # Queue drained: remove scratch dir and let the thread exit.
            fs('rmdir ' + cur_tmpdir)
            break
        except:
            # NOTE(review): bare except hides all errors for this item.
            print(' Some Error!!!')
############################
# Google endpoints; a browser-like User-agent is required or the upload
# endpoint rejects the request.
url = "https://www.google.ru/imghp"
postUrl = "https://www.google.ru/searchbyimage/upload"
headers = {'User-agent': 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) \\'
           'Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1',
           'origin': 'https://www.google.ru',
           'referer': 'https://www.google.ru/imghp'
           }
# crusade for cookies
grail = requests.get(url, headers = headers)
############################
fs('mkdir -p "' + imdir_done + '"')
# Fill the work queue with every file in imdir.
q = queue.Queue()
for image in fs('ls "' + imdir + '"').split('\n'):
    image = imdir + image
    q.put(image)
aicount = q.qsize()
for i in range(thread_count):
    t = threading.Thread(target=worker, args = (i,))
    t.start()
# NOTE(review): the workers are never join()ed, so this rmdir runs while
# they are still using tmpdir and only succeeds once every per-thread
# subdirectory is gone -- confirm this race is intentional.
fs('rmdir ' + tmpdir)
|
__init__.py | import os
import pathlib
import subprocess
from pathlib import Path
from queue import Queue
from threading import Thread
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from uvicorn import Config, Server
from jina import __version__, __resources_path__
from jina.logging.logger import JinaLogger
from .excepts import (
RequestValidationError,
Runtime400Exception,
daemon_runtime_exception_handler,
validation_exception_handler,
)
from .parser import get_main_parser, _get_run_args
# Parse with an empty argv so importing this package never consumes the real
# command line; main() re-parses the actual args via _update_default_args().
jinad_args = get_main_parser().parse_args([])
daemon_logger = JinaLogger('DAEMON', **vars(jinad_args))
# Work queue consumed by the ConsumerThread started in _start_consumer().
__task_queue__ = Queue()
__root_workspace__ = jinad_args.workspace
# Absolute paths: repository root and the bundled Dockerfiles directory.
__rootdir__ = str(Path(__file__).parent.parent.absolute())
__dockerfiles__ = str(Path(__file__).parent.absolute() / 'Dockerfiles')
def _get_app(mode=None):
    """Build the FastAPI application, mounting routers for the given mode.

    mode None mounts the full management API; 'pod'/'pea'/'flow' mount the
    corresponding partial API only.
    """
    from .api.endpoints import router

    app = FastAPI(
        title='JinaD (Daemon)',
        description='REST interface for managing distributed Jina',
        version=__version__,
        openapi_tags=[
            {
                'name': 'daemon',
                'description': 'API to manage the Daemon',
            },
        ],
    )
    app.add_middleware(
        CORSMiddleware,
        allow_origins=['*'],
        allow_credentials=True,
        allow_methods=['*'],
        allow_headers=['*'],
    )
    app.include_router(router)
    app.add_exception_handler(Runtime400Exception, daemon_runtime_exception_handler)
    app.add_exception_handler(RequestValidationError, validation_exception_handler)
    if mode is None:
        from .api.endpoints import flows, pods, peas, logs, workspaces

        for sub_module in (logs, peas, pods, flows, workspaces):
            app.include_router(sub_module.router)
        app.openapi_tags.extend(
            {'name': tag_name, 'description': tag_description}
            for tag_name, tag_description in (
                ('flows', 'API to manage Flows'),
                ('pods', 'API to manage Pods'),
                ('peas', 'API to manage Peas'),
                ('logs', 'API to stream Logs'),
                ('workspaces', 'API to manage Workspaces'),
            )
        )
    elif mode == 'pod':
        from .api.endpoints.partial import pod

        app.include_router(pod.router)
        app.openapi_tags.append(
            {'name': 'pod', 'description': 'API to manage a Pod'}
        )
    elif mode == 'pea':
        from .api.endpoints.partial import pea

        app.include_router(pea.router)
        app.openapi_tags.append(
            {'name': 'pea', 'description': 'API to manage a Pea'}
        )
    elif mode == 'flow':
        from .api.endpoints.partial import flow

        app.include_router(flow.router)
        app.openapi_tags.append(
            {'name': 'flow', 'description': 'API to manage a Flow'}
        )
    return app
def _start_uvicorn(app: 'FastAPI'):
    """Serve `app` with uvicorn on the configured host/port (blocking).

    Logs the Jina stop message once the server returns.
    """
    config = Config(
        app=app,
        host=jinad_args.host,
        port=jinad_args.port_expose,
        loop='uvloop',
        log_level='error',
    )
    server = Server(config=config)
    server.run()
    # Imported lazily so the message reflects the state at shutdown time.
    from jina import __stop_msg__
    daemon_logger.success(__stop_msg__)
def _start_fluentd():
    """Launch fluentd and relay its stdout lines into the daemon log.

    Blocks while fluentd runs (meant to be called from a daemon thread).
    If the fluentd binary is missing, disables log streaming globally by
    flipping `jinad_args.no_fluentd`.
    """
    daemon_logger.info('starting fluentd...')
    cfg = os.path.join(__resources_path__, 'fluent.conf')
    try:
        fluentd_proc = subprocess.Popen(
            ['fluentd', '-c', cfg],
            stdout=subprocess.PIPE,
            stdin=subprocess.PIPE,
            bufsize=0,
            universal_newlines=True,
        )
        # Relays output line-by-line; ends only when fluentd closes stdout.
        for line in fluentd_proc.stdout:
            daemon_logger.debug(f'fluentd: {line.strip()}')
    except FileNotFoundError:
        daemon_logger.warning('fluentd not found locally, jinad cannot stream logs!')
        jinad_args.no_fluentd = True
def _start_consumer():
    """Start the background thread that consumes the module task queue."""
    from .tasks import ConsumerThread
    ConsumerThread().start()
def _update_default_args():
    """Re-parse the real CLI args and adjust the root workspace.

    Replaces the module-level `jinad_args`/`__root_workspace__` that were
    initialized at import time from an empty argv.
    """
    global jinad_args, __root_workspace__
    jinad_args = _get_run_args()
    # When running a partial mode (inside a container) the workspace is fixed.
    __root_workspace__ = '/workspace' if jinad_args.mode else jinad_args.workspace
def main():
    """Entrypoint for jinad"""
    _update_default_args()
    # Ensure the workspace exists before anything tries to write into it.
    pathlib.Path(__root_workspace__).mkdir(parents=True, exist_ok=True)
    if not jinad_args.no_fluentd:
        # Daemon thread: the fluentd relay must not block interpreter exit.
        Thread(target=_start_fluentd, daemon=True).start()
    _start_consumer()
    # Blocks here serving HTTP until the server shuts down.
    _start_uvicorn(app=_get_app(mode=jinad_args.mode))
|
gaussian_device_manager.py | import cupy as cp
import numpy as np
import sys
from threading import Event, Thread
from typing import List, Tuple, Callable
from gpucsl.pc.helpers import transform_to_pmax_cupy, postprocess_pmax_cupy, timed
from gpucsl.pc.kernel_management import Kernels
# Signature of a per-device worker entry point: (device_index, manager).
WorkerFunction = Callable[
    [int, "GaussianDeviceManager"], None
]  # string forward reference: GaussianDeviceManager is defined further down
# Wrapper to measure the initialization time of GaussianDeviceManager
@timed
def create_gaussian_device_manager(
    skeleton: np.ndarray,
    correlation_matrix: np.ndarray,
    thresholds: List[float],
    num_observations: int,
    max_level: int,
    devices: List[int],
    sync_device: int = None,
    kernels: Kernels = None,
    is_debug: bool = False,
    should_log: bool = False,
):
    """Construct a GaussianDeviceManager, forwarding all arguments unchanged.

    The @timed decorator reports how long construction (device resource
    allocation and kernel compilation) takes.
    """
    return GaussianDeviceManager(
        skeleton,
        correlation_matrix,
        thresholds,
        num_observations,
        max_level,
        devices,
        sync_device,
        kernels,
        is_debug,
        should_log,
    )
# GaussianDeviceManager:
# - allocates ressources on the corresponding devices
# - compiles kernels on the corresponding devices
# - performs synchronization in the multi-GPU setting
# sync_device: device, where P2P synchronization is done in multi-GPU case; ignored in a single GPU setting
class GaussianDeviceManager:
    """Own and coordinate per-GPU state for gaussian CI skeleton discovery.

    Responsibilities:
    - allocate device arrays (skeleton, correlation matrix, separation sets,
      zmin) on every device in ``devices``
    - compile the CI kernels per device (one compile thread per device)
    - in the multi-GPU case, synchronize worker threads level by level,
      merging per-device skeletons on ``sync_device`` and broadcasting the
      merged result back (P2P access handled by cupy)
    """
    def __init__(
        self,
        skeleton: np.ndarray,
        correlation_matrix: np.ndarray,
        thresholds: List[float],
        num_observations: int,
        max_level: int,
        devices: List[int],
        sync_device: int = None,
        kernels: Kernels = None,
        is_debug: bool = False,
        should_log: bool = False,
    ):
        self.sync_device = sync_device
        self.devices = devices
        self.n_devices = len(devices)
        self.max_level = max_level
        # skeleton is assumed square (variable x variable adjacency matrix)
        self.variable_count = skeleton.shape[0]
        self.num_observations = num_observations
        self.thresholds = thresholds
        self.is_debug = is_debug
        self.should_log = should_log
        self.multi_gpu_execution = self.n_devices > 1
        self._initialize_sync_device_index_ressources()
        self._initialize_device_ressources()
        self._fill_device_ressources(skeleton, correlation_matrix)
        self.kernels = kernels
        self._initialize_kernels()
    def _initialize_sync_device_index_ressources(self):
        """Resolve sync_device to an index into ``devices`` (defaulting to the
        first device) and record the other device indexes for merging."""
        self.sync_device_index = (
            self.devices.index(self.sync_device) if self.sync_device is not None else 0
        )
        assert (self.sync_device_index >= 0) and (
            self.sync_device_index < self.n_devices
        ), "Invalid sync_device_index value"
        self.remaining_device_indexes = list(range(0, self.n_devices))
        self.remaining_device_indexes.remove(self.sync_device_index)
    def _initialize_device_ressources(self):
        """Create empty per-device slots plus the flags/events used for the
        level-wise worker <-> main-thread handshake."""
        self.d_skeletons = [None for _ in range(self.n_devices)]
        self.d_compacted_skeletons = [None for _ in range(self.n_devices)]
        self.d_correlation_matrices = [None for _ in range(self.n_devices)]
        self.d_seperation_sets_array = [None for _ in range(self.n_devices)]
        self.d_zmins = [None for _ in range(self.n_devices)]
        self.device_streams = [None for _ in range(self.n_devices)]
        # stop_flags[i] is set by worker i when it has no more work
        self.stop_flags = [False for _ in range(self.n_devices)]
        # ready_events: worker -> main ("level done"); main_events: main -> worker ("continue")
        self.ready_events = [Event() for _ in range(self.n_devices)]
        self.main_events = [Event() for _ in range(self.n_devices)]
    def _fill_device_ressources(
        self, skeleton: np.ndarray, correlation_matrix: np.ndarray
    ):
        """Copy skeleton/correlation matrix onto every device and allocate the
        separation-set (-1 = unset) and zmin (float max = untested) buffers."""
        for device_index, device in enumerate(self.devices):
            with cp.cuda.Device(device):
                self.device_streams[device_index] = cp.cuda.Stream()
                with self.device_streams[device_index]:
                    self.d_correlation_matrices[device_index] = cp.asarray(
                        correlation_matrix
                    )
                    d_skeleton = cp.asarray(skeleton)
                    self.d_skeletons[device_index] = d_skeleton
                    self.d_compacted_skeletons[device_index] = d_skeleton.astype(
                        np.int32, copy=True
                    )
                    self.d_seperation_sets_array[device_index] = cp.full(
                        self.variable_count * self.variable_count * self.max_level,
                        -1,
                        np.int32,
                    )
                    self.d_zmins[device_index] = cp.full(
                        (self.variable_count, self.variable_count),
                        sys.float_info.max,
                        np.float32,
                    )
        # wait until all copies/allocations have finished on every device
        self.sync_streams()
    def _initialize_kernels(self):
        """Use caller-supplied kernels if given (one per device), otherwise
        compile them in parallel via the module-level compile_kernels()."""
        if self.kernels is not None:
            assert len(self.kernels) == self.n_devices
        else:
            self.kernels = [None] * self.n_devices
            self._compile_kernels(compile_kernels)
    def _compile_kernels(self, compile_function: Callable):
        """Run compile_function(device_index, self) on one thread per device
        and wait for all of them."""
        threads = [
            Thread(target=compile_function, args=(device_index, self))
            for device_index in range(self.n_devices)
        ]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
    def sync_streams(self):
        """Block until all queued work on every device stream has completed."""
        for stream in self.device_streams:
            stream.synchronize()
    def get_static_data(self):
        """Return the run-wide constants shared by all workers."""
        return (
            self.variable_count,
            self.max_level,
            self.thresholds,
            self.num_observations,
            self.stop_flags,
            self.n_devices,
            self.devices,
        )
    def get_data_for_device_index(self, device_index: int):
        """Return all per-device state (arrays, stream, events, kernels) for one worker."""
        return (
            self.d_skeletons[device_index],
            self.d_compacted_skeletons[device_index],
            self.d_correlation_matrices[device_index],
            self.d_zmins[device_index],
            self.d_seperation_sets_array[device_index],
            self.device_streams[device_index],
            self.ready_events[device_index],
            self.main_events[device_index],
            self.kernels[device_index],
        )
    def get_skeleton_for_device_index(self, device_index: int):
        """Return the device-resident skeleton array of one device."""
        return self.d_skeletons[device_index]
    @timed
    def compute_skeleton(self, worker_function: WorkerFunction) -> np.ndarray:
        """Run the CI workers and return the final skeleton as a host array.

        Single-GPU: run the worker inline. Multi-GPU: run one worker thread
        per device with level-wise merging.
        """
        if self.multi_gpu_execution:
            final_skeleton = self.execute_ci_workers_in_parallel(worker_function)
        else:
            worker_function(0, self)
            final_skeleton = self.get_skeleton_for_device_index(0).get()
        return final_skeleton
    def execute_ci_workers_in_parallel(
        self, worker_function: WorkerFunction
    ) -> np.ndarray:
        """Drive one worker thread per device; after each level, wait for all
        still-active workers, merge + broadcast the skeleton, then release them."""
        threads = [
            Thread(target=worker_function, args=(device_index, self))
            for device_index in range(self.n_devices)
        ]
        for thread in threads:
            thread.start()
        for level in range(0, self.max_level + 1):
            # all workers finished early -> nothing left to synchronize
            if sum(self.stop_flags) == self.n_devices:
                break
            for device_index, ready_event in enumerate(self.ready_events):
                if not self.stop_flags[device_index]:
                    ready_event.wait()
            for ready_event in self.ready_events:
                ready_event.clear()
            d_merged_skeleton = self.synchronize_skeletons()
            # signal every worker to proceed to the next level
            for main_event in self.main_events:
                main_event.set()
        for thread in threads:
            thread.join()
        final_skeleton = d_merged_skeleton.get()
        return final_skeleton
    def synchronize_skeletons(self) -> cp.ndarray:
        """Merge all per-device skeletons and broadcast the result back."""
        d_merged_skeleton = self.merge_skeletons()
        self.broadcast_merged_skeleton(d_merged_skeleton)
        return d_merged_skeleton
    def merge_skeletons(self) -> cp.ndarray:
        """Element-wise minimum of all device skeletons, computed on the sync device."""
        d_merged_skeleton = self.d_skeletons[self.sync_device_index]
        if self.multi_gpu_execution:
            with cp.cuda.Device(self.devices[self.sync_device_index]):
                with self.device_streams[self.sync_device_index]:
                    for device_index in self.remaining_device_indexes:
                        d_merged_skeleton = cp.minimum(
                            # NOTE: arrays reside on different devices; cupy automatically enables P2P access
                            d_merged_skeleton,
                            self.d_skeletons[device_index],
                        )
            self.sync_streams()
        return d_merged_skeleton
    def broadcast_merged_skeleton(self, d_merged_skeleton: cp.ndarray):
        """Install the merged skeleton on the sync device and copy it to every other device."""
        assert (
            self.d_skeletons[self.sync_device_index].device == d_merged_skeleton.device
        )
        # d_merged_skeleton already resides on the sync_device
        self.d_skeletons[self.sync_device_index] = d_merged_skeleton
        for device_index in self.remaining_device_indexes:
            with cp.cuda.Device(self.devices[device_index]):
                with self.device_streams[device_index]:
                    # move a copy of d_merged_skeleton to the corresponding device
                    self.d_skeletons[device_index] = d_merged_skeleton.copy()
    def merge_zmins(self) -> cp.ndarray:
        """Element-wise minimum of all device zmin matrices, on the sync device."""
        d_merged_zmin = self.d_zmins[self.sync_device_index]
        if self.multi_gpu_execution:
            with cp.cuda.Device(self.devices[self.sync_device_index]):
                with self.device_streams[self.sync_device_index]:
                    for device_index in self.remaining_device_indexes:
                        d_merged_zmin = cp.minimum(
                            # NOTE: arrays reside on different devices; cupy automatically enables P2P access
                            d_merged_zmin,
                            self.d_zmins[device_index],
                        )
            self.sync_streams()
        return d_merged_zmin
    # In d_zmin, only the upper right triangle is filled. This operation fills the whole
    # matrix using mirroring along the diagonal.
    def mirror_array(self, d_zmin: cp.ndarray) -> cp.ndarray:
        """Return min(d_zmin, d_zmin.T): a symmetric matrix from the filled triangle."""
        return cp.minimum(d_zmin, cp.transpose(d_zmin))
    def create_merge_masks(self, d_merged_zmin: cp.ndarray) -> List[cp.ndarray]:
        """For each device, build a boolean mask marking entries where that
        device's zmin equals the global minimum (i.e. that device 'won')."""
        d_merge_masks = [None for _ in range(self.n_devices)]
        with cp.cuda.Device(self.devices[self.sync_device_index]):
            with self.device_streams[self.sync_device_index]:
                d_mirrored_merged_zmin = self.mirror_array(d_merged_zmin)
        for device_index, device in enumerate(self.devices):
            with cp.cuda.Device(self.devices[device_index]):
                with self.device_streams[device_index]:
                    # NOTE: arrays reside on different devices; cupy automatically enables P2P access
                    d_current_mirrored_zmin = self.mirror_array(
                        self.d_zmins[device_index]
                    )
                    d_merge_masks[device_index] = (
                        d_mirrored_merged_zmin == d_current_mirrored_zmin
                    )
        return d_merge_masks
    def merge_separation_sets(self, d_merge_masks: List[cp.ndarray]) -> np.ndarray:
        # When the same edge was deleted on different devices, return the separation set with the highest pmax value
        d_merged_seperation_sets = self.d_seperation_sets_array[self.sync_device_index]
        with cp.cuda.Device(self.devices[self.sync_device_index]):
            with self.device_streams[self.sync_device_index]:
                for device_index in self.remaining_device_indexes:
                    d_current_separation_sets = self.d_seperation_sets_array[
                        device_index
                    ]
                    d_current_merge_mask = d_merge_masks[device_index]
                    # expand the per-edge mask to cover the max_level sepset slots per edge
                    repeated_mask = cp.repeat(d_current_merge_mask, self.max_level)
                    d_merged_seperation_sets[repeated_mask] = d_current_separation_sets[
                        repeated_mask
                    ]
        self.sync_streams()
        return d_merged_seperation_sets.get()
    def transform_merged_zmin_to_postprocessed_pmax(
        self, d_merged_zmin: cp.ndarray
    ) -> np.ndarray:
        """Convert merged zmin values to postprocessed pmax values (host array)."""
        with cp.cuda.Device(self.devices[self.sync_device_index]):
            with self.device_streams[self.sync_device_index]:
                pmax = postprocess_pmax_cupy(
                    transform_to_pmax_cupy(d_merged_zmin, self.num_observations)
                )
        return pmax.get()
    @timed
    def get_merged_pmaxes_and_separation_sets(self) -> Tuple[np.ndarray, np.ndarray]:
        """Return (pmax matrix, separation sets) merged across all devices.

        Single-GPU: device 0's results are used directly, no merging needed.
        """
        if self.multi_gpu_execution:
            d_merged_zmin = self.merge_zmins()
            d_merge_masks = self.create_merge_masks(d_merged_zmin)
            merged_separation_sets = self.merge_separation_sets(d_merge_masks)
            postprocessed_merged_pmax = (
                self.transform_merged_zmin_to_postprocessed_pmax(d_merged_zmin)
            )
        else:
            postprocessed_merged_pmax = (
                self.transform_merged_zmin_to_postprocessed_pmax(self.d_zmins[0])
            )
            merged_separation_sets = self.d_seperation_sets_array[0].get()
        return postprocessed_merged_pmax, merged_separation_sets
# Precompile kernels to separate the kernel compilation and kernel execution stages
# which allows separate measurements
def compile_kernels(device_index: int, device_manager: GaussianDeviceManager):
    """Compile the gaussian-CI kernels for one device and store them in the manager.

    Runs inside that device's context and stream so compilation targets the
    right GPU; synchronizes the stream before returning so the kernels are
    ready when the worker starts.
    """
    with cp.cuda.Device(device_manager.devices[device_index]):
        with device_manager.device_streams[device_index]:
            device_manager.kernels[device_index] = Kernels.for_gaussian_ci(
                device_manager.variable_count,
                device_manager.n_devices,
                device_manager.max_level,
                device_manager.should_log,
                device_manager.is_debug,
            )
            device_manager.device_streams[device_index].synchronize()
|
btmaketorrentgui.py | #!/usr/bin/env python
# Written by Bram Cohen
# modified for multitracker by John Hoffman
# see LICENSE.txt for license information
from BitTornado import PSYCO
if PSYCO.psyco:
try:
import psyco
assert psyco.__version__ >= 0x010100f0
psyco.full()
except:
pass
from sys import argv, version
from BitTornado.BT1.makemetafile import make_meta_file, completedir
from threading import Event, Thread
from BitTornado.bencode import bdecode
import sys
from os import getcwd
from os.path import join, isdir
try:
from wxPython.wx import *
except:
print 'wxPython is either not installed or has not been installed properly.'
sys.exit(1)
try:
True
except:
True = 1
False = 0
# Custom wx event type used to marshal calls from worker threads onto the GUI thread.
wxEVT_INVOKE = wxNewEventType()
def EVT_INVOKE(win, func):
    # Bind func as the handler for wxEVT_INVOKE events delivered to win.
    win.Connect(-1, -1, wxEVT_INVOKE, func)
class InvokeEvent(wxPyEvent):
    """Event carrying a callable + arguments, posted by worker threads and
    executed on the GUI thread by the EVT_INVOKE handler."""
    def __init__(self, func, args, kwargs):
        wxPyEvent.__init__(self)
        self.SetEventType(wxEVT_INVOKE)
        self.func = func
        self.args = args
        self.kwargs = kwargs
class DownloadInfo:
    """Main torrent-maker window: source selection, announce URLs, piece size,
    comment, and the 'make' action. (Python 2 / legacy wxPython API.)"""
    def __init__(self):
        """Build the window and wire up all button handlers."""
        frame = wxFrame(None, -1, 'BitTorrent Torrent File Maker', size = wxSize(550, 410))
        self.frame = frame
        panel = wxPanel(frame, -1)
        gridSizer = wxFlexGridSizer(cols = 2, rows = 2, vgap = 0, hgap = 8)
        # row: source path entry with 'dir'/'file' picker buttons
        gridSizer.Add(wxStaticText(panel, -1, 'make torrent of:'))
        b = wxBoxSizer(wxHORIZONTAL)
        self.dirCtl = wxTextCtrl(panel, -1, '')
        b.Add(self.dirCtl, 1, wxEXPAND)
        # b.Add(10, 10, 0, wxEXPAND)
        button = wxButton(panel, -1, 'dir', size = (30,20))
        EVT_BUTTON(frame, button.GetId(), self.selectdir)
        b.Add(button, 0)
        button2 = wxButton(panel, -1, 'file', size = (30,20))
        EVT_BUTTON(frame, button2.GetId(), self.selectfile)
        b.Add(button2, 0)
        gridSizer.Add(b, 0, wxEXPAND)
        gridSizer.Add(wxStaticText(panel, -1, ''))
        gridSizer.Add(wxStaticText(panel, -1, ''))
        # row: primary announce URL
        gridSizer.Add(wxStaticText(panel, -1, 'announce url:'))
        self.annCtl = wxTextCtrl(panel, -1, 'http://my.tracker:6969/announce')
        gridSizer.Add(self.annCtl, 0, wxEXPAND)
        gridSizer.Add(wxStaticText(panel, -1, ''))
        gridSizer.Add(wxStaticText(panel, -1, ''))
        # row: multitracker announce list plus copy-from-torrent button
        a = wxFlexGridSizer(cols = 1)
        a.Add(wxStaticText(panel, -1, 'announce list:'))
        a.Add(wxStaticText(panel, -1, ''))
        abutton = wxButton(panel, -1, 'copy\nannounces\nfrom\ntorrent', size = (50,70))
        EVT_BUTTON(frame, abutton.GetId(), self.announcecopy)
        a.Add(abutton, 0, wxEXPAND)
        gridSizer.Add(a, 0, wxEXPAND)
        self.annListCtl = wxTextCtrl(panel, -1, '\n\n\n\n\n', wxPoint(-1,-1), (400,120),
            wxTE_MULTILINE|wxHSCROLL|wxTE_DONTWRAP)
        gridSizer.Add(self.annListCtl, -1, wxEXPAND)
        gridSizer.Add(wxStaticText(panel, -1, ''))
        exptext = wxStaticText(panel, -1,
            "a list of announces separated by commas " +
            "or whitespace and on several lines -\n" +
            "trackers on the same line will be tried randomly," +
            "and all the trackers on one line\n" +
            "will be tried before the trackers on the next line.")
        exptext.SetFont(wxFont(6, wxDEFAULT, wxNORMAL, wxNORMAL, False))
        gridSizer.Add(exptext)
        gridSizer.Add(wxStaticText(panel, -1, ''))
        gridSizer.Add(wxStaticText(panel, -1, ''))
        # row: piece size choice; index maps into piece_length_list below
        gridSizer.Add(wxStaticText(panel, -1, 'piece size:'))
        self.piece_length = wxChoice(panel, -1,
            choices = ['automatic', '2MiB', '1MiB', '512KiB', '256KiB', '128KiB', '64KiB', '32KiB'])
        # powers of two for each choice; 0 means 'automatic'
        self.piece_length_list = [0, 21, 20, 19, 18, 17, 16, 15]
        self.piece_length.SetSelection(0)
        gridSizer.Add(self.piece_length)
        gridSizer.Add(wxStaticText(panel, -1, ''))
        gridSizer.Add(wxStaticText(panel, -1, ''))
        # row: optional torrent comment
        gridSizer.Add(wxStaticText(panel, -1, 'comment:'))
        self.commentCtl = wxTextCtrl(panel, -1, '')
        gridSizer.Add(self.commentCtl, 0, wxEXPAND)
        gridSizer.AddGrowableCol(1)
        border = wxBoxSizer(wxVERTICAL)
        border.Add(gridSizer, 0, wxEXPAND | wxNORTH | wxEAST | wxWEST, 25)
        b2 = wxButton(panel, -1, 'make')
        # border.Add(10, 10, 1, wxEXPAND)
        border.Add(b2, 0, wxALIGN_CENTER | wxSOUTH, 20)
        EVT_BUTTON(frame, b2.GetId(), self.complete)
        panel.SetSizer(border)
        panel.SetAutoLayout(True)
        # panel.DragAcceptFiles(True)
        # EVT_DROP_FILES(panel, self.selectdrop)
    def selectdir(self, x):
        """'dir' button: pick a directory and put its path in the source field."""
        dl = wxDirDialog(self.frame, style = wxDD_DEFAULT_STYLE | wxDD_NEW_DIR_BUTTON)
        if dl.ShowModal() == wxID_OK:
            self.dirCtl.SetValue(dl.GetPath())
    def selectfile(self, x):
        """'file' button: pick a single file and put its path in the source field."""
        dl = wxFileDialog (self.frame, 'Choose file or directory to use', '', '', '', wxOPEN)
        if dl.ShowModal() == wxID_OK:
            self.dirCtl.SetValue(dl.GetPath())
    def selectdrop(self, x):
        """Drag-and-drop handler (currently unbound — see commented EVT_DROP_FILES above)."""
        print x
        list = x.m_files
        self.dirCtl.SetValue(x[0])
    def announcecopy(self, x):
        """Copy 'announce' and 'announce-list' out of an existing .torrent into the form.

        Any error while reading/decoding the file is silently ignored.
        """
        dl = wxFileDialog (self.frame, 'Choose .torrent file to use', '', '', '*.torrent', wxOPEN)
        if dl.ShowModal() == wxID_OK:
            try:
                h = open(dl.GetPath(), 'rb')
                metainfo = bdecode(h.read())
                h.close()
                self.annCtl.SetValue(metainfo['announce'])
                if metainfo.has_key('announce-list'):
                    # render tiers as comma-separated trackers, one tier per line
                    list = []
                    for tier in metainfo['announce-list']:
                        for tracker in tier:
                            list += [tracker, ', ']
                        del list[-1]
                        list += ['\n']
                    liststring = ''
                    for i in list:
                        liststring += i
                    self.annListCtl.SetValue(liststring+'\n\n')
                else:
                    self.annListCtl.SetValue('')
            except:
                return
    def getannouncelist(self):
        """Parse the announce-list text box: one tier per line, trackers split
        on commas/spaces; empty entries and empty tiers are dropped."""
        list = []
        for t in self.annListCtl.GetValue().split('\n'):
            tier = []
            t = t.replace(',',' ')
            for tr in t.split(' '):
                if tr != '':
                    tier += [tr]
            if len(tier)>0:
                list.append(tier)
        return list
    def complete(self, x):
        """'make' button: validate input, collect params, and start CompleteDir."""
        if self.dirCtl.GetValue() == '':
            dlg = wxMessageDialog(self.frame, message = 'You must select a\n file or directory',
                caption = 'Error', style = wxOK | wxICON_ERROR)
            dlg.ShowModal()
            dlg.Destroy()
            return
        params = {'piece_size_pow2': self.piece_length_list[self.piece_length.GetSelection()]}
        annlist = self.getannouncelist()
        if len(annlist)>0:
            params['real_announce_list'] = annlist
        comment = self.commentCtl.GetValue()
        if comment != '':
            params['comment'] = comment
        try:
            CompleteDir(self.dirCtl.GetValue(), self.annCtl.GetValue(), params)
        except:
            # print_exc is imported at module level just below this class
            print_exc()
from traceback import print_exc
class CompleteDir:
    """Drive torrent creation with a progress window.

    For a directory source, first asks whether to create one .torrent per
    item; the actual hashing runs on a worker thread and reports progress
    back to the GUI thread via InvokeEvent.
    """
    def __init__(self, d, a, params):
        self.d = d                      # source file or directory path
        self.a = a                      # announce URL
        self.params = params            # makemetafile parameters
        self.flag = Event()             # set when the user cancels / window closes
        self.separatetorrents = False
        if isdir(d):
            self.choicemade = Event()   # NOTE(review): set nowhere visible — possibly vestigial
            frame = wxFrame(None, -1, 'BitTorrent make torrent', size = (1,1))
            self.frame = frame
            panel = wxPanel(frame, -1)
            gridSizer = wxFlexGridSizer(cols = 1, vgap = 8, hgap = 8)
            gridSizer.AddGrowableRow(1)
            gridSizer.Add(wxStaticText(panel, -1,
                'Do you want to make a separate .torrent'),0,wxALIGN_CENTER)
            gridSizer.Add(wxStaticText(panel, -1,
                'for every item in this directory?'),0,wxALIGN_CENTER)
            gridSizer.Add(wxStaticText(panel, -1, ''))
            b = wxFlexGridSizer(cols = 3, hgap = 10)
            yesbut = wxButton(panel, -1, 'Yes')
            def saidyes(e, self = self):
                # one .torrent per directory entry
                self.frame.Destroy()
                self.separatetorrents = True
                self.begin()
            EVT_BUTTON(frame, yesbut.GetId(), saidyes)
            b.Add(yesbut, 0)
            nobut = wxButton(panel, -1, 'No')
            def saidno(e, self = self):
                # single .torrent for the whole directory
                self.frame.Destroy()
                self.begin()
            EVT_BUTTON(frame, nobut.GetId(), saidno)
            b.Add(nobut, 0)
            cancelbut = wxButton(panel, -1, 'Cancel')
            def canceled(e, self = self):
                self.frame.Destroy()
            EVT_BUTTON(frame, cancelbut.GetId(), canceled)
            b.Add(cancelbut, 0)
            gridSizer.Add(b, 0, wxALIGN_CENTER)
            border = wxBoxSizer(wxHORIZONTAL)
            border.Add(gridSizer, 1, wxEXPAND | wxALL, 4)
            panel.SetSizer(border)
            panel.SetAutoLayout(True)
            frame.Show()
            border.Fit(panel)
            frame.Fit()
        else:
            # single file: no question to ask, start right away
            self.begin()
    def begin(self):
        """Build the progress window and start the hashing worker thread."""
        if self.separatetorrents:
            frame = wxFrame(None, -1, 'BitTorrent make directory', size = wxSize(550, 250))
        else:
            frame = wxFrame(None, -1, 'BitTorrent make torrent', size = wxSize(550, 250))
        self.frame = frame
        panel = wxPanel(frame, -1)
        gridSizer = wxFlexGridSizer(cols = 1, vgap = 15, hgap = 8)
        if self.separatetorrents:
            self.currentLabel = wxStaticText(panel, -1, 'checking file sizes')
        else:
            self.currentLabel = wxStaticText(panel, -1, 'building ' + self.d + '.torrent')
        gridSizer.Add(self.currentLabel, 0, wxEXPAND)
        self.gauge = wxGauge(panel, -1, range = 1000, style = wxGA_SMOOTH)
        gridSizer.Add(self.gauge, 0, wxEXPAND)
        gridSizer.Add((10, 10), 1, wxEXPAND)
        self.button = wxButton(panel, -1, 'cancel')
        gridSizer.Add(self.button, 0, wxALIGN_CENTER)
        gridSizer.AddGrowableRow(2)
        gridSizer.AddGrowableCol(0)
        g2 = wxFlexGridSizer(cols = 1, vgap = 15, hgap = 8)
        g2.Add(gridSizer, 1, wxEXPAND | wxALL, 25)
        g2.AddGrowableRow(0)
        g2.AddGrowableCol(0)
        panel.SetSizer(g2)
        panel.SetAutoLayout(True)
        EVT_BUTTON(frame, self.button.GetId(), self.done)
        EVT_CLOSE(frame, self.done)
        EVT_INVOKE(frame, self.onInvoke)
        frame.Show(True)
        # hashing happens off the GUI thread; progress comes back via invokeLater
        Thread(target = self.complete).start()
    def complete(self):
        """Worker-thread body: run completedir/make_meta_file and update the UI on finish/error."""
        try:
            if self.separatetorrents:
                completedir(self.d, self.a, self.params, self.flag,
                        self.valcallback, self.filecallback)
            else:
                make_meta_file(self.d, self.a, self.params, self.flag,
                        self.valcallback, progress_percent = 1)
            if not self.flag.isSet():
                self.currentLabel.SetLabel('Done!')
                self.gauge.SetValue(1000)
                self.button.SetLabel('Close')
                self.frame.Refresh()
        except (OSError, IOError), e:
            self.currentLabel.SetLabel('Error!')
            self.button.SetLabel('Close')
            dlg = wxMessageDialog(self.frame, message = 'Error - ' + str(e),
                caption = 'Error', style = wxOK | wxICON_ERROR)
            dlg.ShowModal()
            dlg.Destroy()
    def valcallback(self, amount):
        """Progress callback (worker thread): forward fraction to the GUI thread."""
        self.invokeLater(self.onval, [amount])
    def onval(self, amount):
        """GUI thread: map 0..1 progress onto the 0..1000 gauge."""
        self.gauge.SetValue(int(amount * 1000))
    def filecallback(self, f):
        """Per-file callback (worker thread) in separate-torrents mode."""
        self.invokeLater(self.onfile, [f])
    def onfile(self, f):
        """GUI thread: show which file's .torrent is being built."""
        self.currentLabel.SetLabel('building ' + join(self.d, f) + '.torrent')
    def onInvoke(self, event):
        """GUI thread: execute a call posted by invokeLater, unless cancelled."""
        if not self.flag.isSet():
            apply(event.func, event.args, event.kwargs)
    def invokeLater(self, func, args = [], kwargs = {}):
        """Post func(*args, **kwargs) to the GUI thread, unless cancelled."""
        if not self.flag.isSet():
            wxPostEvent(self.frame, InvokeEvent(func, args, kwargs))
    def done(self, event):
        """Cancel/close handler: signal the worker to stop and tear down the window."""
        self.flag.set()
        self.frame.Destroy()
class btWxApp(wxApp):
    """Application shell: create the maker window and make it the top window."""
    def OnInit(self):
        d = DownloadInfo()
        d.frame.Show(True)
        self.SetTopWindow(d.frame)
        return True
if __name__ == '__main__':
    # Launch the GUI event loop when run as a script.
    btWxApp().MainLoop()
|
daemon_test.py | import os
import json
import time
import threading
import pytest
from wampy.peers import Client
from tuxeatpi_common.cli import main_cli, set_daemon_class
from tuxeatpi_nlu_nuance.daemon import NLU
from tuxeatpi_common.message import Message
from click.testing import CliRunner
def shutdown(self):
    """Replacement ``shutdown`` monkeypatched onto the daemon in setup_class.

    Delegates to the real ``NLU`` base-class shutdown via an explicit
    ``super(NLU, self)`` call (works unbound at module level).
    """
    super(NLU, self).shutdown()
class TestDaemon(object):
    """Integration-style test of the NLU daemon over a WAMP bus.

    setup_class stubs out pynuance network calls, patches the daemon's main
    loop/registry/shutdown, and registers fake ``hotword``/``speech``
    endpoints plus a topic subscription that records what the daemon emits.
    """
    @classmethod
    def setup_class(self):
        workdir = "tests/workdir"
        intents = "intents"
        dialogs = "dialogs"
        # stub out all pynuance calls that would hit the network
        from unittest.mock import MagicMock
        from pynuance import credentials
        credentials.save_cookies = MagicMock()
        from pynuance import nlu
        nlu.understand_text = _fake_nlu_text2
        self.nlu_daemon = NLU('nlu_test', workdir, intents, dialogs)
        self.nlu_daemon.settings.language = "en_US"
        # flags flipped by the fake endpoints below; asserted in test_nlu
        self.disable = False
        self.enable = False
        self.speech = False
        self.nlutest = False
        def get_message(message, meta):
            # subscription callback: record payload and which topic fired
            payload = json.loads(message)
            self.message = payload.get("data", {}).get("arguments", {})
            if meta['topic'] == "hotword.disable":
                self.disable = True
            elif meta['topic'] == "hotword.enable":
                self.enable = True
            elif meta['topic'] == "nlu_test.test":
                self.nlutest = True
            self.message_topic = meta['topic']
        def hotword_disable():
            self.disable = True
        def hotword_enable():
            self.enable = True
        def speech_say(text):
            self.speech = True
        def main_loop():
            # replace the daemon's real loop with a short sleep
            time.sleep(1)
        self.nlu_daemon.main_loop = main_loop
        def fake_registry():
            return {"nlu_test": {"state": "ALIVE"}}
        self.nlu_daemon.registry.read = fake_registry
        self.nlu_daemon.shutdown = shutdown
        # wire the fake endpoints onto a real WAMP client
        self.wamp_client = Client(realm="tuxeatpi")
        self.wamp_client.start()
        self.wamp_client.session._register_procedure("hotword.disable")
        setattr(self.wamp_client, "hotword.disable", hotword_disable)
        self.wamp_client.session._register_procedure("hotword.enable")
        setattr(self.wamp_client, "hotword.enable", hotword_enable)
        self.wamp_client.session._register_procedure("speech.say")
        setattr(self.wamp_client, "speech.say", speech_say)
        self.wamp_client.session._subscribe_to_topic(get_message, "nlu_test.test")
    @classmethod
    def teardown_class(self):
        """Remove persisted settings/registry entries and stop the daemon."""
        self.message = None
        self.nlu_daemon.settings.delete("/config/global")
        self.nlu_daemon.settings.delete("/config/nlu_test")
        self.nlu_daemon.settings.delete()
        self.nlu_daemon.registry.clear()
        try:  # CircleCI specific
            # shutdown was replaced with the module-level function, so pass the
            # daemon explicitly as ``self``
            self.nlu_daemon.shutdown(self.nlu_daemon)
        except AttributeError:
            pass
    @pytest.mark.order1
    def test_nlu(self, capsys):
        """Drive config, audio and text understanding through the stubbed daemon."""
        t = threading.Thread(target=self.nlu_daemon.start)
        t = t.start()  # NOTE(review): rebinds t to None — the Thread handle is lost; confirm intended
        # time.sleep(1)
        # Global
        global_config = {"language": "en_US",
                         "nlu_engine": "fake_nlu",
                         }
        self.nlu_daemon.settings.save(global_config, "global")
        # Config
        config = {"app_id": "FAKE_app_id",
                  "app_key": "FAKE_app_key",
                  "username": "USERNAME",
                  "password": "PASSWORD",
                  }
        self.nlu_daemon.settings.save(config)
        self.nlu_daemon.set_config(config)
        time.sleep(2)
        assert self.nlu_daemon.app_id == "FAKE_app_id"
        assert self.nlu_daemon.app_key == "FAKE_app_key"
        assert self.nlu_daemon.username == "USERNAME"
        assert self.nlu_daemon.password == "PASSWORD"
        from pynuance import nlu
        nlu.understand_audio = _fake_nlu_text2
        self.nlu_daemon.audio()
        time.sleep(1)
        # audio understanding should have toggled the hotword off and back on
        assert self.disable == True
        assert self.enable == True
        nlu.understand_text = _fake_nlu_text2
        self.nlu_daemon.text("What time is it ?")
        time.sleep(1)
        self.nlu_daemon.test()
        assert self.speech == True
        return
def _fake_nlu_text2(*args, **kargs):
return {'NMAS_PRFX_SESSION_ID': 'FAKE',
'NMAS_PRFX_TRANSACTION_ID': '1',
'audio_transfer_info': {'audio_id': 1,
'end_time': '20170914031931004',
'nss_server': 'nss-server:1',
'packages': [{'bytes': 640, 'time': '20170914031929988'},
{'bytes': 640, 'time': '20170914031930916'}],
'start_time': '20170914031929862'},
'cadence_regulatable_result': 'completeRecognition',
'final_response': 1,
'message': 'query_response',
'nlu_interpretation_results': {'final_response': 1,
'payload': {'diagnostic_info': {'adk_dialog_manager_status': 'undefined',
'application': 'FAKE',
'context_tag': 'general',
'ext_map_time': '0',
'fieldId': 'dm_main',
'int_map_time': '0',
'nlps_host': 'FAKE',
'nlps_ip': '172.17.70.5',
'nlps_nlu_type': 'quicknludynamic',
'nlps_profile': 'QUICKNLUDYN',
'nlps_profile_package': 'QUICKNLU',
'nlps_profile_package_version': 'FAKE',
'nlps_version': 'FAKE',
'nlu_annotator': 'FAKE',
'nlu_component_flow': 'FAKE',
'nlu_language': 'eng-USA',
'nlu_use_literal_annotator': '0',
'nlu_version': 'FAKE',
'nmaid': 'FAKE',
'qws_project_id': 'FAKE',
'third_party_delay': '2',
'timing': {'finalRespSentDelay': '106', 'intermediateRespSentDelay': '5'}},
'interpretations': [{'action': {'intent': {'confidence': 1.0,
'value': 'nlu_test__test'}},
'literal': 'What time is it'}],
'type': 'nlu-1.0'},
'payload_format': 'nlu-base',
'payload_version': '1.0',
'status': 'success'},
'prompt': '',
'result_format': 'nlu_interpretation_results',
'result_type': 'NDSP_ASR_APP_CMD',
'status_code': 0,
'transaction_id': 1}
|
ultimate.py | # -*- coding: utf-8 -*-
import os
import sys
import threading
import time
from glob import glob
import config
sys.path.append(os.path.join(sys.path[0], "../../"))
import schedule # noqa: E402
from instabot import Bot, utils # noqa: E402
# Build the bot with the word-list files configured in config.py and log in.
bot = Bot(
    comments_file=config.COMMENTS_FILE,
    blacklist_file=config.BLACKLIST_FILE,
    whitelist_file=config.WHITELIST_FILE,
    friends_file=config.FRIENDS_FILE,
)
bot.login()
bot.logger.info("ULTIMATE script. Safe to run 24/7!")
# Randomized input pools used by the scheduled jobs below.
random_user_file = utils.file(config.USERS_FILE)
random_hashtag_file = utils.file(config.HASHTAGS_FILE)
photo_captions_file = utils.file(config.PHOTO_CAPTIONS_FILE)
posted_pic_list = utils.file(config.POSTED_PICS_FILE).list
# Candidate pictures for upload: basenames of *.jpg in PICS_PATH, sorted.
pics = sorted([os.path.basename(x) for x in glob(config.PICS_PATH + "/*.jpg")])
def stats():
    """Persist statistics for the bot's own account."""
    bot.save_user_stats(bot.user_id)
def like_hashtags():
    """Like medias from a random hashtag (700 likes/day spread over 24 runs)."""
    bot.like_hashtag(random_hashtag_file.random(), amount=700 // 24)
def like_timeline():
    """Like timeline posts (300 likes/day spread over 24 runs)."""
    bot.like_timeline(amount=300 // 24)
def like_followers_from_random_user_file():
    """Like 3 medias from each follower of a randomly picked seed user."""
    bot.like_followers(random_user_file.random(), nlikes=3)
def follow_followers():
    """Follow a configured number of followers of a random seed user."""
    bot.follow_followers(
        random_user_file.random(), nfollows=config.NUMBER_OF_FOLLOWERS_TO_FOLLOW
    )
def comment_medias():
    """Comment on the current timeline medias."""
    bot.comment_medias(bot.get_timeline_medias())
def unfollow_non_followers():
    """Unfollow a configured number of accounts that do not follow back."""
    bot.unfollow_non_followers(
        n_to_unfollows=config.NUMBER_OF_NON_FOLLOWERS_TO_UNFOLLOW
    )
def follow_users_from_hashtag_file():
    """Follow users found under a random hashtag from the hashtag file."""
    bot.follow_users(bot.get_hashtag_users(random_hashtag_file.random()))
def comment_hashtag():
    """Comment on medias under a random hashtag from the hashtag file."""
    hashtag = random_hashtag_file.random()
    bot.logger.info("Commenting on hashtag: " + hashtag)
    bot.comment_hashtag(hashtag)
def upload_pictures():
    """Upload the next not-yet-posted picture from the pics folder.

    Posts at most one picture per invocation: skips already-posted pics,
    uploads the first new one with a random caption + follow message,
    records it as posted, then comments the fresh post with the configured
    hashtags. Any failure is logged, never raised.
    """
    try:
        for pic in pics:
            if pic in posted_pic_list:
                continue
            caption = photo_captions_file.random()
            full_caption = caption + "\n" + config.FOLLOW_MESSAGE
            bot.logger.info("Uploading pic with caption: " + caption)
            # BUGFIX: join with the OS separator — PICS_PATH has no trailing
            # slash (the glob above appends "/" explicitly), so plain
            # concatenation produced a broken path like "picsfoo.jpg".
            bot.upload_photo(os.path.join(config.PICS_PATH, pic), caption=full_caption)
            if bot.api.last_response.status_code != 200:
                bot.logger.error("Something went wrong, read the following ->\n")
                bot.logger.error(bot.api.last_response)
                break
            # Record success in the same file the posted list was loaded from.
            # BUGFIX: was a hard-coded "pics.txt", which silently diverges from
            # config.POSTED_PICS_FILE whenever that setting points elsewhere.
            # (The old `if pic not in posted_pic_list` guard was always true
            # here — posted pics are skipped by the `continue` above.)
            posted_pic_list.append(pic)
            with open(config.POSTED_PICS_FILE, "a") as f:
                f.write(pic + "\n")
            bot.logger.info("Succesfully uploaded: " + pic)
            # Comment the photo we just posted with the configured hashtags.
            bot.logger.info("Commenting uploaded photo with hashtags...")
            medias = bot.get_your_medias()
            last_photo = medias[0]  # most recent media = the upload above
            bot.comment(last_photo, config.PICS_HASHTAGS)
            break
    except Exception as e:
        bot.logger.error("Couldn't upload pic")
        bot.logger.error(str(e))
def put_non_followers_on_blacklist():
    """Blacklist every followed account that neither follows back nor is a friend."""
    try:
        bot.logger.info("Creating non-followers list")
        # friends_file acts as a whitelist of user ids that must never be blacklisted
        not_following_back = (
            set(bot.following) - set(bot.followers) - bot.friends_file.set
        )
        for user_id in not_following_back:
            bot.blacklist_file.append(user_id, allow_duplicates=False)
        bot.logger.info("Done.")
    except Exception as e:
        bot.logger.error("Couldn't update blacklist")
        bot.logger.error(str(e))
def run_threaded(job_fn):
    """Fire *job_fn* on its own worker thread so slow jobs never block the scheduler."""
    threading.Thread(target=job_fn).start()
# Job schedule: every job runs via run_threaded so a slow job cannot delay the
# scheduler loop below.
schedule.every(1).hour.do(run_threaded, stats)
schedule.every(8).hours.do(run_threaded, like_hashtags)
schedule.every(2).hours.do(run_threaded, like_timeline)
schedule.every(1).days.at("16:00").do(
    run_threaded, like_followers_from_random_user_file
)
schedule.every(2).days.at("11:00").do(run_threaded, follow_followers)
schedule.every(16).hours.do(run_threaded, comment_medias)
schedule.every(1).days.at("08:00").do(run_threaded, unfollow_non_followers)
schedule.every(12).hours.do(run_threaded, follow_users_from_hashtag_file)
schedule.every(6).hours.do(run_threaded, comment_hashtag)
# Upload job kept disabled; re-enable to auto-post from the pics folder.
#schedule.every(1).days.at("21:28").do(run_threaded, upload_pictures)
schedule.every(4).days.at("07:50").do(run_threaded, put_non_followers_on_blacklist)
# Main loop: poll for due jobs once per second, forever.
while True:
    schedule.run_pending()
    time.sleep(1)
|
common.py | """Test the helper method for writing tests."""
import asyncio
from collections import OrderedDict
from datetime import timedelta
import functools as ft
import json
import os
import sys
from unittest.mock import patch, MagicMock, Mock
from io import StringIO
import logging
import threading
from contextlib import contextmanager
from homeassistant import auth, core as ha, config_entries
from homeassistant.auth import (
models as auth_models, auth_store, providers as auth_providers)
from homeassistant.setup import setup_component, async_setup_component
from homeassistant.config import async_process_component_config
from homeassistant.helpers import (
intent, entity, restore_state, entity_registry,
entity_platform, storage, device_registry)
from homeassistant.util.unit_system import METRIC_SYSTEM
import homeassistant.util.dt as date_util
import homeassistant.util.yaml as yaml
from homeassistant.const import (
STATE_ON, STATE_OFF, DEVICE_DEFAULT_NAME, EVENT_TIME_CHANGED,
EVENT_STATE_CHANGED, EVENT_PLATFORM_DISCOVERED, ATTR_SERVICE,
ATTR_DISCOVERED, SERVER_PORT, EVENT_HOMEASSISTANT_CLOSE)
from homeassistant.components import mqtt, recorder
from homeassistant.util.async_ import (
run_callback_threadsafe, run_coroutine_threadsafe)
_TEST_INSTANCE_PORT = SERVER_PORT
_LOGGER = logging.getLogger(__name__)
INSTANCES = []
CLIENT_ID = 'https://example.com/app'
CLIENT_REDIRECT_URI = 'https://example.com/app/callback'
def threadsafe_callback_factory(func):
    """Create threadsafe functions out of callbacks.

    Callback needs to have `hass` as first argument.
    """
    @ft.wraps(func)
    def wrapper(*args, **kwargs):
        """Schedule the callback on hass.loop and block for its result."""
        hass = args[0]
        future = run_callback_threadsafe(
            hass.loop, ft.partial(func, *args, **kwargs))
        return future.result()
    return wrapper
def threadsafe_coroutine_factory(func):
    """Create threadsafe functions out of coroutine.

    Callback needs to have `hass` as first argument.
    """
    @ft.wraps(func)
    def wrapper(*args, **kwargs):
        """Run the coroutine on hass.loop and block for its result."""
        hass = args[0]
        coro = func(*args, **kwargs)
        return run_coroutine_threadsafe(coro, hass.loop).result()
    return wrapper
def get_test_config_dir(*add_path):
    """Return a path to a test config dir."""
    base = os.path.dirname(__file__)
    return os.path.join(base, 'testing_config', *add_path)
def get_test_home_assistant():
    """Return a Home Assistant object pointing at test config directory."""
    # Windows requires the proactor loop for subprocess support
    if sys.platform == "win32":
        loop = asyncio.ProactorEventLoop()
    else:
        loop = asyncio.new_event_loop()
    hass = loop.run_until_complete(async_test_home_assistant(loop))
    # set when run_loop exits, so stop_hass knows the loop is idle
    stop_event = threading.Event()
    def run_loop():
        """Run event loop."""
        # pylint: disable=protected-access
        loop._thread_ident = threading.get_ident()
        loop.run_forever()
        stop_event.set()
    orig_stop = hass.stop
    def start_hass(*mocks):
        """Start hass."""
        run_coroutine_threadsafe(hass.async_start(), loop=hass.loop).result()
    def stop_hass():
        """Stop hass."""
        orig_stop()
        # wait for run_forever to return before closing the loop
        stop_event.wait()
        loop.close()
    # replace start/stop with thread-safe wrappers usable from the test thread
    hass.start = start_hass
    hass.stop = stop_hass
    # the event loop runs on its own (non-daemon) thread for the test's lifetime
    threading.Thread(name="LoopThread", target=run_loop, daemon=False).start()
    return hass
# pylint: disable=protected-access
@asyncio.coroutine
def async_test_home_assistant(loop):
    """Return a Home Assistant object pointing at test config dir."""
    hass = ha.HomeAssistant(loop)
    # Configuration is injected below; prevent loading from disk.
    hass.config.async_load = Mock()
    store = auth_store.AuthStore(hass)
    hass.auth = auth.AuthManager(hass, store, {}, {})
    ensure_auth_manager_loaded(hass.auth)
    INSTANCES.append(hass)

    orig_async_add_job = hass.async_add_job
    orig_async_add_executor_job = hass.async_add_executor_job
    orig_async_create_task = hass.async_create_task

    def async_add_job(target, *args):
        """Add job."""
        # Mocks are not awaitable: call them eagerly and wrap the result
        # in a finished coroutine so callers can still await it.
        if isinstance(target, Mock):
            return mock_coro(target(*args))
        return orig_async_add_job(target, *args)

    def async_add_executor_job(target, *args):
        """Add executor job."""
        if isinstance(target, Mock):
            return mock_coro(target(*args))
        return orig_async_add_executor_job(target, *args)

    def async_create_task(coroutine):
        """Create task."""
        if isinstance(coroutine, Mock):
            return mock_coro()
        return orig_async_create_task(coroutine)

    hass.async_add_job = async_add_job
    hass.async_add_executor_job = async_add_executor_job
    hass.async_create_task = async_create_task

    # Deterministic test configuration.
    hass.config.location_name = 'test home'
    hass.config.config_dir = get_test_config_dir()
    hass.config.latitude = 32.87336
    hass.config.longitude = -117.22743
    hass.config.elevation = 0
    hass.config.time_zone = date_util.get_time_zone('US/Pacific')
    hass.config.units = METRIC_SYSTEM
    hass.config.skip_pip = True

    hass.config_entries = config_entries.ConfigEntries(hass, {})
    hass.config_entries._entries = []
    hass.config_entries._store._async_ensure_stop_listener = lambda: None
    hass.state = ha.CoreState.running

    # Mock async_start
    orig_start = hass.async_start

    @asyncio.coroutine
    def mock_async_start():
        """Start the mocking."""
        # We only mock time during tests and we want to track tasks
        with patch('homeassistant.core._async_create_timer'), \
                patch.object(hass, 'async_stop_track_tasks'):
            yield from orig_start()

    hass.async_start = mock_async_start

    @ha.callback
    def clear_instance(event):
        """Clear global instance."""
        INSTANCES.remove(hass)

    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_CLOSE, clear_instance)

    return hass
def get_test_instance_port():
    """Return unused port for running test instance.

    The socket that holds the default port does not get released when we stop
    HA in a different test case. Until I have figured out what is going on,
    let's run each test on a different port.
    """
    global _TEST_INSTANCE_PORT
    # NOTE(review): not thread-safe; assumes tests allocate ports serially.
    _TEST_INSTANCE_PORT += 1
    return _TEST_INSTANCE_PORT
@ha.callback
def async_mock_service(hass, domain, service, schema=None):
    """Set up a fake service & return a calls log list to this service."""
    calls = []

    @ha.callback
    def mock_service_log(call):  # pylint: disable=unnecessary-lambda
        """Mock service call."""
        # Every invocation is appended so tests can assert on call history.
        calls.append(call)

    hass.services.async_register(
        domain, service, mock_service_log, schema=schema)

    return calls


# Threadsafe variant for use from sync test code.
mock_service = threadsafe_callback_factory(async_mock_service)
@ha.callback
def async_mock_intent(hass, intent_typ):
    """Set up a fake intent handler and return the list of handled intents."""
    intents = []

    class MockIntentHandler(intent.IntentHandler):
        # Handler that records every intent it receives.
        intent_type = intent_typ

        @asyncio.coroutine
        def async_handle(self, intent):
            """Handle the intent."""
            # NOTE: the `intent` parameter shadows the imported module here.
            intents.append(intent)
            return intent.create_response()

    intent.async_register(hass, MockIntentHandler())

    return intents
@ha.callback
def async_fire_mqtt_message(hass, topic, payload, qos=0, retain=False):
    """Fire the MQTT message."""
    # MQTT payloads are bytes on the wire; encode str payloads first.
    if isinstance(payload, str):
        payload = payload.encode('utf-8')
    msg = mqtt.Message(topic, payload, qos, retain)
    hass.async_run_job(hass.data['mqtt']._mqtt_on_message, None, None, msg)


# Threadsafe variant for use from sync test code.
fire_mqtt_message = threadsafe_callback_factory(async_fire_mqtt_message)
@ha.callback
def async_fire_time_changed(hass, time):
    """Fire a time changed event with the given (UTC-converted) time."""
    hass.bus.async_fire(EVENT_TIME_CHANGED, {'now': date_util.as_utc(time)})


# Threadsafe variant for use from sync test code.
fire_time_changed = threadsafe_callback_factory(async_fire_time_changed)
def fire_service_discovered(hass, service, info):
    """Fire a platform-discovered event on the bus.

    (Original docstring said "Fire the MQTT message" — copy-paste error.)
    """
    hass.bus.fire(EVENT_PLATFORM_DISCOVERED, {
        ATTR_SERVICE: service,
        ATTR_DISCOVERED: info
    })
def load_fixture(filename):
    """Return the text contents of a file in the fixtures directory."""
    fixture_path = os.path.join(
        os.path.dirname(__file__), 'fixtures', filename)
    with open(fixture_path, encoding='utf-8') as fixture_file:
        return fixture_file.read()
def mock_state_change_event(hass, new_state, old_state=None):
    """Mock a state change event."""
    event_data = {
        'entity_id': new_state.entity_id,
        'new_state': new_state,
    }

    # old_state is only included when provided (mirrors a first-seen state).
    if old_state:
        event_data['old_state'] = old_state

    hass.bus.fire(EVENT_STATE_CHANGED, event_data, context=new_state.context)
@asyncio.coroutine
def async_mock_mqtt_component(hass, config=None):
    """Mock the MQTT component with a patched paho client."""
    if config is None:
        config = {mqtt.CONF_BROKER: 'mock-broker'}

    with patch('paho.mqtt.client.Client') as mock_client:
        # Successful connect/subscribe/publish return codes.
        mock_client().connect.return_value = 0
        mock_client().subscribe.return_value = (0, 0)
        mock_client().publish.return_value = (0, 0)

        result = yield from async_setup_component(hass, mqtt.DOMAIN, {
            mqtt.DOMAIN: config
        })
        assert result

        # Wrap the real MQTT object so calls can be asserted on while
        # the original behavior is preserved.
        hass.data['mqtt'] = MagicMock(spec_set=hass.data['mqtt'],
                                      wraps=hass.data['mqtt'])

        return hass.data['mqtt']


# Threadsafe variant for use from sync test code.
mock_mqtt_component = threadsafe_coroutine_factory(async_mock_mqtt_component)
@ha.callback
def mock_component(hass, component):
    """Mark a component as set up.

    Raises:
        AssertionError: if the component was already set up.
    """
    if component in hass.config.components:
        # BUG FIX: the original built the AssertionError but never raised
        # it, so double-setup was silently ignored; raise so tests fail
        # loudly on the duplicate.
        raise AssertionError(
            "Component {} is already setup".format(component))

    hass.config.components.add(component)
def mock_registry(hass, mock_entries=None):
    """Mock the Entity Registry."""
    registry = entity_registry.EntityRegistry(hass)
    registry.entities = mock_entries or OrderedDict()

    async def _get_reg():
        return registry

    # Pre-resolve the normally lazily-loaded registry with a finished task.
    hass.data[entity_registry.DATA_REGISTRY] = \
        hass.loop.create_task(_get_reg())
    return registry
def mock_device_registry(hass, mock_entries=None):
    """Mock the Device Registry."""
    registry = device_registry.DeviceRegistry(hass)
    registry.devices = mock_entries or OrderedDict()

    async def _get_reg():
        return registry

    # Pre-resolve the normally lazily-loaded registry with a finished task.
    hass.data[device_registry.DATA_REGISTRY] = \
        hass.loop.create_task(_get_reg())
    return registry
class MockGroup(auth_models.Group):
    """Mock a group in Home Assistant."""

    def __init__(self, id=None, name='Mock Group'):
        """Mock a group."""
        kwargs = {
            'name': name
        }
        # Only pass `id` when given so the base class can generate one.
        if id is not None:
            kwargs['id'] = id
        super().__init__(**kwargs)

    def add_to_hass(self, hass):
        """Test helper to add entry to hass."""
        return self.add_to_auth_manager(hass.auth)

    def add_to_auth_manager(self, auth_mgr):
        """Test helper to add entry to hass."""
        ensure_auth_manager_loaded(auth_mgr)
        auth_mgr._store._groups[self.id] = self
        return self
class MockUser(auth_models.User):
    """Mock a user in Home Assistant."""

    def __init__(self, id=None, is_owner=False, is_active=True,
                 name='Mock User', system_generated=False, groups=None):
        """Initialize mock user."""
        kwargs = {
            'is_owner': is_owner,
            'is_active': is_active,
            'name': name,
            'system_generated': system_generated,
            'groups': groups or [],
        }
        # Only pass `id` when given so the base class can generate one.
        if id is not None:
            kwargs['id'] = id
        super().__init__(**kwargs)

    def add_to_hass(self, hass):
        """Test helper to add entry to hass."""
        return self.add_to_auth_manager(hass.auth)

    def add_to_auth_manager(self, auth_mgr):
        """Test helper to add entry to hass."""
        ensure_auth_manager_loaded(auth_mgr)
        auth_mgr._store._users[self.id] = self
        return self
async def register_auth_provider(hass, config):
    """Register an auth provider built from *config* and return it.

    Raises:
        ValueError: if a provider with the same (type, id) already exists.
    """
    provider = await auth_providers.auth_provider_from_config(
        hass, hass.auth._store, config)
    assert provider is not None, 'Invalid config specified'
    key = (provider.type, provider.id)
    providers = hass.auth._providers

    if key in providers:
        raise ValueError('Provider already registered')

    providers[key] = provider
    return provider
@ha.callback
def ensure_auth_manager_loaded(auth_mgr):
    """Ensure an auth manager is considered loaded."""
    store = auth_mgr._store
    # A None user table means the store never loaded; seed the defaults.
    if store._users is None:
        store._set_defaults()
class MockModule:
    """Representation of a fake module."""

    # pylint: disable=invalid-name
    def __init__(self, domain=None, dependencies=None, setup=None,
                 requirements=None, config_schema=None, platform_schema=None,
                 async_setup=None, async_setup_entry=None,
                 async_unload_entry=None):
        """Initialize the mock module."""
        self.DOMAIN = domain
        self.DEPENDENCIES = dependencies or []
        self.REQUIREMENTS = requirements or []

        if config_schema is not None:
            self.CONFIG_SCHEMA = config_schema

        if platform_schema is not None:
            self.PLATFORM_SCHEMA = platform_schema

        if setup is not None:
            # We run this in executor, wrap it in function
            self.setup = lambda *args: setup(*args)

        if async_setup is not None:
            self.async_setup = async_setup

        if setup is None and async_setup is None:
            # Fall back to a no-op async setup that reports success.
            self.async_setup = mock_coro_func(True)

        if async_setup_entry is not None:
            self.async_setup_entry = async_setup_entry

        if async_unload_entry is not None:
            self.async_unload_entry = async_unload_entry
class MockPlatform:
    """Provide a fake platform."""

    # pylint: disable=invalid-name
    def __init__(self, setup_platform=None, dependencies=None,
                 platform_schema=None, async_setup_platform=None,
                 async_setup_entry=None, scan_interval=None):
        """Initialize the platform."""
        self.DEPENDENCIES = dependencies or []

        if platform_schema is not None:
            self.PLATFORM_SCHEMA = platform_schema

        if scan_interval is not None:
            self.SCAN_INTERVAL = scan_interval

        if setup_platform is not None:
            # We run this in executor, wrap it in function
            self.setup_platform = lambda *args: setup_platform(*args)

        if async_setup_platform is not None:
            self.async_setup_platform = async_setup_platform

        if setup_platform is None and async_setup_platform is None:
            # Fall back to a no-op async platform setup.
            self.async_setup_platform = mock_coro_func()
class MockEntityPlatform(entity_platform.EntityPlatform):
    """Mock class with some mock defaults."""

    def __init__(
        self, hass,
        logger=None,
        domain='test_domain',
        platform_name='test_platform',
        platform=None,
        scan_interval=timedelta(seconds=15),
        entity_namespace=None,
        async_entities_added_callback=lambda: None
    ):
        """Initialize a mock entity platform."""
        if logger is None:
            logger = logging.getLogger('homeassistant.helpers.entity_platform')

        # Otherwise the constructor will blow up.
        if (isinstance(platform, Mock) and
                isinstance(platform.PARALLEL_UPDATES, Mock)):
            platform.PARALLEL_UPDATES = 0

        super().__init__(
            hass=hass,
            logger=logger,
            domain=domain,
            platform_name=platform_name,
            platform=platform,
            scan_interval=scan_interval,
            entity_namespace=entity_namespace,
            async_entities_added_callback=async_entities_added_callback,
        )
class MockToggleDevice(entity.ToggleEntity):
    """Provide a mock toggle device.

    Every property access and service call is recorded in ``self.calls``
    as ``(method_name, kwargs)`` tuples for later assertions.
    """

    def __init__(self, name, state):
        """Initialize the mock device."""
        self._name = name or DEVICE_DEFAULT_NAME
        self._state = state
        self.calls = []

    @property
    def name(self):
        """Return the name of the device if any."""
        self.calls.append(('name', {}))
        return self._name

    @property
    def state(self):
        """Return the state of the device if any."""
        self.calls.append(('state', {}))
        return self._state

    @property
    def is_on(self):
        """Return true if device is on."""
        self.calls.append(('is_on', {}))
        return self._state == STATE_ON

    def turn_on(self, **kwargs):
        """Turn the device on."""
        self.calls.append(('turn_on', kwargs))
        self._state = STATE_ON

    def turn_off(self, **kwargs):
        """Turn the device off."""
        self.calls.append(('turn_off', kwargs))
        self._state = STATE_OFF

    def last_call(self, method=None):
        """Return the last call, optionally filtered by method name."""
        if not self.calls:
            return None
        if method is None:
            return self.calls[-1]
        try:
            # Scan newest-first for the most recent matching call.
            return next(call for call in reversed(self.calls)
                        if call[0] == method)
        except StopIteration:
            return None
class MockConfigEntry(config_entries.ConfigEntry):
    """Helper for creating config entries that adds some defaults."""

    def __init__(self, *, domain='test', data=None, version=0, entry_id=None,
                 source=config_entries.SOURCE_USER, title='Mock Title',
                 state=None,
                 connection_class=config_entries.CONN_CLASS_UNKNOWN):
        """Initialize a mock config entry."""
        kwargs = {
            'entry_id': entry_id or 'mock-id',
            'domain': domain,
            'data': data or {},
            'version': version,
            'title': title,
            'connection_class': connection_class,
        }
        # Source/state are only forwarded when explicitly given so the
        # base class defaults apply otherwise.
        if source is not None:
            kwargs['source'] = source
        if state is not None:
            kwargs['state'] = state
        super().__init__(**kwargs)

    def add_to_hass(self, hass):
        """Test helper to add entry to hass."""
        hass.config_entries._entries.append(self)

    def add_to_manager(self, manager):
        """Test helper to add entry to entry manager."""
        manager._entries.append(self)
def patch_yaml_files(files_dict, endswith=True):
    """Patch load_yaml with a dictionary of yaml files.

    *files_dict* maps filenames (or filename suffixes when ``endswith`` is
    True) to their YAML content as strings.
    """
    # Match using endswith, searching the LONGEST suffix first so the most
    # specific entry wins. (BUG FIX: the original sorted ascending, which
    # made the shortest suffix match first, contradicting its own comment.)
    matchlist = sorted(files_dict.keys(), key=len, reverse=True) \
        if endswith else []

    def mock_open_f(fname, **_):
        """Mock open() in the yaml module, used by load_yaml."""
        # Return the mocked file on full match
        if fname in files_dict:
            _LOGGER.debug("patch_yaml_files match %s", fname)
            res = StringIO(files_dict[fname])
            setattr(res, 'name', fname)
            return res

        # Match using endswith
        for ends in matchlist:
            if fname.endswith(ends):
                _LOGGER.debug("patch_yaml_files end match %s: %s", ends, fname)
                res = StringIO(files_dict[ends])
                setattr(res, 'name', fname)
                return res

        # Fallback for hass.components (i.e. services.yaml)
        if 'homeassistant/components' in fname:
            _LOGGER.debug("patch_yaml_files using real file: %s", fname)
            return open(fname, encoding='utf-8')

        # Not found
        raise FileNotFoundError("File not found: {}".format(fname))

    return patch.object(yaml, 'open', mock_open_f, create=True)
def mock_coro(return_value=None, exception=None):
    """Return a coroutine that resolves to *return_value* or raises."""
    coro_factory = mock_coro_func(return_value, exception)
    return coro_factory()
def mock_coro_func(return_value=None, exception=None):
    """Return a coroutine function that returns a value or raises.

    Uses a native ``async def`` coroutine instead of the deprecated
    ``@asyncio.coroutine`` decorator (deprecated since Python 3.8 and
    removed in 3.11); the file already uses ``async def`` elsewhere.
    """
    async def coro(*args, **kwargs):
        """Fake coroutine."""
        if exception:
            raise exception
        return return_value

    return coro
@contextmanager
def assert_setup_component(count, domain=None):
    """Collect valid configuration from setup_component.

    - count: The amount of valid platforms that should be setup
    - domain: The domain to count is optional. It can be automatically
      determined most of the time

    Use as a context manager around setup.setup_component
        with assert_setup_component(0) as result_config:
            setup_component(hass, domain, start_config)
            # using result_config is optional
    """
    config = {}

    @ha.callback
    def mock_psc(hass, config_input, domain):
        """Mock the prepare_setup_component to capture config."""
        res = async_process_component_config(
            hass, config_input, domain)
        config[domain] = None if res is None else res.get(domain)
        _LOGGER.debug("Configuration for %s, Validated: %s, Original %s",
                      domain, config[domain], config_input.get(domain))
        return res

    assert isinstance(config, dict)
    with patch('homeassistant.config.async_process_component_config',
               mock_psc):
        yield config

    if domain is None:
        # Without an explicit domain exactly one must have been captured.
        assert len(config) == 1, ('assert_setup_component requires DOMAIN: {}'
                                  .format(list(config.keys())))
        domain = list(config.keys())[0]

    res = config.get(domain)
    res_len = 0 if res is None else len(res)
    assert res_len == count, 'setup_component failed, expected {} got {}: {}' \
        .format(count, res_len, res)
def init_recorder_component(hass, add_config=None):
    """Initialize the recorder with an in-memory SQLite database."""
    config = dict(add_config) if add_config else {}
    config[recorder.CONF_DB_URL] = 'sqlite://'  # In memory DB

    # Schema migration is skipped; tests start from a fresh database.
    with patch('homeassistant.components.recorder.migration.migrate_schema'):
        assert setup_component(hass, recorder.DOMAIN,
                               {recorder.DOMAIN: config})
        assert recorder.DOMAIN in hass.config.components
    _LOGGER.info("In-memory recorder successfully started")
def mock_restore_cache(hass, states):
    """Mock the DATA_RESTORE_CACHE."""
    key = restore_state.DATA_RESTORE_CACHE
    hass.data[key] = {
        state.entity_id: state for state in states}
    _LOGGER.debug('Restore cache: %s', hass.data[key])
    # Dict collapses duplicate entity_ids; lengths differ if input had dupes.
    assert len(hass.data[key]) == len(states), \
        "Duplicate entity_id? {}".format(states)
    # Restore-state helpers only read the cache while hass is starting.
    hass.state = ha.CoreState.starting
    mock_component(hass, recorder.DOMAIN)
class MockDependency:
    """Decorator and context manager to mock-install a dependency tree."""

    def __init__(self, root, *args):
        """Initialize with the root module name and dotted submodule paths."""
        self.root = root
        self.submodules = args

    def __enter__(self):
        """Start mocking: insert MagicMocks into sys.modules."""
        def resolve(mock, path):
            """Walk the attribute path on the mock, creating children."""
            if not path:
                return mock
            return resolve(getattr(mock, path[0]), path[1:])

        base = MagicMock()
        to_mock = {
            "{}.{}".format(self.root, tom): resolve(base, tom.split('.'))
            for tom in self.submodules
        }
        to_mock[self.root] = base

        self.patcher = patch.dict('sys.modules', to_mock)
        self.patcher.start()
        return base

    def __exit__(self, *exc):
        """Stop mocking and restore sys.modules."""
        self.patcher.stop()
        return False

    def __call__(self, func):
        """Apply as decorator: the mocked base is appended as an argument."""
        @ft.wraps(func)
        def run_mocked(*args, **kwargs):
            """Run with mocked dependencies."""
            with self as base:
                args = list(args) + [base]
                # BUG FIX: the original discarded func's return value,
                # so decorated callables always returned None.
                return func(*args, **kwargs)

        return run_mocked
class MockEntity(entity.Entity):
    """Mock Entity class.

    Property values are supplied as keyword arguments; anything not
    supplied falls back to the base Entity implementation.
    """

    def __init__(self, **values):
        """Initialize an entity."""
        self._values = values

        if 'entity_id' in values:
            self.entity_id = values['entity_id']

    @property
    def name(self):
        """Return the name of the entity."""
        return self._handle('name')

    @property
    def should_poll(self):
        """Return the state of the polling."""
        return self._handle('should_poll')

    @property
    def unique_id(self):
        """Return the unique ID of the entity."""
        return self._handle('unique_id')

    @property
    def available(self):
        """Return True if entity is available."""
        return self._handle('available')

    @property
    def device_info(self):
        """Info how it links to a device."""
        return self._handle('device_info')

    def _handle(self, attr):
        """Return the overridden value or fall back to the parent class."""
        if attr in self._values:
            return self._values[attr]
        return getattr(super(), attr)
@contextmanager
def mock_storage(data=None):
    """Mock storage.

    Data is a dict {'key': {'version': version, 'data': data}}

    Written data will be converted to JSON to ensure JSON parsing works.
    """
    if data is None:
        data = {}

    orig_load = storage.Store._async_load

    async def mock_async_load(store):
        """Mock version of load."""
        if store._data is None:
            # No data to load
            if store.key not in data:
                return None

            mock_data = data.get(store.key)

            if 'data' not in mock_data or 'version' not in mock_data:
                _LOGGER.error('Mock data needs "version" and "data"')
                raise ValueError('Mock data needs "version" and "data"')

            store._data = mock_data

        # Route through original load so that we trigger migration
        loaded = await orig_load(store)
        _LOGGER.info('Loading data for %s: %s', store.key, loaded)
        return loaded

    def mock_write_data(store, path, data_to_write):
        """Mock version of write data."""
        # To ensure that the data can be serialized
        _LOGGER.info('Writing data to %s: %s', store.key, data_to_write)
        data[store.key] = json.loads(json.dumps(data_to_write))

    with patch('homeassistant.helpers.storage.Store._async_load',
               side_effect=mock_async_load, autospec=True), \
            patch('homeassistant.helpers.storage.Store._write_data',
                  side_effect=mock_write_data, autospec=True):
        yield data
async def flush_store(store):
    """Make sure all delayed writes of a store are written."""
    # No in-memory data means nothing is pending.
    if store._data is None:
        return

    await store._async_handle_write_data()
|
__init__.py | #
# Copyright 2013-2015 eNovance <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import datetime
import json
import threading
from concurrent import futures
import cotyledon
import croniter
from futurist import periodics
from oslo_config import cfg
from oslo_log import log
from oslo_utils import timeutils
from oslo_utils import uuidutils
import pytz
import six
from stevedore import extension
import aodh
from aodh import coordination
from aodh import keystone_client
from aodh import messaging
from aodh import queue
from aodh import storage
from aodh.storage import models
LOG = log.getLogger(__name__)
UNKNOWN = 'insufficient data'
OK = 'ok'
ALARM = 'alarm'
OPTS = [
cfg.BoolOpt('record_history',
default=True,
help='Record alarm change events.'
),
]
@six.add_metaclass(abc.ABCMeta)
class Evaluator(object):
    """Base class for alarm rule evaluator plugins."""

    def __init__(self, conf):
        self.conf = conf
        self.notifier = queue.AlarmNotifier(self.conf)
        # Lazily created handles; see the properties below.
        self.storage_conn = None
        self._ks_client = None
        self._alarm_change_notifier = None

    @property
    def ks_client(self):
        """Lazily created keystone client."""
        if self._ks_client is None:
            self._ks_client = keystone_client.get_client(self.conf)
        return self._ks_client

    @property
    def _storage_conn(self):
        """Lazily created storage connection."""
        if not self.storage_conn:
            self.storage_conn = storage.get_connection_from_config(self.conf)
        return self.storage_conn

    @property
    def alarm_change_notifier(self):
        """Lazily created messaging notifier for alarm change events."""
        if not self._alarm_change_notifier:
            transport = messaging.get_transport(self.conf)
            self._alarm_change_notifier = messaging.get_notifier(
                transport, publisher_id="aodh.evaluator")
        return self._alarm_change_notifier

    def _record_change(self, alarm, reason):
        """Persist and broadcast a state-transition change record."""
        if not self.conf.record_history:
            return
        # Renamed from `type`, which shadowed the builtin.
        change_type = models.AlarmChange.STATE_TRANSITION
        detail = json.dumps({'state': alarm.state,
                             'transition_reason': reason})
        user_id, project_id = self.ks_client.user_id, self.ks_client.project_id
        on_behalf_of = alarm.project_id
        now = timeutils.utcnow()
        severity = alarm.severity
        payload = dict(event_id=uuidutils.generate_uuid(),
                       alarm_id=alarm.alarm_id,
                       type=change_type,
                       detail=detail,
                       user_id=user_id,
                       project_id=project_id,
                       on_behalf_of=on_behalf_of,
                       timestamp=now,
                       severity=severity)
        try:
            self._storage_conn.record_alarm_change(payload)
        except aodh.NotImplementedError:
            # Backend does not support history; the notification below
            # is still emitted.
            pass
        notification = "alarm.state_transition"
        self.alarm_change_notifier.info({},
                                        notification, payload)

    def _refresh(self, alarm, state, reason, reason_data, always_record=False):
        """Refresh alarm state.

        Persists the new state, records the change and notifies listeners.
        Failures are logged only; the next evaluation cycle retries.
        """
        try:
            previous = alarm.state
            alarm.state = state
            alarm.state_reason = reason
            if previous != state or always_record:
                LOG.info('alarm %(id)s transitioning to %(state)s because '
                         '%(reason)s', {'id': alarm.alarm_id,
                                        'state': state,
                                        'reason': reason})
                try:
                    self._storage_conn.update_alarm(alarm)
                except storage.AlarmNotFound:
                    # BUG FIX: the original adjacent string literals were
                    # missing a space and logged "...the" + "alarm..."
                    # as "thealarm".
                    LOG.warning("Skip updating this alarm's state, the "
                                "alarm: %s has been deleted",
                                alarm.alarm_id)
                else:
                    self._record_change(alarm, reason)
                self.notifier.notify(alarm, previous, reason, reason_data)
            elif alarm.repeat_actions:
                self.notifier.notify(alarm, previous, reason, reason_data)
        except Exception:
            # retry will occur naturally on the next evaluation
            # cycle (unless alarm state reverts in the meantime)
            LOG.exception('alarm state update failed')

    @classmethod
    def within_time_constraint(cls, alarm):
        """Check whether the alarm is within at least one of its time limits.

        If there are none, then the answer is yes.
        """
        if not alarm.time_constraints:
            return True

        now_utc = timeutils.utcnow().replace(tzinfo=pytz.utc)
        for tc in alarm.time_constraints:
            tz = pytz.timezone(tc['timezone']) if tc['timezone'] else None
            now_tz = now_utc.astimezone(tz) if tz else now_utc
            start_cron = croniter.croniter(tc['start'], now_tz)
            if cls._is_exact_match(start_cron, now_tz):
                return True
            # start_cron.cur has changed in _is_exact_match(),
            # croniter cannot recover properly in some corner case.
            start_cron = croniter.croniter(tc['start'], now_tz)
            latest_start = start_cron.get_prev(datetime.datetime)
            duration = datetime.timedelta(seconds=tc['duration'])
            if latest_start <= now_tz <= latest_start + duration:
                return True
        return False

    @staticmethod
    def _is_exact_match(cron, ts):
        """Handle edge in case when both parameters are equal.

        Handle edge case where if the timestamp is the same as the
        cron point in time to the minute, croniter returns the previous
        start, not the current. We can check this by first going one
        step back and then one step forward and check if we are
        at the original point in time.
        """
        cron.get_prev()
        diff = (ts - cron.get_next(datetime.datetime)).total_seconds()
        return abs(diff) < 60  # minute precision

    @abc.abstractmethod
    def evaluate(self, alarm):
        """Interface definition.

        evaluate an alarm
        alarm Alarm: an instance of the Alarm
        """
class AlarmEvaluationService(cotyledon.Service):
    """Periodically evaluate all non-event alarms assigned to this worker."""

    PARTITIONING_GROUP_NAME = "alarm_evaluator"
    EVALUATOR_EXTENSIONS_NAMESPACE = "aodh.evaluator"

    def __init__(self, worker_id, conf):
        """Set up evaluators, coordination and the periodic worker thread."""
        super(AlarmEvaluationService, self).__init__(worker_id)
        self.conf = conf

        ef = lambda: futures.ThreadPoolExecutor(max_workers=10)

        self.periodic = periodics.PeriodicWorker.create(
            [], executor_factory=ef)

        # One evaluator plugin per alarm type, loaded via stevedore.
        self.evaluators = extension.ExtensionManager(
            namespace=self.EVALUATOR_EXTENSIONS_NAMESPACE,
            invoke_on_load=True,
            invoke_args=(self.conf,)
        )
        self.storage_conn = storage.get_connection_from_config(self.conf)

        self.partition_coordinator = coordination.PartitionCoordinator(
            self.conf)
        self.partition_coordinator.start()
        self.partition_coordinator.join_group(self.PARTITIONING_GROUP_NAME)

        # allow time for coordination if necessary
        delay_start = self.partition_coordinator.is_active()

        if self.evaluators:
            @periodics.periodic(spacing=self.conf.evaluation_interval,
                                run_immediately=not delay_start)
            def evaluate_alarms():
                self._evaluate_assigned_alarms()

            self.periodic.add(evaluate_alarms)

        if self.partition_coordinator.is_active():
            heartbeat_interval = min(self.conf.coordination.heartbeat,
                                     self.conf.evaluation_interval / 4)

            @periodics.periodic(spacing=heartbeat_interval,
                                run_immediately=True)
            def heartbeat():
                self.partition_coordinator.heartbeat()

            self.periodic.add(heartbeat)

        # Periodic worker runs in a daemon thread so it never blocks exit.
        t = threading.Thread(target=self.periodic.start)
        t.daemon = True
        t.start()

    def terminate(self):
        """Stop the periodic worker and leave the coordination group."""
        self.periodic.stop()
        self.partition_coordinator.stop()
        self.periodic.wait()

    def _evaluate_assigned_alarms(self):
        """Run one evaluation cycle over this worker's alarms."""
        try:
            alarms = self._assigned_alarms()
            LOG.info('initiating evaluation cycle on %d alarms',
                     len(alarms))
            for alarm in alarms:
                self._evaluate_alarm(alarm)
        except Exception:
            # Never let one broken cycle kill the periodic worker.
            LOG.exception('alarm evaluation cycle failed')

    def _evaluate_alarm(self, alarm):
        """Evaluate the alarms assigned to this evaluator."""
        if alarm.type not in self.evaluators:
            LOG.debug('skipping alarm %s: type unsupported', alarm.alarm_id)
            return

        LOG.debug('evaluating alarm %s', alarm.alarm_id)
        try:
            self.evaluators[alarm.type].obj.evaluate(alarm)
        except Exception:
            LOG.exception('Failed to evaluate alarm %s', alarm.alarm_id)

    def _assigned_alarms(self):
        """Return the enabled, non-event alarms owned by this partition."""
        # NOTE(r-mibu): The 'event' type alarms will be evaluated by the
        # event-driven alarm evaluator, so this periodical evaluator skips
        # those alarms.
        all_alarms = self.storage_conn.get_alarms(enabled=True,
                                                  exclude=dict(type='event'))
        all_alarms = list(all_alarms)
        all_alarm_ids = [a.alarm_id for a in all_alarms]
        selected = self.partition_coordinator.extract_my_subset(
            self.PARTITIONING_GROUP_NAME, all_alarm_ids)
        return list(filter(lambda a: a.alarm_id in selected, all_alarms))
|
serve.py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Usage:
Host a trained paddle model with one line command
Example:
python -m paddle_serving_server.serve --model ./serving_server_model --port 9292
"""
import argparse
import os
import json
import base64
import time
from multiprocessing import Process
import sys
if sys.version_info.major == 2:
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
elif sys.version_info.major == 3:
from http.server import BaseHTTPRequestHandler, HTTPServer
from contextlib import closing
import socket
from paddle_serving_server.env import CONF_HOME
import signal
from paddle_serving_server.util import *
from paddle_serving_server.env_check.run import *
# web_service.py is still used by Pipeline.
def port_is_available(port):
    """Return True when nothing accepts connections on 127.0.0.1:port.

    Note: True means the port is free (a connect attempt failed).
    """
    with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
        sock.settimeout(2)
        return sock.connect_ex(('127.0.0.1', port)) != 0
def format_gpu_to_strlist(unformatted_gpus):
    """Normalize a gpu-id specification to a list of comma-joined strings.

    Accepts an int, a str, a list of int/str, or None. Empty inputs map to
    ["-1"], meaning CPU only. Ids are validated against
    CUDA_VISIBLE_DEVICES (when set), and mixing CPU (-1) with real gpu ids
    within one op is rejected.

    Raises:
        ValueError: on an unsupported input type, an id below -1, or a mix
            of -1 and gpu ids in one op.
    """
    gpus_strlist = []
    if isinstance(unformatted_gpus, int):
        gpus_strlist = [str(unformatted_gpus)]
    elif isinstance(unformatted_gpus, list):
        if unformatted_gpus == [""] or len(unformatted_gpus) == 0:
            gpus_strlist = ["-1"]
        else:
            gpus_strlist = [str(x) for x in unformatted_gpus]
    elif isinstance(unformatted_gpus, str):
        if unformatted_gpus == "":
            gpus_strlist = ["-1"]
        else:
            gpus_strlist = [unformatted_gpus]
    elif unformatted_gpus is None:
        gpus_strlist = ["-1"]
    else:
        raise ValueError("error input of set_gpus")

    # check cuda visible
    if "CUDA_VISIBLE_DEVICES" in os.environ:
        env_gpus = os.environ["CUDA_VISIBLE_DEVICES"].split(",")
        for op_gpus_str in gpus_strlist:
            op_gpu_list = op_gpus_str.split(",")
            # op_gpu_list == ["-1"] means this op uses CPU,
            # so don't check cuda visibility for it.
            if op_gpu_list == ["-1"]:
                continue
            for ids in op_gpu_list:
                if ids not in env_gpus:
                    print("gpu_ids is not in CUDA_VISIBLE_DEVICES.")
                    exit(-1)

    # check gpuid is valid
    for op_gpus_str in gpus_strlist:
        int_ids = [int(ids) for ids in op_gpus_str.split(",")]
        if any(gpu_id < -1 for gpu_id in int_ids):
            raise ValueError("The input of gpuid error.")
        # BUG FIX: the original only rejected "-1" appearing AFTER a gpu id
        # ("0,-1") and silently accepted "-1,0"; reject the mix in any order.
        if -1 in int_ids and any(gpu_id >= 0 for gpu_id in int_ids):
            raise ValueError("You can not use CPU and GPU in one model.")
    return gpus_strlist
def is_gpu_mode(unformatted_gpus):
    """Return True if any configured id refers to a real GPU (>= 0)."""
    gpus_strlist = format_gpu_to_strlist(unformatted_gpus)
    return any(
        int(gpu_id) >= 0
        for op_gpus_str in gpus_strlist
        for gpu_id in op_gpus_str.split(",")
    )
def serve_args():
    """Build the command line parser for `serve` and parse sys.argv."""
    parser = argparse.ArgumentParser("serve")
    # Positional sub-command: start (default), stop, or environment check.
    parser.add_argument(
        "server",
        type=str,
        default="start",
        nargs="?",
        help="stop or start PaddleServing, check running environemnt")
    parser.add_argument(
        "--thread",
        type=int,
        default=4,
        help="Concurrency of server,[4,1024]",
        choices=range(4, 1025))
    parser.add_argument(
        "--port", type=int, default=9393, help="Port of the starting gpu")
    parser.add_argument(
        "--device", type=str, default="cpu", help="Type of device")
    parser.add_argument(
        "--gpu_ids", type=str, default="", nargs="+", help="gpu ids")
    parser.add_argument(
        "--runtime_thread_num",
        type=int,
        default=0,
        nargs="+",
        help="Number of each op")
    parser.add_argument(
        "--batch_infer_size",
        type=int,
        default=32,
        nargs="+",
        help="Max batch of each op")
    parser.add_argument(
        "--model", type=str, default="", nargs="+", help="Model for serving")
    # NOTE(review): help text duplicates --model's; presumably this is the
    # list of custom OP names — confirm against documentation.
    parser.add_argument(
        "--op", type=str, default="", nargs="+", help="Model for serving")
    parser.add_argument(
        "--workdir",
        type=str,
        default="workdir",
        help="Working dir of current service")
    parser.add_argument(
        "--use_mkl", default=False, action="store_true", help="Use MKL")
    parser.add_argument(
        "--precision",
        type=str,
        default="fp32",
        help="precision mode(fp32, int8, fp16, bf16)")
    parser.add_argument(
        "--use_calib",
        default=False,
        action="store_true",
        help="Use TensorRT Calibration")
    parser.add_argument(
        "--mem_optim_off",
        default=False,
        action="store_true",
        help="Memory optimize")
    parser.add_argument(
        "--ir_optim", default=False, action="store_true", help="Graph optimize")
    parser.add_argument(
        "--max_body_size",
        type=int,
        default=512 * 1024 * 1024,
        help="Limit sizes of messages")
    parser.add_argument(
        "--use_encryption_model",
        default=False,
        action="store_true",
        help="Use encryption model")
    parser.add_argument(
        "--use_trt", default=False, action="store_true", help="Use TensorRT")
    parser.add_argument(
        "--use_lite", default=False, action="store_true", help="Use PaddleLite")
    parser.add_argument(
        "--use_xpu", default=False, action="store_true", help="Use XPU")
    parser.add_argument(
        "--use_ascend_cl",
        default=False,
        action="store_true",
        help="Use Ascend CL")
    parser.add_argument(
        "--product_name",
        type=str,
        default=None,
        help="product_name for authentication")
    parser.add_argument(
        "--container_id",
        type=str,
        default=None,
        help="container_id for authentication")
    parser.add_argument(
        "--gpu_multi_stream",
        default=False,
        action="store_true",
        help="Use gpu_multi_stream")
    return parser.parse_args()
def start_gpu_card_model(gpu_mode, port, args):  # pylint: disable=doc-string-missing
    """Build and launch a single paddle-serving Server instance.

    Args:
        gpu_mode (bool): True to run inference on GPU, False for CPU.
        port (int): port the RPC service listens on.
        args: parsed CLI namespace produced by serve_args().

    The server is configured from ``args`` (threads, precision, memory/IR
    optimisation, TensorRT/Lite/XPU/Ascend toggles) and then blocks inside
    ``server.run_server()``.
    """
    device = "gpu" if gpu_mode else "cpu"
    import paddle_serving_server as serving
    op_maker = serving.OpMaker()
    op_seq_maker = serving.OpSeqMaker()
    server = serving.Server()
    thread_num = args.thread
    model = args.model
    # --mem_optim_off is a disable flag, so memory optimisation is its negation.
    mem_optim = not args.mem_optim_off
    ir_optim = args.ir_optim
    use_mkl = args.use_mkl
    max_body_size = args.max_body_size
    workdir = "{}_{}".format(args.workdir, port)
    dag_list_op = []
    if model == "":
        print("You must specify your serving model")
        exit(-1)
    for single_model_config in args.model:
        if os.path.isdir(single_model_config):
            pass
        elif os.path.isfile(single_model_config):
            raise ValueError("The input of --model should be a dir not file.")
    # If custom ops were passed via --op (e.g. GeneralDetectionOp GeneralRecOp),
    # register any unknown ones with the op maker / default engine list and
    # record the order they were given in dag_list_op.
    if args.op != "":
        for single_op in args.op:
            temp_str_list = single_op.split(':')
            if len(temp_str_list) >= 1 and temp_str_list[0] != '':
                if temp_str_list[0] not in op_maker.op_list:
                    op_maker.op_list.append(temp_str_list[0])
                # A ":0" suffix marks an op that has no inference engine of
                # its own; every other op gets a default engine entry.
                if not (len(temp_str_list) >= 2 and temp_str_list[1] == '0'):
                    server.default_engine_types.append(temp_str_list[0])
                dag_list_op.append(temp_str_list[0])
    read_op = op_maker.create('GeneralReaderOp')
    op_seq_maker.add_op(read_op)
    # If dag_list_op is non-empty, --op supplied a custom DAG: chain the ops
    # in exactly the order they were passed on the command line.
    if len(dag_list_op) > 0:
        for single_op in dag_list_op:
            op_seq_maker.add_op(op_maker.create(single_op))
    # Otherwise fall back to chaining one infer op per --model entry.
    else:
        for idx, single_model in enumerate(model):
            # Special case for OCR: the detection model depends on the
            # OpenCV-based GeneralDetectionOp, which is only compiled in for
            # OCR builds. Any other position uses the plain GeneralInferOp.
            # (Generating this from a python script may be revisited later.)
            if len(model) == 2 and idx == 0 and single_model == "ocr_det_model":
                infer_op_name = "GeneralDetectionOp"
            else:
                infer_op_name = "GeneralInferOp"
            general_infer_op = op_maker.create(infer_op_name)
            op_seq_maker.add_op(general_infer_op)
    general_response_op = op_maker.create('GeneralResponseOp')
    op_seq_maker.add_op(general_response_op)
    server.set_op_sequence(op_seq_maker.get_op_sequence())
    server.set_num_threads(thread_num)
    server.use_mkl(use_mkl)
    server.set_precision(args.precision)
    server.set_use_calib(args.use_calib)
    server.set_memory_optimize(mem_optim)
    server.set_ir_optimize(ir_optim)
    server.set_max_body_size(max_body_size)
    if args.use_trt and device == "gpu":
        server.set_trt()
        server.set_ir_optimize(True)  # TensorRT path forces IR optimisation on
    if args.gpu_multi_stream and device == "gpu":
        server.set_gpu_multi_stream()
    if args.runtime_thread_num:
        server.set_runtime_thread_num(args.runtime_thread_num)
    if args.batch_infer_size:
        server.set_batch_infer_size(args.batch_infer_size)
    if args.use_lite:
        server.set_lite()
    server.set_device(device)
    if args.use_xpu:
        server.set_xpu()
    if args.use_ascend_cl:
        server.set_ascend_cl()
    if args.product_name is not None:
        server.set_product_name(args.product_name)
    if args.container_id is not None:
        server.set_container_id(args.container_id)
    # Heterogeneous devices (GPU/XPU/Ascend) all receive their ids via set_gpuid.
    if gpu_mode or args.use_xpu or args.use_ascend_cl:
        server.set_gpuid(args.gpu_ids)
    server.load_model_config(model)
    server.prepare_server(
        workdir=workdir,
        port=port,
        device=device,
        use_encryption_model=args.use_encryption_model)
    server.run_server()
def start_multi_card(args, serving_port=None):  # pylint: disable=doc-string-missing
    """Dispatch to a CPU/Lite or GPU serving process depending on CLI flags.

    Args:
        args: parsed CLI namespace produced by serve_args().
        serving_port (int, optional): port to serve on; defaults to args.port.
    """
    if serving_port is None:
        serving_port = args.port
    if args.use_lite:
        print("run using paddle-lite.")
        # Paddle-Lite always runs through the CPU code path.
        start_gpu_card_model(False, serving_port, args)
    else:
        start_gpu_card_model(is_gpu_mode(args.gpu_ids), serving_port, args)
class MainService(BaseHTTPRequestHandler):
    """HTTP front-end used with --use_encryption_model.
    Accepts POSTs carrying a base64-encoded model decryption key, writes the
    key beside each model directory, forks the real serving process on a free
    port and answers with that port.  Relies on the module-level globals
    ``args``, ``p``, ``p_flag`` and ``serving_port``.
    """
    def get_available_port(self):
        # Probe ports 12000..12999 and return the first free one.
        # NOTE(review): implicitly returns None when all 1000 are taken —
        # confirm callers tolerate that.
        default_port = 12000
        for i in range(1000):
            if port_is_available(default_port + i):
                return default_port + i
    def start_serving(self):
        # Entry point of the child Process: blocks serving on the chosen port.
        start_multi_card(args, serving_port)
    def get_key(self, post_data):
        """Decode the "key" field and write it to <model_dir>/key for every model."""
        if "key" not in post_data:
            return False
        else:
            key = base64.b64decode(post_data["key"].encode())
            for single_model_config in args.model:
                if os.path.isfile(single_model_config):
                    raise ValueError(
                        "The input of --model should be a dir not file.")
                with open(single_model_config + "/key", "wb") as f:
                    f.write(key)
            return True
    def check_key(self, post_data):
        """Return True iff the posted key matches every stored <model_dir>/key."""
        if "key" not in post_data:
            return False
        else:
            key = base64.b64decode(post_data["key"].encode())
            for single_model_config in args.model:
                if os.path.isfile(single_model_config):
                    raise ValueError(
                        "The input of --model should be a dir not file.")
                with open(single_model_config + "/key", "rb") as f:
                    cur_key = f.read()
                # Any mismatching model key rejects the whole request.
                if key != cur_key:
                    return False
            return True
    def start(self, post_data):
        """Start serving on the first valid request; afterwards only re-check
        the key against the already-running process.  Returns True when the
        serving process is alive (and, if encrypted, the key matched)."""
        post_data = json.loads(post_data.decode('utf-8'))
        global p_flag
        if not p_flag:
            # First request: optionally install the decryption key, then fork
            # the serving process on an automatically chosen port.
            if args.use_encryption_model:
                print("waiting key for model")
                if not self.get_key(post_data):
                    print("not found key in request")
                    return False
            global serving_port
            global p
            serving_port = self.get_available_port()
            p = Process(target=self.start_serving)
            p.start()
            # Give the child time to come up before checking liveness.
            time.sleep(3)
            if p.is_alive():
                p_flag = True
            else:
                return False
        else:
            # Subsequent requests just validate the key against the live server.
            if p.is_alive():
                if not self.check_key(post_data):
                    return False
            else:
                return False
        return True
    def do_POST(self):
        """Answer every POST with JSON: the serving port on success."""
        content_length = int(self.headers['Content-Length'])
        post_data = self.rfile.read(content_length)
        if self.start(post_data):
            response = {"endpoint_list": [serving_port]}
        else:
            response = {"message": "start serving failed"}
        self.send_response(200)
        self.send_header('Content-type', 'application/json')
        self.end_headers()
        self.wfile.write(json.dumps(response).encode())
def stop_serving(command: str, port: int=None):
    '''
    Stop PaddleServing processes recorded in ProcessInfo.json.

    Args:
        command(str): stop->SIGINT, kill->SIGKILL
        port(int): Default to None, kill all processes in ProcessInfo.json.
            Not None, kill the specific process relating to port

    Returns:
        True if stop serving successfully.
        False if error occured

    Examples:
    ..  code-block:: python

        stop_serving("stop", 9494)
    '''
    filepath = os.path.join(CONF_HOME, "ProcessInfo.json")
    infoList = load_pid_file(filepath)
    if infoList is False:
        return False
    lastInfo = infoList[-1]
    for info in infoList:
        storedPort = info["port"]
        pid = info["pid"]
        if port is not None:
            if port in storedPort:
                kill_stop_process_by_pid(command, pid)
                infoList.remove(info)
                # Rewrite the bookkeeping file without the killed entry, or
                # remove it entirely once no processes remain.
                if len(infoList):
                    with open(filepath, "w") as fp:
                        json.dump(infoList, fp)
                else:
                    os.remove(filepath)
                return True
            else:
                # Only complain after every recorded process has been checked.
                if lastInfo == info:
                    raise ValueError(
                        "Please confirm the port [%s] you specified is correct."
                        % port)
                else:
                    pass
        else:
            # No port given: stop every recorded process, then drop the file.
            kill_stop_process_by_pid(command, pid)
            if lastInfo == info:
                os.remove(filepath)
    return True
if __name__ == "__main__":
    # args.device is not used at all.
    # just keep the interface.
    # so --device should not be recommended at the HomePage.
    args = serve_args()
    # "stop"/"kill" tear down previously started servers instead of serving.
    if args.server == "stop" or args.server == "kill":
        result = 0
        if "--port" in sys.argv:
            result = stop_serving(args.server, args.port)
        else:
            result = stop_serving(args.server)
        # NOTE(review): stop_serving returns True/False, so a successful stop
        # (True) fails `result == 0` and exits -1, while a failed stop (False)
        # exits 0 — the exit codes look inverted; confirm intent.
        if result == 0:
            os._exit(0)
        else:
            os._exit(-1)
    elif args.server == "check":
        check_env()
        os._exit(0)
    for single_model_config in args.model:
        if os.path.isdir(single_model_config):
            pass
        elif os.path.isfile(single_model_config):
            raise ValueError("The input of --model should be a dir not file.")
    # Record the chosen port/model so stop_serving can find this process later.
    if port_is_available(args.port):
        portList = [args.port]
        dump_pid_file(portList, args.model)
    if args.use_encryption_model:
        # Encrypted models: stand up the key-exchange HTTP server; the real
        # serving process is forked by MainService once a key arrives.
        p_flag = False
        p = None
        serving_port = 0
        server = HTTPServer(('0.0.0.0', int(args.port)), MainService)
        print(
            'Starting encryption server, waiting for key from client, use <Ctrl-C> to stop'
        )
        server.serve_forever()
    else:
        start_multi_card(args)
|
main_window.py | import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
from electrum_atom.bitcoin import TYPE_ADDRESS
from electrum_atom.storage import WalletStorage
from electrum_atom.wallet import Wallet
from electrum_atom.paymentrequest import InvoiceStore
from electrum_atom.util import profiler, InvalidPassword
from electrum_atom.plugin import run_hook
from electrum_atom.util import format_satoshis, format_satoshis_plain
from electrum_atom.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
from .i18n import _
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum_atom.gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum_atom.gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum_atom.gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum_atom.gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble, crash_reporter
from .uix.dialogs import OutputList, OutputItem
from .uix.dialogs import TopLabel, RefLabel
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
util = False
# register widget cache for keeping memory down timeout to forever to cache
# the data
Cache.register('electrum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum_atom.gui.kivy.uix.screens')
# Register fonts without this you won't be able to use bold/italic...
# inside markup.
from kivy.core.text import Label
Label.register('Roboto',
'electrum_atom/gui/kivy/data/fonts/Roboto.ttf',
'electrum_atom/gui/kivy/data/fonts/Roboto.ttf',
'electrum_atom/gui/kivy/data/fonts/Roboto-Bold.ttf',
'electrum_atom/gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrum_atom.util import (base_units, NoDynamicFeeEstimates, decimal_point_to_base_unit_name,
base_unit_name_to_decimal_point, NotEnoughFunds, UnknownBaseUnit,
DECIMAL_POINT_DEFAULT)
class ElectrumWindow(App):
    """Kivy application object for the Electrum Atom GUI.
    Exposes wallet/network state as Kivy properties so the kv widgets can
    bind to them.
    """
    electrum_config = ObjectProperty(None)
    language = StringProperty('en')
    # properties might be updated by the network
    num_blocks = NumericProperty(0)
    num_nodes = NumericProperty(0)
    server_host = StringProperty('')
    server_port = StringProperty('')
    num_chains = NumericProperty(0)
    blockchain_name = StringProperty('')
    fee_status = StringProperty('Fee')
    balance = StringProperty('')
    fiat_balance = StringProperty('')
    is_fiat = BooleanProperty(False)
    blockchain_forkpoint = NumericProperty(0)
    auto_connect = BooleanProperty(False)
    def on_auto_connect(self, instance, x):
        # Kivy observer: push the new auto_connect flag into the network layer.
        host, port, protocol, proxy, auto_connect = self.network.get_parameters()
        self.network.set_parameters(host, port, protocol, proxy, self.auto_connect)
    def toggle_auto_connect(self, x):
        self.auto_connect = not self.auto_connect
    def choose_server_dialog(self, popup):
        """Let the user pick a server; fills the host/port fields of *popup*."""
        from .uix.dialogs.choice_dialog import ChoiceDialog
        protocol = 's'
        def cb2(host):
            # Look up the chosen host's port for the fixed protocol.
            from electrum_atom import constants
            pp = servers.get(host, constants.net.DEFAULT_PORTS)
            port = pp.get(protocol, '')
            popup.ids.host.text = host
            popup.ids.port.text = port
        servers = self.network.get_servers()
        ChoiceDialog(_('Choose a server'), sorted(servers), popup.ids.host.text, cb2).open()
    def choose_blockchain_dialog(self, dt):
        """Offer a chain choice when the network sees more than one fork."""
        from .uix.dialogs.choice_dialog import ChoiceDialog
        chains = self.network.get_blockchains()
        def cb(name):
            # Map the chosen display name back to its blockchain index.
            for index, b in self.network.blockchains.items():
                if name == b.get_name():
                    self.network.follow_chain(index)
        names = [self.network.blockchains[b].get_name() for b in chains]
        if len(names) > 1:
            cur_chain = self.network.blockchain().get_name()
            ChoiceDialog(_('Choose your chain'), names, cur_chain, cb).open()
    use_rbf = BooleanProperty(False)
    def on_use_rbf(self, instance, x):
        # Persist the replace-by-fee preference.
        self.electrum_config.set_key('use_rbf', self.use_rbf, True)
    use_change = BooleanProperty(False)
    def on_use_change(self, instance, x):
        # Persist the use-change-address preference.
        self.electrum_config.set_key('use_change', self.use_change, True)
    use_unconfirmed = BooleanProperty(False)
    def on_use_unconfirmed(self, instance, x):
        # Stored inverted: the config key is 'confirmed_only'.
        self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
    def set_URI(self, uri):
        # Open the send tab pre-filled from a payment URI.
        self.switch_to('send')
        self.send_screen.set_URI(uri)
    def on_new_intent(self, intent):
        # Android intent hook: only handle the bitcoin: scheme.
        if intent.getScheme() != 'bitcoin':
            return
        uri = intent.getDataString()
        self.set_URI(uri)
    def on_language(self, instance, language):
        Logger.info('language: {}'.format(language))
        _.switch_lang(language)
    def update_history(self, *dt):
        if self.history_screen:
            self.history_screen.update()
    def on_quotes(self, d):
        # Exchange-rate update: history amounts need re-rendering.
        Logger.info("on_quotes")
        self._trigger_update_history()
    def on_history(self, d):
        Logger.info("on_history")
        self._trigger_update_history()
    def on_fee_histogram(self, *args):
        self._trigger_update_history()
    def _get_bu(self):
        """Return the configured base-unit name, falling back to the default."""
        decimal_point = self.electrum_config.get('decimal_point', DECIMAL_POINT_DEFAULT)
        try:
            return decimal_point_to_base_unit_name(decimal_point)
        except UnknownBaseUnit:
            return decimal_point_to_base_unit_name(DECIMAL_POINT_DEFAULT)
    def _set_bu(self, value):
        """Persist a new base unit and refresh all displayed amounts."""
        assert value in base_units.keys()
        decimal_point = base_unit_name_to_decimal_point(value)
        self.electrum_config.set_key('decimal_point', decimal_point, True)
        self._trigger_update_status()
        self._trigger_update_history()
    base_unit = AliasProperty(_get_bu, _set_bu)
    status = StringProperty('')
    fiat_unit = StringProperty('')
    def on_fiat_unit(self, a, b):
        self._trigger_update_history()
    def decimal_point(self):
        return base_units[self.base_unit]
    def btc_to_fiat(self, amount_str):
        """Convert an amount string in the base unit to a fiat string ('' on failure)."""
        if not amount_str:
            return ''
        if not self.fx.is_enabled():
            return ''
        rate = self.fx.exchange_rate()
        if rate.is_nan():
            return ''
        fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
        return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
    def fiat_to_btc(self, fiat_amount):
        """Convert a fiat amount string to a formatted amount string ('' on failure)."""
        if not fiat_amount:
            return ''
        rate = self.fx.exchange_rate()
        if rate.is_nan():
            return ''
        satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
        return format_satoshis_plain(satoshis, self.decimal_point())
    def get_amount(self, amount_str):
        """Parse '<number> <unit>' into satoshis; None on a malformed number."""
        a, u = amount_str.split()
        assert u == self.base_unit
        try:
            x = Decimal(a)
        except:
            return None
        p = pow(10, self.decimal_point())
        return int(p * x)
    _orientation = OptionProperty('landscape',
                                  options=('landscape', 'portrait'))
    def _get_orientation(self):
        return self._orientation
    orientation = AliasProperty(_get_orientation,
                                None,
                                bind=('_orientation',))
    '''Screen orientation, derived from the window size in on_size().
    Can be one of `landscape` or `portrait`.
    :data:`orientation` is a read only `AliasProperty` Defaults to 'landscape'
    '''
    _ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
    def _get_ui_mode(self):
        return self._ui_mode
    ui_mode = AliasProperty(_get_ui_mode,
                            None,
                            bind=('_ui_mode',))
    '''Tries to ascertain the kind of device the app is running on.
    Can be one of `tablet` or `phone`.
    :data:`ui_mode` is a read only `AliasProperty` Defaults to 'phone'
    '''
    def __init__(self, **kwargs):
        """Set up GUI state, pull config/network/daemon out of kwargs, and
        create the rate-limited UI refresh triggers."""
        # initialize variables
        self._clipboard = Clipboard
        self.info_bubble = None
        self.nfcscanner = None
        self.tabs = None
        self.is_exit = False
        self.wallet = None
        self.pause_time = 0
        App.__init__(self)#, **kwargs)
        title = _('Electrum App')
        self.electrum_config = config = kwargs.get('config', None)
        self.language = config.get('language', 'en')
        self.network = network = kwargs.get('network', None)
        if self.network:
            # Seed the network-backed properties with current values.
            self.num_blocks = self.network.get_local_height()
            self.num_nodes = len(self.network.get_interfaces())
            host, port, protocol, proxy_config, auto_connect = self.network.get_parameters()
            self.server_host = host
            self.server_port = port
            self.auto_connect = auto_connect
            self.proxy_config = proxy_config if proxy_config else {}
        self.plugins = kwargs.get('plugins', [])
        self.gui_object = kwargs.get('gui_object', None)
        self.daemon = self.gui_object.daemon
        self.fx = self.daemon.fx
        self.use_rbf = config.get('use_rbf', True)
        self.use_change = config.get('use_change', True)
        self.use_unconfirmed = not config.get('confirmed_only', False)
        # create triggers so as to minimize updating a max of 2 times a sec
        self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
        self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
        self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
        self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
        # cached dialogs
        self._settings_dialog = None
        self._password_dialog = None
        self.fee_status = self.electrum_config.get_fee_status()
def wallet_name(self):
return os.path.basename(self.wallet.storage.path) if self.wallet else ' '
    def on_pr(self, pr):
        """Handle an incoming payment request: verify, store, then open the
        send tab unless it is already paid or expired."""
        if not self.wallet:
            self.show_error(_('No wallet loaded.'))
            return
        if pr.verify(self.wallet.contacts):
            key = self.wallet.invoices.add(pr)
            if self.invoices_screen:
                self.invoices_screen.update()
            status = self.wallet.invoices.get_status(key)
            if status == PR_PAID:
                self.show_error("invoice already paid")
                self.send_screen.do_clear()
            else:
                if pr.has_expired():
                    self.show_error(_('Payment request has expired'))
                else:
                    self.switch_to('send')
                    self.send_screen.set_request(pr)
        else:
            self.show_error("invoice error:" + pr.error)
            self.send_screen.do_clear()
    def on_qr(self, data):
        """Dispatch scanned QR data: plain address, payment URI, or raw tx."""
        from electrum_atom.bitcoin import base_decode, is_address
        data = data.strip()
        if is_address(data):
            self.set_URI(data)
            return
        if data.startswith('bitcoinatom:'):
            self.set_URI(data)
            return
        # try to decode transaction
        from electrum_atom.transaction import Transaction
        from electrum_atom.util import bh2u
        try:
            text = bh2u(base_decode(data, None, base=43))
            tx = Transaction(text)
            tx.deserialize()
        except:
            # Not a transaction either; fall through to the error bubble.
            tx = None
        if tx:
            self.tx_dialog(tx)
            return
        # show error
        self.show_error("Unable to decode QR data")
    def update_tab(self, name):
        # Refresh a single tab screen if it has been created.
        s = getattr(self, name + '_screen', None)
        if s:
            s.update()
    @profiler
    def update_tabs(self):
        """Refresh every main tab that has been instantiated."""
        for tab in ['invoices', 'send', 'history', 'receive', 'address']:
            self.update_tab(tab)
    def switch_to(self, name):
        """Activate the named tab, lazily loading its screen on first use."""
        s = getattr(self, name + '_screen', None)
        if s is None:
            s = self.tabs.ids[name + '_screen']
            s.load_screen()
        panel = self.tabs.ids.panel
        tab = self.tabs.ids[name + '_tab']
        panel.switch_to(tab)
    def show_request(self, addr):
        # Jump to the receive tab showing the given address.
        self.switch_to('receive')
        self.receive_screen.screen.address = addr
    def show_pr_details(self, req, status, is_invoice):
        """Open the invoice popup for the payment-request dict *req*."""
        from electrum_atom.util import format_time
        requestor = req.get('requestor')
        exp = req.get('exp')
        memo = req.get('memo')
        amount = req.get('amount')
        fund = req.get('fund')
        popup = Builder.load_file('electrum_atom/gui/kivy/uix/ui_screens/invoice.kv')
        popup.is_invoice = is_invoice
        popup.amount = amount
        popup.requestor = requestor if is_invoice else req.get('address')
        popup.exp = format_time(exp) if exp else ''
        popup.description = memo if memo else ''
        popup.signature = req.get('signature', '')
        popup.status = status
        popup.fund = fund if fund else 0
        txid = req.get('txid')
        popup.tx_hash = txid or ''
        popup.on_open = lambda: popup.ids.output_list.update(req.get('outputs', []))
        popup.export = self.export_private_keys
        popup.open()
    def show_addr_details(self, req, status):
        """Open the invoice popup in address-only (non-invoice) mode."""
        from electrum_atom.util import format_time
        fund = req.get('fund')
        isaddr = 'y'
        popup = Builder.load_file('electrum_atom/gui/kivy/uix/ui_screens/invoice.kv')
        popup.isaddr = isaddr
        popup.is_invoice = False
        popup.status = status
        popup.requestor = req.get('address')
        popup.fund = fund if fund else 0
        popup.export = self.export_private_keys
        popup.open()
    def qr_dialog(self, title, data, show_text=False, text_for_clipboard=None):
        """Display *data* as a QR code; on render failure, optionally copy
        the text to the clipboard and show an info bubble instead."""
        from .uix.dialogs.qr_dialog import QRDialog
        def on_qr_failure():
            popup.dismiss()
            msg = _('Failed to display QR code.')
            if text_for_clipboard:
                msg += '\n' + _('Text copied to clipboard.')
                self._clipboard.copy(text_for_clipboard)
            Clock.schedule_once(lambda dt: self.show_info(msg))
        popup = QRDialog(title, data, show_text, on_qr_failure)
        popup.open()
    def scan_qr(self, on_complete):
        """Android-only: launch the QR scanner activity and pass the scanned
        text to *on_complete*."""
        if platform != 'android':
            return
        from jnius import autoclass, cast
        from android import activity
        PythonActivity = autoclass('org.kivy.android.PythonActivity')
        SimpleScannerActivity = autoclass("org.electrum_atom.qr.SimpleScannerActivity")
        Intent = autoclass('android.content.Intent')
        intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
        def on_qr_result(requestCode, resultCode, intent):
            try:
                if resultCode == -1: # RESULT_OK:
                    # this doesn't work due to some bug in jnius:
                    # contents = intent.getStringExtra("text")
                    String = autoclass("java.lang.String")
                    contents = intent.getStringExtra(String("text"))
                    on_complete(contents)
            finally:
                # Always unbind so the handler doesn't fire for other results.
                activity.unbind(on_activity_result=on_qr_result)
        activity.bind(on_activity_result=on_qr_result)
        PythonActivity.mActivity.startActivityForResult(intent, 0)
    def do_share(self, data, title):
        """Android-only: open the system share chooser with *data* as text."""
        if platform != 'android':
            return
        from jnius import autoclass, cast
        JS = autoclass('java.lang.String')
        Intent = autoclass('android.content.Intent')
        sendIntent = Intent()
        sendIntent.setAction(Intent.ACTION_SEND)
        sendIntent.setType("text/plain")
        sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
        PythonActivity = autoclass('org.kivy.android.PythonActivity')
        currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
        it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
        currentActivity.startActivity(it)
    def build(self):
        # Root widget is defined in the main kv file.
        return Builder.load_file('electrum_atom/gui/kivy/main.kv')
    def _pause(self):
        if platform == 'android':
            # move activity to back
            from jnius import autoclass
            python_act = autoclass('org.kivy.android.PythonActivity')
            mActivity = python_act.mActivity
            mActivity.moveTaskToBack(True)
    def on_start(self):
        ''' This is the start point of the kivy ui
        '''
        import time
        # NOTE(review): time.clock() was removed in Python 3.8 — this line
        # raises AttributeError on modern interpreters; confirm the target
        # Python version or switch to time.perf_counter().
        Logger.info('Time to on_start: {} <<<<<<<<'.format(time.clock()))
        win = Window
        win.bind(size=self.on_size, on_keyboard=self.on_keyboard)
        win.bind(on_key_down=self.on_key_down)
        #win.softinput_mode = 'below_target'
        self.on_size(win, win.size)
        self.init_ui()
        crash_reporter.ExceptionHook(self)
        # init plugins
        run_hook('init_kivy', self)
        # fiat currency
        self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
        # default tab
        self.switch_to('history')
        # bind intent for bitcoinatom: URI scheme
        if platform == 'android':
            from android import activity
            from jnius import autoclass
            PythonActivity = autoclass('org.kivy.android.PythonActivity')
            mactivity = PythonActivity.mActivity
            self.on_new_intent(mactivity.getIntent())
            activity.bind(on_new_intent=self.on_new_intent)
        # connect callbacks
        if self.network:
            interests = ['updated', 'status', 'new_transaction', 'verified', 'interfaces']
            self.network.register_callback(self.on_network_event, interests)
            self.network.register_callback(self.on_fee, ['fee'])
            self.network.register_callback(self.on_fee_histogram, ['fee_histogram'])
            self.network.register_callback(self.on_quotes, ['on_quotes'])
            self.network.register_callback(self.on_history, ['on_history'])
        # load wallet
        self.load_wallet_by_name(self.electrum_config.get_wallet_path())
        # URI passed in config
        uri = self.electrum_config.get('url')
        if uri:
            self.set_URI(uri)
    def get_wallet_path(self):
        # Path of the open wallet file, or '' when none is open.
        if self.wallet:
            return self.wallet.storage.path
        else:
            return ''
    def on_wizard_complete(self, wizard, wallet):
        if wallet: # wizard returned a wallet
            wallet.start_threads(self.daemon.network)
            self.daemon.add_wallet(wallet)
            self.load_wallet(wallet)
        elif not self.wallet:
            # wizard did not return a wallet; and there is no wallet open atm
            # try to open last saved wallet (potentially start wizard again)
            self.load_wallet_by_name(self.electrum_config.get_wallet_path(), ask_if_wizard=True)
    def load_wallet_by_name(self, path, ask_if_wizard=False):
        """Open the wallet at *path*, prompting for the PIN or launching the
        install wizard as needed."""
        if not path:
            return
        if self.wallet and self.wallet.storage.path == path:
            return  # already open
        wallet = self.daemon.load_wallet(path, None)
        if wallet:
            if wallet.has_password():
                self.password_dialog(wallet, _('Enter PIN code'), lambda x: self.load_wallet(wallet), self.stop)
            else:
                self.load_wallet(wallet)
        else:
            Logger.debug('Electrum Atom: Wallet not found or action needed. Launching install wizard')
            def launch_wizard():
                storage = WalletStorage(path, manual_upgrades=True)
                wizard = Factory.InstallWizard(self.electrum_config, self.plugins, storage)
                wizard.bind(on_wizard_complete=self.on_wizard_complete)
                action = wizard.storage.get_action()
                wizard.run(action)
            if not ask_if_wizard:
                launch_wizard()
            else:
                from .uix.dialogs.question import Question
                def handle_answer(b: bool):
                    if b:
                        launch_wizard()
                    else:
                        # User declined: remove the wallet file and quit.
                        try: os.unlink(path)
                        except FileNotFoundError: pass
                        self.stop()
                d = Question(_('Do you want to launch the wizard again?'), handle_answer)
                d.open()
    def on_stop(self):
        Logger.info('on_stop')
        if self.wallet:
            # Remember the open wallet so it reloads on next start.
            self.electrum_config.save_last_wallet(self.wallet)
        self.stop_wallet()
    def stop_wallet(self):
        if self.wallet:
            self.daemon.stop_wallet(self.wallet.storage.path)
            self.wallet = None
    def on_key_down(self, instance, key, keycode, codepoint, modifiers):
        """Ctrl-key shortcuts; the numeric keycodes are noted inline."""
        if 'ctrl' in modifiers:
            # q=24 w=25
            if keycode in (24, 25):
                self.stop()
            elif keycode == 27:
                # r=27
                # force update wallet
                self.update_wallet()
            elif keycode == 112:
                # pageup
                #TODO move to next tab
                pass
            elif keycode == 117:
                # pagedown
                #TODO move to prev tab
                pass
        #TODO: alt+tab_number to activate the particular tab
    def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
        # Back button (27): require a second press to actually exit.
        if key == 27 and self.is_exit is False:
            self.is_exit = True
            self.show_info(_('Press again to exit'))
            return True
        # override settings button
        if key in (319, 282): #f1/settings button on android
            #self.gui.main_gui.toggle_settings(self)
            return True
    def settings_dialog(self):
        from .uix.dialogs.settings import SettingsDialog
        if self._settings_dialog is None:
            # Created once, then cached for reuse.
            self._settings_dialog = SettingsDialog(self)
        self._settings_dialog.update()
        self._settings_dialog.open()
    def popup_dialog(self, name):
        """Open the popup named *name*; 'settings', 'wallets' and 'status'
        get special handling, anything else loads <name>.kv directly."""
        if name == 'settings':
            self.settings_dialog()
        elif name == 'wallets':
            from .uix.dialogs.wallets import WalletDialog
            d = WalletDialog()
            d.open()
        elif name == 'status':
            popup = Builder.load_file('electrum_atom/gui/kivy/uix/ui_screens/'+name+'.kv')
            master_public_keys_layout = popup.ids.master_public_keys
            # Skips the first xpub — presumably already shown by the kv
            # file; confirm against status.kv.
            for xpub in self.wallet.get_master_public_keys()[1:]:
                master_public_keys_layout.add_widget(TopLabel(text=_('Master Public Key')))
                ref = RefLabel()
                ref.name = _('Master Public Key')
                ref.data = xpub
                master_public_keys_layout.add_widget(ref)
            popup.open()
        else:
            popup = Builder.load_file('electrum_atom/gui/kivy/uix/ui_screens/'+name+'.kv')
            popup.open()
    @profiler
    def init_ui(self):
        ''' Initialize The Ux part of electrum. This function performs the basic
        tasks of setting up the ui.
        '''
        #from weakref import ref
        self.funds_error = False
        # setup UX
        self.screens = {}
        #setup lazy imports for mainscreen
        Factory.register('AnimatedPopup',
                         module='electrum_atom.gui.kivy.uix.dialogs')
        Factory.register('QRCodeWidget',
                         module='electrum_atom.gui.kivy.uix.qrcodewidget')
        # preload widgets. Remove this if you want to load the widgets on demand
        #Cache.append('electrum_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
        #Cache.append('electrum_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
        # load and focus the ui
        self.root.manager = self.root.ids['manager']
        self.history_screen = None
        self.contacts_screen = None
        self.send_screen = None
        self.invoices_screen = None
        self.receive_screen = None
        self.requests_screen = None
        self.address_screen = None
        self.icon = "icons/electrum_atom.png"
        self.tabs = self.root.ids['tabs']
    def update_interfaces(self, dt):
        """Refresh node/chain counters from the network (Clock trigger target)."""
        self.num_nodes = len(self.network.get_interfaces())
        self.num_chains = len(self.network.get_blockchains())
        chain = self.network.blockchain()
        self.blockchain_forkpoint = chain.get_forkpoint()
        self.blockchain_name = chain.get_name()
        interface = self.network.interface
        if interface:
            self.server_host = interface.host
    def on_network_event(self, event, *args):
        """Route network callbacks onto the rate-limited UI refresh triggers."""
        Logger.info('network event: '+ event)
        if event == 'interfaces':
            self._trigger_update_interfaces()
        elif event == 'updated':
            self._trigger_update_wallet()
            self._trigger_update_status()
        elif event == 'status':
            self._trigger_update_status()
        elif event == 'new_transaction':
            self._trigger_update_wallet()
        elif event == 'verified':
            self._trigger_update_wallet()
    @profiler
    def load_wallet(self, wallet):
        """Make *wallet* the active wallet and refresh the whole UI."""
        if self.wallet:
            self.stop_wallet()
        self.wallet = wallet
        self.update_wallet()
        # Once GUI has been initialized check if we want to announce something
        # since the callback has been called before the GUI was initialized
        if self.receive_screen:
            self.receive_screen.clear()
            self.update_tabs()
        run_hook('load_wallet', wallet, self)
    def update_status(self, *dt):
        """Recompute the status line and the balance display strings."""
        self.num_blocks = self.network.get_local_height()
        if not self.wallet:
            self.status = _("No Wallet")
            return
        if self.network is None or not self.network.is_running():
            status = _("Offline")
        elif self.network.is_connected():
            server_height = self.network.get_server_height()
            server_lag = self.network.get_local_height() - server_height
            if not self.wallet.up_to_date or server_height == 0:
                status = _("Synchronizing...")
            elif server_lag > 1:
                status = _("Server lagging")
            else:
                status = ''
        else:
            status = _("Disconnected")
        self.status = self.wallet.basename() + (' [size=15dp](%s)[/size]'%status if status else '')
        # balance
        c, u, x = self.wallet.get_balance()
        text = self.format_amount(c+x+u)
        self.balance = str(text.strip()) + ' [size=22dp]%s[/size]'% self.base_unit
        self.fiat_balance = self.fx.format_amount(c+u+x) + ' [size=22dp]%s[/size]'% self.fx.ccy
    def get_max_amount(self):
        """Return the spendable maximum (after fees) as a formatted string,
        or '' when it cannot be computed."""
        from electrum_atom.transaction import TxOutput
        if run_hook('abort_send', self):
            return ''
        inputs = self.wallet.get_spendable_coins(None, self.electrum_config)
        if not inputs:
            return ''
        addr = str(self.send_screen.screen.address) or self.wallet.dummy_address()
        # '!' amount means "spend everything" in the tx builder.
        outputs = [TxOutput(TYPE_ADDRESS, addr, '!')]
        try:
            tx = self.wallet.make_unsigned_transaction(inputs, outputs, self.electrum_config)
        except NoDynamicFeeEstimates as e:
            Clock.schedule_once(lambda dt, bound_e=e: self.show_error(str(bound_e)))
            return ''
        except NotEnoughFunds:
            return ''
        amount = tx.output_value()
        __, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
        amount_after_all_fees = amount - x_fee_amount
        return format_satoshis_plain(amount_after_all_fees, self.decimal_point())
    def format_amount(self, x, is_diff=False, whitespaces=False):
        return format_satoshis(x, 0, self.decimal_point(), is_diff=is_diff, whitespaces=whitespaces)
    def format_amount_and_units(self, x):
        return format_satoshis_plain(x, self.decimal_point()) + ' ' + self.base_unit
    #@profiler
    def update_wallet(self, *dt):
        """Refresh tabs once the wallet is synced (or we are offline)."""
        self._trigger_update_status()
        if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
            self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Electrum', message,
app_icon=icon, app_name='Electrum')
except ImportError:
Logger.Error('Notification: needs plyer; `sudo pip install plyer`')
def on_pause(self):
self.pause_time = time.time()
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
now = time.time()
if self.wallet and self.wallet.has_password() and now - self.pause_time > 60:
self.password_dialog(self.wallet, _('Enter PIN'), None, self.stop)
if self.nfcscanner:
self.nfcscanner.nfc_enable()
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label, touch):
if label.touched:
label.touched = False
self.qr_dialog(label.name, label.data, True)
else:
label.touched = True
self._clipboard.copy(label.data)
Clock.schedule_once(lambda dt: self.show_info(_('Text copied to clipboard.\nTap again to display it as QR code.')))
def set_send(self, address, amount, label, message):
self.send_payment(address, amount=amount, label=label, message=message)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
               exit=False, icon='atlas://electrum_atom/gui/kivy/theming/light/error', duration=0,
               modal=False):
    ''' Show an error Message Bubble.
    '''
    bubble_pos = pos if pos else Window.center
    self.show_info_bubble(text=error, icon=icon, width=width,
                          pos=bubble_pos, arrow_pos=arrow_pos, exit=exit,
                          duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
              exit=False, duration=0, modal=False):
    ''' Show an Info Message Bubble.
    '''
    # same bubble as show_error, but with the "important" icon
    info_icon = 'atlas://electrum_atom/gui/kivy/theming/light/important'
    self.show_error(error, icon=info_icon,
                    duration=duration, modal=modal, exit=exit, pos=pos,
                    arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
        arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
    '''Method to show an Information Bubble

    .. parameters::
        text: Message to be displayed
        pos: position for the bubble
        duration: duration the bubble remains on screen. 0 = click to hide
        width: width of the Bubble
        arrow_pos: arrow position for the bubble
    '''
    # the bubble widget is created lazily once and re-used for every call
    info_bubble = self.info_bubble
    if not info_bubble:
        info_bubble = self.info_bubble = Factory.InfoBubble()
    win = Window
    # detach from the previous parent (or its modal wrapper) before re-showing
    if info_bubble.parent:
        win.remove_widget(info_bubble
                            if not info_bubble.modal else
                            info_bubble._modal_view)
    if not arrow_pos:
        info_bubble.show_arrow = False
    else:
        info_bubble.show_arrow = True
        info_bubble.arrow_pos = arrow_pos
    img = info_bubble.ids.img
    if text == 'texture':
        # icon holds a texture not a source image
        # display the texture in full screen
        text = ''
        img.texture = icon
        info_bubble.fs = True
        info_bubble.show_arrow = False
        img.allow_stretch = True
        info_bubble.dim_background = True
        info_bubble.background_image = 'atlas://electrum_atom/gui/kivy/theming/light/card'
    else:
        # normal mode: icon (if any) is an image source path
        info_bubble.fs = False
        info_bubble.icon = icon
        #if img.texture and img._coreimage:
        #    img.reload()
        img.allow_stretch = False
        info_bubble.dim_background = False
        info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
    info_bubble.message = text
    # default position: slightly below the window centre
    if not pos:
        pos = (win.center[0], win.center[1] - (info_bubble.height/2))
    info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
    """Open the transaction-details dialog for *tx*."""
    from .uix.dialogs.tx_dialog import TxDialog
    dialog = TxDialog(self, tx)
    dialog.open()
def sign_tx(self, *args):
    """Sign a transaction on a background thread.

    args are forwarded to _sign_tx: (tx, password, on_success, on_failure).
    """
    worker = threading.Thread(target=self._sign_tx, args=args)
    worker.start()
def _sign_tx(self, tx, password, on_success, on_failure):
    """Worker for sign_tx(); reports back on the Kivy main thread via Clock."""
    try:
        self.wallet.sign_transaction(tx, password)
    except InvalidPassword:
        Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
        return
    # plugins (e.g. trustedcoin) may wrap the success callback
    hooked = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure)
    on_success = hooked or on_success
    Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
    """Background worker for broadcast(): send *tx* to the network, then
    hand (ok, txid) back to the Kivy main thread via Clock."""
    ok, txid = self.network.broadcast_transaction(tx)
    Clock.schedule_once(lambda dt: on_complete(ok, txid))
def broadcast(self, tx, pr=None):
    """Broadcast *tx* if connected; on success optionally mark invoice *pr* paid."""
    def on_complete(ok, msg):
        if not ok:
            msg = msg[:500] if msg else _('There was an error broadcasting the transaction.')
            self.show_error(msg)
            return
        self.show_info(_('Payment sent.'))
        if self.send_screen:
            self.send_screen.do_clear()
        if pr:
            self.wallet.invoices.set_paid(pr, tx.txid())
            self.wallet.invoices.save()
            self.update_tab('invoices')

    if not (self.network and self.network.is_connected()):
        self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
        return
    self.show_info(_('Sending'))
    threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
def description_dialog(self, screen):
    """Let the user edit the payment description shown on *screen*."""
    from .uix.dialogs.label_dialog import LabelDialog
    def on_done(new_text):
        screen.message = new_text
    dialog = LabelDialog(_('Enter description'), screen.message, on_done)
    dialog.open()
def amount_dialog(self, screen, show_max):
    """Open the amount-entry dialog, pre-filled from *screen*."""
    from .uix.dialogs.amount_dialog import AmountDialog
    amount = screen.amount
    if amount:
        # screen.amount looks like "<number> <unit>"; keep only the number
        amount, unit = str(amount).split()
        assert unit == self.base_unit
    def on_amount(value):
        screen.amount = value
    popup = AmountDialog(show_max, amount, on_amount)
    popup.open()
def invoices_dialog(self, screen):
    """Show the saved-invoices dialog, or an info bubble when there are none."""
    from .uix.dialogs.invoices import InvoicesDialog
    if not self.wallet.invoices.sorted_list():
        self.show_info(' '.join([
            _('No saved invoices.'),
            _('Signed invoices are saved automatically when you scan them.'),
            _('You may also save unsigned requests or contact addresses using the save button.')
        ]))
        return
    popup = InvoicesDialog(self, screen, None)
    popup.update()
    popup.open()
def requests_dialog(self, screen):
    """Show the saved payment-requests dialog, or an info bubble when empty."""
    from .uix.dialogs.requests import RequestsDialog
    if not self.wallet.get_sorted_requests(self.electrum_config):
        self.show_info(_('No saved requests.'))
        return
    popup = RequestsDialog(self, screen, None)
    popup.update()
    popup.open()
def addresses_dialog(self, screen):
    """Show the wallet-addresses dialog."""
    from .uix.dialogs.addresses import AddressesDialog
    dialog = AddressesDialog(self, screen, None)
    dialog.update()
    dialog.open()
def fee_dialog(self, label, dt):
    """Open the fee-settings dialog; refresh the cached fee status when done."""
    from .uix.dialogs.fee_dialog import FeeDialog
    def on_close():
        self.fee_status = self.electrum_config.get_fee_status()
    FeeDialog(self, self.electrum_config, on_close).open()
def on_fee(self, event, *arg):
    """Config-change hook: refresh the cached fee status string."""
    status = self.electrum_config.get_fee_status()
    self.fee_status = status
def protected(self, msg, f, args):
    """Run f(*args, password) behind the PIN dialog when the wallet is
    protected; otherwise call it immediately with password=None."""
    if not self.wallet.has_password():
        f(*(args + (None,)))
        return
    self.password_dialog(self.wallet, msg,
                         lambda pw: f(*(args + (pw,))),
                         lambda: None)
def delete_wallet(self):
    """Ask for confirmation before deleting the current wallet file."""
    from .uix.dialogs.question import Question
    basename = os.path.basename(self.wallet.storage.path)
    question = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
    question.open()
def _delete_wallet(self, b):
    """Question-dialog callback: *b* is truthy when the user confirmed."""
    if not b:
        return
    basename = self.wallet.basename()
    self.protected(_("Enter your PIN code to confirm deletion of {}").format(basename),
                   self.__delete_wallet, ())
def __delete_wallet(self, pw):
    """Verify the PIN *pw*, then delete the wallet file and load another wallet."""
    wallet_path = self.get_wallet_path()
    basename = os.path.basename(wallet_path)
    if self.wallet.has_password():
        try:
            self.wallet.check_password(pw)
        except InvalidPassword:
            # was a bare 'except:'; narrowed to the exception check_password
            # raises (same pattern as _sign_tx / export_private_keys)
            self.show_error("Invalid PIN")
            return
    self.stop_wallet()
    os.unlink(wallet_path)
    self.show_error(_("Wallet removed: {}").format(basename))
    new_path = self.electrum_config.get_wallet_path()
    self.load_wallet_by_name(new_path)
def show_seed(self, label):
    """Prompt for the PIN, then reveal the wallet seed in *label*."""
    self.protected(_("Enter your PIN code in order to decrypt your seed"),
                   self._show_seed, (label,))
def _show_seed(self, label, password):
    """protected() callback: decrypt and display the seed (and passphrase, if set)."""
    if self.wallet.has_password() and password is None:
        # the user cancelled the PIN dialog
        return
    keystore = self.wallet.keystore
    try:
        seed = keystore.get_seed(password)
        passphrase = keystore.get_passphrase(password)
    except Exception:
        # was a bare 'except:'; narrowed so KeyboardInterrupt/SystemExit propagate
        self.show_error("Invalid PIN")
        return
    label.text = _('Seed') + ':\n' + seed
    if passphrase:
        label.text += '\n\n' + _('Passphrase') + ': ' + passphrase
def password_dialog(self, wallet, msg, on_success, on_failure):
    """Show the (lazily created, re-used) PIN entry dialog."""
    from .uix.dialogs.password_dialog import PasswordDialog
    if self._password_dialog is None:
        self._password_dialog = PasswordDialog()
    dialog = self._password_dialog
    dialog.init(self, wallet, msg, on_success, on_failure)
    dialog.open()
def change_password(self, cb):
    """Run the PIN-change flow through the (re-used) password dialog."""
    from .uix.dialogs.password_dialog import PasswordDialog
    if self._password_dialog is None:
        self._password_dialog = PasswordDialog()
    message = _("Changing PIN code.") + '\n' + _("Enter your current PIN:")
    def on_success(old_password, new_password):
        self.wallet.update_password(old_password, new_password)
        self.show_info(_("Your PIN code was updated"))
    def on_failure():
        self.show_error(_("PIN codes do not match"))
    self._password_dialog.init(self, self.wallet, message, on_success, on_failure, is_change=1)
    self._password_dialog.open()
def export_private_keys(self, pk_label, addr):
    """Prompt for the PIN, then place the private key for *addr* into *pk_label*."""
    if self.wallet.is_watching_only():
        self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
        return
    def show_private_key(addr, pk_label, password):
        if self.wallet.has_password() and password is None:
            return
        if not self.wallet.can_export():
            return
        try:
            pk_label.data = str(self.wallet.export_private_key(addr, password)[0])
        except InvalidPassword:
            self.show_error("Invalid PIN")
            return
    self.protected(_("Enter your PIN code in order to decrypt your private key"),
                   show_private_key, (addr, pk_label))
|
plugin.py | # DSC/Honeywell EnvisaLink 3 & 4 Alarm interface Plugin
#
# Author: Dnpwwo & Wagner Oliveira, 2019
#
"""
<plugin key="EnvisaLink" name="DSC/Honeywell Alarm via EnvisaLink" author="dnpwwo & Wagner Oliveira" version="3.0" wikilink="https://github.com/guino/Domoticz-DSCEnvisalink-Plugin" externallink="http://www.eyezon.com/?page_id=176">
<description>
<h2>EnvisaLink 3 & 4 Alarm interface for DSC/Honeywell Alarms</h2><br/>
<h3>Features</h3>
<ul style="list-style-type:square">
<li>Shows Zone and Partition status in Domoticz</li>
<li>Can be integrated with the Domoticz Security Panel to allow Arm & Disarm operations from Domoticz</li>
<li>Bypassed Zones shown with Red banner in Domoticz *DSC ONLY</li>
</ul>
<h3>Devices</h3>
<ul style="list-style-type:square">
<li>Zones - Contact device per zone that show Open/Closed status. These can be changed to 'Motion' devices in the Device Edit page and they will show On/Off (recommend setting an Off Delay otherwise activity is rarely seen in the Web UI)</li>
<li>Partition - Alert per partition that shows partition state, useful if you don't want to use the Security Panel integration or you have more than one partition.</li>
<li>Command Output - Contact device for each Command Output/Partition combination seen. The DSC only reports activation so an Off Delay must exist for the device to reset) *DSC ONLY</li>
<li>Security Panel - Optionally creates a Security Panel device that allows arming and disarming via Domoticz.</li>
<li>Alarm Selector - Device that allows arming and disarming via Domoticz. *HONEYWELL ONLY</li>
</ul>
</description>
<params>
<param field="Address" label="IP Address" width="200px" required="true" default="127.0.0.1"/>
<param field="Port" label="Port" width="30px" required="true" default="4025"/>
<param field="Password" label="Envisalink Password" width="200px" required="true" default="" password="true"/>
<param field="Mode1" label="Max. Partitions" width="50px">
<options>
<option label="1" value="1" default="true"/>
<option label="2" value="2" />
</options>
</param>
<param field="Mode2" label="Max. Zones" width="50px">
<options>
<option label="1" value="1"/>
<option label="2" value="2" />
<option label="3" value="3" />
<option label="4" value="4" />
<option label="5" value="5" />
<option label="6" value="6" default="true"/>
<option label="7" value="7" />
<option label="8" value="8" />
<option label="9" value="9" />
<option label="10" value="10" />
<option label="11" value="11" />
<option label="12" value="12" />
<option label="13" value="13" />
<option label="14" value="14" />
<option label="15" value="15" />
<option label="16" value="16"/>
<option label="17" value="17" />
<option label="18" value="18" />
<option label="19" value="19" />
<option label="20" value="20"/>
<option label="64" value="64" />
</options>
</param>
<param field="Mode3" label="Integrated Security Panel" width="75px">
<options>
<option label="True" value="True" default="True"/>
<option label="False" value="False" />
</options>
</param>
<param field="Mode4" label="Alarm Passcode" width="75px" default="" password="true" />
<param field="Mode5" label="Time Out Lost Devices" width="75px">
<options>
<option label="True" value="True" default="True"/>
<option label="False" value="False" />
</options>
</param>
<param field="Mode6" label="Debug" width="150px">
<options>
<option label="None" value="0" default="true" />
<option label="Python Only" value="2"/>
<option label="Basic Debugging" value="62"/>
<option label="Basic+Messages" value="126"/>
<option label="Connections Only" value="16"/>
<option label="Connections+Python" value="18"/>
<option label="Connections+Queue" value="144"/>
<option label="All" value="-1"/>
</options>
</param>
</params>
</plugin>
"""
import Domoticz
from dsc_envisalinkdefs import *
from alarm_state import AlarmState
from datetime import datetime
from time import time, sleep
import sys
import re
import json
import threading
import traceback
# Domoticz device unit-number layout for this plugin:
ZONE_BASE = 0
SECURITY_PANEL = 100
PARTITION_BASE = 100 # First partition will be this + partition number so 101
OUTPUT_BASE = 100 # Device number will be this + 10*partition + output, i.e. partition 1 output 2 = Device 112
# Honeywell-only special device units:
ARMIDX = 103
ACIDX = 104
CHIMEIDX = 105
class BasePlugin:
    # TCP connection to the EnvisaLink board (Domoticz.Connection)
    alarmConn = None
    # nested zone/partition state dict built by AlarmState (see alarm_state.py)
    alarmState = None
    nextConnect = 3
    heartbeatInterval = 20  # seconds between onHeartbeat calls
    nextTimeSync = 0        # heartbeats remaining until the next clock sync (DSC)
    oustandingPings = 0     # NOTE(review): typo of "outstanding"; kept, other code uses this name
    HWTYPE=0                # 0 = DSC (checksum protocol), 1 = Honeywell (detected at login)
    lastMsg=0               # epoch of last received message, used by the watchdog thread
def onStart(self):
    """Plugin start: configure debugging, build the initial alarm state,
    open the TCP connection to the EnvisaLink and start the watchdog thread."""
    if Parameters["Mode6"] != "0":
        Domoticz.Debugging(int(Parameters["Mode6"]))
        DumpConfigToLog()
    # Mode2 = max zones, Mode1 = max partitions (from the plugin config page)
    self.alarmState = AlarmState.get_initial_alarm_state(int(Parameters["Mode2"]), int(Parameters["Mode1"]))
    # 'Line' protocol: EnvisaLink messages are newline-terminated
    self.alarmConn = Domoticz.Connection(Name="EnvisaLink", Transport="TCP/IP", Protocol="Line", Address=Parameters["Address"], Port=Parameters["Port"])
    self.alarmConn.Connect()
    Domoticz.Heartbeat(self.heartbeatInterval)
    # Reset watchdog and start it up
    # NOTE(review): BasePlugin.handleThread is not defined in this chunk — confirm it exists elsewhere
    self.lastMsg = time()
    self.updateThread = threading.Thread(name="AlarmWatchdogThread", target=BasePlugin.handleThread, args=(self,))
    self.updateThread.start()
def onConnect(self, Connection, Status, Description):
    """Connection callback: Status 0 = success; otherwise mark devices timed out."""
    if Status != 0:
        Domoticz.Error("Failed to connect ("+str(Status)+") to: "+Connection.Address+":"+Connection.Port)
        for Key in Devices:
            UpdateDevice(Key, 0, Devices[Key].sValue, 1)
        return
    Domoticz.Log("Connected successfully to: "+Connection.Address+":"+Connection.Port)
    # force a time sync on the next heartbeat
    self.nextTimeSync = 0
def onMessage(self, Connection, Data):
    """Dispatch one line received from the EnvisaLink.

    DSC messages end with a 2-char checksum and start with a 3-digit code
    that selects a handle_* method. Honeywell boards use a different,
    checksum-less protocol detected via their "Login:" prompt.
    """
    global evl_ResponseTypes
    strData = Data.decode("utf-8", "ignore").strip()
    # Reset watchdog
    self.lastMsg = time()
    if (ValidChecksum(strData)):
        dataoffset = 0
        # some firmwares prefix messages with an "HH:MM:SS " timestamp
        if re.match(r'\d\d:\d\d:\d\d\s', strData):
            dataoffset = dataoffset + 9
        code = strData[dataoffset:dataoffset+3]
        data = strData[dataoffset+3:][:-2]  # strip the 2 checksum chars
        if code in evl_ResponseTypes:
            try:
                handlerFunc = getattr(self, "handle_"+evl_ResponseTypes[code]['handler'], self.notHandled)
                result = handlerFunc(code, data)
                self.updateDomoticz()
            except AttributeError:
                Domoticz.Error(str.format("No handler exists for code: {0}. Skipping.", evl_ResponseTypes[code]['handler']))
            except KeyError as err:
                Domoticz.Error("No handler configured for '"+str(code)+"' code.")
            except TypeError as e:
                Domoticz.Error("Type error: {0}".format(e))
        else:
            self.notHandled(code, data)
    else:
        # Message doesn't have valid DSC protocol checksum, so check if it matches Honeywell login
        if str(strData) == "Login:":
            self.HWTYPE = 1
            Domoticz.Log("Honeywell System Detected!")
            message = Parameters["Password"]+"\r\n"
            self.alarmConn.Send(message)
        # If it's Honeywell hardware forward messages to its handler
        elif self.HWTYPE == 1:
            self.handle_honeywell(str(strData))
        else:
            # BUGFIX: the old message referenced undefined locals
            # (checkSum / origChecksum) and raised NameError when reached.
            Domoticz.Error("EnvisaLink returned invalid message: '"+str(strData)+"'.")
def handle_honeywell(self, data):
    """Dispatch one line of the Honeywell (TPI) protocol: login handshake
    first, then %-prefixed status messages routed to the hw* handlers."""
    # If Login OK response
    if data == "OK":
        Domoticz.Debug("Honeywell Login OK!")
        # Add special devices if not already done
        if (not ACIDX in Devices):
            Domoticz.Device(Name="AC Power", Unit=ACIDX, Type=244, Subtype=73, Switchtype=0, Image=9).Create()
        if (not CHIMEIDX in Devices):
            Domoticz.Device(Name="Chime", Unit=CHIMEIDX, Type=244, Subtype=73, Switchtype=0, Image=8).Create()
        if (not ARMIDX in Devices):
            # selector levels map to onCommand: 10=Away, 20=Stay, 30/40=zero delay
            Options = {"LevelActions": "||||", "LevelNames": "Disarm|Arm Away|Arm Stay|Arm Away Zero Delay|Arm Stay Zero Delay", \
                "LevelOffHidden": "false", "SelectorStyle": "1"}
            Domoticz.Device(Name="Arm Mode", Unit=ARMIDX, TypeName="Selector Switch", Options=Options, Image=13).Create()
        # Request zone timer dump (to detect/add zones as devices)
        self.alarmConn.Send("^02,$\r\n")
        # Request status
        self.alarmConn.Send("*\r\n")
    # If Login FAILED response
    elif data == "FAILED":
        Domoticz.Error("Honeywell Login FAILED!")
        # Nothing to do
    # If it's a data update message
    elif data.startswith('%'):
        Domoticz.Debug("Honeywell Data message: "+data)
        # If it's zone timer dump
        if data.startswith('%FF'):
            self.handle_hwzonedump(data)
        # If it's a generic status update (sent at least every 10s)
        elif data.startswith('%00'):
            self.handle_hwstatus(data)
        # If it's a zone status update
        elif data.startswith('%01'):
            self.handle_hwzoneupdate(data)
        # If it's a partition status update
        elif data.startswith('%02'):
            self.handle_hwpartupdate(data)
        # Something else we don't know about
        else:
            Domoticz.Debug("Honeywell unhandled data: "+data)
    else:
        Domoticz.Debug("Honeywell unhandled data: "+data)
def handle_hwstatus(self, data):
    """Honeywell '%00' generic status update for one partition.

    Observed layout: data[4:6] = partition, data[7:11] = 16-bit hex status
    bitfield, data[12:14] = currently faulted zone (when not ready).
    NOTE(review): bit meanings below are inferred from the masks used here,
    not from official TPI documentation — confirm against the TPI spec.
    """
    # Get partition for status update
    part = int(data[4:6])
    # Get overall status
    status = int(data[7:11], 16)
    # check armed status
    if status & 0x8000 > 0:
        # armed stay (3=with zero delay)
        self.alarmState['partition'][part]['status'].update(evl_ArmModes['3' if status & 0x80 > 0 else '1']['status'])
    elif status & 0x4 > 0:
        # armed away (2=with zero delay)
        self.alarmState['partition'][part]['status'].update(evl_ArmModes['2' if status & 0x80 > 0 else '0']['status'])
    else:
        # disarmed, but is it 'ready' or not
        if status & 0x1000 > 0:
            # Ready
            self.alarmState['partition'][part]['status'].update(evl_ResponseTypes['650']['status'])
            # zones bypassed ?
            if status & 0x80 > 0:
                self.alarmState['partition'][part]['status']['alpha'] = "Ready with zones bypassed"
        else:
            # Not ready
            self.alarmState['partition'][part]['status'].update(evl_ResponseTypes['651']['status'])
            # Read zone number and mark it as open and save last fault time
            zone = int(data[12:14])
            self.alarmState['zone'][zone]['partition'] = part
            self.alarmState['zone'][zone]['last_fault'] = time()
            self.alarmState['zone'][zone]['status']['open'] = True
            # If we don't have a device for it
            if (not ZONE_BASE+zone in Devices):
                Domoticz.Device(Name="Zone "+str(ZONE_BASE+zone), Unit=ZONE_BASE+zone, Type=244, Subtype=73, Switchtype=11).Create()
            # Make sure device has initial state
            UpdateDevice(ZONE_BASE+zone, 1, 'Open', self.alarmState['zone'][zone]['status']['bypass'])
    # Get AC status
    if status & 0x8:
        self.alarmState['partition'][part]['status']['ac_present'] = True
        UpdateDevice(ACIDX, 1, "ON", False)
    else:
        self.alarmState['partition'][part]['status']['ac_present'] = False
        UpdateDevice(ACIDX, 0, "OFF", False)
    # Get Chime status
    if status & 0x20:
        self.alarmState['partition'][part]['status']['chime'] = True
        UpdateDevice(CHIMEIDX, 1, "ON", False)
    else:
        self.alarmState['partition'][part]['status']['chime'] = False
        UpdateDevice(CHIMEIDX, 0, "OFF", False)
    self.updateDomoticz()
def handle_hwzoneupdate(self, data):
    """Honeywell '%01' zone status bitfield: 2 hex chars per 8 zones,
    one bit per zone; a set bit means the zone is faulted (open)."""
    for zone in self.alarmState['zone']:
        # NOTE(review): the byte offset uses zone//8 while the bit index uses
        # (zone-1)%8 — e.g. zone 8 reads byte 1, bit 7. Verify this mixed
        # indexing against the TPI bitfield layout.
        pos = 4+((zone//8)*2)
        bit = int(data[pos:pos+2], 16) & (1<<((zone-1) % 8))
        if bit > 0:
            self.alarmState['zone'][zone]['last_fault'] = time()
            self.alarmState['zone'][zone]['status']['open'] = True
            # If we don't have a device for it
            if (not ZONE_BASE+zone in Devices):
                Domoticz.Device(Name="Zone "+str(ZONE_BASE+zone), Unit=ZONE_BASE+zone, Type=244, Subtype=73, Switchtype=11).Create()
        else:
            self.alarmState['zone'][zone]['status']['open'] = False
        # Update device status
        UpdateDevice(ZONE_BASE+zone, 1 if self.alarmState['zone'][zone]['status']['open'] else 0, \
            'Closed' if bit == 0 else 'Open', \
            self.alarmState['zone'][zone]['status']['bypass'])
def handle_hwpartupdate(self, data):
    """Honeywell '%02' partition status: one 2-digit decimal code per
    partition, mapped onto the DSC-style status dicts."""
    for part in self.alarmState['partition']:
        status = int(data[(2+2*part):(4+2*part)])
        Domoticz.Debug("partupdate "+str(part)+" status="+str(status)+" / "+data[(2+2*part):(4+2*part)])
        # Set this to false (only set to true in status 9)
        self.alarmState['partition'][part]['status']['alarm_in_memory'] = False;
        # Ready
        if status == 1 or status == 2:
            self.alarmState['partition'][part]['status'].update(evl_ResponseTypes['650']['status'])
            if status == 2:
                self.alarmState['partition'][part]['status']['alpha'] = "Ready with zones bypassed"
        # Not Ready
        elif status == 3:
            self.alarmState['partition'][part]['status'].update(evl_ResponseTypes['651']['status'])
        # Armed STAY
        elif status == 4:
            self.alarmState['partition'][part]['status'].update(evl_ArmModes['1']['status'])
        # Armed AWAY
        elif status == 5:
            self.alarmState['partition'][part]['status'].update(evl_ArmModes['0']['status'])
        # Armed STAY Zero Delay
        elif status == 6:
            self.alarmState['partition'][part]['status'].update(evl_ArmModes['3']['status'])
        # Exit Delay
        elif status == 7:
            self.alarmState['partition'][part]['status'].update(evl_ResponseTypes['656']['status'])
        # Alarm
        elif status == 8:
            self.alarmState['partition'][part]['status'].update(evl_ResponseTypes['654']['status'])
        # Ready with alarm in memory
        elif status == 9:
            self.alarmState['partition'][part]['status'].update(evl_ResponseTypes['650']['status'])
            self.alarmState['partition'][part]['status']['alpha'] = "Ready Alarm in Memory"
            self.alarmState['partition'][part]['status']['alarm_in_memory'] = True;
        # Armed AWAY Zero Delay
        elif status == 10:
            self.alarmState['partition'][part]['status'].update(evl_ArmModes['2']['status'])
        # If the partition is ready
        if self.alarmState['partition'][part]['status']['ready']:
            # For each zone we know to be in this partition set status to false
            for zone in self.alarmState['zone']:
                if self.alarmState['zone'][zone]['partition'] == part:
                    self.alarmState['zone'][zone]['status']['open'] = False
                    UpdateDevice(ZONE_BASE+zone, 0, 'Closed', self.alarmState['zone'][zone]['status']['bypass'])
    self.updateDomoticz()
def handle_hwzonedump(self, data):
    """Honeywell '%FF' zone timer dump: 4 hex chars per zone — a
    byte-swapped countdown from 0xFFFF in 5-second ticks since the zone
    last faulted. Used to auto-discover zones as Domoticz devices."""
    # Process timer from each zone
    for zone in self.alarmState['zone']:
        pos = 4 + (zone-1)*4;
        # If this zone has ever been seen
        if data[pos:pos+4] != "0000":
            # if it's not already added as device, add it
            if (not ZONE_BASE+zone in Devices):
                Domoticz.Device(Name="Zone "+str(ZONE_BASE+zone), Unit=ZONE_BASE+zone, Type=244, Subtype=73, Switchtype=11).Create()
            # Read timer, set open if it's less than 60 seconds ago
            # (swap the two hex byte pairs, subtract from 0xFFFF, x5 seconds)
            timer = (0xFFFF-int(str(data[pos+2:pos+4])+str(data[pos:pos+2]), 16))*5
            self.alarmState['zone'][zone]['last_fault'] = time()
            self.alarmState['zone'][zone]['status']['open'] = (timer < 60)
            Domoticz.Debug("zone "+str(zone)+" timer="+str(timer)+" epoch="+str(time()))
            # Make sure device has initial state
            UpdateDevice(ZONE_BASE+zone, 1 if self.alarmState['zone'][zone]['status']['open'] else 0, \
                'Closed' if timer > 60 else 'Open', \
                self.alarmState['zone'][zone]['status']['bypass'])
    self.updateDomoticz()
def onSecurityEvent(self, Unit, Level, Description):
    """Security Panel event: Level 0=Disarm, 1=Arm Stay, 2=Arm Away.
    Commands are only sent when partition 1 is not already in the requested
    state, because Domoticz can fire several events per user action."""
    Domoticz.Status("onSecurityEvent called for Level " + str(Level) + ": Description '" + str(Description) + "', Connected: " + str(self.alarmConn.Connected()))
    # Multiple events can be passed for the same action, e.g during arming 1 event when requested, 1 event after exit timer counts to 0
    if (Level == 0): # Disarm
        if ((self.alarmState['partition'][1]['status']['armed_stay'] == True) or (self.alarmState['partition'][1]['status']['armed_away'] == True)):
            Domoticz.Status("Requesting partition Disarm")
            if self.HWTYPE==1:
                # Honeywell: send keypresses = passcode + '1' (Off)
                self.alarmConn.Send(Parameters['Mode4']+'1')
            else:
                # DSC: checksummed Disarm command with partition + passcode
                self.alarmConn.Send(CreateChecksum(evl_Commands['Disarm']+'1'+Parameters["Mode4"]))
    elif (Level == 1): # Arm Stay
        if ((self.alarmState['partition'][1]['status']['armed_stay'] == False) and (self.alarmState['partition'][1]['status']['armed_away'] == False)):
            Domoticz.Status("Requesting partition Armed Stay")
            if self.HWTYPE==1:
                self.alarmConn.Send(Parameters['Mode4']+'3')
            else:
                self.alarmConn.Send(CreateChecksum(evl_Commands['ArmStay']+'1'))
    elif (Level == 2): # Arm Away
        if ((self.alarmState['partition'][1]['status']['armed_stay'] == False) and (self.alarmState['partition'][1]['status']['armed_away'] == False)):
            Domoticz.Status("Requesting partition Armed Away")
            if self.HWTYPE==1:
                self.alarmConn.Send(Parameters['Mode4']+'2')
            else:
                self.alarmConn.Send(CreateChecksum(evl_Commands['ArmAway']+'1'))
    else:
        Domoticz.Error("Security Event contains unknown data: '"+str(Level)+"' with description: '"+Description+"'")
def updateDomoticz(self):
    """Push the in-memory alarm state out to Domoticz devices.

    Creates missing zone/partition devices on the fly, then updates the
    per-zone contacts, per-partition Alert devices, the optional integrated
    Security Panel (Mode3) and, on Honeywell, the Arm Mode selector.
    """
    # Sync Devices to Alarm state
    for zone in self.alarmState['zone']:
        # For DSC Always check/add zones as devices, For Honeywell they're added on auto-detection (during zone timer dump)
        if self.HWTYPE == 0 and (not ZONE_BASE+zone in Devices):
            Domoticz.Device(Name="Zone "+str(ZONE_BASE+zone), Unit=ZONE_BASE+zone, Type=244, Subtype=73, Switchtype=2).Create()
        # display priority: tamper > bypass > open > closed (later checks win)
        sValue = 'Closed'
        if self.alarmState['zone'][zone]['status']['open']: sValue='Open'
        if self.alarmState['zone'][zone]['status']['bypass']: sValue='Bypass'
        if self.alarmState['zone'][zone]['status']['tamper']: sValue='Tamper'
        UpdateDevice(ZONE_BASE+zone, \
            1 if self.alarmState['zone'][zone]['status']['open'] else 0, \
            sValue, \
            self.alarmState['zone'][zone]['status']['bypass'])
    for part in self.alarmState['partition']:
        if (not PARTITION_BASE+part in Devices):
            Domoticz.Device(Name="Partition "+str(part), Unit=PARTITION_BASE+part, TypeName='Alert').Create()
        # Alert level: 1 when ready, 2 for not-ready/trouble, 3 for alarm
        nValue = 1 if self.alarmState['partition'][part]['status']['ready'] else 2
        if self.alarmState['partition'][part]['status']['trouble']: nValue=2
        if self.alarmState['partition'][part]['status']['alarm']: nValue=3
        UpdateDevice(PARTITION_BASE+part, nValue, \
            self.alarmState['partition'][part]['status']['alpha'], \
            self.alarmState['partition'][part]['status']['trouble'])
    if Parameters["Mode3"] != "False":
        if (not SECURITY_PANEL in Devices):
            #Domoticz.Device(Name="Security Panel", Unit=SECURITY_PANEL, TypeName="Security Panel").Create()
            Domoticz.Device(Name="Security Panel", Unit=SECURITY_PANEL, Type=32, Subtype=131).Create()
            Domoticz.Log("Created Domoticz integrated Security Panel device for partition 1.")
        nValue=0 # sStatusNormal
        if self.alarmState['partition'][1]['status']['alarm']: nValue=2 # sStatusAlarm
        if self.alarmState['partition'][1]['status']['armed_away']: nValue=9 # sStatusArmAway
        if self.alarmState['partition'][1]['status']['armed_stay']: nValue=11 # sStatusArmHome
        if self.alarmState['partition'][1]['status']['trouble']: nValue=nValue+128 # sStatusNormalTamper or sStatusAlarmTamper
        UpdateDevice(SECURITY_PANEL, nValue, "", self.alarmState['partition'][1]['status']['trouble'])
    if self.HWTYPE == 1:
        # Arm Mode selector level: 0=Disarm, 10=Away, 20=Stay, 30/40=zero delay
        sText = '0'
        if self.alarmState['partition'][1]['status']['armed_zero_entry_delay']:
            if self.alarmState['partition'][1]['status']['armed_away']: sText='30'
            if self.alarmState['partition'][1]['status']['armed_stay']: sText='40'
        else:
            if self.alarmState['partition'][1]['status']['armed_away']: sText='10'
            if self.alarmState['partition'][1]['status']['armed_stay']: sText='20'
        UpdateDevice(ARMIDX, 2, sText, self.alarmState['partition'][1]['status']['trouble'])
def onCommand(self, Unit, Command, Level, Hue):
    """Domoticz device command: toggles the Chime or drives the Honeywell
    Arm Mode selector by sending passcode + keypress sequences."""
    Domoticz.Debug("onCommand called for Unit " + str(Unit) + ": Parameter '" + str(Command) + "', Level: " + str(Level) + ", Connected: " + str(self.alarmConn.Connected()))
    Command = Command.strip()
    # NOTE(review): action/sep/params are parsed but never used below
    action, sep, params = Command.partition(' ')
    action = action.capitalize()
    # If Chime
    if Unit == CHIMEIDX and self.alarmConn.Connected():
        # Toggle it (passcode + '9' is the chime-toggle keypress)
        self.alarmConn.Send(Parameters['Mode4']+'9')
        # Update Device Status
        UpdateDevice(Unit, 1 if Command == 'On' else 0, Command, False)
    if Unit == ARMIDX:
        # If already armed, disarm first
        if self.alarmState['partition'][1]['status']['armed_away'] or self.alarmState['partition'][1]['status']['armed_stay']:
            self.alarmConn.Send(Parameters['Mode4']+'1')
        # Arm Away
        if Level == 10:
            self.alarmConn.Send(Parameters['Mode4']+'2')
        # Arm Stay
        elif Level == 20:
            self.alarmConn.Send(Parameters['Mode4']+'3')
        # Arm Away Zero Delay (Not available on ADT Safewatch pro 3000)
        elif Level == 30:
            self.alarmConn.Send(Parameters['Mode4']+'4')
        # Arm Stay Zero Delay
        elif Level == 40:
            self.alarmConn.Send(Parameters['Mode4']+'7')
def onNotification(self, Name, Subject, Text, Status, Priority, Sound, ImageFile):
    """Log incoming Domoticz notifications (debug aid only)."""
    fields = [Name, Subject, Text, Status, str(Priority), Sound, ImageFile]
    Domoticz.Debug("Notification: " + ",".join(fields))
def onHeartbeat(self):
    """Periodic tick (DSC): sync the panel clock hourly, send a keep-alive
    every 5 minutes, and reconnect when the link has dropped."""
    if self.HWTYPE == 1:
        # Honeywell boards push status on their own; nothing to send here
        return self.alarmConn.Connected()
    try:
        if (self.alarmConn.Connected()):
            if (self.nextTimeSync <= 0):
                now = datetime.now()
                # panel time format: HHMMMMDDYY (2-digit year)
                message = '{:02}{:02}{:02}{:02}{:02}'.format(now.hour, now.minute, now.month, now.day, now.year-2000)
                Domoticz.Debug("Sending time synchronization command ('"+message+"')")
                self.alarmConn.Send(CreateChecksum(evl_Commands['TimeSync']+message))
                self.nextTimeSync = int(3600/self.heartbeatInterval) # sync time hourly
            else:
                if (self.oustandingPings <= 0):
                    self.oustandingPings = int(300/self.heartbeatInterval) # heartbeat every 5 minutes
                    self.alarmConn.Send(CreateChecksum(evl_Commands['KeepAlive']))
                self.nextTimeSync = self.nextTimeSync - 1
                self.oustandingPings = self.oustandingPings - 1
        elif (self.alarmConn.Connecting() != True):
            Domoticz.Log("Alarm not connected, requesting re-connect.")
            self.alarmConn.Connect()
        return True
    except Exception:
        # was a bare 'except:'; narrowed so KeyboardInterrupt/SystemExit propagate
        Domoticz.Error("Unhandled exception in onHeartbeat, forcing disconnect.")
        self.alarmConn.Disconnect()
def onDisconnect(self, Connection):
    """Connection dropped; when Mode5 is enabled, flag every device as timed out."""
    Domoticz.Debug("Device has disconnected")
    if Parameters["Mode5"] == "False":
        return
    for Device in Devices:
        UpdateDevice(Device, Devices[Device].nValue, Devices[Device].sValue, 1)
def onStop(self):
    """Plugin stop: drop the connection reference.
    NOTE(review): the watchdog thread started in onStart is not joined here."""
    Domoticz.Debug("onStop called")
    self.alarmConn = None
    return True
def handle_zone_state_change(self, code, data):
    """Event 601-610."""
    # data is the zone number, 3 or 4 digits
    parse = re.match('^[0-9]{3,4}$', data)
    if parse:
        # the last 3 digits are the zone; any leading digit is ignored
        zoneNumber = int(data[-3:])
        if (zoneNumber <= int(Parameters["Mode2"])):
            self.alarmState['zone'][zoneNumber]['status'].update(evl_ResponseTypes[code]['status'])
            Domoticz.Debug(str.format("[zone {0}] state has updated: {1}", zoneNumber, json.dumps(evl_ResponseTypes[code]['status'])))
        else:
            # above the configured Mode2 zone limit
            Domoticz.Debug(str.format("[zone {0}] state change ignored, invalid zone number.", zoneNumber))
        return zoneNumber
    else:
        Domoticz.Error("Invalid data ("+data+") has been passed in the zone update.")
def handle_zone_timer_dump(self, code, data):
    """DSC zone timer dump: only logged — the dump itself is not parsed here."""
    # removed: unused 'parse = re.match(...)' local
    try:
        Domoticz.Debug(str.format("Message: '{0}' with data: {1}", evl_ResponseTypes[code]['name'], data))
    except Exception:
        # was a bare 'except:' (guards an unknown code key); narrowed
        Domoticz.Error("zone_timer_dump error: '"+code+"' command, data: "+data)
def handle_zone_bypass_update(self, code, data):
    """Event 616, Bypassed Zones Bit field Dump.

    data is a string of hex byte pairs; bit N of byte B corresponds to
    zone B*8 + N + 1. Replaces eight copy-pasted per-bit checks with a loop.
    """
    Domoticz.Debug(str.format("Message: '{0}' with data: {1}", evl_ResponseTypes[code]['name'], data))
    maxZone = int(Parameters["Mode2"])
    zoneOffset = 0
    for i in range(0, len(data), 2):
        mask = int(data[i:i+2], 16)
        for bit in range(8):
            zone = zoneOffset + bit + 1
            if zone <= maxZone:
                self.alarmState['zone'][zone]['status'].update({'bypass': (mask & (1 << bit)) > 0})
        zoneOffset = zoneOffset + 8
        if (zoneOffset > maxZone): break
def handle_partition_state_change(self, code, data):
    """Event 650-674, 652 is an exception, because 2 bytes are passed for partition and zone type."""
    # first digit is always the partition number
    partitionNumber = int(data[0])
    if (partitionNumber <= int(Parameters["Mode1"])):
        if code == '652':
            # 652 = partition armed: second digit selects the arm mode
            parse = re.match('^[0-9]{2}$', data)
            if parse:
                self.alarmState['partition'][partitionNumber]['status'].update(evl_ArmModes[data[1]]['status'])
                Domoticz.Debug(str.format("[partition {0}] state has updated: {1}", partitionNumber, json.dumps(evl_ArmModes[data[1]]['status'])))
                return partitionNumber
            else:
                Domoticz.Error("Invalid data ("+data+") has been passed when arming the alarm.")
        else:
            parse = re.match('^[0-9]+$', data)
            if parse:
                self.alarmState['partition'][partitionNumber]['status'].update(evl_ResponseTypes[code]['status'])
                Domoticz.Debug(str.format("[partition {0}] state has updated: {1}", partitionNumber, json.dumps(evl_ResponseTypes[code]['status'])))
                '''Log the user who last armed or disarmed the alarm'''
                if code == '700':
                    # digits 1-4 carry the user code
                    lastArmedBy = {'last_armed_by_user': int(data[1:5])}
                    self.alarmState['partition'][partitionNumber]['status'].update(lastArmedBy)
                elif code == '750':
                    lastDisarmedBy = {'last_disarmed_by_user': int(data[1:5])}
                    self.alarmState['partition'][partitionNumber]['status'].update(lastDisarmedBy)
                return partitionNumber
            else:
                Domoticz.Error("Invalid data ("+data+") has been passed in the partition update.")
    else:
        # above the configured Mode1 partition limit
        Domoticz.Debug(str.format("[partition {0}] state change ignored, invalid partition number.", partitionNumber))
def handle_keypad_led_change(self, code, data):
"""Event 510-511, detail the led state and led flash state respectively."""
parse = re.match('^[0-9A-F]{2}$', data)
flash = 'ON'
if (code == '511'):
flash = 'FLASH'
if parse:
mask = int(data,16)
for LED in evl_LedMask:
if (mask & evl_LedMask[LED]):
Domoticz.Log("Keypad LED "+flash+": "+LED)
return 1
else:
Domoticz.Error("Invalid data ("+data+") has been passed for code: '"+code+"'.")
def handle_keypad_update(self, code, data):
"""Handle general- non partition based info"""
for part in self.alarmState['partition']:
self.alarmState['partition'][part]['status'].update(evl_ResponseTypes[code]['status'])
Domoticz.Debug(str.format("[All partitions] state has updated: {0}", json.dumps(evl_ResponseTypes[code]['status'])))
def handle_verbose_status(self, code, data):
"""Event 849, This command is issued when a trouble appears on the system and roughly every 5 minutes until the trouble is cleared.."""
parse = re.match('^[0-9]{2}$', data)
if parse:
mask = int(data,16)
for trouble in evl_TroubleMask:
if (mask & evl_TroubleMask[trouble]):
Domoticz.Log("Verbose Trouble Status: "+trouble)
return 1
else:
Domoticz.Error("Invalid data ("+data+") has been passed for code: '"+code+"'.")
    def handle_poll_response(self, code, data):
        """Handle command responses"""
        # Plain acknowledgement of a previously sent command; no state changes.
        Domoticz.Debug("'"+evl_ResponseTypes[code]['name']+"' command acknowledged.")
def handle_time_response(self, code, data):
"""Handle time responses, e.g. '2128042318'"""
parse = re.match('^[0-9]{10}$', data)
if parse:
try:
theTime = datetime.now()
theTime.replace(hour=int(data[:2]),minute=int(data[2:4]),month=int(data[4:6]),day=int(data[6:8]),year=2000+int(data[8:]))
message = '{:02}:{:02} {:02}/{:02}/{:04}'.format(theTime.hour, theTime.minute, theTime.day, theTime.month, theTime.year)
Domoticz.Debug("Received time synchronization ('"+message+"')")
except ValueError:
Domoticz.Error(str.format("Error processing time synchronization: '{0}'. Skipping.", data))
else:
Domoticz.Error("Invalid time data ("+data+") has been passed for code: '"+code+"'.")
    def handle_output_pressed(self, code, data):
        """Command Output Pressed, code: '912', Data: <partition><output> e.g '11'"""
        part = data[:1]
        output = data[-1:]
        # Device unit = output base offset + the raw two-digit payload value.
        deviceNo = OUTPUT_BASE+int(data)
        if (not deviceNo in Devices):
            # Auto-create a push-button device (Type 17, Switchtype 9) on first use.
            Domoticz.Device(Name="Partition "+part+" Output "+output, Unit=deviceNo, Type=17, Subtype=0, Switchtype=9).Create()
            Domoticz.Log("Created Command Output device for Partition "+part+" Output "+output)
        UpdateDevice(deviceNo, 1, "On", False)
        Domoticz.Log("Command output pressed for Partition "+part+" Output "+output)
def handle_system_response_error(self, code, data):
"""Handle system error responses"""
try:
Domoticz.Error(str.format("System Error: '{0}' with data: {1}", evl_ResponseTypes[code]['name'], data))
Domoticz.Error(str.format("---> Details: '{0}'", evl_Errors[data]['description'] ))
except:
Domoticz.Error("Response error not handled: '"+code+"' command, data: "+data)
def handle_command_response_error(self, code, data):
"""Handle command error responses"""
try:
Domoticz.Error(str.format("System Error: '{0}' with data: {1}", evl_ResponseTypes[code]['name'], data))
except:
Domoticz.Error("Response error not handled: '"+code+"' command, data: "+data)
def handle_message_response_error(self, code, data):
"""Handle command message responses"""
try:
Domoticz.Log(str.format("Message {0}: '{1}' with data: {2}", code, evl_ResponseTypes[code]['name'], data))
except:
Domoticz.Error("Response message not handled: '"+code+"' command, data: "+data)
    def handle_login(self, command, data):
        # Login interaction (event 505): '0' = failed, '1' = logged in,
        # '3' = EnvisaLink requests the password.
        if (data == "0"):
            Domoticz.Error("Login Unsuccessful.")
        elif (data == "1"):
            Domoticz.Debug("Login Successful.")
            # Kick off the initial state refresh. The numeric second argument
            # to Send() is presumably a send delay in seconds -- TODO confirm
            # against the Domoticz Connection.Send() API.
            self.alarmConn.Send(CreateChecksum(evl_Commands['StatusReport']))
            self.alarmConn.Send(CreateChecksum(evl_Commands['TimeBroadcast']), 3)
            self.alarmConn.Send(CreateChecksum(evl_Commands['PartitionKeypress']+'1*1#'), 5)
            self.alarmConn.Send(CreateChecksum(evl_Commands['DumpZoneTimers']), 8)
        elif (data == "3"):
            # Password requested: answer with the configured credential.
            message = evl_Commands['Login']+Parameters["Password"]
            message = CreateChecksum(message)
            Domoticz.Debug("Sending Login Response.")
            self.alarmConn.Send(message)
    def notHandled(self, command, data):
        # Fallback for any EnvisaLink message without a registered handler.
        Domoticz.Error("EnvisaLink returned unhandled message: '"+command+"', ignored. Data: '"+data+"'")
    def SyncDevices(self, TimedOut):
        # Make sure that the Domoticz devices are in sync (by definition, the device is connected)
        # NOTE(review): this reads media-player style attributes (playerState,
        # mediaDescrption, mediaLevel, percentComplete) that are never set in
        # the visible alarm-plugin code -- looks copied from a media-player
        # plugin template; verify these attributes exist before relying on it.
        if (1 in Devices):
            UpdateDevice(1, self.playerState, self.mediaDescrption, TimedOut)
        if (2 in Devices):
            if (Devices[2].nValue != self.mediaLevel) or (Devices[2].TimedOut != TimedOut):
                UpdateDevice(2, self.mediaLevel, str(self.mediaLevel), TimedOut)
        if (4 in Devices):
            # States 4/5 presumably mean "playing"; shows progress when active.
            if (self.playerState == 4) or (self.playerState == 5):
                UpdateDevice(4, 2, str(self.percentComplete), TimedOut)
            else:
                UpdateDevice(4, 0, str(self.percentComplete), TimedOut)
        return
def handleThread(self):
chkTime = 0
Domoticz.Debug("WATCHDOG started.")
while self.alarmConn != None:
try:
sleep(1)
chkTime = chkTime + 1
if chkTime >= 10:
chkTime = 0
if self.lastMsg + 300 < time():
self.lastMsg = time()
Domoticz.Debug("WATCHDOG saw no messages in 5 minutes, reconnecting...")
self.alarmConn.Disconnect()
self.alarmConn.Connect()
except:
pass
Domoticz.Debug("WATCHDOG exited!")
def ValidChecksum(message):
    """Return True when the trailing two hex digits of *message* equal the
    low byte of the ordinal sum of all preceding characters."""
    computed = sum(ord(ch) for ch in message[:-2]) & 255
    try:
        claimed = int(message[-2:], 16)
    except ValueError:
        # Suffix is not two hex digits (or message is too short).
        return False
    return computed == claimed
def CreateChecksum(message):
    """Append the TPI checksum (low byte of the ordinal sum, as two uppercase
    hex digits) and a CRLF terminator to *message*."""
    total = sum(ord(ch) for ch in message)
    return '{0}{1:02X}\r\n'.format(message, total & 255)
# Single module-level plugin instance. Domoticz invokes the global hooks
# below; each one simply delegates to this instance.
global _plugin
_plugin = BasePlugin()


def onStart():
    # Plugin lifecycle: called once when the plugin starts.
    global _plugin
    _plugin.onStart()


def onStop():
    # Plugin lifecycle: called once when the plugin stops.
    global _plugin
    _plugin.onStop()


def onConnect(Connection, Status, Description):
    # Transport event: connection attempt finished (Status 0 = success).
    global _plugin
    _plugin.onConnect(Connection, Status, Description)


def onMessage(Connection, Data):
    # Transport event: data received from the EnvisaLink.
    global _plugin
    _plugin.onMessage(Connection, Data)


def onSecurityEvent(Unit, Level, Description):
    global _plugin
    _plugin.onSecurityEvent(Unit, Level, Description)


def onCommand(Unit, Command, Level, Hue):
    # UI event: a Domoticz device belonging to this plugin was switched.
    global _plugin
    _plugin.onCommand(Unit, Command, Level, Hue)


def onNotification(Name, Subject, Text, Status, Priority, Sound, ImageFile):
    global _plugin
    _plugin.onNotification(Name, Subject, Text, Status, Priority, Sound, ImageFile)


def onDisconnect(Connection):
    global _plugin
    _plugin.onDisconnect(Connection)


def onHeartbeat():
    # Periodic tick from Domoticz (default every 10 seconds).
    global _plugin
    _plugin.onHeartbeat()
# Generic helper functions
def DumpConfigToLog():
    # Debug helper: dump the plugin's Parameters, Settings, Images and
    # Devices globals to the Domoticz log.
    for x in Parameters:
        if Parameters[x] != "":
            Domoticz.Debug( "'" + x + "':'" + str(Parameters[x]) + "'")
    Domoticz.Debug("Settings count: " + str(len(Settings)))
    for x in Settings:
        Domoticz.Debug( "'" + x + "':'" + str(Settings[x]) + "'")
    Domoticz.Debug("Image count: " + str(len(Images)))
    for x in Images:
        Domoticz.Debug( "'" + x + "':'" + str(Images[x]) + "'")
    Domoticz.Debug("Device count: " + str(len(Devices)))
    for x in Devices:
        Domoticz.Debug("Device: " + str(x) + " - " + str(Devices[x]))
        Domoticz.Debug("Device ID: '" + str(Devices[x].ID) + "'")
        Domoticz.Debug("Device Name: '" + Devices[x].Name + "'")
        Domoticz.Debug("Device nValue: " + str(Devices[x].nValue))
        Domoticz.Debug("Device sValue: '" + Devices[x].sValue + "'")
        Domoticz.Debug("Device LastLevel: " + str(Devices[x].LastLevel))
        Domoticz.Debug("Device Image: " + str(Devices[x].Image))
    return
def UpdateDevice(Unit, nValue, sValue, TimedOut):
    """Push nValue/sValue/TimedOut to a Domoticz device, but only when the
    device still exists and at least one of the fields actually changed."""
    # Make sure that the Domoticz device still exists (they can be deleted) before updating it
    if Unit not in Devices:
        return
    device = Devices[Unit]
    changed = (device.nValue != nValue) or (device.sValue != sValue) or (device.TimedOut != TimedOut)
    if changed:
        device.Update(nValue=nValue, sValue=str(sValue), TimedOut=TimedOut)
        Domoticz.Log("Update "+str(nValue)+":'"+str(sValue)+"' ("+Devices[Unit].Name+") TimedOut="+str(TimedOut))
    return
|
oits_runner.py | from django.core.management.base import BaseCommand
from django.conf import settings
from oits_params.models import OitsParams
from results_viewer.models import TrajectoryResult
from pathlib import Path
from multiprocessing import Process
import json
import os
import sys
import time
import zipfile
class Command(BaseCommand):
    """Always-on management command that polls OitsParams rows, launching one
    worker process per new optimisation run (at most 3 concurrently) and
    terminating runs flagged for cancellation."""

    # run.id -> multiprocessing.Process for currently executing runs.
    # Parent-side bookkeeping only: worker processes receive a copy.
    executing_processes = {}

    def handle(self, *args, **options):
        '''
        Always on task that checks for any new runs
        '''
        home = str(Path.home())
        oits_lib_path = settings.OITS_LIBRARY.replace('~', home)
        print('settings: {0}'.format(oits_lib_path))
        # Finds the OITS_optimizer.py in the OITS_AH_Linux folder
        sys.path.insert(0, oits_lib_path)
        print('Starting OitsRunner')
        while True:
            # FIX: reap finished workers here. execute_run runs in a child
            # process, so it cannot remove itself from this parent-side dict
            # (the original tried and also misspelled the attribute); without
            # reaping, the 3-slot limit filled up permanently.
            finished = [run_id for run_id, proc in self.executing_processes.items()
                        if not proc.is_alive()]
            for run_id in finished:
                self.executing_processes.pop(run_id)
            # Cancel any processes that have been requested to be cancelled
            cancelling_runs = OitsParams.objects.filter(status=OitsParams.CANCELLING).order_by('created_at')
            for run in cancelling_runs:
                if run.id in self.executing_processes:
                    self.executing_processes[run.id].terminate()
                    self.executing_processes.pop(run.id)
                # Mark CANCELLED even when the worker already finished or was
                # never started, so the row doesn't stay CANCELLING forever.
                run.status = OitsParams.CANCELLED
                run.save()
            new_runs = OitsParams.objects.filter(status=OitsParams.NEW).order_by('created_at')
            for run in new_runs:
                # Guard against re-launching a run whose worker has not yet
                # saved the PROCESSING status.
                if len(self.executing_processes) < 3 and run.id not in self.executing_processes:
                    process = Process(target=self.execute_run, args=(run, oits_lib_path))
                    self.executing_processes[run.id] = process
                    process.start()
            time.sleep(10)

    def execute_run(self, run, oits_lib_path):
        """Worker-process entry point: run the OITS optimiser for *run*,
        archive its output files and record the outcome."""
        print('Processing {0}'.format(run.uid))
        run.status = OitsParams.PROCESSING
        run.save()
        params = json.loads(run.parameters)
        # Append Spice directory to filenames
        # NOTE(review): assumes oits_lib_path ends with '/' -- confirm.
        bsp = []
        for file in params['BSP']:
            bsp.append(oits_lib_path + "SPICE/" + file)
        try:
            from OITS_optimizer import OITS_optimizer
            OITS_instance = OITS_optimizer()
            OITS_instance.set_OITS(
                0 if params['trajectory_optimization'] else 1,
                str(run.uid),
                params['Nbody'],
                params['ID'],
                params['NIP'],
                params['rIP'],
                params['thetaIP'],
                params['thiIP'],
                params['thetalb'],
                params['thetaub'],
                params['thilb'],
                params['thiub'],
                params['t01'].lower(),
                params['tmin1'].lower(),
                params['tmax1'].lower(),
                params['t0'],
                params['tmin'],
                params['tmax'],
                params['Periacon'],
                params['dVcon'],
                params['Perihcon'],
                0,  # Peroflag
                params['Duration'],
                1 if params['PROGRADE_ONLY'] else 0,
                1 if params['RENDEZVOUS'] else 0,
                params['Ndata'],
                params['RUN_TIME'],
                len(bsp),  # NBSP
                bsp,  # BSP
                oits_lib_path + "SPICE/naif0012.tls")  # LSF
            OITS_instance.convert_python_to_C()
            OITS_instance.OITS()
            print('Processing {0} finished'.format(run.uid))
            run.status = OitsParams.COMPLETE
            # FIX: persist the COMPLETE status -- the original never saved it,
            # leaving successful runs stuck at PROCESSING in the database.
            run.save()
            self.create_archive(run)
            result = TrajectoryResult(oits_params=run)
            result.populate_values_from_output_file()
        except Exception as e:
            result = TrajectoryResult(oits_params=run)
            result.exception = str(e)
            result.save()
            run.status = OitsParams.ERROR
            run.save()
        print('Completed {0}'.format(run.uid))
        # FIX: removed 'self.executing_threads.pop(run.id)' -- that attribute
        # never existed (AttributeError), and a pop in this child process
        # could not affect the parent's dict anyway. The parent's poll loop
        # now reaps finished processes via is_alive().

    def create_archive(self, run):
        '''
        Gets all the files and stores them as a zip in media directory
        for download
        '''
        print('creating archive')
        home = str(Path.home())
        filename = str(run.uid) + '.zip'
        zip_name = os.path.join(settings.MEDIA_ROOT.replace('~', home), filename)
        print('Writing to {0}'.format(zip_name))
        # FIX: context manager guarantees the zip is closed (and flushed)
        # even if adding a file raises.
        with zipfile.ZipFile(zip_name, 'w', zipfile.ZIP_DEFLATED) as zf:
            cwd = os.getcwd()
            # Every output file produced by the optimiser embeds the run uid
            # in its name; sweep them into the archive and delete them.
            for f in os.listdir(cwd):
                if str(run.uid) in f:
                    print('Adding file {0}'.format(f))
                    zf.write(os.path.join(cwd, f), arcname=f)
                    os.remove(os.path.join(cwd, f))
|
test_server.py | import asyncio
import json
import os
import time
import urllib.parse
import uuid
from contextlib import ExitStack
from http import HTTPStatus
from multiprocessing import Process, Manager
from multiprocessing.managers import DictProxy
from pathlib import Path
from typing import List, Text, Type, Generator, NoReturn, Dict, Optional
from unittest.mock import Mock, ANY
import pytest
import requests
from _pytest import pathlib
from _pytest.monkeypatch import MonkeyPatch
from aioresponses import aioresponses
from freezegun import freeze_time
from mock import MagicMock
from ruamel.yaml import StringIO
from sanic import Sanic
from sanic.testing import SanicASGITestClient
import rasa
import rasa.constants
import rasa.core.jobs
import rasa.nlu
import rasa.server
import rasa.shared.constants
import rasa.shared.utils.io
import rasa.utils.io
from rasa.core import utils
from rasa.core.agent import Agent, load_agent
from rasa.core.channels import (
channel,
CollectingOutputChannel,
RestInput,
SlackInput,
CallbackInput,
)
from rasa.core.channels.slack import SlackBot
from rasa.core.tracker_store import InMemoryTrackerStore
from rasa.model import unpack_model
from rasa.nlu.test import CVEvaluationResult
from rasa.shared.core import events
from rasa.shared.core.constants import (
ACTION_SESSION_START_NAME,
ACTION_LISTEN_NAME,
REQUESTED_SLOT,
SESSION_START_METADATA_SLOT,
)
from rasa.shared.core.domain import Domain, SessionConfig
from rasa.shared.core.events import (
Event,
UserUttered,
SlotSet,
BotUttered,
ActionExecuted,
SessionStarted,
)
from rasa.shared.core.trackers import DialogueStateTracker
from rasa.shared.nlu.constants import INTENT_NAME_KEY
from rasa.train import TrainingResult
from rasa.utils.endpoints import EndpointConfig
from tests.core.conftest import DEFAULT_STACK_CONFIG
from tests.nlu.utilities import ResponseTest
from tests.utilities import json_of_latest_request, latest_request
# a couple of event instances that we can use for testing
test_events = [
    Event.from_parameters(
        {
            "event": UserUttered.type_name,
            "text": "/goodbye",
            "parse_data": {
                "intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"},
                "entities": [],
            },
        }
    ),
    BotUttered("Welcome!", {"test": True}),
    # Same slot set with an int and then a str, plus None and a mixed list,
    # to exercise (de)serialization of different value types.
    SlotSet("cuisine", 34),
    SlotSet("cuisine", "34"),
    SlotSet("location", None),
    SlotSet("location", [34, "34", None]),
]

# sequence of events expected at the beginning of trackers
session_start_sequence: List[Event] = [
    ActionExecuted(ACTION_SESSION_START_NAME),
    SessionStarted(),
    ActionExecuted(ACTION_LISTEN_NAME),
]
# Thin fixtures exposing the ASGI test client of each preconfigured server
# fixture (full server, NLU-only, core-only, secured, untrained, no-API).
@pytest.fixture
def rasa_app_without_api(rasa_server_without_api: Sanic) -> SanicASGITestClient:
    return rasa_server_without_api.asgi_client


@pytest.fixture
def rasa_app(rasa_server: Sanic) -> SanicASGITestClient:
    return rasa_server.asgi_client


@pytest.fixture
def rasa_non_trained_app(rasa_non_trained_server: Sanic) -> SanicASGITestClient:
    return rasa_non_trained_server.asgi_client


@pytest.fixture
def rasa_app_nlu(rasa_nlu_server: Sanic) -> SanicASGITestClient:
    return rasa_nlu_server.asgi_client


@pytest.fixture
def rasa_app_core(rasa_core_server: Sanic) -> SanicASGITestClient:
    return rasa_core_server.asgi_client


@pytest.fixture
def rasa_secured_app(rasa_server_secured: Sanic) -> SanicASGITestClient:
    return rasa_server_secured.asgi_client


@pytest.fixture
def rasa_non_trained_secured_app(
    rasa_non_trained_server_secured: Sanic,
) -> SanicASGITestClient:
    return rasa_non_trained_server_secured.asgi_client


@pytest.fixture()
async def tear_down_scheduler() -> Generator[None, None, None]:
    # Reset the module-global job scheduler after the test so scheduled jobs
    # do not leak into other tests.
    yield None
    rasa.core.jobs.__scheduler = None
# The root endpoint greets regardless of training state, API flag or auth.
async def test_root(rasa_non_trained_app: SanicASGITestClient):
    _, response = await rasa_non_trained_app.get("/")
    assert response.status == HTTPStatus.OK
    assert response.text.startswith("Hello from Rasa:")


async def test_root_without_enable_api(rasa_app_without_api: SanicASGITestClient):
    _, response = await rasa_app_without_api.get("/")
    assert response.status == HTTPStatus.OK
    assert response.text.startswith("Hello from Rasa:")


async def test_root_secured(rasa_non_trained_secured_app: SanicASGITestClient):
    _, response = await rasa_non_trained_secured_app.get("/")
    assert response.status == HTTPStatus.OK
    assert response.text.startswith("Hello from Rasa:")


# /version reports both the package version and the compatibility floor.
async def test_version(rasa_non_trained_app: SanicASGITestClient):
    _, response = await rasa_non_trained_app.get("/version")
    content = response.json()
    assert response.status == HTTPStatus.OK
    assert content.get("version") == rasa.__version__
    assert (
        content.get("minimum_compatible_version")
        == rasa.constants.MINIMUM_COMPATIBLE_VERSION
    )


# /status exposes the fingerprint and the path of the loaded model archive.
async def test_status(rasa_app: SanicASGITestClient, trained_rasa_model: Text):
    _, response = await rasa_app.get("/status")
    model_file = response.json()["model_file"]
    assert response.status == HTTPStatus.OK
    assert "fingerprint" in response.json()
    assert os.path.isfile(model_file)
    assert model_file == trained_rasa_model


async def test_status_nlu_only(
    rasa_app_nlu: SanicASGITestClient, trained_nlu_model: Text
):
    _, response = await rasa_app_nlu.get("/status")
    model_file = response.json()["model_file"]
    assert response.status == HTTPStatus.OK
    assert "fingerprint" in response.json()
    assert "model_file" in response.json()
    assert model_file == trained_nlu_model


# Without a token, the secured server rejects /status.
async def test_status_secured(rasa_secured_app: SanicASGITestClient):
    _, response = await rasa_secured_app.get("/status")
    assert response.status == HTTPStatus.UNAUTHORIZED


# With no agent loaded, /status signals a conflict rather than succeeding.
async def test_status_not_ready_agent(rasa_app: SanicASGITestClient):
    rasa_app.app.agent = None
    _, response = await rasa_app.get("/status")
    assert response.status == HTTPStatus.CONFLICT
@pytest.fixture
def shared_statuses() -> DictProxy:
    # Cross-process dict used to coordinate the fake training with the test.
    return Manager().dict()


@pytest.fixture
def background_server(
    shared_statuses: DictProxy, tmpdir: pathlib.Path, monkeypatch: MonkeyPatch
) -> Generator[Process, None, None]:
    # Create a fake model archive which the mocked train function can return
    fake_model = Path(tmpdir) / "fake_model.tar.gz"
    fake_model.touch()
    fake_model_path = str(fake_model)

    # Fake training function which blocks until we tell it to stop blocking
    # If we can send a status request while this is blocking, we can be sure that the
    # actual training is also not blocking
    async def mocked_training_function(*_, **__) -> TrainingResult:
        # Tell the others that we are now blocking
        shared_statuses["started_training"] = True
        # Block until somebody tells us to not block anymore
        while shared_statuses.get("stop_training") is not True:
            time.sleep(1)

        return TrainingResult(model=fake_model_path)

    def run_server(monkeypatch: MonkeyPatch) -> NoReturn:
        import sys

        # Patch training *inside the child process* before starting the CLI.
        monkeypatch.setattr(
            sys.modules["rasa.train"], "train_async", mocked_training_function,
        )

        from rasa import __main__

        sys.argv = ["rasa", "run", "--enable-api"]
        __main__.main()

    server = Process(target=run_server, args=(monkeypatch,))
    yield server
    server.terminate()
@pytest.fixture()
def training_request(
    shared_statuses: DictProxy, tmp_path: Path
) -> Generator[Process, None, None]:
    # Prepares (but does not start) a process that POSTs a full training
    # payload to the background server and records the status code.
    def send_request() -> None:
        payload = {}
        project_path = Path("examples") / "formbot"

        for file in [
            "domain.yml",
            "config.yml",
            Path("data") / "rules.yml",
            Path("data") / "stories.yml",
            Path("data") / "nlu.yml",
        ]:
            full_path = project_path / file
            # Read in as dictionaries to avoid that keys, which are specified in
            # multiple files (such as 'version'), clash.
            content = rasa.shared.utils.io.read_yaml_file(full_path)
            payload.update(content)

        concatenated_payload_file = tmp_path / "concatenated.yml"
        rasa.shared.utils.io.write_yaml(payload, concatenated_payload_file)

        payload_as_yaml = concatenated_payload_file.read_text()

        response = requests.post(
            "http://localhost:5005/model/train",
            data=payload_as_yaml,
            headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
            params={"force_training": True},
        )
        shared_statuses["training_result"] = response.status_code

    train_request = Process(target=send_request)
    yield train_request
    train_request.terminate()
# Due to unknown reasons this test can not be run in pycharm, it
# results in segfaults...will skip in that case - test will still get run on CI.
# It also doesn't run on Windows because of Process-related calls and an attempt
# to start/terminate a process. We will investigate this case further later:
# https://github.com/RasaHQ/rasa/issues/6302
@pytest.mark.skipif("PYCHARM_HOSTED" in os.environ, reason="results in segfault")
@pytest.mark.skip_on_windows
def test_train_status_is_not_blocked_by_training(
    background_server: Process, shared_statuses: DictProxy, training_request: Process
):
    # End-to-end: while a (mocked, blocking) training runs in a background
    # server process, /status must still answer and report the job count.
    background_server.start()

    def is_server_ready() -> bool:
        try:
            return (
                requests.get("http://localhost:5005/status").status_code
                == HTTPStatus.OK
            )
        except Exception:
            return False

    # wait until server is up before sending train request and status test loop
    start = time.time()
    while not is_server_ready() and time.time() - start < 60:
        time.sleep(1)

    assert is_server_ready()

    training_request.start()

    # Wait until the blocking training function was called
    start = time.time()
    while (
        shared_statuses.get("started_training") is not True and time.time() - start < 60
    ):
        time.sleep(1)

    # Check if the number of currently running trainings was incremented
    response = requests.get("http://localhost:5005/status")
    assert response.status_code == HTTPStatus.OK
    assert response.json()["num_active_training_jobs"] == 1

    # Tell the blocking training function to stop
    shared_statuses["stop_training"] = True

    start = time.time()
    while shared_statuses.get("training_result") is None and time.time() - start < 60:
        time.sleep(1)
    assert shared_statuses.get("training_result")

    # Check that the training worked correctly
    assert shared_statuses["training_result"] == HTTPStatus.OK

    # Check if the number of currently running trainings was decremented
    response = requests.get("http://localhost:5005/status")
    assert response.status_code == HTTPStatus.OK
    assert response.json()["num_active_training_jobs"] == 0
@pytest.mark.parametrize(
    "response_test",
    [
        ResponseTest(
            "/model/parse",
            {
                "entities": [],
                "intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"},
                "text": "hello",
            },
            payload={"text": "hello"},
        ),
        # NOTE(review): this case duplicates the one above -- possibly meant
        # to vary the payload; verify against upstream history.
        ResponseTest(
            "/model/parse",
            {
                "entities": [],
                "intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"},
                "text": "hello",
            },
            payload={"text": "hello"},
        ),
        # Non-ASCII text must round-trip unchanged.
        ResponseTest(
            "/model/parse",
            {
                "entities": [],
                "intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"},
                "text": "hello ńöñàśçií",
            },
            payload={"text": "hello ńöñàśçií"},
        ),
    ],
)
async def test_parse(rasa_app: SanicASGITestClient, response_test: ResponseTest):
    # /model/parse returns the standard entities/intent/text structure.
    _, response = await rasa_app.post(
        response_test.endpoint, json=response_test.payload
    )
    rjs = response.json()
    assert response.status == HTTPStatus.OK
    assert all(prop in rjs for prop in ["entities", "intent", "text"])
    assert rjs["entities"] == response_test.expected_response["entities"]
    assert rjs["text"] == response_test.expected_response["text"]
    assert rjs["intent"] == response_test.expected_response["intent"]
@pytest.mark.parametrize(
    "response_test",
    [
        ResponseTest(
            "/model/parse?emulation_mode=wit",
            {
                "entities": [],
                "intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"},
                "text": "hello",
            },
            payload={"text": "hello"},
        ),
        ResponseTest(
            "/model/parse?emulation_mode=dialogflow",
            {
                "entities": [],
                "intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"},
                "text": "hello",
            },
            payload={"text": "hello"},
        ),
        ResponseTest(
            "/model/parse?emulation_mode=luis",
            {
                "entities": [],
                "intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"},
                "text": "hello ńöñàśçií",
            },
            payload={"text": "hello ńöñàśçií"},
        ),
    ],
)
async def test_parse_with_different_emulation_mode(
    rasa_app: SanicASGITestClient, response_test: ResponseTest
):
    # Each supported emulation mode (wit / dialogflow / luis) must be accepted;
    # only the status code is asserted, since the payload shape differs per mode.
    _, response = await rasa_app.post(
        response_test.endpoint, json=response_test.payload
    )
    assert response.status == HTTPStatus.OK
async def test_parse_without_nlu_model(rasa_app_core: SanicASGITestClient):
    # A core-only server still answers /model/parse with the standard keys.
    _, response = await rasa_app_core.post("/model/parse", json={"text": "hello"})
    assert response.status == HTTPStatus.OK

    rjs = response.json()
    assert all(prop in rjs for prop in ["entities", "intent", "text"])


async def test_parse_on_invalid_emulation_mode(
    rasa_non_trained_app: SanicASGITestClient,
):
    # An unknown emulation_mode must be rejected with 400, not silently ignored.
    _, response = await rasa_non_trained_app.post(
        "/model/parse?emulation_mode=ANYTHING", json={"text": "hello"}
    )
    assert response.status == HTTPStatus.BAD_REQUEST
async def test_train_stack_success_with_md(
    rasa_app: SanicASGITestClient,
    default_domain_path: Text,
    default_stack_config: Text,
    default_nlu_data: Text,
    tmp_path: Path,
):
    """Training via the JSON payload (with Markdown stories) returns a model
    archive that carries a fingerprint."""
    payload = dict(
        domain=Path(default_domain_path).read_text(),
        config=Path(default_stack_config).read_text(),
        stories=Path("data/test_stories/stories_defaultdomain.md").read_text(),
        nlu=Path(default_nlu_data).read_text(),
    )

    _, response = await rasa_app.post("/model/train", json=payload)
    assert response.status == HTTPStatus.OK

    assert response.headers["filename"] is not None

    # Consolidated: use the shared helper instead of repeating the
    # save/unpack/fingerprint boilerplate inline (kept in sync with the
    # other training tests).
    assert_trained_model(response.body, tmp_path)
async def test_train_nlu_success(
    rasa_app: SanicASGITestClient,
    default_stack_config: Text,
    default_nlu_data: Text,
    default_domain_path: Text,
    tmp_path: Path,
):
    """Training via a concatenated YAML payload returns a model archive that
    carries a fingerprint."""
    domain_data = rasa.shared.utils.io.read_yaml_file(default_domain_path)
    config_data = rasa.shared.utils.io.read_yaml_file(default_stack_config)
    nlu_data = rasa.shared.utils.io.read_yaml_file(default_nlu_data)

    # combine all data into our payload
    payload = {
        key: val for d in [domain_data, config_data, nlu_data] for key, val in d.items()
    }

    data = StringIO()
    rasa.shared.utils.io.write_yaml(payload, data)

    _, response = await rasa_app.post(
        "/model/train",
        data=data.getvalue(),
        headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
    )
    assert response.status == HTTPStatus.OK

    # Consolidated: shared helper replaces the inline save/unpack/fingerprint
    # boilerplate duplicated across the training tests.
    assert_trained_model(response.body, tmp_path)
async def test_train_core_success_with(
    rasa_app: SanicASGITestClient,
    default_stack_config: Text,
    default_stories_file: Text,
    default_domain_path: Text,
    tmp_path: Path,
):
    """Training from raw concatenated YAML text returns a model archive that
    carries a fingerprint."""
    payload = f"""
{Path(default_domain_path).read_text()}
{Path(default_stack_config).read_text()}
{Path(default_stories_file).read_text()}
"""

    _, response = await rasa_app.post(
        "/model/train",
        data=payload,
        headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
    )
    assert response.status == HTTPStatus.OK

    # Consolidated: shared helper replaces the inline save/unpack/fingerprint
    # boilerplate duplicated across the training tests.
    assert_trained_model(response.body, tmp_path)
async def test_train_with_retrieval_events_success(
    rasa_app: SanicASGITestClient, default_stack_config: Text, tmp_path: Path
):
    # Training data including retrieval intents/responses must train end to end.
    # ExitStack keeps all five fixture files open for the payload assembly.
    with ExitStack() as stack:
        domain_file = stack.enter_context(
            open("data/test_domains/default_retrieval_intents.yml")
        )
        config_file = stack.enter_context(open(default_stack_config))
        core_file = stack.enter_context(
            open("data/test_stories/stories_retrieval_intents.md")
        )
        responses_file = stack.enter_context(open("data/test_responses/default.yml"))
        nlu_file = stack.enter_context(
            open("data/test/stories_default_retrieval_intents.yml")
        )

        payload = dict(
            domain=domain_file.read(),
            config=config_file.read(),
            stories=core_file.read(),
            responses=responses_file.read(),
            nlu=nlu_file.read(),
        )

    # Retrieval-intent training is slow; allow up to 5 minutes.
    _, response = await rasa_app.post("/model/train", json=payload, timeout=60 * 5)
    assert response.status == HTTPStatus.OK
    assert_trained_model(response.body, tmp_path)
def assert_trained_model(response_body: bytes, tmp_path: Path) -> None:
    """Persist a served model archive, unpack it and verify that it carries a
    fingerprint file."""
    archive = tmp_path / "model.tar.gz"
    archive.write_bytes(response_body)
    unpacked = unpack_model(str(archive))
    assert os.path.exists(os.path.join(unpacked, "fingerprint.json"))
@pytest.mark.parametrize(
    "payload",
    [
        # Each payload uses a deprecated JSON-training field combination.
        {"config": None, "stories": None, "nlu": None, "domain": None, "force": True},
        {
            "config": None,
            "stories": None,
            "nlu": None,
            "domain": None,
            "force": False,
            "save_to_default_model_directory": True,
        },
        {
            "config": None,
            "stories": None,
            "nlu": None,
            "domain": None,
            "save_to_default_model_directory": False,
        },
    ],
)
def test_deprecation_warnings_json_payload(payload: Dict):
    # Validation of deprecated JSON payload fields must emit a FutureWarning.
    with pytest.warns(FutureWarning):
        rasa.server._validate_json_training_payload(payload)
async def test_train_with_yaml(rasa_app: SanicASGITestClient, tmp_path: Path):
    # Minimal but complete inline YAML: stories, rules, intents, NLU examples,
    # responses, language, policies and pipeline in one document.
    training_data = """
stories:
- story: My story
  steps:
  - intent: greet
  - action: utter_greet

rules:
- rule: My rule
  steps:
  - intent: greet
  - action: utter_greet

intents:
- greet

nlu:
- intent: greet
  examples: |
    - hi
    - hello

responses:
  utter_greet:
  - text: Hi

language: en

policies:
- name: RulePolicy

pipeline:
  - name: KeywordIntentClassifier
"""
    _, response = await rasa_app.post(
        "/model/train",
        data=training_data,
        headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
    )

    assert response.status == HTTPStatus.OK
    assert_trained_model(response.body, tmp_path)
async def test_train_with_invalid_yaml(rasa_non_trained_app: SanicASGITestClient):
    # Malformed YAML must be rejected with 400 rather than crash training.
    invalid_yaml = """
rules:
rule my rule
"""

    _, response = await rasa_non_trained_app.post(
        "/model/train",
        data=invalid_yaml,
        headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
    )
    assert response.status == HTTPStatus.BAD_REQUEST
@pytest.mark.parametrize(
    "headers, expected",
    [({}, False), ({"force_training": False}, False), ({"force_training": True}, True)],
)
def test_training_payload_from_yaml_force_training(
    headers: Dict, expected: bool, tmp_path: Path
):
    # The force_training query arg maps onto the training payload; absent
    # means False.
    request = Mock()
    request.body = b""
    request.args = headers

    payload = rasa.server._training_payload_from_yaml(request, tmp_path)
    assert payload.get("force_training") == expected
@pytest.mark.parametrize(
    "headers, expected",
    [
        # Default (absent) and True both resolve to the default models path;
        # False yields some other (temporary) directory, hence ANY.
        ({}, rasa.shared.constants.DEFAULT_MODELS_PATH),
        ({"save_to_default_model_directory": False}, ANY),
        (
            {"save_to_default_model_directory": True},
            rasa.shared.constants.DEFAULT_MODELS_PATH,
        ),
    ],
)
def test_training_payload_from_yaml_save_to_default_model_directory(
    headers: Dict, expected: Text, tmp_path: Path
):
    request = Mock()
    request.body = b""
    request.args = headers

    payload = rasa.server._training_payload_from_yaml(request, tmp_path)
    assert payload.get("output")
    assert payload.get("output") == expected
async def test_train_missing_config(rasa_non_trained_app: SanicASGITestClient):
    # Payload without a config section is a client error.
    payload = dict(domain="domain data", config=None)

    _, response = await rasa_non_trained_app.post("/model/train", json=payload)
    assert response.status == HTTPStatus.BAD_REQUEST


async def test_train_missing_training_data(rasa_app: SanicASGITestClient):
    # Config alone, without stories or NLU data, is a client error.
    payload = dict(domain="domain data", config="config data")

    _, response = await rasa_app.post("/model/train", json=payload)
    assert response.status == HTTPStatus.BAD_REQUEST


async def test_train_internal_error(rasa_non_trained_app: SanicASGITestClient):
    # Garbage (non-YAML) field contents make training itself fail -> 500.
    payload = dict(domain="domain data", config="config data", nlu="nlu data")

    _, response = await rasa_non_trained_app.post("/model/train", json=payload)
    assert response.status == HTTPStatus.INTERNAL_SERVER_ERROR
async def test_evaluate_stories(
    rasa_app: SanicASGITestClient, default_stories_file: Text
):
    # Story evaluation returns the aggregate metrics plus per-action details;
    # without e2e=true the evaluation is not end-to-end.
    stories = rasa.shared.utils.io.read_file(default_stories_file)

    _, response = await rasa_app.post(
        "/model/test/stories",
        data=stories,
        headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
    )

    assert response.status == HTTPStatus.OK

    js = response.json()
    assert set(js.keys()) == {
        "report",
        "precision",
        "f1",
        "accuracy",
        "actions",
        "in_training_data_fraction",
        "is_end_to_end_evaluation",
    }
    assert not js["is_end_to_end_evaluation"]
    assert set(js["actions"][0].keys()) == {
        "action",
        "predicted",
        "confidence",
        "policy",
    }


async def test_evaluate_stories_not_ready_agent(
    rasa_non_trained_app: SanicASGITestClient, default_stories_file: Text
):
    # Without a trained agent, story evaluation reports a conflict.
    stories = rasa.shared.utils.io.read_file(default_stories_file)

    _, response = await rasa_non_trained_app.post("/model/test/stories", data=stories)

    assert response.status == HTTPStatus.CONFLICT
async def test_evaluate_stories_end_to_end(
    rasa_app: SanicASGITestClient, end_to_end_test_story_md_file: Text
):
    # With e2e=true the evaluation flag flips and per-action results are
    # guaranteed to be non-empty.
    stories = rasa.shared.utils.io.read_file(end_to_end_test_story_md_file)

    _, response = await rasa_app.post("/model/test/stories?e2e=true", data=stories,)

    assert response.status == HTTPStatus.OK
    js = response.json()
    assert set(js.keys()) == {
        "report",
        "precision",
        "f1",
        "accuracy",
        "actions",
        "in_training_data_fraction",
        "is_end_to_end_evaluation",
    }
    assert js["is_end_to_end_evaluation"]
    assert js["actions"] != []
    assert set(js["actions"][0].keys()) == {
        "action",
        "predicted",
        "confidence",
        "policy",
    }
async def test_evaluate_intent(rasa_app: SanicASGITestClient, default_nlu_data: Text):
    """Intent evaluation with YAML training data returns all three result sections."""
    training_data = rasa.shared.utils.io.read_file(default_nlu_data)
    _, response = await rasa_app.post(
        "/model/test/intents",
        data=training_data,
        headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
    )
    assert response.status == HTTPStatus.OK
    expected_sections = {
        "intent_evaluation",
        "entity_evaluation",
        "response_selection_evaluation",
    }
    assert set(response.json().keys()) == expected_sections
async def test_evaluate_intent_json(rasa_app: SanicASGITestClient):
    """Intent evaluation also accepts training data in the legacy JSON format."""
    training_data = rasa.shared.utils.io.read_file("data/test/demo-rasa-small.json")
    _, response = await rasa_app.post(
        "/model/test/intents",
        json=training_data,
        headers={"Content-type": rasa.server.JSON_CONTENT_TYPE},
    )
    assert response.status == HTTPStatus.OK
    expected_sections = {
        "intent_evaluation",
        "entity_evaluation",
        "response_selection_evaluation",
    }
    assert set(response.json().keys()) == expected_sections
async def test_evaluate_invalid_intent_model_file(rasa_app: SanicASGITestClient):
    """Pointing the evaluation at a model file that does not exist yields HTTP 500."""
    evaluation_url = "/model/test/intents?model=invalid.tar.gz"
    _, response = await rasa_app.post(
        evaluation_url,
        json={},
        headers={"Content-type": rasa.server.JSON_CONTENT_TYPE},
    )
    assert response.status == HTTPStatus.INTERNAL_SERVER_ERROR
async def test_evaluate_intent_without_body(rasa_app: SanicASGITestClient):
    """An evaluation request without any training data is a bad request."""
    yaml_headers = {"Content-type": rasa.server.YAML_CONTENT_TYPE}
    _, response = await rasa_app.post("/model/test/intents", headers=yaml_headers)
    assert response.status == HTTPStatus.BAD_REQUEST
async def test_evaluate_intent_on_just_nlu_model(
    rasa_app_nlu: SanicASGITestClient, default_nlu_data: Text
):
    """Intent evaluation works against an NLU-only model as well."""
    training_data = rasa.shared.utils.io.read_file(default_nlu_data)
    _, response = await rasa_app_nlu.post(
        "/model/test/intents",
        data=training_data,
        headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
    )
    assert response.status == HTTPStatus.OK
    expected_sections = {
        "intent_evaluation",
        "entity_evaluation",
        "response_selection_evaluation",
    }
    assert set(response.json().keys()) == expected_sections
async def test_evaluate_intent_with_model_param(
    rasa_app: SanicASGITestClient, trained_nlu_model: Text, default_nlu_data: Text
):
    """Evaluating against an explicit `model` query param must not swap the
    currently loaded model."""
    _, response = await rasa_app.get("/status")
    previous_model_file = response.json()["model_file"]
    nlu_data = rasa.shared.utils.io.read_file(default_nlu_data)
    _, response = await rasa_app.post(
        f"/model/test/intents?model={trained_nlu_model}",
        data=nlu_data,
        headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
    )
    assert response.status == HTTPStatus.OK
    assert set(response.json().keys()) == {
        "intent_evaluation",
        "entity_evaluation",
        "response_selection_evaluation",
    }
    # the model used only for evaluation must not become the active model
    _, response = await rasa_app.get("/status")
    assert previous_model_file == response.json()["model_file"]
async def test_evaluate_intent_with_model_server(
    rasa_app: SanicASGITestClient,
    trained_rasa_model: Text,
    default_nlu_data: Text,
    tear_down_scheduler: None,
):
    """Evaluating with a model-server URL must not clobber the production model
    server configuration or its pull schedule."""
    production_model_server_url = (
        "https://example.com/webhooks/actions?model=production"
    )
    test_model_server_url = "https://example.com/webhooks/actions?model=test"
    nlu_data = rasa.shared.utils.io.read_file(default_nlu_data)
    with aioresponses() as mocked:
        # Mock retrieving the production model from the model server
        mocked.get(
            production_model_server_url,
            body=Path(trained_rasa_model).read_bytes(),
            headers={"ETag": "production"},
        )
        # Mock retrieving the test model from the model server
        mocked.get(
            test_model_server_url,
            body=Path(trained_rasa_model).read_bytes(),
            headers={"ETag": "test"},
        )
        agent_with_model_server = await load_agent(
            model_server=EndpointConfig(production_model_server_url)
        )
        rasa_app.app.agent = agent_with_model_server
        _, response = await rasa_app.post(
            f"/model/test/intents?model={test_model_server_url}",
            data=nlu_data,
            headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
        )
        assert response.status == HTTPStatus.OK
        assert set(response.json().keys()) == {
            "intent_evaluation",
            "entity_evaluation",
            "response_selection_evaluation",
        }
        production_model_server = rasa_app.app.agent.model_server
        # Assert that the model server URL for the test didn't override the production
        # model server URL
        assert production_model_server.url == production_model_server_url
        # Assert the tests didn't break pulling the models
        assert production_model_server.kwargs.get("wait_time_between_pulls") != 0
async def test_cross_validation(
    rasa_non_trained_app: SanicASGITestClient, default_nlu_data: Text
):
    """Cross-validation (no trained model needed) returns full evaluation details."""
    nlu_data = Path(default_nlu_data).read_text()
    config = Path(DEFAULT_STACK_CONFIG).read_text()
    # NLU data and model config are concatenated into one YAML payload
    payload = f"{nlu_data}\n{config}"
    _, response = await rasa_non_trained_app.post(
        "/model/test/intents",
        data=payload,
        headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
        params={"cross_validation_folds": 3},
    )
    assert response.status == HTTPStatus.OK
    response_body = response.json()
    for required_key in {
        "intent_evaluation",
        "entity_evaluation",
        "response_selection_evaluation",
    }:
        assert required_key in response_body
        details = response_body[required_key]
        assert all(
            key in details for key in ["precision", "f1_score", "report", "errors"]
        )
async def test_cross_validation_with_md(
    rasa_non_trained_app: SanicASGITestClient, default_nlu_data: Text
):
    """Markdown training data is not accepted for cross-validation (YAML only)."""
    # NOTE(review): the literal's internal indentation appears lost in this copy;
    # the exact content is irrelevant here since any Markdown payload is rejected.
    payload = """
## intent: greet
- Hi
- Hello
"""
    _, response = await rasa_non_trained_app.post(
        "/model/test/intents", data=payload, params={"cross_validation_folds": 3},
    )
    assert response.status == HTTPStatus.BAD_REQUEST
async def test_cross_validation_with_callback_success(
    rasa_non_trained_app: SanicASGITestClient,
    default_nlu_data: Text,
    monkeypatch: MonkeyPatch,
):
    """With a callback URL the endpoint answers 204 immediately and POSTs the
    evaluation results to the callback asynchronously."""
    nlu_data = Path(default_nlu_data).read_text()
    config = Path(DEFAULT_STACK_CONFIG).read_text()
    payload = f"{nlu_data}\n{config}"
    callback_url = "https://example.com/webhooks/actions"
    with aioresponses() as mocked:
        mocked.post(callback_url, payload={})
        # stub out the actual cross-validation; only the HTTP plumbing is tested
        mocked_cross_validation = Mock(
            return_value=(
                CVEvaluationResult({}, {}, {}),
                CVEvaluationResult({}, {}, {}),
                CVEvaluationResult({}, {}, {}),
            )
        )
        monkeypatch.setattr(
            rasa.nlu, rasa.nlu.cross_validate.__name__, mocked_cross_validation
        )
        _, response = await rasa_non_trained_app.post(
            "/model/test/intents",
            data=payload,
            headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
            params={"cross_validation_folds": 3, "callback_url": callback_url},
        )
        assert response.status == HTTPStatus.NO_CONTENT
        # Sleep to give event loop time to process things in the background
        await asyncio.sleep(1)
        mocked_cross_validation.assert_called_once()
        last_request = latest_request(mocked, "POST", callback_url)
        assert last_request
        content = last_request[0].kwargs["data"]
        response_body = json.loads(content)
        for required_key in {
            "intent_evaluation",
            "entity_evaluation",
            "response_selection_evaluation",
        }:
            assert required_key in response_body
            details = response_body[required_key]
            assert all(
                key in details for key in ["precision", "f1_score", "report", "errors"]
            )
async def test_cross_validation_with_callback_error(
    rasa_non_trained_app: SanicASGITestClient,
    default_nlu_data: Text,
    monkeypatch: MonkeyPatch,
):
    """If cross-validation fails in the background, the callback receives an
    error body (code 500) while the endpoint still answers 204."""
    nlu_data = Path(default_nlu_data).read_text()
    config = Path(DEFAULT_STACK_CONFIG).read_text()
    payload = f"{nlu_data}\n{config}"
    # force the background cross-validation to raise
    monkeypatch.setattr(
        rasa.nlu, rasa.nlu.cross_validate.__name__, Mock(side_effect=ValueError())
    )
    callback_url = "https://example.com/webhooks/actions"
    with aioresponses() as mocked:
        mocked.post(callback_url, payload={})
        _, response = await rasa_non_trained_app.post(
            "/model/test/intents",
            data=payload,
            headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
            params={"cross_validation_folds": 3, "callback_url": callback_url},
        )
        assert response.status == HTTPStatus.NO_CONTENT
        # give the event loop time to run the background task and the callback
        await asyncio.sleep(1)
        last_request = latest_request(mocked, "POST", callback_url)
        assert last_request
        content = last_request[0].kwargs["json"]
        assert content["code"] == HTTPStatus.INTERNAL_SERVER_ERROR
async def test_callback_unexpected_error(
    rasa_non_trained_app: SanicASGITestClient,
    default_nlu_data: Text,
    monkeypatch: MonkeyPatch,
):
    """An unexpected error while parsing the payload is reported to the callback.

    `rasa.server._training_payload_from_yaml` is patched to raise, so the
    background job fails before evaluation starts; the endpoint still answers
    204 immediately and POSTs an error body (code 500) to the callback URL.
    """
    nlu_data = Path(default_nlu_data).read_text()
    config = Path(DEFAULT_STACK_CONFIG).read_text()
    payload = f"{nlu_data}\n{config}"
    # Fix: removed the unused local coroutine `raiseUnexpectedError`, which was
    # defined but never called - the monkeypatched Mock below raises instead.
    monkeypatch.setattr(
        rasa.server,
        rasa.server._training_payload_from_yaml.__name__,
        Mock(side_effect=ValueError()),
    )
    callback_url = "https://example.com/webhooks/actions"
    with aioresponses() as mocked:
        mocked.post(callback_url, payload={})
        _, response = await rasa_non_trained_app.post(
            "/model/test/intents",
            data=payload,
            headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
            params={"cross_validation_folds": 3, "callback_url": callback_url},
        )
        assert response.status == HTTPStatus.NO_CONTENT
        # give the event loop time to run the background task and the callback
        await asyncio.sleep(1)
        last_request = latest_request(mocked, "POST", callback_url)
        assert last_request
        content = last_request[0].kwargs["json"]
        assert content["code"] == HTTPStatus.INTERNAL_SERVER_ERROR
async def test_predict(rasa_app: SanicASGITestClient):
    """POST /model/predict with a serialized event history returns a prediction."""
    data = {
        "Events": {
            "value": [
                {"event": "action", "name": "action_listen"},
                {
                    "event": "user",
                    "text": "hello",
                    "parse_data": {
                        "entities": [],
                        "intent": {"confidence": 0.57, INTENT_NAME_KEY: "greet"},
                        "text": "hello",
                    },
                },
            ]
        }
    }
    _, response = await rasa_app.post(
        "/model/predict",
        json=data,
        headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE},
    )
    content = response.json()
    assert response.status == HTTPStatus.OK
    # the prediction result exposes scores, the tracker state and the policy used
    assert "scores" in content
    assert "tracker" in content
    assert "policy" in content
@freeze_time("2018-01-01")
async def test_requesting_non_existent_tracker(rasa_app: SanicASGITestClient):
    """Requesting an unknown conversation ID creates a fresh tracker on the fly."""
    _, response = await rasa_app.get("/conversations/madeupid/tracker")
    content = response.json()
    assert response.status == HTTPStatus.OK
    assert content["paused"] is False
    assert content["slots"] == {
        "name": None,
        REQUESTED_SLOT: None,
        SESSION_START_METADATA_SLOT: None,
    }
    assert content["sender_id"] == "madeupid"
    # 1514764800 == 2018-01-01 00:00:00 UTC, pinned by @freeze_time above
    assert content["events"] == [
        {
            "event": "action",
            "name": "action_session_start",
            "policy": None,
            "confidence": 1,
            "timestamp": 1514764800,
            "action_text": None,
        },
        {"event": "session_started", "timestamp": 1514764800},
        {
            "event": "action",
            # NOTE(review): INTENT_NAME_KEY is used as an action event's "name"
            # key here - presumably the constant equals "name" (the sibling
            # event above uses the literal); confirm and prefer the literal.
            INTENT_NAME_KEY: "action_listen",
            "policy": None,
            "confidence": None,
            "timestamp": 1514764800,
            "action_text": None,
        },
    ]
    assert content["latest_message"] == {
        "text": None,
        "intent": {},
        "entities": [],
        "message_id": None,
        "metadata": {},
    }
@pytest.mark.parametrize("event", test_events)
async def test_pushing_event(rasa_app: SanicASGITestClient, event: Event):
    """Appending a single event assigns a fresh server-side timestamp to it."""
    sender_id = str(uuid.uuid1())
    conversation = f"/conversations/{sender_id}"
    serialized_event = event.as_dict()
    # Remove timestamp so that a new one is assigned on the server
    serialized_event.pop("timestamp")
    time_before_adding_events = time.time()
    # Wait a bit so that the server-generated timestamp is strictly greater
    # than time_before_adding_events
    time.sleep(0.01)
    _, response = await rasa_app.post(
        f"{conversation}/tracker/events",
        json=serialized_event,
        headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE},
    )
    assert response.json() is not None
    assert response.status == HTTPStatus.OK
    _, tracker_response = await rasa_app.get(f"/conversations/{sender_id}/tracker")
    tracker = tracker_response.json()
    assert tracker is not None
    # session start sequence (3 events) plus the pushed event
    assert len(tracker.get("events")) == 4
    deserialized_events = [Event.from_parameters(event) for event in tracker["events"]]
    # there is an initial session start sequence at the beginning of the tracker
    assert deserialized_events[:3] == session_start_sequence
    assert deserialized_events[3] == event
    assert deserialized_events[3].timestamp > time_before_adding_events
async def test_push_multiple_events(rasa_app: SanicASGITestClient):
    """POSTing a list of events appends them all after the session start sequence."""
    conversation_id = str(uuid.uuid1())
    serialized = [event.as_dict() for event in test_events]
    _, response = await rasa_app.post(
        f"/conversations/{conversation_id}/tracker/events",
        json=serialized,
        headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE},
    )
    assert response.json() is not None
    assert response.status == HTTPStatus.OK
    _, tracker_response = await rasa_app.get(
        f"/conversations/{conversation_id}/tracker"
    )
    tracker = tracker_response.json()
    assert tracker is not None
    # the tracker starts with a session start sequence, followed by our events
    restored = [Event.from_parameters(event) for event in tracker.get("events")]
    assert restored == session_start_sequence + test_events
@pytest.mark.parametrize(
    "params", ["?execute_side_effects=true&output_channel=callback", ""]
)
async def test_pushing_event_while_executing_side_effects(
    rasa_server: Sanic, params: Text
):
    """`execute_side_effects=true` forwards bot messages to the output channel."""
    input_channel = CallbackInput(EndpointConfig("https://example.com/callback"))
    channel.register([input_channel], rasa_server, "/webhooks/")
    rasa_app = rasa_server.asgi_client
    sender_id = str(uuid.uuid1())
    conversation = f"/conversations/{sender_id}"
    serialized_event = test_events[1].as_dict()
    with aioresponses() as mocked:
        mocked.post(
            "https://example.com/callback",
            repeat=True,
            headers={"Content-Type": "application/json"},
        )
        await rasa_app.post(
            f"{conversation}/tracker/events{params}",
            json=serialized_event,
            headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE},
        )
        r = latest_request(mocked, "post", "https://example.com/callback")
        if not params:
            # without the query params no side effects run, so nothing is sent
            assert r is None
        else:
            message_received = json_of_latest_request(r)
            assert message_received.get("recipient_id") == sender_id
            assert message_received.get("text") == serialized_event.get("text")
async def test_post_conversation_id_with_slash(rasa_app: SanicASGITestClient):
    """Conversation IDs containing slashes and other URL-unsafe characters work."""
    conversation_id = str(uuid.uuid1())
    id_len = len(conversation_id) // 2
    # splice special characters into the middle of the ID
    conversation_id = conversation_id[:id_len] + "/+-_\\=" + conversation_id[id_len:]
    conversation = f"/conversations/{conversation_id}"
    events = [e.as_dict() for e in test_events]
    _, response = await rasa_app.post(
        f"{conversation}/tracker/events",
        json=events,
        headers={"Content-Type": "application/json"},
    )
    assert response.json() is not None
    assert response.status == HTTPStatus.OK
    _, tracker_response = await rasa_app.get(
        f"/conversations/{conversation_id}/tracker"
    )
    tracker = tracker_response.json()
    assert tracker is not None
    # there is a session start sequence at the start
    assert [
        Event.from_parameters(event) for event in tracker.get("events")
    ] == session_start_sequence + test_events
async def test_put_tracker(rasa_app: SanicASGITestClient):
    """PUT replaces the tracker's events wholesale (no session start is prepended)."""
    data = [event.as_dict() for event in test_events]
    _, response = await rasa_app.put(
        "/conversations/pushtracker/tracker/events",
        json=data,
        headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE},
    )
    content = response.json()
    assert response.status == HTTPStatus.OK
    assert len(content["events"]) == len(test_events)
    assert content["sender_id"] == "pushtracker"
    _, tracker_response = await rasa_app.get("/conversations/pushtracker/tracker")
    tracker = tracker_response.json()
    assert tracker is not None
    evts = tracker.get("events")
    # `events` here is presumably the rasa events module imported at file level
    # (there is no local of that name in this test) - confirm against imports
    assert events.deserialise_events(evts) == test_events
async def test_predict_without_conversation_id(rasa_app: SanicASGITestClient):
    """Predicting for an unknown conversation ID yields 404 with a message."""
    _, response = await rasa_app.post("/conversations/non_existent_id/predict")
    assert response.status == HTTPStatus.NOT_FOUND
    body = response.json()
    assert body["message"] == "Conversation ID not found."
async def test_sorted_predict(rasa_app: SanicASGITestClient):
    """Action scores come back sorted by descending score, ties broken by name."""
    await _create_tracker_for_sender(rasa_app, "sortedpredict")
    _, response = await rasa_app.post("/conversations/sortedpredict/predict")
    scores = response.json()["scores"]
    expected_order = sorted(scores, key=lambda entry: (-entry["score"], entry["action"]))
    assert scores == expected_order
async def _create_tracker_for_sender(app: SanicASGITestClient, sender_id: Text) -> None:
    """Seed a conversation tracker for `sender_id` with the first three test events."""
    serialized = [event.as_dict() for event in test_events[:3]]
    _, response = await app.put(
        f"/conversations/{sender_id}/tracker/events",
        json=serialized,
        headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE},
    )
    assert response.status == HTTPStatus.OK
async def test_get_tracker_with_jwt(rasa_secured_app: SanicASGITestClient):
    """JWT auth: admins can read any tracker, plain users only their own."""
    # token generated with secret "core" and algorithm HS256
    # on https://jwt.io/
    # {"user": {"username": "testadmin", "role": "admin"}}
    jwt_header = {
        "Authorization": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9."
        "eyJ1c2VyIjp7InVzZXJuYW1lIjoidGVzdGFkbWluIiwic"
        "m9sZSI6ImFkbWluIn19.NAQr0kbtSrY7d28XTqRzawq2u"
        "QRre7IWTuIDrCn5AIw"
    }
    # an admin may read any user's tracker
    _, response = await rasa_secured_app.get(
        "/conversations/testadmin/tracker", headers=jwt_header
    )
    assert response.status == HTTPStatus.OK
    _, response = await rasa_secured_app.get(
        "/conversations/testuser/tracker", headers=jwt_header
    )
    assert response.status == HTTPStatus.OK
    # {"user": {"username": "testuser", "role": "user"}}
    jwt_header = {
        "Authorization": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9."
        "eyJ1c2VyIjp7InVzZXJuYW1lIjoidGVzdHVzZXIiLCJyb"
        "2xlIjoidXNlciJ9fQ.JnMTLYd56qut2w9h7hRQlDm1n3l"
        "HJHOxxC_w7TtwCrs"
    }
    # a plain user may not read another user's tracker
    _, response = await rasa_secured_app.get(
        "/conversations/testadmin/tracker", headers=jwt_header
    )
    assert response.status == HTTPStatus.FORBIDDEN
    _, response = await rasa_secured_app.get(
        "/conversations/testuser/tracker", headers=jwt_header
    )
    assert response.status == HTTPStatus.OK
def test_list_routes(empty_agent: Agent):
    """The server app exposes exactly the expected set of named routes."""
    app = rasa.server.create_app(empty_agent, auth_token=None)
    registered_routes = utils.list_routes(app)
    expected_route_names = {
        "hello",
        "version",
        "status",
        "retrieve_tracker",
        "append_events",
        "replace_events",
        "retrieve_story",
        "execute_action",
        "trigger_intent",
        "predict",
        "add_message",
        "train",
        "evaluate_stories",
        "evaluate_intents",
        "tracker_predict",
        "parse",
        "load_model",
        "unload_model",
        "get_domain",
    }
    assert set(registered_routes.keys()) == expected_route_names
async def test_unload_model_error(rasa_app: SanicASGITestClient):
    """Unloading the current model succeeds with 204 when a model is loaded."""
    _, status_response = await rasa_app.get("/status")
    assert status_response.status == HTTPStatus.OK
    status_body = status_response.json()
    assert "model_file" in status_body and status_body["model_file"] is not None
    _, delete_response = await rasa_app.delete("/model")
    assert delete_response.status == HTTPStatus.NO_CONTENT
async def test_get_domain(rasa_app: SanicASGITestClient):
    """GET /domain with a JSON accept header returns the domain's main sections."""
    _, response = await rasa_app.get(
        "/domain", headers={"accept": rasa.server.JSON_CONTENT_TYPE}
    )
    content = response.json()
    assert response.status == HTTPStatus.OK
    for section in ("config", "intents", "entities", "slots", "responses", "actions"):
        assert section in content
async def test_get_domain_invalid_accept_header(rasa_app: SanicASGITestClient):
    """GET /domain without an acceptable content type is answered with 406."""
    _, response = await rasa_app.get("/domain")
    assert response.status == HTTPStatus.NOT_ACCEPTABLE
async def test_load_model(rasa_app: SanicASGITestClient, trained_core_model: Text):
    """PUT /model with a model file loads it and changes the reported fingerprint."""
    _, before = await rasa_app.get("/status")
    assert before.status == HTTPStatus.OK
    assert "fingerprint" in before.json()
    old_fingerprint = before.json()["fingerprint"]
    _, load_response = await rasa_app.put(
        "/model", json={"model_file": trained_core_model}
    )
    assert load_response.status == HTTPStatus.NO_CONTENT
    _, after = await rasa_app.get("/status")
    assert after.status == HTTPStatus.OK
    assert "fingerprint" in after.json()
    assert old_fingerprint != after.json()["fingerprint"]
async def test_load_model_from_model_server(
    rasa_app: SanicASGITestClient, trained_core_model: Text, tear_down_scheduler: None
):
    """PUT /model with a model-server config downloads and loads that model."""
    _, response = await rasa_app.get("/status")
    assert response.status == HTTPStatus.OK
    assert "fingerprint" in response.json()
    old_fingerprint = response.json()["fingerprint"]
    endpoint = EndpointConfig("https://example.com/model/trained_core_model")
    with open(trained_core_model, "rb") as f:
        with aioresponses(passthrough=["http://127.0.0.1"]) as mocked:
            headers = {}
            fs = os.fstat(f.fileno())
            # fs[6] is st_size, the archive's size in bytes
            # NOTE(review): `headers` is built but never passed to `mocked.get`
            # below - looks vestigial; confirm whether it can be removed.
            headers["Content-Length"] = str(fs[6])
            mocked.get(
                "https://example.com/model/trained_core_model",
                content_type="application/x-tar",
                body=f.read(),
            )
            data = {"model_server": {"url": endpoint.url}}
            _, response = await rasa_app.put("/model", json=data)
            assert response.status == HTTPStatus.NO_CONTENT
            _, response = await rasa_app.get("/status")
            assert response.status == HTTPStatus.OK
            assert "fingerprint" in response.json()
            # loading the new model must change the fingerprint
            assert old_fingerprint != response.json()["fingerprint"]
async def test_load_model_invalid_request_body(
    rasa_non_trained_app: SanicASGITestClient,
):
    """PUT /model with no body at all is a bad request."""
    _, response = await rasa_non_trained_app.put("/model")
    assert response.status == HTTPStatus.BAD_REQUEST
async def test_load_model_invalid_configuration(
    rasa_non_trained_app: SanicASGITestClient,
):
    """PUT /model pointing at a non-existent model file is a bad request."""
    payload = {"model_file": "some-random-path"}
    _, response = await rasa_non_trained_app.put("/model", json=payload)
    assert response.status == HTTPStatus.BAD_REQUEST
async def test_execute(rasa_app: SanicASGITestClient):
    """Executing an action on an existing conversation returns tracker and messages."""
    await _create_tracker_for_sender(rasa_app, "test_execute")
    payload = {INTENT_NAME_KEY: "utter_greet"}
    _, response = await rasa_app.post(
        "/conversations/test_execute/execute", json=payload
    )
    assert response.status == HTTPStatus.OK
    body = response.json()
    assert body["tracker"]
    assert body["messages"]
async def test_execute_without_conversation_id(rasa_app: SanicASGITestClient):
    """Executing an action for an unknown conversation ID yields 404."""
    payload = {INTENT_NAME_KEY: "utter_greet"}
    _, response = await rasa_app.post(
        "/conversations/non_existent_id/execute", json=payload
    )
    assert response.status == HTTPStatus.NOT_FOUND
    assert response.json()["message"] == "Conversation ID not found."
async def test_execute_with_missing_action_name(rasa_app: SanicASGITestClient):
    """An execute request whose body lacks the action name is a bad request."""
    sender = "test_execute_with_missing_action_name"
    await _create_tracker_for_sender(rasa_app, sender)
    payload = {"wrong-key": "utter_greet"}
    _, response = await rasa_app.post(f"/conversations/{sender}/execute", json=payload)
    assert response.status == HTTPStatus.BAD_REQUEST
async def test_execute_with_not_existing_action(rasa_app: SanicASGITestClient):
    """Requesting execution of an unknown action fails with HTTP 500."""
    sender = "test_execute_with_not_existing_action"
    await _create_tracker_for_sender(rasa_app, sender)
    payload = {"name": "ka[pa[opi[opj[oj[oija"}
    _, response = await rasa_app.post(f"/conversations/{sender}/execute", json=payload)
    assert response.status == HTTPStatus.INTERNAL_SERVER_ERROR
async def test_trigger_intent(rasa_app: SanicASGITestClient):
    """Triggering an intent returns the updated tracker and any bot messages."""
    payload = {INTENT_NAME_KEY: "greet"}
    _, response = await rasa_app.post(
        "/conversations/test_trigger/trigger_intent", json=payload
    )
    assert response.status == HTTPStatus.OK
    body = response.json()
    assert body["tracker"]
    assert body["messages"]
async def test_trigger_intent_with_entity(rasa_app: SanicASGITestClient):
    """Triggering an intent with entities sets the corresponding slot."""
    entity_name = "name"
    entity_value = "Sara"
    data = {INTENT_NAME_KEY: "greet", "entities": {entity_name: entity_value}}
    _, response = await rasa_app.post(
        "/conversations/test_trigger/trigger_intent", json=data
    )
    assert response.status == HTTPStatus.OK
    parsed_content = response.json()
    # the most recent slot-set event should reflect the passed entity
    last_slot_set_event = [
        event
        for event in parsed_content["tracker"]["events"]
        if event["event"] == "slot"
    ][-1]
    assert parsed_content["tracker"]
    assert parsed_content["messages"]
    assert last_slot_set_event["name"] == entity_name
    assert last_slot_set_event["value"] == entity_value
async def test_trigger_intent_with_missing_intent_name(rasa_app: SanicASGITestClient):
    """A trigger_intent request without an intent name is a bad request."""
    sender = "test_trigger_intent_with_missing_action_name"
    payload = {"wrong-key": "greet"}
    _, response = await rasa_app.post(
        f"/conversations/{sender}/trigger_intent", json=payload
    )
    assert response.status == HTTPStatus.BAD_REQUEST
async def test_trigger_intent_with_not_existing_intent(rasa_app: SanicASGITestClient):
    """Triggering an intent unknown to the domain yields 404."""
    sender = "test_trigger_intent_with_not_existing_intent"
    await _create_tracker_for_sender(rasa_app, sender)
    payload = {INTENT_NAME_KEY: "ka[pa[opi[opj[oj[oija"}
    _, response = await rasa_app.post(
        f"/conversations/{sender}/trigger_intent", json=payload
    )
    assert response.status == HTTPStatus.NOT_FOUND
@pytest.mark.parametrize(
    "input_channels, output_channel_to_use, expected_channel",
    [
        (None, "slack", CollectingOutputChannel),
        ([], None, CollectingOutputChannel),
        ([RestInput()], "slack", CollectingOutputChannel),
        ([RestInput()], "rest", CollectingOutputChannel),
        (
            [RestInput(), SlackInput("test", slack_signing_secret="foobar")],
            "slack",
            SlackBot,
        ),
    ],
)
def test_get_output_channel(
    input_channels: List[Text], output_channel_to_use: Text, expected_channel: Type
):
    """`_get_output_channel` resolves the requested channel; anything not
    registered falls back to a CollectingOutputChannel."""
    request = MagicMock()
    app = MagicMock()
    app.input_channels = input_channels
    request.app = app
    request.args = {"output_channel": output_channel_to_use}
    actual = rasa.server._get_output_channel(request, None)
    assert isinstance(actual, expected_channel)
@pytest.mark.parametrize(
    "input_channels, expected_channel",
    [
        ([], CollectingOutputChannel),
        ([RestInput()], CollectingOutputChannel),
        ([RestInput(), SlackInput("test", slack_signing_secret="foobar")], SlackBot),
    ],
)
def test_get_latest_output_channel(input_channels: List[Text], expected_channel: Type):
    """`output_channel=latest` picks the channel of the tracker's last user message."""
    request = MagicMock()
    app = MagicMock()
    app.input_channels = input_channels
    request.app = app
    request.args = {"output_channel": "latest"}
    # the last user utterance came in via the "slack" input channel
    tracker = DialogueStateTracker.from_events(
        "default", [UserUttered("text", input_channel="slack")]
    )
    actual = rasa.server._get_output_channel(request, tracker)
    assert isinstance(actual, expected_channel)
def test_app_when_app_has_no_input_channels():
    """Falls back to a CollectingOutputChannel when the app defines no channels."""
    request = MagicMock()

    class _AppWithoutChannels:
        pass

    request.app = _AppWithoutChannels()
    resolved = rasa.server._get_output_channel(
        request, DialogueStateTracker.from_events("default", [])
    )
    assert isinstance(resolved, CollectingOutputChannel)
@pytest.mark.parametrize(
    "conversation_events,until_time,fetch_all_sessions,expected",
    # conversation with one session
    # NOTE(review): the indentation inside the expected-YAML literals was lost
    # in this copy and has been reconstructed from rasa's standard story output
    # format - confirm against the server's actual serialization.
    [
        (
            [
                ActionExecuted(ACTION_SESSION_START_NAME),
                SessionStarted(),
                UserUttered("hi", {"name": "greet"}),
                ActionExecuted("utter_greet"),
            ],
            None,
            True,
            """version: "2.0"
stories:
- story: some-conversation-ID
  steps:
  - intent: greet
    user: |-
      hi
  - action: utter_greet""",
        ),
        # conversation with multiple sessions
        (
            [
                ActionExecuted(ACTION_SESSION_START_NAME),
                SessionStarted(),
                UserUttered("hi", {"name": "greet"}),
                ActionExecuted("utter_greet"),
                ActionExecuted(ACTION_SESSION_START_NAME),
                SessionStarted(),
                UserUttered("bye bye", {"name": "goodbye"}),
                ActionExecuted("utter_goodbye"),
            ],
            None,
            True,
            """version: "2.0"
stories:
- story: some-conversation-ID, story 1
  steps:
  - intent: greet
    user: |-
      hi
  - action: utter_greet
- story: some-conversation-ID, story 2
  steps:
  - intent: goodbye
    user: |-
      bye bye
  - action: utter_goodbye""",
        ),
        # conversation with multiple sessions, but setting `all_sessions=false`
        # means only the last one is returned
        (
            [
                ActionExecuted(ACTION_SESSION_START_NAME),
                SessionStarted(),
                UserUttered("hi", {"name": "greet"}),
                ActionExecuted("utter_greet"),
                ActionExecuted(ACTION_SESSION_START_NAME),
                SessionStarted(),
                UserUttered("bye bye", {"name": "goodbye"}),
                ActionExecuted("utter_goodbye"),
            ],
            None,
            False,
            """version: "2.0"
stories:
- story: some-conversation-ID
  steps:
  - intent: goodbye
    user: |-
      bye bye
  - action: utter_goodbye""",
        ),
        # the default for `all_sessions` is `false` - this test checks that
        # only the latest session is returned in that case
        (
            [
                ActionExecuted(ACTION_SESSION_START_NAME),
                SessionStarted(),
                UserUttered("hi", {"name": "greet"}),
                ActionExecuted("utter_greet"),
                ActionExecuted(ACTION_SESSION_START_NAME),
                SessionStarted(),
                UserUttered("bye bye", {"name": "goodbye"}),
                ActionExecuted("utter_goodbye"),
            ],
            None,
            None,
            """version: "2.0"
stories:
- story: some-conversation-ID
  steps:
  - intent: goodbye
    user: |-
      bye bye
  - action: utter_goodbye""",
        ),
        # `until` parameter means only the first session is returned
        (
            [
                ActionExecuted(ACTION_SESSION_START_NAME, timestamp=1),
                SessionStarted(timestamp=2),
                UserUttered("hi", {"name": "greet"}, timestamp=3),
                ActionExecuted("utter_greet", timestamp=4),
                ActionExecuted(ACTION_SESSION_START_NAME, timestamp=5),
                SessionStarted(timestamp=6),
                UserUttered("bye bye", {"name": "goodbye"}, timestamp=7),
                ActionExecuted("utter_goodbye", timestamp=8),
            ],
            4,
            True,
            """version: "2.0"
stories:
- story: some-conversation-ID
  steps:
  - intent: greet
    user: |-
      hi
  - action: utter_greet""",
        ),
        # empty conversation
        ([], None, True, 'version: "2.0"'),
        # Conversation with slot
        (
            [
                ActionExecuted(ACTION_SESSION_START_NAME),
                SessionStarted(),
                UserUttered("hi", {"name": "greet"}),
                ActionExecuted("utter_greet"),
                SlotSet(REQUESTED_SLOT, "some value"),
            ],
            None,
            True,
            """version: "2.0"
stories:
- story: some-conversation-ID
  steps:
  - intent: greet
    user: |-
      hi
  - action: utter_greet
  - slot_was_set:
    - requested_slot: some value""",
        ),
    ],
)
async def test_get_story(
    rasa_app: SanicASGITestClient,
    monkeypatch: MonkeyPatch,
    conversation_events: List[Event],
    until_time: Optional[float],
    fetch_all_sessions: Optional[bool],
    expected: Text,
):
    """GET /conversations/<id>/story serializes the tracker as a YAML story."""
    conversation_id = "some-conversation-ID"
    tracker_store = InMemoryTrackerStore(Domain.empty())
    tracker = DialogueStateTracker.from_events(conversation_id, conversation_events)
    tracker_store.save(tracker)
    monkeypatch.setattr(rasa_app.app.agent, "tracker_store", tracker_store)
    url = f"/conversations/{conversation_id}/story?"
    query = {}
    if fetch_all_sessions is not None:
        query["all_sessions"] = fetch_all_sessions
    if until_time is not None:
        query["until"] = until_time
    _, response = await rasa_app.get(url + urllib.parse.urlencode(query))
    assert response.status == HTTPStatus.OK
    assert response.content.decode().strip() == expected
async def test_get_story_without_conversation_id(
    rasa_app: SanicASGITestClient, monkeypatch: MonkeyPatch
):
    """Requesting a story for an unknown conversation ID yields 404."""
    story_url = "/conversations/some-conversation-ID/story"
    _, response = await rasa_app.get(story_url)
    assert response.status == HTTPStatus.NOT_FOUND
    assert response.json()["message"] == "Conversation ID not found."
async def test_get_story_does_not_update_conversation_session(
    rasa_app: SanicASGITestClient, monkeypatch: MonkeyPatch
):
    """Fetching a story must not start a new session on an expired conversation."""
    conversation_id = "some-conversation-ID"
    # domain with short session expiration time of one second
    domain = Domain.empty()
    domain.session_config = SessionConfig(
        session_expiration_time=1 / 60, carry_over_slots=True
    )
    monkeypatch.setattr(rasa_app.app.agent, "domain", domain)
    # conversation contains one session that has expired
    now = time.time()
    conversation_events = [
        ActionExecuted(ACTION_SESSION_START_NAME, timestamp=now - 10),
        SessionStarted(timestamp=now - 9),
        UserUttered("hi", {"name": "greet"}, timestamp=now - 8),
        ActionExecuted("utter_greet", timestamp=now - 7),
    ]
    tracker = DialogueStateTracker.from_events(conversation_id, conversation_events)
    # the conversation session has expired
    assert rasa_app.app.agent.create_processor()._has_session_expired(tracker)
    tracker_store = InMemoryTrackerStore(domain)
    tracker_store.save(tracker)
    monkeypatch.setattr(rasa_app.app.agent, "tracker_store", tracker_store)
    _, response = await rasa_app.get(f"/conversations/{conversation_id}/story")
    assert response.status == HTTPStatus.OK
    # expected story is returned
    # NOTE(review): the expected-YAML literal's indentation was lost in this
    # copy and has been reconstructed from rasa's story output format.
    assert (
        response.content.decode().strip()
        == """version: "2.0"
stories:
- story: some-conversation-ID
  steps:
  - intent: greet
    user: |-
      hi
  - action: utter_greet"""
    )
    # the tracker has the same number of events as were initially added
    assert len(tracker.events) == len(conversation_events)
    # the last event is still the same as before
    assert tracker.events[-1].timestamp == conversation_events[-1].timestamp
@pytest.mark.parametrize(
    "initial_tracker_events,events_to_append,expected_events",
    [
        (
            # the tracker is initially empty, and no events are appended
            # so we'll just expect the session start sequence with an `action_listen`
            [],
            [],
            [
                ActionExecuted(ACTION_SESSION_START_NAME),
                SessionStarted(),
                ActionExecuted(ACTION_LISTEN_NAME),
            ],
        ),
        (
            # the tracker is initially empty, and a user utterance is appended
            # we expect a tracker with a session start sequence and a user utterance
            [],
            [UserUttered("/greet", {"name": "greet", "confidence": 1.0})],
            [
                ActionExecuted(ACTION_SESSION_START_NAME),
                SessionStarted(),
                ActionExecuted(ACTION_LISTEN_NAME),
                UserUttered("/greet", {"name": "greet", "confidence": 1.0}),
            ],
        ),
        (
            # the tracker is initially empty, and a session start sequence is appended
            # we'll just expect the session start sequence
            [],
            [ActionExecuted(ACTION_SESSION_START_NAME), SessionStarted()],
            [ActionExecuted(ACTION_SESSION_START_NAME), SessionStarted()],
        ),
        (
            # the tracker already contains some events - we can simply append events
            [
                ActionExecuted(ACTION_LISTEN_NAME),
                UserUttered("/greet", {"name": "greet", "confidence": 1.0}),
            ],
            [ActionExecuted("utter_greet")],
            [
                ActionExecuted(ACTION_LISTEN_NAME),
                UserUttered("/greet", {"name": "greet", "confidence": 1.0}),
                ActionExecuted("utter_greet"),
            ],
        ),
    ],
)
async def test_update_conversation_with_events(
    rasa_app: SanicASGITestClient,
    monkeypatch: MonkeyPatch,
    initial_tracker_events: List[Event],
    events_to_append: List[Event],
    expected_events: List[Event],
):
    """`update_conversation_with_events` appends events, adding a session start
    sequence only when the tracker needs one."""
    conversation_id = "some-conversation-ID"
    domain = Domain.empty()
    tracker_store = InMemoryTrackerStore(domain)
    monkeypatch.setattr(rasa_app.app.agent, "tracker_store", tracker_store)
    if initial_tracker_events:
        tracker = DialogueStateTracker.from_events(
            conversation_id, initial_tracker_events
        )
        tracker_store.save(tracker)
    fetched_tracker = await rasa.server.update_conversation_with_events(
        conversation_id, rasa_app.app.agent.create_processor(), domain, events_to_append
    )
    assert list(fetched_tracker.events) == expected_events
|
tl_detector.py | #!/usr/bin/env python
import rospy
from std_msgs.msg import Int32
from geometry_msgs.msg import PoseStamped, Pose
from styx_msgs.msg import TrafficLightArray, TrafficLight
from styx_msgs.msg import Lane
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
from light_classification.tl_classifier import TLClassifier
import tf
import cv2
import yaml
import math
import time
import threading
# Number of consecutive identical classifications required before a newly
# predicted light state is trusted and published (debounce threshold).
STATE_COUNT_THRESHOLD = 1
class TLDetector(object):
    """ROS node that detects upcoming traffic lights from camera images.

    Subscribes to the vehicle pose, the base waypoints and the camera feed,
    and (via the ImageProcess worker thread) publishes the waypoint index of
    the next red light's stop line on /traffic_waypoint (-1 when none).
    Note: ``__init__`` does not return — it runs the node's processing loop.
    """

    def __init__(self):
        rospy.init_node('tl_detector')

        self.pose = None
        self.waypoints = None
        self.camera_image = None
        # Initialize before any callback/worker can read it; image_cb sets it
        # True once the first camera frame arrives.
        self.has_image = False
        self.lights = []

        sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)

        '''
        /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
        helps you acquire an accurate ground truth data source for the traffic light
        classifier by sending the current color state of all traffic lights in the
        simulator. When testing on the vehicle, the color state will not be available. You'll need to
        rely on the position of the light and the camera image to predict it.
        '''
        #sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)
        sub6 = rospy.Subscriber('/image_color', Image, self.image_cb, queue_size=1)

        config_string = rospy.get_param("/traffic_light_config")
        # NOTE(review): yaml.load without an explicit Loader is deprecated and
        # unsafe on untrusted input; the string comes from the ROS parameter
        # server here, but prefer yaml.safe_load if the config allows it.
        self.config = yaml.load(config_string)

        self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)

        self.model_path = self.config['model_path']
        self.bridge = CvBridge()
        self.light_classifier = TLClassifier(self.model_path)
        self.listener = tf.TransformListener()

        # Debounce state shared with the ImageProcess worker thread.
        self.state = TrafficLight.UNKNOWN
        self.last_state = TrafficLight.UNKNOWN
        self.last_wp = -1
        self.state_count = 0
        self.process_thread = None

        rate = rospy.Rate(15)  # 15 Hz processing loop
        while not rospy.is_shutdown():
            if self.camera_image is not None:
                # Run classification on a worker thread so this loop keeps
                # spinning; only start a new one once the previous finished.
                # (is_alive() replaces isAlive(), removed in Python 3.9.)
                if self.process_thread is None or not self.process_thread.is_alive():
                    self.process_thread = ImageProcess(self)
                    self.process_thread.start()
            rate.sleep()

    def pose_cb(self, msg):
        """Cache the latest vehicle pose."""
        self.pose = msg

    def waypoints_cb(self, waypoints):
        """Cache the list of base waypoints."""
        self.waypoints = waypoints.waypoints

    def traffic_cb(self, msg):
        """Cache simulator ground-truth lights (subscription currently disabled)."""
        self.lights = msg.lights

    def image_cb(self, msg):
        """Identifies red lights in the incoming camera image and publishes the index
        of the waypoint closest to the red light's stop line to /traffic_waypoint

        Args:
            msg (Image): image from car-mounted camera
        """
        self.has_image = True
        self.camera_image = msg
class ImageProcess (threading.Thread):
    """Worker thread that classifies the latest camera frame and publishes the
    waypoint index of the closest upcoming red light's stop line.

    One instance is started per processed frame by TLDetector's main loop; all
    shared state (debounce counters, publisher, latest image) lives on the
    owning TLDetector instance.
    """

    def __init__(self, tl_detector):
        threading.Thread.__init__(self)
        self.tl_detector = tl_detector  # back-reference to the owning node

    def run(self):
        light_wp, state = self.process_traffic_lights()
        '''
        Publish upcoming red lights at camera frequency.
        Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
        of times till we start using it. Otherwise the previous stable state is
        used.
        '''
        if self.tl_detector.state != state:
            # New prediction differs from the current candidate: restart debounce.
            self.tl_detector.state_count = 0
            self.tl_detector.state = state
        elif self.tl_detector.state_count >= STATE_COUNT_THRESHOLD:
            # Candidate confirmed often enough: publish it (-1 unless red).
            self.tl_detector.last_state = self.tl_detector.state
            light_wp = light_wp if state == TrafficLight.RED else -1
            self.tl_detector.last_wp = light_wp
            self.tl_detector.upcoming_red_light_pub.publish(Int32(light_wp))
        else:
            # Not yet stable: keep re-publishing the last confirmed waypoint.
            self.tl_detector.upcoming_red_light_pub.publish(Int32(self.tl_detector.last_wp))
        self.tl_detector.state_count += 1

    def get_closest_waypoint(self, pose):
        """Identifies the closest path waypoint to the given position
        https://en.wikipedia.org/wiki/Closest_pair_of_points_problem

        Args:
            pose (Pose): position to match a waypoint to

        Returns:
            int: index of the closest waypoint in self.waypoints
                 (0 if no waypoints are available yet)
        """
        closest_idx = 0
        closest_dist = float('inf')
        if self.tl_detector.waypoints:
            # Linear scan comparing 2D Euclidean distance to every waypoint.
            for wp_idx in range(len(self.tl_detector.waypoints)):
                distance = math.sqrt((pose.position.x-self.tl_detector.waypoints[wp_idx].pose.pose.position.x)**2 +
                                     (pose.position.y-self.tl_detector.waypoints[wp_idx].pose.pose.position.y)**2)
                if(distance < closest_dist):
                    closest_dist = distance
                    closest_idx = wp_idx
        return closest_idx

    def get_light_state(self, light):
        """Determines the current color of the traffic light

        Args:
            light (TrafficLight): light to classify

        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight),
                 or False when no camera image has been received yet
        """
        if(not self.tl_detector.has_image):
            self.tl_detector.prev_light_loc = None
            return False
        cv_image = self.tl_detector.bridge.imgmsg_to_cv2(self.tl_detector.camera_image, "bgr8")
        # note, do we want to have this line? check if network trained on RGB or BGR - i think BGR
        cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)
        #Get classification
        return self.tl_detector.light_classifier.get_classification(cv_image)

    def process_traffic_lights(self):
        """Finds closest visible traffic light, if one exists, and determines its
        location and color

        Returns:
            int: index of waypoint closes to the upcoming stop line for a traffic light (-1 if none exists)
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)
        """
        light = None
        max_detection_dist = 120 # maximum distance we want to check lights for
        min_dist = float('inf') #closest light
        # List of positions that correspond to the line to stop in front of for a given intersection
        stop_line_positions = self.tl_detector.config['stop_line_positions']
        if(self.tl_detector.pose and self.tl_detector.waypoints):
            car_position = self.get_closest_waypoint(self.tl_detector.pose.pose)
            # Find the closest visible traffic light (if one exists)
            for stop_pos in stop_line_positions:
                # Wrap the stop-line coordinates in a TrafficLight message so we
                # can reuse get_closest_waypoint on its pose.
                new_light = TrafficLight()
                #new_light.header = Header()
                new_light.header.stamp = rospy.Time.now()
                new_light.header.frame_id = 'world'
                new_light.pose.pose = Pose()
                new_light.pose.pose.position.x = stop_pos[0]
                new_light.pose.pose.position.y = stop_pos[1]
                new_light.state = TrafficLight.UNKNOWN
                stop_position = self.get_closest_waypoint(new_light.pose.pose)
                # Euclidean distance between the car's and the stop line's waypoints.
                distance_to_light = math.sqrt((self.tl_detector.waypoints[car_position].pose.pose.position.x-self.tl_detector.waypoints[stop_position].pose.pose.position.x)**2 +
                                              (self.tl_detector.waypoints[car_position].pose.pose.position.y-self.tl_detector.waypoints[stop_position].pose.pose.position.y)**2)
                # NOTE(review): car_position < stop_position assumes waypoint
                # indices increase monotonically along the route — confirm.
                if distance_to_light < min_dist and distance_to_light < max_detection_dist: # if closer than last light, but not beyond max range we are interested in,
                    if car_position < stop_position: # and our car has not yet passed the wp the light is at, then...
                        min_dist = distance_to_light
                        light = new_light
                        light_wp = stop_position
        if light:
            state = self.get_light_state(light)
            return light_wp, state
        return -1, TrafficLight.UNKNOWN
if __name__ == '__main__':
    try:
        # Blocks: the node's __init__ runs its own rospy processing loop.
        TLDetector()
    except rospy.ROSInterruptException:
        rospy.logerr('Could not start traffic node.')
|
hltv.py | # Most of this script came from https://scorebot-secure.hltv.org/scorebotClientApi.js?v5, property of HLTV.org
# This is just a simple Telegram bot querying their data, publicly available @ HLTV.org during matches.
from socketIO_client import SocketIO # instale "socketIO-client" via pip pra funcionar
from reborn import log
import time
import api
import json
import threading
import re
class MessageHandler(object):
    """Holds the Telegram-side state for one tracked HLTV match.

    Keeps the current scoreboard and round-log texts together with the
    Telegram message ids they were posted under, so the bot can edit the
    messages in place or delete-and-resend them to the bottom of the chat.
    """

    def __init__(self):
        self.is_running = False
        self.received_log_history = False
        self.presented_initial_scoreboard = False
        self.simple_mode = False
        self.alive_tr = []
        self.alive_ct = []
        self.scoreboard_msg_id = 0
        self.log_msg_id = 0
        self.last_score_update = time.time()
        self.chat_id = 0
        self.scoreboard_msg = ""
        self.log_msg = ""

    def set_scoreboard_msg(self, message):
        """Replace the scoreboard text."""
        self.scoreboard_msg = message

    def add_log_msg(self, message):
        """Append one entry to the round log."""
        self.log_msg = self.log_msg + "\n -- " + message

    def _resend(self, message_id, text, log_line):
        """Delete the given message (if posted) and re-post its text; return the new id."""
        if message_id == 0:
            return message_id
        api.delete_message(self.chat_id, message_id)
        log(log_line)
        return api.send_message(self.chat_id, text, parse_mode="")["result"]["message_id"]

    def resend_messages(self):
        """Re-post the log and scoreboard so they reappear at the chat bottom."""
        self.log_msg_id = self._resend(self.log_msg_id, self.log_msg, "reenviando log...")
        self.scoreboard_msg_id = self._resend(self.scoreboard_msg_id, self.scoreboard_msg, 'reenviando score...')

    def clear_log_msg(self):
        """Reset the round log."""
        self.log_msg = ""

    def update_log(self):
        """Edit the posted log message in place."""
        api.edit_message_text(self.chat_id, self.log_msg_id, self.log_msg, parse_mode="")

    def update_scoreboard(self):
        """Edit the posted scoreboard message in place."""
        api.edit_message_text(self.chat_id, self.scoreboard_msg_id, self.scoreboard_msg, parse_mode="")
# Shared state for the single match being tracked, plus the socket.io
# connection to HLTV's public scorebot endpoint.
opt = MessageHandler()
socketio = SocketIO("scorebot2.hltv.org", 10022)
def on_log(*args):
    """Handle a 'log' event from the scorebot: translate kill/bomb/round
    entries into Telegram log lines and refresh the scoreboard message."""
    # A log update implies the scoreboard changed too, so refresh it first.
    opt.update_scoreboard()
    # The very first 'log' event carries the full match history; skip it so
    # we don't replay the whole game into the chat.
    if not opt.received_log_history:
        opt.received_log_history = True
        return
    # Escape characters that the Telegram API fails to parse in player names.
    proper_string = str(args[0]).replace("#", "%23").replace("&", "%26")
    entries = json.loads(proper_string)["log"][0]
    for event in entries:
        # Simple mode only reports what happened at the end of the round.
        # NOTE(review): this `return` (rather than `continue`) also drops any
        # remaining events in this batch — confirm that is intended.
        if opt.simple_mode and event != "RoundEnd":
            return
        details = entries[event]
        if event == "Kill":
            hs = ""
            if details["headShot"]:
                hs = " (hs)"
            opt.add_log_msg(details["killerName"] + " (" + details["killerSide"] + ") matou " + details["victimName"] + " (" + details["victimSide"] + ") com " + details["weapon"] + hs)
        elif event == "BombPlanted":
            opt.add_log_msg(details["playerName"] + " plantou a bomba com " + str(details["ctPlayers"]) + " CTs e " + str(details["tPlayers"]) + " TRs")
        elif event == "BombDefused":
            opt.add_log_msg(details["playerName"] + " defusou a bomba")
        elif event == "MatchStarted":
            opt.add_log_msg("PARTIDA INICIANDO...")
        elif event == "RoundStart":
            # New round: reset the log and bump both messages to the chat bottom.
            opt.clear_log_msg()
            opt.add_log_msg("ROUND INICIANDO...")
            opt.resend_messages()
        elif event == "RoundEnd":
            reason = ""
            if details["winType"] == "Target_Bombed":
                reason = " (bomba explodiu)"
            elif details["winType"] == "Bomb_Defused":
                reason = " (bomba defusada)"
            message = "FIM DE ROUND. " + details["winner"] + " GANHOU" + reason + ".\nSCORE DO HALF: TR " + str(details["terroristScore"]) + " - " + str(details["counterTerroristScore"]) + " CT"
            if opt.simple_mode:
                # In simple mode the round summary is sent as a standalone message.
                api.send_message(opt.chat_id, message)
                return
            else:
                opt.add_log_msg(message)
        else:
            # Unknown event type: stop processing this batch.
            return
    opt.update_log()
def on_scoreboard(*args):
    """Handle a 'scoreboard' event: track alive players per side and rebuild
    the scoreboard text; only the very first event edits the message itself
    (subsequent edits are driven by the 'log' handler)."""
    # Normalize HLTV's Python-repr-like payload into parseable JSON.
    proper_string = str(args[0]).replace("'", '"').replace("True", '"True"').replace("False", '"False"')
    proper_string = proper_string.encode("utf8")
    entries = json.loads(proper_string)

    # Maintain the alive-player lists for both sides with one shared loop.
    for players, alive_list in ((entries["TERRORIST"], opt.alive_tr), (entries["CT"], opt.alive_ct)):
        for player in players:
            name = player["name"]
            if player["alive"] == "True" and name not in alive_list:
                alive_list.append(name)
            elif player["alive"] == "False" and name in alive_list:
                alive_list.remove(name)

    scoreboard = (
        entries["terroristTeamName"] + " (TR) " + str(entries["terroristScore"]) + " vs. "
        + str(entries["counterTerroristScore"]) + " (CT) " + entries["ctTeamName"] + "\n"
        + "\nVIVOS: TR " + str(len(opt.alive_tr)) + " vs. " + str(len(opt.alive_ct)) + " CT"
        + "\nMAPA: " + entries["mapName"]
        + "\nROUND: " + str(entries["currentRound"])
    )
    if entries["bombPlanted"] == "True":
        scoreboard += "\nBOMBA PLANTADA 🔴"
    opt.scoreboard_msg = scoreboard

    # Only the first scoreboard event triggers an edit; this event fires very
    # often, so later updates are left to the 'log' handler.
    if not opt.presented_initial_scoreboard:
        opt.update_scoreboard()
        opt.presented_initial_scoreboard = True
def on_connect(*args):
    """Mark the tracker as running once the scorebot socket is connected."""
    print("Scorebot conectado.")
    self_running = None  # (no-op placeholder removed)
    opt.is_running = True
def get_infos_from_match(matchid):
    """Register the scorebot callbacks for the given match id and block,
    listening for events. Runs inside the 'hltvThread' worker thread."""
    matchid = str(matchid)
    print("Conectando ao scorebot...")
    socketio.on("connect", on_connect)
    socketio.on("scoreboard", on_scoreboard)
    socketio.on("log", on_log)
    socketio.emit("readyForMatch", matchid)
    socketio.wait()  # blocks indefinitely processing incoming events
def perform_exit():
    """Detach the scorebot callbacks and post a final goodbye in the log message."""
    log("Parando de acompanhar...")
    socketio.off("scoreboard")
    socketio.off("log")
    socketio.wait(seconds=1)  # let any in-flight events drain before the goodbye
    opt.clear_log_msg()
    opt.add_log_msg("ae carai to me desligano ta bom tchau")
    opt.update_log()
def on_msg_received(msg, matches):
    """Telegram entry point: start or stop tracking based on the command match.

    matches.group(1) is either a subcommand (stop/simple) or the match id
    itself; matches.group(2), when present, is the match id following a
    subcommand.
    """
    opt.chat_id = msg["chat"]["id"]
    # opt.log_msg_id = api.send_message(opt.chat_id, "esperando algo acontecer...")["result"]["message_id"]
    # opt.scoreboard_msg_id = api.send_message(opt.chat_id, "esperando scoreboard...")["result"]["message_id"]
    if re.match("(?:stop|parar?|cancelar?)", matches.group(1)):
        # Stop command: shut down the tracker if one is running.
        if opt.is_running:
            perform_exit()
        return
    if matches.group(2) is not None:
        # A subcommand plus a match id; "simple" variants enable simple mode.
        if re.match("(?:simples?|clean|resumido)", matches.group(1)):
            opt.simple_mode = True
        thread = threading.Thread(name="hltvThread", target=get_infos_from_match, args=(matches.group(2),))
        # get_infos_from_match(matches.group(2))
    else:
        # Only a match id was given.
        thread = threading.Thread(name="hltvThread", target=get_infos_from_match, args=(matches.group(1),))
        # get_infos_from_match(matches.group(1))
    opt.scoreboard_msg_id = api.send_message(opt.chat_id, "esperando scoreboard...")["result"]["message_id"]
    # Simple mode has no log message, but the scoreboard is still updated.
    if not opt.simple_mode:
        opt.log_msg_id = api.send_message(opt.chat_id, "esperando algo acontecer...")["result"]["message_id"]
    thread.start()
|
dag_processing.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import enum
import importlib
import logging
import multiprocessing
import os
import re
import signal
import sys
import time
import zipfile
from abc import ABCMeta, abstractmethod
from datetime import datetime, timedelta
from importlib import import_module
from typing import Iterable, NamedTuple, Optional
import psutil
from setproctitle import setproctitle
from sqlalchemy import or_
from tabulate import tabulate
# To avoid circular imports
import airflow.models
from airflow.configuration import conf
from airflow.dag.base_dag import BaseDag, BaseDagBag
from airflow.exceptions import AirflowException
from airflow.models import errors
from airflow.settings import STORE_SERIALIZED_DAGS
from airflow.stats import Stats
from airflow.utils import timezone
from airflow.utils.db import provide_session
from airflow.utils.helpers import reap_process_group
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.state import State
class SimpleDag(BaseDag):
    """
    A simplified representation of a DAG that contains all attributes
    required for instantiating and scheduling its associated tasks.

    :param dag: the DAG
    :type dag: airflow.models.DAG
    :param pickle_id: ID associated with the pickled version of this DAG.
    :type pickle_id: unicode
    """

    def __init__(self, dag, pickle_id=None):
        self._dag_id = dag.dag_id
        self._task_ids = [task.task_id for task in dag.tasks]
        self._full_filepath = dag.full_filepath
        self._is_paused = dag.is_paused
        self._concurrency = dag.concurrency
        self._pickle_id = pickle_id
        # Per-task overrides keyed by task id; only tasks that actually
        # define a special argument get an entry.
        self._task_special_args = {}
        for task in dag.tasks:
            overrides = {}
            if task.task_concurrency is not None:
                overrides['task_concurrency'] = task.task_concurrency
            if overrides:
                self._task_special_args[task.task_id] = overrides

    @property
    def dag_id(self):
        """The DAG ID (unicode)."""
        return self._dag_id

    @property
    def task_ids(self):
        """List of task IDs belonging to this DAG (list[unicode])."""
        return self._task_ids

    @property
    def full_filepath(self):
        """Absolute path to the file containing this DAG's definition (unicode)."""
        return self._full_filepath

    @property
    def concurrency(self):
        """Maximum number of tasks that can run simultaneously from this DAG (int)."""
        return self._concurrency

    @property
    def is_paused(self):
        """Whether this DAG is paused (bool)."""
        return self._is_paused

    @property
    def pickle_id(self):
        """The pickle ID for this DAG, or None when it has none (unicode)."""
        return self._pickle_id

    @property
    def task_special_args(self):
        """Mapping of task id to that task's special-argument dict."""
        return self._task_special_args

    def get_task_special_arg(self, task_id, special_arg_name):
        """Return the named special argument for task_id, or None if absent."""
        return self._task_special_args.get(task_id, {}).get(special_arg_name)
class SimpleTaskInstance:
    """Lightweight snapshot of the TaskInstance fields used by the scheduler.

    Attributes that may be absent on the source object (run_as_user, pool,
    priority_weight) default to None.
    """

    def __init__(self, ti):
        self._dag_id = ti.dag_id
        self._task_id = ti.task_id
        self._execution_date = ti.execution_date
        self._start_date = ti.start_date
        self._end_date = ti.end_date
        self._try_number = ti.try_number
        self._state = ti.state
        self._executor_config = ti.executor_config
        # These fields may be missing on some TaskInstance-like objects.
        self._run_as_user = getattr(ti, 'run_as_user', None)
        self._pool = getattr(ti, 'pool', None)
        self._priority_weight = getattr(ti, 'priority_weight', None)
        self._queue = ti.queue
        self._key = ti.key

    @property
    def dag_id(self):
        return self._dag_id

    @property
    def task_id(self):
        return self._task_id

    @property
    def execution_date(self):
        return self._execution_date

    @property
    def start_date(self):
        return self._start_date

    @property
    def end_date(self):
        return self._end_date

    @property
    def try_number(self):
        return self._try_number

    @property
    def state(self):
        return self._state

    @property
    def pool(self):
        return self._pool

    @property
    def priority_weight(self):
        return self._priority_weight

    @property
    def queue(self):
        return self._queue

    @property
    def key(self):
        return self._key

    @property
    def executor_config(self):
        return self._executor_config

    @provide_session
    def construct_task_instance(self, session=None, lock_for_update=False):
        """
        Construct a TaskInstance from the database based on the primary key

        :param session: DB session.
        :param lock_for_update: if True, indicates that the database should
            lock the TaskInstance (issuing a FOR UPDATE clause) until the
            session is committed.
        """
        TI = airflow.models.TaskInstance
        query = session.query(TI).filter(
            TI.dag_id == self._dag_id,
            TI.task_id == self._task_id,
            TI.execution_date == self._execution_date)
        if lock_for_update:
            query = query.with_for_update()
        return query.first()
class SimpleDagBag(BaseDagBag):
    """
    A collection of SimpleDag objects with some convenience methods.
    """

    def __init__(self, simple_dags):
        """
        Constructor.

        :param simple_dags: SimpleDag objects that should be in this
        :type list(airflow.utils.dag_processing.SimpleDagBag)
        """
        self.simple_dags = simple_dags
        # Index the dags by id for O(1) lookup in get_dag.
        self.dag_id_to_simple_dag = {sd.dag_id: sd for sd in simple_dags}

    @property
    def dag_ids(self):
        """IDs of all the DAGs in this bag (view of keys, list-like of unicode)."""
        return self.dag_id_to_simple_dag.keys()

    def get_dag(self, dag_id):
        """
        :param dag_id: DAG ID
        :type dag_id: unicode
        :return: if the given DAG ID exists in the bag, return the BaseDag
            corresponding to that ID. Otherwise, throw an Exception
        :rtype: airflow.utils.dag_processing.SimpleDag
        """
        simple_dag = self.dag_id_to_simple_dag.get(dag_id)
        if simple_dag is None:
            raise AirflowException("Unknown DAG ID {}".format(dag_id))
        return simple_dag
def correct_maybe_zipped(fileloc):
    """
    If the path contains a folder with a .zip suffix, then
    the folder is treated as a zip archive and path to zip is returned.
    """
    # Group 2 captures the longest '...*.zip' prefix followed by a path
    # separator, if any; group 3 is the remainder of the path.
    match = re.search(r'((.*\.zip){})?(.*)'.format(re.escape(os.sep)), fileloc)
    archive = match.group(2)
    if archive and zipfile.is_zipfile(archive):
        return archive
    return fileloc
# Matches a '#' comment (and any leading whitespace) so it can be stripped
# from .airflowignore lines before compiling them as regexes.
COMMENT_PATTERN = re.compile(r"\s*#.*")
def list_py_file_paths(directory, safe_mode=conf.getboolean('core', 'DAG_DISCOVERY_SAFE_MODE', fallback=True),
                       include_examples=None):
    """
    Traverse a directory and look for Python files.

    :param directory: the directory to traverse
    :type directory: unicode
    :param safe_mode: whether to use a heuristic to determine whether a file
        contains Airflow DAG definitions. If not provided, use the
        core.DAG_DISCOVERY_SAFE_MODE configuration setting. If not set, default
        to safe.
    :type safe_mode: bool
    :param include_examples: include example DAGs
    :type include_examples: bool
    :return: a list of paths to Python files in the specified directory
    :rtype: list[unicode]
    """
    if include_examples is None:
        include_examples = conf.getboolean('core', 'LOAD_EXAMPLES')
    file_paths = []
    if directory is None:
        return []
    elif os.path.isfile(directory):
        return [directory]
    elif os.path.isdir(directory):
        patterns_by_dir = {}
        for root, dirs, files in os.walk(directory, followlinks=True):
            patterns = patterns_by_dir.get(root, [])
            ignore_file = os.path.join(root, '.airflowignore')
            if os.path.isfile(ignore_file):
                with open(ignore_file, 'r') as file:
                    lines_no_comments = [COMMENT_PATTERN.sub("", line) for line in file.read().split("\n")]
                    # BUGFIX: build a NEW list instead of mutating `patterns`
                    # in place (`+=`). The list may be shared with the parent
                    # directory's entry in patterns_by_dir, and in-place
                    # mutation would leak this directory's patterns into
                    # sibling subtrees.
                    patterns = patterns + [re.compile(line) for line in lines_no_comments if line]
            # If we can ignore any subdirs entirely we should - fewer paths
            # to walk is better. We have to modify the ``dirs`` array in
            # place for this to affect os.walk
            dirs[:] = [
                d
                for d in dirs
                if not any(p.search(os.path.join(root, d)) for p in patterns)
            ]
            # We want patterns defined in a parent folder's .airflowignore to
            # apply to subdirs too
            for d in dirs:
                patterns_by_dir[os.path.join(root, d)] = patterns
            for f in files:
                try:
                    file_path = os.path.join(root, f)
                    if not os.path.isfile(file_path):
                        continue
                    _, file_ext = os.path.splitext(os.path.split(file_path)[-1])
                    # Only .py files and zip archives can contain DAGs.
                    if file_ext != '.py' and not zipfile.is_zipfile(file_path):
                        continue
                    # Skip files matched by any accumulated ignore pattern.
                    if any(p.search(file_path) for p in patterns):
                        continue
                    # Heuristic that guesses whether a Python file contains an
                    # Airflow DAG definition.
                    might_contain_dag = True
                    if safe_mode and not zipfile.is_zipfile(file_path):
                        with open(file_path, 'rb') as fp:
                            content = fp.read()
                            might_contain_dag = all(
                                s in content for s in (b'DAG', b'airflow'))
                    if not might_contain_dag:
                        continue
                    file_paths.append(file_path)
                except Exception:
                    log = LoggingMixin().log
                    log.exception("Error while examining %s", f)
    if include_examples:
        import airflow.example_dags
        example_dag_folder = airflow.example_dags.__path__[0]
        file_paths.extend(list_py_file_paths(example_dag_folder, safe_mode, False))
    return file_paths
class AbstractDagFileProcessor(metaclass=ABCMeta):
    """
    Interface for a worker that processes one DAG file.
    Processes a DAG file. See SchedulerJob.process_file() for more details.
    """

    @abstractmethod
    def start(self):
        """
        Launch the process to process the file
        """
        raise NotImplementedError()

    @abstractmethod
    def terminate(self, sigkill=False):
        """
        Terminate (and then kill) the process launched to process the file
        """
        raise NotImplementedError()

    @property
    @abstractmethod
    def pid(self):
        """
        :return: the PID of the process launched to process the given file
        """
        raise NotImplementedError()

    @property
    @abstractmethod
    def exit_code(self):
        """
        After the process is finished, this can be called to get the return code

        :return: the exit code of the process
        :rtype: int
        """
        raise NotImplementedError()

    @property
    @abstractmethod
    def done(self):
        """
        Check if the process launched to process this file is done.

        :return: whether the process is finished running
        :rtype: bool
        """
        raise NotImplementedError()

    @property
    @abstractmethod
    def result(self):
        """
        A list of simple dags found, and the number of import errors

        :return: result of running SchedulerJob.process_file()
        :rtype: tuple[list[airflow.utils.dag_processing.SimpleDag], int]
        """
        raise NotImplementedError()

    @property
    @abstractmethod
    def start_time(self):
        """
        :return: When this started to process the file
        :rtype: datetime
        """
        raise NotImplementedError()

    @property
    @abstractmethod
    def file_path(self):
        """
        :return: the path to the file that this is processing
        :rtype: unicode
        """
        raise NotImplementedError()
# Parsing-progress message sent from DagFileProcessorManager to the agent.
DagParsingStat = NamedTuple('DagParsingStat', [
    ('file_paths', Iterable[str]),
    ('done', bool),
    ('all_files_processed', bool)
])

# Per-file parsing statistics tracked by the manager.
DagFileStat = NamedTuple('DagFileStat', [
    ('num_dags', int),
    ('import_errors', int),
    ('last_finish_time', datetime),
    ('last_duration', float),
    ('run_count', int),
])
class DagParsingSignal(enum.Enum):
    """Control signals exchanged between DagFileProcessorAgent and the manager."""
    AGENT_HEARTBEAT = 'agent_heartbeat'
    TERMINATE_MANAGER = 'terminate_manager'
    END_MANAGER = 'end_manager'
class DagFileProcessorAgent(LoggingMixin):
"""
Agent for DAG file processing. It is responsible for all DAG parsing
related jobs in scheduler process. Mainly it can spin up DagFileProcessorManager
in a subprocess, collect DAG parsing results from it and communicate
signal/DAG parsing stat with it.
This class runs in the main `airflow scheduler` process.
"""
def __init__(self,
dag_directory,
file_paths,
max_runs,
processor_factory,
processor_timeout,
async_mode):
"""
:param dag_directory: Directory where DAG definitions are kept. All
files in file_paths should be under this directory
:type dag_directory: unicode
:param file_paths: list of file paths that contain DAG definitions
:type file_paths: list[unicode]
:param max_runs: The number of times to parse and schedule each file. -1
for unlimited.
:type max_runs: int
:param processor_factory: function that creates processors for DAG
definition files. Arguments are (dag_definition_path, log_file_path)
:type processor_factory: (unicode, unicode, list) -> (AbstractDagFileProcessor)
:param processor_timeout: How long to wait before timing out a DAG file processor
:type processor_timeout: timedelta
:param async_mode: Whether to start agent in async mode
:type async_mode: bool
"""
self._file_paths = file_paths
self._file_path_queue = []
self._dag_directory = dag_directory
self._max_runs = max_runs
self._processor_factory = processor_factory
self._processor_timeout = processor_timeout
self._async_mode = async_mode
# Map from file path to the processor
self._processors = {}
# Pipe for communicating signals
self._process = None
self._done = False
# Initialized as true so we do not deactivate w/o any actual DAG parsing.
self._all_files_processed = True
self._parent_signal_conn = None
self._collected_dag_buffer = []
def start(self):
"""
Launch DagFileProcessorManager processor and start DAG parsing loop in manager.
"""
self._parent_signal_conn, child_signal_conn = multiprocessing.Pipe()
self._process = multiprocessing.Process(
target=type(self)._run_processor_manager,
args=(
self._dag_directory,
self._file_paths,
self._max_runs,
self._processor_factory,
self._processor_timeout,
child_signal_conn,
self._async_mode,
)
)
self._process.start()
self.log.info("Launched DagFileProcessorManager with pid: %s", self._process.pid)
def heartbeat(self):
"""
Should only be used when launched DAG file processor manager in sync mode.
Send agent heartbeat signal to the manager, requesting that it runs one
processing "loop".
Call wait_until_finished to ensure that any launched processors have
finished before continuing
"""
if not self._process.is_alive():
return
try:
self._parent_signal_conn.send(DagParsingSignal.AGENT_HEARTBEAT)
except ConnectionError:
# If this died cos of an error then we will noticed and restarted
# when harvest_simple_dags calls _heartbeat_manager.
pass
def wait_until_finished(self):
while self._parent_signal_conn.poll():
try:
result = self._parent_signal_conn.recv()
except EOFError:
break
self._process_message(result)
if isinstance(result, DagParsingStat):
# In sync mode we don't send this message from the Manager
# until all the running processors have finished
return
@staticmethod
def _run_processor_manager(dag_directory,
file_paths,
max_runs,
processor_factory,
processor_timeout,
signal_conn,
async_mode):
# Make this process start as a new process group - that makes it easy
# to kill all sub-process of this at the OS-level, rather than having
# to iterate the child processes
os.setpgid(0, 0)
setproctitle("airflow scheduler -- DagFileProcessorManager")
# Reload configurations and settings to avoid collision with parent process.
# Because this process may need custom configurations that cannot be shared,
# e.g. RotatingFileHandler. And it can cause connection corruption if we
# do not recreate the SQLA connection pool.
os.environ['CONFIG_PROCESSOR_MANAGER_LOGGER'] = 'True'
os.environ['AIRFLOW__CORE__COLORED_CONSOLE_LOG'] = 'False'
# Replicating the behavior of how logging module was loaded
# in logging_config.py
importlib.reload(import_module(airflow.settings.LOGGING_CLASS_PATH.rsplit('.', 1)[0]))
importlib.reload(airflow.settings)
airflow.settings.initialize()
del os.environ['CONFIG_PROCESSOR_MANAGER_LOGGER']
processor_manager = DagFileProcessorManager(dag_directory,
file_paths,
max_runs,
processor_factory,
processor_timeout,
signal_conn,
async_mode)
processor_manager.start()
def harvest_simple_dags(self):
"""
Harvest DAG parsing results from result queue and sync metadata from stat queue.
:return: List of parsing result in SimpleDag format.
"""
# Receive any pending messages before checking if the process has exited.
while self._parent_signal_conn.poll():
try:
result = self._parent_signal_conn.recv()
except (EOFError, ConnectionError):
break
self._process_message(result)
simple_dags = self._collected_dag_buffer
self._collected_dag_buffer = []
# If it died unexpectedly restart the manager process
self._heartbeat_manager()
return simple_dags
def _process_message(self, message):
self.log.debug("Received message of type %s", type(message).__name__)
if isinstance(message, DagParsingStat):
self._sync_metadata(message)
else:
self._collected_dag_buffer.append(message)
def _heartbeat_manager(self):
"""
Heartbeat DAG file processor and restart it if we are not done.
"""
if self._process and not self._process.is_alive():
self._process.join(timeout=0)
if not self.done:
self.log.warning(
"DagFileProcessorManager (PID=%d) exited with exit code %d - re-launching",
self._process.pid, self._process.exitcode
)
self.start()
def _sync_metadata(self, stat):
"""
Sync metadata from stat queue and only keep the latest stat.
"""
self._file_paths = stat.file_paths
self._done = stat.done
self._all_files_processed = stat.all_files_processed
@property
def file_paths(self):
return self._file_paths
@property
def done(self):
return self._done
@property
def all_files_processed(self):
return self._all_files_processed
def terminate(self):
"""
Send termination signal to DAG parsing processor manager
and expect it to terminate all DAG file processors.
"""
if self._process and self._process.is_alive():
self.log.info("Sending termination message to manager.")
try:
self._parent_signal_conn.send(DagParsingSignal.TERMINATE_MANAGER)
except ConnectionError:
pass
def end(self):
    """
    Terminate (and then kill) the manager process that was launched.

    :return: None
    """
    process = self._process
    if not process:
        self.log.warning('Ending without manager process.')
        return
    # Kill the manager together with every processor it spawned.
    reap_process_group(process.pid, log=self.log)
    self._parent_signal_conn.close()
class DagFileProcessorManager(LoggingMixin):
    """
    Given a list of DAG definition files, this kicks off several processors
    in parallel to process them and put the results to a multiprocessing.Queue
    for DagFileProcessorAgent to harvest. The parallelism is limited and as the
    processors finish, more are launched. The files are processed over and
    over again, but no more often than the specified interval.

    :param dag_directory: Directory where DAG definitions are kept. All
        files in file_paths should be under this directory
    :type dag_directory: unicode
    :param file_paths: list of file paths that contain DAG definitions
    :type file_paths: list[unicode]
    :param max_runs: The number of times to parse and schedule each file. -1
        for unlimited.
    :type max_runs: int
    :param processor_factory: function that creates processors for DAG
        definition files. Arguments are (dag_definition_path)
    :type processor_factory: (unicode, unicode, list) -> (AbstractDagFileProcessor)
    :param processor_timeout: How long to wait before timing out a DAG file processor
    :type processor_timeout: timedelta
    :param signal_conn: connection to communicate signal with processor agent.
    :type signal_conn: airflow.models.connection.Connection
    :param async_mode: whether to start the manager in async mode
    :type async_mode: bool
    """
    def __init__(self,
                 dag_directory,
                 file_paths,
                 max_runs,
                 processor_factory,
                 processor_timeout,
                 signal_conn,
                 async_mode=True):
        self._file_paths = file_paths
        # Queue of file paths waiting to be handed to a processor.
        self._file_path_queue = []
        self._dag_directory = dag_directory
        self._max_runs = max_runs
        self._processor_factory = processor_factory
        self._signal_conn = signal_conn
        self._async_mode = async_mode
        self._parallelism = conf.getint('scheduler', 'max_threads')
        # SQLite cannot handle concurrent writers, so force a single thread.
        if 'sqlite' in conf.get('core', 'sql_alchemy_conn') and self._parallelism > 1:
            self.log.error("Cannot use more than 1 thread when using sqlite. "
                           "Setting parallelism to 1")
            self._parallelism = 1
        # Parse and schedule each file no faster than this interval.
        self._file_process_interval = conf.getint('scheduler',
                                                  'min_file_process_interval')
        # How often to print out DAG file processing stats to the log. Default to
        # 30 seconds.
        self.print_stats_interval = conf.getint('scheduler',
                                                'print_stats_interval')
        # How many seconds do we wait for tasks to heartbeat before mark them as zombies.
        self._zombie_threshold_secs = (
            conf.getint('scheduler', 'scheduler_zombie_task_threshold'))
        # Map from file path to the processor
        self._processors = {}
        self._heartbeat_count = 0
        # Map from file path to stats about the file
        self._file_stats = {}  # type: Dict[str, DagFileStat]
        # Throttles the zombie query (see _find_zombies).
        self._last_zombie_query_time = None
        # Last time that the DAG dir was traversed to look for files
        self.last_dag_dir_refresh_time = timezone.utcnow()
        # Last time stats were printed; epoch-ish default forces an early first print.
        self.last_stat_print_time = timezone.datetime(2000, 1, 1)
        # TODO: Remove magic number
        self._zombie_query_interval = 10
        self._zombies = []
        # How long to wait before timing out a process to parse a DAG file
        self._processor_timeout = processor_timeout
        # How often to scan the DAGs directory for new files. Default to 5 minutes.
        self.dag_dir_list_interval = conf.getint('scheduler',
                                                 'dag_dir_list_interval')
        self._log = logging.getLogger('airflow.processor_manager')
        # Clean up child processors on interrupt/termination.
        signal.signal(signal.SIGINT, self._exit_gracefully)
        signal.signal(signal.SIGTERM, self._exit_gracefully)
def _exit_gracefully(self, signum, frame):
    """
    Signal handler: clean up DAG file processors to avoid leaving orphan
    processes, then exit the current process.
    """
    self.log.info("Exiting gracefully upon receiving signal %s", signum)
    # Ask processors to stop, then force-kill anything still alive.
    self.terminate()
    self.end()
    self.log.debug("Finished terminating DAG processors.")
    sys.exit(os.EX_OK)
def start(self):
    """
    Use multiple processes to parse and generate tasks for the
    DAGs in parallel. By processing them in separate processes,
    we can get parallelism and isolation from potentially harmful
    user code.

    Runs the main parsing loop until the agent asks for termination or
    every file has been processed ``max_runs`` times.
    """
    self.log.info("Processing files using up to %s processes at a time ", self._parallelism)
    self.log.info("Process each file at most once every %s seconds", self._file_process_interval)
    self.log.info(
        "Checking for new files in %s every %s seconds", self._dag_directory, self.dag_dir_list_interval
    )
    # In sync mode we want timeout=None -- wait forever until a message is received
    poll_time = None  # type: Optional[float]
    if self._async_mode:
        poll_time = 0.0
        self.log.debug("Starting DagFileProcessorManager in async mode")
    else:
        poll_time = None
        self.log.debug("Starting DagFileProcessorManager in sync mode")
    # Used to track how long it takes us to get once around every file in the DAG folder.
    self._parsing_start_time = timezone.utcnow()
    while True:
        loop_start_time = time.time()
        if self._signal_conn.poll(poll_time):
            agent_signal = self._signal_conn.recv()
            # BUGFIX: log message previously read "Recived %s singal".
            self.log.debug("Received %s signal from DagFileProcessorAgent", agent_signal)
            if agent_signal == DagParsingSignal.TERMINATE_MANAGER:
                self.terminate()
                break
            elif agent_signal == DagParsingSignal.END_MANAGER:
                self.end()
                sys.exit(os.EX_OK)
            elif agent_signal == DagParsingSignal.AGENT_HEARTBEAT:
                # continue the loop to parse dags
                pass
        elif not self._async_mode:
            # In "sync" mode we don't want to parse the DAGs until we
            # are told to (as that would open another connection to the
            # SQLite DB which isn't a good practice
            continue
        self._refresh_dag_dir()
        self._find_zombies()
        # Launch/collect processors and forward any finished SimpleDags.
        simple_dags = self.heartbeat()
        for simple_dag in simple_dags:
            self._signal_conn.send(simple_dag)
        if not self._async_mode:
            self.log.debug(
                "Waiting for processors to finish since we're using sqlite")
            # Wait until the running DAG processors are finished before
            # sending a DagParsingStat message back. This means the Agent
            # can tell we've got to the end of this iteration when it sees
            # this type of message
            self.wait_until_finished()
            # Collect anything else that has finished, but don't kick off any more processors
            simple_dags = self.collect_results()
            for simple_dag in simple_dags:
                self._signal_conn.send(simple_dag)
        self._print_stat()
        all_files_processed = all(self.get_last_finish_time(x) is not None for x in self.file_paths)
        max_runs_reached = self.max_runs_reached()
        dag_parsing_stat = DagParsingStat(self._file_paths,
                                          max_runs_reached,
                                          all_files_processed,
                                          )
        self._signal_conn.send(dag_parsing_stat)
        if max_runs_reached:
            self.log.info("Exiting dag parsing loop as all files "
                          "have been processed %s times", self._max_runs)
            break
        if self._async_mode:
            # Throttle the loop so we poll roughly once per second.
            loop_duration = time.time() - loop_start_time
            if loop_duration < 1:
                poll_time = 1 - loop_duration
            else:
                poll_time = 0.0
def _refresh_dag_dir(self):
    """
    Refresh file paths from dag dir if we haven't done it for too long.

    Re-lists Python files under the DAG directory, prunes stale import
    errors, and (when serialized DAGs are stored) deactivates/removes
    DAGs whose files are gone.
    """
    now = timezone.utcnow()
    elapsed_time_since_refresh = (now - self.last_dag_dir_refresh_time).total_seconds()
    if elapsed_time_since_refresh > self.dag_dir_list_interval:
        # Build up a list of Python files that could contain DAGs
        self.log.info("Searching for files in %s", self._dag_directory)
        self._file_paths = list_py_file_paths(self._dag_directory)
        self.last_dag_dir_refresh_time = now
        self.log.info("There are %s files in %s", len(self._file_paths), self._dag_directory)
        # Also stops processors working on files that disappeared.
        self.set_file_paths(self._file_paths)
        try:
            self.log.debug("Removing old import errors")
            self.clear_nonexistent_import_errors()
        except Exception:
            # Best-effort cleanup; a DB hiccup here must not kill the loop.
            self.log.exception("Error removing old import errors")
        if STORE_SERIALIZED_DAGS:
            # to avoid circular imports
            from airflow.models import SerializedDagModel
            from airflow.models.dag import DagModel
            SerializedDagModel.remove_deleted_dags(self._file_paths)
            DagModel.deactivate_deleted_dags(self._file_paths)
def _print_stat(self):
    """
    Occasionally log stats about how fast the files are getting processed.
    """
    elapsed = (timezone.utcnow() - self.last_stat_print_time).total_seconds()
    if elapsed <= self.print_stats_interval:
        return
    if self._file_paths:
        self._log_file_processing_stats(self._file_paths)
    self.last_stat_print_time = timezone.utcnow()
@provide_session
def clear_nonexistent_import_errors(self, session):
    """
    Clears import errors for files that no longer exist.

    :param session: session for ORM operations (injected by provide_session)
    :type session: sqlalchemy.orm.session.Session
    """
    query = session.query(errors.ImportError)
    if self._file_paths:
        # Keep errors for files we still track; delete the rest.
        query = query.filter(
            ~errors.ImportError.filename.in_(self._file_paths)
        )
    # 'fetch' keeps in-memory session state consistent with the delete.
    query.delete(synchronize_session='fetch')
    session.commit()
def _log_file_processing_stats(self, known_file_paths):
    """
    Print out stats about how files are getting processed.

    :param known_file_paths: a list of file paths that may contain Airflow
        DAG definitions
    :type known_file_paths: list[unicode]
    :return: None
    """
    # File Path: Path to the file containing the DAG definition
    # PID: PID associated with the process that's processing the file. May
    # be empty.
    # Runtime: If the process is currently running, how long it's been
    # running for in seconds.
    # Last Runtime: If the process ran before, how long did it take to
    # finish in seconds
    # Last Run: When the file finished processing in the previous run.
    headers = ["File Path",
               "PID",
               "Runtime",
               "# DAGs",
               "# Errors",
               "Last Runtime",
               "Last Run"]
    rows = []
    now = timezone.utcnow()
    for file_path in known_file_paths:
        last_runtime = self.get_last_runtime(file_path)
        num_dags = self.get_last_dag_count(file_path)
        num_errors = self.get_last_error_count(file_path)
        file_name = os.path.basename(file_path)
        file_name = os.path.splitext(file_name)[0].replace(os.sep, '.')
        processor_pid = self.get_pid(file_path)
        processor_start_time = self.get_start_time(file_path)
        runtime = ((now - processor_start_time).total_seconds() if processor_start_time else None)
        last_run = self.get_last_finish_time(file_path)
        if last_run:
            seconds_ago = (now - last_run).total_seconds()
            Stats.gauge('dag_processing.last_run.seconds_ago.{}'.format(file_name), seconds_ago)
        if runtime:
            Stats.timing('dag_processing.last_duration.{}'.format(file_name), runtime)
            # TODO: Remove before Airflow 2.0
            Stats.timing('dag_processing.last_runtime.{}'.format(file_name), runtime)
        rows.append((file_path,
                     processor_pid,
                     runtime,
                     num_dags,
                     num_errors,
                     last_runtime,
                     last_run))
    # Sort by longest last runtime. (Can't sort None values in python3)
    # BUGFIX: the key used to be x[3], which is num_dags in this 7-tuple;
    # last_runtime is at index 5.
    rows = sorted(rows, key=lambda x: x[5] or 0.0)
    formatted_rows = []
    for file_path, pid, runtime, num_dags, num_errors, last_runtime, last_run in rows:
        formatted_rows.append((file_path,
                               pid,
                               "{:.2f}s".format(runtime) if runtime else None,
                               num_dags,
                               num_errors,
                               "{:.2f}s".format(last_runtime) if last_runtime else None,
                               last_run.strftime("%Y-%m-%dT%H:%M:%S") if last_run else None
                               ))
    log_str = ("\n" +
               "=" * 80 +
               "\n" +
               "DAG File Processing Stats\n\n" +
               tabulate(formatted_rows, headers=headers) +
               "\n" +
               "=" * 80)
    self.log.info(log_str)
@property
def file_paths(self):
    # Current list of DAG definition file paths being managed.
    return self._file_paths
def get_pid(self, file_path):
    """
    :param file_path: the path to the file that's being processed
    :type file_path: unicode
    :return: the PID of the process processing the given file or None if
        the specified file is not being processed
    :rtype: int
    """
    processor = self._processors.get(file_path)
    return processor.pid if processor else None
def get_all_pids(self):
    """
    :return: a list of the PIDs of every running processor
    :rtype: List[int]
    """
    return [processor.pid for processor in self._processors.values()]
def get_last_runtime(self, file_path):
    """
    :param file_path: the path to the file that was processed
    :type file_path: unicode
    :return: the runtime (in seconds) of the process of the last run, or
        None if the file was never processed.
    :rtype: float
    """
    try:
        return self._file_stats[file_path].last_duration
    except KeyError:
        return None
def get_last_dag_count(self, file_path):
    """
    :param file_path: the path to the file that was processed
    :type file_path: unicode
    :return: the number of dags loaded from that file, or None if the file
        was never processed.
    :rtype: int
    """
    try:
        return self._file_stats[file_path].num_dags
    except KeyError:
        return None
def get_last_error_count(self, file_path):
    """
    :param file_path: the path to the file that was processed
    :type file_path: unicode
    :return: the number of import errors from processing, or None if the file
        was never processed.
    :rtype: int
    """
    try:
        return self._file_stats[file_path].import_errors
    except KeyError:
        return None
def get_last_finish_time(self, file_path):
    """
    :param file_path: the path to the file that was processed
    :type file_path: unicode
    :return: the finish time of the process of the last run, or None if the
        file was never processed.
    :rtype: datetime
    """
    try:
        return self._file_stats[file_path].last_finish_time
    except KeyError:
        return None
def get_start_time(self, file_path):
    """
    :param file_path: the path to the file that's being processed
    :type file_path: unicode
    :return: the start time of the process that's processing the
        specified file or None if the file is not currently being processed
    :rtype: datetime
    """
    try:
        return self._processors[file_path].start_time
    except KeyError:
        return None
def get_run_count(self, file_path):
    """
    :param file_path: the path to the file that's being processed
    :type file_path: unicode
    :return: the number of times the given file has been parsed
    :rtype: int
    """
    try:
        return self._file_stats[file_path].run_count
    except KeyError:
        return 0
def set_file_paths(self, new_file_paths):
    """
    Update this with a new set of paths to DAG definition files.

    :param new_file_paths: list of paths to DAG definition files
    :type new_file_paths: list[unicode]
    :return: None
    """
    self._file_paths = new_file_paths
    # Drop queued paths that no longer exist on disk.
    self._file_path_queue = [
        path for path in self._file_path_queue if path in new_file_paths
    ]
    # Stop processors that are working on deleted files.
    surviving = {}
    for path, processor in self._processors.items():
        if path not in new_file_paths:
            self.log.warning("Stopping processor for %s", path)
            Stats.decr('dag_processing.processes')
            processor.terminate()
            self._file_stats.pop(path)
        else:
            surviving[path] = processor
    self._processors = surviving
def wait_until_finished(self):
    """
    Block (polling every 100ms) until every running processor is done.
    """
    for proc in self._processors.values():
        while not proc.done:
            time.sleep(0.1)
def collect_results(self):
    """
    Collect the result from any finished DAG processors.

    Also reaps timed-out processors first, records per-file stats for
    every finished processor, and removes finished processors from the
    running set.

    :return: a list of SimpleDags that were produced by processors that
        have finished since the last time this was called
    :rtype: list[airflow.utils.dag_processing.SimpleDag]
    """
    # Kill hung processors first so they are not mistaken for running ones.
    self._kill_timed_out_processors()
    finished_processors = {}
    """:type : dict[unicode, AbstractDagFileProcessor]"""
    running_processors = {}
    """:type : dict[unicode, AbstractDagFileProcessor]"""
    for file_path, processor in self._processors.items():
        if processor.done:
            self.log.debug("Processor for %s finished", file_path)
            Stats.decr('dag_processing.processes')
            now = timezone.utcnow()
            finished_processors[file_path] = processor
            # processor.result is None when the processor crashed; record
            # 0 dags / -1 import errors in that case.
            stat = DagFileStat(
                len(processor.result[0]) if processor.result is not None else 0,
                processor.result[1] if processor.result is not None else -1,
                now,
                (now - processor.start_time).total_seconds(),
                self.get_run_count(file_path) + 1,
            )
            self._file_stats[file_path] = stat
        else:
            running_processors[file_path] = processor
    self._processors = running_processors
    self.log.debug("%s/%s DAG parsing processes running",
                   len(self._processors), self._parallelism)
    self.log.debug("%s file paths queued for processing",
                   len(self._file_path_queue))
    # Collect all the DAGs that were found in the processed files
    simple_dags = []
    for file_path, processor in finished_processors.items():
        if processor.result is None:
            self.log.warning(
                "Processor for %s exited with return code %s.",
                processor.file_path, processor.exit_code
            )
        else:
            for simple_dag in processor.result[0]:
                simple_dags.append(simple_dag)
    return simple_dags
def heartbeat(self):
    """
    This should be periodically called by the manager loop. This method will
    kick off new processes to process DAG definition files and read the
    results from the finished processors.

    :return: a list of SimpleDags that were produced by processors that
        have finished since the last time this was called
    :rtype: list[airflow.utils.dag_processing.SimpleDag]
    """
    simple_dags = self.collect_results()
    # Generate more file paths to process if we processed all the files
    # already.
    if len(self._file_path_queue) == 0:
        # A full pass over the DAG folder just completed: emit summary
        # metrics and reset the pass timer.
        self.emit_metrics()
        self._parsing_start_time = timezone.utcnow()
        # If the file path is already being processed, or if a file was
        # processed recently, wait until the next batch
        file_paths_in_progress = self._processors.keys()
        now = timezone.utcnow()
        file_paths_recently_processed = []
        for file_path in self._file_paths:
            last_finish_time = self.get_last_finish_time(file_path)
            if (last_finish_time is not None and
                (now - last_finish_time).total_seconds() <
                    self._file_process_interval):
                file_paths_recently_processed.append(file_path)
        # Files that already hit their per-file run limit are excluded.
        files_paths_at_run_limit = [file_path
                                    for file_path, stat in self._file_stats.items()
                                    if stat.run_count == self._max_runs]
        files_paths_to_queue = list(set(self._file_paths) -
                                    set(file_paths_in_progress) -
                                    set(file_paths_recently_processed) -
                                    set(files_paths_at_run_limit))
        for file_path, processor in self._processors.items():
            self.log.debug(
                "File path %s is still being processed (started: %s)",
                processor.file_path, processor.start_time.isoformat()
            )
        self.log.debug(
            "Queuing the following files for processing:\n\t%s",
            "\n\t".join(files_paths_to_queue)
        )
        # Seed zeroed stats for files we have never seen before.
        for file_path in files_paths_to_queue:
            if file_path not in self._file_stats:
                self._file_stats[file_path] = DagFileStat(0, 0, None, None, 0)
        self._file_path_queue.extend(files_paths_to_queue)
    # Start more processors if we have enough slots and files to process
    while (self._parallelism - len(self._processors) > 0 and
            len(self._file_path_queue) > 0):
        file_path = self._file_path_queue.pop(0)
        processor = self._processor_factory(file_path, self._zombies)
        Stats.incr('dag_processing.processes')
        processor.start()
        self.log.debug(
            "Started a process (PID: %s) to generate tasks for %s",
            processor.pid, file_path
        )
        self._processors[file_path] = processor
    # Update heartbeat count.
    self._heartbeat_count += 1
    return simple_dags
@provide_session
def _find_zombies(self, session):
    """
    Find zombie task instances -- tasks that haven't heartbeated for too
    long -- and update the current zombie list.

    The DB query is throttled to run at most once every
    ``self._zombie_query_interval`` seconds; between queries the previous
    zombie list is kept.

    :param session: ORM session (injected by provide_session)
    """
    now = timezone.utcnow()
    zombies = []
    if not self._last_zombie_query_time or \
            (now - self._last_zombie_query_time).total_seconds() > self._zombie_query_interval:
        # to avoid circular imports
        from airflow.jobs import LocalTaskJob as LJ
        self.log.info("Finding 'running' jobs without a recent heartbeat")
        TI = airflow.models.TaskInstance
        limit_dttm = timezone.utcnow() - timedelta(
            seconds=self._zombie_threshold_secs)
        self.log.info("Failing jobs without heartbeat after %s", limit_dttm)
        # RUNNING task instances whose job is dead or hasn't heartbeated
        # within the threshold are considered zombies.
        tis = (
            session.query(TI)
            .join(LJ, TI.job_id == LJ.id)
            .filter(TI.state == State.RUNNING)
            .filter(
                or_(
                    LJ.state != State.RUNNING,
                    LJ.latest_heartbeat < limit_dttm,
                )
            ).all()
        )
        self._last_zombie_query_time = timezone.utcnow()
        for ti in tis:
            sti = SimpleTaskInstance(ti)
            self.log.info(
                "Detected zombie job with dag_id %s, task_id %s, and execution date %s",
                sti.dag_id, sti.task_id, sti.execution_date.isoformat())
            zombies.append(sti)
        # NOTE: only replaces the list after a fresh query, so stale
        # zombies persist between query intervals by design.
        self._zombies = zombies
def _kill_timed_out_processors(self):
    """
    Kill any file processors that timeout to defend against process hangs.
    """
    now = timezone.utcnow()
    for file_path, processor in self._processors.items():
        duration = now - processor.start_time
        if duration > self._processor_timeout:
            self.log.info(
                "Processor for %s with PID %s started at %s has timed out, "
                "killing it.",
                processor.file_path, processor.pid, processor.start_time.isoformat())
            Stats.decr('dag_processing.processes')
            Stats.incr('dag_processing.processor_timeouts')
            # TODO: Remove after Airflow 2.0
            Stats.incr('dag_file_processor_timeouts')
            # NOTE(review): the killed processor stays in self._processors
            # here; presumably collect_results() reaps it -- confirm.
            processor.kill()
def max_runs_reached(self):
    """
    :return: whether all file paths have been processed max_runs times
    """
    if self._max_runs == -1:  # Unlimited runs.
        return False
    if any(stat.run_count < self._max_runs
           for stat in self._file_stats.values()):
        return False
    # Every file is at the limit; also require enough heartbeats.
    return self._heartbeat_count >= self._max_runs
def terminate(self):
    """
    Stop every running file processor and update the process gauge.

    :return: None
    """
    for proc in self._processors.values():
        Stats.decr('dag_processing.processes')
        proc.terminate()
def end(self):
    """
    Kill all child processes on exit since we don't want to leave
    them as orphaned.

    Tries SIGTERM first, waits up to 5 seconds, then SIGKILLs whatever
    is still alive.
    """
    pids_to_kill = self.get_all_pids()
    if len(pids_to_kill) > 0:
        # First try SIGTERM
        this_process = psutil.Process(os.getpid())
        # Only check child processes to ensure that we don't have a case
        # where we kill the wrong process because a child process died
        # but the PID got reused.
        child_processes = [x for x in this_process.children(recursive=True)
                           if x.is_running() and x.pid in pids_to_kill]
        for child in child_processes:
            self.log.info("Terminating child PID: %s", child.pid)
            child.terminate()
        # TODO: Remove magic number
        timeout = 5
        self.log.info("Waiting up to %s seconds for processes to exit...", timeout)
        try:
            psutil.wait_procs(
                child_processes, timeout=timeout,
                callback=lambda x: self.log.info('Terminated PID %s', x.pid))
        except psutil.TimeoutExpired:
            self.log.debug("Ran out of time while waiting for processes to exit")
        # Then SIGKILL
        # Re-list children: some may have exited during the wait.
        child_processes = [x for x in this_process.children(recursive=True)
                           if x.is_running() and x.pid in pids_to_kill]
        if len(child_processes) > 0:
            self.log.info("SIGKILL processes that did not terminate gracefully")
            for child in child_processes:
                self.log.info("Killing child PID: %s", child.pid)
                child.kill()
                # Reap the killed child to avoid a zombie entry.
                child.wait()
def emit_metrics(self):
    """
    Emit metrics about the DAG parsing summary.

    This is called once every time around the parsing "loop" - i.e. after
    all files have been parsed.
    """
    parse_time = (timezone.utcnow() - self._parsing_start_time).total_seconds()
    Stats.gauge('dag_processing.total_parse_time', parse_time)
    Stats.gauge('dagbag_size', sum(stat.num_dags for stat in self._file_stats.values()))
    Stats.gauge('dag_processing.import_errors',
                sum(stat.import_errors for stat in self._file_stats.values()))
    # TODO: Remove before Airflow 2.0
    Stats.gauge('collect_dags', parse_time)
    Stats.gauge('dagbag_import_errors', sum(stat.import_errors for stat in self._file_stats.values()))
|
rse.py | # Copyright 2014-2018 CERN for the benefit of the ATLAS collaboration.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Martin Barisits <martin.barisits@cern.ch>, 2014-2016
# - Vincent Garonne <vgaronne@gmail.com>, 2018
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019
# - Brandon White <bjwhite@fnal.gov>, 2019-2020
#
# PY3K COMPATIBLE
"""
Abacus-RSE is a daemon to update RSE counters.
"""
import logging
import os
import socket
import sys
import threading
import time
import traceback
from rucio.common.config import config_get
from rucio.common.utils import get_thread_with_periodic_running_function
from rucio.core.heartbeat import live, die, sanity_check
from rucio.core.rse_counter import get_updated_rse_counters, update_rse_counter, fill_rse_counter_history_table
# Event used by the signal handler to ask all worker threads to stop.
graceful_stop = threading.Event()
# Log to stdout; level comes from the 'common/loglevel' config (default DEBUG).
logging.basicConfig(stream=sys.stdout,
                    level=getattr(logging,
                                  config_get('common', 'loglevel',
                                             raise_exception=False,
                                             default='DEBUG').upper()),
                    format='%(asctime)s\t%(process)d\t%(levelname)s\t%(message)s')
def rse_update(once=False):
    """
    Main loop to check and update the RSE Counters.

    :param once: if True, run a single iteration and return.
    """
    logging.info('rse_update: starting')
    logging.info('rse_update: started')
    # Make an initial heartbeat so that all abacus-rse daemons have the correct worker number on the next try
    hostname = socket.gethostname()
    pid = os.getpid()
    current_thread = threading.current_thread()
    live(executable='rucio-abacus-rse', hostname=hostname, pid=pid, thread=current_thread)
    while not graceful_stop.is_set():
        try:
            # Heartbeat: also yields this worker's thread assignment.
            heartbeat = live(executable='rucio-abacus-rse', hostname=hostname, pid=pid, thread=current_thread)
            # Select the batch of RSEs this worker should update.
            start = time.time()  # NOQA
            rse_ids = get_updated_rse_counters(total_workers=heartbeat['nr_threads'],
                                               worker_number=heartbeat['assign_thread'])
            logging.debug('Index query time %f size=%d' % (time.time() - start, len(rse_ids)))
            # If the list is empty, send the worker to sleep.
            if not rse_ids and not once:
                logging.info('rse_update[%s/%s] did not get any work' % (heartbeat['assign_thread'], heartbeat['nr_threads'] - 1))
                time.sleep(10)
            else:
                for rse_id in rse_ids:
                    # Bail out early if a shutdown was requested mid-batch.
                    if graceful_stop.is_set():
                        break
                    start_time = time.time()
                    update_rse_counter(rse_id=rse_id)
                    logging.debug('rse_update[%s/%s]: update of rse "%s" took %f' % (heartbeat['assign_thread'], heartbeat['nr_threads'] - 1, rse_id, time.time() - start_time))
        except Exception:
            # Keep the daemon alive on unexpected errors; just log them.
            logging.error(traceback.format_exc())
        if once:
            break
    logging.info('rse_update: graceful stop requested')
    # Unregister this worker's heartbeat before exiting.
    die(executable='rucio-abacus-rse', hostname=hostname, pid=pid, thread=current_thread)
    logging.info('rse_update: graceful stop done')
def stop(signum=None, frame=None):
    """
    Graceful exit: signal-handler compatible; flags all loops to stop.
    """
    graceful_stop.set()
def run(once=False, threads=1, fill_history_table=False):
    """
    Starts up the Abacus-RSE threads.
    """
    hostname = socket.gethostname()
    sanity_check(executable='rucio-abacus-rse', hostname=hostname)
    if once:
        logging.info('main: executing one iteration only')
        rse_update(once)
        return
    logging.info('main: starting threads')
    workers = [threading.Thread(target=rse_update, kwargs={'once': once})
               for _ in range(threads)]
    if fill_history_table:
        # Periodically snapshot counters into the history table.
        workers.append(get_thread_with_periodic_running_function(3600, fill_rse_counter_history_table, graceful_stop))
    for worker in workers:
        worker.start()
    logging.info('main: waiting for interrupts')
    # Interruptible joins require a timeout.
    while workers[0].is_alive():
        for worker in workers:
            worker.join(timeout=3.14)
|
z_train_abstractive.py | #!/usr/bin/env python
"""
Main training workflow
"""
from __future__ import division
import argparse
import glob
import os
import random
import signal
import time
import torch
from pytorch_transformers import BertTokenizer
import distributed
from models import data_loader, model_builder
from models.data_loader import load_dataset
# Toggle between the copy-mechanism loss and the standard loss implementation.
COPY=False
if COPY:
    from models.loss_copy import abs_loss
else:
    from models.loss import abs_loss
from models.model_builder import Z_AbsSummarizer
from models.predictor import build_predictor
from models.z_trainer import build_trainer
from others.logging import logger, init_logger
# Hyperparameter names that must be restored from a checkpoint's saved opt
# when validating/testing (see validate/test_abs).
model_flags = ['hidden_size', 'ff_size', 'heads', 'emb_size', 'enc_layers', 'enc_hidden_size', 'enc_ff_size',
               'dec_layers', 'dec_hidden_size', 'dec_ff_size', 'encoder', 'ff_actv', 'use_interval']
def str2bool(v):
    """
    Parse a human-friendly boolean string for argparse.

    :param v: string such as 'yes'/'no', 't'/'f', '1'/'0' (case-insensitive)
    :return: the corresponding bool
    :raises argparse.ArgumentTypeError: for unrecognised values
    """
    value = v.lower()
    if value in ('yes', 'true', 't', 'y', '1'):
        return True
    if value in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def train_abs_multi(args):
    """ Spawns 1 process per GPU and trains with multiprocessing. """
    init_logger()
    nb_gpu = args.world_size
    # 'spawn' is required so each child gets a clean CUDA context.
    mp = torch.multiprocessing.get_context('spawn')
    # Create a thread to listen for errors in the child processes.
    error_queue = mp.SimpleQueue()
    error_handler = ErrorHandler(error_queue)
    # Train with multiprocessing.
    procs = []
    for i in range(nb_gpu):
        device_id = i
        procs.append(mp.Process(target=run, args=(args,
                                                  device_id, error_queue,), daemon=True))
        procs[i].start()
        logger.info(" Starting process pid: %d " % procs[i].pid)
        # Register the child so the handler can kill it on error.
        error_handler.add_child(procs[i].pid)
    for p in procs:
        p.join()
def run(args, device_id, error_queue):
    """ Run one distributed training process on the given GPU. """
    setattr(args, 'gpu_ranks', [int(i) for i in args.gpu_ranks])
    try:
        # Join the distributed process group for this device.
        gpu_rank = distributed.multi_init(device_id, args.world_size, args.gpu_ranks)
        print('gpu_rank %d' % gpu_rank)
        if gpu_rank != args.gpu_ranks[device_id]:
            raise AssertionError("An error occurred in \
                  Distributed initialization")
        train_abs_single(args, device_id)
    except KeyboardInterrupt:
        pass  # killed by parent, do nothing
    except Exception:
        # propagate exception to parent process, keeping original traceback
        import traceback
        error_queue.put((args.gpu_ranks[device_id], traceback.format_exc()))
class ErrorHandler(object):
    """A class that listens for exceptions in children processes and propagates
    the tracebacks to the parent process."""

    def __init__(self, error_queue):
        """Start the listener thread and install the SIGUSR1 handler.

        :param error_queue: multiprocessing queue children put
            (rank, traceback) tuples on.
        """
        import signal
        import threading
        self.error_queue = error_queue
        # PIDs of child processes to kill when an error surfaces.
        self.children_pids = []
        self.error_thread = threading.Thread(
            target=self.error_listener, daemon=True)
        self.error_thread.start()
        # SIGUSR1 is how the listener thread notifies the main thread.
        signal.signal(signal.SIGUSR1, self.signal_handler)

    def add_child(self, pid):
        """Register a child process PID to be killed on error."""
        self.children_pids.append(pid)

    def error_listener(self):
        """Block until a child reports an error, then signal the main thread."""
        (rank, original_trace) = self.error_queue.get()
        # Re-queue so signal_handler can read the same traceback.
        self.error_queue.put((rank, original_trace))
        os.kill(os.getpid(), signal.SIGUSR1)

    def signal_handler(self, signalnum, stackframe):
        """SIGUSR1 handler: kill all children and re-raise the child traceback."""
        for pid in self.children_pids:
            os.kill(pid, signal.SIGINT)  # kill children processes
        (rank, original_trace) = self.error_queue.get()
        msg = """\n\n-- Tracebacks above this line can probably
                 be ignored --\n\n"""
        msg += original_trace
        raise Exception(msg)
def validate_abs(args, device_id):
    """Validate checkpoints: either sweep all saved checkpoints
    (args.test_all) or watch the model dir and validate new ones as they
    appear."""
    timestep = 0
    if (args.test_all):
        # Evaluate every checkpoint (oldest first) and test the best five by PPL.
        cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
        cp_files.sort(key=os.path.getmtime)
        xent_lst = []
        for i, cp in enumerate(cp_files):
            step = int(cp.split('.')[-2].split('_')[-1])
            if (args.test_start_from != -1 and step < args.test_start_from):
                # Sentinel loss so skipped checkpoints never rank best.
                xent_lst.append((1e6, cp))
                continue
            xent = validate(args, device_id, cp, step)
            xent_lst.append((xent, cp))
            max_step = xent_lst.index(min(xent_lst))
            # Early stop if validation hasn't improved for 10 checkpoints.
            if (i - max_step > 10):
                break
        xent_lst = sorted(xent_lst, key=lambda x: x[0])[:5]
        logger.info('PPL %s' % str(xent_lst))
        for xent, cp in xent_lst:
            step = int(cp.split('.')[-2].split('_')[-1])
            test_abs(args, device_id, cp, step)
    else:
        # Poll the model dir; validate+test each checkpoint newer than the
        # last one seen (tracked by mtime in `timestep`).
        while (True):
            cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
            cp_files.sort(key=os.path.getmtime)
            if (cp_files):
                cp = cp_files[-1]
                time_of_cp = os.path.getmtime(cp)
                if (not os.path.getsize(cp) > 0):
                    # Checkpoint file still being written; wait and retry.
                    time.sleep(60)
                    continue
                if (time_of_cp > timestep):
                    timestep = time_of_cp
                    step = int(cp.split('.')[-2].split('_')[-1])
                    validate(args, device_id, cp, step)
                    test_abs(args, device_id, cp, step)
            cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
            cp_files.sort(key=os.path.getmtime)
            if (cp_files):
                cp = cp_files[-1]
                time_of_cp = os.path.getmtime(cp)
                if (time_of_cp > timestep):
                    continue
            else:
                time.sleep(300)
def validate(args, device_id, pt, step):
    """Run validation for one checkpoint and return its cross-entropy.

    :param pt: checkpoint path; falls back to args.test_from when empty.
    :param step: training step number of the checkpoint.
    :return: validation cross-entropy (xent) of the checkpoint.
    """
    device = "cpu" if args.visible_gpus == '-1' else "cuda"
    if (pt != ''):
        test_from = pt
    else:
        test_from = args.test_from
    logger.info('Loading checkpoint from %s' % test_from)
    checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
    # Restore model-architecture flags from the checkpoint's saved options.
    opt = vars(checkpoint['opt'])
    for k in opt.keys():
        if (k in model_flags):
            setattr(args, k, opt[k])
    print(args)
    model = Z_AbsSummarizer(args, device, checkpoint)
    model.eval()
    valid_iter = data_loader.Dataloader(args, load_dataset(args, 'valid', shuffle=False),
                                        args.batch_size, device,
                                        shuffle=False, is_test=False)
    tokenizer = BertTokenizer.from_pretrained(args.pretrained_model, do_lower_case=True, cache_dir=args.temp_dir)
    # Special-token ids used by the decoder (BERT "unused" vocab slots).
    symbols = {'BOS': tokenizer.vocab['[unused9]'], 'EOS': tokenizer.vocab['[unused1]'],
               'PAD': tokenizer.vocab['[PAD]'], 'EOQ': tokenizer.vocab['[unused2]']}
    if COPY:
        valid_loss = abs_loss(model.generator, symbols, model.vocab_size, train=False, device=device, copy_generator=model.copy_generator)
    else:
        valid_loss = abs_loss(model.generator, symbols, model.vocab_size, train=False, device=device)
    trainer = build_trainer(args, device_id, model, None, valid_loss)
    stats = trainer.validate(valid_iter, step)
    return stats.xent()
def test_abs(args, device_id, pt, step):
    """Decode the test split with a single checkpoint.

    Loads the checkpoint (or ``args.test_from`` when ``pt`` is empty),
    applies the checkpoint's saved options for keys in ``model_flags``,
    then runs the predictor over the test set, tagging outputs with
    ``step``.
    """
    device = "cpu" if args.visible_gpus == '-1' else "cuda"
    if (pt != ''):
        test_from = pt
    else:
        test_from = args.test_from
    logger.info('Loading checkpoint from %s' % test_from)
    checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
    opt = vars(checkpoint['opt'])
    for k in opt.keys():
        if (k in model_flags):
            setattr(args, k, opt[k])
    print(args)
    model = Z_AbsSummarizer(args, device, checkpoint)
    model.eval()
    test_iter = data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
                                       args.test_batch_size, device,
                                       shuffle=False, is_test=True)
    tokenizer = BertTokenizer.from_pretrained(args.pretrained_model, do_lower_case=True, cache_dir=args.temp_dir)
    symbols = {'BOS': tokenizer.vocab['[unused9]'], 'EOS': tokenizer.vocab['[unused1]'],
               'PAD': tokenizer.vocab['[PAD]'], 'EOQ': tokenizer.vocab['[unused2]']}
    predictor = build_predictor(args, tokenizer, symbols, model, logger)
    predictor.translate(test_iter, step)
def test_text_abs(args, device_id, pt, step):
    """Decode the test split with a single checkpoint.

    This entry point was a byte-for-byte duplicate of :func:`test_abs`;
    delegate to it so the two code paths cannot drift apart.
    """
    test_abs(args, device_id, pt, step)
def baseline(args, cal_lead=False, cal_oracle=False):
    """Run an extractive baseline (lead / oracle, per the flag names) on the test set.

    Note: when both flags are False the loader and trainer are built but
    nothing is evaluated — callers must pass exactly one flag.
    """
    test_iter = data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
                                       args.batch_size, 'cpu',
                                       shuffle=False, is_test=True)
    trainer = build_trainer(args, -1, None, None, None)
    #
    if (cal_lead):
        trainer.test(test_iter, 0, cal_lead=True)
    elif (cal_oracle):
        trainer.test(test_iter, 0, cal_oracle=True)
def train_abs(args, device_id):
    """Dispatch abstractive training: multi-GPU when world_size > 1,
    otherwise a single-device run."""
    if args.world_size <= 1:
        train_abs_single(args, device_id)
        return
    train_abs_multi(args)
def train_abs_single(args, device_id):
    """Train the abstractive summarizer on a single device.

    Seeds all RNGs, optionally resumes from ``args.train_from`` and/or
    initializes BERT weights from an extractive checkpoint, builds the
    model, optimizer(s) and loss, then runs the trainer for
    ``args.train_steps`` steps.
    """
    init_logger(args.log_file)
    logger.info(str(args))
    device = "cpu" if args.visible_gpus == '-1' else "cuda"
    logger.info('Device ID %d' % device_id)
    logger.info('Device %s' % device)
    torch.manual_seed(args.seed)
    random.seed(args.seed)
    torch.backends.cudnn.deterministic = True
    if device_id >= 0:
        torch.cuda.set_device(device_id)
        torch.cuda.manual_seed(args.seed)
    if args.train_from != '':
        # Resume: restore checkpoint and re-apply the options it was saved with.
        logger.info('Loading checkpoint from %s' % args.train_from)
        checkpoint = torch.load(args.train_from,
                                map_location=lambda storage, loc: storage)
        opt = vars(checkpoint['opt'])
        for k in opt.keys():
            if (k in model_flags):
                setattr(args, k, opt[k])
    else:
        checkpoint = None
    if (args.load_from_extractive != ''):
        logger.info('Loading bert from extractive model %s' % args.load_from_extractive)
        bert_from_extractive = torch.load(args.load_from_extractive, map_location=lambda storage, loc: storage)
        bert_from_extractive = bert_from_extractive['model']
    else:
        bert_from_extractive = None
    # NOTE(review): this repeats the seeding above; re-seeding with the same
    # value is idempotent, so it is harmless but redundant — confirm before
    # removing.
    torch.manual_seed(args.seed)
    random.seed(args.seed)
    torch.backends.cudnn.deterministic = True
    def train_iter_fct():
        # Fresh (re-shuffled) training iterator for each pass.
        return data_loader.Dataloader(args, load_dataset(args, 'train', shuffle=True), args.batch_size, device,
                                      shuffle=True, is_test=False)
    model = Z_AbsSummarizer(args, device, checkpoint, bert_from_extractive)
    if (args.sep_optim):
        # Separate optimizers/schedules for the BERT encoder and the decoder.
        optim_bert = model_builder.build_optim_bert(args, model, checkpoint)
        optim_dec = model_builder.build_optim_dec(args, model, checkpoint)
        optim = [optim_bert, optim_dec]
    else:
        optim = [model_builder.build_optim(args, model, checkpoint)]
    logger.info(model)
    tokenizer = BertTokenizer.from_pretrained(args.pretrained_model, do_lower_case=True, cache_dir=args.temp_dir)
    symbols = {'BOS': tokenizer.vocab['[unused9]'], 'EOS': tokenizer.vocab['[unused1]'],
               'PAD': tokenizer.vocab['[PAD]'], 'EOQ': tokenizer.vocab['[unused2]']}
    if COPY:
        train_loss = abs_loss(model.generator, symbols, model.vocab_size, device, train=True,
                              label_smoothing=args.label_smoothing, copy_generator=model.copy_generator)
    else:
        train_loss = abs_loss(model.generator, symbols, model.vocab_size, device, train=True,
                              label_smoothing=args.label_smoothing)
    trainer = build_trainer(args, device_id, model, optim, train_loss)
    try:
        trainer.train(train_iter_fct, args.train_steps)
    finally:
        # Always flush/close the summary writer, even on interrupt.
        trainer.close_writer()
|
sync.py | #
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import json
import netrc
from optparse import SUPPRESS_HELP
import os
import re
import shutil
import socket
import subprocess
import sys
import time
from pyversion import is_python3
if is_python3():
import urllib.parse
import xmlrpc.client
else:
import imp
import urlparse
import xmlrpclib
urllib = imp.new_module('urllib')
urllib.parse = urlparse
xmlrpc = imp.new_module('xmlrpc')
xmlrpc.client = xmlrpclib
# Optional-module fallbacks for platforms without them.
try:
  import threading as _threading
except ImportError:
  # Python built without thread support: API-compatible stub.
  import dummy_threading as _threading

try:
  import resource
  def _rlimit_nofile():
    # (soft, hard) limit on open file descriptors for this process.
    return resource.getrlimit(resource.RLIMIT_NOFILE)
except ImportError:
  def _rlimit_nofile():
    # No resource module (e.g. Windows): assume a conservative default.
    return (256, 256)

try:
  import multiprocessing
except ImportError:
  multiprocessing = None
from git_command import GIT, git_require
from git_refs import R_HEADS, HEAD
from project import Project
from project import RemoteSpec
from command import Command, MirrorSafeCommand
from error import RepoChangedException, GitError, ManifestParseError
from project import SyncBuffer
from progress import Progress
from wrapper import Wrapper
_ONE_DAY_S = 24 * 60 * 60
class _FetchError(Exception):
  """Internal error raised in _FetchHelper() when we don't want a stack trace.

  Raised only after the failure has already been printed for the user; the
  handler just records it on the shared error event.
  """
  pass
class Sync(Command, MirrorSafeCommand):
  # Default fetch parallelism; may be overridden by the manifest or -j.
  jobs = 1
  common = True
  helpSummary = "Update working tree to the latest revision"
  helpUsage = """
%prog [<project>...]
"""
  # Typos fixed below: "direcotry" -> "directory", "resumeable" -> "resumable".
  helpDescription = """
The '%prog' command synchronizes local project directories
with the remote repositories specified in the manifest. If a local
project does not yet exist, it will clone a new local directory from
the remote repository and set up tracking branches as specified in
the manifest. If the local project already exists, '%prog'
will update the remote branches and rebase any new local changes
on top of the new remote changes.

'%prog' will synchronize all projects listed at the command
line. Projects can be specified either by name, or by a relative
or absolute path to the project's local directory. If no projects
are specified, '%prog' will synchronize all projects listed in
the manifest.

The -d/--detach option can be used to switch specified projects
back to the manifest revision. This option is especially helpful
if the project is currently on a topic branch, but the manifest
revision is temporarily needed.

The -s/--smart-sync option can be used to sync to a known good
build as specified by the manifest-server element in the current
manifest. The -t/--smart-tag option is similar and allows you to
specify a custom tag/label.

The -u/--manifest-server-username and -p/--manifest-server-password
options can be used to specify a username and password to authenticate
with the manifest server when using the -s or -t option.

If -u and -p are not specified when using the -s or -t option, '%prog'
will attempt to read authentication credentials for the manifest server
from the user's .netrc file.

'%prog' will not use authentication credentials from -u/-p or .netrc
if the manifest server specified in the manifest file already includes
credentials.

The -f/--force-broken option can be used to proceed with syncing
other projects if a project sync fails.

The --force-sync option can be used to overwrite existing git
directories if they have previously been linked to a different
object directory. WARNING: This may cause data to be lost since
refs may be removed when overwriting.

The --no-clone-bundle option disables any attempt to use
$URL/clone.bundle to bootstrap a new Git repository from a
resumable bundle file on a content delivery network. This
may be necessary if there are problems with the local Python
HTTP client or proxy configuration, but the Git binary works.

The --fetch-submodules option enables fetching Git submodules
of a project from server.

The -c/--current-branch option can be used to only fetch objects that
are on the branch specified by a project's revision.

The --optimized-fetch option can be used to only fetch projects that
are fixed to a sha1 revision if the sha1 revision does not already
exist locally.

SSH Connections
---------------

If at least one project remote URL uses an SSH connection (ssh://,
git+ssh://, or user@host:path syntax) repo will automatically
enable the SSH ControlMaster option when connecting to that host.
This feature permits other projects in the same '%prog' session to
reuse the same SSH tunnel, saving connection setup overheads.

To disable this behavior on UNIX platforms, set the GIT_SSH
environment variable to 'ssh'. For example:

  export GIT_SSH=ssh
  %prog

Compatibility
~~~~~~~~~~~~~

This feature is automatically disabled on Windows, due to the lack
of UNIX domain socket support.

This feature is not compatible with url.insteadof rewrites in the
user's ~/.gitconfig. '%prog' is currently not able to perform the
rewrite early enough to establish the ControlMaster tunnel.

If the remote SSH daemon is Gerrit Code Review, version 2.0.10 or
later is required to fix a server side protocol bug.
"""
  def _Options(self, p, show_smart=True):
    """Register 'repo sync' command-line options on parser `p`.

    Args:
      p: optparse parser to populate.
      show_smart: also register the -s/-t smart-sync options.
    """
    # Default parallelism comes from the manifest's sync-j, when parseable.
    try:
      self.jobs = self.manifest.default.sync_j
    except ManifestParseError:
      self.jobs = 1

    p.add_option('-f', '--force-broken',
                 dest='force_broken', action='store_true',
                 help="continue sync even if a project fails to sync")
    p.add_option('--force-sync',
                 dest='force_sync', action='store_true',
                 help="overwrite an existing git directory if it needs to "
                      "point to a different object directory. WARNING: this "
                      "may cause loss of data")
    p.add_option('-l', '--local-only',
                 dest='local_only', action='store_true',
                 help="only update working tree, don't fetch")
    p.add_option('-n', '--network-only',
                 dest='network_only', action='store_true',
                 help="fetch only, don't update working tree")
    p.add_option('-d', '--detach',
                 dest='detach_head', action='store_true',
                 help='detach projects back to manifest revision')
    p.add_option('-c', '--current-branch',
                 dest='current_branch_only', action='store_true',
                 help='fetch only current branch from server')
    p.add_option('-q', '--quiet',
                 dest='quiet', action='store_true',
                 help='be more quiet')
    p.add_option('-j', '--jobs',
                 dest='jobs', action='store', type='int',
                 help="projects to fetch simultaneously (default %d)" % self.jobs)
    p.add_option('-m', '--manifest-name',
                 dest='manifest_name',
                 help='temporary manifest to use for this sync', metavar='NAME.xml')
    p.add_option('--no-clone-bundle',
                 dest='no_clone_bundle', action='store_true',
                 help='disable use of /clone.bundle on HTTP/HTTPS')
    p.add_option('-u', '--manifest-server-username', action='store',
                 dest='manifest_server_username',
                 help='username to authenticate with the manifest server')
    p.add_option('-p', '--manifest-server-password', action='store',
                 dest='manifest_server_password',
                 help='password to authenticate with the manifest server')
    p.add_option('--fetch-submodules',
                 dest='fetch_submodules', action='store_true',
                 help='fetch submodules from server')
    p.add_option('--no-tags',
                 dest='no_tags', action='store_true',
                 help="don't fetch tags")
    p.add_option('--optimized-fetch',
                 dest='optimized_fetch', action='store_true',
                 help='only fetch projects fixed to sha1 if revision does not exist locally')
    if show_smart:
      p.add_option('-s', '--smart-sync',
                   dest='smart_sync', action='store_true',
                   help='smart sync using manifest from a known good build')
      p.add_option('-t', '--smart-tag',
                   dest='smart_tag', action='store',
                   help='smart sync using manifest from a known tag')

    g = p.add_option_group('repo Version options')
    g.add_option('--no-repo-verify',
                 dest='no_repo_verify', action='store_true',
                 help='do not verify repo source code')
    g.add_option('--repo-upgraded',
                 dest='repo_upgraded', action='store_true',
                 help=SUPPRESS_HELP)
def _FetchProjectList(self, opt, projects, *args, **kwargs):
"""Main function of the fetch threads when jobs are > 1.
Delegates most of the work to _FetchHelper.
Args:
opt: Program options returned from optparse. See _Options().
projects: Projects to fetch.
*args, **kwargs: Remaining arguments to pass to _FetchHelper. See the
_FetchHelper docstring for details.
"""
for project in projects:
success = self._FetchHelper(opt, project, *args, **kwargs)
if not success and not opt.force_broken:
break
  def _FetchHelper(self, opt, project, lock, fetched, pm, sem, err_event):
    """Fetch git objects for a single project.

    Args:
      opt: Program options returned from optparse. See _Options().
      project: Project object for the project to fetch.
      lock: Lock for accessing objects that are shared amongst multiple
          _FetchHelper() threads.
      fetched: set object that we will add project.gitdir to when we're done
          (with our lock held).
      pm: Instance of a Project object. We will call pm.update() (with our
          lock held).
      sem: We'll release() this semaphore when we exit so that another thread
          can be started up.
      err_event: We'll set this event in the case of an error (after printing
          out info about the error).

    Returns:
      Whether the fetch was successful.
    """
    # We'll set to true once we've locked the lock.
    did_lock = False

    if not opt.quiet:
      print('Fetching project %s' % project.name)

    # Encapsulate everything in a try/except/finally so that:
    # - We always set err_event in the case of an exception.
    # - We always make sure we call sem.release().
    # - We always make sure we unlock the lock if we locked it.
    try:
      try:
        start = time.time()
        success = project.Sync_NetworkHalf(
          quiet=opt.quiet,
          current_branch_only=opt.current_branch_only,
          force_sync=opt.force_sync,
          clone_bundle=not opt.no_clone_bundle,
          no_tags=opt.no_tags, archive=self.manifest.IsArchive,
          optimized_fetch=opt.optimized_fetch)
        # Record the wall-clock duration for later fetch-order scheduling.
        self._fetch_times.Set(project, time.time() - start)

        # Lock around all the rest of the code, since printing, updating a set
        # and Progress.update() are not thread safe.
        lock.acquire()
        did_lock = True

        if not success:
          print('error: Cannot fetch %s' % project.name, file=sys.stderr)
          if opt.force_broken:
            print('warn: --force-broken, continuing to sync',
                  file=sys.stderr)
          else:
            # Failure already reported above; signal without a traceback.
            raise _FetchError()

        fetched.add(project.gitdir)
        pm.update()
      except _FetchError:
        err_event.set()
      except Exception as e:
        print('error: Cannot fetch %s (%s: %s)' \
            % (project.name, type(e).__name__, str(e)), file=sys.stderr)
        err_event.set()
        # Re-raise: unexpected errors should still surface in this thread.
        raise
      finally:
        if did_lock:
          lock.release()
        sem.release()

    # Reached when Sync_NetworkHalf completed (success is bound) or after a
    # handled _FetchError (success is False by then).
    return success
  def _Fetch(self, projects, opt):
    """Fetch all `projects`, possibly in parallel, then gc them.

    Projects sharing an object directory are grouped so they fetch
    sequentially within one worker. Returns the set of gitdirs fetched
    successfully; exits the process on any fetch error.
    """
    fetched = set()
    lock = _threading.Lock()
    pm = Progress('Fetching projects', len(projects))

    # Group projects by object directory: those must not fetch concurrently.
    objdir_project_map = dict()
    for project in projects:
      objdir_project_map.setdefault(project.objdir, []).append(project)

    threads = set()
    sem = _threading.Semaphore(self.jobs)
    err_event = _threading.Event()
    for project_list in objdir_project_map.values():
      # Check for any errors before running any more tasks.
      # ...we'll let existing threads finish, though.
      if err_event.isSet() and not opt.force_broken:
        break

      sem.acquire()
      kwargs = dict(opt=opt,
                    projects=project_list,
                    lock=lock,
                    fetched=fetched,
                    pm=pm,
                    sem=sem,
                    err_event=err_event)
      if self.jobs > 1:
        t = _threading.Thread(target = self._FetchProjectList,
                              kwargs = kwargs)
        # Ensure that Ctrl-C will not freeze the repo process.
        t.daemon = True
        threads.add(t)
        t.start()
      else:
        self._FetchProjectList(**kwargs)

    for t in threads:
      t.join()

    # If we saw an error, exit with code 1 so that other scripts can check.
    if err_event.isSet():
      print('\nerror: Exited sync due to fetch errors', file=sys.stderr)
      sys.exit(1)

    pm.end()
    self._fetch_times.Save()

    if not self.manifest.IsArchive:
      self._GCProjects(projects)
    return fetched
def _GCProjects(self, projects):
gitdirs = {}
for project in projects:
gitdirs[project.gitdir] = project.bare_git
has_dash_c = git_require((1, 7, 2))
if multiprocessing and has_dash_c:
cpu_count = multiprocessing.cpu_count()
else:
cpu_count = 1
jobs = min(self.jobs, cpu_count)
if jobs < 2:
for bare_git in gitdirs.values():
bare_git.gc('--auto')
return
config = {'pack.threads': cpu_count / jobs if cpu_count > jobs else 1}
threads = set()
sem = _threading.Semaphore(jobs)
err_event = _threading.Event()
def GC(bare_git):
try:
try:
bare_git.gc('--auto', config=config)
except GitError:
err_event.set()
except:
err_event.set()
raise
finally:
sem.release()
for bare_git in gitdirs.values():
if err_event.isSet():
break
sem.acquire()
t = _threading.Thread(target=GC, args=(bare_git,))
t.daemon = True
threads.add(t)
t.start()
for t in threads:
t.join()
if err_event.isSet():
print('\nerror: Exited sync due to gc errors', file=sys.stderr)
sys.exit(1)
def _ReloadManifest(self, manifest_name=None):
if manifest_name:
# Override calls _Unload already
self.manifest.Override(manifest_name)
else:
self.manifest._Unload()
  def UpdateProjectList(self):
    """Reconcile the on-disk project list with the current manifest.

    Deletes work trees for projects that disappeared from the manifest
    (refusing when they have uncommitted changes), then rewrites
    .repo/project.list. Returns 0 on success, -1 when a dirty obsolete
    project blocks removal.
    """
    new_project_paths = []
    for project in self.GetProjects(None, missing_ok=True):
      if project.relpath:
        new_project_paths.append(project.relpath)
    file_name = 'project.list'
    file_path = os.path.join(self.manifest.repodir, file_name)
    old_project_paths = []

    if os.path.exists(file_path):
      fd = open(file_path, 'r')
      try:
        old_project_paths = fd.read().split('\n')
      finally:
        fd.close()
      for path in old_project_paths:
        if not path:
          continue
        if path not in new_project_paths:
          # If the path has already been deleted, we don't need to do it
          if os.path.exists(self.manifest.topdir + '/' + path):
            gitdir = os.path.join(self.manifest.topdir, path, '.git')
            # Minimal throwaway Project: only used for IsDirty()/worktree.
            project = Project(
                           manifest = self.manifest,
                           name = path,
                           remote = RemoteSpec('origin'),
                           gitdir = gitdir,
                           objdir = gitdir,
                           worktree = os.path.join(self.manifest.topdir, path),
                           relpath = path,
                           revisionExpr = 'HEAD',
                           revisionId = None,
                           groups = None)

            if project.IsDirty():
              print('error: Cannot remove project "%s": uncommitted changes '
                    'are present' % project.relpath, file=sys.stderr)
              print('       commit changes, then run sync again',
                    file=sys.stderr)
              return -1
            else:
              print('Deleting obsolete path %s' % project.worktree,
                    file=sys.stderr)
              shutil.rmtree(project.worktree)
              # Try deleting parent subdirs if they are empty
              project_dir = os.path.dirname(project.worktree)
              while project_dir != self.manifest.topdir:
                try:
                  os.rmdir(project_dir)
                except OSError:
                  break
                project_dir = os.path.dirname(project_dir)

    new_project_paths.sort()
    fd = open(file_path, 'w')
    try:
      fd.write('\n'.join(new_project_paths))
      fd.write('\n')
    finally:
      fd.close()
    return 0
def Execute(self, opt, args):
if opt.jobs:
self.jobs = opt.jobs
if self.jobs > 1:
soft_limit, _ = _rlimit_nofile()
self.jobs = min(self.jobs, (soft_limit - 5) / 3)
if opt.network_only and opt.detach_head:
print('error: cannot combine -n and -d', file=sys.stderr)
sys.exit(1)
if opt.network_only and opt.local_only:
print('error: cannot combine -n and -l', file=sys.stderr)
sys.exit(1)
if opt.manifest_name and opt.smart_sync:
print('error: cannot combine -m and -s', file=sys.stderr)
sys.exit(1)
if opt.manifest_name and opt.smart_tag:
print('error: cannot combine -m and -t', file=sys.stderr)
sys.exit(1)
if opt.manifest_server_username or opt.manifest_server_password:
if not (opt.smart_sync or opt.smart_tag):
print('error: -u and -p may only be combined with -s or -t',
file=sys.stderr)
sys.exit(1)
if None in [opt.manifest_server_username, opt.manifest_server_password]:
print('error: both -u and -p must be given', file=sys.stderr)
sys.exit(1)
if opt.manifest_name:
self.manifest.Override(opt.manifest_name)
manifest_name = opt.manifest_name
smart_sync_manifest_name = "smart_sync_override.xml"
smart_sync_manifest_path = os.path.join(
self.manifest.manifestProject.worktree, smart_sync_manifest_name)
if opt.smart_sync or opt.smart_tag:
if not self.manifest.manifest_server:
print('error: cannot smart sync: no manifest server defined in '
'manifest', file=sys.stderr)
sys.exit(1)
manifest_server = self.manifest.manifest_server
if not opt.quiet:
print('Using manifest server %s' % manifest_server)
if not '@' in manifest_server:
username = None
password = None
if opt.manifest_server_username and opt.manifest_server_password:
username = opt.manifest_server_username
password = opt.manifest_server_password
else:
try:
info = netrc.netrc()
except IOError:
print('.netrc file does not exist or could not be opened',
file=sys.stderr)
else:
try:
parse_result = urllib.parse.urlparse(manifest_server)
if parse_result.hostname:
username, _account, password = \
info.authenticators(parse_result.hostname)
except TypeError:
# TypeError is raised when the given hostname is not present
# in the .netrc file.
print('No credentials found for %s in .netrc'
% parse_result.hostname, file=sys.stderr)
except netrc.NetrcParseError as e:
print('Error parsing .netrc file: %s' % e, file=sys.stderr)
if (username and password):
manifest_server = manifest_server.replace('://', '://%s:%s@' %
(username, password),
1)
try:
server = xmlrpc.client.Server(manifest_server)
if opt.smart_sync:
p = self.manifest.manifestProject
b = p.GetBranch(p.CurrentBranch)
branch = b.merge
if branch.startswith(R_HEADS):
branch = branch[len(R_HEADS):]
env = os.environ.copy()
if 'SYNC_TARGET' in env:
target = env['SYNC_TARGET']
[success, manifest_str] = server.GetApprovedManifest(branch, target)
elif 'TARGET_PRODUCT' in env and 'TARGET_BUILD_VARIANT' in env:
target = '%s-%s' % (env['TARGET_PRODUCT'],
env['TARGET_BUILD_VARIANT'])
[success, manifest_str] = server.GetApprovedManifest(branch, target)
else:
[success, manifest_str] = server.GetApprovedManifest(branch)
else:
assert(opt.smart_tag)
[success, manifest_str] = server.GetManifest(opt.smart_tag)
if success:
manifest_name = smart_sync_manifest_name
try:
f = open(smart_sync_manifest_path, 'w')
try:
f.write(manifest_str)
finally:
f.close()
except IOError as e:
print('error: cannot write manifest to %s:\n%s'
% (smart_sync_manifest_path, e),
file=sys.stderr)
sys.exit(1)
self._ReloadManifest(manifest_name)
else:
print('error: manifest server RPC call failed: %s' %
manifest_str, file=sys.stderr)
sys.exit(1)
except (socket.error, IOError, xmlrpc.client.Fault) as e:
print('error: cannot connect to manifest server %s:\n%s'
% (self.manifest.manifest_server, e), file=sys.stderr)
sys.exit(1)
except xmlrpc.client.ProtocolError as e:
print('error: cannot connect to manifest server %s:\n%d %s'
% (self.manifest.manifest_server, e.errcode, e.errmsg),
file=sys.stderr)
sys.exit(1)
else: # Not smart sync or smart tag mode
if os.path.isfile(smart_sync_manifest_path):
try:
os.remove(smart_sync_manifest_path)
except OSError as e:
print('error: failed to remove existing smart sync override manifest: %s' %
e, file=sys.stderr)
rp = self.manifest.repoProject
rp.PreSync()
mp = self.manifest.manifestProject
mp.PreSync()
if opt.repo_upgraded:
_PostRepoUpgrade(self.manifest, quiet=opt.quiet)
if not opt.local_only:
mp.Sync_NetworkHalf(quiet=opt.quiet,
current_branch_only=opt.current_branch_only,
no_tags=opt.no_tags,
optimized_fetch=opt.optimized_fetch)
if mp.HasChanges:
syncbuf = SyncBuffer(mp.config)
mp.Sync_LocalHalf(syncbuf)
if not syncbuf.Finish():
sys.exit(1)
self._ReloadManifest(manifest_name)
if opt.jobs is None:
self.jobs = self.manifest.default.sync_j
all_projects = self.GetProjects(args,
missing_ok=True,
submodules_ok=opt.fetch_submodules)
self._fetch_times = _FetchTimes(self.manifest)
if not opt.local_only:
to_fetch = []
now = time.time()
if _ONE_DAY_S <= (now - rp.LastFetch):
to_fetch.append(rp)
to_fetch.extend(all_projects)
to_fetch.sort(key=self._fetch_times.Get, reverse=True)
fetched = self._Fetch(to_fetch, opt)
_PostRepoFetch(rp, opt.no_repo_verify)
if opt.network_only:
# bail out now; the rest touches the working tree
return
# Iteratively fetch missing and/or nested unregistered submodules
previously_missing_set = set()
while True:
self._ReloadManifest(manifest_name)
all_projects = self.GetProjects(args,
missing_ok=True,
submodules_ok=opt.fetch_submodules)
missing = []
for project in all_projects:
if project.gitdir not in fetched:
missing.append(project)
if not missing:
break
# Stop us from non-stopped fetching actually-missing repos: If set of
# missing repos has not been changed from last fetch, we break.
missing_set = set(p.name for p in missing)
if previously_missing_set == missing_set:
break
previously_missing_set = missing_set
fetched.update(self._Fetch(missing, opt))
if self.manifest.IsMirror or self.manifest.IsArchive:
# bail out now, we have no working tree
return
if self.UpdateProjectList():
sys.exit(1)
syncbuf = SyncBuffer(mp.config,
detach_head = opt.detach_head)
pm = Progress('Syncing work tree', len(all_projects))
for project in all_projects:
pm.update()
if project.worktree:
project.Sync_LocalHalf(syncbuf, force_sync=opt.force_sync)
pm.end()
print(file=sys.stderr)
if not syncbuf.Finish():
sys.exit(1)
# If there's a notice that's supposed to print at the end of the sync, print
# it now...
if self.manifest.notice:
print(self.manifest.notice)
def _PostRepoUpgrade(manifest, quiet=False):
  # Finish a repo self-upgrade: set up GnuPG if the wrapper needs it, then
  # let every existing project react to the new repo version.
  wrapper = Wrapper()
  if wrapper.NeedSetupGnuPG():
    wrapper.SetupGnuPG(quiet)
  for project in manifest.projects:
    if project.Exists:
      project.PostRepoUpgrade()
def _PostRepoFetch(rp, no_repo_verify=False, verbose=False):
  # After fetching the repo project itself: if a newer version arrived,
  # verify its signed tag (unless --no-repo-verify), check it out and
  # restart repo; otherwise optionally report that we're current.
  if rp.HasChanges:
    print('info: A new version of repo is available', file=sys.stderr)
    print(file=sys.stderr)
    if no_repo_verify or _VerifyTag(rp):
      syncbuf = SyncBuffer(rp.config)
      rp.Sync_LocalHalf(syncbuf)
      if not syncbuf.Finish():
        sys.exit(1)
      print('info: Restarting repo with latest version', file=sys.stderr)
      # RepoChangedException makes the outer driver re-exec the new repo.
      raise RepoChangedException(['--repo-upgraded'])
    else:
      print('warning: Skipped upgrade to unverified version', file=sys.stderr)
  else:
    if verbose:
      print('repo version %s is current' % rp.work_git.describe(HEAD),
            file=sys.stderr)
def _VerifyTag(project):
  """Verify the GnuPG signature of the tag the project is pinned to.

  Returns True when the tag verifies (or no keyring exists, in which
  case we warn and proceed); False when the revision is not exactly a
  signed tag or 'git tag -v' fails.
  """
  gpg_dir = os.path.expanduser('~/.repoconfig/gnupg')
  if not os.path.exists(gpg_dir):
    # No keyring from 'repo init': warn but do not block the upgrade.
    # (Fixed: the original message ended with a stray '"""'.)
    print('warning: GnuPG was not available during last "repo init"\n'
          'warning: Cannot automatically authenticate repo.',
          file=sys.stderr)
    return True

  try:
    cur = project.bare_git.describe(project.GetRevisionId())
  except GitError:
    cur = None

  if not cur \
     or re.compile(r'^.*-[0-9]{1,}-g[0-9a-f]{1,}$').match(cur):
    # describe gave '<tag>-<n>-g<sha>': we are not exactly on a tag.
    rev = project.revisionExpr
    if rev.startswith(R_HEADS):
      rev = rev[len(R_HEADS):]

    print(file=sys.stderr)
    print("warning: project '%s' branch '%s' is not signed"
          % (project.name, rev), file=sys.stderr)
    return False

  env = os.environ.copy()
  # NOTE(review): bytes env values break subprocess on Python 3 — confirm
  # whether this module still needs to run under Python 2 before changing.
  env['GIT_DIR'] = project.gitdir.encode()
  env['GNUPGHOME'] = gpg_dir.encode()

  cmd = [GIT, 'tag', '-v', cur]
  proc = subprocess.Popen(cmd,
                          stdout = subprocess.PIPE,
                          stderr = subprocess.PIPE,
                          env = env)
  out = proc.stdout.read()
  proc.stdout.close()

  err = proc.stderr.read()
  proc.stderr.close()

  if proc.wait() != 0:
    print(file=sys.stderr)
    print(out, file=sys.stderr)
    print(err, file=sys.stderr)
    print(file=sys.stderr)
    return False
  return True
class _FetchTimes(object):
_ALPHA = 0.5
def __init__(self, manifest):
self._path = os.path.join(manifest.repodir, '.repo_fetchtimes.json')
self._times = None
self._seen = set()
def Get(self, project):
self._Load()
return self._times.get(project.name, _ONE_DAY_S)
def Set(self, project, t):
self._Load()
name = project.name
old = self._times.get(name, t)
self._seen.add(name)
a = self._ALPHA
self._times[name] = (a*t) + ((1-a) * old)
def _Load(self):
if self._times is None:
try:
f = open(self._path)
try:
self._times = json.load(f)
finally:
f.close()
except (IOError, ValueError):
try:
os.remove(self._path)
except OSError:
pass
self._times = {}
def Save(self):
if self._times is None:
return
to_delete = []
for name in self._times:
if name not in self._seen:
to_delete.append(name)
for name in to_delete:
del self._times[name]
try:
f = open(self._path, 'w')
try:
json.dump(self._times, f, indent=2)
finally:
f.close()
except (IOError, TypeError):
try:
os.remove(self._path)
except OSError:
pass
|
main_window.py | import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import asyncio
from typing import TYPE_CHECKING, Optional, Union, Callable, Sequence
from electrum.storage import WalletStorage, StorageReadWriteError
from electrum.wallet_db import WalletDB
from electrum.wallet import Wallet, InternalAddressCorruption, Abstract_Wallet
from electrum.plugin import run_hook
from electrum import util
from electrum.util import (profiler, InvalidPassword, send_exception_to_crash_reporter,
format_satoshis, format_satoshis_plain, format_fee_satoshis,
maybe_extract_bolt11_invoice)
from electrum.invoices import PR_PAID, PR_FAILED
from electrum import blockchain
from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed
from electrum.interface import PREFERRED_NETWORK_PROTOCOL, ServerAddr
from electrum.logging import Logger
from .i18n import _
from kivy.app import App
from kivy.core.window import Window
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
from .uix.dialogs.password_dialog import OpenWalletDialog, ChangePasswordDialog, PincodeDialog
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum.gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum.gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble, crash_reporter
from .uix.dialogs import OutputList, OutputItem
from .uix.dialogs import TopLabel, RefLabel
from .uix.dialogs.question import Question
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
# register widget cache for keeping memory down timeout to forever to cache
# the data
Cache.register('electrum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum.gui.kivy.uix.screens')
# Register fonts without this you won't be able to use bold/italic...
# inside markup.
# NOTE: the import below re-binds `Label` to kivy.core.text.Label,
# shadowing the kivy.uix.label.Label imported a few lines up.
from kivy.core.text import Label
Label.register('Roboto',
               'electrum/gui/kivy/data/fonts/Roboto.ttf',
               'electrum/gui/kivy/data/fonts/Roboto.ttf',
               'electrum/gui/kivy/data/fonts/Roboto-Bold.ttf',
               'electrum/gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrum.util import (NoDynamicFeeEstimates, NotEnoughFunds)
from .uix.dialogs.lightning_open_channel import LightningOpenChannelDialog
from .uix.dialogs.lightning_channels import LightningChannelsDialog
if TYPE_CHECKING:
from . import ElectrumGui
from electrum.simple_config import SimpleConfig
from electrum.plugin import Plugins
from electrum.paymentrequest import PaymentRequest
class ElectrumWindow(App, Logger):
# Kivy properties bound into the UI (kv files); several are refreshed from
# network callbacks — see update_interfaces() and update_status().
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
fee_status = StringProperty('Fee')
balance = StringProperty('')
fiat_balance = StringProperty('')
is_fiat = BooleanProperty(False)
blockchain_forkpoint = NumericProperty(0)
# lightning gossip statistics, fed by the channel_db / gossip callbacks below
lightning_gossip_num_peers = NumericProperty(0)
lightning_gossip_num_nodes = NumericProperty(0)
lightning_gossip_num_channels = NumericProperty(0)
lightning_gossip_num_queries = NumericProperty(0)
# whether the network layer auto-selects servers; mirrored from net params
auto_connect = BooleanProperty(False)

def on_auto_connect(self, instance, x):
    # kivy property observer: push the new value into the network parameters
    net_params = self.network.get_parameters()
    net_params = net_params._replace(auto_connect=self.auto_connect)
    self.network.run_from_another_thread(self.network.set_parameters(net_params))

def toggle_auto_connect(self, x):
    self.auto_connect = not self.auto_connect
# whether to connect to a single server only; mirrored from net params
oneserver = BooleanProperty(False)

def on_oneserver(self, instance, x):
    # kivy property observer: propagate the preference to the network
    net_params = self.network.get_parameters()
    net_params = net_params._replace(oneserver=self.oneserver)
    self.network.run_from_another_thread(self.network.set_parameters(net_params))

def toggle_oneserver(self, x):
    self.oneserver = not self.oneserver

# human-readable proxy summary shown in the network settings UI
proxy_str = StringProperty('')
def update_proxy_str(self, proxy: dict):
    """Refresh the human-readable proxy summary shown in the UI.

    :param proxy: proxy parameters; expected keys 'mode', 'host' and
        'port' (any may be missing).
    """
    mode = proxy.get('mode')
    host = proxy.get('host')
    port = proxy.get('port')
    # f-string instead of '+' concatenation: the stored port may be an
    # int, which would make `host + ':' + port` raise TypeError.
    self.proxy_str = f"{host}:{port}" if mode else _('None')
def choose_server_dialog(self, popup):
    """Let the user pick a server from the network's known servers.

    The chosen address is written back into the popup's text field.
    """
    from .uix.dialogs.choice_dialog import ChoiceDialog
    protocol = PREFERRED_NETWORK_PROTOCOL
    def cb2(server_str):
        popup.ids.server_str.text = server_str
    servers = self.network.get_servers()
    server_choices = {}
    # only offer servers that speak the preferred protocol
    for _host, d in sorted(servers.items()):
        port = d.get(protocol)
        if port:
            server = ServerAddr(_host, port, protocol=protocol)
            server_choices[server.net_addr_str()] = _host
    ChoiceDialog(_('Choose a server'), server_choices, popup.ids.server_str.text, cb2).open()
def maybe_switch_to_server(self, server_str: str):
    """Parse a user-entered server string and switch the network to it.

    Shows an error popup (and keeps the current server) on parse failure.
    """
    net_params = self.network.get_parameters()
    try:
        server = ServerAddr.from_str_with_inference(server_str)
        if not server: raise Exception("failed to parse")
    except Exception as e:
        self.show_error(_("Invalid server details: {}").format(repr(e)))
        return
    net_params = net_params._replace(server=server)
    self.network.run_from_another_thread(self.network.set_parameters(net_params))
def choose_blockchain_dialog(self, dt):
    """If several chain forks are known, let the user pick which to follow."""
    from .uix.dialogs.choice_dialog import ChoiceDialog
    chains = self.network.get_blockchains()
    def cb(name):
        # map the chosen display name back to a chain id
        with blockchain.blockchains_lock: blockchain_items = list(blockchain.blockchains.items())
        for chain_id, b in blockchain_items:
            if name == b.get_name():
                self.network.run_from_another_thread(self.network.follow_chain_given_id(chain_id))
    chain_objects = [blockchain.blockchains.get(chain_id) for chain_id in chains]
    chain_objects = filter(lambda b: b is not None, chain_objects)
    names = [b.get_name() for b in chain_objects]
    # only show the dialog when there is an actual choice to make
    if len(names) > 1:
        cur_chain = self.network.blockchain().get_name()
        ChoiceDialog(_('Choose your chain'), names, cur_chain, cb).open()
# user preference: opt-in Replace-By-Fee on new transactions
use_rbf = BooleanProperty(False)

def on_use_rbf(self, instance, x):
    self.electrum_config.set_key('use_rbf', self.use_rbf, True)

# user preference: automatic channel backups (Android)
android_backups = BooleanProperty(False)

def on_android_backups(self, instance, x):
    self.electrum_config.set_key('android_backups', self.android_backups, True)

# wallet-level preference: send change to dedicated change addresses
use_change = BooleanProperty(False)

def on_use_change(self, instance, x):
    if self.wallet:
        self.wallet.use_change = self.use_change
        self.wallet.db.put('use_change', self.use_change)
        self.wallet.save_db()

# NOTE: stored inverted in the config under 'confirmed_only'
use_unconfirmed = BooleanProperty(False)

def on_use_unconfirmed(self, instance, x):
    self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
def set_URI(self, uri):
    # open the send tab pre-filled from a bitcoin: URI
    self.switch_to('send')
    self.send_screen.set_URI(uri)

def set_ln_invoice(self, invoice):
    # open the send tab pre-filled from a lightning invoice
    self.switch_to('send')
    self.send_screen.set_ln_invoice(invoice)
def on_new_intent(self, intent):
    # Android intent handler for the bitcoin:/lightning: URI schemes
    data = str(intent.getDataString())
    if str(intent.getScheme()).lower() in ('bitcoin', 'lightning'):
        self._process_invoice_str(data)

# invoice received before the wallet/send screen was ready; replayed by
# _maybe_process_queued_invoice()
_invoice_intent_queued = None  # type: Optional[str]
def _process_invoice_str(self, invoice: str) -> None:
    """Dispatch an incoming invoice string; queue it if the UI is not ready."""
    if not self.wallet:
        self._invoice_intent_queued = invoice
        return
    if not self.send_screen:
        # switching creates the send screen lazily; the queued invoice is
        # replayed by the periodic callback below
        self.switch_to('send')
        self._invoice_intent_queued = invoice
        return
    if invoice.lower().startswith('bitcoin:'):
        self.set_URI(invoice)
    elif invoice.lower().startswith('lightning:'):
        self.set_ln_invoice(invoice)

def _maybe_process_queued_invoice(self, *dt):
    # periodic Clock callback: replay a queued invoice once the wallet exists
    if not self.wallet:
        return
    invoice_queued = self._invoice_intent_queued
    if invoice_queued:
        self._invoice_intent_queued = None
        self._process_invoice_str(invoice_queued)
def on_language(self, instance, language):
    # property observer: switch the translation language at runtime
    self.logger.info('language: {}'.format(language))
    _.switch_lang(language)

def update_history(self, *dt):
    if self.history_screen:
        self.history_screen.update()

def on_quotes(self, d):
    # fiat exchange-rate quotes changed
    self.logger.info("on_quotes")
    self._trigger_update_status()
    self._trigger_update_history()

def on_history(self, d):
    # fiat history source changed; cached per-coin prices are stale
    self.logger.info("on_history")
    if self.wallet:
        self.wallet.clear_coin_price_cache()
    self._trigger_update_history()

def on_fee_histogram(self, *args):
    self._trigger_update_history()
def on_request_status(self, event, wallet, key, status):
    """Callback: one of our payment requests changed state."""
    if key not in self.wallet.receive_requests:
        return
    self.update_tab('receive')
    if self.request_popup and self.request_popup.key == key:
        self.request_popup.update_status()
    if status == PR_PAID:
        self.show_info(_('Payment Received') + '\n' + key)
        self._trigger_update_history()

def on_invoice_status(self, event, wallet, key):
    """Callback: an outgoing invoice changed state."""
    req = self.wallet.get_invoice(key)
    if req is None:
        return
    status = self.wallet.get_invoice_status(req)
    # todo: update single item
    self.update_tab('send')
    if self.invoice_popup and self.invoice_popup.key == key:
        self.invoice_popup.update_status()

def on_payment_succeeded(self, event, wallet, key):
    description = self.wallet.get_label(key)
    self.show_info(_('Payment succeeded') + '\n\n' + description)
    self._trigger_update_history()

def on_payment_failed(self, event, wallet, key, reason):
    self.show_info(_('Payment failed') + '\n\n' + reason)
def _get_bu(self):
    # backing getter for the base_unit alias property
    return self.electrum_config.get_base_unit()

def _set_bu(self, value):
    # backing setter: persist and refresh amount displays
    self.electrum_config.set_base_unit(value)
    self._trigger_update_status()
    self._trigger_update_history()

wallet_name = StringProperty(_('No Wallet'))
base_unit = AliasProperty(_get_bu, _set_bu)
fiat_unit = StringProperty('')

def on_fiat_unit(self, a, b):
    self._trigger_update_history()

def decimal_point(self):
    # number of decimal places implied by the configured base unit
    return self.electrum_config.get_decimal_point()
def btc_to_fiat(self, amount_str):
    """Convert a base-unit amount string to a fiat string, or '' when
    fx is disabled or no rate is available."""
    if not amount_str:
        return ''
    if not self.fx.is_enabled():
        return ''
    rate = self.fx.exchange_rate()
    if rate.is_nan():
        return ''
    fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
    # trim trailing zeros / dot for display
    return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')

def fiat_to_btc(self, fiat_amount):
    """Convert a fiat amount string to a formatted BTC amount string,
    or '' when no rate is available."""
    if not fiat_amount:
        return ''
    rate = self.fx.exchange_rate()
    if rate.is_nan():
        return ''
    satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
    return format_satoshis_plain(satoshis, decimal_point=self.decimal_point())
def get_amount(self, amount_str):
    """Parse an '<amount> <unit>' string into satoshis.

    :param amount_str: e.g. "0.1 BTC"; the unit must equal self.base_unit.
    :return: integer satoshi amount, or None if the number does not parse.
    """
    a, u = amount_str.split()
    assert u == self.base_unit
    try:
        x = Decimal(a)
    except Exception:
        # narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
        # are no longer swallowed here
        return None
    p = pow(10, self.decimal_point())
    return int(p * x)
# backing storage for the read-only `orientation` alias below
_orientation = OptionProperty('landscape',
                              options=('landscape', 'portrait'))

def _get_orientation(self):
    return self._orientation

orientation = AliasProperty(_get_orientation,
                            None,
                            bind=('_orientation',))
'''Screen orientation, updated from window size in on_size().
Can be one of `landscape` or `portrait`.
:data:`orientation` is a read only `AliasProperty` Defaults to 'landscape'
'''

# backing storage for the read-only `ui_mode` alias below
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))

def _get_ui_mode(self):
    return self._ui_mode

ui_mode = AliasProperty(_get_ui_mode,
                        None,
                        bind=('_ui_mode',))
'''Tries to ascertain the kind of device the app is running on.
Can be one of `tablet` or `phone`.
:data:`ui_mode` is a read only `AliasProperty` Defaults to 'phone'
'''
def __init__(self, **kwargs):
    """Build app state from kwargs ('config', 'network', 'plugins',
    'gui_object') and create the debounced UI-update triggers."""
    # initialize variables
    self._clipboard = Clipboard
    self.info_bubble = None
    self.nfcscanner = None
    self.tabs = None
    self.is_exit = False
    self.wallet = None  # type: Optional[Abstract_Wallet]
    self.pause_time = 0
    self.asyncio_loop = asyncio.get_event_loop()
    self.password = None
    App.__init__(self)#, **kwargs)
    Logger.__init__(self)
    self.electrum_config = config = kwargs.get('config', None)  # type: SimpleConfig
    self.language = config.get('language', 'en')
    self.network = network = kwargs.get('network', None)  # type: Network
    if self.network:
        # seed UI properties from the current network state
        self.num_blocks = self.network.get_local_height()
        self.num_nodes = len(self.network.get_interfaces())
        net_params = self.network.get_parameters()
        self.server_host = net_params.server.host
        self.server_port = str(net_params.server.port)
        self.auto_connect = net_params.auto_connect
        self.oneserver = net_params.oneserver
        self.proxy_config = net_params.proxy if net_params.proxy else {}
        self.update_proxy_str(self.proxy_config)
    self.plugins = kwargs.get('plugins', None)  # type: Plugins
    self.gui_object = kwargs.get('gui_object', None)  # type: ElectrumGui
    self.daemon = self.gui_object.daemon
    self.fx = self.daemon.fx
    self.use_rbf = config.get('use_rbf', True)
    self.use_unconfirmed = not config.get('confirmed_only', False)
    # create triggers so as to minimize updating a max of 2 times a sec
    self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
    self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
    self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
    self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
    self._periodic_update_status_during_sync = Clock.schedule_interval(self.update_wallet_synchronizing_progress, .5)
    self._periodic_process_queued_invoice = Clock.schedule_interval(self._maybe_process_queued_invoice, .5)
    # cached dialogs
    self._settings_dialog = None
    self._channels_dialog = None
    self._addresses_dialog = None
    self.fee_status = self.electrum_config.get_fee_status()
    self.invoice_popup = None
    self.request_popup = None
def on_pr(self, pr: 'PaymentRequest'):
    """Handle an incoming BIP70-style payment request."""
    if not self.wallet:
        self.show_error(_('No wallet loaded.'))
        return
    if pr.verify(self.wallet.contacts):
        key = pr.get_id()
        invoice = self.wallet.get_invoice(key)  # FIXME wrong key...
        if invoice and self.wallet.get_invoice_status(invoice) == PR_PAID:
            self.show_error("invoice already paid")
            self.send_screen.do_clear()
        elif pr.has_expired():
            self.show_error(_('Payment request has expired'))
        else:
            self.switch_to('send')
            self.send_screen.set_request(pr)
    else:
        self.show_error("invoice error:" + pr.error)
        self.send_screen.do_clear()
def on_qr(self, data):
    """Dispatch scanned QR content: address, bitcoin: URI, channel
    backup, lightning invoice, or raw/partial transaction."""
    from electrum.bitcoin import is_address
    data = data.strip()
    if is_address(data):
        self.set_URI(data)
        return
    if data.startswith('bitcoin:'):
        self.set_URI(data)
        return
    if data.startswith('channel_backup:'):
        self.import_channel_backup(data)
        return
    bolt11_invoice = maybe_extract_bolt11_invoice(data)
    if bolt11_invoice is not None:
        self.set_ln_invoice(bolt11_invoice)
        return
    # try to decode transaction
    from electrum.transaction import tx_from_any
    try:
        tx = tx_from_any(data)
    except Exception:
        # narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
        # are no longer swallowed here
        tx = None
    if tx:
        self.tx_dialog(tx)
        return
    # show error
    self.show_error("Unable to decode QR data")
def update_tab(self, name):
    """Refresh a single screen (e.g. 'send', 'history') if it has been created."""
    screen_attr = name + '_screen'
    screen = getattr(self, screen_attr, None)
    if screen:
        screen.update()
@profiler
def update_tabs(self):
    """Refresh every main screen that has been instantiated."""
    for screen_name in ('invoices', 'send', 'history', 'receive', 'address'):
        self.update_tab(screen_name)
def switch_to(self, name):
    """Make the tab named '<name>_tab' the active one.

    :param name: base name of the tab, e.g. 'send' or 'history'.
    """
    # (removed an unused `getattr(self, name + '_screen', None)` local)
    panel = self.tabs.ids.panel
    tab = self.tabs.ids[name + '_tab']
    panel.switch_to(tab)
def show_request(self, is_lightning, key):
    """Open the popup for a receive request identified by key."""
    from .uix.dialogs.request_dialog import RequestDialog
    self.request_popup = RequestDialog('Request', key)
    self.request_popup.open()

def show_invoice(self, is_lightning, key):
    """Open the popup for an outgoing invoice identified by key."""
    from .uix.dialogs.invoice_dialog import InvoiceDialog
    invoice = self.wallet.get_invoice(key)
    if not invoice:
        return
    # lightning invoices carry the bolt11 string; on-chain uses the key
    data = invoice.invoice if is_lightning else key
    self.invoice_popup = InvoiceDialog('Invoice', data, key)
    self.invoice_popup.open()
def qr_dialog(self, title, data, show_text=False, text_for_clipboard=None, help_text=None):
    """Show data as a QR code; on render failure, fall back to copying
    the text to the clipboard and informing the user."""
    from .uix.dialogs.qr_dialog import QRDialog
    def on_qr_failure():
        popup.dismiss()
        msg = _('Failed to display QR code.')
        if text_for_clipboard:
            msg += '\n' + _('Text copied to clipboard.')
            self._clipboard.copy(text_for_clipboard)
        Clock.schedule_once(lambda dt: self.show_info(msg))
    popup = QRDialog(
        title, data, show_text,
        failure_cb=on_qr_failure,
        text_for_clipboard=text_for_clipboard,
        help_text=help_text)
    popup.open()
def scan_qr(self, on_complete):
    """Launch the Android QR scanner activity and call `on_complete`
    with the scanned text. No-op on non-Android platforms."""
    if platform != 'android':
        return
    from jnius import autoclass, cast
    from android import activity
    PythonActivity = autoclass('org.kivy.android.PythonActivity')
    SimpleScannerActivity = autoclass("org.electrum.qr.SimpleScannerActivity")
    Intent = autoclass('android.content.Intent')
    intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
    def on_qr_result(requestCode, resultCode, intent):
        try:
            if resultCode == -1:  # RESULT_OK:
                # this doesn't work due to some bug in jnius:
                # contents = intent.getStringExtra("text")
                String = autoclass("java.lang.String")
                contents = intent.getStringExtra(String("text"))
                on_complete(contents)
        except Exception as e:  # exc would otherwise get lost
            send_exception_to_crash_reporter(e)
        finally:
            # one-shot: always detach the handler again
            activity.unbind(on_activity_result=on_qr_result)
    activity.bind(on_activity_result=on_qr_result)
    PythonActivity.mActivity.startActivityForResult(intent, 0)
def do_share(self, data, title):
    """Open the Android share chooser with the given text. No-op elsewhere."""
    if platform != 'android':
        return
    from jnius import autoclass, cast
    JS = autoclass('java.lang.String')
    Intent = autoclass('android.content.Intent')
    sendIntent = Intent()
    sendIntent.setAction(Intent.ACTION_SEND)
    sendIntent.setType("text/plain")
    sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
    PythonActivity = autoclass('org.kivy.android.PythonActivity')
    currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
    it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
    currentActivity.startActivity(it)
def build(self):
    # Kivy entry point: load the root widget tree from the kv file
    return Builder.load_file('electrum/gui/kivy/main.kv')

def _pause(self):
    if platform == 'android':
        # move activity to back
        from jnius import autoclass
        python_act = autoclass('org.kivy.android.PythonActivity')
        mActivity = python_act.mActivity
        mActivity.moveTaskToBack(True)
def handle_crash_on_startup(func):
    # Decorator (a plain function defined in the class body, used below on
    # on_start): wraps a startup method so that any exception opens the
    # crash reporter and then shuts the app down instead of dying silently.
    def wrapper(self, *args, **kwargs):
        try:
            return func(self, *args, **kwargs)
        except Exception as e:
            self.logger.exception('crash on startup')
            from .uix.dialogs.crash_reporter import CrashReporter
            # show the crash reporter, and when it's closed, shutdown the app
            cr = CrashReporter(self, exctype=type(e), value=e, tb=e.__traceback__)
            cr.on_dismiss = lambda: self.stop()
            Clock.schedule_once(lambda _, cr=cr: cr.open(), 0)
    return wrapper
@handle_crash_on_startup
def on_start(self):
    ''' This is the start point of the kivy ui
    '''
    import time
    self.logger.info('Time to on_start: {} <<<<<<<<'.format(time.process_time()))
    Window.bind(size=self.on_size, on_keyboard=self.on_keyboard)
    Window.bind(on_key_down=self.on_key_down)
    #Window.softinput_mode = 'below_target'
    self.on_size(Window, Window.size)
    self.init_ui()
    crash_reporter.ExceptionHook(self)
    # init plugins
    run_hook('init_kivy', self)
    # fiat currency
    self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
    # default tab
    self.switch_to('history')
    # bind intent for bitcoin: URI scheme
    if platform == 'android':
        from android import activity
        from jnius import autoclass
        PythonActivity = autoclass('org.kivy.android.PythonActivity')
        mactivity = PythonActivity.mActivity
        # handle the intent the app was launched with, then future ones
        self.on_new_intent(mactivity.getIntent())
        activity.bind(on_new_intent=self.on_new_intent)
    # connect callbacks
    if self.network:
        interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
                     'status', 'new_transaction', 'verified']
        util.register_callback(self.on_network_event, interests)
        util.register_callback(self.on_fee, ['fee'])
        util.register_callback(self.on_fee_histogram, ['fee_histogram'])
        util.register_callback(self.on_quotes, ['on_quotes'])
        util.register_callback(self.on_history, ['on_history'])
        util.register_callback(self.on_channels, ['channels_updated'])
        util.register_callback(self.on_channel, ['channel'])
        util.register_callback(self.on_invoice_status, ['invoice_status'])
        util.register_callback(self.on_request_status, ['request_status'])
        util.register_callback(self.on_payment_failed, ['payment_failed'])
        util.register_callback(self.on_payment_succeeded, ['payment_succeeded'])
        util.register_callback(self.on_channel_db, ['channel_db'])
        util.register_callback(self.set_num_peers, ['gossip_peers'])
        util.register_callback(self.set_unknown_channels, ['unknown_channels'])
    # load wallet
    self.load_wallet_by_name(self.electrum_config.get_wallet_path(use_gui_last_wallet=True))
    # URI passed in config
    uri = self.electrum_config.get('url')
    if uri:
        self.set_URI(uri)
def on_channel_db(self, event, num_nodes, num_channels, num_policies):
    # gossip statistics callback (num_policies currently unused here)
    self.lightning_gossip_num_nodes = num_nodes
    self.lightning_gossip_num_channels = num_channels

def set_num_peers(self, event, num_peers):
    self.lightning_gossip_num_peers = num_peers

def set_unknown_channels(self, event, unknown):
    self.lightning_gossip_num_queries = unknown

def get_wallet_path(self):
    # path of the currently open wallet file, or '' if none is open
    if self.wallet:
        return self.wallet.storage.path
    else:
        return ''
def on_wizard_complete(self, wizard, storage, db):
    """Install-wizard completion callback: load the created wallet, or
    fall back to re-opening the last saved one."""
    if storage:
        wallet = Wallet(db, storage, config=self.electrum_config)
        wallet.start_network(self.daemon.network)
        self.daemon.add_wallet(wallet)
        self.load_wallet(wallet)
    elif not self.wallet:
        # wizard did not return a wallet; and there is no wallet open atm
        # try to open last saved wallet (potentially start wizard again)
        self.load_wallet_by_name(self.electrum_config.get_wallet_path(use_gui_last_wallet=True),
                                 ask_if_wizard=True)
def _on_decrypted_storage(self, storage: WalletStorage):
    """After successful decryption: run the wizard to upgrade the DB if
    needed, otherwise load the wallet directly."""
    assert storage.is_past_initial_decryption()
    db = WalletDB(storage.read(), manual_upgrades=False)
    if db.requires_upgrade():
        wizard = Factory.InstallWizard(self.electrum_config, self.plugins)
        wizard.path = storage.path
        wizard.bind(on_wizard_complete=self.on_wizard_complete)
        wizard.upgrade_storage(storage, db)
    else:
        self.on_wizard_complete(None, storage, db)
def load_wallet_by_name(self, path, ask_if_wizard=False):
    """Open the wallet at `path` via the password dialog.

    :param ask_if_wizard: if True, first ask whether to (re)launch the
        wizard; declining deletes the file and stops the app.
    """
    if not path:
        return
    if self.wallet and self.wallet.storage.path == path:
        # already open; nothing to do
        return
    else:
        def launch_wizard():
            d = OpenWalletDialog(self, path, self.on_open_wallet)
            d.open()
        if not ask_if_wizard:
            launch_wizard()
        else:
            def handle_answer(b: bool):
                if b:
                    launch_wizard()
                else:
                    # user declined: remove the half-created file and quit
                    try: os.unlink(path)
                    except FileNotFoundError: pass
                    self.stop()
            d = Question(_('Do you want to launch the wizard again?'), handle_answer)
            d.open()
def on_open_wallet(self, pw, storage):
    """Callback from OpenWalletDialog: run the wizard for a new file,
    otherwise decrypt the存orage with `pw` and load the wallet.
    """
    if not storage.file_exists():
        wizard = Factory.InstallWizard(self.electrum_config, self.plugins)
        wizard.path = storage.path
        wizard.bind(on_wizard_complete=self.on_wizard_complete)
        wizard.run('new')
    else:
        try:
            storage.decrypt(pw)
        except StorageReadWriteError:
            # BUG fix: this used the module-level `app` placeholder, which
            # is None here (AttributeError); use our own error popup.
            self.show_error(_("R/W error accessing path"))
            return
        self.password = pw
        self._on_decrypted_storage(storage)
def on_stop(self):
    # Kivy app-lifecycle hook: release the wallet on shutdown
    self.logger.info('on_stop')
    self.stop_wallet()

def stop_wallet(self):
    # detach and stop the current wallet, if any
    if self.wallet:
        self.daemon.stop_wallet(self.wallet.storage.path)
        self.wallet = None
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
    # desktop keyboard shortcuts (ctrl-based)
    if 'ctrl' in modifiers:
        # q=24 w=25
        if keycode in (24, 25):
            self.stop()
        elif keycode == 27:
            # r=27
            # force update wallet
            self.update_wallet()
        elif keycode == 112:
            # pageup
            #TODO move to next tab
            pass
        elif keycode == 117:
            # pagedown
            #TODO move to prev tab
            pass
    #TODO: alt+tab_number to activate the particular tab
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
    # Android back button (key 27): require a second press to exit
    if key == 27 and self.is_exit is False:
        self.is_exit = True
        self.show_info(_('Press again to exit'))
        return True
    # override settings button
    if key in (319, 282):  # f1/settings button on android
        #self.gui.main_gui.toggle_settings(self)
        return True
def settings_dialog(self):
    from .uix.dialogs.settings import SettingsDialog
    # dialog instance is cached across invocations
    if self._settings_dialog is None:
        self._settings_dialog = SettingsDialog(self)
    self._settings_dialog.update()
    self._settings_dialog.open()
def lightning_open_channel_dialog(self):
    if not self.wallet.has_lightning():
        self.show_error(_('Lightning is not enabled for this wallet'))
        return
    d = LightningOpenChannelDialog(self)
    d.open()

def lightning_channels_dialog(self):
    # dialog instance is cached across invocations
    if self._channels_dialog is None:
        self._channels_dialog = LightningChannelsDialog(self)
    self._channels_dialog.open()

def on_channel(self, evt, wallet, chan):
    # single-channel state change: refresh the channels dialog if open
    if self._channels_dialog:
        Clock.schedule_once(lambda dt: self._channels_dialog.update())

def on_channels(self, evt, wallet):
    # channel-list change: refresh the channels dialog if open
    if self._channels_dialog:
        Clock.schedule_once(lambda dt: self._channels_dialog.update())
def wallets_dialog(self):
    """Open the wallet chooser for the current wallet directory."""
    from .uix.dialogs.wallets import WalletDialog
    dirname = os.path.dirname(self.electrum_config.get_wallet_path())
    d = WalletDialog(dirname, self.load_wallet_by_name)
    d.open()
def popup_dialog(self, name):
    """Open a popup by name: a dedicated dialog method, any '*_dialog'
    method on self, or a kv file from ui_screens/."""
    if name == 'settings':
        self.settings_dialog()
    elif name == 'wallets':
        self.wallets_dialog()
    elif name == 'status':
        popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/'+name+'.kv')
        master_public_keys_layout = popup.ids.master_public_keys
        # NOTE(review): skips the first xpub — presumably already shown by
        # the kv file; confirm against status.kv
        for xpub in self.wallet.get_master_public_keys()[1:]:
            master_public_keys_layout.add_widget(TopLabel(text=_('Master Public Key')))
            ref = RefLabel()
            ref.name = _('Master Public Key')
            ref.data = xpub
            master_public_keys_layout.add_widget(ref)
        popup.open()
    elif name.endswith("_dialog"):
        getattr(self, name)()
    else:
        popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/'+name+'.kv')
        popup.open()
@profiler
def init_ui(self):
    ''' Initialize The Ux part of electrum. This function performs the basic
    tasks of setting up the ui.
    '''
    #from weakref import ref
    self.funds_error = False
    # setup UX
    self.screens = {}
    #setup lazy imports for mainscreen
    Factory.register('AnimatedPopup',
                     module='electrum.gui.kivy.uix.dialogs')
    Factory.register('QRCodeWidget',
                     module='electrum.gui.kivy.uix.qrcodewidget')
    # preload widgets. Remove this if you want to load the widgets on demand
    #Cache.append('electrum_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
    #Cache.append('electrum_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
    # load and focus the ui
    self.root.manager = self.root.ids['manager']
    # screens are created lazily; None until first shown
    self.history_screen = None
    self.contacts_screen = None
    self.send_screen = None
    self.invoices_screen = None
    self.receive_screen = None
    self.requests_screen = None
    self.address_screen = None
    self.icon = "electrum/gui/icons/electrum.png"
    self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
    """Refresh server/chain/proxy properties from current network state."""
    net_params = self.network.get_parameters()
    self.num_nodes = len(self.network.get_interfaces())
    self.num_chains = len(self.network.get_blockchains())
    chain = self.network.blockchain()
    self.blockchain_forkpoint = chain.get_max_forkpoint()
    self.blockchain_name = chain.get_name()
    interface = self.network.interface
    if interface:
        self.server_host = interface.host
    else:
        # no active interface yet: show the configured server as pending
        self.server_host = str(net_params.server.host) + ' (connecting...)'
    self.proxy_config = net_params.proxy or {}
    self.update_proxy_str(self.proxy_config)
def on_network_event(self, event, *args):
    # route network callback events to the appropriate debounced triggers
    self.logger.info('network event: '+ event)
    if event == 'network_updated':
        self._trigger_update_interfaces()
        self._trigger_update_status()
    elif event == 'wallet_updated':
        self._trigger_update_wallet()
        self._trigger_update_status()
    elif event == 'blockchain_updated':
        # to update number of confirmations in history
        self._trigger_update_wallet()
    elif event == 'status':
        self._trigger_update_status()
    elif event == 'new_transaction':
        self._trigger_update_wallet()
    elif event == 'verified':
        self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet: 'Abstract_Wallet'):
    """Attach an already-started wallet to the GUI and refresh all tabs."""
    if self.wallet:
        self.stop_wallet()
    self.wallet = wallet
    self.wallet_name = wallet.basename()
    self.update_wallet()
    # Once GUI has been initialized check if we want to announce something
    # since the callback has been called before the GUI was initialized
    if self.receive_screen:
        self.receive_screen.clear()
    self.update_tabs()
    run_hook('load_wallet', wallet, self)
    try:
        wallet.try_detecting_internal_addresses_corruption()
    except InternalAddressCorruption as e:
        self.show_error(str(e))
        send_exception_to_crash_reporter(e)
        return
    self.use_change = self.wallet.use_change
    self.electrum_config.save_last_wallet(wallet)
    self.request_focus_for_main_view()
def request_focus_for_main_view(self):
    if platform != 'android':
        return
    # The main view of the activity might be not have focus
    # in which case e.g. the OS "back" button would not work.
    # see #6276 (specifically "method 2" and "method 3")
    from jnius import autoclass
    PythonActivity = autoclass('org.kivy.android.PythonActivity')
    PythonActivity.requestFocusForMainView()
def update_status(self, *dt):
    """Recompute the header status line and the displayed balances."""
    if not self.wallet:
        return
    if self.network is None or not self.network.is_connected():
        status = _("Offline")
    elif self.network.is_connected():
        self.num_blocks = self.network.get_local_height()
        server_height = self.network.get_server_height()
        server_lag = self.num_blocks - server_height
        if not self.wallet.up_to_date or server_height == 0:
            # still syncing: show request progress
            num_sent, num_answered = self.wallet.get_history_sync_state_details()
            status = ("{} [size=18dp]({}/{})[/size]"
                      .format(_("Synchronizing..."), num_answered, num_sent))
        elif server_lag > 1:
            status = _("Server is lagging ({} blocks)").format(server_lag)
        else:
            status = ''
    else:
        status = _("Disconnected")
    if status:
        self.balance = status
        self.fiat_balance = status
    else:
        # fully synced: show combined on-chain + lightning balance
        c, u, x = self.wallet.get_balance()
        l = int(self.wallet.lnworker.get_balance()) if self.wallet.lnworker else 0
        balance_sat = c + u + x + l
        text = self.format_amount(balance_sat)
        self.balance = str(text.strip()) + ' [size=22dp]%s[/size]'% self.base_unit
        self.fiat_balance = self.fx.format_amount(balance_sat) + ' [size=22dp]%s[/size]'% self.fx.ccy
def update_wallet_synchronizing_progress(self, *dt):
    """Periodic tick: while the wallet is still syncing, keep the
    status line fresh."""
    wallet = self.wallet
    if not wallet:
        return
    if not wallet.up_to_date:
        self._trigger_update_status()
def get_max_amount(self):
    """Compute the spendable maximum ('!' output) as a formatted amount
    string, or '' when it cannot be determined."""
    from electrum.transaction import PartialTxOutput
    if run_hook('abort_send', self):
        return ''
    inputs = self.wallet.get_spendable_coins(None)
    if not inputs:
        return ''
    addr = None
    if self.send_screen:
        addr = str(self.send_screen.address)
    if not addr:
        addr = self.wallet.dummy_address()
    outputs = [PartialTxOutput.from_address_and_value(addr, '!')]
    try:
        tx = self.wallet.make_unsigned_transaction(coins=inputs, outputs=outputs)
    except NoDynamicFeeEstimates as e:
        Clock.schedule_once(lambda dt, bound_e=e: self.show_error(str(bound_e)))
        return ''
    except NotEnoughFunds:
        return ''
    except InternalAddressCorruption as e:
        self.show_error(str(e))
        send_exception_to_crash_reporter(e)
        return ''
    amount = tx.output_value()
    # subtract plugin-imposed extra fees (e.g. trustedcoin)
    __, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
    amount_after_all_fees = amount - x_fee_amount
    return format_satoshis_plain(amount_after_all_fees, decimal_point=self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
    # satoshis -> string in the user's configured base unit
    return format_satoshis(
        x,
        num_zeros=0,
        decimal_point=self.decimal_point(),
        is_diff=is_diff,
        whitespaces=whitespaces,
    )
def format_amount_and_units(self, x) -> str:
    """Format satoshis as '<amount> <unit>'; 'none'/'max' for the
    special values None and '!'."""
    if x is None:
        return 'none'
    if x == '!':
        return 'max'
    amount_str = format_satoshis_plain(x, decimal_point=self.decimal_point())
    return amount_str + ' ' + self.base_unit
def format_fee_rate(self, fee_rate):
    """Format a fee rate given in sat/kB as a sat/byte string."""
    per_byte = fee_rate / 1000  # sat/kB -> sat/byte
    return format_fee_satoshis(per_byte) + ' sat/byte'
#@profiler
def update_wallet(self, *dt):
    # refresh status always; refresh tabs only when synced or offline
    self._trigger_update_status()
    if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
        self.update_tabs()
def notify(self, message):
    """Show an OS notification via plyer, if available.

    :param message: notification body text.
    """
    try:
        global notification
        if not notification:
            # lazy import: plyer is only needed (and may only exist) here
            from plyer import notification
        icon = (os.path.dirname(os.path.realpath(__file__))
                + '/../../' + self.icon)
        notification.notify('Electrum', message,
                            app_icon=icon, app_name='Electrum')
    except ImportError:
        # BUG fix: Logger has no 'Error' method — the previous
        # `self.logger.Error(...)` raised AttributeError instead of logging.
        self.logger.error('Notification: needs plyer; `sudo python3 -m pip install plyer`')
def on_pause(self):
    # app going to background: remember when, so on_resume can re-lock
    self.pause_time = time.time()
    # pause nfc
    if self.nfcscanner:
        self.nfcscanner.nfc_disable()
    return True
def on_resume(self):
    now = time.time()
    # require the PIN again after more than 5 minutes in the background
    if self.wallet and self.has_pin_code() and now - self.pause_time > 5*60:
        d = PincodeDialog(
            self,
            check_password=self.check_pin_code,
            on_success=None,
            on_failure=self.stop)
        d.open()
    if self.nfcscanner:
        self.nfcscanner.nfc_enable()
def on_size(self, instance, value):
    """Window resize handler: derive orientation and phone/tablet mode."""
    width, height = value
    is_landscape = width > height
    self._orientation = 'landscape' if is_landscape else 'portrait'
    shorter_side = min(width, height)
    self._ui_mode = 'tablet' if shorter_side > inch(3.51) else 'phone'
def on_ref_label(self, label):
    """Tapping a RefLabel shows its data as a QR code (if it has any)."""
    if label.data:
        self.qr_dialog(label.name, label.data, True)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
               exit=False, icon='atlas://electrum/gui/kivy/theming/light/error', duration=0,
               modal=False):
    ''' Show an error Message Bubble.
    '''
    self.show_info_bubble(text=error, icon=icon, width=width,
                          pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
                          duration=duration, modal=modal)

def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
              exit=False, duration=0, modal=False):
    ''' Show an Info Message Bubble.
    '''
    # same as show_error but with the "important" (info) icon
    self.show_error(error, icon='atlas://electrum/gui/kivy/theming/light/important',
                    duration=duration, modal=modal, exit=exit, pos=pos,
                    arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
                     arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
    '''Method to show an Information Bubble
    .. parameters::
        text: Message to be displayed
        pos: position for the bubble
        duration: duration the bubble remains on screen. 0 = click to hide
        width: width of the Bubble
        arrow_pos: arrow position for the bubble
    '''
    text = str(text)  # so that we also handle e.g. Exception
    # the bubble widget is created once and reused
    info_bubble = self.info_bubble
    if not info_bubble:
        info_bubble = self.info_bubble = Factory.InfoBubble()
    win = Window
    if info_bubble.parent:
        # detach a bubble that is still on screen (modal ones live in a
        # wrapper view)
        win.remove_widget(info_bubble
                          if not info_bubble.modal else
                          info_bubble._modal_view)
    if not arrow_pos:
        info_bubble.show_arrow = False
    else:
        info_bubble.show_arrow = True
        info_bubble.arrow_pos = arrow_pos
    img = info_bubble.ids.img
    if text == 'texture':
        # icon holds a texture not a source image
        # display the texture in full screen
        text = ''
        img.texture = icon
        info_bubble.fs = True
        info_bubble.show_arrow = False
        img.allow_stretch = True
        info_bubble.dim_background = True
        info_bubble.background_image = 'atlas://electrum/gui/kivy/theming/light/card'
    else:
        info_bubble.fs = False
        info_bubble.icon = icon
        #if img.texture and img._coreimage:
        #    img.reload()
        img.allow_stretch = False
        info_bubble.dim_background = False
        info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
    info_bubble.message = text
    if not pos:
        pos = (win.center[0], win.center[1] - (info_bubble.height/2))
    info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
    """Open the on-chain transaction detail dialog."""
    from .uix.dialogs.tx_dialog import TxDialog
    d = TxDialog(self, tx)
    d.open()

def show_transaction(self, txid):
    """Look up a tx by id (wallet db, then lightning watcher) and show it."""
    tx = self.wallet.db.get_transaction(txid)
    if not tx and self.wallet.lnworker:
        tx = self.wallet.lnworker.lnwatcher.db.get_transaction(txid)
    if tx:
        self.tx_dialog(tx)
    else:
        self.show_error(f'Transaction not found {txid}')

def lightning_tx_dialog(self, tx):
    """Open the lightning transaction detail dialog."""
    from .uix.dialogs.lightning_tx_dialog import LightningTxDialog
    d = LightningTxDialog(self, tx)
    d.open()
def sign_tx(self, *args):
    # sign off the UI thread; callbacks are marshalled back via Clock
    threading.Thread(target=self._sign_tx, args=args).start()

def _sign_tx(self, tx, password, on_success, on_failure):
    # runs on a worker thread
    try:
        self.wallet.sign_transaction(tx, password)
    except InvalidPassword:
        Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
        return
    # plugins (e.g. trustedcoin) may wrap the success callback
    on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
    Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
    """Worker: broadcast *tx*, then hand (ok, msg) back on the UI thread."""
    ok, msg = False, None
    try:
        self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
    except TxBroadcastError as e:
        msg = e.get_message_for_gui()
    except BestEffortRequestFailed as e:
        msg = repr(e)
    else:
        ok, msg = True, tx.txid()
    Clock.schedule_once(lambda dt: on_complete(ok, msg))
def broadcast(self, tx):
    """Broadcast *tx* if connected, reporting progress and errors to the user."""
    def on_complete(ok, msg):
        if not ok:
            self.show_error(msg or '')
            return
        self.show_info(_('Payment sent.'))
        if self.send_screen:
            self.send_screen.do_clear()

    if self.network and self.network.is_connected():
        self.show_info(_('Sending'))
        threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
    else:
        self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
    """Let the user edit the description/label shown on *screen*."""
    from .uix.dialogs.label_dialog import LabelDialog
    current = screen.message

    def on_text(text):
        screen.message = text

    LabelDialog(_('Enter description'), current, on_text).open()
def amount_dialog(self, screen, show_max):
    """Open the amount-entry dialog for *screen*, pre-filled with its amount."""
    from .uix.dialogs.amount_dialog import AmountDialog
    amount = screen.amount
    if amount:
        # stored as e.g. "0.1 BTC": keep the number, sanity-check the unit
        amount, unit = str(amount).split()
        assert unit == self.base_unit

    def on_amount(new_amount):
        screen.amount = new_amount

    AmountDialog(show_max, amount, on_amount).open()
def addresses_dialog(self):
    """Show the addresses dialog (created lazily and cached on the app)."""
    from .uix.dialogs.addresses import AddressesDialog
    dialog = self._addresses_dialog
    if dialog is None:
        dialog = self._addresses_dialog = AddressesDialog(self)
    dialog.update()
    dialog.open()
def fee_dialog(self, label, dt):
    """Open the fee settings dialog; refresh the cached fee status when done."""
    from .uix.dialogs.fee_dialog import FeeDialog

    def on_closed():
        self.fee_status = self.electrum_config.get_fee_status()

    FeeDialog(self, self.electrum_config, on_closed).open()
def on_fee(self, event, *arg):
    # network callback: refresh the fee status string shown in the UI
    self.fee_status = self.electrum_config.get_fee_status()
def protected(self, msg, f, args):
    """Ask for confirmation before calling f(*args, self.password).

    If a PIN code is configured the user must enter it; otherwise a plain
    OK/Cancel question is shown.  Note that in both cases it is the wallet
    password (self.password), not the entered PIN, that is appended to
    *args* when *f* is invoked.
    """
    if self.electrum_config.get('pin_code'):
        msg += "\n" + _("Enter your PIN code to proceed")
        # the dialog validates the PIN via check_password; the pw it passes
        # to on_success is deliberately ignored (see docstring)
        on_success = lambda pw: f(*args, self.password)
        d = PincodeDialog(
            self,
            message = msg,
            check_password=self.check_pin_code,
            on_success=on_success,
            on_failure=lambda: None)
        d.open()
    else:
        d = Question(
            msg,
            lambda b: f(*args, self.password) if b else None,
            yes_str=_("OK"),
            no_str=_("Cancel"),
            title=_("Confirm action"))
        d.open()
def toggle_lightning(self):
    """Enable or disable lightning for this wallet, with user confirmation.

    Disabling is only offered when there are no open channels, since it
    deletes the lightning private keys.
    """
    if self.wallet.has_lightning():
        if not bool(self.wallet.lnworker.channels):
            warning = _('This will delete your lightning private keys')
            d = Question(_('Disable Lightning?') + '\n\n' + warning, self._disable_lightning)
            d.open()
        else:
            # cannot disable while channels exist
            self.show_info('This wallet has channels')
    else:
        warning1 = _("Lightning support in Electrum is experimental. Do not put large amounts in lightning channels.")
        warning2 = _("Funds stored in lightning channels are not recoverable from your seed. You must backup your wallet file everytime you create a new channel.")
        d = Question(_('Enable Lightning?') + '\n\n' + warning1 + '\n\n' + warning2, self._enable_lightning)
        d.open()
def _enable_lightning(self, b):
    """Question callback: initialize lightning keys and reload the wallet."""
    if not b:
        return  # user declined
    path = self.get_wallet_path()
    self.wallet.init_lightning()
    self.show_info(_('Lightning keys have been initialized.'))
    # restart the wallet so the new keys take effect
    self.stop_wallet()
    self.load_wallet_by_name(path)
def _disable_lightning(self, b):
    """Question callback: delete lightning keys and reload the wallet."""
    if not b:
        return  # user declined
    path = self.get_wallet_path()
    self.wallet.remove_lightning()
    self.show_info(_('Lightning keys have been removed.'))
    # restart the wallet so the removal takes effect
    self.stop_wallet()
    self.load_wallet_by_name(path)
def delete_wallet(self):
    """Ask for confirmation, then delete the currently open wallet."""
    basename = os.path.basename(self.wallet.storage.path)
    question = _('Delete wallet?') + '\n' + basename
    Question(question, self._delete_wallet).open()
def _delete_wallet(self, b):
    """Question callback: route the actual deletion through protected()."""
    if not b:
        return
    name = self.wallet.basename()
    self.protected(_("Are you sure you want to delete wallet {}?").format(name),
                   self.__delete_wallet, ())
def __delete_wallet(self, pw):
    """Final step of wallet deletion: verify *pw*, unlink the wallet file,
    then load the most recently used remaining wallet."""
    wallet_path = self.get_wallet_path()
    basename = os.path.basename(wallet_path)
    if self.wallet.has_password():
        try:
            self.wallet.check_password(pw)
        except InvalidPassword:
            self.show_error("Invalid PIN")
            return
    self.stop_wallet()
    os.unlink(wallet_path)
    # show_error is used here for its prominent styling, not because of a failure
    self.show_error(_("Wallet removed: {}").format(basename))
    new_path = self.electrum_config.get_wallet_path(use_gui_last_wallet=True)
    self.load_wallet_by_name(new_path)
def show_seed(self, label):
    """Reveal the wallet seed in *label*, after PIN / confirmation via protected()."""
    self.protected(_("Display your seed?"), self._show_seed, (label,))
def _show_seed(self, label, password):
    """Fill *label* with the seed (plus passphrase, if any).

    Aborts silently when the wallet is encrypted but no password was given.
    """
    if self.wallet.has_password() and password is None:
        return
    ks = self.wallet.keystore
    seed = ks.get_seed(password)
    label.data = seed
    passphrase = ks.get_passphrase(password)
    if passphrase:
        label.data = label.data + '\n\n' + _('Passphrase') + ': ' + passphrase
def has_pin_code(self):
    """Return True iff a PIN code is configured."""
    pin = self.electrum_config.get('pin_code')
    return bool(pin)
def check_pin_code(self, pin):
    """Raise InvalidPassword unless *pin* matches the configured PIN code."""
    expected = self.electrum_config.get('pin_code')
    if pin != expected:
        raise InvalidPassword
def change_password(self, cb):
    """Open the change-password dialog and apply the result to the wallet."""
    def on_success(old_password, new_password):
        self.wallet.update_password(old_password, new_password)
        self.password = new_password
        self.show_info(_("Your password was updated"))

    def on_failure():
        self.show_error(_("Password not updated"))

    ChangePasswordDialog(self, self.wallet, on_success, on_failure).open()
def change_pin_code(self, cb):
    """Open the PIN change dialog; persist the new PIN on success."""
    def on_success(old_password, new_password):
        self.electrum_config.set_key('pin_code', new_password)
        cb()
        message = _("PIN updated") if new_password else _('PIN disabled')
        self.show_info(message)

    def on_failure():
        self.show_error(_("PIN not updated"))

    dialog = PincodeDialog(
        self,
        check_password=self.check_pin_code,
        on_success=on_success,
        on_failure=on_failure,
        is_change=True,
        has_password=self.has_pin_code())
    dialog.open()
def save_backup(self):
    """Save a wallet backup, requesting storage permission first on Android."""
    if platform != 'android':
        self._save_backup()
        return
    from android.permissions import request_permissions, Permission

    def cb(permissions, grant_results: Sequence[bool]):
        # permission denied (or dialog dismissed) -> nothing we can do
        if not grant_results or not grant_results[0]:
            self.show_error(_("Cannot save backup without STORAGE permission"))
            return
        # note: Clock.schedule_once is a hack so that we get called on a non-daemon thread
        # (needed for WalletDB.write)
        Clock.schedule_once(lambda dt: self._save_backup())
    request_permissions([Permission.WRITE_EXTERNAL_STORAGE], cb)
def _save_backup(self):
    """Write a backup of the wallet file and report the outcome."""
    path = self.wallet.save_backup()
    if not path:
        self.show_error(_("Backup NOT saved. Backup directory not configured."))
        return
    self.show_info(_("Backup saved:") + f"\n{path}")
def export_private_keys(self, pk_label, addr):
    """Reveal the private key for *addr* in *pk_label*, guarded by protected()."""
    if self.wallet.is_watching_only():
        self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
        return

    def show_private_key(addr, pk_label, password):
        # password is appended by protected(); None means the wallet is
        # encrypted and no password was supplied
        if self.wallet.has_password() and password is None:
            return
        if not self.wallet.can_export():
            return
        try:
            key = str(self.wallet.export_private_key(addr, password))
            pk_label.data = key
        except InvalidPassword:
            self.show_error("Invalid PIN")
            return
    self.protected(_("Decrypt your private key?"), show_private_key, (addr, pk_label))
def import_channel_backup(self, encrypted):
    """Ask the user to confirm importing an (encrypted) channel backup."""
    question = _('Import Channel Backup?')
    Question(question, lambda b: self._import_channel_backup(b, encrypted)).open()
def _import_channel_backup(self, b, encrypted):
    """Question callback: import the channel backup, then show channels."""
    if not b:
        return
    try:
        self.wallet.lnbackups.import_channel_backup(encrypted)
    except Exception as exc:
        self.logger.exception("failed to import backup")
        message = "failed to import backup" + '\n' + str(exc)
        self.show_error(message)
        return
    self.lightning_channels_dialog()
|
test_partition.py | import threading
import time
from multiprocessing import Pool, Process
import pytest
from utils import utils as ut
from common.constants import default_entities, default_fields
from common.common_type import CaseLabel
from utils.util_log import test_log as log
TIMEOUT = 120
default_nb = ut.default_nb
default_tag = ut.default_tag
class TestCreateBase:
    """
    ******************************************************************
    The following cases are used to test `create_partition` function
    ******************************************************************
    """

    @pytest.mark.tags(CaseLabel.L0)
    def test_create_partition_a(self, connect, collection):
        """
        target: test create partition, check status returned
        method: call function: create_partition
        expected: status ok
        """
        connect.create_partition(collection, default_tag)

    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.skip(reason="skip temporarily for debug")
    @pytest.mark.timeout(600)
    def test_create_partition_limit(self, connect, collection, args):
        """
        target: test create partitions, check status returned
        method: call function: create_partition for 4097 times
        expected: exception raised
        """
        threads_num = 8
        threads = []
        if args["handler"] == "HTTP":
            pytest.skip("skip in http mode")

        # each worker creates max_partition_num / threads_num partitions
        # through its own client connection
        def create(connect, threads_num):
            for i in range(ut.max_partition_num // threads_num):
                tag_tmp = ut.gen_unique_str()
                connect.create_partition(collection, tag_tmp)

        for i in range(threads_num):
            m = ut.get_milvus(host=args["ip"], port=args["port"], handler=args["handler"])
            t = threading.Thread(target=create, args=(m, threads_num))
            threads.append(t)
            t.start()
        for t in threads:
            t.join()
        # the partition limit is now reached; one more creation must raise
        tag_tmp = ut.gen_unique_str()
        with pytest.raises(Exception) as e:
            connect.create_partition(collection, tag_tmp)

    @pytest.mark.tags(CaseLabel.L0)
    def test_create_partition_repeat(self, connect, collection):
        """
        target: test create partition, check status returned
        method: call function: create_partition
        expected: status ok
        """
        connect.create_partition(collection, default_tag)
        try:
            connect.create_partition(collection, default_tag)
        except Exception as e:
            # error text is produced server-side; the asserted strings must
            # track the deployed Milvus version
            code = getattr(e, 'code', "The exception does not contain the field of code.")
            assert code == 1
            message = getattr(e, 'message', "The exception does not contain the field of message.")
            assert message == "CreatePartition failed: partition name = %s already exists" % default_tag
        assert ut.compare_list_elements(connect.list_partitions(collection), [default_tag, '_default'])

    @pytest.mark.tags(CaseLabel.L2)
    def test_create_partition_collection_not_existed(self, connect):
        """
        target: verify the response when creating a partition with a non_existing collection
        method: create a partition with a non_existing collection
        expected: raise an exception
        """
        collection_name = ut.gen_unique_str()
        try:
            connect.create_partition(collection_name, default_tag)
        except Exception as e:
            code = getattr(e, 'code', "The exception does not contain the field of code.")
            assert code == 1
            message = getattr(e, 'message', "The exception does not contain the field of message.")
            assert message == "CreatePartition failed: can't find collection: %s" % collection_name

    @pytest.mark.tags(CaseLabel.L2)
    def test_create_partition_name_name_none(self, connect, collection):
        """
        target: test create partition, tag name set None, check status returned
        method: call function: create_partition
        expected: status ok
        """
        tag_name = None
        try:
            connect.create_partition(collection, tag_name)
        except Exception as e:
            # client-side validation rejects None before any RPC is made
            assert e.args[0] == "`partition_name` value None is illegal"

    @pytest.mark.tags(CaseLabel.L0)
    def test_create_different_partition_names(self, connect, collection):
        """
        target: test create partition twice with different names
        method: call function: create_partition, and again
        expected: status ok
        """
        connect.create_partition(collection, default_tag)
        tag_name = ut.gen_unique_str()
        connect.create_partition(collection, tag_name)
        assert ut.compare_list_elements(connect.list_partitions(collection), [default_tag, tag_name, '_default'])

    @pytest.mark.tags(CaseLabel.L2)
    def test_create_partition_insert_default(self, connect, id_collection):
        """
        target: test create partition, and insert vectors, check status returned
        method: call function: create_partition
        expected: status ok
        """
        connect.create_partition(id_collection, default_tag)
        ids = [i for i in range(default_nb)]
        result = connect.insert(id_collection, default_entities)
        assert len(result.primary_keys) == len(ids)

    @pytest.mark.tags(CaseLabel.L0)
    def test_create_partition_insert_with_tag(self, connect, id_collection):
        """
        target: test create partition, and insert vectors to specific partition
        method: 1. create_partition
                2. insert data with partition name specified
        expected: insert data successfully
        """
        connect.create_partition(id_collection, default_tag)
        ids = [i for i in range(default_nb)]
        result = connect.insert(id_collection, default_entities, partition_name=default_tag)
        assert len(result.primary_keys) == len(ids)

    @pytest.mark.tags(CaseLabel.L2)
    def test_create_partition_insert_with_tag_not_existed(self, connect, collection):
        """
        target: try to insert data into a non existing partition
        method: 1. create a partition in a collection
                2. try to insert data into non existing partition
        expected: raise an exception
        """
        tag_new = "tag_new"
        connect.create_partition(collection, default_tag)
        ids = [i for i in range(default_nb)]
        try:
            connect.insert(collection, default_entities, partition_name=tag_new)
        except Exception as e:
            code = getattr(e, 'code', "The exception does not contain the field of code.")
            assert code == 1
            message = getattr(e, 'message', "The exception does not contain the field of message.")
            assert message == "partitionID of partitionName:%s can not be find" % tag_new

    @pytest.mark.tags(CaseLabel.L0)
    def test_create_partition_insert_same_tags(self, connect, id_collection):
        """
        target: test create partition, and insert vectors, check status returned
        method: call function: create_partition
        expected: status ok
        """
        connect.create_partition(id_collection, default_tag)
        ids = [i for i in range(default_nb)]
        result = connect.insert(id_collection, default_entities, partition_name=default_tag)
        assert len(result.primary_keys) == default_nb
        ids = [(i+default_nb) for i in range(default_nb)]
        new_result = connect.insert(id_collection, default_entities, partition_name=default_tag)
        assert len(new_result.primary_keys) == default_nb
        # flush so the row count below reflects both inserts
        connect.flush([id_collection])
        res = connect.get_collection_stats(id_collection)
        assert res["row_count"] == default_nb * 2

    @pytest.mark.tags(CaseLabel.L1)
    def test_create_partition_insert_same_tags_two_collections(self, connect, collection):
        """
        target: test create two partitions, and insert vectors with the same tag to
                each collection, check status returned
        method: call function: create_partition
        expected: status ok, collection length is correct
        """
        connect.create_partition(collection, default_tag)
        collection_new = ut.gen_unique_str()
        connect.create_collection(collection_new, default_fields)
        connect.create_partition(collection_new, default_tag)
        result = connect.insert(collection, default_entities, partition_name=default_tag)
        assert len(result.primary_keys) == default_nb
        new_result = connect.insert(collection_new, default_entities, partition_name=default_tag)
        assert len(new_result.primary_keys) == default_nb
        connect.flush([collection, collection_new])
        res = connect.get_collection_stats(collection)
        assert res["row_count"] == default_nb
        res = connect.get_collection_stats(collection_new)
        assert res["row_count"] == default_nb
class TestShowBase:
    """
    ******************************************************************
    The following cases are used to test `list_partitions` function
    ******************************************************************
    """

    @pytest.mark.tags(CaseLabel.L0)
    def test_list_partitions(self, connect, collection):
        """
        target: test show partitions, check status and partitions returned
        method: create partition first, then call function: list_partitions
        expected: status ok, partition correct
        """
        connect.create_partition(collection, default_tag)
        assert ut.compare_list_elements(connect.list_partitions(collection), [default_tag, '_default'])

    @pytest.mark.tags(CaseLabel.L0)
    def test_list_partitions_no_partition(self, connect, collection):
        """
        target: test show partitions with collection name, check status and partitions returned
        method: call function: list_partitions
        expected: status ok, partitions correct
        """
        # a fresh collection always contains the implicit '_default' partition
        res = connect.list_partitions(collection)
        assert ut.compare_list_elements(res, ['_default'])

    @pytest.mark.tags(CaseLabel.L0)
    def test_show_multi_partitions(self, connect, collection):
        """
        target: test show partitions, check status and partitions returned
        method: create partitions first, then call function: list_partitions
        expected: status ok, partitions correct
        """
        tag_new = ut.gen_unique_str()
        connect.create_partition(collection, default_tag)
        connect.create_partition(collection, tag_new)
        res = connect.list_partitions(collection)
        assert ut.compare_list_elements(res, [default_tag, tag_new, '_default'])
class TestHasBase:
    """
    ******************************************************************
    The following cases are used to test `has_partition` function
    ******************************************************************
    """

    # parametrized over invalid strings to exercise tag-name validation
    @pytest.fixture(
        scope="function",
        params=ut.gen_invalid_strs()
    )
    def get_tag_name(self, request):
        yield request.param

    @pytest.mark.tags(CaseLabel.L0)
    def test_has_partition_a(self, connect, collection):
        """
        target: test has_partition, check status and result
        method: create partition first, then call function: has_partition
        expected: status ok, result true
        """
        connect.create_partition(collection, default_tag)
        res = connect.has_partition(collection, default_tag)
        log.info(res)
        assert res

    @pytest.mark.tags(CaseLabel.L0)
    def test_has_partition_multi_partitions(self, connect, collection):
        """
        target: test has_partition, check status and result
        method: create partition first, then call function: has_partition
        expected: status ok, result true
        """
        for tag_name in [default_tag, "tag_new", "tag_new_new"]:
            connect.create_partition(collection, tag_name)
        for tag_name in [default_tag, "tag_new", "tag_new_new"]:
            res = connect.has_partition(collection, tag_name)
            assert res

    @pytest.mark.tags(CaseLabel.L2)
    def test_has_partition_name_not_existed(self, connect, collection):
        """
        target: test has_partition, check status and result
        method: then call function: has_partition, with tag not existed
        expected: status ok, result empty
        """
        res = connect.has_partition(collection, default_tag)
        log.info(res)
        assert not res

    @pytest.mark.tags(CaseLabel.L2)
    def test_has_partition_collection_not_existed(self, connect, collection):
        """
        target: test has_partition, check status and result
        method: then call function: has_partition, with collection not existed
        expected: status not ok
        """
        collection_name = "not_existed_collection"
        try:
            connect.has_partition(collection_name, default_tag)
        except Exception as e:
            # server-side error text; must track the deployed Milvus version
            code = getattr(e, 'code', "The exception does not contain the field of code.")
            assert code == 1
            message = getattr(e, 'message', "The exception does not contain the field of message.")
            assert message == "HasPartition failed: can't find collection: %s" % collection_name

    @pytest.mark.tags(CaseLabel.L2)
    def test_has_partition_with_invalid_tag_name(self, connect, collection, get_tag_name):
        """
        target: test has partition, with invalid tag name, check status returned
        method: call function: has_partition
        expected: status ok
        """
        tag_name = get_tag_name
        connect.create_partition(collection, default_tag)
        with pytest.raises(Exception) as e:
            connect.has_partition(collection, tag_name)
class TestDropBase:
    """
    ******************************************************************
    The following cases are used to test `drop_partition` function
    ******************************************************************
    """

    @pytest.mark.tags(CaseLabel.L0)
    def test_drop_partition_a(self, connect, collection):
        """
        target: test drop partition, check status and partition if existed
        method: create partitions first, then call function: drop_partition
        expected: status ok, no partitions in db
        """
        connect.create_partition(collection, default_tag)
        res1 = connect.list_partitions(collection)
        assert default_tag in res1
        connect.drop_partition(collection, default_tag)
        res2 = connect.list_partitions(collection)
        assert default_tag not in res2

    @pytest.mark.tags(CaseLabel.L2)
    def test_drop_partition_name_not_existed(self, connect, collection):
        """
        target: test drop partition, but tag not existed
        method: create partitions first, then call function: drop_partition
        expected: status not ok
        """
        connect.create_partition(collection, default_tag)
        new_tag = "new_tag"
        try:
            connect.drop_partition(collection, new_tag)
        except Exception as e:
            code = getattr(e, 'code', "The exception does not contain the field of code.")
            assert code == 1
            message = getattr(e, 'message', "The exception does not contain the field of message.")
            assert message == "DropPartition failed: partition %s does not exist" % new_tag

    @pytest.mark.tags(CaseLabel.L2)
    def test_drop_partition_name_not_existed_A(self, connect, collection):
        """
        target: test drop partition, but collection not existed
        method: create partitions first, then call function: drop_partition
        expected: status not ok
        """
        connect.create_partition(collection, default_tag)
        new_collection = ut.gen_unique_str()
        try:
            connect.drop_partition(new_collection, default_tag)
        except Exception as e:
            code = getattr(e, 'code', "The exception does not contain the field of code.")
            assert code == 1
            message = getattr(e, 'message', "The exception does not contain the field of message.")
            assert message == "DropPartition failed: can't find collection: %s" % new_collection

    @pytest.mark.tags(CaseLabel.L2)
    def test_drop_partition_repeatedly(self, connect, collection):
        """
        target: test drop partition twice, check status and partition if existed
        method: create partitions first, then call function: drop_partition
        expected: status not ok, no partitions in db
        """
        connect.create_partition(collection, default_tag)
        connect.drop_partition(collection, default_tag)
        # give the server time to apply the drop before retrying
        time.sleep(2)
        try:
            connect.drop_partition(collection, default_tag)
        except Exception as e:
            code = getattr(e, 'code', "The exception does not contain the field of code.")
            assert code == 1
            message = getattr(e, 'message', "The exception does not contain the field of message.")
            assert message == "DropPartition failed: partition %s does not exist" % default_tag
        tag_list = connect.list_partitions(collection)
        assert default_tag not in tag_list

    @pytest.mark.tags(CaseLabel.L0)
    def test_drop_partition_create(self, connect, collection):
        """
        target: test drop partition, and create again, check status
        method: create partitions first, then call function: drop_partition, create_partition
        expected: status not ok, partition in db
        """
        connect.create_partition(collection, default_tag)
        assert ut.compare_list_elements(connect.list_partitions(collection), [default_tag, '_default'])
        connect.drop_partition(collection, default_tag)
        assert ut.compare_list_elements(connect.list_partitions(collection), ['_default'])
        # give the server time to apply the drop before re-creating
        time.sleep(2)
        connect.create_partition(collection, default_tag)
        assert ut.compare_list_elements(connect.list_partitions(collection), [default_tag, '_default'])
class TestNameInvalid(object):
    """Negative cases: partition APIs called with invalid names."""

    @pytest.fixture(
        scope="function",
        params=ut.gen_invalid_strs()
    )
    def get_tag_name(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=ut.gen_invalid_strs()
    )
    def get_collection_name(self, request):
        yield request.param

    @pytest.mark.tags(CaseLabel.L2)
    def test_drop_partition_with_invalid_collection_name(self, connect, collection, get_collection_name):
        """
        target: test drop partition, with invalid collection name, check status returned
        method: call function: drop_partition
        expected: status not ok
        """
        collection_name = get_collection_name
        connect.create_partition(collection, default_tag)
        with pytest.raises(Exception) as e:
            connect.drop_partition(collection_name, default_tag)

    @pytest.mark.tags(CaseLabel.L2)
    def test_drop_partition_with_invalid_tag_name(self, connect, collection, get_tag_name):
        """
        target: test drop partition, with invalid tag name, check status returned
        method: call function: drop_partition
        expected: status not ok
        """
        tag_name = get_tag_name
        connect.create_partition(collection, default_tag)
        with pytest.raises(Exception) as e:
            connect.drop_partition(collection, tag_name)

    @pytest.mark.tags(CaseLabel.L2)
    def test_list_partitions_with_invalid_collection_name(self, connect, collection, get_collection_name):
        """
        target: test show partitions, with invalid collection name, check status returned
        method: call function: list_partitions
        expected: status not ok
        """
        collection_name = get_collection_name
        connect.create_partition(collection, default_tag)
        with pytest.raises(Exception) as e:
            connect.list_partitions(collection_name)
class TestNewCase(object):
    """Cases around the implicit '_default' partition, which must be undeletable."""

    @pytest.mark.tags(CaseLabel.L2)
    def test_drop_default_partition(self, connect, collection):
        """
        target: test drop partition of default, check status returned
        method: call function: drop_partition
        expected: status not ok
        """
        try:
            connect.drop_partition(collection, partition_name='_default')
        except Exception as e:
            code = getattr(e, 'code', "The exception does not contain the field of code.")
            assert code == 1
            message = getattr(e, 'message', "The exception does not contain the field of message.")
            assert message == "DropPartition failed: default partition cannot be deleted"
        list_partition = connect.list_partitions(collection)
        assert '_default' in list_partition

    @pytest.mark.tags(CaseLabel.L0)
    def test_drop_default_partition_after_manual_create(self, connect, collection):
        """
        target: test drop partition of default, check status returned
        method: call function drop_partition after manual create the default partition
        expected: status not ok
        """
        connect.create_partition(collection, default_tag)
        try:
            connect.drop_partition(collection, partition_name='_default')
        except Exception as e:
            code = getattr(e, 'code', "The exception does not contain the field of code.")
            assert code == 1
            message = getattr(e, 'message', "The exception does not contain the field of message.")
            assert message == "DropPartition failed: default partition cannot be deleted"
        list_partition = connect.list_partitions(collection)
        assert '_default' in list_partition
|
worker.py | import attr
from threading import Thread, Event
from time import time
from ....config import deferred_config
from ....backend_interface.task.development.stop_signal import TaskStopSignal
from ....backend_api.services import tasks
class DevWorker(object):
    """Background helper for tasks running in development mode.

    Runs a single daemon thread that periodically pings the backend to keep
    the task alive and polls for a remote stop signal.
    """

    # NOTE(review): attr.ib used on a class that is not decorated with @attr.s
    # leaves a _CountingAttr object here rather than the string default --
    # verify how `prefix` is actually consumed.
    prefix = attr.ib(type=str, default="MANUAL:")
    # whether stdout should be mirrored to the backend log
    report_stdout = deferred_config('development.worker.log_stdout', True)
    # seconds between status reports (clamped to >= 1s)
    report_period = deferred_config(
        'development.worker.report_period_sec', 30.,
        transform=lambda x: float(max(x, 1.0)))
    # seconds between keep-alive pings (clamped to >= 1s)
    ping_period = deferred_config(
        'development.worker.ping_period_sec', 30.,
        transform=lambda x: float(max(x, 1.0)))

    def __init__(self):
        self._dev_stop_signal = None   # TaskStopSignal, when stop support is on
        self._thread = None            # monitor thread, created by register()
        self._exit_event = Event()     # set by unregister() to wake the thread
        self._task = None              # task being monitored
        self._support_ping = False     # backend supports PingRequest

    def ping(self, timestamp=None):
        """Send a keep-alive ping; returns False on any failure.

        NOTE(review): `timestamp` is accepted but never used.
        """
        try:
            if self._task:
                self._task.send(tasks.PingRequest(self._task.id))
        except Exception:
            return False
        return True

    def register(self, task, stop_signal_support=None):
        """Attach to *task* and start the monitor thread (idempotent).

        Returns True when already/newly registered; returns None when there
        is nothing to monitor (no ping support and no stop signal).
        """
        if self._thread:
            return True
        if (stop_signal_support is None and TaskStopSignal.enabled) or stop_signal_support is True:
            self._dev_stop_signal = TaskStopSignal(task=task)
        self._support_ping = hasattr(tasks, 'PingRequest')
        # if there is nothing to monitor, leave
        if not self._support_ping and not self._dev_stop_signal:
            return
        self._task = task
        self._exit_event.clear()
        self._thread = Thread(target=self._daemon)
        self._thread.daemon = True
        self._thread.start()
        return True

    def _daemon(self):
        # wakes at the shorter of the two periods; exits when unregister()
        # sets the exit event or clears the task
        last_ping = time()
        while self._task is not None:
            try:
                if self._exit_event.wait(min(float(self.ping_period), float(self.report_period))):
                    return
                # send ping request
                if self._support_ping and (time() - last_ping) >= float(self.ping_period):
                    self.ping()
                    last_ping = time()
                if self._dev_stop_signal:
                    stop_reason = self._dev_stop_signal.test()
                    if stop_reason and self._task:
                        self._task._dev_mode_stop_task(stop_reason)
            except Exception:
                # monitoring is best-effort; never let the daemon die
                pass

    def unregister(self):
        """Detach from the task and signal the monitor thread to exit."""
        self._dev_stop_signal = None
        self._task = None
        self._thread = None
        self._exit_event.set()
        return True
|
hdl_listener.py | import asyncio
import threading
import logging
import hdl_component
logger = logging.getLogger(__name__)
class HDLListener(asyncio.DatagramProtocol):
    """UDP listener that feeds incoming HDL datagrams to the component controller."""

    def __init__(self, host, port, components_ctl):
        self.transport = None            # set by connection_made()
        self.host = host
        self.port = port
        self.components_ctl = components_ctl

    def connection_made(self, transport):
        # asyncio protocol callback: keep the transport for later use
        self.transport = transport

    def datagram_received(self, data, addr):
        # forward the raw datagram; errors are logged, never fatal
        try:
            self.components_ctl.update(data)
        except hdl_component.HDLValidationError as e:
            logger.error(f"Update error: {e}")
        except Exception as e:
            logger.error(f"Unhandled exception: {e}")

    def worker(self):
        """Thread target: run a private event loop serving the UDP endpoint."""
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        logger.debug("Starting UDP server")

        # this instance is the protocol; reuse it for the endpoint
        def protocol_factory():
            return self

        listen = loop.create_datagram_endpoint(protocol_factory, local_addr=(self.host, self.port))
        transport, protocol = loop.run_until_complete(listen)
        try:
            loop.run_forever()
        except KeyboardInterrupt:
            pass
        transport.close()

    def run(self):
        """Start worker() on a new thread and return the Thread object."""
        thread = threading.Thread(target=self.worker)
        thread.start()
        return thread
|
http.py | import socket
import requests
from lxml.html import fromstring
import datetime
import sys
import ipaddress
import threading
import os
BLUE, RED, WHITE, YELLOW, MAGENTA, GREEN, END = '\33[1;94m', '\033[1;91m', '\33[1;97m', '\33[1;93m', '\033[1;35m', '\033[1;32m', '\033[0m'
class ThreadManager(object):
    """Hands out IPs from a shared list to the scanner threads.

    NOTE: access to `i` is not synchronized; concurrent scan() threads can
    race on it. Kept as-is to preserve the existing scan-loop behaviour.
    """
    i = 0

    def __init__(self, ipList):
        self.allIps = ipList
        self.size = len(ipList)

    def getNextIp(self):
        """Return the next IP to scan, or 0 when the list is exhausted.

        BUG FIX: the original test `not (self.i >= self.size - 1)` stopped
        one element early, so the last host of every range was never scanned.
        """
        if self.i < self.size:
            ip = self.allIps[self.i]
            self.i += 1
            return ip
        return 0

    def getID(self):
        """1-based progress counter for the status display.

        Clamped to `size` so that core()'s termination check
        (`getID() == allIPs`) still fires once the last IP has been
        handed out.
        """
        return min(self.i + 1, self.size)
def coreOptions():
    """Return the module's configurable options as [name, description, default] rows."""
    return [
        ["network", "IP range to scan", ""],
        ["port-timeout", "Timeout (in sec) for port 80.", "0.3"],
        ["title-timeout", "Timeout (in sec) for title resolve.", "3"],
        ["threads", "Number of threads to run.", "50"],
        ["verbose", "Show verbose output.", "true"],
    ]
def createIPList(network):
net4 = ipaddress.ip_network(network)
ipList = []
for x in net4.hosts():
ipList.append(x)
return ipList
def print1(data):
    """Print *data* only when the module-level `verbose` flag is truthy."""
    if not verbose:
        return
    print(data)
def checkServer(address, port):
    """Probe *address*:*port* with a TCP connect.

    Returns True/False for open/closed, or the string "FAIL" on any other
    error (sentinel kept for compatibility with scan()). Uses the
    module-level `portTimeout` setting.
    """
    probe = socket.socket()
    probe.settimeout(float(portTimeout))
    try:
        probe.connect((address, port))
    except socket.error:
        return False
    except:
        return "FAIL"
    else:
        return True
    finally:
        # runs on every path, so the socket is always released
        probe.close()
def getHTTP(address, port):
    """Fetch http://*address* and return [title, status_code].

    Returns False when the request itself fails. `title`/`code` fall back
    to None individually when they cannot be extracted. `port` is accepted
    for interface compatibility but the request always targets port 80.
    Uses the module-level `titleTimeout` setting.
    """
    code = None
    title = None
    try:
        r = requests.get("http://" + address, timeout=float(titleTimeout), allow_redirects=True)
    except Exception:
        # narrowed from a bare `except:` so Ctrl-C / SystemExit still propagate
        return False
    try:
        code = r.status_code
    except Exception:
        pass
    try:
        tree = fromstring(r.content)
        title = tree.findtext('.//title')
    except Exception:
        pass
    return [title, code]
def writeToFile(line):
    """Append *line* to the module-level log file (`fileName`).

    Uses a context manager so the handle is closed even if write() raises
    (the original leaked the handle on a failed write).
    """
    with open(fileName, "a") as file:
        file.write(line)
def restart_line():
    # carriage return without newline so the next status line overwrites this one
    sys.stdout.write('\r')
    sys.stdout.flush()
def statusWidget():
    # single-line progress display, e.g. "[42.0%] 123 / 254 hosts done.";
    # reads the module globals set up by core()/scan()
    sys.stdout.write(GREEN + "[" + status + "] " + YELLOW + str(threadManager.getID()) + GREEN + " / " + YELLOW + str(
        allIPs) + GREEN + " hosts done." + END)
    restart_line()
    sys.stdout.flush()
def scan(i):
    """Worker thread body: pull IPs from threadManager until exhausted.

    For every host, checks port 80 and (when open) fetches the HTTP response
    code and page title, appending a log line to the shared `logLines` list.
    Relies on module globals set up by core(). The `i` argument (thread
    index) is unused.
    """
    global status
    global openPorts
    global done
    while True:
        if stop:
            sys.exit()
        ip = threadManager.getNextIp()
        if ip == 0:
            break  # list exhausted
        # progress percentage for statusWidget()
        status = (threadManager.getID() / allIPs) * 100
        status = format(round(status, 2))
        status = str(status) + "%"
        stringIP = str(ip)
        isUp = checkServer(stringIP, port)
        if isUp != "FAIL":
            if isUp:
                openPorts = openPorts + 1
                print1(GREEN + "[+] Port 80 is open on '" + stringIP + "'" + END)
                http = getHTTP(stringIP, 80)
                if not http:
                    print1(YELLOW + "[!] Failed to get the HTTP response of '" + stringIP + "'" + END)
                    title = "NO-TITLE"
                    code = "NO-CODE"
                else:
                    # NOTE(review): str() makes these "None" (a string) when
                    # missing, so the `is not None` checks below always pass --
                    # looks like the checks were meant to run before str()
                    title = str(http[0])
                    code = str(http[1])
                    if code is not None:
                        print1(GREEN + "[+] Response code of '" + stringIP + "': '" + code + "'" + END)
                    else:
                        print1(YELLOW + "[!] Failed to get the response code of '" + stringIP + "'" + YELLOW)
                        code = "NO-CODE"
                    if title is not None:
                        title = title.replace("\n", "")
                        try:
                            print1(GREEN + "[+] Title of '" + stringIP + "': '" + title + "'" + END)
                        except:
                            print1(YELLOW + "[!] Failed to print title of '" + stringIP + "'" + END)
                            title = "NO-TITLE"
                    else:
                        print1(YELLOW + "[!] Failed to get title of '" + stringIP + "'" + YELLOW)
                        title = "NO-TITLE"
                logLine = stringIP + " - " + "80 OPEN" + " - " + code + " - " + title + "\n"
                logLines.append(logLine)
            elif not isUp:
                print1(RED + "[-] Port 80 is closed on '" + stringIP + "'" + END)
        else:
            print1(RED + "[!] Failed connecting to '" + stringIP + "'" + END)
    # signal completion to core()'s wait loop (one increment per thread)
    done = done + 1
def core(moduleOptions):
    """HTTP module entry point: scan a subnet for open port 80.

    moduleOptions layout (value lives at index [x][2]):
      [0] subnet CIDR, [1] port timeout, [2] title timeout,
      [3] thread count, [4] verbose flag ("true"/"false").

    Spawns `threadCount` scan() workers, busy-waits painting a status
    line until all finish (or Ctrl-C), then writes the collected log
    lines to a timestamped file under logs/.
    """
    print(
        "\n" + GREEN + "HTTP module by @xdavidhu. Scanning subnet '" + YELLOW + moduleOptions[0][2] + GREEN + "'...\n")
    global status
    global fileName
    global allIPs
    global portTimeout
    global titleTimeout
    global ips
    global threadCount
    global done
    global verbose
    global stop
    global port
    global openPorts
    global logLines
    global threadManager
    logLines = []
    stop = False
    done = 0
    portTimeout = moduleOptions[1][2]
    titleTimeout = moduleOptions[2][2]
    network = moduleOptions[0][2]
    threadCount = int(moduleOptions[3][2])
    verbose = moduleOptions[4][2]
    if verbose == "true":
        verbose = True
    else:
        verbose = False
    try:
        ipList = createIPList(network)
        allIPs = len(ipList)
        if allIPs == 0:
            raise Exception  # empty expansion is treated like a parse error
    except:
        print(RED + "[!] Invalid subnet. Exiting...\n")
        return
    threadManager = ThreadManager(ipList)
    # Timestamp for the log file name; ':' is replaced since it is
    # invalid in filenames on some platforms.
    i = datetime.datetime.now()
    i = str(i).replace(" ", "_")
    i = str(i).replace(":", "-")
    if not os.path.exists("logs"):
        os.makedirs("logs")
    fileName = "logs/log-http-portSpider-" + i + ".log"
    file = open(fileName, 'w')
    file.write("subnet: " + network + "\n")
    file.close()
    port = 80
    openPorts = 0
    threads = []
    for i in range(threadCount):
        i -= 1  # NOTE(review): no effect -- i is rebound next iteration
        t = threading.Thread(target=scan, args=(i,))
        t.daemon = True  # workers must not keep the process alive
        threads.append(t)
        t.start()
    try:
        # Busy-wait repainting the status line until every worker has
        # finished and all addresses were handed out.
        while True:
            if done == threadCount and threadManager.getID() == allIPs:
                break
            statusWidget()
    except KeyboardInterrupt:
        stop = True
        verbose = False
        print("\n" + RED + "[I] Stopping..." + END)
    stop = True
    verbose = False
    # Flush results collected by the workers to the log file.
    for logLine in logLines:
        try:
            writeToFile(logLine)
        except:
            writeToFile("WRITING-ERROR")
    print("\n\n" + GREEN + "[I] HTTP module done. Results saved to '" + YELLOW + fileName + GREEN + "'.\n")
|
lisp.py | # -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp.py
#
# This file contains all constants, definitions, data structures, packet
# send and receive functions for the LISP protocol according to RFC 6830.
#
#------------------------------------------------------------------------------
if 64 - 64: i11iIiiIii
import socket
import time
import struct
import binascii
import hmac
import hashlib
import datetime
import os
import sys
import random
import threading
import operator
import netifaces
import platform
import Queue
import traceback
from Crypto . Cipher import AES
import ecdsa
import json
import commands
import copy
import chacha
import poly1305
from geopy . distance import vincenty
import curve25519
use_chacha = ( os . getenv ( "LISP_USE_CHACHA" ) != None )
use_poly = ( os . getenv ( "LISP_USE_POLY" ) != None )
if 65 - 65: O0 / iIii1I11I1II1 % OoooooooOO - i1IIi
if 73 - 73: II111iiii
if 22 - 22: I1IiiI * Oo0Ooo / OoO0O00 . OoOoOO00 . o0oOOo0O0Ooo / I1ii11iIi11i
if 48 - 48: oO0o / OOooOOo / I11i / Ii1I
lisp_print_rloc_probe_list = False
if 48 - 48: iII111i % IiII + I1Ii111 / ooOoO0o * Ii1I
if 46 - 46: ooOoO0o * I11i - OoooooooOO
if 30 - 30: o0oOOo0O0Ooo - O0 % o0oOOo0O0Ooo - OoooooooOO * O0 * OoooooooOO
if 60 - 60: iIii1I11I1II1 / i1IIi * oO0o - I1ii11iIi11i + o0oOOo0O0Ooo
if 94 - 94: i1IIi % Oo0Ooo
if 68 - 68: Ii1I / O0
lisp_hostname = ""
lisp_version = ""
lisp_uptime = ""
lisp_i_am_core = False
lisp_i_am_itr = False
lisp_i_am_etr = False
lisp_i_am_rtr = False
lisp_i_am_mr = False
lisp_i_am_ms = False
lisp_i_am_ddt = False
lisp_log_id = ""
lisp_debug_logging = True
if 46 - 46: O0 * II111iiii / IiII * Oo0Ooo * iII111i . I11i
lisp_map_notify_queue = { }
lisp_map_servers_list = { }
lisp_ddt_map_requestQ = { }
lisp_db_list = [ ]
lisp_group_mapping_list = { }
lisp_map_resolvers_list = { }
lisp_rtr_list = { }
lisp_elp_list = { }
lisp_rle_list = { }
lisp_geo_list = { }
lisp_json_list = { }
lisp_myrlocs = [ None , None , None ]
lisp_mymacs = { }
if 62 - 62: i11iIiiIii - II111iiii % I1Ii111 - iIii1I11I1II1 . I1ii11iIi11i . II111iiii
if 61 - 61: oO0o / OoOoOO00 / iII111i * OoO0O00 . II111iiii
if 1 - 1: II111iiii - I1ii11iIi11i % i11iIiiIii + IiII . I1Ii111
if 55 - 55: iIii1I11I1II1 - I1IiiI . Ii1I * IiII * i1IIi / iIii1I11I1II1
if 79 - 79: oO0o + I1Ii111 . ooOoO0o * IiII % I11i . I1IiiI
lisp_myinterfaces = { }
lisp_iid_to_interface = { }
lisp_multi_tenant_interfaces = [ ]
if 94 - 94: iII111i * Ii1I / IiII . i1IIi * iII111i
lisp_test_mr_timer = None
lisp_rloc_probe_timer = None
if 47 - 47: i1IIi % i11iIiiIii
if 20 - 20: ooOoO0o * II111iiii
if 65 - 65: o0oOOo0O0Ooo * iIii1I11I1II1 * ooOoO0o
if 18 - 18: iIii1I11I1II1 / I11i + oO0o / Oo0Ooo - II111iiii - I11i
lisp_registered_count = 0
if 1 - 1: I11i - OOooOOo % O0 + I1IiiI - iII111i / I11i
if 31 - 31: OoO0O00 + II111iiii
if 13 - 13: OOooOOo * oO0o * I1IiiI
if 55 - 55: II111iiii
lisp_info_sources_by_address = { }
lisp_info_sources_by_nonce = { }
if 43 - 43: OoOoOO00 - i1IIi + I1Ii111 + Ii1I
if 17 - 17: o0oOOo0O0Ooo
if 64 - 64: Ii1I % i1IIi % OoooooooOO
if 3 - 3: iII111i + O0
if 42 - 42: OOooOOo / i1IIi + i11iIiiIii - Ii1I
if 78 - 78: OoO0O00
lisp_crypto_keys_by_nonce = { }
lisp_crypto_keys_by_rloc_encap = { }
lisp_crypto_keys_by_rloc_decap = { }
lisp_data_plane_security = False
lisp_search_decap_keys = True
if 18 - 18: O0 - iII111i / iII111i + ooOoO0o % ooOoO0o - IiII
lisp_data_plane_logging = False
lisp_frame_logging = False
lisp_flow_logging = False
if 62 - 62: iII111i - IiII - OoOoOO00 % i1IIi / oO0o
if 77 - 77: II111iiii - II111iiii . I1IiiI / o0oOOo0O0Ooo
if 14 - 14: I11i % O0
if 41 - 41: i1IIi + I1Ii111 + OOooOOo - IiII
if 77 - 77: Oo0Ooo . IiII % ooOoO0o
if 42 - 42: oO0o - i1IIi / i11iIiiIii + OOooOOo + OoO0O00
if 17 - 17: oO0o . Oo0Ooo . I1ii11iIi11i
lisp_crypto_ephem_port = None
if 3 - 3: OoOoOO00 . Oo0Ooo . I1IiiI / Ii1I
if 38 - 38: II111iiii % i11iIiiIii . ooOoO0o - OOooOOo + Ii1I
if 66 - 66: OoooooooOO * OoooooooOO . OOooOOo . i1IIi - OOooOOo
if 77 - 77: I11i - iIii1I11I1II1
lisp_pitr = False
if 82 - 82: i11iIiiIii . OOooOOo / Oo0Ooo * O0 % oO0o % iIii1I11I1II1
if 78 - 78: iIii1I11I1II1 - Ii1I * OoO0O00 + o0oOOo0O0Ooo + iII111i + iII111i
if 11 - 11: iII111i - OoO0O00 % ooOoO0o % iII111i / OoOoOO00 - OoO0O00
if 74 - 74: iII111i * O0
lisp_l2_overlay = False
if 89 - 89: oO0o + Oo0Ooo
if 3 - 3: i1IIi / I1IiiI % I11i * i11iIiiIii / O0 * I11i
if 49 - 49: oO0o % Ii1I + i1IIi . I1IiiI % I1ii11iIi11i
if 48 - 48: I11i + I11i / II111iiii / iIii1I11I1II1
if 20 - 20: o0oOOo0O0Ooo
lisp_rloc_probing = False
lisp_rloc_probe_list = { }
if 77 - 77: OoOoOO00 / I11i
if 98 - 98: iIii1I11I1II1 / i1IIi / i11iIiiIii / o0oOOo0O0Ooo
if 28 - 28: OOooOOo - IiII . IiII + OoOoOO00 - OoooooooOO + O0
if 95 - 95: OoO0O00 % oO0o . O0
if 15 - 15: ooOoO0o / Ii1I . Ii1I - i1IIi
if 53 - 53: IiII + I1IiiI * oO0o
lisp_register_all_rtrs = True
if 61 - 61: i1IIi * OOooOOo / OoooooooOO . i11iIiiIii . OoOoOO00
if 60 - 60: I11i / I11i
if 46 - 46: Ii1I * OOooOOo - OoO0O00 * oO0o - I1Ii111
if 83 - 83: OoooooooOO
lisp_nonce_echoing = False
lisp_nonce_echo_list = { }
if 31 - 31: II111iiii - OOooOOo . I1Ii111 % OoOoOO00 - O0
if 4 - 4: II111iiii / ooOoO0o . iII111i
if 58 - 58: OOooOOo * i11iIiiIii / OoOoOO00 % I1Ii111 - I1ii11iIi11i / oO0o
if 50 - 50: I1IiiI
lisp_nat_traversal = False
if 34 - 34: I1IiiI * II111iiii % iII111i * OoOoOO00 - I1IiiI
if 33 - 33: o0oOOo0O0Ooo + OOooOOo * OoO0O00 - Oo0Ooo / oO0o % Ii1I
if 21 - 21: OoO0O00 * iIii1I11I1II1 % oO0o * i1IIi
if 16 - 16: O0 - I1Ii111 * iIii1I11I1II1 + iII111i
if 50 - 50: II111iiii - ooOoO0o * I1ii11iIi11i / I1Ii111 + o0oOOo0O0Ooo
if 88 - 88: Ii1I / I1Ii111 + iII111i - II111iiii / ooOoO0o - OoOoOO00
if 15 - 15: I1ii11iIi11i + OoOoOO00 - OoooooooOO / OOooOOo
if 58 - 58: i11iIiiIii % I11i
lisp_program_hardware = False
if 71 - 71: OOooOOo + ooOoO0o % i11iIiiIii + I1ii11iIi11i - IiII
if 88 - 88: OoOoOO00 - OoO0O00 % OOooOOo
if 16 - 16: I1IiiI * oO0o % IiII
if 86 - 86: I1IiiI + Ii1I % i11iIiiIii * oO0o . ooOoO0o * I11i
lisp_checkpoint_map_cache = False
lisp_checkpoint_filename = "./lisp.checkpoint"
if 44 - 44: oO0o
if 88 - 88: I1Ii111 % Ii1I . II111iiii
if 38 - 38: o0oOOo0O0Ooo
if 57 - 57: O0 / oO0o * I1Ii111 / OoOoOO00 . II111iiii
lisp_ipc_data_plane = False
lisp_ipc_dp_socket = None
lisp_ipc_dp_socket_name = "lisp-ipc-data-plane"
if 26 - 26: iII111i
if 91 - 91: OoO0O00 . I1ii11iIi11i + OoO0O00 - iII111i / OoooooooOO
if 39 - 39: I1ii11iIi11i / ooOoO0o - II111iiii
if 98 - 98: I1ii11iIi11i / I11i % oO0o . OoOoOO00
if 91 - 91: oO0o % Oo0Ooo
lisp_ipc_lock = None
if 64 - 64: I11i % iII111i - I1Ii111 - oO0o
if 31 - 31: I11i - II111iiii . I11i
if 18 - 18: o0oOOo0O0Ooo
if 98 - 98: iII111i * iII111i / iII111i + I11i
if 34 - 34: ooOoO0o
if 15 - 15: I11i * ooOoO0o * Oo0Ooo % i11iIiiIii % OoOoOO00 - OOooOOo
lisp_default_iid = 0
if 68 - 68: I1Ii111 % i1IIi . IiII . I1ii11iIi11i
if 92 - 92: iII111i . I1Ii111
if 31 - 31: I1Ii111 . OoOoOO00 / O0
if 89 - 89: OoOoOO00
if 68 - 68: OoO0O00 * OoooooooOO % O0 + OoO0O00 + ooOoO0o
lisp_ms_rtr_list = [ ]
if 4 - 4: ooOoO0o + O0 * OOooOOo
if 55 - 55: Oo0Ooo + iIii1I11I1II1 / OoOoOO00 * oO0o - i11iIiiIii - Ii1I
if 25 - 25: I1ii11iIi11i
if 7 - 7: i1IIi / I1IiiI * I1Ii111 . IiII . iIii1I11I1II1
if 13 - 13: OOooOOo / i11iIiiIii
if 2 - 2: I1IiiI / O0 / o0oOOo0O0Ooo % OoOoOO00 % Ii1I
lisp_nat_state_info = { }
if 52 - 52: o0oOOo0O0Ooo
if 95 - 95: Ii1I
if 87 - 87: ooOoO0o + OoOoOO00 . OOooOOo + OoOoOO00
if 91 - 91: O0
lisp_last_map_request_sent = None
if 61 - 61: II111iiii
if 64 - 64: ooOoO0o / OoOoOO00 - O0 - I11i
if 86 - 86: I11i % OoOoOO00 / I1IiiI / OoOoOO00
if 42 - 42: OoO0O00
LISP_FLOW_LOG_SIZE = 100
lisp_flow_log = [ ]
if 67 - 67: I1Ii111 . iII111i . O0
if 10 - 10: I1ii11iIi11i % I1ii11iIi11i - iIii1I11I1II1 / OOooOOo + Ii1I
if 87 - 87: oO0o * I1ii11iIi11i + OOooOOo / iIii1I11I1II1 / iII111i
if 37 - 37: iII111i - ooOoO0o * oO0o % i11iIiiIii - I1Ii111
lisp_policies = { }
if 83 - 83: I11i / I1IiiI
if 34 - 34: IiII
if 57 - 57: oO0o . I11i . i1IIi
if 42 - 42: I11i + I1ii11iIi11i % O0
if 6 - 6: oO0o
lisp_load_split_pings = False
if 68 - 68: OoOoOO00 - OoO0O00
if 28 - 28: OoO0O00 . OOooOOo / OOooOOo + Oo0Ooo . I1ii11iIi11i
if 1 - 1: iIii1I11I1II1 / II111iiii
if 33 - 33: I11i
if 18 - 18: o0oOOo0O0Ooo % iII111i * O0
if 87 - 87: i11iIiiIii
lisp_eid_hashes = [ ]
if 93 - 93: I1ii11iIi11i - OoO0O00 % i11iIiiIii . iII111i / iII111i - I1Ii111
if 9 - 9: I1ii11iIi11i / Oo0Ooo - I1IiiI / OoooooooOO / iIii1I11I1II1 - o0oOOo0O0Ooo
if 91 - 91: iII111i % i1IIi % iIii1I11I1II1
if 20 - 20: OOooOOo % Ii1I / Ii1I + Ii1I
if 45 - 45: oO0o - IiII - OoooooooOO - OoO0O00 . II111iiii / O0
if 51 - 51: O0 + iII111i
if 8 - 8: oO0o * OoOoOO00 - Ii1I - OoO0O00 * OOooOOo % I1IiiI
if 48 - 48: O0
lisp_reassembly_queue = { }
if 11 - 11: I11i + OoooooooOO - OoO0O00 / o0oOOo0O0Ooo + Oo0Ooo . II111iiii
if 41 - 41: Ii1I - O0 - O0
if 68 - 68: OOooOOo % I1Ii111
if 88 - 88: iIii1I11I1II1 - ooOoO0o + OOooOOo
if 40 - 40: I1IiiI * Ii1I + OOooOOo % iII111i
if 74 - 74: oO0o - Oo0Ooo + OoooooooOO + I1Ii111 / OoOoOO00
if 23 - 23: O0
lisp_pubsub_cache = { }
if 85 - 85: Ii1I
if 84 - 84: I1IiiI . iIii1I11I1II1 % OoooooooOO + Ii1I % OoooooooOO % OoO0O00
if 42 - 42: OoO0O00 / I11i / o0oOOo0O0Ooo + iII111i / OoOoOO00
if 84 - 84: ooOoO0o * II111iiii + Oo0Ooo
if 53 - 53: iII111i % II111iiii . IiII - iIii1I11I1II1 - IiII * II111iiii
if 77 - 77: iIii1I11I1II1 * OoO0O00
lisp_decent_push_configured = False
if 95 - 95: I1IiiI + i11iIiiIii
if 6 - 6: ooOoO0o / i11iIiiIii + iII111i * oO0o
if 80 - 80: II111iiii
if 83 - 83: I11i . i11iIiiIii + II111iiii . o0oOOo0O0Ooo * I11i
if 53 - 53: II111iiii
if 31 - 31: OoO0O00
lisp_decent_modulus = 0
lisp_decent_dns_suffix = None
if 80 - 80: I1Ii111 . i11iIiiIii - o0oOOo0O0Ooo
if 25 - 25: OoO0O00
if 62 - 62: OOooOOo + O0
if 98 - 98: o0oOOo0O0Ooo
if 51 - 51: Oo0Ooo - oO0o + II111iiii * Ii1I . I11i + oO0o
if 78 - 78: i11iIiiIii / iII111i - Ii1I / OOooOOo + oO0o
lisp_ipc_socket = None
if 82 - 82: Ii1I
if 46 - 46: OoooooooOO . i11iIiiIii
if 94 - 94: o0oOOo0O0Ooo * Ii1I / Oo0Ooo / Ii1I
if 87 - 87: Oo0Ooo . IiII
lisp_ms_encryption_keys = { }
if 75 - 75: ooOoO0o + OoOoOO00 + o0oOOo0O0Ooo * I11i % oO0o . iII111i
if 55 - 55: OOooOOo . I1IiiI
if 61 - 61: Oo0Ooo % IiII . Oo0Ooo
if 100 - 100: I1Ii111 * O0
if 64 - 64: OOooOOo % iIii1I11I1II1 * oO0o
if 79 - 79: O0
if 78 - 78: I1ii11iIi11i + OOooOOo - I1Ii111
if 38 - 38: o0oOOo0O0Ooo - oO0o + iIii1I11I1II1 / OoOoOO00 % Oo0Ooo
if 57 - 57: OoO0O00 / ooOoO0o
if 29 - 29: iIii1I11I1II1 + OoOoOO00 * OoO0O00 * OOooOOo . I1IiiI * I1IiiI
if 7 - 7: IiII * I1Ii111 % Ii1I - o0oOOo0O0Ooo
if 13 - 13: Ii1I . i11iIiiIii
if 56 - 56: I1ii11iIi11i % O0 - I1IiiI
if 100 - 100: Ii1I - O0 % oO0o * OOooOOo + I1IiiI
lisp_rtr_nat_trace_cache = { }
if 88 - 88: OoooooooOO - OoO0O00 * O0 * OoooooooOO . OoooooooOO
if 33 - 33: I1Ii111 + iII111i * oO0o / iIii1I11I1II1 - I1IiiI
if 54 - 54: I1Ii111 / OOooOOo . oO0o % iII111i
if 57 - 57: i11iIiiIii . I1ii11iIi11i - Ii1I - oO0o + OoOoOO00
if 63 - 63: OoOoOO00 * iII111i
if 69 - 69: O0 . OoO0O00
if 49 - 49: I1IiiI - I11i
if 74 - 74: iIii1I11I1II1 * I1ii11iIi11i + OoOoOO00 / i1IIi / II111iiii . Oo0Ooo
if 62 - 62: OoooooooOO * I1IiiI
lisp_glean_mappings = [ ]
if 58 - 58: OoOoOO00 % o0oOOo0O0Ooo
if 50 - 50: I1Ii111 . o0oOOo0O0Ooo
if 97 - 97: O0 + OoOoOO00
if 89 - 89: o0oOOo0O0Ooo + OoO0O00 * I11i * Ii1I
if 37 - 37: OoooooooOO - O0 - o0oOOo0O0Ooo
if 77 - 77: OOooOOo * iIii1I11I1II1
LISP_DATA_PORT = 4341
LISP_CTRL_PORT = 4342
LISP_L2_DATA_PORT = 8472
LISP_VXLAN_DATA_PORT = 4789
LISP_VXLAN_GPE_PORT = 4790
LISP_TRACE_PORT = 2434
if 98 - 98: I1IiiI % Ii1I * OoooooooOO
if 51 - 51: iIii1I11I1II1 . OoOoOO00 / oO0o + o0oOOo0O0Ooo
if 33 - 33: ooOoO0o . II111iiii % iII111i + o0oOOo0O0Ooo
if 71 - 71: Oo0Ooo % OOooOOo
LISP_MAP_REQUEST = 1
LISP_MAP_REPLY = 2
LISP_MAP_REGISTER = 3
LISP_MAP_NOTIFY = 4
LISP_MAP_NOTIFY_ACK = 5
LISP_MAP_REFERRAL = 6
LISP_NAT_INFO = 7
LISP_ECM = 8
LISP_TRACE = 9
if 98 - 98: I11i % i11iIiiIii % ooOoO0o + Ii1I
if 78 - 78: I1ii11iIi11i % oO0o / iII111i - iIii1I11I1II1
if 69 - 69: I1Ii111
if 11 - 11: I1IiiI
LISP_NO_ACTION = 0
LISP_NATIVE_FORWARD_ACTION = 1
LISP_SEND_MAP_REQUEST_ACTION = 2
LISP_DROP_ACTION = 3
LISP_POLICY_DENIED_ACTION = 4
LISP_AUTH_FAILURE_ACTION = 5
if 16 - 16: Ii1I + IiII * O0 % i1IIi . I1IiiI
lisp_map_reply_action_string = [ "no-action" , "native-forward" ,
"send-map-request" , "drop-action" , "policy-denied" , "auth-failure" ]
if 67 - 67: OoooooooOO / I1IiiI * Ii1I + I11i
if 65 - 65: OoooooooOO - I1ii11iIi11i / ooOoO0o / II111iiii / i1IIi
if 71 - 71: I1Ii111 + Ii1I
if 28 - 28: OOooOOo
LISP_NONE_ALG_ID = 0
LISP_SHA_1_96_ALG_ID = 1
LISP_SHA_256_128_ALG_ID = 2
LISP_MD5_AUTH_DATA_LEN = 16
LISP_SHA1_160_AUTH_DATA_LEN = 20
LISP_SHA2_256_AUTH_DATA_LEN = 32
if 38 - 38: ooOoO0o % II111iiii % I11i / OoO0O00 + OoOoOO00 / i1IIi
if 54 - 54: iIii1I11I1II1 % I1ii11iIi11i - OOooOOo / oO0o - OoO0O00 . I11i
if 11 - 11: I1ii11iIi11i . OoO0O00 * IiII * OoooooooOO + ooOoO0o
if 33 - 33: O0 * o0oOOo0O0Ooo - I1Ii111 % I1Ii111
LISP_LCAF_NULL_TYPE = 0
LISP_LCAF_AFI_LIST_TYPE = 1
LISP_LCAF_INSTANCE_ID_TYPE = 2
LISP_LCAF_ASN_TYPE = 3
LISP_LCAF_APP_DATA_TYPE = 4
LISP_LCAF_GEO_COORD_TYPE = 5
LISP_LCAF_OPAQUE_TYPE = 6
LISP_LCAF_NAT_TYPE = 7
LISP_LCAF_NONCE_LOC_TYPE = 8
LISP_LCAF_MCAST_INFO_TYPE = 9
LISP_LCAF_ELP_TYPE = 10
LISP_LCAF_SECURITY_TYPE = 11
LISP_LCAF_SOURCE_DEST_TYPE = 12
LISP_LCAF_RLE_TYPE = 13
LISP_LCAF_JSON_TYPE = 14
LISP_LCAF_KV_TYPE = 15
LISP_LCAF_ENCAP_TYPE = 16
if 18 - 18: I1Ii111 / Oo0Ooo * I1Ii111 + I1Ii111 * i11iIiiIii * I1ii11iIi11i
if 11 - 11: ooOoO0o / OoOoOO00 - IiII * OoooooooOO + OoooooooOO . OoOoOO00
if 26 - 26: Ii1I % I1ii11iIi11i
if 76 - 76: IiII * iII111i
LISP_MR_TTL = ( 24 * 60 )
LISP_REGISTER_TTL = 3
LISP_SHORT_TTL = 1
LISP_NMR_TTL = 15
LISP_GLEAN_TTL = 15
if 52 - 52: OOooOOo
LISP_SITE_TIMEOUT_CHECK_INTERVAL = 60
LISP_PUBSUB_TIMEOUT_CHECK_INTERVAL = 60
LISP_REFERRAL_TIMEOUT_CHECK_INTERVAL = 60
LISP_TEST_MR_INTERVAL = 60
LISP_MAP_NOTIFY_INTERVAL = 2
LISP_DDT_MAP_REQUEST_INTERVAL = 2
LISP_MAX_MAP_NOTIFY_RETRIES = 3
LISP_INFO_INTERVAL = 15
LISP_MAP_REQUEST_RATE_LIMIT = 5
if 19 - 19: I1IiiI
LISP_RLOC_PROBE_TTL = 64
LISP_RLOC_PROBE_INTERVAL = 10
LISP_RLOC_PROBE_REPLY_WAIT = 15
if 25 - 25: Ii1I / ooOoO0o
LISP_DEFAULT_DYN_EID_TIMEOUT = 15
LISP_NONCE_ECHO_INTERVAL = 10
if 31 - 31: OOooOOo . O0 % I1IiiI . o0oOOo0O0Ooo + IiII
if 71 - 71: I1Ii111 . II111iiii
if 62 - 62: OoooooooOO . I11i
if 61 - 61: OoOoOO00 - OOooOOo - i1IIi
if 25 - 25: O0 * I11i + I1ii11iIi11i . o0oOOo0O0Ooo . o0oOOo0O0Ooo
if 58 - 58: I1IiiI
if 53 - 53: i1IIi
if 59 - 59: o0oOOo0O0Ooo
if 81 - 81: OoOoOO00 - OoOoOO00 . iII111i
if 73 - 73: I11i % i11iIiiIii - I1IiiI
if 7 - 7: O0 * i11iIiiIii * Ii1I + ooOoO0o % OoO0O00 - ooOoO0o
if 39 - 39: Oo0Ooo * OOooOOo % OOooOOo - OoooooooOO + o0oOOo0O0Ooo - I11i
if 23 - 23: i11iIiiIii
if 30 - 30: o0oOOo0O0Ooo - i1IIi % II111iiii + I11i * iIii1I11I1II1
if 81 - 81: IiII % i1IIi . iIii1I11I1II1
if 4 - 4: i11iIiiIii % OoO0O00 % i1IIi / IiII
if 6 - 6: iII111i / I1IiiI % OOooOOo - I1IiiI
if 31 - 31: OOooOOo
if 23 - 23: I1Ii111 . IiII
if 92 - 92: OoOoOO00 + I1Ii111 * Ii1I % I1IiiI
if 42 - 42: Oo0Ooo
if 76 - 76: I1IiiI * iII111i % I1Ii111
if 57 - 57: iIii1I11I1II1 - i1IIi / I1Ii111 - O0 * OoooooooOO % II111iiii
if 68 - 68: OoooooooOO * I11i % OoOoOO00 - IiII
if 34 - 34: I1Ii111 . iIii1I11I1II1 * OoOoOO00 * oO0o / I1Ii111 / I1ii11iIi11i
if 78 - 78: Oo0Ooo - o0oOOo0O0Ooo / OoOoOO00
if 10 - 10: iII111i + Oo0Ooo * I1ii11iIi11i + iIii1I11I1II1 / I1Ii111 / I1ii11iIi11i
if 42 - 42: I1IiiI
if 38 - 38: OOooOOo + II111iiii % ooOoO0o % OoOoOO00 - Ii1I / OoooooooOO
if 73 - 73: o0oOOo0O0Ooo * O0 - i11iIiiIii
if 85 - 85: Ii1I % iII111i + I11i / o0oOOo0O0Ooo . oO0o + OOooOOo
if 62 - 62: i11iIiiIii + i11iIiiIii - o0oOOo0O0Ooo
if 28 - 28: iII111i . iII111i % iIii1I11I1II1 * iIii1I11I1II1 . o0oOOo0O0Ooo / iII111i
if 27 - 27: OoO0O00 + ooOoO0o - i1IIi
if 69 - 69: IiII - O0 % I1ii11iIi11i + i11iIiiIii . OoOoOO00 / OoO0O00
if 79 - 79: O0 * i11iIiiIii - IiII / IiII
if 48 - 48: O0
if 93 - 93: i11iIiiIii - I1IiiI * I1ii11iIi11i * I11i % O0 + OoooooooOO
if 25 - 25: IiII + Ii1I / ooOoO0o . o0oOOo0O0Ooo % O0 * OoO0O00
if 84 - 84: ooOoO0o % Ii1I + i11iIiiIii
if 28 - 28: Oo0Ooo + OoO0O00 * OOooOOo % oO0o . I11i % O0
if 16 - 16: I11i - iIii1I11I1II1 / I1IiiI . II111iiii + iIii1I11I1II1
if 19 - 19: OoO0O00 - Oo0Ooo . O0
if 60 - 60: II111iiii + Oo0Ooo
if 9 - 9: ooOoO0o * OoooooooOO - iIii1I11I1II1 + OoOoOO00 / OoO0O00 . OoO0O00
if 49 - 49: II111iiii
if 25 - 25: OoooooooOO - I1IiiI . I1IiiI * oO0o
if 81 - 81: iII111i + IiII
if 98 - 98: I1IiiI
LISP_CS_1024 = 0
LISP_CS_1024_G = 2
LISP_CS_1024_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF
if 95 - 95: ooOoO0o / ooOoO0o
LISP_CS_2048_CBC = 1
LISP_CS_2048_CBC_G = 2
LISP_CS_2048_CBC_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF
if 30 - 30: I1ii11iIi11i + Oo0Ooo / Oo0Ooo % I1ii11iIi11i . I1ii11iIi11i
LISP_CS_25519_CBC = 2
LISP_CS_2048_GCM = 3
if 55 - 55: ooOoO0o - I11i + II111iiii + iII111i % Ii1I
LISP_CS_3072 = 4
LISP_CS_3072_G = 2
LISP_CS_3072_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF
if 41 - 41: i1IIi - I11i - Ii1I
LISP_CS_25519_GCM = 5
LISP_CS_25519_CHACHA = 6
if 8 - 8: OoO0O00 + I1Ii111 - o0oOOo0O0Ooo % Oo0Ooo % o0oOOo0O0Ooo * oO0o
LISP_4_32_MASK = 0xFFFFFFFF
LISP_8_64_MASK = 0xFFFFFFFFFFFFFFFF
LISP_16_128_MASK = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
if 9 - 9: Oo0Ooo - i11iIiiIii - OOooOOo * Ii1I + ooOoO0o
if 44 - 44: II111iiii
if 52 - 52: I1ii11iIi11i - Oo0Ooo + I1ii11iIi11i % o0oOOo0O0Ooo
if 35 - 35: iIii1I11I1II1
if 42 - 42: I1Ii111 . I1IiiI . i1IIi + OoOoOO00 + OOooOOo + I1IiiI
if 31 - 31: iII111i . OOooOOo - ooOoO0o . OoooooooOO / OoooooooOO
if 56 - 56: OoO0O00 / oO0o / i11iIiiIii + OoooooooOO - Oo0Ooo - I11i
if 21 - 21: O0 % IiII . I1IiiI / II111iiii + IiII
def lisp_record_traceback ( * args ) :
    """Uncaught-exception hook (installed via lisp_set_exception):
    append the most recent traceback to ./logs/lisp-traceback.log and
    also try to echo it to stdout."""
    # Millisecond-precision timestamp (strip last 3 microsecond digits).
    OOOO0O00o = datetime . datetime . now ( ) . strftime ( "%m/%d/%y %H:%M:%S.%f" ) [ : - 3 ]
    ooo = open ( "./logs/lisp-traceback.log" , "a" )
    ooo . write ( "---------- Exception occurred: {} ----------\n" . format ( OOOO0O00o ) )
    try :
        traceback . print_last ( file = ooo )
    except :
        # print_last raises if no exception is being handled.
        ooo . write ( "traceback.print_last(file=fd) failed" )
    if 19 - 19: OoO0O00 - Oo0Ooo . oO0o / oO0o % ooOoO0o
    try :
        traceback . print_last ( )
    except :
        print ( "traceback.print_last() failed" )
    if 56 - 56: I1IiiI . O0 + Oo0Ooo
    ooo . close ( )
    return
if 1 - 1: iII111i
if 97 - 97: OOooOOo + iII111i + O0 + i11iIiiIii
if 77 - 77: o0oOOo0O0Ooo / OoooooooOO
if 46 - 46: o0oOOo0O0Ooo % iIii1I11I1II1 . iII111i % iII111i + i11iIiiIii
if 72 - 72: iIii1I11I1II1 * Ii1I % ooOoO0o / OoO0O00
if 35 - 35: ooOoO0o + i1IIi % I1ii11iIi11i % I11i + oO0o
if 17 - 17: i1IIi
def lisp_set_exception ( ) :
    """Install lisp_record_traceback as the process-wide handler for
    uncaught exceptions."""
    sys . excepthook = lisp_record_traceback
    return
if 21 - 21: Oo0Ooo
if 29 - 29: I11i / II111iiii / ooOoO0o * OOooOOo
if 10 - 10: I1Ii111 % IiII * IiII . I11i / Ii1I % OOooOOo
if 49 - 49: OoO0O00 / oO0o + O0 * o0oOOo0O0Ooo
if 28 - 28: ooOoO0o + i11iIiiIii / I11i % OoOoOO00 % Oo0Ooo - O0
if 54 - 54: i1IIi + II111iiii
if 83 - 83: I1ii11iIi11i - I1IiiI + OOooOOo
def lisp_is_raspbian ( ) :
    """True when running on Raspbian: reports distro "debian" on an ARM
    machine type.  NOTE(review): platform.dist() was removed in Python
    3.8 -- this file targets Python 2 (see print statements below)."""
    if ( platform . dist ( ) [ 0 ] != "debian" ) : return ( False )
    return ( platform . machine ( ) in [ "armv6l" , "armv7l" ] )
if 5 - 5: Ii1I
if 46 - 46: IiII
if 45 - 45: ooOoO0o
if 21 - 21: oO0o . I1Ii111 . OOooOOo / Oo0Ooo / I1Ii111
if 17 - 17: OOooOOo / OOooOOo / I11i
if 1 - 1: i1IIi . i11iIiiIii % OOooOOo
if 82 - 82: iIii1I11I1II1 + Oo0Ooo . iIii1I11I1II1 % IiII / Ii1I . Ii1I
def lisp_is_ubuntu ( ) :
    """True when the distro reports itself as Ubuntu.
    NOTE(review): platform.dist() was removed in Python 3.8."""
    return ( platform . dist ( ) [ 0 ] == "Ubuntu" )
if 14 - 14: o0oOOo0O0Ooo . OOooOOo . I11i + OoooooooOO - OOooOOo + IiII
if 9 - 9: Ii1I
if 59 - 59: I1IiiI * II111iiii . O0
if 56 - 56: Ii1I - iII111i % I1IiiI - o0oOOo0O0Ooo
if 51 - 51: O0 / ooOoO0o * iIii1I11I1II1 + I1ii11iIi11i + o0oOOo0O0Ooo
if 98 - 98: iIii1I11I1II1 * I1ii11iIi11i * OOooOOo + ooOoO0o % i11iIiiIii % O0
if 27 - 27: O0
def lisp_is_fedora ( ) :
    """True when the distro reports itself as Fedora.
    NOTE(review): platform.dist() was removed in Python 3.8."""
    return ( platform . dist ( ) [ 0 ] == "fedora" )
if 79 - 79: o0oOOo0O0Ooo - I11i + o0oOOo0O0Ooo . oO0o
if 28 - 28: i1IIi - iII111i
if 54 - 54: iII111i - O0 % OOooOOo
if 73 - 73: O0 . OoOoOO00 + I1IiiI - I11i % I11i . I11i
if 17 - 17: Ii1I - OoooooooOO % Ii1I . IiII / i11iIiiIii % iII111i
if 28 - 28: I11i
if 58 - 58: OoOoOO00
def lisp_is_centos ( ) :
    """True when the distro reports itself as CentOS.
    NOTE(review): platform.dist() was removed in Python 3.8."""
    return ( platform . dist ( ) [ 0 ] == "centos" )
if 37 - 37: Oo0Ooo - iIii1I11I1II1 / I1ii11iIi11i
if 73 - 73: i11iIiiIii - IiII
if 25 - 25: OoooooooOO + IiII * I1ii11iIi11i
if 92 - 92: I1IiiI + I11i + O0 / o0oOOo0O0Ooo + I1Ii111
if 18 - 18: ooOoO0o * OoOoOO00 . iII111i / I1ii11iIi11i / i11iIiiIii
if 21 - 21: oO0o / I1ii11iIi11i + Ii1I + OoooooooOO
if 91 - 91: i11iIiiIii / i1IIi + iII111i + ooOoO0o * i11iIiiIii
def lisp_is_debian ( ) :
    """True when the distro reports itself as Debian.
    NOTE(review): platform.dist() was removed in Python 3.8."""
    return ( platform . dist ( ) [ 0 ] == "debian" )
if 66 - 66: iIii1I11I1II1 % i1IIi - O0 + I11i * I1Ii111 . IiII
if 52 - 52: ooOoO0o + O0 . iII111i . I1ii11iIi11i . OoO0O00
if 97 - 97: I1IiiI / iII111i
if 71 - 71: II111iiii / i1IIi . I1ii11iIi11i % OoooooooOO . OoOoOO00
if 41 - 41: i1IIi * II111iiii / OoooooooOO . OOooOOo
if 83 - 83: iII111i . O0 / Oo0Ooo / OOooOOo - II111iiii
if 100 - 100: OoO0O00
def lisp_is_debian_kali ( ) :
    """True when the distro reports itself as Kali (Debian derivative).
    NOTE(review): platform.dist() was removed in Python 3.8."""
    return ( platform . dist ( ) [ 0 ] == "Kali" )
if 46 - 46: OoOoOO00 / iIii1I11I1II1 % iII111i . iIii1I11I1II1 * iII111i
if 38 - 38: I1ii11iIi11i - iII111i / O0 . I1Ii111
if 45 - 45: I1Ii111
if 83 - 83: OoOoOO00 . OoooooooOO
if 58 - 58: i11iIiiIii + OoooooooOO % OoooooooOO / IiII / i11iIiiIii
if 62 - 62: OoO0O00 / I1ii11iIi11i
if 7 - 7: OoooooooOO . IiII
def lisp_is_macos():
    """Return True when the running kernel identifies as Darwin (macOS)."""
    kernel_name = platform.uname()[0]
    return kernel_name == "Darwin"
if 53 - 53: Ii1I % Ii1I * o0oOOo0O0Ooo + OoOoOO00
if 92 - 92: OoooooooOO + i1IIi / Ii1I * O0
if 100 - 100: ooOoO0o % iIii1I11I1II1 * II111iiii - iII111i
if 92 - 92: ooOoO0o
if 22 - 22: Oo0Ooo % iII111i * I1ii11iIi11i / OOooOOo % i11iIiiIii * I11i
if 95 - 95: OoooooooOO - IiII * I1IiiI + OoOoOO00
if 10 - 10: o0oOOo0O0Ooo / i11iIiiIii
def lisp_is_alpine():
    """Return True on Alpine Linux, detected by the release file the
    distro ships at /etc/alpine-release."""
    release_marker = "/etc/alpine-release"
    return os.path.exists(release_marker)
if 92 - 92: I11i . I1Ii111
if 85 - 85: I1ii11iIi11i . I1Ii111
if 78 - 78: ooOoO0o * I1Ii111 + iIii1I11I1II1 + iIii1I11I1II1 / I1Ii111 . Ii1I
if 97 - 97: ooOoO0o / I1Ii111 % i1IIi % I1ii11iIi11i
if 18 - 18: iIii1I11I1II1 % I11i
if 95 - 95: ooOoO0o + i11iIiiIii * I1Ii111 - i1IIi * I1Ii111 - iIii1I11I1II1
if 75 - 75: OoooooooOO * IiII
def lisp_is_x86():
    """Return True on 32- or 64-bit x86 hardware."""
    return platform.machine() in ("x86", "i686", "x86_64")
if 39 - 39: IiII * Oo0Ooo + iIii1I11I1II1 - IiII + OOooOOo
if 69 - 69: O0
if 85 - 85: ooOoO0o / O0
if 18 - 18: o0oOOo0O0Ooo % O0 * I1ii11iIi11i
if 62 - 62: I1Ii111 . IiII . OoooooooOO
if 11 - 11: OOooOOo / I11i
if 73 - 73: i1IIi / i11iIiiIii
def lisp_is_linux():
    """Return True when the running kernel identifies as Linux."""
    kernel_name = platform.uname()[0]
    return kernel_name == "Linux"
if 58 - 58: Oo0Ooo . II111iiii + oO0o - i11iIiiIii / II111iiii / O0
if 85 - 85: OoOoOO00 + OOooOOo
if 10 - 10: IiII / OoO0O00 + OoOoOO00 / i1IIi
if 27 - 27: Ii1I
if 67 - 67: I1IiiI
if 55 - 55: I1ii11iIi11i - iII111i * o0oOOo0O0Ooo + OoOoOO00 * OoOoOO00 * O0
if 91 - 91: I1Ii111 - OOooOOo % iIii1I11I1II1 - OoooooooOO % ooOoO0o
if 98 - 98: OoO0O00 . OoO0O00 * oO0o * II111iiii * I1Ii111
def lisp_process_logfile ( ) :
    """Re-open the per-component logfile after external rotation.

    stdout is redirected into ./logs/lisp-<id>.log; if that file still
    exists nothing needs doing.  Otherwise (a rotator moved it away)
    close stdout, re-open the path in append mode as the new stdout,
    and write a fresh banner.
    """
    oOooO0 = "./logs/lisp-{}.log" . format ( lisp_log_id )
    if ( os . path . exists ( oOooO0 ) ) : return
    if 79 - 79: OoO0O00 - iIii1I11I1II1 + Ii1I - I1Ii111
    sys . stdout . close ( )
    sys . stdout = open ( oOooO0 , "a" )
    if 93 - 93: II111iiii . I1IiiI - Oo0Ooo + OoOoOO00
    lisp_print_banner ( bold ( "logfile rotation" , False ) )
    return
if 61 - 61: II111iiii
if 15 - 15: i11iIiiIii % I1IiiI * I11i / I1Ii111
if 90 - 90: iII111i
if 31 - 31: OOooOOo + O0
if 87 - 87: ooOoO0o
if 45 - 45: OoO0O00 / OoooooooOO - iII111i / Ii1I % IiII
if 83 - 83: I1IiiI . iIii1I11I1II1 - IiII * i11iIiiIii
if 20 - 20: i1IIi * I1Ii111 + II111iiii % o0oOOo0O0Ooo % oO0o
def lisp_i_am ( name ) :
    """Record which LISP component this process is.

    `name` is one of "itr", "etr", "rtr", "mr", "ms", "ddt", "core";
    sets the matching lisp_i_am_* global flag, the logging id, and
    caches the short (domain-stripped) hostname in lisp_hostname.
    """
    global lisp_log_id , lisp_i_am_itr , lisp_i_am_etr , lisp_i_am_rtr
    global lisp_i_am_mr , lisp_i_am_ms , lisp_i_am_ddt , lisp_i_am_core
    global lisp_hostname
    if 13 - 13: Oo0Ooo
    lisp_log_id = name
    if ( name == "itr" ) : lisp_i_am_itr = True
    if ( name == "etr" ) : lisp_i_am_etr = True
    if ( name == "rtr" ) : lisp_i_am_rtr = True
    if ( name == "mr" ) : lisp_i_am_mr = True
    if ( name == "ms" ) : lisp_i_am_ms = True
    if ( name == "ddt" ) : lisp_i_am_ddt = True
    if ( name == "core" ) : lisp_i_am_core = True
    if 60 - 60: I1ii11iIi11i * I1IiiI
    if 17 - 17: OOooOOo % Oo0Ooo / I1ii11iIi11i . IiII * OOooOOo - II111iiii
    if 41 - 41: Ii1I
    if 77 - 77: I1Ii111
    if 65 - 65: II111iiii . I1IiiI % oO0o * OoO0O00
    # Keep only the short hostname -- strip everything after the first dot.
    lisp_hostname = socket . gethostname ( )
    iI11I = lisp_hostname . find ( "." )
    if ( iI11I != - 1 ) : lisp_hostname = lisp_hostname [ 0 : iI11I ]
    return
if 11 - 11: iII111i - oO0o + II111iiii - iIii1I11I1II1
if 7 - 7: IiII - I11i / II111iiii * Ii1I . iII111i * iII111i
if 61 - 61: I11i % ooOoO0o - OoO0O00 / Oo0Ooo
if 4 - 4: OoooooooOO - i1IIi % Ii1I - OOooOOo * o0oOOo0O0Ooo
if 85 - 85: OoooooooOO * iIii1I11I1II1 . iII111i / OoooooooOO % I1IiiI % O0
if 36 - 36: Ii1I / II111iiii / IiII / IiII + I1ii11iIi11i
if 95 - 95: IiII
def lprint ( * args ) :
    """General debug logger: write a timestamped line (via stdout, which
    is redirected to the logfile) prefixed with the component id.
    No-op when lisp_debug_logging is disabled.
    NOTE(review): Python 2 print-statement syntax."""
    if ( lisp_debug_logging == False ) : return
    if 51 - 51: II111iiii + IiII . i1IIi . I1ii11iIi11i + OoOoOO00 * I1IiiI
    # Re-open the logfile first in case it was rotated away.
    lisp_process_logfile ( )
    OOOO0O00o = datetime . datetime . now ( ) . strftime ( "%m/%d/%y %H:%M:%S.%f" )
    OOOO0O00o = OOOO0O00o [ : - 3 ]
    print "{}: {}:" . format ( OOOO0O00o , lisp_log_id ) ,
    for OOoOoo0 in args : print OOoOoo0 ,
    print ""
    # stdout may be closed/rotated under us; best-effort flush.
    try : sys . stdout . flush ( )
    except : pass
    return
if 17 - 17: Ii1I + oO0o . OoO0O00 - Oo0Ooo * i11iIiiIii
if 20 - 20: I1IiiI . OoooooooOO % OOooOOo
if 63 - 63: I1IiiI % iIii1I11I1II1
if 39 - 39: iII111i / II111iiii / I1ii11iIi11i % I1IiiI
if 89 - 89: I1Ii111 + OoooooooOO + I1Ii111 * i1IIi + iIii1I11I1II1 % I11i
if 59 - 59: OOooOOo + i11iIiiIii
if 88 - 88: i11iIiiIii - ooOoO0o
if 67 - 67: OOooOOo . Oo0Ooo + OoOoOO00 - OoooooooOO
def dprint ( * args ) :
    """Data-plane logger: forward to lprint() only when
    lisp_data_plane_logging is enabled."""
    if ( lisp_data_plane_logging ) : lprint ( * args )
    return
if 70 - 70: OOooOOo / II111iiii - iIii1I11I1II1 - iII111i
if 11 - 11: iIii1I11I1II1 . OoooooooOO . II111iiii / i1IIi - I11i
if 30 - 30: OoOoOO00
if 21 - 21: i11iIiiIii / I1Ii111 % OOooOOo * O0 . I11i - iIii1I11I1II1
if 26 - 26: II111iiii * OoOoOO00
if 10 - 10: II111iiii . iII111i
if 32 - 32: Ii1I . IiII . OoooooooOO - OoO0O00 + oO0o
if 88 - 88: iII111i
def debug ( * args ) :
    """Temporary debug print: like lprint() but unconditional and
    wrapped in red >>> <<< markers so it stands out in the log.
    NOTE(review): Python 2 print-statement syntax."""
    lisp_process_logfile ( )
    if 19 - 19: II111iiii * IiII + Ii1I
    OOOO0O00o = datetime . datetime . now ( ) . strftime ( "%m/%d/%y %H:%M:%S.%f" )
    OOOO0O00o = OOOO0O00o [ : - 3 ]
    if 65 - 65: OOooOOo . I1Ii111 . OoO0O00 . iII111i - OOooOOo
    print red ( ">>>" , False ) ,
    print "{}:" . format ( OOOO0O00o ) ,
    for OOoOoo0 in args : print OOoOoo0 ,
    print red ( "<<<\n" , False )
    # stdout may be closed/rotated under us; best-effort flush.
    try : sys . stdout . flush ( )
    except : pass
    return
if 19 - 19: i11iIiiIii + iII111i % ooOoO0o
if 14 - 14: OoO0O00 . II111iiii . I11i / Ii1I % I1ii11iIi11i - ooOoO0o
if 67 - 67: I11i - OOooOOo . i1IIi
if 35 - 35: iII111i + ooOoO0o - oO0o . iII111i . IiII
if 87 - 87: OoOoOO00
if 25 - 25: i1IIi . OoO0O00 - OoOoOO00 / OoO0O00 % OoO0O00 * iIii1I11I1II1
if 50 - 50: OoO0O00 . i11iIiiIii - oO0o . oO0o
def lisp_print_banner ( string ) :
    """Log the startup/rotation banner with version and hostname.
    Reads lisp-version.txt once and caches it in lisp_version.
    NOTE(review): `commands` is a Python-2-only module."""
    global lisp_version , lisp_hostname
    if 31 - 31: OOooOOo / Oo0Ooo * i1IIi . OoOoOO00
    if ( lisp_version == "" ) :
        lisp_version = commands . getoutput ( "cat lisp-version.txt" )
    if 57 - 57: OOooOOo + iIii1I11I1II1 % i1IIi % I1IiiI
    OO0oo = bold ( lisp_hostname , False )
    lprint ( "lispers.net LISP {} {}, version {}, hostname {}" . format ( string ,
    datetime . datetime . now ( ) , lisp_version , OO0oo ) )
    return
if 15 - 15: iIii1I11I1II1 % OoooooooOO - Oo0Ooo * Ii1I + I11i
if 11 - 11: iII111i * Ii1I - OoOoOO00
if 66 - 66: OoOoOO00 . i11iIiiIii - iII111i * o0oOOo0O0Ooo + OoooooooOO * I1ii11iIi11i
if 74 - 74: Oo0Ooo
if 61 - 61: Oo0Ooo - I1Ii111 * II111iiii % ooOoO0o * iIii1I11I1II1 + OoO0O00
if 71 - 71: I11i / I11i * oO0o * oO0o / II111iiii
if 35 - 35: OOooOOo * o0oOOo0O0Ooo * I1IiiI % Oo0Ooo . OoOoOO00
def green(string, html):
    """Render *string* in green: HTML <font> markup when `html` is
    truthy, otherwise ANSI bright-green escapes (bolded)."""
    if html:
        return '<font color="green"><b>{}</b></font>'.format(string)
    return bold("\033[92m" + string + "\033[0m", html)
if 58 - 58: I11i + II111iiii * iII111i * i11iIiiIii - iIii1I11I1II1
if 68 - 68: OoooooooOO % II111iiii
if 26 - 26: II111iiii % i11iIiiIii % iIii1I11I1II1 % I11i * I11i * I1ii11iIi11i
if 24 - 24: II111iiii % I1Ii111 - ooOoO0o + I1IiiI * I1ii11iIi11i
if 2 - 2: Ii1I - IiII
if 83 - 83: oO0o % o0oOOo0O0Ooo % Ii1I - II111iiii * OOooOOo / OoooooooOO
if 18 - 18: OoO0O00 + iIii1I11I1II1 - II111iiii - I1IiiI
def green_last_sec ( string ) :
    """Green HTML highlight used for values updated within the last second."""
    as_html = True
    return ( green ( string , as_html ) )
if 71 - 71: OoooooooOO
if 33 - 33: I1Ii111
if 62 - 62: I1ii11iIi11i + Ii1I + i1IIi / OoooooooOO
if 7 - 7: o0oOOo0O0Ooo + i1IIi . I1IiiI / Oo0Ooo
if 22 - 22: ooOoO0o - ooOoO0o % OOooOOo . I1Ii111 + oO0o
if 63 - 63: I1IiiI % I1Ii111 * o0oOOo0O0Ooo + I1Ii111 / Oo0Ooo % iII111i
if 45 - 45: IiII
def green_last_min ( string ) :
    """Pale-green (#58D68D) HTML highlight used for values updated
    within the last minute."""
    markup = '<font color="#58D68D"><b>{}</b></font>'
    return ( markup . format ( string ) )
if 20 - 20: OoooooooOO * o0oOOo0O0Ooo * O0 . OOooOOo
if 78 - 78: iIii1I11I1II1 + I11i - Ii1I * I1Ii111 - OoooooooOO % OoOoOO00
if 34 - 34: O0
if 80 - 80: i1IIi - Oo0Ooo / OoO0O00 - i11iIiiIii
if 68 - 68: oO0o - I1ii11iIi11i % O0 % I1Ii111
if 11 - 11: O0 / OoO0O00 % OOooOOo + o0oOOo0O0Ooo + iIii1I11I1II1
if 40 - 40: ooOoO0o - OOooOOo . Ii1I * Oo0Ooo % I1Ii111
def red ( string , html ) :
    """Render *string* in red: HTML markup when html is True, otherwise
    a bold ANSI escape sequence (via bold) for terminals."""
    if ( html == False ) :
        return ( bold ( "\033[91m" + string + "\033[0m" , html ) )
    return ( '<font color="red"><b>{}</b></font>' . format ( string ) )
if 56 - 56: i11iIiiIii . o0oOOo0O0Ooo - I1IiiI * I11i
if 91 - 91: oO0o + OoooooooOO - i1IIi
if 84 - 84: Ii1I / IiII
if 86 - 86: OoOoOO00 * II111iiii - O0 . OoOoOO00 % iIii1I11I1II1 / OOooOOo
if 11 - 11: I1IiiI * oO0o + I1ii11iIi11i / I1ii11iIi11i
if 37 - 37: i11iIiiIii + i1IIi
if 23 - 23: iII111i + I11i . OoOoOO00 * I1IiiI + I1ii11iIi11i
def blue ( string , html ) :
    """Render *string* in blue: HTML markup when html is True, otherwise
    a bold ANSI escape sequence (via bold) for terminals."""
    if ( html == False ) :
        return ( bold ( "\033[94m" + string + "\033[0m" , html ) )
    return ( '<font color="blue"><b>{}</b></font>' . format ( string ) )
if 18 - 18: IiII * o0oOOo0O0Ooo . IiII / O0
if 8 - 8: o0oOOo0O0Ooo
if 4 - 4: I1ii11iIi11i + I1ii11iIi11i * ooOoO0o - OoOoOO00
if 78 - 78: Ii1I / II111iiii % OoOoOO00
if 52 - 52: OOooOOo - iII111i * oO0o
if 17 - 17: OoooooooOO + OOooOOo * I11i * OoOoOO00
if 36 - 36: O0 + Oo0Ooo
def bold ( string , html ) :
    """Return *string* in bold: a <b> tag for HTML output, or the ANSI
    bold escape sequence for terminal output."""
    if ( html == False ) :
        return ( "\033[1m" + string + "\033[0m" )
    return ( "<b>{}</b>" . format ( string ) )
if 5 - 5: Oo0Ooo * OoOoOO00
if 46 - 46: ooOoO0o
if 33 - 33: iII111i - II111iiii * OoooooooOO - Oo0Ooo - OOooOOo
if 84 - 84: I1Ii111 + Oo0Ooo - OoOoOO00 * OoOoOO00
if 61 - 61: OoooooooOO . oO0o . OoooooooOO / Oo0Ooo
if 72 - 72: i1IIi
if 82 - 82: OoOoOO00 + OoooooooOO / i11iIiiIii * I1ii11iIi11i . OoooooooOO
def convert_font ( string ) :
 """Convert ANSI color/bold escape sequences embedded in *string* into
 equivalent HTML markup using the red/green/blue/bold helpers.

 Only the first sequence type found is processed per pass; a trailing
 recursive call handles bold sequences that remain (e.g. colors nested
 inside bold).
 """
 # Map each ANSI start sequence to the helper producing its HTML form.
 oooo0OOo = [ [ "[91m" , red ] , [ "[92m" , green ] , [ "[94m" , blue ] , [ "[1m" , bold ] ]
 OoO00 = "[0m"
 if 18 - 18: Ii1I - OoooooooOO % II111iiii - I1IiiI % OoOoOO00
 # Find the first ANSI start sequence present in the string (if any).
 for ooo0OO in oooo0OOo :
  iIi1IiI = ooo0OO [ 0 ]
  I11IIIiIi11 = ooo0OO [ 1 ]
  I11iiIi1i1 = len ( iIi1IiI )
  iI11I = string . find ( iIi1IiI )
  if ( iI11I != - 1 ) : break
 if 41 - 41: Ii1I % I1ii11iIi11i
 if 12 - 12: OOooOOo
 # Replace every start..."[0m" pair of that type with the helper's
 # HTML-rendered version of the enclosed text.
 while ( iI11I != - 1 ) :
  ooOo0O = string [ iI11I : : ] . find ( OoO00 )
  i1I1IIIiiI = string [ iI11I + I11iiIi1i1 : iI11I + ooOo0O ]
  string = string [ : iI11I ] + I11IIIiIi11 ( i1I1IIIiiI , True ) + string [ iI11I + ooOo0O + I11iiIi1i1 : : ]
  if 71 - 71: OOooOOo * OoO0O00 % OoooooooOO % OoO0O00 / I1IiiI
  iI11I = string . find ( iIi1IiI )
 if 56 - 56: OoooooooOO % i11iIiiIii * iIii1I11I1II1 . OoO0O00 * O0
 if 23 - 23: i11iIiiIii
 if 39 - 39: o0oOOo0O0Ooo - I1ii11iIi11i % iII111i * OoO0O00 - OOooOOo / iII111i
 if 29 - 29: I1ii11iIi11i
 if 52 - 52: i11iIiiIii / i1IIi
 # Recurse if a bold sequence is still present after color conversion.
 if ( string . find ( "[1m" ) != - 1 ) : string = convert_font ( string )
 return ( string )
if 1 - 1: ooOoO0o
if 78 - 78: I1ii11iIi11i + I11i - O0
if 10 - 10: I1Ii111 % I1IiiI
if 97 - 97: OoooooooOO - I1Ii111
if 58 - 58: iIii1I11I1II1 + O0
if 30 - 30: ooOoO0o % iII111i * OOooOOo - I1ii11iIi11i * Ii1I % ooOoO0o
if 46 - 46: i11iIiiIii - O0 . oO0o
def lisp_space ( num ) :
    """Return a string of *num* space characters."""
    return ( " " * num )
if 8 - 8: Oo0Ooo + II111iiii * OOooOOo * OoOoOO00 * I11i / IiII
if 21 - 21: oO0o / OoooooooOO
if 11 - 11: OOooOOo % Ii1I - i11iIiiIii - oO0o + ooOoO0o + IiII
if 87 - 87: I1Ii111 * i1IIi / I1ii11iIi11i
if 6 - 6: o0oOOo0O0Ooo + Oo0Ooo - OoooooooOO % OOooOOo * OoOoOO00
if 69 - 69: i1IIi
if 59 - 59: II111iiii - o0oOOo0O0Ooo
def lisp_button ( string , url ) :
    """Return an HTML <button> labelled *string*; when *url* is supplied
    the button is wrapped in an <a> anchor linking to it, padded with
    two spaces on each side.

    Fix: the original markup never closed the style="..." attribute, so
    the type="button" attribute was swallowed into the style value and
    the emitted HTML was malformed.
    """
    button = ( '<button style="background-color:transparent;'
        'border-radius:10px;" type="button">' )

    if ( url == None ) :
        html = button + string + "</button>"
    else :
        anchor = '<a href="{}">' . format ( url )
        pad = lisp_space ( 2 )
        html = pad + anchor + button + string + "</button></a>" + pad
    return ( html )
if 81 - 81: I1ii11iIi11i - O0 * OoooooooOO
if 23 - 23: II111iiii / oO0o
if 28 - 28: Oo0Ooo * ooOoO0o - OoO0O00
if 19 - 19: I11i
if 67 - 67: O0 % iIii1I11I1II1 / IiII . i11iIiiIii - Ii1I + O0
if 27 - 27: OOooOOo
if 89 - 89: II111iiii / oO0o
def lisp_print_cour ( string ) :
    """Wrap *string* in a Courier New <font> tag for web output."""
    return ( '<font face="Courier New">{}</font>' . format ( string ) )
if 14 - 14: OOooOOo . I1IiiI * ooOoO0o + II111iiii - ooOoO0o + OOooOOo
if 18 - 18: oO0o - o0oOOo0O0Ooo - I1IiiI - I1IiiI
if 54 - 54: Oo0Ooo + I1IiiI / iII111i . I1IiiI * OoOoOO00
if 1 - 1: OoOoOO00 * OoO0O00 . i1IIi / Oo0Ooo . I1ii11iIi11i + Oo0Ooo
if 17 - 17: Oo0Ooo + OoO0O00 / Ii1I / iII111i * OOooOOo
if 29 - 29: OoO0O00 % OoooooooOO * oO0o / II111iiii - oO0o
if 19 - 19: i11iIiiIii
def lisp_print_sans ( string ) :
    """Wrap *string* in a Sans-Serif <font> tag for web output."""
    return ( '<font face="Sans-Serif">{}</font>' . format ( string ) )
if 54 - 54: II111iiii . I11i
if 73 - 73: OoOoOO00 . I1IiiI
if 32 - 32: OoOoOO00 * I1IiiI % ooOoO0o * Ii1I . O0
if 48 - 48: iII111i * iII111i
if 13 - 13: Ii1I / I11i + OoOoOO00 . o0oOOo0O0Ooo % ooOoO0o
if 48 - 48: I1IiiI / i11iIiiIii - o0oOOo0O0Ooo * oO0o / OoooooooOO
if 89 - 89: iIii1I11I1II1 / I1IiiI - II111iiii / Ii1I . i11iIiiIii . Ii1I
def lisp_span ( string , hover_string ) :
    """Return *string* inside a <span> whose hover tooltip (title
    attribute) is *hover_string*."""
    return ( '<span title="{}">{}</span>' . format ( hover_string , string ) )
if 48 - 48: O0 + O0 . I1Ii111 - ooOoO0o
if 63 - 63: oO0o
if 71 - 71: i1IIi . Ii1I * iII111i % OoooooooOO + OOooOOo
if 36 - 36: IiII
if 49 - 49: OOooOOo / OoooooooOO / I1IiiI
if 74 - 74: I1Ii111 % I1ii11iIi11i
if 7 - 7: II111iiii
def lisp_eid_help_hover ( output ) :
 """Wrap *output* in a hover <span> whose tooltip explains the accepted
 unicast and multicast EID input formats.
 """
 iI = '''Unicast EID format:
 For longest match lookups:
 <address> or [<iid>]<address>
 For exact match lookups:
 <prefix> or [<iid>]<prefix>
 Multicast EID format:
 For longest match lookups:
 <address>-><group> or
 [<iid>]<address>->[<iid>]<group>'''
 if 38 - 38: IiII . Ii1I
 if 24 - 24: o0oOOo0O0Ooo - o0oOOo0O0Ooo + I1ii11iIi11i + I1IiiI - oO0o
 I1 = lisp_span ( output , iI )
 return ( I1 )
if 13 - 13: OoOoOO00 / I1ii11iIi11i . OOooOOo * I11i - Oo0Ooo / oO0o
if 8 - 8: OoOoOO00 / O0 * O0 % I1Ii111 - Oo0Ooo + I11i
if 83 - 83: O0 . I1IiiI
if 95 - 95: I11i . OoooooooOO - i1IIi - OoooooooOO - OoO0O00 % iIii1I11I1II1
if 64 - 64: OOooOOo + OoooooooOO * OoooooooOO
if 41 - 41: ooOoO0o . Oo0Ooo + I1IiiI
if 100 - 100: Ii1I + OoO0O00
def lisp_geo_help_hover ( output ) :
 """Wrap *output* in a hover <span> whose tooltip explains the accepted
 EID, Geo-Point, and Geo-Prefix input formats.
 """
 iI = '''EID format:
 <address> or [<iid>]<address>
 '<name>' or [<iid>]'<name>'
 Geo-Point format:
 d-m-s-<N|S>-d-m-s-<W|E> or
 [<iid>]d-m-s-<N|S>-d-m-s-<W|E>
 Geo-Prefix format:
 d-m-s-<N|S>-d-m-s-<W|E>/<km> or
 [<iid>]d-m-s-<N|S>-d-m-s-<W|E>/<km>'''
 if 73 - 73: i1IIi - I1Ii111 % ooOoO0o / OoO0O00
 if 40 - 40: I1ii11iIi11i * ooOoO0o - I1IiiI / IiII / i11iIiiIii
 I1 = lisp_span ( output , iI )
 return ( I1 )
if 83 - 83: I1ii11iIi11i / I1Ii111 - i11iIiiIii . iIii1I11I1II1 + Oo0Ooo
if 59 - 59: O0 % Oo0Ooo
if 92 - 92: Ii1I % iII111i / I1ii11iIi11i % I1ii11iIi11i * I1IiiI
if 74 - 74: O0 . I1IiiI % OoO0O00 % IiII
if 87 - 87: oO0o - i11iIiiIii
if 78 - 78: i11iIiiIii / iIii1I11I1II1 - o0oOOo0O0Ooo
if 23 - 23: I11i
def space ( num ) :
    """Return a string of *num* spaces (short-named duplicate of
    lisp_space, kept for existing callers)."""
    padding = ""
    while ( num > 0 ) :
        padding += " "
        num -= 1
    return ( padding )
if 40 - 40: o0oOOo0O0Ooo - II111iiii / Oo0Ooo
if 14 - 14: I1ii11iIi11i
if 5 - 5: o0oOOo0O0Ooo . iIii1I11I1II1 % iIii1I11I1II1
if 56 - 56: OoooooooOO - I11i - i1IIi
if 8 - 8: I1Ii111 / OOooOOo . I1IiiI + I1ii11iIi11i / i11iIiiIii
if 31 - 31: ooOoO0o - iIii1I11I1II1 + iII111i . Oo0Ooo / IiII % iIii1I11I1II1
if 6 - 6: IiII * i11iIiiIii % iIii1I11I1II1 % i11iIiiIii + o0oOOo0O0Ooo / i1IIi
if 53 - 53: I11i + iIii1I11I1II1
def lisp_get_ephemeral_port ( ) :
    """Return a random source port from the ephemeral range.

    Note: the range is [32768, 65534] — the original randrange() upper
    bound of 65535 is exclusive, so 65535 is never returned.
    """
    return ( random . randint ( 32768 , 65534 ) )
if 70 - 70: I1ii11iIi11i
if 67 - 67: OoooooooOO
if 29 - 29: O0 - i11iIiiIii - II111iiii + OOooOOo * IiII
if 2 - 2: i1IIi - ooOoO0o + I1IiiI . o0oOOo0O0Ooo * o0oOOo0O0Ooo / OoOoOO00
if 93 - 93: i1IIi
if 53 - 53: OoooooooOO + Oo0Ooo + oO0o
if 24 - 24: iII111i - IiII - iII111i * I1ii11iIi11i . OoooooooOO / IiII
def lisp_get_data_nonce ( ) :
    """Return a random 24-bit nonce (0 .. 0xffffff) for data headers."""
    return ( random . randrange ( 0 , 0x1000000 ) )
if 66 - 66: Oo0Ooo
if 97 - 97: i1IIi - OoooooooOO / I1Ii111 * I1IiiI
if 55 - 55: o0oOOo0O0Ooo . iII111i
if 87 - 87: o0oOOo0O0Ooo % iIii1I11I1II1
if 100 - 100: I1Ii111 . I1IiiI * I1Ii111 - I1IiiI . I11i * Ii1I
if 89 - 89: OoO0O00 + IiII * I1Ii111
if 28 - 28: OoooooooOO . oO0o % I1ii11iIi11i / i1IIi / OOooOOo
def lisp_get_control_nonce ( ) :
    """Return a random 64-bit nonce for control-plane messages."""
    return ( random . getrandbits ( 64 ) )
if 36 - 36: o0oOOo0O0Ooo + I11i - IiII + iIii1I11I1II1 + OoooooooOO
if 4 - 4: II111iiii . I11i + Ii1I * I1Ii111 . ooOoO0o
if 87 - 87: OoOoOO00 / OoO0O00 / i11iIiiIii
if 74 - 74: oO0o / I1ii11iIi11i % o0oOOo0O0Ooo
if 88 - 88: OoOoOO00 - i11iIiiIii % o0oOOo0O0Ooo * I11i + I1ii11iIi11i
if 52 - 52: II111iiii . I1IiiI + OoOoOO00 % OoO0O00
if 62 - 62: o0oOOo0O0Ooo
if 15 - 15: I11i + Ii1I . OOooOOo * OoO0O00 . OoOoOO00
if 18 - 18: i1IIi % II111iiii + I1Ii111 % Ii1I
def lisp_hex_string ( integer_value ) :
    """Return *integer_value* as lowercase hex digits without the "0x"
    prefix, stripping the trailing "L" that Python 2 longs append."""
    digits = hex ( integer_value ) [ 2 : : ]
    if ( digits . endswith ( "L" ) ) : digits = digits [ : - 1 ]
    return ( digits )
if 53 - 53: o0oOOo0O0Ooo % Oo0Ooo * Oo0Ooo
if 77 - 77: OOooOOo - IiII . I11i / I1IiiI + OoO0O00 % oO0o
if 12 - 12: i1IIi
if 63 - 63: IiII + o0oOOo0O0Ooo
if 1 - 1: I1ii11iIi11i / OoO0O00 + oO0o . o0oOOo0O0Ooo / I1ii11iIi11i - iII111i
if 5 - 5: OOooOOo
if 4 - 4: iII111i % I1Ii111 / OoO0O00 . OOooOOo / OOooOOo - I1ii11iIi11i
def lisp_get_timestamp ( ) :
    """Return the current wall-clock time in seconds since the epoch."""
    now = time . time ( )
    return ( now )
if 79 - 79: I1ii11iIi11i + I1Ii111
if 10 - 10: Oo0Ooo + O0
if 43 - 43: iIii1I11I1II1 / II111iiii % o0oOOo0O0Ooo - OOooOOo
if 62 - 62: I11i
if 63 - 63: OOooOOo + ooOoO0o * oO0o / o0oOOo0O0Ooo / Oo0Ooo * iIii1I11I1II1
if 57 - 57: OoOoOO00 - oO0o / ooOoO0o % i11iIiiIii
if 3 - 3: iII111i . ooOoO0o % I1IiiI + I1ii11iIi11i
def lisp_set_timestamp ( seconds ) :
    """Return the absolute epoch time *seconds* seconds from now."""
    expiry = time . time ( ) + seconds
    return ( expiry )
if 64 - 64: i1IIi
if 29 - 29: o0oOOo0O0Ooo / i11iIiiIii / I1IiiI % oO0o % i11iIiiIii
if 18 - 18: OOooOOo + I1Ii111
if 80 - 80: oO0o + o0oOOo0O0Ooo * Ii1I + OoO0O00
if 75 - 75: I11i / o0oOOo0O0Ooo / OOooOOo / IiII % ooOoO0o + II111iiii
if 4 - 4: iII111i - Oo0Ooo - IiII - I11i % i11iIiiIii / OoO0O00
if 50 - 50: ooOoO0o + i1IIi
def lisp_print_elapsed ( ts ) :
    """Return the time elapsed since epoch timestamp *ts* as "h:mm:ss",
    or "never" when the timestamp is unset (0 or None)."""
    if ( ts in ( None , 0 ) ) : return ( "never" )
    elapsed = round ( time . time ( ) - ts , 0 )
    return ( str ( datetime . timedelta ( seconds = elapsed ) ) )
if 78 - 78: IiII
if 83 - 83: iIii1I11I1II1 % OoOoOO00 % o0oOOo0O0Ooo % I1Ii111 . I1ii11iIi11i % O0
if 47 - 47: o0oOOo0O0Ooo
if 66 - 66: I1IiiI - IiII
if 33 - 33: I1IiiI / OoO0O00
if 12 - 12: II111iiii
if 2 - 2: i1IIi - I1IiiI + I11i . II111iiii
def lisp_print_future ( ts ) :
    """Return the time remaining until epoch timestamp *ts* as "h:mm:ss";
    "never" when ts is 0 and "expired" once ts has passed."""
    if ( ts == 0 ) : return ( "never" )
    remaining = ts - time . time ( )
    if ( remaining < 0 ) : return ( "expired" )
    remaining = round ( remaining , 0 )
    return ( str ( datetime . timedelta ( seconds = remaining ) ) )
if 18 - 18: iII111i - oO0o % iII111i / I11i
if 68 - 68: Ii1I * iIii1I11I1II1 + I1Ii111 % OoOoOO00
if 46 - 46: OoOoOO00 % i1IIi / oO0o * Oo0Ooo * OOooOOo
if 67 - 67: OoOoOO00 * OoOoOO00 . OoOoOO00 + Ii1I / oO0o
if 13 - 13: iII111i
if 80 - 80: Ii1I - o0oOOo0O0Ooo
if 41 - 41: o0oOOo0O0Ooo - Oo0Ooo * I1IiiI
if 82 - 82: OoO0O00 % o0oOOo0O0Ooo % OOooOOo / O0
if 94 - 94: I1ii11iIi11i + I1ii11iIi11i + OoooooooOO % ooOoO0o
if 7 - 7: iII111i
if 78 - 78: OOooOOo + iII111i . IiII
if 91 - 91: iIii1I11I1II1 . o0oOOo0O0Ooo . I1ii11iIi11i + OoooooooOO
if 69 - 69: I1Ii111 - I1IiiI
def lisp_print_eid_tuple ( eid , group ) :
    """Return a display string for an (eid, group) pair: the unicast
    prefix when group is null, "[iid](*, G)" when eid is null or equal
    to group, otherwise the (S,G) form from print_sg()."""
    unicast = eid . print_prefix ( )
    if ( group . is_null ( ) ) : return ( unicast )

    group_str = group . print_prefix ( )
    iid = group . instance_id

    if ( eid . is_null ( ) or eid . is_exact_match ( group ) ) :
        bracket = group_str . find ( "]" ) + 1
        return ( "[{}](*, {})" . format ( iid , group_str [ bracket : : ] ) )

    return ( eid . print_sg ( group ) )
if 9 - 9: ooOoO0o % oO0o . Ii1I
if 32 - 32: I1IiiI
if 78 - 78: OoOoOO00 - OoO0O00 % ooOoO0o
if 80 - 80: I1Ii111 . I11i
if 73 - 73: OoOoOO00 . O0 / iII111i * oO0o
if 29 - 29: o0oOOo0O0Ooo
if 86 - 86: II111iiii . IiII
if 2 - 2: OoooooooOO
def lisp_convert_6to4 ( addr_str ) :
    """Return the embedded IPv4 part of an IPv4-mapped IPv6 string
    ("::ffff:a.b.c.d"); any other string is returned unchanged."""
    if ( "::ffff:" not in addr_str ) : return ( addr_str )
    return ( addr_str . split ( ":" ) [ - 1 ] )
if 35 - 35: iIii1I11I1II1
if 94 - 94: OoOoOO00
if 100 - 100: OoO0O00 / i1IIi - I1IiiI % Ii1I - iIii1I11I1II1
if 17 - 17: I11i / o0oOOo0O0Ooo % Oo0Ooo
if 71 - 71: IiII . I1Ii111 . OoO0O00
if 68 - 68: i11iIiiIii % oO0o * OoO0O00 * IiII * II111iiii + O0
if 66 - 66: I11i % I1ii11iIi11i % OoooooooOO
if 34 - 34: o0oOOo0O0Ooo / iII111i % O0 . OoO0O00 . i1IIi
if 29 - 29: O0 . I1Ii111
if 66 - 66: oO0o * iIii1I11I1II1 % iIii1I11I1II1 * IiII - ooOoO0o - IiII
if 70 - 70: I1Ii111 + oO0o
def lisp_convert_4to6 ( addr_str ) :
    """Return a lisp_address (IPv6 AFI) holding *addr_str*; a dotted
    IPv4 string is first converted to its ::ffff: IPv4-mapped form."""
    address = lisp_address ( LISP_AFI_IPV6 , "" , 128 , 0 )
    if ( address . is_ipv4_string ( addr_str ) ) :
        addr_str = "::ffff:" + addr_str
    address . store_address ( addr_str )
    return ( address )
if 93 - 93: I1Ii111 + Ii1I
if 33 - 33: O0
if 78 - 78: O0 / II111iiii * OoO0O00
if 50 - 50: OoooooooOO - iIii1I11I1II1 + i1IIi % I1Ii111 - iIii1I11I1II1 % O0
if 58 - 58: IiII + iIii1I11I1II1
if 65 - 65: II111iiii - I1Ii111 % o0oOOo0O0Ooo - OoOoOO00 * iII111i + Ii1I
if 79 - 79: ooOoO0o . OoOoOO00 % I1Ii111 - Oo0Ooo
if 69 - 69: ooOoO0o - o0oOOo0O0Ooo . ooOoO0o
if 9 - 9: oO0o % i11iIiiIii / Oo0Ooo
def lisp_gethostbyname ( string ) :
 """Resolve *string* to an address string.

 Literal IPv4 dotted-quads and IPv6 colon addresses are returned as-is
 without a DNS lookup.  Otherwise DNS is tried; on failure "" is
 returned, except on Alpine where a getaddrinfo() fallback is used
 (gethostbyname behaves differently there -- see comments below).
 """
 IIIiI1ii1IIi = string . split ( "." )
 o0O0oo0o = string . split ( ":" )
 II11iI1iiI = string . split ( "-" )
 if 48 - 48: I11i . OoooooooOO . I1IiiI . OoOoOO00 % I1ii11iIi11i / iII111i
 # Dotted name starting with a digit: treat as an IPv4 address literal.
 if ( len ( IIIiI1ii1IIi ) > 1 ) :
  if ( IIIiI1ii1IIi [ 0 ] . isdigit ( ) ) : return ( string )
 if 11 - 11: i1IIi % OoO0O00 % iII111i
 # Colon-separated with a leading hex group: an IPv6 address literal.
 if ( len ( o0O0oo0o ) > 1 ) :
  try :
   int ( o0O0oo0o [ 0 ] , 16 )
   return ( string )
  except :
   pass
 if 99 - 99: ooOoO0o / iIii1I11I1II1 - Ii1I * I1ii11iIi11i % I1IiiI
 if 13 - 13: OoO0O00
 if 70 - 70: I1Ii111 + O0 . oO0o * Ii1I
 if 2 - 2: OoooooooOO . OOooOOo . IiII
 if 42 - 42: OOooOOo % oO0o / OoO0O00 - oO0o * i11iIiiIii
 if 19 - 19: oO0o * I1IiiI % i11iIiiIii
 if 24 - 24: o0oOOo0O0Ooo
 # NOTE(review): this dashed-MAC check computes but never uses its
 # result -- MAC-looking strings fall through to the DNS lookup below.
 if ( len ( II11iI1iiI ) == 3 ) :
  for Ii11 in range ( 3 ) :
   try : int ( II11iI1iiI [ Ii11 ] , 16 )
   except : break
 if 10 - 10: o0oOOo0O0Ooo % Ii1I / OOooOOo
 if 28 - 28: OOooOOo % ooOoO0o
 if 48 - 48: i11iIiiIii % oO0o
 try :
  o0o0O00 = socket . gethostbyname ( string )
  return ( o0o0O00 )
 except :
  if ( lisp_is_alpine ( ) == False ) : return ( "" )
 if 29 - 29: iII111i + i11iIiiIii % I11i
 if 93 - 93: OoOoOO00 % iIii1I11I1II1
 if 90 - 90: I1IiiI - OOooOOo / Ii1I / O0 / I11i
 if 87 - 87: OoOoOO00 / IiII + iIii1I11I1II1
 if 93 - 93: iIii1I11I1II1 + oO0o % ooOoO0o
 # Alpine fallback: accept the getaddrinfo() answer only when the
 # canonical-name slot echoes the queried string -- presumably to
 # filter wildcard answers; verify against resolver behavior.
 try :
  o0o0O00 = socket . getaddrinfo ( string , 0 ) [ 0 ]
  if ( o0o0O00 [ 3 ] != string ) : return ( "" )
  o0o0O00 = o0o0O00 [ 4 ] [ 0 ]
 except :
  o0o0O00 = ""
 if 21 - 21: OOooOOo
 return ( o0o0O00 )
if 6 - 6: IiII
if 46 - 46: IiII + oO0o
if 79 - 79: OoooooooOO - IiII * IiII . OoOoOO00
if 100 - 100: II111iiii * I11i % I1IiiI / I1ii11iIi11i
if 90 - 90: I1ii11iIi11i . ooOoO0o . OoOoOO00 . Ii1I
if 4 - 4: Ii1I + OoOoOO00 % I1ii11iIi11i / i11iIiiIii
if 74 - 74: II111iiii . O0 - I1IiiI + IiII % i11iIiiIii % OoOoOO00
if 78 - 78: Ii1I + OoOoOO00 + IiII - IiII . i11iIiiIii / OoO0O00
def lisp_ip_checksum ( data ) :
if ( len ( data ) < 20 ) :
lprint ( "IPv4 packet too short, length {}" . format ( len ( data ) ) )
return ( data )
if 27 - 27: Ii1I - O0 % I11i * I1Ii111 . IiII % iIii1I11I1II1
if 37 - 37: OoooooooOO + O0 - i1IIi % ooOoO0o
i1I1i1i = binascii . hexlify ( data )
if 36 - 36: II111iiii % O0
if 35 - 35: iIii1I11I1II1 - OOooOOo % o0oOOo0O0Ooo
if 30 - 30: I1Ii111 % I1Ii111 % IiII . OoOoOO00
if 9 - 9: ooOoO0o / II111iiii . OoOoOO00 % o0oOOo0O0Ooo * II111iiii - ooOoO0o
oOOoo0 = 0
for Ii11 in range ( 0 , 40 , 4 ) :
oOOoo0 += int ( i1I1i1i [ Ii11 : Ii11 + 4 ] , 16 )
if 24 - 24: OoO0O00 - oO0o + I1ii11iIi11i / iII111i % I1IiiI + iIii1I11I1II1
if 79 - 79: OoOoOO00 / ooOoO0o
if 77 - 77: Oo0Ooo
if 46 - 46: I1Ii111
if 72 - 72: iII111i * OOooOOo
oOOoo0 = ( oOOoo0 >> 16 ) + ( oOOoo0 & 0xffff )
oOOoo0 += oOOoo0 >> 16
oOOoo0 = socket . htons ( ~ oOOoo0 & 0xffff )
if 67 - 67: i1IIi
if 5 - 5: II111iiii . OoooooooOO
if 57 - 57: I1IiiI
if 35 - 35: OoooooooOO - I1Ii111 / OoO0O00
oOOoo0 = struct . pack ( "H" , oOOoo0 )
i1I1i1i = data [ 0 : 10 ] + oOOoo0 + data [ 12 : : ]
return ( i1I1i1i )
if 50 - 50: OoOoOO00
if 33 - 33: I11i
if 98 - 98: OoOoOO00 % II111iiii
if 95 - 95: iIii1I11I1II1 - I1Ii111 - OOooOOo + I1Ii111 % I1ii11iIi11i . I1IiiI
if 41 - 41: O0 + oO0o . i1IIi - II111iiii * o0oOOo0O0Ooo . OoO0O00
if 68 - 68: o0oOOo0O0Ooo
if 20 - 20: I1Ii111 - I1Ii111
if 37 - 37: IiII
if 37 - 37: Oo0Ooo / IiII * O0
if 73 - 73: iII111i * iII111i / ooOoO0o
if 43 - 43: I1ii11iIi11i . i1IIi . IiII + O0 * Ii1I * O0
if 41 - 41: I1ii11iIi11i + Ii1I % OoooooooOO . I1ii11iIi11i + iII111i . iII111i
if 31 - 31: i11iIiiIii + II111iiii . iII111i * OoOoOO00
if 66 - 66: OoOoOO00 + i1IIi % II111iiii . O0 * I1ii11iIi11i % I1ii11iIi11i
if 87 - 87: OOooOOo + o0oOOo0O0Ooo . iII111i - OoooooooOO
if 6 - 6: iIii1I11I1II1 * OoooooooOO
if 28 - 28: Oo0Ooo * o0oOOo0O0Ooo / I1Ii111
if 52 - 52: O0 / o0oOOo0O0Ooo % iII111i * I1IiiI % OOooOOo
if 69 - 69: I1ii11iIi11i
if 83 - 83: o0oOOo0O0Ooo
if 38 - 38: I1Ii111 + OoooooooOO . i1IIi
if 19 - 19: iII111i - o0oOOo0O0Ooo - Ii1I - OoOoOO00 . iII111i . I1Ii111
if 48 - 48: iII111i + IiII
if 60 - 60: I11i + iII111i . IiII / i1IIi . iIii1I11I1II1
if 14 - 14: OOooOOo
if 79 - 79: Ii1I
if 76 - 76: iIii1I11I1II1
if 80 - 80: iIii1I11I1II1 . O0 / Ii1I % Ii1I
if 93 - 93: OoooooooOO * Oo0Ooo
if 10 - 10: I1Ii111 * OoooooooOO + I11i - I1ii11iIi11i / I1ii11iIi11i . i11iIiiIii
if 22 - 22: I1Ii111 / o0oOOo0O0Ooo
if 98 - 98: i1IIi
if 51 - 51: I1ii11iIi11i + ooOoO0o + Oo0Ooo / i1IIi + i1IIi
if 12 - 12: iIii1I11I1II1 . Ii1I . I1ii11iIi11i % I1IiiI . II111iiii . oO0o
if 32 - 32: I1ii11iIi11i + IiII / O0 / OoOoOO00 * OoooooooOO % ooOoO0o
def lisp_udp_checksum ( source , dest , data ) :
 """Compute the UDP checksum for an IPv6 packet and return *data* with
 the checksum stored in the UDP header's checksum field (bytes 6-7).

 source/dest are IPv6 address strings; the checksum covers the IPv6
 pseudo-header (source, dest, upper-layer length, next-header) plus
 the UDP header and payload in *data*, per the standard one's-
 complement algorithm.
 """
 if 50 - 50: OoO0O00
 if 66 - 66: iIii1I11I1II1
 if 41 - 41: I1Ii111 . O0 * I1IiiI * I1ii11iIi11i
 if 100 - 100: iII111i
 # Build the IPv6 pseudo-header: 16-byte source and dest addresses,
 # then upper-layer packet length and protocol, in network byte order.
 IiIIi1I1I11Ii = lisp_address ( LISP_AFI_IPV6 , source , LISP_IPV6_HOST_MASK_LEN , 0 )
 oOo0OOOOOO = lisp_address ( LISP_AFI_IPV6 , dest , LISP_IPV6_HOST_MASK_LEN , 0 )
 IiI = socket . htonl ( len ( data ) )
 oooO0oOoo = socket . htonl ( LISP_UDP_PROTOCOL )
 OoOOoO0O0oO = IiIIi1I1I11Ii . pack_address ( )
 OoOOoO0O0oO += oOo0OOOOOO . pack_address ( )
 OoOOoO0O0oO += struct . pack ( "II" , IiI , oooO0oOoo )
 if 92 - 92: Oo0Ooo / i11iIiiIii + I1ii11iIi11i
 if 87 - 87: OoOoOO00 % iIii1I11I1II1
 if 72 - 72: OOooOOo . OOooOOo - I1ii11iIi11i
 if 48 - 48: Oo0Ooo - ooOoO0o + Oo0Ooo - I1IiiI * i11iIiiIii . iII111i
 # Hexlify pseudo-header + datagram and zero-pad to a 16-bit multiple.
 I1iIIIiI = binascii . hexlify ( OoOOoO0O0oO + data )
 Oo = len ( I1iIIIiI ) % 4
 for Ii11 in range ( 0 , Oo ) : I1iIIIiI += "0"
 if 34 - 34: I1IiiI
 if 47 - 47: I1Ii111 - OOooOOo / ooOoO0o - Oo0Ooo + iII111i - iIii1I11I1II1
 if 68 - 68: Ii1I - oO0o + Oo0Ooo
 if 44 - 44: Ii1I * o0oOOo0O0Ooo * II111iiii
 # One's-complement sum of all 16-bit words.
 oOOoo0 = 0
 for Ii11 in range ( 0 , len ( I1iIIIiI ) , 4 ) :
  oOOoo0 += int ( I1iIIIiI [ Ii11 : Ii11 + 4 ] , 16 )
 if 5 - 5: i1IIi + O0 % O0 * O0 + OoOoOO00 % i1IIi
 if 80 - 80: iII111i / o0oOOo0O0Ooo + OoO0O00 / oO0o
 if 46 - 46: i11iIiiIii / IiII % i1IIi - I11i * OoOoOO00
 if 94 - 94: Ii1I - I1ii11iIi11i + o0oOOo0O0Ooo - Oo0Ooo
 if 15 - 15: OOooOOo
 # Fold carries, complement, convert to network byte order.
 oOOoo0 = ( oOOoo0 >> 16 ) + ( oOOoo0 & 0xffff )
 oOOoo0 += oOOoo0 >> 16
 oOOoo0 = socket . htons ( ~ oOOoo0 & 0xffff )
 if 31 - 31: iII111i / i1IIi . OoO0O00
 if 83 - 83: oO0o / iIii1I11I1II1 + i1IIi / iII111i
 if 47 - 47: oO0o + OoooooooOO . II111iiii . iII111i
 if 66 - 66: ooOoO0o * OoOoOO00
 # Splice the packed checksum into UDP header bytes 6-7.
 oOOoo0 = struct . pack ( "H" , oOOoo0 )
 I1iIIIiI = data [ 0 : 6 ] + oOOoo0 + data [ 8 : : ]
 return ( I1iIIIiI )
if 2 - 2: oO0o . I1Ii111 * Oo0Ooo + O0 - I11i * iIii1I11I1II1
if 12 - 12: o0oOOo0O0Ooo * I1Ii111 % II111iiii * i1IIi * iIii1I11I1II1
if 81 - 81: Oo0Ooo - I11i
if 24 - 24: OoooooooOO . OoO0O00 * II111iiii
if 59 - 59: I1Ii111 + OoO0O00 / OOooOOo
if 97 - 97: Oo0Ooo * iII111i % ooOoO0o . iII111i - I1Ii111 - OOooOOo
if 79 - 79: I1IiiI - ooOoO0o
def lisp_get_interface_address ( device ) :
 """Return the first IPv4 address configured on *device* as a
 lisp_address, or None when the device does not exist or has no
 IPv4 address.
 """
 if 37 - 37: IiII . Oo0Ooo * Oo0Ooo * II111iiii * O0
 if 83 - 83: IiII / I1Ii111
 if 64 - 64: OoO0O00 % IiII . I1Ii111 % OoO0O00 + I11i * IiII
 if 83 - 83: o0oOOo0O0Ooo % oO0o + I11i % i11iIiiIii + O0
 if ( device not in netifaces . interfaces ( ) ) : return ( None )
 if 65 - 65: iIii1I11I1II1 % oO0o + O0 / OoooooooOO
 if 52 - 52: Ii1I % OOooOOo * I1IiiI % I11i + OOooOOo / iII111i
 if 80 - 80: OoooooooOO + IiII
 if 95 - 95: I1Ii111 / oO0o * I1Ii111 - OoooooooOO * OoooooooOO % OoO0O00
 iI1 = netifaces . ifaddresses ( device )
 if ( iI1 . has_key ( netifaces . AF_INET ) == False ) : return ( None )
 if 12 - 12: I1Ii111 + OOooOOo + I11i . IiII / Ii1I
 if 29 - 29: IiII . ooOoO0o - II111iiii
 if 68 - 68: iIii1I11I1II1 + II111iiii / oO0o
 if 91 - 91: OoOoOO00 % iIii1I11I1II1 . I1IiiI
 O00ooooo00 = lisp_address ( LISP_AFI_IPV4 , "" , 32 , 0 )
 if 94 - 94: I11i - II111iiii . I1IiiI - Oo0Ooo + I1ii11iIi11i * I1ii11iIi11i
 # Return the first AF_INET entry; any additional addresses are ignored.
 for o0o0O00 in iI1 [ netifaces . AF_INET ] :
  I1iiIiiii1111 = o0o0O00 [ "addr" ]
  O00ooooo00 . store_address ( I1iiIiiii1111 )
  return ( O00ooooo00 )
 if 29 - 29: Ii1I - I1IiiI / I1IiiI * Ii1I * IiII . OOooOOo
 return ( None )
if 80 - 80: iIii1I11I1II1
if 23 - 23: II111iiii
if 71 - 71: I1Ii111 * Oo0Ooo . I11i
if 49 - 49: IiII * O0 . IiII
if 19 - 19: II111iiii - IiII
if 59 - 59: o0oOOo0O0Ooo * OoO0O00 - Ii1I . OOooOOo
if 89 - 89: OOooOOo
if 69 - 69: ooOoO0o - OoooooooOO * O0
if 84 - 84: ooOoO0o + i11iIiiIii - OOooOOo * ooOoO0o
if 33 - 33: ooOoO0o % i1IIi - oO0o . O0 / O0
if 96 - 96: OoooooooOO + IiII * O0
if 86 - 86: Ii1I
def lisp_get_input_interface ( packet ) :
 """Map the MAC header of *packet* to the local receiving interface(s).

 The first 12 bytes are formatted to hex: characters 0-11 are the
 destination MAC, 12-23 the source MAC.  Returns a 4-tuple of
 (device-list, source-mac-hex, dest-mac-hex, source-is-ours-flag),
 with ["?"] as the device list when neither MAC is local.
 """
 IiII1i1iI = lisp_format_packet ( packet [ 0 : 12 ] ) . replace ( " " , "" )
 O0OOO00 = IiII1i1iI [ 0 : 12 ]
 ooOOo0o = IiII1i1iI [ 12 : : ]
 if 50 - 50: II111iiii - I1Ii111 + iIii1I11I1II1 + iIii1I11I1II1
 # lisp_mymacs may not be populated yet -- treat lookup failure as False.
 try : OoooOOo = lisp_mymacs . has_key ( ooOOo0o )
 except : OoooOOo = False
 if 67 - 67: II111iiii / o0oOOo0O0Ooo . OOooOOo . OoooooooOO
 # Prefer the destination MAC match, then the source MAC match.
 if ( lisp_mymacs . has_key ( O0OOO00 ) ) : return ( lisp_mymacs [ O0OOO00 ] , ooOOo0o , O0OOO00 , OoooOOo )
 if ( OoooOOo ) : return ( lisp_mymacs [ ooOOo0o ] , ooOOo0o , O0OOO00 , OoooOOo )
 return ( [ "?" ] , ooOOo0o , O0OOO00 , OoooOOo )
if 19 - 19: IiII . I1ii11iIi11i / OoOoOO00
if 68 - 68: ooOoO0o / OoooooooOO * I11i / oO0o
if 88 - 88: o0oOOo0O0Ooo
if 1 - 1: OoooooooOO
if 48 - 48: ooOoO0o * OoOoOO00 - ooOoO0o - OOooOOo + OOooOOo
if 40 - 40: i11iIiiIii . iIii1I11I1II1
if 2 - 2: i1IIi * oO0o - oO0o + OoooooooOO % OoOoOO00 / OoOoOO00
if 3 - 3: OoooooooOO
def lisp_get_local_interfaces ( ) :
    """Create and register a lisp_interface for every interface that
    netifaces reports on this system."""
    for device in netifaces . interfaces ( ) :
        lisp_interface ( device ) . add_interface ( )
    return
if 48 - 48: I1IiiI * i11iIiiIii % II111iiii
if 20 - 20: i1IIi / I1IiiI * oO0o
if 85 - 85: II111iiii . ooOoO0o % OOooOOo % I11i
if 80 - 80: oO0o * I11i / iIii1I11I1II1 % oO0o / iIii1I11I1II1
if 42 - 42: i1IIi / i11iIiiIii . Oo0Ooo * iII111i . i11iIiiIii * O0
if 44 - 44: i1IIi . I1IiiI / i11iIiiIii + IiII
if 27 - 27: OOooOOo
def lisp_get_loopback_address ( ) :
 """Return the first address on the "lo" device other than 127.0.0.1
 (i.e. a secondary loopback address assigned to this node), or None
 when only 127.0.0.1 is configured.
 """
 for o0o0O00 in netifaces . ifaddresses ( "lo" ) [ netifaces . AF_INET ] :
  if ( o0o0O00 [ "peer" ] == "127.0.0.1" ) : continue
  return ( o0o0O00 [ "peer" ] )
 if 52 - 52: I1Ii111 % OoOoOO00 + iIii1I11I1II1 * oO0o . Ii1I
 return ( None )
if 95 - 95: iIii1I11I1II1 . IiII - OoooooooOO * OoO0O00 / o0oOOo0O0Ooo
if 74 - 74: oO0o
if 34 - 34: iII111i
if 44 - 44: i1IIi % I1IiiI % o0oOOo0O0Ooo
if 9 - 9: Oo0Ooo % OoooooooOO - Ii1I
if 43 - 43: OoO0O00 % OoO0O00
if 46 - 46: Oo0Ooo % iIii1I11I1II1 . iII111i . O0 * ooOoO0o / OoooooooOO
if 7 - 7: oO0o - O0 * I11i - o0oOOo0O0Ooo - II111iiii
def lisp_is_mac_string ( mac_str ) :
    """Return True when *mac_str* looks like a dashed MAC address
    ("xxxx-xxxx-xxxx"), optionally followed by a single "/<len>"."""
    mac = mac_str . split ( "/" ) [ 0 ] if ( mac_str . count ( "/" ) == 1 ) else mac_str
    return ( len ( mac ) == 14 and mac . count ( "-" ) == 2 )
if 41 - 41: I1IiiI - I1Ii111 % II111iiii . I1Ii111 - I11i
if 45 - 45: Ii1I - OOooOOo
if 70 - 70: OoO0O00 % I1IiiI / I1IiiI . I11i % ooOoO0o . II111iiii
if 10 - 10: Ii1I - i11iIiiIii . I1ii11iIi11i % i1IIi
if 78 - 78: iIii1I11I1II1 * Oo0Ooo . Oo0Ooo - OOooOOo . iIii1I11I1II1
if 30 - 30: ooOoO0o + ooOoO0o % IiII - o0oOOo0O0Ooo - I1ii11iIi11i
if 36 - 36: I11i % OOooOOo
if 72 - 72: I1IiiI / iII111i - O0 + I11i
def lisp_get_local_macs ( ) :
    """Walk all interfaces and record their MAC addresses in the global
    lisp_mymacs table, mapping mac-hex-string -> [device-name, ...].

    Interfaces whose names are not alphanumeric after stripping ":" and
    "-" are skipped, as are interfaces with no link-layer address or a
    short/bogus one (loopback reports an empty string).
    """
    for device in netifaces . interfaces ( ) :
        #
        # Fix: the original computed the ":"-stripped name and then
        # recomputed from the raw device name with only "-" stripped,
        # so ":" was never actually removed before the isalnum() test.
        #
        name = device . replace ( ":" , "" ) . replace ( "-" , "" )
        if ( name . isalnum ( ) == False ) : continue

        #
        # Some interfaces (e.g. gone-away tunnels) raise when queried.
        #
        try :
            addresses = netifaces . ifaddresses ( device )
        except :
            continue

        if ( addresses . has_key ( netifaces . AF_LINK ) == False ) : continue
        mac = addresses [ netifaces . AF_LINK ] [ 0 ] [ "addr" ]
        mac = mac . replace ( ":" , "" )

        #
        # Ignore short/malformed MACs.
        #
        if ( len ( mac ) < 12 ) : continue

        if ( lisp_mymacs . has_key ( mac ) == False ) : lisp_mymacs [ mac ] = [ ]
        lisp_mymacs [ mac ] . append ( device )

    lprint ( "Local MACs are: {}" . format ( lisp_mymacs ) )
    return
if 38 - 38: i1IIi . I1ii11iIi11i % Ii1I + iIii1I11I1II1 + O0
if 47 - 47: OoO0O00 + IiII / II111iiii
if 97 - 97: I1ii11iIi11i / I1IiiI % O0 + i1IIi - ooOoO0o
if 38 - 38: o0oOOo0O0Ooo % I1Ii111 + i11iIiiIii + iII111i + ooOoO0o / i11iIiiIii
if 94 - 94: iII111i - Oo0Ooo + oO0o
if 59 - 59: I11i . I1IiiI - iIii1I11I1II1 + iIii1I11I1II1
if 56 - 56: oO0o + ooOoO0o
if 32 - 32: II111iiii + OoOoOO00 % ooOoO0o / OoOoOO00 + I1ii11iIi11i
def lisp_get_local_rloc ( ) :
 """Return the IPv4 address (as a lisp_address) of the interface that
 carries the default route, or an empty 0-address when it cannot be
 determined.
 """
 # Take the default-route interface name from the last netstat column.
 IiI11I111 = commands . getoutput ( "netstat -rn | egrep 'default|0.0.0.0'" )
 if ( IiI11I111 == "" ) : return ( lisp_address ( LISP_AFI_IPV4 , "" , 32 , 0 ) )
 if 54 - 54: O0 - iII111i . OOooOOo % iII111i + iII111i
 if 36 - 36: OOooOOo % i11iIiiIii
 if 47 - 47: i1IIi + II111iiii . Oo0Ooo * oO0o . I11i / i1IIi
 if 50 - 50: I1Ii111 / i1IIi % OoooooooOO
 IiI11I111 = IiI11I111 . split ( "\n" ) [ 0 ]
 O0OoO0o = IiI11I111 . split ( ) [ - 1 ]
 if 83 - 83: I1ii11iIi11i * I1ii11iIi11i + OOooOOo
 o0o0O00 = ""
 # Fetch that interface's IPv4 address: ifconfig on macOS, ip on Linux
 # (falling back to a global address on the loopback device).
 OooooOoO = lisp_is_macos ( )
 if ( OooooOoO ) :
  IiI11I111 = commands . getoutput ( "ifconfig {} | egrep 'inet '" . format ( O0OoO0o ) )
  if ( IiI11I111 == "" ) : return ( lisp_address ( LISP_AFI_IPV4 , "" , 32 , 0 ) )
 else :
  o00OoOO0O0 = 'ip addr show | egrep "inet " | egrep "{}"' . format ( O0OoO0o )
  IiI11I111 = commands . getoutput ( o00OoOO0O0 )
  if ( IiI11I111 == "" ) :
   o00OoOO0O0 = 'ip addr show | egrep "inet " | egrep "global lo"'
   IiI11I111 = commands . getoutput ( o00OoOO0O0 )
  if 6 - 6: ooOoO0o + OOooOOo / Oo0Ooo + IiII % II111iiii / OoO0O00
  if ( IiI11I111 == "" ) : return ( lisp_address ( LISP_AFI_IPV4 , "" , 32 , 0 ) )
 if 45 - 45: OoooooooOO
 if 9 - 9: I11i . OoO0O00 * i1IIi . OoooooooOO
 if 32 - 32: OoOoOO00 . I1ii11iIi11i % I1IiiI - II111iiii
 if 11 - 11: O0 + I1IiiI
 if 80 - 80: oO0o % oO0o % O0 - i11iIiiIii . iII111i / O0
 if 13 - 13: I1IiiI + O0 - I1ii11iIi11i % Oo0Ooo / Ii1I . i1IIi
 o0o0O00 = ""
 IiI11I111 = IiI11I111 . split ( "\n" )
 if 60 - 60: Oo0Ooo . IiII % I1IiiI - I1Ii111
 # NOTE(review): this loop returns on its first iteration, so only the
 # first "inet" line is ever used and the trailing return is dead code.
 for oooOo in IiI11I111 :
  oOO0oo = oooOo . split ( ) [ 1 ]
  if ( OooooOoO == False ) : oOO0oo = oOO0oo . split ( "/" ) [ 0 ]
  oOoO0Oo0 = lisp_address ( LISP_AFI_IPV4 , oOO0oo , 32 , 0 )
  return ( oOoO0Oo0 )
 if 7 - 7: ooOoO0o + Ii1I
 return ( lisp_address ( LISP_AFI_IPV4 , o0o0O00 , 32 , 0 ) )
if 32 - 32: iIii1I11I1II1 % I1IiiI / i11iIiiIii + OOooOOo - o0oOOo0O0Ooo . iII111i
if 86 - 86: i1IIi / Ii1I * I1IiiI
if 67 - 67: I1ii11iIi11i * I1ii11iIi11i / oO0o * OoooooooOO + OoOoOO00
if 79 - 79: i1IIi
if 1 - 1: oO0o / i1IIi
if 74 - 74: I11i / OoooooooOO / Oo0Ooo * i11iIiiIii . II111iiii . OoooooooOO
if 59 - 59: i11iIiiIii . OoooooooOO / I11i * I1ii11iIi11i + OoooooooOO
if 3 - 3: i11iIiiIii * Oo0Ooo % iIii1I11I1II1 % I1IiiI * iII111i / OOooOOo
if 95 - 95: IiII * O0 * I1Ii111 . OoooooooOO % Oo0Ooo + I1ii11iIi11i
if 98 - 98: oO0o . OoooooooOO
if 54 - 54: O0 / IiII % ooOoO0o * i1IIi * O0
def lisp_get_local_addresses ( ) :
 """Select this node's local RLOC addresses and store them in the
 global lisp_myrlocs as [ipv4-lisp_address, ipv6-lisp_address, device].

 The env var LISP_ADDR_SELECT ("device", "device:n", or "n") can pin
 the interface and/or pick the n-th usable address on it.  Loopback,
 link-local, zero, and (unless a device was pinned) database-mapping
 EID addresses are skipped.  Returns True when an IPv4 RLOC was found.
 """
 global lisp_myrlocs
 if 48 - 48: o0oOOo0O0Ooo . oO0o % OoOoOO00 - OoOoOO00
 if 33 - 33: I11i % II111iiii + OoO0O00
 if 93 - 93: i1IIi . IiII / I1IiiI + IiII
 if 58 - 58: I1ii11iIi11i + O0 . Oo0Ooo + OoOoOO00 - OoO0O00 - OoOoOO00
 if 41 - 41: Oo0Ooo / i1IIi / Oo0Ooo - iII111i . o0oOOo0O0Ooo
 if 65 - 65: O0 * i11iIiiIii . OoooooooOO / I1IiiI / iII111i
 if 69 - 69: ooOoO0o % ooOoO0o
 if 76 - 76: i11iIiiIii * iII111i / OoO0O00 % I1ii11iIi11i + OOooOOo
 if 48 - 48: iIii1I11I1II1 % i1IIi + OoOoOO00 % o0oOOo0O0Ooo
 if 79 - 79: OoOoOO00 % I1IiiI % Ii1I / i1IIi % OoO0O00
 # Parse LISP_ADDR_SELECT into an optional device and 1-based index.
 oo0o00OO = None
 iI11I = 1
 oOoo00o0oOO = os . getenv ( "LISP_ADDR_SELECT" )
 if ( oOoo00o0oOO != None and oOoo00o0oOO != "" ) :
  oOoo00o0oOO = oOoo00o0oOO . split ( ":" )
  if ( len ( oOoo00o0oOO ) == 2 ) :
   oo0o00OO = oOoo00o0oOO [ 0 ]
   iI11I = oOoo00o0oOO [ 1 ]
  else :
   if ( oOoo00o0oOO [ 0 ] . isdigit ( ) ) :
    iI11I = oOoo00o0oOO [ 0 ]
   else :
    oo0o00OO = oOoo00o0oOO [ 0 ]
 if 61 - 61: i1IIi * o0oOOo0O0Ooo + iIii1I11I1II1 / OoOoOO00 - O0 * iIii1I11I1II1
 if 56 - 56: OOooOOo
 iI11I = 1 if ( iI11I == "" ) else int ( iI11I )
 if 49 - 49: ooOoO0o . II111iiii
 if 24 - 24: O0 . OoooooooOO - OoO0O00 * OoooooooOO
 # Result triple: [ipv4 lisp_address, ipv6 lisp_address, device name].
 Ii11iiI = [ None , None , None ]
 o0OO0oooo = lisp_address ( LISP_AFI_IPV4 , "" , 32 , 0 )
 I11II1i1 = lisp_address ( LISP_AFI_IPV6 , "" , 128 , 0 )
 IiI1ii11I1 = None
 if 19 - 19: I1Ii111 + IiII / oO0o / II111iiii
 for O0OoO0o in netifaces . interfaces ( ) :
  if ( oo0o00OO != None and oo0o00OO != O0OoO0o ) : continue
  iI1 = netifaces . ifaddresses ( O0OoO0o )
  if ( iI1 == { } ) : continue
  if 92 - 92: i1IIi % ooOoO0o + ooOoO0o - iIii1I11I1II1 . Ii1I
  if 33 - 33: o0oOOo0O0Ooo / O0 + OOooOOo
  if 75 - 75: IiII % i11iIiiIii + iIii1I11I1II1
  if 92 - 92: OoOoOO00 % O0
  IiI1ii11I1 = lisp_get_interface_instance_id ( O0OoO0o , None )
  if 55 - 55: iIii1I11I1II1 * iII111i
  if 85 - 85: iIii1I11I1II1 . II111iiii
  if 54 - 54: Ii1I . OoooooooOO % Oo0Ooo
  if 22 - 22: OOooOOo
  # Pick the iI11I-th usable IPv4 address on this interface.
  if ( iI1 . has_key ( netifaces . AF_INET ) ) :
   IIIiI1ii1IIi = iI1 [ netifaces . AF_INET ]
   I1I11Iiii111 = 0
   for o0o0O00 in IIIiI1ii1IIi :
    o0OO0oooo . store_address ( o0o0O00 [ "addr" ] )
    if ( o0OO0oooo . is_ipv4_loopback ( ) ) : continue
    if ( o0OO0oooo . is_ipv4_link_local ( ) ) : continue
    if ( o0OO0oooo . address == 0 ) : continue
    I1I11Iiii111 += 1
    o0OO0oooo . instance_id = IiI1ii11I1
    if ( oo0o00OO == None and
    lisp_db_for_lookups . lookup_cache ( o0OO0oooo , False ) ) : continue
    Ii11iiI [ 0 ] = o0OO0oooo
    if ( I1I11Iiii111 == iI11I ) : break
  if 38 - 38: OoO0O00 . ooOoO0o
  if 34 - 34: i1IIi % IiII
  # Pick the iI11I-th usable IPv6 address on this interface.
  if ( iI1 . has_key ( netifaces . AF_INET6 ) ) :
   o0O0oo0o = iI1 [ netifaces . AF_INET6 ]
   I1I11Iiii111 = 0
   for o0o0O00 in o0O0oo0o :
    I1iiIiiii1111 = o0o0O00 [ "addr" ]
    I11II1i1 . store_address ( I1iiIiiii1111 )
    if ( I11II1i1 . is_ipv6_string_link_local ( I1iiIiiii1111 ) ) : continue
    if ( I11II1i1 . is_ipv6_loopback ( ) ) : continue
    I1I11Iiii111 += 1
    I11II1i1 . instance_id = IiI1ii11I1
    if ( oo0o00OO == None and
    lisp_db_for_lookups . lookup_cache ( I11II1i1 , False ) ) : continue
    Ii11iiI [ 1 ] = I11II1i1
    if ( I1I11Iiii111 == iI11I ) : break
  if 80 - 80: OoooooooOO / iIii1I11I1II1 + I1ii11iIi11i / i1IIi / o0oOOo0O0Ooo
  if 94 - 94: i1IIi
  if 36 - 36: I1IiiI + Oo0Ooo
  if 46 - 46: iII111i
  if 65 - 65: i1IIi . I1ii11iIi11i / ooOoO0o
  if 11 - 11: IiII * ooOoO0o / ooOoO0o - OOooOOo
  # An IPv4 RLOC is required; keep scanning interfaces until found.
  if ( Ii11iiI [ 0 ] == None ) : continue
  if 68 - 68: I1IiiI % IiII - IiII / I1IiiI + I1ii11iIi11i - Oo0Ooo
  Ii11iiI [ 2 ] = O0OoO0o
  break
 if 65 - 65: ooOoO0o - i1IIi
 if 62 - 62: I11i / oO0o % Oo0Ooo . OoooooooOO / i11iIiiIii / I1Ii111
 # Log the selection (colored for terminal output) and publish it.
 OooO0O0Ooo = Ii11iiI [ 0 ] . print_address_no_iid ( ) if Ii11iiI [ 0 ] else "none"
 oO0O = Ii11iiI [ 1 ] . print_address_no_iid ( ) if Ii11iiI [ 1 ] else "none"
 O0OoO0o = Ii11iiI [ 2 ] if Ii11iiI [ 2 ] else "none"
 if 25 - 25: oO0o % I1IiiI + i11iIiiIii + O0 * OoooooooOO
 oo0o00OO = " (user selected)" if oo0o00OO != None else ""
 if 64 - 64: i1IIi
 OooO0O0Ooo = red ( OooO0O0Ooo , False )
 oO0O = red ( oO0O , False )
 O0OoO0o = bold ( O0OoO0o , False )
 lprint ( "Local addresses are IPv4: {}, IPv6: {} from device {}{}, iid {}" . format ( OooO0O0Ooo , oO0O , O0OoO0o , oo0o00OO , IiI1ii11I1 ) )
 if 10 - 10: I1Ii111 % O0 / I1IiiI % I11i
 if 25 - 25: II111iiii / OoO0O00
 lisp_myrlocs = Ii11iiI
 return ( ( Ii11iiI [ 0 ] != None ) )
if 64 - 64: O0 % ooOoO0o
if 40 - 40: o0oOOo0O0Ooo + I11i
if 77 - 77: i11iIiiIii % IiII + I1Ii111 % OoooooooOO - I11i
if 26 - 26: Oo0Ooo + O0 - iIii1I11I1II1
if 47 - 47: OoooooooOO
if 2 - 2: OoOoOO00 % I1Ii111 * Oo0Ooo * OoOoOO00
if 65 - 65: i11iIiiIii + Oo0Ooo * OoooooooOO - OoO0O00
if 26 - 26: o0oOOo0O0Ooo % OOooOOo + OOooOOo % I11i * i11iIiiIii / iII111i
if 64 - 64: oO0o % OoOoOO00 / II111iiii % ooOoO0o - iII111i
def lisp_get_all_addresses():
    #
    # Return every usable unicast address configured on this system: all
    # IPv4 addresses except loopback, then all IPv6 addresses except
    # loopback and link-local, in netifaces interface order.
    #
    address_list = []
    for device in netifaces.interfaces():
        try:
            ifa = netifaces.ifaddresses(device)
        except:
            continue
        #endtry

        #
        # IPv4 addresses, skipping loopback.
        #
        if (ifa.has_key(netifaces.AF_INET)):
            for entry in ifa[netifaces.AF_INET]:
                address = entry["addr"]
                if (address.find("127.0.0.1") != -1): continue
                address_list.append(address)
            #endfor
        #endif

        #
        # IPv6 addresses, skipping loopback and link-local.
        #
        if (ifa.has_key(netifaces.AF_INET6)):
            for entry in ifa[netifaces.AF_INET6]:
                address = entry["addr"]
                if (address == "::1"): continue
                if (address[0:5] == "fe80:"): continue
                address_list.append(address)
            #endfor
        #endif
    #endfor
    return(address_list)
if 99 - 99: o0oOOo0O0Ooo
if 1 - 1: Ii1I * OoOoOO00 * OoO0O00 + Oo0Ooo
if 90 - 90: I1Ii111 % Oo0Ooo - Oo0Ooo . iIii1I11I1II1 / OOooOOo + I11i
if 89 - 89: oO0o
if 87 - 87: iII111i % Oo0Ooo
if 62 - 62: OoO0O00 + ooOoO0o / iII111i * i11iIiiIii
if 37 - 37: iII111i
if 33 - 33: OoO0O00 - O0 - OoO0O00
def lisp_get_all_multicast_rles():
    #
    # Grep ./lisp.config for configured "rle-address" values and return
    # the ones that are IPv4 multicast (first octet in 224..239).
    # Commented-out config lines are ignored.
    #
    rles = []
    grep_output = commands.getoutput('egrep "rle-address =" ./lisp.config')
    if (grep_output == ""): return(rles)

    for line in grep_output.split("\n"):
        if (line[0] == "#"): continue
        rle = line.split("rle-address = ")[1]
        first_octet = int(rle.split(".")[0])
        if (first_octet >= 224 and first_octet < 240): rles.append(rle)
    #endfor
    return(rles)
if 32 - 32: Ii1I % oO0o - i1IIi
if 40 - 40: iIii1I11I1II1 + iII111i * OoOoOO00 + oO0o
if 15 - 15: I11i % I1IiiI - iIii1I11I1II1 * ooOoO0o
if 71 - 71: OoOoOO00 % Oo0Ooo % ooOoO0o
if 34 - 34: I11i / I11i % IiII . OoOoOO00 / Oo0Ooo
if 99 - 99: ooOoO0o * I1IiiI - ooOoO0o % Ii1I
if 40 - 40: OOooOOo / IiII / iIii1I11I1II1 + Ii1I
if 59 - 59: I11i * OoooooooOO + OOooOOo . iIii1I11I1II1 / i1IIi
class lisp_packet ( ) :
def __init__ ( self , packet ) :
self . outer_source = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . outer_dest = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . outer_tos = 0
self . outer_ttl = 0
self . udp_sport = 0
self . udp_dport = 0
self . udp_length = 0
self . udp_checksum = 0
self . inner_source = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . inner_dest = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . inner_tos = 0
self . inner_ttl = 0
self . inner_protocol = 0
self . inner_sport = 0
self . inner_dport = 0
self . lisp_header = lisp_data_header ( )
self . packet = packet
self . inner_version = 0
self . outer_version = 0
self . encap_port = LISP_DATA_PORT
self . inner_is_fragment = False
self . packet_error = ""
self . gleaned_dest = False
if 75 - 75: I11i . OOooOOo - iIii1I11I1II1 * OoO0O00 * iII111i
if 93 - 93: ooOoO0o
def encode ( self , nonce ) :
if 18 - 18: ooOoO0o
if 66 - 66: oO0o * i11iIiiIii + OoOoOO00 / OOooOOo
if 96 - 96: OOooOOo + OOooOOo % IiII % OOooOOo
if 28 - 28: iIii1I11I1II1 + OoOoOO00 . o0oOOo0O0Ooo % i11iIiiIii
if 58 - 58: I11i / OoooooooOO % oO0o + OoO0O00
if ( self . outer_source . is_null ( ) ) : return ( None )
if 58 - 58: O0
if 91 - 91: iII111i / I1ii11iIi11i . iII111i - o0oOOo0O0Ooo + I1ii11iIi11i
if 72 - 72: Ii1I . IiII * I1ii11iIi11i / I1ii11iIi11i / iII111i
if 13 - 13: i1IIi
if 17 - 17: i11iIiiIii * o0oOOo0O0Ooo * o0oOOo0O0Ooo + OoO0O00
if 95 - 95: I1IiiI
if ( nonce == None ) :
self . lisp_header . nonce ( lisp_get_data_nonce ( ) )
elif ( self . lisp_header . is_request_nonce ( nonce ) ) :
self . lisp_header . request_nonce ( nonce )
else :
self . lisp_header . nonce ( nonce )
if 95 - 95: OOooOOo % I1ii11iIi11i + o0oOOo0O0Ooo % ooOoO0o
self . lisp_header . instance_id ( self . inner_dest . instance_id )
if 36 - 36: O0 / i1IIi % II111iiii / iII111i
if 96 - 96: Oo0Ooo / oO0o . II111iiii . Oo0Ooo
if 91 - 91: II111iiii . OOooOOo + o0oOOo0O0Ooo
if 8 - 8: OOooOOo * Oo0Ooo / iII111i - OoO0O00 - OoooooooOO
if 100 - 100: oO0o . iIii1I11I1II1 . iIii1I11I1II1
if 55 - 55: oO0o
self . lisp_header . key_id ( 0 )
i1iiI = ( self . lisp_header . get_instance_id ( ) == 0xffffff )
if ( lisp_data_plane_security and i1iiI == False ) :
I1iiIiiii1111 = self . outer_dest . print_address_no_iid ( ) + ":" + str ( self . encap_port )
if 97 - 97: I1Ii111 . I11i / I1IiiI
if ( lisp_crypto_keys_by_rloc_encap . has_key ( I1iiIiiii1111 ) ) :
o00OO0o0 = lisp_crypto_keys_by_rloc_encap [ I1iiIiiii1111 ]
if ( o00OO0o0 [ 1 ] ) :
o00OO0o0 [ 1 ] . use_count += 1
i1II1IiiIi , ii111iI1i1 = self . encrypt ( o00OO0o0 [ 1 ] , I1iiIiiii1111 )
if ( ii111iI1i1 ) : self . packet = i1II1IiiIi
if 80 - 80: OoO0O00 / IiII * I1IiiI % IiII
if 95 - 95: O0 / I11i . I1Ii111
if 17 - 17: I11i
if 56 - 56: ooOoO0o * o0oOOo0O0Ooo + I11i
if 48 - 48: IiII * OoO0O00 % I1Ii111 - I11i
if 72 - 72: i1IIi % ooOoO0o % IiII % oO0o - oO0o
if 97 - 97: o0oOOo0O0Ooo * O0 / o0oOOo0O0Ooo * OoO0O00 * Oo0Ooo
if 38 - 38: I1Ii111
self . udp_checksum = 0
if ( self . encap_port == LISP_DATA_PORT ) :
if ( lisp_crypto_ephem_port == None ) :
if ( self . gleaned_dest ) :
self . udp_sport = LISP_DATA_PORT
else :
self . hash_packet ( )
if 25 - 25: iIii1I11I1II1 % II111iiii / I11i / I1ii11iIi11i
else :
self . udp_sport = lisp_crypto_ephem_port
if 22 - 22: oO0o * iII111i
else :
self . udp_sport = LISP_DATA_PORT
if 4 - 4: OoOoOO00 - oO0o + I1IiiI
self . udp_dport = self . encap_port
self . udp_length = len ( self . packet ) + 16
if 36 - 36: IiII
if 19 - 19: OoOoOO00 . o0oOOo0O0Ooo . OoooooooOO
if 13 - 13: OOooOOo . Oo0Ooo / II111iiii
if 43 - 43: iIii1I11I1II1 % OoO0O00
if ( self . outer_version == 4 ) :
oOO0ooi1iiIIiII1 = socket . htons ( self . udp_sport )
o0O00OooooO = socket . htons ( self . udp_dport )
else :
oOO0ooi1iiIIiII1 = self . udp_sport
o0O00OooooO = self . udp_dport
if 77 - 77: I1IiiI % ooOoO0o
if 74 - 74: OoOoOO00 / i1IIi % OoooooooOO
o0O00OooooO = socket . htons ( self . udp_dport ) if self . outer_version == 4 else self . udp_dport
if 52 - 52: IiII % ooOoO0o
if 25 - 25: I11i / I11i % OoooooooOO - I1ii11iIi11i * oO0o
I1iIIIiI = struct . pack ( "HHHH" , oOO0ooi1iiIIiII1 , o0O00OooooO , socket . htons ( self . udp_length ) ,
self . udp_checksum )
if 23 - 23: i11iIiiIii
if 100 - 100: oO0o + O0 . I1IiiI + i1IIi - OoOoOO00 + o0oOOo0O0Ooo
if 65 - 65: II111iiii / Oo0Ooo
if 42 - 42: i11iIiiIii . O0
o0oo0Oo = self . lisp_header . encode ( )
if 10 - 10: I1ii11iIi11i
if 87 - 87: Oo0Ooo % Ii1I
if 53 - 53: i1IIi - IiII + iIii1I11I1II1
if 75 - 75: I1ii11iIi11i
if 92 - 92: I11i / O0 * I1IiiI - I11i
if ( self . outer_version == 4 ) :
oooOo00000 = socket . htons ( self . udp_length + 20 )
IiI1IiI1iiI1 = socket . htons ( 0x4000 )
O000o0 = struct . pack ( "BBHHHBBH" , 0x45 , self . outer_tos , oooOo00000 , 0xdfdf ,
IiI1IiI1iiI1 , self . outer_ttl , 17 , 0 )
O000o0 += self . outer_source . pack_address ( )
O000o0 += self . outer_dest . pack_address ( )
O000o0 = lisp_ip_checksum ( O000o0 )
elif ( self . outer_version == 6 ) :
O000o0 = ""
if 39 - 39: II111iiii + OoooooooOO / OOooOOo / Ii1I * OoOoOO00
if 71 - 71: i1IIi / I1ii11iIi11i % i11iIiiIii / i1IIi
if 4 - 4: IiII
if 93 - 93: oO0o % i1IIi
if 83 - 83: I1IiiI . Oo0Ooo - I11i . o0oOOo0O0Ooo
if 73 - 73: I1IiiI - iII111i . iII111i
if 22 - 22: ooOoO0o / ooOoO0o - Ii1I % I11i . OOooOOo + IiII
else :
return ( None )
if 64 - 64: i1IIi % I1ii11iIi11i / Ii1I % OoooooooOO
if 24 - 24: I1Ii111 + OoooooooOO . IiII / OoOoOO00 / I11i
self . packet = O000o0 + I1iIIIiI + o0oo0Oo + self . packet
return ( self )
if 65 - 65: OoooooooOO
if 18 - 18: O0 - i1IIi . I1Ii111
def cipher_pad ( self , packet ) :
o00OOo00 = len ( packet )
if ( ( o00OOo00 % 16 ) != 0 ) :
oooO = ( ( o00OOo00 / 16 ) + 1 ) * 16
packet = packet . ljust ( oooO )
if 2 - 2: iIii1I11I1II1 * I1IiiI % i1IIi % I1ii11iIi11i + OoooooooOO + I1IiiI
return ( packet )
if 16 - 16: OOooOOo
if 63 - 63: iII111i
def encrypt ( self , key , addr_str ) :
if ( key == None or key . shared_key == None ) :
return ( [ self . packet , False ] )
if 11 - 11: iII111i - iIii1I11I1II1
if 92 - 92: OoO0O00
if 15 - 15: IiII / IiII + iIii1I11I1II1 % OoooooooOO
if 12 - 12: ooOoO0o
if 36 - 36: I1Ii111 . IiII * OoooooooOO - o0oOOo0O0Ooo
i1II1IiiIi = self . cipher_pad ( self . packet )
O0o = key . get_iv ( )
if 82 - 82: I1Ii111 . I1Ii111 - iII111i
OOOO0O00o = lisp_get_timestamp ( )
o0II11I = None
if ( key . cipher_suite == LISP_CS_25519_CHACHA ) :
Iii1iIiI1I1I1 = chacha . ChaCha ( key . encrypt_key , O0o ) . encrypt
elif ( key . cipher_suite == LISP_CS_25519_GCM ) :
oOOO0OO = binascii . unhexlify ( key . encrypt_key )
try :
I11ii1iI11 = AES . new ( oOOO0OO , AES . MODE_GCM , O0o )
Iii1iIiI1I1I1 = I11ii1iI11 . encrypt
o0II11I = I11ii1iI11 . digest
except :
lprint ( "You need AES-GCM, do a 'pip install pycryptodome'" )
return ( [ self . packet , False ] )
if 6 - 6: IiII * II111iiii % iIii1I11I1II1
else :
oOOO0OO = binascii . unhexlify ( key . encrypt_key )
Iii1iIiI1I1I1 = AES . new ( oOOO0OO , AES . MODE_CBC , O0o ) . encrypt
if 86 - 86: i1IIi * O0 % ooOoO0o . Oo0Ooo % ooOoO0o . Oo0Ooo
if 71 - 71: iII111i . i11iIiiIii * O0 + O0
Oo0 = Iii1iIiI1I1I1 ( i1II1IiiIi )
if 75 - 75: OoO0O00 / Ii1I + II111iiii % IiII . i11iIiiIii
if ( Oo0 == None ) : return ( [ self . packet , False ] )
OOOO0O00o = int ( str ( time . time ( ) - OOOO0O00o ) . split ( "." ) [ 1 ] [ 0 : 6 ] )
if 76 - 76: iII111i . IiII % iII111i - I1Ii111
if 51 - 51: OoooooooOO + o0oOOo0O0Ooo * iIii1I11I1II1 * oO0o / i1IIi
if 19 - 19: iII111i - OoOoOO00 % oO0o / OoooooooOO % iII111i
if 65 - 65: O0 . oO0o
if 85 - 85: II111iiii
if 55 - 55: I1ii11iIi11i
if ( o0II11I != None ) : Oo0 += o0II11I ( )
if 76 - 76: oO0o - i11iIiiIii
if 27 - 27: I1ii11iIi11i - i11iIiiIii % I1Ii111 / Oo0Ooo . Oo0Ooo / OoooooooOO
if 76 - 76: I11i * OoO0O00 . iIii1I11I1II1 % OoooooooOO % I1ii11iIi11i
if 39 - 39: II111iiii * OoOoOO00 . O0 * I11i
if 89 - 89: Ii1I - ooOoO0o . I11i - I1Ii111 - I1IiiI
self . lisp_header . key_id ( key . key_id )
o0oo0Oo = self . lisp_header . encode ( )
if 79 - 79: IiII + IiII + Ii1I
iiiII1i1I = key . do_icv ( o0oo0Oo + O0o + Oo0 , O0o )
if 97 - 97: O0 . I1Ii111 / II111iiii . O0 + OoooooooOO
oo0OooO = 4 if ( key . do_poly ) else 8
if 4 - 4: IiII + iIii1I11I1II1 * iII111i + Oo0Ooo * o0oOOo0O0Ooo % II111iiii
OO0o0o0oo = bold ( "Encrypt" , False )
iIiII1 = bold ( key . cipher_suite_string , False )
addr_str = "RLOC: " + red ( addr_str , False )
i111iii1I1 = "poly" if key . do_poly else "sha256"
i111iii1I1 = bold ( i111iii1I1 , False )
ii = "ICV({}): 0x{}...{}" . format ( i111iii1I1 , iiiII1i1I [ 0 : oo0OooO ] , iiiII1i1I [ - oo0OooO : : ] )
dprint ( "{} for key-id: {}, {}, {}, {}-time: {} usec" . format ( OO0o0o0oo , key . key_id , addr_str , ii , iIiII1 , OOOO0O00o ) )
if 47 - 47: i11iIiiIii / Oo0Ooo - Oo0Ooo * OoO0O00
if 48 - 48: IiII
iiiII1i1I = int ( iiiII1i1I , 16 )
if ( key . do_poly ) :
OOooO = byte_swap_64 ( ( iiiII1i1I >> 64 ) & LISP_8_64_MASK )
II1i1i1I1iII = byte_swap_64 ( iiiII1i1I & LISP_8_64_MASK )
iiiII1i1I = struct . pack ( "QQ" , OOooO , II1i1i1I1iII )
else :
OOooO = byte_swap_64 ( ( iiiII1i1I >> 96 ) & LISP_8_64_MASK )
II1i1i1I1iII = byte_swap_64 ( ( iiiII1i1I >> 32 ) & LISP_8_64_MASK )
I1I = socket . htonl ( iiiII1i1I & 0xffffffff )
iiiII1i1I = struct . pack ( "QQI" , OOooO , II1i1i1I1iII , I1I )
if 70 - 70: Ii1I . O0 - OOooOOo
if 62 - 62: I1Ii111 * I11i
return ( [ O0o + Oo0 + iiiII1i1I , True ] )
if 74 - 74: OoOoOO00 . iIii1I11I1II1
if 87 - 87: ooOoO0o
def decrypt ( self , packet , header_length , key , addr_str ) :
if 41 - 41: OoOoOO00 . iIii1I11I1II1 % ooOoO0o + O0
if 22 - 22: o0oOOo0O0Ooo + Oo0Ooo . ooOoO0o + I1ii11iIi11i * iII111i . i11iIiiIii
if 90 - 90: OOooOOo * OoOoOO00 - Oo0Ooo + o0oOOo0O0Ooo
if 53 - 53: OoooooooOO . OoooooooOO + o0oOOo0O0Ooo - iII111i + OOooOOo
if 44 - 44: I1Ii111 - IiII
if 100 - 100: oO0o . OoO0O00 - Ii1I + O0 * OoO0O00
if ( key . do_poly ) :
OOooO , II1i1i1I1iII = struct . unpack ( "QQ" , packet [ - 16 : : ] )
oOoOO = byte_swap_64 ( OOooO ) << 64
oOoOO |= byte_swap_64 ( II1i1i1I1iII )
oOoOO = lisp_hex_string ( oOoOO ) . zfill ( 32 )
packet = packet [ 0 : - 16 ]
oo0OooO = 4
i11 = bold ( "poly" , False )
else :
OOooO , II1i1i1I1iII , I1I = struct . unpack ( "QQI" , packet [ - 20 : : ] )
oOoOO = byte_swap_64 ( OOooO ) << 96
oOoOO |= byte_swap_64 ( II1i1i1I1iII ) << 32
oOoOO |= socket . htonl ( I1I )
oOoOO = lisp_hex_string ( oOoOO ) . zfill ( 40 )
packet = packet [ 0 : - 20 ]
oo0OooO = 8
i11 = bold ( "sha" , False )
if 42 - 42: I11i % Oo0Ooo . II111iiii / II111iiii * iII111i
o0oo0Oo = self . lisp_header . encode ( )
if 80 - 80: I1Ii111 / i11iIiiIii + OoooooooOO
if 38 - 38: I1ii11iIi11i % ooOoO0o + i1IIi * OoooooooOO * oO0o
if 83 - 83: iIii1I11I1II1 - ooOoO0o - I1Ii111 / OoO0O00 - O0
if 81 - 81: Ii1I - oO0o * I1ii11iIi11i / I1Ii111
if ( key . cipher_suite == LISP_CS_25519_CHACHA ) :
iIIi11i = 8
iIiII1 = bold ( "chacha" , False )
elif ( key . cipher_suite == LISP_CS_25519_GCM ) :
iIIi11i = 12
iIiII1 = bold ( "aes-gcm" , False )
else :
iIIi11i = 16
iIiII1 = bold ( "aes-cbc" , False )
if 39 - 39: OoOoOO00 . Oo0Ooo - IiII / o0oOOo0O0Ooo / i1IIi
O0o = packet [ 0 : iIIi11i ]
if 79 - 79: OOooOOo % I1Ii111 / oO0o - iIii1I11I1II1 - OoOoOO00
if 60 - 60: II111iiii
if 90 - 90: OoOoOO00
if 37 - 37: OoOoOO00 + O0 . O0 * Oo0Ooo % I1Ii111 / iII111i
iIIi = key . do_icv ( o0oo0Oo + packet , O0o )
if 98 - 98: oO0o + OoooooooOO - I1Ii111 % i11iIiiIii / o0oOOo0O0Ooo . OoooooooOO
ooo0 = "0x{}...{}" . format ( oOoOO [ 0 : oo0OooO ] , oOoOO [ - oo0OooO : : ] )
o0OOo0O = "0x{}...{}" . format ( iIIi [ 0 : oo0OooO ] , iIIi [ - oo0OooO : : ] )
if 52 - 52: OoooooooOO / IiII % II111iiii
if ( iIIi != oOoOO ) :
self . packet_error = "ICV-error"
Ii11I1I11II = iIiII1 + "/" + i11
IIiiiI = bold ( "ICV failed ({})" . format ( Ii11I1I11II ) , False )
ii = "packet-ICV {} != computed-ICV {}" . format ( ooo0 , o0OOo0O )
dprint ( ( "{} from RLOC {}, receive-port: {}, key-id: {}, " + "packet dropped, {}" ) . format ( IIiiiI , red ( addr_str , False ) ,
# iII111i / ooOoO0o - i11iIiiIii + OoooooooOO
self . udp_sport , key . key_id , ii ) )
dprint ( "{}" . format ( key . print_keys ( ) ) )
if 33 - 33: O0 + Oo0Ooo - iIii1I11I1II1 % i11iIiiIii / I1IiiI
if 47 - 47: I1ii11iIi11i * oO0o + iIii1I11I1II1 - oO0o / IiII
if 86 - 86: IiII
if 43 - 43: I1IiiI / iII111i / ooOoO0o + iIii1I11I1II1 + OoooooooOO
if 33 - 33: II111iiii - IiII - ooOoO0o
if 92 - 92: OoO0O00 * IiII
lisp_retry_decap_keys ( addr_str , o0oo0Oo + packet , O0o , oOoOO )
return ( [ None , False ] )
if 92 - 92: oO0o
if 7 - 7: iII111i
if 73 - 73: OoO0O00 % I1ii11iIi11i
if 32 - 32: OOooOOo + iII111i + iIii1I11I1II1 * Oo0Ooo
if 62 - 62: i11iIiiIii
packet = packet [ iIIi11i : : ]
if 2 - 2: I1IiiI
if 69 - 69: OoooooooOO / Oo0Ooo * I1Ii111
if 99 - 99: II111iiii * iIii1I11I1II1 % O0 * oO0o / II111iiii % OoooooooOO
if 14 - 14: IiII . IiII % ooOoO0o
OOOO0O00o = lisp_get_timestamp ( )
if ( key . cipher_suite == LISP_CS_25519_CHACHA ) :
iII = chacha . ChaCha ( key . encrypt_key , O0o ) . decrypt
elif ( key . cipher_suite == LISP_CS_25519_GCM ) :
oOOO0OO = binascii . unhexlify ( key . encrypt_key )
try :
iII = AES . new ( oOOO0OO , AES . MODE_GCM , O0o ) . decrypt
except :
self . packet_error = "no-decrypt-key"
lprint ( "You need AES-GCM, do a 'pip install pycryptodome'" )
return ( [ None , False ] )
if 67 - 67: I11i / II111iiii / O0 / IiII - I11i - i1IIi
else :
if ( ( len ( packet ) % 16 ) != 0 ) :
dprint ( "Ciphertext not multiple of 16 bytes, packet dropped" )
return ( [ None , False ] )
if 8 - 8: i11iIiiIii . iII111i / iIii1I11I1II1 / I1ii11iIi11i / IiII - Ii1I
oOOO0OO = binascii . unhexlify ( key . encrypt_key )
iII = AES . new ( oOOO0OO , AES . MODE_CBC , O0o ) . decrypt
if 32 - 32: o0oOOo0O0Ooo . i1IIi * Oo0Ooo
if 98 - 98: Ii1I - II111iiii / I1IiiI . oO0o * IiII . I11i
IiIIIIi = iII ( packet )
OOOO0O00o = int ( str ( time . time ( ) - OOOO0O00o ) . split ( "." ) [ 1 ] [ 0 : 6 ] )
if 51 - 51: II111iiii . oO0o . OoO0O00 % II111iiii
if 41 - 41: OoOoOO00 - OOooOOo + ooOoO0o - i1IIi
if 6 - 6: II111iiii
if 7 - 7: i1IIi
OO0o0o0oo = bold ( "Decrypt" , False )
addr_str = "RLOC: " + red ( addr_str , False )
i111iii1I1 = "poly" if key . do_poly else "sha256"
i111iii1I1 = bold ( i111iii1I1 , False )
ii = "ICV({}): {}" . format ( i111iii1I1 , ooo0 )
dprint ( "{} for key-id: {}, {}, {} (good), {}-time: {} usec" . format ( OO0o0o0oo , key . key_id , addr_str , ii , iIiII1 , OOOO0O00o ) )
if 63 - 63: iIii1I11I1II1 + IiII % i1IIi / I1IiiI % II111iiii
if 60 - 60: o0oOOo0O0Ooo . OoOoOO00 % I1Ii111 / I1IiiI / O0
if 19 - 19: i11iIiiIii . I1IiiI + II111iiii / OOooOOo . I1ii11iIi11i * ooOoO0o
if 59 - 59: iIii1I11I1II1 / I1ii11iIi11i % ooOoO0o
if 84 - 84: iIii1I11I1II1 / I1IiiI . OoOoOO00 % I11i
if 99 - 99: Oo0Ooo + i11iIiiIii
if 36 - 36: Ii1I * I1Ii111 * iIii1I11I1II1 - I11i % i11iIiiIii
self . packet = self . packet [ 0 : header_length ]
return ( [ IiIIIIi , True ] )
if 98 - 98: iIii1I11I1II1 - i1IIi + ooOoO0o % I11i + ooOoO0o / oO0o
if 97 - 97: IiII % ooOoO0o + II111iiii - IiII % OoO0O00 + ooOoO0o
def fragment_outer ( self , outer_hdr , inner_packet ) :
iIIII11i = 1000
if 97 - 97: OoOoOO00 % ooOoO0o . oO0o
if 67 - 67: Ii1I / i11iIiiIii
if 5 - 5: O0 - I1IiiI
if 44 - 44: II111iiii . II111iiii + OOooOOo * Ii1I
if 16 - 16: II111iiii
oooOO0OO0 = [ ]
I11iiIi1i1 = 0
o00OOo00 = len ( inner_packet )
while ( I11iiIi1i1 < o00OOo00 ) :
IiI1IiI1iiI1 = inner_packet [ I11iiIi1i1 : : ]
if ( len ( IiI1IiI1iiI1 ) > iIIII11i ) : IiI1IiI1iiI1 = IiI1IiI1iiI1 [ 0 : iIIII11i ]
oooOO0OO0 . append ( IiI1IiI1iiI1 )
I11iiIi1i1 += len ( IiI1IiI1iiI1 )
if 10 - 10: I1IiiI / I1ii11iIi11i
if 68 - 68: OOooOOo - OoooooooOO
if 14 - 14: O0 / oO0o - Oo0Ooo - IiII
if 44 - 44: OoO0O00
if 32 - 32: OoOoOO00 % OoO0O00 + i11iIiiIii + ooOoO0o - Ii1I + oO0o
if 31 - 31: iIii1I11I1II1 - o0oOOo0O0Ooo
oOOo00Ooo0O = [ ]
I11iiIi1i1 = 0
for IiI1IiI1iiI1 in oooOO0OO0 :
if 34 - 34: II111iiii
if 49 - 49: I11i . OOooOOo
if 74 - 74: i1IIi
if 15 - 15: i1IIi + IiII % I1IiiI / i11iIiiIii * OoOoOO00
oO = I11iiIi1i1 if ( IiI1IiI1iiI1 == oooOO0OO0 [ - 1 ] ) else 0x2000 + I11iiIi1i1
oO = socket . htons ( oO )
outer_hdr = outer_hdr [ 0 : 6 ] + struct . pack ( "H" , oO ) + outer_hdr [ 8 : : ]
if 13 - 13: i1IIi
if 48 - 48: O0 + OoO0O00 . iII111i * o0oOOo0O0Ooo * iII111i
if 69 - 69: OoO0O00 - OoooooooOO - OOooOOo % I11i / OoOoOO00 - II111iiii
if 67 - 67: OOooOOo + OOooOOo + OoO0O00 . i11iIiiIii + I1ii11iIi11i + i11iIiiIii
IIi11I1i1I1I = socket . htons ( len ( IiI1IiI1iiI1 ) + 20 )
outer_hdr = outer_hdr [ 0 : 2 ] + struct . pack ( "H" , IIi11I1i1I1I ) + outer_hdr [ 4 : : ]
outer_hdr = lisp_ip_checksum ( outer_hdr )
oOOo00Ooo0O . append ( outer_hdr + IiI1IiI1iiI1 )
I11iiIi1i1 += len ( IiI1IiI1iiI1 ) / 8
if 35 - 35: O0 + Oo0Ooo - I1IiiI % Ii1I % II111iiii
return ( oOOo00Ooo0O )
if 77 - 77: I1Ii111 + oO0o
if 38 - 38: I1ii11iIi11i - Ii1I * o0oOOo0O0Ooo
def fragment ( self ) :
i1II1IiiIi = self . fix_outer_header ( self . packet )
if 13 - 13: I1IiiI * oO0o
if 41 - 41: IiII
if 16 - 16: iIii1I11I1II1
if 94 - 94: ooOoO0o % I11i % i1IIi
if 90 - 90: Ii1I * OoO0O00
if 7 - 7: iII111i . Ii1I . iII111i - I1Ii111
o00OOo00 = len ( i1II1IiiIi )
if ( o00OOo00 <= 1500 ) : return ( [ i1II1IiiIi ] , "Fragment-None" )
if 33 - 33: ooOoO0o + OoooooooOO - OoO0O00 / i1IIi / OoooooooOO
i1II1IiiIi = self . packet
if 82 - 82: I1ii11iIi11i / OOooOOo - iII111i / Oo0Ooo * OoO0O00
if 55 - 55: OoooooooOO
if 73 - 73: OoOoOO00 - I1ii11iIi11i % Oo0Ooo + I1ii11iIi11i - O0 . OoO0O00
if 38 - 38: O0
if 79 - 79: i1IIi . oO0o
if ( self . inner_version != 4 ) :
i1i1i11iI11II = random . randint ( 0 , 0xffff )
II1 = i1II1IiiIi [ 0 : 4 ] + struct . pack ( "H" , i1i1i11iI11II ) + i1II1IiiIi [ 6 : 20 ]
iiI1iI = i1II1IiiIi [ 20 : : ]
oOOo00Ooo0O = self . fragment_outer ( II1 , iiI1iI )
return ( oOOo00Ooo0O , "Fragment-Outer" )
if 74 - 74: IiII - O0 / I1Ii111 * Ii1I % ooOoO0o . I1Ii111
if 60 - 60: I1ii11iIi11i . II111iiii * i11iIiiIii . o0oOOo0O0Ooo
if 66 - 66: iII111i / i11iIiiIii * O0
if 78 - 78: IiII - I11i % O0 - OOooOOo % OoO0O00
if 43 - 43: OoO0O00
OoOooO = 56 if ( self . outer_version == 6 ) else 36
II1 = i1II1IiiIi [ 0 : OoOooO ]
I1I1i11iiiiI = i1II1IiiIi [ OoOooO : OoOooO + 20 ]
iiI1iI = i1II1IiiIi [ OoOooO + 20 : : ]
if 66 - 66: oO0o / OoOoOO00
if 13 - 13: II111iiii
if 55 - 55: Oo0Ooo % i1IIi * I11i
if 95 - 95: OOooOOo / II111iiii - o0oOOo0O0Ooo % I1Ii111 . I11i
oo0o = struct . unpack ( "H" , I1I1i11iiiiI [ 6 : 8 ] ) [ 0 ]
oo0o = socket . ntohs ( oo0o )
if ( oo0o & 0x4000 ) :
o0o00O = bold ( "DF-bit set" , False )
dprint ( "{} in inner header, packet discarded" . format ( o0o00O ) )
return ( [ ] , "Fragment-None-DF-bit" )
if 46 - 46: OoOoOO00
if 4 - 4: iII111i + O0
I11iiIi1i1 = 0
o00OOo00 = len ( iiI1iI )
oOOo00Ooo0O = [ ]
while ( I11iiIi1i1 < o00OOo00 ) :
oOOo00Ooo0O . append ( iiI1iI [ I11iiIi1i1 : I11iiIi1i1 + 1400 ] )
I11iiIi1i1 += 1400
if 28 - 28: IiII + i11iIiiIii + OoooooooOO / OoO0O00
if 6 - 6: I1IiiI - i11iIiiIii
if 61 - 61: I1Ii111 * I1ii11iIi11i % I1IiiI % OoO0O00 % I11i + I11i
if 6 - 6: Oo0Ooo
if 73 - 73: I1Ii111 * I1ii11iIi11i + o0oOOo0O0Ooo - Oo0Ooo . I11i
oooOO0OO0 = oOOo00Ooo0O
oOOo00Ooo0O = [ ]
o0oOOO = True if oo0o & 0x2000 else False
oo0o = ( oo0o & 0x1fff ) * 8
for IiI1IiI1iiI1 in oooOO0OO0 :
if 62 - 62: Ii1I - oO0o % iIii1I11I1II1
if 57 - 57: OoooooooOO / OoOoOO00
if 44 - 44: OoOoOO00 * i1IIi * O0
if 94 - 94: I1IiiI - O0
I1iIi = oo0o / 8
if ( o0oOOO ) :
I1iIi |= 0x2000
elif ( IiI1IiI1iiI1 != oooOO0OO0 [ - 1 ] ) :
I1iIi |= 0x2000
if 62 - 62: iIii1I11I1II1
I1iIi = socket . htons ( I1iIi )
I1I1i11iiiiI = I1I1i11iiiiI [ 0 : 6 ] + struct . pack ( "H" , I1iIi ) + I1I1i11iiiiI [ 8 : : ]
if 4 - 4: I1ii11iIi11i * I11i . I11i . II111iiii / OOooOOo
if 86 - 86: oO0o % O0 + OoO0O00
if 52 - 52: Oo0Ooo / iII111i
if 42 - 42: iIii1I11I1II1 * Ii1I / OoO0O00 + OOooOOo
if 48 - 48: OoooooooOO - I1Ii111 . i11iIiiIii * iII111i - Ii1I - o0oOOo0O0Ooo
if 59 - 59: iII111i / I11i . Oo0Ooo
o00OOo00 = len ( IiI1IiI1iiI1 )
oo0o += o00OOo00
IIi11I1i1I1I = socket . htons ( o00OOo00 + 20 )
I1I1i11iiiiI = I1I1i11iiiiI [ 0 : 2 ] + struct . pack ( "H" , IIi11I1i1I1I ) + I1I1i11iiiiI [ 4 : 10 ] + struct . pack ( "H" , 0 ) + I1I1i11iiiiI [ 12 : : ]
if 100 - 100: O0
I1I1i11iiiiI = lisp_ip_checksum ( I1I1i11iiiiI )
oOOO00Oo = I1I1i11iiiiI + IiI1IiI1iiI1
if 48 - 48: II111iiii + II111iiii * i1IIi / Ii1I
if 37 - 37: iIii1I11I1II1 % I11i / IiII
if 37 - 37: I1Ii111 - oO0o - OoO0O00
if 42 - 42: iIii1I11I1II1 % Ii1I - I1ii11iIi11i + iIii1I11I1II1
if 27 - 27: O0 / OoO0O00
o00OOo00 = len ( oOOO00Oo )
if ( self . outer_version == 4 ) :
IIi11I1i1I1I = o00OOo00 + OoOooO
o00OOo00 += 16
II1 = II1 [ 0 : 2 ] + struct . pack ( "H" , IIi11I1i1I1I ) + II1 [ 4 : : ]
if 99 - 99: Ii1I - IiII * iIii1I11I1II1 . II111iiii
II1 = lisp_ip_checksum ( II1 )
oOOO00Oo = II1 + oOOO00Oo
oOOO00Oo = self . fix_outer_header ( oOOO00Oo )
if 56 - 56: iIii1I11I1II1 % OoO0O00 . ooOoO0o % IiII . I1Ii111 * Oo0Ooo
if 41 - 41: iIii1I11I1II1 % IiII * oO0o - ooOoO0o
if 5 - 5: OoO0O00 + OoO0O00 + II111iiii * iIii1I11I1II1 + OoooooooOO
if 77 - 77: O0 * I1ii11iIi11i * oO0o + OoO0O00 + I1ii11iIi11i - I1Ii111
if 10 - 10: I1ii11iIi11i + IiII
Ooooo00 = OoOooO - 12
IIi11I1i1I1I = socket . htons ( o00OOo00 )
oOOO00Oo = oOOO00Oo [ 0 : Ooooo00 ] + struct . pack ( "H" , IIi11I1i1I1I ) + oOOO00Oo [ Ooooo00 + 2 : : ]
if 99 - 99: I1ii11iIi11i - oO0o
oOOo00Ooo0O . append ( oOOO00Oo )
if 10 - 10: II111iiii . OoO0O00
return ( oOOo00Ooo0O , "Fragment-Inner" )
if 89 - 89: ooOoO0o * Ii1I
if 93 - 93: i1IIi . Ii1I * I1Ii111 . ooOoO0o
def fix_outer_header ( self , packet ) :
if 54 - 54: iII111i . i1IIi . I1ii11iIi11i * o0oOOo0O0Ooo % iII111i
if 30 - 30: I11i
if 85 - 85: II111iiii + ooOoO0o * I11i
if 12 - 12: Ii1I . I1IiiI % o0oOOo0O0Ooo
if 28 - 28: Ii1I - I1IiiI % OoO0O00 * I1Ii111
if 80 - 80: OOooOOo * IiII
if 4 - 4: iIii1I11I1II1 . I1Ii111 + II111iiii % OoooooooOO
if 82 - 82: OoooooooOO / ooOoO0o * I11i * O0 . I1ii11iIi11i
if ( self . outer_version == 4 or self . inner_version == 4 ) :
if ( lisp_is_macos ( ) ) :
packet = packet [ 0 : 2 ] + packet [ 3 ] + packet [ 2 ] + packet [ 4 : 6 ] + packet [ 7 ] + packet [ 6 ] + packet [ 8 : : ]
if 21 - 21: II111iiii + Oo0Ooo
else :
packet = packet [ 0 : 2 ] + packet [ 3 ] + packet [ 2 ] + packet [ 4 : : ]
if 59 - 59: OOooOOo + I1IiiI / II111iiii / OoOoOO00
if 80 - 80: OoOoOO00 + iIii1I11I1II1 . IiII
return ( packet )
if 76 - 76: I1IiiI * OOooOOo
if 12 - 12: iIii1I11I1II1 / I11i % Ii1I
def send_packet ( self , lisp_raw_socket , dest ) :
if ( lisp_flow_logging and dest != self . inner_dest ) : self . log_flow ( True )
if 49 - 49: OoO0O00 + II111iiii / IiII - O0 % Ii1I
dest = dest . print_address_no_iid ( )
oOOo00Ooo0O , iII1i1 = self . fragment ( )
if 34 - 34: OoO0O00 / OoooooooOO - oO0o / oO0o * I1IiiI
for oOOO00Oo in oOOo00Ooo0O :
if ( len ( oOOo00Ooo0O ) != 1 ) :
self . packet = oOOO00Oo
self . print_packet ( iII1i1 , True )
if 61 - 61: I11i
if 81 - 81: I11i
try : lisp_raw_socket . sendto ( oOOO00Oo , ( dest , 0 ) )
except socket . error , ooo0OO :
lprint ( "socket.sendto() failed: {}" . format ( ooo0OO ) )
if 92 - 92: OOooOOo - Oo0Ooo - OoooooooOO / IiII - i1IIi
if 81 - 81: i1IIi / I1Ii111 % i11iIiiIii . iIii1I11I1II1 * OoOoOO00 + OoooooooOO
if 31 - 31: i1IIi % II111iiii
if 13 - 13: iIii1I11I1II1 - II111iiii % O0 . Ii1I % OoO0O00
def send_l2_packet ( self , l2_socket , mac_header ) :
if ( l2_socket == None ) :
lprint ( "No layer-2 socket, drop IPv6 packet" )
return
if 2 - 2: OoooooooOO - Ii1I % oO0o / I1IiiI / o0oOOo0O0Ooo
if ( mac_header == None ) :
lprint ( "Could not build MAC header, drop IPv6 packet" )
return
if 3 - 3: II111iiii / OOooOOo
if 48 - 48: ooOoO0o . I1ii11iIi11i
i1II1IiiIi = mac_header + self . packet
if 49 - 49: i1IIi - OoOoOO00 . Oo0Ooo + iIii1I11I1II1 - ooOoO0o / Oo0Ooo
if 24 - 24: oO0o - iII111i / ooOoO0o
if 10 - 10: OoOoOO00 * i1IIi
if 15 - 15: I11i + i1IIi - II111iiii % I1IiiI
if 34 - 34: I1IiiI
if 57 - 57: OOooOOo . Ii1I % o0oOOo0O0Ooo
if 32 - 32: I11i / IiII - O0 * iIii1I11I1II1
if 70 - 70: OoooooooOO % OoooooooOO % OoO0O00
if 98 - 98: OoO0O00
if 18 - 18: I11i + Oo0Ooo - OoO0O00 / I1Ii111 / OOooOOo
if 53 - 53: OOooOOo + o0oOOo0O0Ooo . oO0o / I11i
l2_socket . write ( i1II1IiiIi )
return
if 52 - 52: I1Ii111 + I1Ii111
if 73 - 73: o0oOOo0O0Ooo . i11iIiiIii % OoooooooOO + ooOoO0o . OoooooooOO / OOooOOo
def bridge_l2_packet ( self , eid , db ) :
try : oOiiI1i11I = db . dynamic_eids [ eid . print_address_no_iid ( ) ]
except : return
try : I111IIiIII = lisp_myinterfaces [ oOiiI1i11I . interface ]
except : return
try :
socket = I111IIiIII . get_bridge_socket ( )
if ( socket == None ) : return
except : return
if 31 - 31: II111iiii + OOooOOo - OoooooooOO . I11i
try : socket . send ( self . packet )
except socket . error , ooo0OO :
lprint ( "bridge_l2_packet(): socket.send() failed: {}" . format ( ooo0OO ) )
if 28 - 28: Ii1I . I1ii11iIi11i
if 77 - 77: I1ii11iIi11i % II111iiii
if 81 - 81: OoOoOO00 % Ii1I / O0 * iIii1I11I1II1 % IiII . I1IiiI
def is_lisp_packet ( self , packet ) :
I1iIIIiI = ( struct . unpack ( "B" , packet [ 9 ] ) [ 0 ] == LISP_UDP_PROTOCOL )
if ( I1iIIIiI == False ) : return ( False )
if 90 - 90: o0oOOo0O0Ooo
IIiII = struct . unpack ( "H" , packet [ 22 : 24 ] ) [ 0 ]
if ( socket . ntohs ( IIiII ) == LISP_DATA_PORT ) : return ( True )
IIiII = struct . unpack ( "H" , packet [ 20 : 22 ] ) [ 0 ]
if ( socket . ntohs ( IIiII ) == LISP_DATA_PORT ) : return ( True )
return ( False )
if 39 - 39: o0oOOo0O0Ooo / IiII - iII111i
if 96 - 96: I11i * I1ii11iIi11i * Ii1I + I1ii11iIi11i % I1IiiI + i11iIiiIii
def decode(self, is_lisp_packet, lisp_ipc_socket, stats):
    """
    Decode the outer IPv4/IPv6 header, outer UDP header, and LISP
    header (only when 'is_lisp_packet' is True), then the inner IP or
    MAC header, filling in this packet's outer_*/udp_*/inner_* fields
    and decrypting the payload when the LISP header carries key-id
    bits.

    On any decode error, the counter in 'stats' (a dict of counters
    keyed by error name, may be None) matching self.packet_error is
    incremented with the packet length and None is returned. Returns
    self on success. 'lisp_ipc_socket' is used only to relay
    echo-nonce events to the companion process.
    """
    self.packet_error = ""
    i1II1IiiIi = self.packet
    i1iI11Ii1i = len(i1II1IiiIi)
    # Ii: inner header is L3 (IP); i1Iii: inner header is L2 (MAC).
    # Both refined below from the outer UDP ports.
    Ii = i1Iii = True
    if 48 - 48: OOooOOo
    if 26 - 26: iII111i * I1Ii111 * oO0o * OoOoOO00
    if 48 - 48: iII111i % i11iIiiIii . OoooooooOO * IiII % OoO0O00 . iII111i
    if 6 - 6: O0 . ooOoO0o - oO0o / i11iIiiIii
    O00O0 = 0
    o0OOoOO = self.lisp_header.get_instance_id()
    if (is_lisp_packet):
        O00o0O = struct.unpack("B", i1II1IiiIi[0:1])[0]
        self.outer_version = O00o0O >> 4
        if (self.outer_version == 4):
            if 73 - 73: OoO0O00
            if 28 - 28: OoooooooOO - I11i
            if 84 - 84: II111iiii
            if 36 - 36: OOooOOo - OoOoOO00 - iIii1I11I1II1
            if 10 - 10: I1ii11iIi11i / Ii1I * i1IIi % O0 + I11i
            # Verify the IPv4 header checksum: recomputing over a
            # correct header leaves a zero checksum field.
            I1i1ii1ii = struct.unpack("H", i1II1IiiIi[10:12])[0]
            i1II1IiiIi = lisp_ip_checksum(i1II1IiiIi)
            oOOoo0 = struct.unpack("H", i1II1IiiIi[10:12])[0]
            if (oOOoo0 != 0):
                # NOTE(review): a packet whose received checksum field
                # was zero is tolerated on macOS -- presumably the
                # kernel delivers it zeroed there; confirm.
                if (I1i1ii1ii != 0 or lisp_is_macos() == False):
                    self.packet_error = "checksum-error"
                    if (stats):
                        stats[self.packet_error].increment(i1iI11Ii1i)
                    if 32 - 32: IiII / OoooooooOO
                    if 30 - 30: OoOoOO00 / I1IiiI - OoO0O00 - iII111i - i11iIiiIii
                    lprint("IPv4 header checksum failed for outer header")
                    if (lisp_flow_logging): self.log_flow(False)
                    return (None)
                if 84 - 84: i1IIi - I1IiiI % iII111i
                if 80 - 80: o0oOOo0O0Ooo % iII111i
                if 80 - 80: Ii1I
            iioOO = LISP_AFI_IPV4
            I11iiIi1i1 = 12
            self.outer_tos = struct.unpack("B", i1II1IiiIi[1:2])[0]
            self.outer_ttl = struct.unpack("B", i1II1IiiIi[8:9])[0]
            O00O0 = 20
        elif (self.outer_version == 6):
            iioOO = LISP_AFI_IPV6
            I11iiIi1i1 = 8
            # Traffic-class straddles the first two bytes of IPv6.
            I1OO = struct.unpack("H", i1II1IiiIi[0:2])[0]
            self.outer_tos = (socket.ntohs(I1OO) >> 4) & 0xff
            self.outer_ttl = struct.unpack("B", i1II1IiiIi[7:8])[0]
            O00O0 = 40
        else:
            self.packet_error = "outer-header-error"
            if (stats): stats[self.packet_error].increment(i1iI11Ii1i)
            lprint("Cannot decode outer header")
            return (None)
        if 34 - 34: I1Ii111 . OoOoOO00 / i11iIiiIii / iII111i
        if 46 - 46: Oo0Ooo + II111iiii * I1IiiI + OOooOOo
        # Unpack outer source and destination RLOC addresses, then
        # strip the outer IP header off the working buffer.
        self.outer_source.afi = iioOO
        self.outer_dest.afi = iioOO
        I11II11IiI11 = self.outer_source.addr_length()
        if 97 - 97: ooOoO0o / iIii1I11I1II1 % ooOoO0o / I1IiiI * iII111i % OoOoOO00
        self.outer_source.unpack_address(i1II1IiiIi[I11iiIi1i1:I11iiIi1i1 + I11II11IiI11])
        I11iiIi1i1 += I11II11IiI11
        self.outer_dest.unpack_address(i1II1IiiIi[I11iiIi1i1:I11iiIi1i1 + I11II11IiI11])
        i1II1IiiIi = i1II1IiiIi[O00O0::]
        self.outer_source.mask_len = self.outer_source.host_mask_len()
        self.outer_dest.mask_len = self.outer_dest.host_mask_len()
        if 17 - 17: iIii1I11I1II1
        if 89 - 89: i1IIi . i1IIi
        if 10 - 10: iII111i % Oo0Ooo
        if 48 - 48: OOooOOo + I1Ii111 % OOooOOo
        # Unpack the outer UDP header (fields are network byte-order).
        Ooo0o0000OO = struct.unpack("H", i1II1IiiIi[0:2])[0]
        self.udp_sport = socket.ntohs(Ooo0o0000OO)
        Ooo0o0000OO = struct.unpack("H", i1II1IiiIi[2:4])[0]
        self.udp_dport = socket.ntohs(Ooo0o0000OO)
        Ooo0o0000OO = struct.unpack("H", i1II1IiiIi[4:6])[0]
        self.udp_length = socket.ntohs(Ooo0o0000OO)
        Ooo0o0000OO = struct.unpack("H", i1II1IiiIi[6:8])[0]
        self.udp_checksum = socket.ntohs(Ooo0o0000OO)
        i1II1IiiIi = i1II1IiiIi[8::]
        if 8 - 8: I1ii11iIi11i % oO0o / Ii1I
        if 37 - 37: oO0o % I1Ii111 % oO0o
        if 14 - 14: OoO0O00 / I1IiiI
        if 66 - 66: Oo0Ooo / i11iIiiIii % ooOoO0o
        # The LISP data port carries an inner IP packet; the L2-data
        # and VXLAN ports carry an inner MAC frame.
        Ii = (self.udp_dport == LISP_DATA_PORT or
            self.udp_sport == LISP_DATA_PORT)
        i1Iii = (self.udp_dport in (LISP_L2_DATA_PORT, LISP_VXLAN_DATA_PORT))
        if 43 - 43: OOooOOo
        if 84 - 84: OOooOOo . IiII . iII111i
        if 2 - 2: Oo0Ooo - OoOoOO00
        if 49 - 49: Ii1I + II111iiii / oO0o - OoOoOO00 % OoOoOO00 + I1IiiI
        if (self.lisp_header.decode(i1II1IiiIi) == False):
            self.packet_error = "lisp-header-error"
            if (stats): stats[self.packet_error].increment(i1iI11Ii1i)
            if 54 - 54: ooOoO0o % Oo0Ooo - OOooOOo
            if (lisp_flow_logging): self.log_flow(False)
            lprint("Cannot decode LISP header")
            return (None)
        if 16 - 16: I1ii11iIi11i * iII111i / I11i
        i1II1IiiIi = i1II1IiiIi[8::]
        o0OOoOO = self.lisp_header.get_instance_id()
        O00O0 += 16
        if 46 - 46: II111iiii
        # Instance-id 0xffffff marks a control message; use iid 0 for
        # the inner EIDs in that case.
        if (o0OOoOO == 0xffffff): o0OOoOO = 0
    if 13 - 13: IiII + II111iiii % I1IiiI
    if 30 - 30: OoooooooOO - i11iIiiIii + oO0o / Oo0Ooo - i11iIiiIii
    if 74 - 74: O0 . I11i
    if 64 - 64: ooOoO0o / i1IIi % iII111i
    # When the header's key-id (KK) bits are set the payload is
    # encrypted: look up the decap key and decrypt before decoding
    # the inner header.
    OOoOo0O0 = False
    I1o0 = self.lisp_header.k_bits
    if (I1o0):
        I1iiIiiii1111 = lisp_get_crypto_decap_lookup_key(self.outer_source,
            self.udp_sport)
        if (I1iiIiiii1111 == None):
            self.packet_error = "no-decrypt-key"
            if (stats): stats[self.packet_error].increment(i1iI11Ii1i)
            if 26 - 26: iII111i * iIii1I11I1II1 + II111iiii / I1IiiI
            self.print_packet("Receive", is_lisp_packet)
            O0OO = bold("No key available", False)
            dprint("{} for key-id {} to decrypt packet".format(O0OO, I1o0))
            if (lisp_flow_logging): self.log_flow(False)
            return (None)
        if 77 - 77: Ii1I % OOooOOo / oO0o
        if 91 - 91: OoO0O00 / OoO0O00 . II111iiii . ooOoO0o - I1IiiI
        iii11 = lisp_crypto_keys_by_rloc_decap[I1iiIiiii1111][I1o0]
        if (iii11 == None):
            self.packet_error = "no-decrypt-key"
            if (stats): stats[self.packet_error].increment(i1iI11Ii1i)
            if 59 - 59: Oo0Ooo / i11iIiiIii * I1IiiI + OoO0O00
            self.print_packet("Receive", is_lisp_packet)
            O0OO = bold("No key available", False)
            dprint("{} to decrypt packet from RLOC {}".format(O0OO,
                red(I1iiIiiii1111, False)))
            if (lisp_flow_logging): self.log_flow(False)
            return (None)
        if 47 - 47: OOooOOo / II111iiii % IiII . oO0o * I1ii11iIi11i
        if 35 - 35: Oo0Ooo * II111iiii
        if 32 - 32: oO0o . Oo0Ooo / ooOoO0o + ooOoO0o . I1ii11iIi11i
        if 50 - 50: iIii1I11I1II1 * oO0o
        if 85 - 85: i1IIi
        iii11.use_count += 1
        i1II1IiiIi, OOoOo0O0 = self.decrypt(i1II1IiiIi, O00O0, iii11,
            I1iiIiiii1111)
        if (OOoOo0O0 == False):
            # decrypt() set self.packet_error for the stats key.
            if (stats): stats[self.packet_error].increment(i1iI11Ii1i)
            if (lisp_flow_logging): self.log_flow(False)
            return (None)
    if 100 - 100: OoooooooOO / I11i % OoO0O00 + Ii1I
    if 42 - 42: Oo0Ooo / IiII . Ii1I * I1IiiI
    if 54 - 54: OoOoOO00 * iII111i + OoO0O00
    if 93 - 93: o0oOOo0O0Ooo / I1IiiI
    if 47 - 47: Oo0Ooo * OOooOOo
    if 98 - 98: oO0o - oO0o . ooOoO0o
    # Decode the inner header: IPv4, IPv6, MAC frame, or a control
    # message (which has no inner header).
    O00o0O = struct.unpack("B", i1II1IiiIi[0:1])[0]
    self.inner_version = O00o0O >> 4
    if (Ii and self.inner_version == 4 and O00o0O >= 0x45):
        OooOOoO00OO00 = socket.ntohs(struct.unpack("H", i1II1IiiIi[2:4])[0])
        self.inner_tos = struct.unpack("B", i1II1IiiIi[1:2])[0]
        self.inner_ttl = struct.unpack("B", i1II1IiiIi[8:9])[0]
        self.inner_protocol = struct.unpack("B", i1II1IiiIi[9:10])[0]
        self.inner_source.afi = LISP_AFI_IPV4
        self.inner_dest.afi = LISP_AFI_IPV4
        self.inner_source.unpack_address(i1II1IiiIi[12:16])
        self.inner_dest.unpack_address(i1II1IiiIi[16:20])
        oo0o = socket.ntohs(struct.unpack("H", i1II1IiiIi[6:8])[0])
        # NOTE(review): any non-zero fragment field (including only
        # the DF bit) marks the packet as a fragment here -- confirm
        # that treating DF-only packets as fragments is intended.
        self.inner_is_fragment = (oo0o & 0x2000 or oo0o != 0)
        if (self.inner_protocol == LISP_UDP_PROTOCOL):
            self.inner_sport = struct.unpack("H", i1II1IiiIi[20:22])[0]
            self.inner_sport = socket.ntohs(self.inner_sport)
            self.inner_dport = struct.unpack("H", i1II1IiiIi[22:24])[0]
            self.inner_dport = socket.ntohs(self.inner_dport)
        if 17 - 17: OoooooooOO * I1Ii111 * I1IiiI
    elif (Ii and self.inner_version == 6 and O00o0O >= 0x60):
        # IPv6 payload-length excludes the 40-byte base header.
        OooOOoO00OO00 = socket.ntohs(struct.unpack("H", i1II1IiiIi[4:6])[0]) + 40
        I1OO = struct.unpack("H", i1II1IiiIi[0:2])[0]
        self.inner_tos = (socket.ntohs(I1OO) >> 4) & 0xff
        self.inner_ttl = struct.unpack("B", i1II1IiiIi[7:8])[0]
        self.inner_protocol = struct.unpack("B", i1II1IiiIi[6:7])[0]
        self.inner_source.afi = LISP_AFI_IPV6
        self.inner_dest.afi = LISP_AFI_IPV6
        self.inner_source.unpack_address(i1II1IiiIi[8:24])
        self.inner_dest.unpack_address(i1II1IiiIi[24:40])
        if (self.inner_protocol == LISP_UDP_PROTOCOL):
            self.inner_sport = struct.unpack("H", i1II1IiiIi[40:42])[0]
            self.inner_sport = socket.ntohs(self.inner_sport)
            self.inner_dport = struct.unpack("H", i1II1IiiIi[42:44])[0]
            self.inner_dport = socket.ntohs(self.inner_dport)
        if 30 - 30: OoOoOO00 / oO0o / Ii1I * o0oOOo0O0Ooo * oO0o . I1IiiI
    elif (i1Iii):
        # Inner MAC frame: addresses are stored byte-pair swapped.
        OooOOoO00OO00 = len(i1II1IiiIi)
        self.inner_tos = 0
        self.inner_ttl = 0
        self.inner_protocol = 0
        self.inner_source.afi = LISP_AFI_MAC
        self.inner_dest.afi = LISP_AFI_MAC
        self.inner_dest.unpack_address(self.swap_mac(i1II1IiiIi[0:6]))
        self.inner_source.unpack_address(self.swap_mac(i1II1IiiIi[6:12]))
    elif (self.lisp_header.get_instance_id() == 0xffffff):
        # Control message: nothing further to decode.
        if (lisp_flow_logging): self.log_flow(False)
        return (self)
    else:
        self.packet_error = "bad-inner-version"
        if (stats): stats[self.packet_error].increment(i1iI11Ii1i)
        if 93 - 93: OoOoOO00
        lprint("Cannot decode encapsulation, header version {}".format(hex(O00o0O)))
        if 97 - 97: i11iIiiIii
        i1II1IiiIi = lisp_format_packet(i1II1IiiIi[0:20])
        lprint("Packet header: {}".format(i1II1IiiIi))
        if (lisp_flow_logging and is_lisp_packet): self.log_flow(False)
        return (None)
    if 68 - 68: IiII * OoO0O00 . I11i / Ii1I . o0oOOo0O0Ooo - i11iIiiIii
    self.inner_source.mask_len = self.inner_source.host_mask_len()
    self.inner_dest.mask_len = self.inner_dest.host_mask_len()
    self.inner_source.instance_id = o0OOoOO
    self.inner_dest.instance_id = o0OOoOO
    if 49 - 49: Oo0Ooo / Ii1I % I11i + oO0o - OoO0O00
    if 13 - 13: II111iiii
    if 83 - 83: OoooooooOO . I1IiiI + Ii1I * O0 / oO0o
    if 8 - 8: i1IIi + II111iiii / Ii1I + I1ii11iIi11i % Ii1I - iIii1I11I1II1
    if 29 - 29: Oo0Ooo + II111iiii
    # Handle echo-nonce signaling carried in the LISP header: an
    # E-bit means the peer requested an echo; otherwise a nonce may
    # be the echo of one we requested.
    if (lisp_nonce_echoing and is_lisp_packet):
        oOOo00ooO = lisp_get_echo_nonce(self.outer_source, None)
        if (oOOo00ooO == None):
            ooOo = self.outer_source.print_address_no_iid()
            oOOo00ooO = lisp_echo_nonce(ooOo)
        if 73 - 73: OoO0O00 * OoooooooOO - OoooooooOO + I1IiiI * Oo0Ooo
        oOo0 = self.lisp_header.get_nonce()
        if (self.lisp_header.is_e_bit_set()):
            oOOo00ooO.receive_request(lisp_ipc_socket, oOo0)
        elif (oOOo00ooO.request_nonce_sent):
            oOOo00ooO.receive_echo(lisp_ipc_socket, oOo0)
    if 2 - 2: I1IiiI + II111iiii / Ii1I % Oo0Ooo - I1Ii111 + I1Ii111
    if 84 - 84: o0oOOo0O0Ooo % i1IIi / Oo0Ooo - I1IiiI . I1ii11iIi11i . o0oOOo0O0Ooo
    if 75 - 75: O0 * i1IIi - I11i / OOooOOo % OOooOOo / OoOoOO00
    if 5 - 5: O0 - iII111i / I1Ii111 . o0oOOo0O0Ooo
    if 7 - 7: I1ii11iIi11i - OoOoOO00
    if 54 - 54: oO0o / iIii1I11I1II1 / OoooooooOO . i1IIi - OoOoOO00
    if 57 - 57: iIii1I11I1II1 * Ii1I * iII111i / oO0o
    # After decryption, append the decrypted inner packet (truncated
    # to its header-declared length) to self.packet -- presumably
    # decrypt() left self.packet holding only the outer headers; the
    # truncation drops cipher padding. TODO confirm against decrypt().
    if (OOoOo0O0): self.packet += i1II1IiiIi[:OooOOoO00OO00]
    if 46 - 46: Ii1I
    if 61 - 61: o0oOOo0O0Ooo / ooOoO0o - II111iiii
    if 87 - 87: I1ii11iIi11i / I1IiiI
    if 45 - 45: OoOoOO00 * ooOoO0o / OoooooooOO + OoO0O00 . I1Ii111 / OoO0O00
    if (lisp_flow_logging and is_lisp_packet): self.log_flow(False)
    return (self)
if 64 - 64: Ii1I / i1IIi % I1IiiI - o0oOOo0O0Ooo
if 11 - 11: I1ii11iIi11i - OoooooooOO
def swap_mac ( self , mac ) :
return ( mac [ 1 ] + mac [ 0 ] + mac [ 3 ] + mac [ 2 ] + mac [ 5 ] + mac [ 4 ] )
if 16 - 16: IiII % OoooooooOO - ooOoO0o * Ii1I - Ii1I
if 27 - 27: IiII + iIii1I11I1II1 / Oo0Ooo + OoO0O00 % Oo0Ooo + OoO0O00
def strip_outer_headers(self):
    """
    Remove the outer IP header, outer UDP header (8 bytes), and LISP
    header (8 bytes) from self.packet, leaving the inner packet.
    Returns self so calls can be chained.
    """
    strip = 16 + (20 if (self.outer_version == 4) else 40)
    self.packet = self.packet[strip::]
    return (self)
if 77 - 77: Oo0Ooo * ooOoO0o % Ii1I
if 2 - 2: I11i / Oo0Ooo / Ii1I / I1ii11iIi11i / OoooooooOO
def hash_ports(self):
    """
    Fold the inner transport protocol and, for TCP (6) or UDP (17),
    the inner source/destination port words into a 16-bit hash value.
    For an IPv4 fragment just the protocol number is returned (the
    ports may not be present). Returns 0 for other protocols.
    """
    packet = self.packet
    version = self.inner_version
    hash_value = 0
    if (version == 4):
        # Use 1-byte slices rather than indexing so this works on
        # both Python 2 strings and Python 3 bytes.
        protocol = struct.unpack("B", packet[9:10])[0]
        if (self.inner_is_fragment): return (protocol)
        if (protocol in [6, 17]):
            hash_value = protocol
            # Both 16-bit ports in one 32-bit read.
            hash_value += struct.unpack("I", packet[20:24])[0]
            hash_value = (hash_value >> 16) ^ (hash_value & 0xffff)

    if (version == 6):
        protocol = struct.unpack("B", packet[6:7])[0]
        if (protocol in [6, 17]):
            hash_value = protocol
            hash_value += struct.unpack("I", packet[40:44])[0]
            hash_value = (hash_value >> 16) ^ (hash_value & 0xffff)

    return (hash_value)
if 12 - 12: OoOoOO00 + o0oOOo0O0Ooo . I1Ii111
if 52 - 52: OoO0O00
def hash_packet(self):
    """
    Compute a flow hash from the inner source/dest addresses and
    ports and store it in the low 12 bits of self.udp_sport (high
    nibble forced to 0xf), giving per-flow source-port entropy.
    """
    flow_hash = self.inner_source.address ^ self.inner_dest.address
    flow_hash += self.hash_ports()

    # Fold the address-width hash down to 16 bits.
    if (self.inner_version == 4):
        shifts = [16]
    elif (self.inner_version == 6):
        shifts = [64, 32, 16]
    else:
        shifts = []
    for shift in shifts:
        flow_hash = (flow_hash >> shift) ^ (flow_hash & ((1 << shift) - 1))

    self.udp_sport = 0xf000 | (flow_hash & 0xfff)
if 98 - 98: Ii1I - O0 * oO0o * Ii1I * Ii1I
if 44 - 44: IiII + I11i
def print_packet(self, s_or_r, is_lisp_packet):
    """
    Emit a one-line packet summary via dprint(). 's_or_r' is a
    direction/operation tag such as "Send", "Receive" or "Replicate";
    when 'is_lisp_packet' is False only the inner header is shown.
    """
    if (is_lisp_packet == False):
        inner_eids = "{} -> {}".format(self.inner_source.print_address(),
            self.inner_dest.print_address())
        dprint(("{} {}, tos/ttl: {}/{}, length: {}, packet: {} ...").format(
            bold(s_or_r, False), green(inner_eids, False), self.inner_tos,
            self.inner_ttl, len(self.packet),
            lisp_format_packet(self.packet[0:60])))
        return

    # Map the direction tag onto an encap/decap operation name.
    if (s_or_r.find("Receive") != -1):
        op = "decap"
        if (self.udp_dport == LISP_VXLAN_DATA_PORT): op += "-vxlan"
    else:
        op = s_or_r
        if (op in ["Send", "Replicate"] or op.find("Fragment") != -1):
            op = "encap"

    outer_rlocs = "{} -> {}".format(self.outer_source.print_address_no_iid(),
        self.outer_dest.print_address_no_iid())

    # Instance-id 0xffffff marks a control message; it has no
    # decodable inner header.
    if (self.lisp_header.get_instance_id() == 0xffffff):
        line = ("{} LISP packet, outer RLOCs: {}, outer tos/ttl: " + "{}/{}, outer UDP: {} -> {}, ")
        line += bold("control-packet", False) + ": {} ..."
        dprint(line.format(bold(s_or_r, False), red(outer_rlocs, False),
            self.outer_tos, self.outer_ttl, self.udp_sport,
            self.udp_dport, lisp_format_packet(self.packet[0:56])))
        return

    line = ("{} LISP packet, outer RLOCs: {}, outer tos/ttl: " + "{}/{}, outer UDP: {} -> {}, inner EIDs: {}, " + "inner tos/ttl: {}/{}, length: {}, {}, packet: {} ...")

    # Encrypted packets get a combined operation tag.
    if (self.lisp_header.k_bits):
        if (op == "encap"): op = "encrypt/encap"
        if (op == "decap"): op = "decap/decrypt"

    inner_eids = "{} -> {}".format(self.inner_source.print_address(),
        self.inner_dest.print_address())

    dprint(line.format(bold(s_or_r, False), red(outer_rlocs, False),
        self.outer_tos, self.outer_ttl, self.udp_sport, self.udp_dport,
        green(inner_eids, False), self.inner_tos, self.inner_ttl,
        len(self.packet), self.lisp_header.print_header(op),
        lisp_format_packet(self.packet[0:56])))
if 39 - 39: OoooooooOO
if 19 - 19: i11iIiiIii
def print_eid_tuple(self):
    """Return the printable string for the inner source/dest EID pair."""
    source, dest = self.inner_source, self.inner_dest
    return (lisp_print_eid_tuple(source, dest))
if 80 - 80: I1IiiI
if 58 - 58: oO0o + I1ii11iIi11i % OoOoOO00
def get_raw_socket(self):
    """
    Return the per-interface raw send socket for this packet's
    instance-id when a multi-tenancy interface is configured for it,
    or None to use the default socket. When the interface socket is
    missing (SO_BINDTODEVICE failed) and LISP_ENFORCE_BINDTODEVICE is
    set in the environment, None is returned so the packet is dropped.
    """
    iid = str(self.lisp_header.get_instance_id())
    if (iid == "0"): return (None)
    # dict.has_key() is Python-2 only; the 'in' operator behaves
    # identically and also works on Python 3.
    if ((iid in lisp_iid_to_interface) == False): return (None)

    interface = lisp_iid_to_interface[iid]
    device_socket = interface.get_socket()
    if (device_socket == None):
        string = bold("SO_BINDTODEVICE", False)
        enforce = (os.getenv("LISP_ENFORCE_BINDTODEVICE") != None)
        lprint("{} required for multi-tenancy support, {} packet".format(string, "drop" if enforce else "forward"))

        if (enforce): return (None)

    iid_str = bold(iid, False)
    device = bold(interface.device, False)
    dprint("Send packet on instance-id {} interface {}".format(iid_str, device))
    return (device_socket)
if 74 - 74: OoooooooOO - o0oOOo0O0Ooo * iII111i
if 37 - 37: o0oOOo0O0Ooo * Oo0Ooo
def log_flow(self, encap):
    """
    Append this packet to the in-memory flow log. When the log is
    full (LISP_FLOW_LOG_SIZE entries) or the ./log-flows trigger file
    exists, hand the whole batch to a background writer thread
    instead. 'encap' records flow direction (True=encap, False=decap).
    """
    global lisp_flow_log
    if 11 - 11: oO0o
    # Presence of the touch-file ./log-flows forces an immediate flush.
    Oo0O0o00o00 = os.path.exists("./log-flows")
    if (len(lisp_flow_log) == LISP_FLOW_LOG_SIZE or Oo0O0o00o00):
        o0o = [lisp_flow_log]
        # Rebind (not mutate) so the writer thread owns the old list.
        lisp_flow_log = []
        threading.Thread(target=lisp_write_flow_log, args=o0o).start()
        # The trigger file is one-shot: consume it.
        if (Oo0O0o00o00): os.system("rm ./log-flows")
        return
    if 26 - 26: I1Ii111 / ooOoO0o - OoO0O00 . iIii1I11I1II1
    if 83 - 83: ooOoO0o % Ii1I / Oo0Ooo - iII111i / O0
    OOOO0O00o = datetime.datetime.now()
    lisp_flow_log.append([OOOO0O00o, encap, self.packet, self])
if 97 - 97: iIii1I11I1II1 * I11i
if 95 - 95: OoO0O00
def print_flow(self, ts, encap, packet):
    """
    Return a one-line flow-log string for this packet. 'ts' is the
    datetime the flow was logged, 'encap' selects the encap/decap
    tag, and 'packet' is the raw outer packet that was logged.
    """
    timestamp = ts.strftime("%m/%d/%y %H:%M:%S.%f")[:-3]
    flow = "{}: {}".format(timestamp, "encap" if encap else "decap")

    outer_src = red(self.outer_source.print_address_no_iid(), False)
    outer_dst = red(self.outer_dest.print_address_no_iid(), False)
    inner_src = green(self.inner_source.print_address(), False)
    inner_dst = green(self.inner_dest.print_address(), False)

    # Instance-id 0xffffff marks a control message; decode() stored
    # its message type in inner_version.
    if (self.lisp_header.get_instance_id() == 0xffffff):
        flow += " {}:{} -> {}:{}, LISP control message type {}\n"
        flow = flow.format(outer_src, self.udp_sport, outer_dst,
            self.udp_dport, self.inner_version)
        return (flow)

    # Outer header summary (only when an outer header is present).
    if (self.outer_dest.is_null() == False):
        flow += " {}:{} -> {}:{}, len/tos/ttl {}/{}/{}"
        flow = flow.format(outer_src, self.udp_sport, outer_dst,
            self.udp_dport, len(packet), self.outer_tos, self.outer_ttl)

    # An encrypted payload cannot have its inner header summarized.
    if (self.lisp_header.k_bits != 0):
        tail = "\n"
        if (self.packet_error != ""):
            tail = " ({})".format(self.packet_error) + tail
        flow += ", encrypted" + tail
        return (flow)

    # Skip past the outer headers to reach the inner header.
    if (self.outer_dest.is_null() == False):
        packet = packet[36::] if self.outer_version == 4 else packet[56::]

    protocol = packet[9] if self.inner_version == 4 else packet[6]
    protocol = struct.unpack("B", protocol)[0]

    flow += " {} -> {}, len/tos/ttl/prot {}/{}/{}/{}"
    flow = flow.format(inner_src, inner_dst, len(packet),
        self.inner_tos, self.inner_ttl, protocol)

    # Add ports for TCP/UDP, or the icmp sequence number for ICMP.
    if (protocol in [6, 17]):
        ports = packet[20:24] if self.inner_version == 4 else packet[40:44]
        if (len(ports) == 4):
            ports = socket.ntohl(struct.unpack("I", ports)[0])
            flow += ", ports {} -> {}".format(ports >> 16, ports & 0xffff)
    elif (protocol == 1):
        seq = packet[26:28] if self.inner_version == 4 else packet[46:48]
        if (len(seq) == 2):
            seq = socket.ntohs(struct.unpack("H", seq)[0])
            flow += ", icmp-seq {}".format(seq)

    if (self.packet_error != ""):
        flow += " ({})".format(self.packet_error)
    flow += "\n"
    return (flow)
if 22 - 22: I1Ii111 * I1ii11iIi11i - IiII
if 71 - 71: iIii1I11I1II1 / i11iIiiIii % o0oOOo0O0Ooo . I1Ii111 * I1IiiI % II111iiii
def is_trace(self):
    """
    Return True when the inner packet is a LISP-Trace message, i.e.
    UDP with the trace port as inner source or destination port.
    """
    if (self.inner_protocol != LISP_UDP_PROTOCOL): return (False)
    ports = [self.inner_sport, self.inner_dport]
    return (LISP_TRACE_PORT in ports)
if 35 - 35: I1Ii111 - OoOoOO00
if 61 - 61: I1Ii111 * o0oOOo0O0Ooo * OoO0O00 + I1ii11iIi11i . Oo0Ooo + i1IIi
if 82 - 82: Oo0Ooo + I1Ii111
if 93 - 93: I11i * O0 * OOooOOo - o0oOOo0O0Ooo / I1ii11iIi11i
if 54 - 54: i1IIi - OoO0O00 / OoooooooOO
if 95 - 95: O0 + iIii1I11I1II1 . I1ii11iIi11i
if 61 - 61: Ii1I * Ii1I
if 70 - 70: I1Ii111 . I1ii11iIi11i / o0oOOo0O0Ooo * oO0o
if 74 - 74: I1IiiI . ooOoO0o / iII111i . IiII
if 74 - 74: Oo0Ooo / I1Ii111 % I1Ii111 . IiII
if 72 - 72: i1IIi
if 21 - 21: I1Ii111 . OOooOOo / i11iIiiIii * i1IIi
if 82 - 82: ooOoO0o * Oo0Ooo % i11iIiiIii * i1IIi . OOooOOo
if 89 - 89: IiII - i1IIi - IiII
if 74 - 74: OoO0O00 % OoO0O00
if 28 - 28: OoOoOO00 % oO0o - OOooOOo + OOooOOo + oO0o / iIii1I11I1II1
#
# LISP data-plane header flag bits, held in the first 32-bit word of
# the header (see RFC 6830, section 5.3, and class lisp_data_header).
#
LISP_N_BIT = 0x80000000   # N-bit: 24-bit nonce present in low bits
LISP_L_BIT = 0x40000000   # L-bit: locator-status-bits field enabled
LISP_E_BIT = 0x20000000   # E-bit: nonce is an echo-nonce request
LISP_V_BIT = 0x10000000   # V-bit: map-version value present
LISP_I_BIT = 0x08000000   # I-bit: instance-id present in second word
LISP_P_BIT = 0x04000000   # P-bit (probe -- per RFC 6830)
LISP_K_BITS = 0x03000000  # KK-bits: key-id of the encryption key
if 91 - 91: I1IiiI / II111iiii * OOooOOo
class lisp_data_header():
    """
    LISP data-plane encapsulation header (RFC 6830): two 32-bit words,
    flags plus a 24-bit nonce in the first, a 24-bit instance-id plus
    8 locator-status-bits in the second. Words are kept in host byte
    order; encode()/decode() convert to/from network byte order.
    """

    def __init__(self):
        self.first_long = 0
        self.second_long = 0
        self.k_bits = 0

    def print_header(self, e_or_d):
        """Return a printable header summary prefixed with 'e_or_d'.
        Upper-case flag letters mean set, lower-case mean clear."""
        nonce = lisp_hex_string(self.first_long & 0xffffff)
        iid_lsb = lisp_hex_string(self.second_long).zfill(8)

        flags = ""
        for bit, letter in ((LISP_N_BIT, "N"), (LISP_L_BIT, "L"),
            (LISP_E_BIT, "E"), (LISP_V_BIT, "V"), (LISP_I_BIT, "I"),
            (LISP_P_BIT, "P")):
            flags += letter if (self.first_long & bit) else letter.lower()
        flags += "K" if (self.k_bits in [2, 3]) else "k"
        flags += "K" if (self.k_bits in [1, 3]) else "k"

        line = ("{} LISP-header -> flags: {}{}{}{}{}{}{}{}, nonce: {}, " + "iid/lsb: {}")
        return (line.format(bold(e_or_d, False), flags[0], flags[1],
            flags[2], flags[3], flags[4], flags[5], flags[6], flags[7],
            nonce, iid_lsb))

    def encode(self):
        """Return the 8-byte wire form of this header."""
        packet_format = "II"
        first = socket.htonl(self.first_long)
        second = socket.htonl(self.second_long)
        return (struct.pack(packet_format, first, second))

    def decode(self, packet):
        """Parse the first 8 bytes of 'packet' into this header.
        Returns True on success, False when the buffer is too short."""
        packet_format = "II"
        size = struct.calcsize(packet_format)
        if (len(packet) < size): return (False)

        first, second = struct.unpack(packet_format, packet[:size])
        self.first_long = socket.ntohl(first)
        self.second_long = socket.ntohl(second)
        self.k_bits = (self.first_long & LISP_K_BITS) >> 24
        return (True)

    def key_id(self, key_id):
        """Store the 2-bit encryption key-id in the KK bits."""
        self.first_long &= ~(0x3 << 24)
        self.first_long |= ((key_id & 0x3) << 24)
        self.k_bits = key_id

    def nonce(self, nonce):
        """Set the N-bit and store the 24-bit nonce."""
        self.first_long |= LISP_N_BIT
        self.first_long |= nonce

    def map_version(self, version):
        """Set the V-bit and store the map-version value."""
        self.first_long |= LISP_V_BIT
        self.first_long |= version

    def instance_id(self, iid):
        """Set the I-bit and 24-bit instance-id; iid 0 means none."""
        if (iid == 0): return
        self.first_long |= LISP_I_BIT
        self.second_long &= 0xff
        self.second_long |= (iid << 8)

    def get_instance_id(self):
        """Return the 24-bit instance-id from the second word."""
        return ((self.second_long >> 8) & 0xffffff)

    def locator_status_bits(self, lsbs):
        """Set the L-bit and the low 8 locator-status-bits."""
        self.first_long |= LISP_L_BIT
        self.second_long &= 0xffffff00
        self.second_long |= (lsbs & 0xff)

    def is_request_nonce(self, nonce):
        """Truthy when 'nonce' carries the request-nonce bit."""
        return (nonce & 0x80000000)

    def request_nonce(self, nonce):
        """Mark the header as requesting an echo of our 24-bit nonce."""
        self.first_long |= LISP_E_BIT
        self.first_long |= LISP_N_BIT
        self.first_long |= (nonce & 0xffffff)

    def is_e_bit_set(self):
        """Truthy when the peer requested an echo-nonce."""
        return (self.first_long & LISP_E_BIT)

    def get_nonce(self):
        """Return the 24-bit nonce from the first word."""
        return (self.first_long & 0xffffff)
if 18 - 18: o0oOOo0O0Ooo . ooOoO0o
if 70 - 70: OoooooooOO . ooOoO0o / oO0o . oO0o - o0oOOo0O0Ooo
if 29 - 29: I11i % OOooOOo - ooOoO0o
class lisp_echo_nonce ( ) :
def __init__(self, rloc_str):
    """
    Create echo-nonce state for RLOC 'rloc_str' and register it in
    the global lisp_nonce_echo_list cache keyed by the address string.
    """
    self.rloc_str = rloc_str
    self.rloc = lisp_address(LISP_AFI_NONE, rloc_str, 0, 0)

    # Nonce values and their timestamps all start out unset.
    for attr in ("request_nonce_sent", "echo_nonce_sent",
        "last_request_nonce_sent", "last_new_request_nonce_sent",
        "last_echo_nonce_sent", "last_new_echo_nonce_sent",
        "request_nonce_rcvd", "echo_nonce_rcvd",
        "last_request_nonce_rcvd", "last_echo_nonce_rcvd",
        "last_good_echo_nonce_rcvd"):
        setattr(self, attr, None)

    lisp_nonce_echo_list[rloc_str] = self
if 26 - 26: O0 . I11i + iII111i - Ii1I . I11i
if 2 - 2: I1ii11iIi11i . Oo0Ooo * OOooOOo % II111iiii . iII111i
def send_ipc(self, ipc_socket, ipc):
    """Send IPC message 'ipc' to the companion LISP process (ITR to
    ETR or vice versa, depending on which process we are)."""
    if (lisp_i_am_itr):
        source, dest = "lisp-itr", "lisp-etr"
    else:
        source, dest = "lisp-etr", "lisp-itr"
    ipc = lisp_command_ipc(ipc, source)
    lisp_ipc(ipc, ipc_socket, dest)
if 46 - 46: Ii1I
if 42 - 42: iIii1I11I1II1
def send_request_ipc(self, ipc_socket, nonce):
    """Tell the companion process we entered request-nonce mode for
    this RLOC with the given nonce."""
    message = "nonce%R%{}%{}".format(self.rloc_str, lisp_hex_string(nonce))
    self.send_ipc(ipc_socket, message)
if 40 - 40: I1IiiI
if 3 - 3: ooOoO0o / i1IIi - OoOoOO00
def send_echo_ipc(self, ipc_socket, nonce):
    """Tell the companion process an echoed nonce was received from
    this RLOC."""
    message = "nonce%E%{}%{}".format(self.rloc_str, lisp_hex_string(nonce))
    self.send_ipc(ipc_socket, message)
if 73 - 73: OoooooooOO * O0 * ooOoO0o
if 7 - 7: II111iiii + i1IIi
def receive_request(self, ipc_socket, nonce):
    """
    Record a request-nonce received from this RLOC in a data packet.
    Newly-seen nonces are relayed to the companion process via IPC
    (the RTR has no companion process, so it skips the IPC).
    """
    previous = self.request_nonce_rcvd
    self.request_nonce_rcvd = nonce
    self.last_request_nonce_rcvd = lisp_get_timestamp()
    if (lisp_i_am_rtr): return
    if (previous != nonce): self.send_request_ipc(ipc_socket, nonce)
if 43 - 43: iIii1I11I1II1
if 29 - 29: IiII % ooOoO0o + OoO0O00 . i1IIi + I1IiiI
def receive_echo(self, ipc_socket, nonce):
    """
    Record an echoed nonce from this RLOC. Ignored unless it matches
    the request-nonce we sent; newly-seen echoes are relayed to the
    companion process via IPC (RTR skips the IPC).
    """
    if (self.request_nonce_sent != nonce): return
    self.last_echo_nonce_rcvd = lisp_get_timestamp()
    if (self.echo_nonce_rcvd == nonce): return

    self.echo_nonce_rcvd = nonce
    if (lisp_i_am_rtr): return
    self.send_echo_ipc(ipc_socket, nonce)
if 98 - 98: i1IIi - iII111i
if 49 - 49: o0oOOo0O0Ooo . Ii1I . oO0o
def get_request_or_echo_nonce(self, ipc_socket, remote_rloc):
    """
    Return the 32-bit nonce value to place in an outgoing data-packet
    header: either the peer's nonce echoed back, or our own
    request-nonce with the request bit (0x80000000) set. Returns None
    exactly when request-nonce mode just timed out with no echo.
    """
    if 9 - 9: IiII - II111iiii * OoO0O00
    if 78 - 78: iIii1I11I1II1 / O0 * oO0o / iII111i / OoOoOO00
    if 15 - 15: ooOoO0o / oO0o
    if 54 - 54: ooOoO0o - iIii1I11I1II1 - I11i % Ii1I / II111iiii
    if 80 - 80: i11iIiiIii % iIii1I11I1II1 / i11iIiiIii
    # Both sides requested at once -- a collision. Break the tie by
    # RLOC address: the higher-addressed remote wins, so we exit
    # request-nonce mode; otherwise we stay in it and drop our echo.
    if (self.request_nonce_sent and self.echo_nonce_sent and remote_rloc):
        OO = lisp_myrlocs[0] if remote_rloc.is_ipv4() else lisp_myrlocs[1]
        if 91 - 91: oO0o
        if 56 - 56: iIii1I11I1II1 % II111iiii / OoOoOO00 % OoooooooOO
        if (remote_rloc.address > OO.address):
            oOO0oo = "exit"
            self.request_nonce_sent = None
        else:
            oOO0oo = "stay in"
            self.echo_nonce_sent = None
        if 13 - 13: IiII . Oo0Ooo - I11i / oO0o - Oo0Ooo - I1IiiI
        if 84 - 84: II111iiii
        Oo0ooooO0o00 = bold("collision", False)
        IIi11I1i1I1I = red(OO.print_address_no_iid(), False)
        iIIIIIi11Ii = red(remote_rloc.print_address_no_iid(), False)
        lprint("Echo nonce {}, {} -> {}, {} request-nonce mode".format(Oo0ooooO0o00,
            IIi11I1i1I1I, iIIIIIi11Ii, oOO0oo))
    if 92 - 92: oO0o / I1ii11iIi11i
    if 6 - 6: i11iIiiIii / i1IIi / IiII . I1IiiI - OOooOOo % i11iIiiIii
    if 77 - 77: OOooOOo % i11iIiiIii - I1ii11iIi11i
    if 21 - 21: I11i . Oo0Ooo - OoooooooOO * i1IIi
    if 54 - 54: II111iiii % o0oOOo0O0Ooo - i1IIi . I1IiiI - II111iiii / iIii1I11I1II1
    # The peer asked us to echo: return its nonce once and clear the
    # pending echo state.
    if (self.echo_nonce_sent != None):
        oOo0 = self.echo_nonce_sent
        ooo0OO = bold("Echoing", False)
        lprint("{} nonce 0x{} to {}".format(ooo0OO,
            lisp_hex_string(oOo0), red(self.rloc_str, False)))
        self.last_echo_nonce_sent = lisp_get_timestamp()
        self.echo_nonce_sent = None
        return (oOo0)
    if 29 - 29: oO0o
    if 66 - 66: OoooooooOO + iII111i . IiII % i1IIi
    if 58 - 58: OOooOOo % iII111i * O0 + I1ii11iIi11i - IiII
    if 26 - 26: i1IIi / I1IiiI / I11i + I11i
    if 46 - 46: I1Ii111 % I1ii11iIi11i + Ii1I
    if 67 - 67: iIii1I11I1II1 . i11iIiiIii . i11iIiiIii . i11iIiiIii / I11i + ooOoO0o
    if 10 - 10: ooOoO0o - Oo0Ooo % II111iiii
    # Give up on request-nonce mode when no echo arrived within the
    # echo interval.
    oOo0 = self.request_nonce_sent
    oo = self.last_request_nonce_sent
    if (oOo0 and oo != None):
        if (time.time() - oo >= LISP_NONCE_ECHO_INTERVAL):
            self.request_nonce_sent = None
            lprint("Stop request-nonce mode for {}, nonce 0x{}".format(red(self.rloc_str, False), lisp_hex_string(oOo0)))
            if 15 - 15: ooOoO0o * iIii1I11I1II1 * oO0o
            return (None)
    if 96 - 96: I1Ii111 * iIii1I11I1II1 / OoOoOO00 % OOooOOo * II111iiii
    if 3 - 3: OOooOOo . Oo0Ooo / i11iIiiIii + OoO0O00
    if 47 - 47: IiII . OOooOOo
    if 96 - 96: I11i % II111iiii / ooOoO0o % OOooOOo / ooOoO0o % i11iIiiIii
    if 57 - 57: I11i - I11i % II111iiii % Oo0Ooo . o0oOOo0O0Ooo % Oo0Ooo
    if 91 - 91: I1IiiI - OoO0O00 - Oo0Ooo - Ii1I * iIii1I11I1II1
    if 68 - 68: OoO0O00 % O0 * iIii1I11I1II1 / oO0o * o0oOOo0O0Ooo + OOooOOo
    if 89 - 89: ooOoO0o * I1IiiI . oO0o
    if 75 - 75: ooOoO0o - iII111i % iII111i + ooOoO0o * o0oOOo0O0Ooo - I1ii11iIi11i
    # Start a new request-nonce period, or continue the current one.
    if (oOo0 == None):
        oOo0 = lisp_get_data_nonce()
        # Rate-limit: if we requested recently, use the fresh nonce
        # WITHOUT the request bit and without (re)entering mode --
        # NOTE(review): presumably intentional; confirm.
        if (self.recently_requested()): return (oOo0)
        if 26 - 26: I11i * Ii1I % I1IiiI + iII111i
        self.request_nonce_sent = oOo0
        lprint("Start request-nonce mode for {}, nonce 0x{}".format(red(self.rloc_str, False), lisp_hex_string(oOo0)))
        if 38 - 38: iII111i - Oo0Ooo / Ii1I + oO0o . iII111i + IiII
        self.last_new_request_nonce_sent = lisp_get_timestamp()
        if 19 - 19: Ii1I
        if 51 - 51: iIii1I11I1II1
        if 8 - 8: OoO0O00 / o0oOOo0O0Ooo % iII111i . i11iIiiIii . OoooooooOO . Ii1I
        if 8 - 8: OoO0O00 * Oo0Ooo
        if 41 - 41: Oo0Ooo / OoO0O00 / OoOoOO00 - i11iIiiIii - OoOoOO00
        # Non-ITR processes do not notify a companion process.
        if (lisp_i_am_itr == False): return (oOo0 | 0x80000000)
        self.send_request_ipc(ipc_socket, oOo0)
    else:
        lprint("Continue request-nonce mode for {}, nonce 0x{}".format(red(self.rloc_str, False), lisp_hex_string(oOo0)))
    if 4 - 4: I11i . IiII
    if 39 - 39: OOooOOo . Oo0Ooo - OoOoOO00 * i11iIiiIii
    if 4 - 4: OoOoOO00 * O0 - I11i
    if 72 - 72: I11i + ooOoO0o / I1IiiI . IiII % OoO0O00 / i11iIiiIii
    if 13 - 13: I1Ii111 % o0oOOo0O0Ooo + OOooOOo + I1Ii111 + i11iIiiIii - I1ii11iIi11i
    if 70 - 70: II111iiii * II111iiii . I1IiiI
    if 11 - 11: iII111i
    self.last_request_nonce_sent = lisp_get_timestamp()
    return (oOo0 | 0x80000000)
if 20 - 20: Ii1I . I1Ii111 % Ii1I
if 5 - 5: OOooOOo + iII111i
def request_nonce_timeout ( self ) :
if ( self . request_nonce_sent == None ) : return ( False )
if ( self . request_nonce_sent == self . echo_nonce_rcvd ) : return ( False )
if 23 - 23: I1Ii111 % iIii1I11I1II1 . I11i
i11IiIIi11I = time . time ( ) - self . last_request_nonce_sent
OO0oO0O = self . last_echo_nonce_rcvd
return ( i11IiIIi11I >= LISP_NONCE_ECHO_INTERVAL and OO0oO0O == None )
if 11 - 11: I1ii11iIi11i / O0 + II111iiii
if 95 - 95: I1Ii111 + IiII * iIii1I11I1II1
def recently_requested ( self ) :
OO0oO0O = self . last_request_nonce_sent
if ( OO0oO0O == None ) : return ( False )
if 17 - 17: OoO0O00 - Oo0Ooo * O0 / Ii1I
i11IiIIi11I = time . time ( ) - OO0oO0O
return ( i11IiIIi11I <= LISP_NONCE_ECHO_INTERVAL )
if 19 - 19: i1IIi - iIii1I11I1II1 . I11i
if 2 - 2: Ii1I
def recently_echoed ( self ) :
if ( self . request_nonce_sent == None ) : return ( True )
if 12 - 12: i11iIiiIii - iIii1I11I1II1 * IiII * iII111i
if 19 - 19: O0 + oO0o + o0oOOo0O0Ooo
if 81 - 81: iIii1I11I1II1
if 51 - 51: o0oOOo0O0Ooo . I1ii11iIi11i * Ii1I / Oo0Ooo * II111iiii / O0
OO0oO0O = self . last_good_echo_nonce_rcvd
if ( OO0oO0O == None ) : OO0oO0O = 0
i11IiIIi11I = time . time ( ) - OO0oO0O
if ( i11IiIIi11I <= LISP_NONCE_ECHO_INTERVAL ) : return ( True )
if 44 - 44: i11iIiiIii % I1Ii111 % oO0o + I11i * oO0o . Ii1I
if 89 - 89: OoooooooOO % II111iiii - OoO0O00 % i11iIiiIii
if 7 - 7: IiII
if 15 - 15: Oo0Ooo + iII111i + I1IiiI * o0oOOo0O0Ooo
if 33 - 33: o0oOOo0O0Ooo * Oo0Ooo
if 88 - 88: I1Ii111 % OOooOOo - OoOoOO00 - OoOoOO00 . I1IiiI
OO0oO0O = self . last_new_request_nonce_sent
if ( OO0oO0O == None ) : OO0oO0O = 0
i11IiIIi11I = time . time ( ) - OO0oO0O
return ( i11IiIIi11I <= LISP_NONCE_ECHO_INTERVAL )
if 52 - 52: II111iiii / II111iiii / I1IiiI - I1Ii111
if 91 - 91: I1IiiI + o0oOOo0O0Ooo % II111iiii + OoO0O00
 def change_state ( self , rloc ) :
  """
  Drive the RLOC's up/down state from echo-nonce evidence: take an up
  RLOC down when echoes stopped, and bring a no-echoed-nonce RLOC back
  up once request-nonce mode has gone quiet.
  """
  # Up but echoes have dried up -> declare no-echoed-nonce (down).
  if ( rloc . up_state ( ) and self . recently_echoed ( ) == False ) :
   Oo0o0OOo0Oo0 = bold ( "down" , False )
   O00o = lisp_print_elapsed ( self . last_good_echo_nonce_rcvd )
   lprint ( "Take {} {}, last good echo: {}" . format ( red ( self . rloc_str , False ) , Oo0o0OOo0Oo0 , O00o ) )
   if 65 - 65: OoOoOO00 / I1ii11iIi11i / o0oOOo0O0Ooo
   rloc . state = LISP_RLOC_NO_ECHOED_NONCE_STATE
   rloc . last_state_change = lisp_get_timestamp ( )
   return
  if 15 - 15: ooOoO0o / ooOoO0o % OoooooooOO . I1Ii111
  if 93 - 93: I1ii11iIi11i * I1ii11iIi11i / OoooooooOO
  # Only the no-echoed-nonce state can be promoted below.
  if ( rloc . no_echoed_nonce_state ( ) == False ) : return
  if 6 - 6: I1ii11iIi11i * Oo0Ooo + iIii1I11I1II1
  # Request-nonce mode idle -> bring RLOC up and let it retry.
  if ( self . recently_requested ( ) == False ) :
   ii1iIi111i1 = bold ( "up" , False )
   lprint ( "Bring {} {}, retry request-nonce mode" . format ( red ( self . rloc_str , False ) , ii1iIi111i1 ) )
   if 57 - 57: I1IiiI
   rloc . state = LISP_RLOC_UP_STATE
   rloc . last_state_change = lisp_get_timestamp ( )
if 63 - 63: i1IIi + oO0o
if 58 - 58: iII111i - OoooooooOO
if 56 - 56: iII111i / iII111i
def print_echo_nonce ( self ) :
Ii11iIi1iIiii = lisp_print_elapsed ( self . last_request_nonce_sent )
iIIIi = lisp_print_elapsed ( self . last_good_echo_nonce_rcvd )
if 11 - 11: I11i
IIIIi1 = lisp_print_elapsed ( self . last_echo_nonce_sent )
IiII1II1 = lisp_print_elapsed ( self . last_request_nonce_rcvd )
IiIIi1I1I11Ii = space ( 4 )
if 61 - 61: Ii1I + I1IiiI / i1IIi + i1IIi / oO0o
Oo0O = "Nonce-Echoing:\n"
Oo0O += ( "{}Last request-nonce sent: {}\n{}Last echo-nonce " + "received: {}\n" ) . format ( IiIIi1I1I11Ii , Ii11iIi1iIiii , IiIIi1I1I11Ii , iIIIi )
if 47 - 47: I1Ii111
Oo0O += ( "{}Last request-nonce received: {}\n{}Last echo-nonce " + "sent: {}" ) . format ( IiIIi1I1I11Ii , IiII1II1 , IiIIi1I1I11Ii , IIIIi1 )
if 25 - 25: iII111i + I1IiiI + OoOoOO00 + I1Ii111 % O0
if 26 - 26: ooOoO0o + OoOoOO00
return ( Oo0O )
if 17 - 17: I1ii11iIi11i - iII111i % Oo0Ooo * O0 % O0 * OOooOOo
if 6 - 6: I1Ii111
if 46 - 46: II111iiii * I1Ii111
if 23 - 23: i1IIi - O0
if 6 - 6: ooOoO0o % OoooooooOO * I1Ii111 - IiII
if 24 - 24: I11i / iIii1I11I1II1 . OoooooooOO % OoOoOO00 . Ii1I
if 73 - 73: I1Ii111
if 25 - 25: IiII
if 77 - 77: o0oOOo0O0Ooo . iIii1I11I1II1 . OoooooooOO . iIii1I11I1II1
class lisp_keys ( ) :
 """
 Crypto key state for one LISP security association (RFC 8061 style).

 Holds either a curve25519 (ECDH) or 1024-bit MODP (DH) keypair, the
 negotiated cipher suite, and the encrypt/ICV keys derived from the
 shared secret.  NOTE: Python 2 code throughout (uses long,
 dict.has_key(), str-based byte handling).
 """
 def __init__ ( self , key_id , do_curve = True , do_chacha = use_chacha ,
 do_poly = use_poly ) :
  """
  Create a local keypair.  Cipher-suite choice when do_curve: chacha if
  do_chacha, else AES-GCM if env LISP_USE_AES_GCM is set, else AES-CBC.
  With do_curve False, fall back to classic 1024-bit DH.
  """
  self . uptime = lisp_get_timestamp ( )
  self . last_rekey = None
  self . rekey_count = 0
  self . use_count = 0
  self . key_id = key_id
  self . cipher_suite = LISP_CS_1024
  self . dh_g_value = LISP_CS_1024_G
  self . dh_p_value = LISP_CS_1024_P
  self . curve25519 = None
  self . cipher_suite_string = ""
  if ( do_curve ) :
   if ( do_chacha ) :
    self . cipher_suite = LISP_CS_25519_CHACHA
    self . cipher_suite_string = "chacha"
   elif ( os . getenv ( "LISP_USE_AES_GCM" ) != None ) :
    self . cipher_suite = LISP_CS_25519_GCM
    self . cipher_suite_string = "aes-gcm"
   else :
    self . cipher_suite = LISP_CS_25519_CBC
    self . cipher_suite_string = "aes-cbc"
   if 87 - 87: II111iiii - OoooooooOO / i1IIi . Ii1I - Oo0Ooo . i11iIiiIii
   # 128-bit private scalar, hex-encoded for the curve25519 library.
   self . local_private_key = random . randint ( 0 , 2 ** 128 - 1 )
   iii11 = lisp_hex_string ( self . local_private_key ) . zfill ( 32 )
   self . curve25519 = curve25519 . Private ( iii11 )
  else :
   self . local_private_key = random . randint ( 0 , 0x1fff )
  if 47 - 47: Oo0Ooo % OoO0O00 - ooOoO0o - Oo0Ooo * oO0o
  self . local_public_key = self . compute_public_key ( )
  self . remote_public_key = None
  self . shared_key = None
  self . encrypt_key = None
  self . icv_key = None
  # ICV algorithm: poly1305 module when do_poly, else HMAC-SHA256.
  self . icv = poly1305 if do_poly else hashlib . sha256
  self . iv = None
  self . get_iv ( )
  self . do_poly = do_poly
 if 72 - 72: o0oOOo0O0Ooo % o0oOOo0O0Ooo + iII111i + I1ii11iIi11i / Oo0Ooo
 if 30 - 30: Oo0Ooo + I1IiiI + i11iIiiIii / OoO0O00
 def copy_keypair ( self , key ) :
  """
  Copy the local keypair (private, public, curve object) from another
  lisp_keys instance; remote/derived keys are not copied.
  """
  self . local_private_key = key . local_private_key
  self . local_public_key = key . local_public_key
  self . curve25519 = key . curve25519
 if 64 - 64: IiII
 if 80 - 80: I1IiiI - i11iIiiIii / OoO0O00 / OoOoOO00 + OoOoOO00
 def get_iv ( self ) :
  """
  Advance and return the IV, packed per cipher suite: 8 bytes for
  chacha, 12 (4+8) for AES-GCM, 16 for AES-CBC.  First call seeds a
  random 128-bit IV; later calls increment it.
  """
  if ( self . iv == None ) :
   self . iv = random . randint ( 0 , LISP_16_128_MASK )
  else :
   self . iv += 1
  if 89 - 89: O0 + IiII * I1Ii111
  O0o = self . iv
  if ( self . cipher_suite == LISP_CS_25519_CHACHA ) :
   O0o = struct . pack ( "Q" , O0o & LISP_8_64_MASK )
  elif ( self . cipher_suite == LISP_CS_25519_GCM ) :
   iIIIIII = struct . pack ( "I" , ( O0o >> 64 ) & LISP_4_32_MASK )
   IIIi1i1i1iii = struct . pack ( "Q" , O0o & LISP_8_64_MASK )
   O0o = iIIIIII + IIIi1i1i1iii
  else :
   O0o = struct . pack ( "QQ" , O0o >> 64 , O0o & LISP_8_64_MASK )
  return ( O0o )
 if 53 - 53: OoO0O00
 if 80 - 80: II111iiii - o0oOOo0O0Ooo . iIii1I11I1II1
 def key_length ( self , key ) :
  """
  Return the byte length of a public key given its hex-string form
  (two hex chars per byte).  Python 2 integer division.
  """
  if ( type ( key ) != str ) : key = self . normalize_pub_key ( key )
  return ( len ( key ) / 2 )
 if 44 - 44: i11iIiiIii % I11i % I1ii11iIi11i
 if 7 - 7: Oo0Ooo * OoO0O00 - II111iiii % I1Ii111 . Oo0Ooo . Oo0Ooo
 def print_key ( self , key ) :
  """
  Return an abbreviated display form "0xAAAA...ZZZZ(len)" of a key.
  """
  oOOO0OO = self . normalize_pub_key ( key )
  return ( "0x{}...{}({})" . format ( oOOO0OO [ 0 : 4 ] , oOOO0OO [ - 4 : : ] , self . key_length ( oOOO0OO ) ) )
 if 5 - 5: OoooooooOO * I1ii11iIi11i
 if 42 - 42: o0oOOo0O0Ooo . I1Ii111 / O0 . II111iiii * OoOoOO00
 def normalize_pub_key ( self , key ) :
  """
  Return the key as a hex string: curve25519 keys (raw str) are
  hexlified; integer DH keys are hex-formatted and zero-filled to 256
  characters.
  """
  if ( type ( key ) == str ) :
   if ( self . curve25519 ) : return ( binascii . hexlify ( key ) )
   return ( key )
  if 7 - 7: I1Ii111 * O0 + OoOoOO00
  key = lisp_hex_string ( key ) . zfill ( 256 )
  return ( key )
 if 90 - 90: IiII * II111iiii * IiII - iII111i
 if 34 - 34: OOooOOo - I1ii11iIi11i * iII111i % Ii1I
 def print_keys ( self , do_bold = True ) :
  """
  Return a one-line summary of cipher suite plus abbreviated local and
  remote public keys ("none" when unset).
  """
  IIi11I1i1I1I = bold ( "local-key: " , False ) if do_bold else "local-key: "
  if ( self . local_public_key == None ) :
   IIi11I1i1I1I += "none"
  else :
   IIi11I1i1I1I += self . print_key ( self . local_public_key )
  if 25 - 25: II111iiii + I1IiiI * ooOoO0o * I1ii11iIi11i . iII111i
  iIIIIIi11Ii = bold ( "remote-key: " , False ) if do_bold else "remote-key: "
  if ( self . remote_public_key == None ) :
   iIIIIIi11Ii += "none"
  else :
   iIIIIIi11Ii += self . print_key ( self . remote_public_key )
  if 26 - 26: iII111i - ooOoO0o / OoooooooOO + o0oOOo0O0Ooo . Oo0Ooo
  oooO0 = "ECDH" if ( self . curve25519 ) else "DH"
  iI1iIi1ii1I1 = self . cipher_suite
  return ( "{} cipher-suite: {}, {}, {}" . format ( oooO0 , iI1iIi1ii1I1 , IIi11I1i1I1I , iIIIIIi11Ii ) )
 if 59 - 59: II111iiii * OoooooooOO - OoooooooOO
 if 33 - 33: O0 . i11iIiiIii % o0oOOo0O0Ooo
 def compare_keys ( self , keys ) :
  """
  Return True when DH parameters and the remote public key match the
  other lisp_keys instance (local keys are not compared).
  """
  if ( self . dh_g_value != keys . dh_g_value ) : return ( False )
  if ( self . dh_p_value != keys . dh_p_value ) : return ( False )
  if ( self . remote_public_key != keys . remote_public_key ) : return ( False )
  return ( True )
 if 50 - 50: ooOoO0o
 if 81 - 81: i11iIiiIii * iIii1I11I1II1 / Oo0Ooo * OOooOOo
 def compute_public_key ( self ) :
  """
  Return the local public key: curve25519 public bytes, or the classic
  DH value (g ** private) mod p as an integer.
  """
  if ( self . curve25519 ) : return ( self . curve25519 . get_public ( ) . public )
  if 83 - 83: i11iIiiIii - I1IiiI * i11iIiiIii
  iii11 = self . local_private_key
  O0ooO0oOO = self . dh_g_value
  OoOoO = self . dh_p_value
  return ( int ( ( O0ooO0oOO ** iii11 ) % OoOoO ) )
 if 70 - 70: oO0o
 if 69 - 69: IiII
 def compute_shared_key ( self , ed , print_shared = False ) :
  """
  Compute the DH/ECDH shared secret from the stored remote public key,
  then derive the encrypt and ICV keys and bump the rekey counters.
  'ed' is a direction tag ("encap"/"decap" style) used only in logging.
  """
  iii11 = self . local_private_key
  OOOo0O0o0oo = self . remote_public_key
  if 25 - 25: OoooooooOO
  IiIi1I1IiI1II1 = bold ( "Compute {} shared-key" . format ( ed ) , False )
  lprint ( "{}, key-material: {}" . format ( IiIi1I1IiI1II1 , self . print_keys ( ) ) )
  if 21 - 21: OoooooooOO . O0 / i11iIiiIii
  if ( self . curve25519 ) :
   oOOO = curve25519 . Public ( OOOo0O0o0oo )
   self . shared_key = self . curve25519 . get_shared_key ( oOOO )
  else :
   OoOoO = self . dh_p_value
   self . shared_key = ( OOOo0O0o0oo ** iii11 ) % OoOoO
  if 71 - 71: I1IiiI . ooOoO0o
  if 43 - 43: I1ii11iIi11i * OOooOOo
  if 1 - 1: OoO0O00 * ooOoO0o + IiII . oO0o / ooOoO0o
  if 91 - 91: Ii1I + I11i - Oo0Ooo % OoOoOO00 . iII111i
  if 51 - 51: OOooOOo / I11i
  if 51 - 51: ooOoO0o * oO0o - I1Ii111 + iII111i
  if 46 - 46: o0oOOo0O0Ooo - i11iIiiIii % OoO0O00 / Ii1I - OoOoOO00
  # Optionally log the shared secret (abbreviated) for debugging.
  if ( print_shared ) :
   oOOO0OO = self . print_key ( self . shared_key )
   lprint ( "Computed shared-key: {}" . format ( oOOO0OO ) )
  if 88 - 88: oO0o * I1IiiI / OoO0O00 - OOooOOo / i1IIi . I1Ii111
  if 26 - 26: i11iIiiIii - ooOoO0o
  if 45 - 45: ooOoO0o + II111iiii % iII111i
  if 55 - 55: ooOoO0o - oO0o % I1IiiI
  if 61 - 61: ooOoO0o
  self . compute_encrypt_icv_keys ( )
  if 22 - 22: iIii1I11I1II1 / ooOoO0o / I1IiiI - o0oOOo0O0Ooo
  if 21 - 21: oO0o . i11iIiiIii * I11i . OOooOOo / OOooOOo
  if 42 - 42: OoooooooOO / I1Ii111 . o0oOOo0O0Ooo / O0 - IiII * IiII
  if 1 - 1: Ii1I % I1Ii111
  self . rekey_count += 1
  self . last_rekey = lisp_get_timestamp ( )
 if 97 - 97: OoOoOO00
 if 13 - 13: OoOoOO00 % OOooOOo . O0 / Oo0Ooo % Oo0Ooo
 def compute_encrypt_icv_keys ( self ) :
  """
  Derive encrypt and ICV keys from the shared secret with a single
  HMAC-SHA256 over a fixed context string; the 256-bit digest is split
  into two 128-bit halves (encrypt key | ICV key).
  """
  I1I111iII1 = hashlib . sha256
  if ( self . curve25519 ) :
   IIII1iI1iiI = self . shared_key
  else :
   IIII1iI1iiI = lisp_hex_string ( self . shared_key )
  if 78 - 78: ooOoO0o . OOooOOo / OoOoOO00 * Oo0Ooo % oO0o
  if 20 - 20: OoOoOO00
  if 1 - 1: I1Ii111 * OoO0O00 - iII111i
  if 97 - 97: iII111i . I1ii11iIi11i - iIii1I11I1II1 . ooOoO0o + I1IiiI % oO0o
  if 4 - 4: I1IiiI / II111iiii % O0 * ooOoO0o / II111iiii . Oo0Ooo
  # HMAC key is a context string that folds in both public keys.
  IIi11I1i1I1I = self . local_public_key
  if ( type ( IIi11I1i1I1I ) != long ) : IIi11I1i1I1I = int ( binascii . hexlify ( IIi11I1i1I1I ) , 16 )
  iIIIIIi11Ii = self . remote_public_key
  if ( type ( iIIIIIi11Ii ) != long ) : iIIIIIi11Ii = int ( binascii . hexlify ( iIIIIIi11Ii ) , 16 )
  iiIiii = "0001" + "lisp-crypto" + lisp_hex_string ( IIi11I1i1I1I ^ iIIIIIi11Ii ) + "0100"
  if 3 - 3: I11i / I1Ii111 * IiII - O0 + I1IiiI / IiII
  iii1II11II1 = hmac . new ( iiIiii , IIII1iI1iiI , I1I111iII1 ) . hexdigest ( )
  iii1II11II1 = int ( iii1II11II1 , 16 )
  if 30 - 30: IiII / i11iIiiIii % OoO0O00 * OOooOOo
  if 27 - 27: O0
  if 95 - 95: OoOoOO00 . Oo0Ooo + II111iiii - I1ii11iIi11i
  if 57 - 57: OoooooooOO . I1ii11iIi11i - oO0o * i1IIi . I11i
  # Upper 128 bits -> encrypt key, lower 128 bits -> ICV key.
  II1iIi11iIii = ( iii1II11II1 >> 128 ) & LISP_16_128_MASK
  oOOO0oo0 = iii1II11II1 & LISP_16_128_MASK
  self . encrypt_key = lisp_hex_string ( II1iIi11iIii ) . zfill ( 32 )
  iI1IiiiiI = 32 if self . do_poly else 40
  self . icv_key = lisp_hex_string ( oOOO0oo0 ) . zfill ( iI1IiiiiI )
 if 12 - 12: i11iIiiIii . I11i * OOooOOo % i1IIi . ooOoO0o
 if 58 - 58: iII111i % iIii1I11I1II1 . iIii1I11I1II1 / I11i
 def do_icv ( self , packet , nonce ) :
  """
  Return the hex integrity-check value over 'packet': poly1305-AES
  when do_poly, else a 160-bit-truncated HMAC digest.  Empty string
  when no ICV key has been derived yet.
  """
  if ( self . icv_key == None ) : return ( "" )
  if ( self . do_poly ) :
   OOO0O = self . icv . poly1305aes
   II11 = self . icv . binascii . hexlify
   nonce = II11 ( nonce )
   oOoOo000Ooooo = OOO0O ( self . encrypt_key , self . icv_key , nonce , packet )
   oOoOo000Ooooo = II11 ( oOoOo000Ooooo )
  else :
   iii11 = binascii . unhexlify ( self . icv_key )
   oOoOo000Ooooo = hmac . new ( iii11 , packet , self . icv ) . hexdigest ( )
   oOoOo000Ooooo = oOoOo000Ooooo [ 0 : 40 ]
  if 18 - 18: Ii1I + OoOoOO00 . i1IIi / IiII / iII111i
  return ( oOoOo000Ooooo )
 if 97 - 97: OoO0O00 + iIii1I11I1II1
 if 79 - 79: ooOoO0o + oO0o - II111iiii . Oo0Ooo
 def add_key_by_nonce ( self , nonce ) :
  """
  Register this key under 'nonce' in the global by-nonce table,
  indexed by key-id (slots 0-3).
  """
  if ( lisp_crypto_keys_by_nonce . has_key ( nonce ) == False ) :
   lisp_crypto_keys_by_nonce [ nonce ] = [ None , None , None , None ]
  if 26 - 26: IiII
  lisp_crypto_keys_by_nonce [ nonce ] [ self . key_id ] = self
 if 52 - 52: O0 + ooOoO0o
 if 11 - 11: i1IIi / I1Ii111 * I1ii11iIi11i * I1Ii111 * ooOoO0o - i11iIiiIii
 def delete_key_by_nonce ( self , nonce ) :
  """
  Remove all key-id entries stored under 'nonce', if present.
  """
  if ( lisp_crypto_keys_by_nonce . has_key ( nonce ) == False ) : return
  lisp_crypto_keys_by_nonce . pop ( nonce )
 if 96 - 96: I1ii11iIi11i % I1ii11iIi11i
 if 1 - 1: I1IiiI . Ii1I
 def add_key_by_rloc ( self , addr_str , encap ) :
  """
  Register this key under an RLOC address string in the encap or decap
  table; decap keys are also pushed to the data-plane over IPC.
  """
  II11IIII1 = lisp_crypto_keys_by_rloc_encap if encap else lisp_crypto_keys_by_rloc_decap
  if 33 - 33: Ii1I + OoOoOO00 - I1ii11iIi11i + iIii1I11I1II1 % i1IIi * IiII
  if 21 - 21: O0 * ooOoO0o % OoO0O00
  if ( II11IIII1 . has_key ( addr_str ) == False ) :
   II11IIII1 [ addr_str ] = [ None , None , None , None ]
  if 14 - 14: O0 / I1Ii111 / ooOoO0o + IiII - IiII
  II11IIII1 [ addr_str ] [ self . key_id ] = self
  if 10 - 10: O0 - I1ii11iIi11i / I1Ii111 % OoOoOO00 / OoooooooOO / Ii1I
  if 73 - 73: ooOoO0o + IiII % o0oOOo0O0Ooo . I1ii11iIi11i / OOooOOo . I1Ii111
  if 76 - 76: I11i . I1ii11iIi11i * OoooooooOO % iII111i
  if 24 - 24: OoooooooOO
  if 83 - 83: O0 / OoO0O00
  if ( encap == False ) :
   lisp_write_ipc_decap_key ( addr_str , II11IIII1 [ addr_str ] )
  if 62 - 62: I11i
  if 73 - 73: Ii1I % OoO0O00 * OOooOOo
 if 84 - 84: Oo0Ooo
 def encode_lcaf ( self , rloc_addr ) :
  """
  Encode this key as a Security-Type LCAF (AFI 16387): LCAF header,
  cipher suite, key length, public key in byte-swapped 64-bit chunks,
  then the optional RLOC address.
  """
  i1Ii = self . normalize_pub_key ( self . local_public_key )
  iI1i11 = self . key_length ( i1Ii )
  OO0OoO0OOoOo = ( 6 + iI1i11 + 2 )
  if ( rloc_addr != None ) : OO0OoO0OOoOo += rloc_addr . addr_length ( )
  if 84 - 84: oO0o / Ii1I * iII111i
  i1II1IiiIi = struct . pack ( "HBBBBHBB" , socket . htons ( LISP_AFI_LCAF ) , 0 , 0 ,
  LISP_LCAF_SECURITY_TYPE , 0 , socket . htons ( OO0OoO0OOoOo ) , 1 , 0 )
  if 20 - 20: OoOoOO00 % O0
  if 59 - 59: O0 . o0oOOo0O0Ooo % I1ii11iIi11i * oO0o + I11i
  if 82 - 82: OoooooooOO
  if 88 - 88: O0 / o0oOOo0O0Ooo * o0oOOo0O0Ooo . o0oOOo0O0Ooo . O0
  if 27 - 27: i11iIiiIii % iII111i + Ii1I . OOooOOo
  if 9 - 9: OoO0O00
  iI1iIi1ii1I1 = self . cipher_suite
  i1II1IiiIi += struct . pack ( "BBH" , iI1iIi1ii1I1 , 0 , socket . htons ( iI1i11 ) )
  if 43 - 43: Ii1I . OOooOOo + I1IiiI * i11iIiiIii
  if 2 - 2: OOooOOo
  if 3 - 3: I1IiiI . iII111i % O0 - ooOoO0o / O0
  if 79 - 79: Ii1I + oO0o % ooOoO0o % I1IiiI
  # Pack the hex key 16 hex-chars (8 bytes) at a time, byte-swapped.
  for Ii11 in range ( 0 , iI1i11 * 2 , 16 ) :
   iii11 = int ( i1Ii [ Ii11 : Ii11 + 16 ] , 16 )
   i1II1IiiIi += struct . pack ( "Q" , byte_swap_64 ( iii11 ) )
  if 68 - 68: II111iiii - OoooooooOO / iIii1I11I1II1 - o0oOOo0O0Ooo % II111iiii
  if 53 - 53: iII111i . oO0o / Oo0Ooo . OoO0O00 . i11iIiiIii
  if 60 - 60: II111iiii
  if 25 - 25: Oo0Ooo + o0oOOo0O0Ooo - OoO0O00
  if 57 - 57: II111iiii . i1IIi
  if ( rloc_addr ) :
   i1II1IiiIi += struct . pack ( "H" , socket . htons ( rloc_addr . afi ) )
   i1II1IiiIi += rloc_addr . pack_address ( )
  if 33 - 33: iII111i + Oo0Ooo % I11i . oO0o
  return ( i1II1IiiIi )
 if 6 - 6: IiII + I1ii11iIi11i
 if 62 - 62: oO0o . I1Ii111 - OoooooooOO * II111iiii . i11iIiiIii
 def decode_lcaf ( self , packet , lcaf_len ) :
  """
  Parse a Security-Type LCAF, storing the peer's cipher suite and
  remote public key.  Returns the remaining packet, or None when the
  buffer is too short.  Non-security LCAFs are skipped unmodified.
  """
  if 13 - 13: iIii1I11I1II1 * o0oOOo0O0Ooo - i11iIiiIii
  if 63 - 63: OoooooooOO * I1Ii111
  if 50 - 50: Oo0Ooo - o0oOOo0O0Ooo % II111iiii . O0 . oO0o % II111iiii
  if 18 - 18: I11i % OoooooooOO + OoO0O00 / I11i
  # lcaf_len == 0 means the LCAF header has not been consumed yet.
  if ( lcaf_len == 0 ) :
   oOoOo000 = "HHBBH"
   O0OOoooO = struct . calcsize ( oOoOo000 )
   if ( len ( packet ) < O0OOoooO ) : return ( None )
   if 37 - 37: i1IIi - Ii1I / IiII . II111iiii % ooOoO0o
   iioOO , i11iIi1I1i1 , oOOi1I111II , i11iIi1I1i1 , lcaf_len = struct . unpack ( oOoOo000 , packet [ : O0OOoooO ] )
   if 51 - 51: I1IiiI * ooOoO0o
   if 47 - 47: OOooOOo . OOooOOo . IiII . I1Ii111 / i1IIi
   if ( oOOi1I111II != LISP_LCAF_SECURITY_TYPE ) :
    packet = packet [ lcaf_len + 6 : : ]
    return ( packet )
   if 77 - 77: II111iiii % I11i / Oo0Ooo
   lcaf_len = socket . ntohs ( lcaf_len )
   packet = packet [ O0OOoooO : : ]
  if 23 - 23: iIii1I11I1II1
  if 10 - 10: I11i - o0oOOo0O0Ooo % OoooooooOO - I1ii11iIi11i
  if 64 - 64: OoO0O00 / I1IiiI
  if 23 - 23: I11i * I1Ii111 * o0oOOo0O0Ooo - I1IiiI % OoOoOO00 + o0oOOo0O0Ooo
  if 41 - 41: IiII * OoooooooOO . ooOoO0o % i11iIiiIii
  if 11 - 11: iIii1I11I1II1 . I1Ii111 - Oo0Ooo / I11i + II111iiii
  oOOi1I111II = LISP_LCAF_SECURITY_TYPE
  oOoOo000 = "BBBBH"
  O0OOoooO = struct . calcsize ( oOoOo000 )
  if ( len ( packet ) < O0OOoooO ) : return ( None )
  if 29 - 29: I11i . i11iIiiIii + i1IIi - Ii1I + O0 . I1IiiI
  i1iIiII1 , i11iIi1I1i1 , iI1iIi1ii1I1 , i11iIi1I1i1 , iI1i11 = struct . unpack ( oOoOo000 ,
  packet [ : O0OOoooO ] )
  if 59 - 59: OoooooooOO + I1Ii111 % o0oOOo0O0Ooo - OoOoOO00 . I1IiiI
  if 42 - 42: I1Ii111
  if 70 - 70: o0oOOo0O0Ooo / I11i + oO0o % I1IiiI % Oo0Ooo + OoO0O00
  if 80 - 80: OOooOOo
  if 12 - 12: Ii1I
  if 2 - 2: OoooooooOO
  packet = packet [ O0OOoooO : : ]
  iI1i11 = socket . ntohs ( iI1i11 )
  if ( len ( packet ) < iI1i11 ) : return ( None )
  if 100 - 100: Oo0Ooo / O0 * i11iIiiIii * OoooooooOO
  if 46 - 46: O0 % OoooooooOO
  if 22 - 22: iII111i + OoooooooOO - OoOoOO00 - OoO0O00 * I1Ii111 - oO0o
  if 99 - 99: ooOoO0o / I1IiiI . Ii1I - Ii1I * I1IiiI
  # Reject unknown cipher suites but still consume the key bytes.
  I1IIiIIiiI1i = [ LISP_CS_25519_CBC , LISP_CS_25519_GCM , LISP_CS_25519_CHACHA ,
  LISP_CS_1024 ]
  if ( iI1iIi1ii1I1 not in I1IIiIIiiI1i ) :
   lprint ( "Cipher-suites {} supported, received {}" . format ( I1IIiIIiiI1i ,
   iI1iIi1ii1I1 ) )
   packet = packet [ iI1i11 : : ]
   return ( packet )
  if 83 - 83: I1ii11iIi11i * II111iiii . I1Ii111 - I11i
  if 46 - 46: OoO0O00 % I1ii11iIi11i
  self . cipher_suite = iI1iIi1ii1I1
  if 58 - 58: oO0o + IiII % iII111i - Ii1I - OOooOOo % Ii1I
  if 86 - 86: o0oOOo0O0Ooo
  if 15 - 15: oO0o - iIii1I11I1II1 - II111iiii - IiII % I1ii11iIi11i
  if 80 - 80: IiII * iII111i . i1IIi % Ii1I % I1ii11iIi11i + ooOoO0o
  if 6 - 6: I1ii11iIi11i . oO0o . OoO0O00 + IiII
  # Rebuild the integer public key from byte-swapped 64-bit chunks.
  i1Ii = 0
  for Ii11 in range ( 0 , iI1i11 , 8 ) :
   iii11 = byte_swap_64 ( struct . unpack ( "Q" , packet [ Ii11 : Ii11 + 8 ] ) [ 0 ] )
   i1Ii <<= 64
   i1Ii |= iii11
  if 65 - 65: I1ii11iIi11i / ooOoO0o
  self . remote_public_key = i1Ii
  if 23 - 23: OOooOOo / OOooOOo * o0oOOo0O0Ooo * OOooOOo
  if 57 - 57: iII111i
  if 29 - 29: I1IiiI
  if 41 - 41: I1Ii111 * OoO0O00 - iII111i . Ii1I
  if 41 - 41: iIii1I11I1II1 - O0 - I1ii11iIi11i - oO0o + I1Ii111
  # curve25519 wants the key as a raw 32-byte string, not an integer.
  if ( self . curve25519 ) :
   iii11 = lisp_hex_string ( self . remote_public_key )
   iii11 = iii11 . zfill ( 64 )
   Ii1111iI1i1 = ""
   for Ii11 in range ( 0 , len ( iii11 ) , 2 ) :
    Ii1111iI1i1 += chr ( int ( iii11 [ Ii11 : Ii11 + 2 ] , 16 ) )
   if 78 - 78: I1ii11iIi11i . iII111i % II111iiii
   self . remote_public_key = Ii1111iI1i1
  if 90 - 90: OoooooooOO % i11iIiiIii % o0oOOo0O0Ooo % I1Ii111 - ooOoO0o + iIii1I11I1II1
  if 98 - 98: O0 / oO0o / iII111i
  packet = packet [ iI1i11 : : ]
  return ( packet )
if 83 - 83: I1Ii111
if 38 - 38: oO0o
if 9 - 9: I11i . OoO0O00 . oO0o / OoooooooOO
if 59 - 59: iIii1I11I1II1 + i1IIi % II111iiii
if 2 - 2: II111iiii + I11i . OoO0O00
if 14 - 14: OOooOOo * I1IiiI - I1ii11iIi11i
if 10 - 10: iII111i % I1Ii111 * I1ii11iIi11i * O0 * i11iIiiIii % I1Ii111
if 68 - 68: OoooooooOO * OoOoOO00
class lisp_thread ( ) :
 """
 Per-thread context: a name/number, pcap/worker thread counts, an input
 queue with its statistics, and a reusable lisp_packet work buffer.
 """
 def __init__ ( self , name ) :
  self . thread_name = name # human-readable thread name
  self . thread_number = - 1 # -1 until a number is assigned
  self . number_of_pcap_threads = 0
  self . number_of_worker_threads = 0
  self . input_queue = Queue . Queue ( ) # Python 2 Queue module
  self . input_stats = lisp_stats ( )
  self . lisp_packet = lisp_packet ( None ) # scratch packet for this thread
if 9 - 9: I1Ii111
if 36 - 36: I1Ii111 / OoOoOO00 + OoOoOO00 * ooOoO0o / OOooOOo * O0
if 17 - 17: OoO0O00 / ooOoO0o % I1IiiI
if 47 - 47: Oo0Ooo * OoO0O00 / o0oOOo0O0Ooo * I1IiiI
if 60 - 60: I1ii11iIi11i / IiII . i11iIiiIii / OoO0O00 % II111iiii
if 6 - 6: iII111i % o0oOOo0O0Ooo + I1Ii111
if 91 - 91: o0oOOo0O0Ooo + O0 * oO0o * IiII * I1ii11iIi11i
if 83 - 83: OoooooooOO
if 52 - 52: o0oOOo0O0Ooo / OoOoOO00 % oO0o % OoO0O00 / IiII % o0oOOo0O0Ooo
if 88 - 88: OOooOOo / i11iIiiIii / Ii1I / i11iIiiIii * I1ii11iIi11i % I11i
if 43 - 43: OoOoOO00 * OoO0O00 % i1IIi * Ii1I + iIii1I11I1II1
if 80 - 80: o0oOOo0O0Ooo . iII111i . OoooooooOO
if 63 - 63: ooOoO0o . OOooOOo
if 66 - 66: I1IiiI
if 99 - 99: OoO0O00 % O0 . I1Ii111 - I1ii11iIi11i . Oo0Ooo / OoOoOO00
if 60 - 60: I1ii11iIi11i
if 78 - 78: oO0o + II111iiii
if 55 - 55: OoooooooOO
class lisp_control_header ( ) :
 """
 Decoded view of the leading 12 bytes of a LISP control message:
 type, record count, nonce, and the per-type flag bits.
 """
 def __init__ ( self ) :
  self . type = 0
  self . record_count = 0
  self . nonce = 0
  self . rloc_probe = False
  self . smr_bit = False
  self . smr_invoked_bit = False
  self . ddt_bit = False
  self . to_etr = False
  self . to_ms = False
  self . info_reply = False

 def decode ( self , packet ) :
  """
  Parse the fixed control header from 'packet'.  Returns False when
  the buffer is shorter than the header, True otherwise.  Flag bits
  are interpreted according to the message type in the top nibble.
  """
  header_format = "BBBBQ"
  header_size = struct . calcsize ( header_format )
  if ( len ( packet ) < header_size ) : return ( False )

  first_byte , second_byte , unused_byte , self . record_count , self . nonce = struct . unpack ( header_format , packet [ : header_size ] )

  self . type = first_byte >> 4
  if ( self . type == LISP_MAP_REQUEST ) :
   self . smr_bit = ( first_byte & 0x01 ) != 0
   self . rloc_probe = ( first_byte & 0x02 ) != 0
   self . smr_invoked_bit = ( second_byte & 0x40 ) != 0

  if ( self . type == LISP_ECM ) :
   self . ddt_bit = ( first_byte & 0x04 ) != 0
   self . to_etr = ( first_byte & 0x02 ) != 0
   self . to_ms = ( first_byte & 0x01 ) != 0

  if ( self . type == LISP_NAT_INFO ) :
   self . info_reply = ( first_byte & 0x08 ) != 0

  return ( True )

 def is_info_request ( self ) :
  return ( self . type == LISP_NAT_INFO and not self . info_reply )

 def is_info_reply ( self ) :
  return ( bool ( self . info_reply ) )

 def is_rloc_probe ( self ) :
  return ( bool ( self . rloc_probe ) )

 def is_smr ( self ) :
  return ( bool ( self . smr_bit ) )

 def is_smr_invoked ( self ) :
  return ( bool ( self . smr_invoked_bit ) )

 def is_ddt ( self ) :
  return ( bool ( self . ddt_bit ) )

 def is_to_etr ( self ) :
  return ( bool ( self . to_etr ) )

 def is_to_ms ( self ) :
  return ( bool ( self . to_ms ) )
if 78 - 78: OoO0O00 % II111iiii + OoOoOO00 / I1IiiI
if 34 - 34: o0oOOo0O0Ooo % I1ii11iIi11i + Ii1I * I11i / oO0o
if 18 - 18: ooOoO0o
if 92 - 92: OoO0O00 % iIii1I11I1II1 / IiII * iII111i . i1IIi + oO0o
if 24 - 24: IiII . iII111i * IiII % i11iIiiIii . i11iIiiIii + i1IIi
if 64 - 64: iIii1I11I1II1 / IiII / Oo0Ooo - I1ii11iIi11i
if 100 - 100: IiII + i1IIi * OoO0O00
if 64 - 64: oO0o * i11iIiiIii . Oo0Ooo
if 52 - 52: Oo0Ooo / ooOoO0o / iII111i - o0oOOo0O0Ooo / iII111i
if 74 - 74: i1IIi . iIii1I11I1II1
if 85 - 85: I1IiiI
if 10 - 10: O0 . II111iiii / OoooooooOO
if 72 - 72: OoooooooOO . o0oOOo0O0Ooo + O0
if 46 - 46: OoOoOO00 * I11i / oO0o + Oo0Ooo + IiII
if 95 - 95: o0oOOo0O0Ooo - Ii1I
if 67 - 67: I1ii11iIi11i * Oo0Ooo % o0oOOo0O0Ooo
if 19 - 19: OoOoOO00 . OOooOOo . OoooooooOO
if 79 - 79: OOooOOo * ooOoO0o * I1IiiI * I1ii11iIi11i / I1ii11iIi11i
if 62 - 62: ooOoO0o * Ii1I % I1ii11iIi11i - i1IIi - I1ii11iIi11i
if 24 - 24: OOooOOo
if 71 - 71: IiII - i1IIi
if 56 - 56: OoOoOO00 + oO0o
if 74 - 74: iII111i / I1Ii111 / II111iiii - iII111i / oO0o % I11i
if 19 - 19: IiII % OoooooooOO + OoooooooOO
if 7 - 7: i1IIi
if 91 - 91: OoOoOO00 - OoOoOO00 . IiII
if 33 - 33: I1Ii111 - iIii1I11I1II1 / Ii1I % O0
if 80 - 80: IiII % OoooooooOO - IiII
if 27 - 27: I1Ii111 - o0oOOo0O0Ooo * I1ii11iIi11i - I1IiiI
if 22 - 22: Oo0Ooo % OoooooooOO - Oo0Ooo - iII111i . Ii1I
if 100 - 100: II111iiii / I1Ii111 / iII111i - I1ii11iIi11i * iIii1I11I1II1
if 7 - 7: i1IIi . IiII % i11iIiiIii * I1ii11iIi11i . I11i % I1ii11iIi11i
if 35 - 35: I1IiiI
if 48 - 48: OoooooooOO % OoooooooOO - OoO0O00 . OoOoOO00
if 22 - 22: ooOoO0o . i11iIiiIii . OoooooooOO . i1IIi
if 12 - 12: OoOoOO00 % OOooOOo + oO0o . O0 % iIii1I11I1II1
if 41 - 41: OoooooooOO
if 13 - 13: I11i + I1Ii111 - I1Ii111 % oO0o / I11i
if 4 - 4: I1IiiI + OOooOOo - IiII + iII111i
if 78 - 78: Ii1I
if 29 - 29: II111iiii
if 79 - 79: iIii1I11I1II1 - i11iIiiIii + ooOoO0o - II111iiii . iIii1I11I1II1
if 84 - 84: Oo0Ooo % I11i * O0 * I11i
if 66 - 66: OOooOOo / iIii1I11I1II1 - OoOoOO00 % O0 . ooOoO0o
if 12 - 12: Oo0Ooo + I1IiiI
if 37 - 37: i1IIi * i11iIiiIii
if 95 - 95: i11iIiiIii % I1Ii111 * Oo0Ooo + i1IIi . O0 + I1ii11iIi11i
if 7 - 7: OoO0O00 * i11iIiiIii * iIii1I11I1II1 / OOooOOo / I1Ii111
if 35 - 35: iII111i * OOooOOo
class lisp_map_register ( ) :
def __init__ ( self ) :
self . proxy_reply_requested = False
self . lisp_sec_present = False
self . xtr_id_present = False
self . map_notify_requested = False
self . mobile_node = False
self . merge_register_requested = False
self . use_ttl_for_timeout = False
self . map_register_refresh = False
self . record_count = 0
self . nonce = 0
self . alg_id = 0
self . key_id = 0
self . auth_len = 0
self . auth_data = 0
self . xtr_id = 0
self . site_id = 0
self . record_count = 0
self . sport = 0
self . encrypt_bit = 0
self . encryption_key_id = None
if 65 - 65: II111iiii % i1IIi
if 13 - 13: OoO0O00 * I1Ii111 + Oo0Ooo - IiII
 def print_map_register ( self ) :
  """
  Log a one-line summary of this Map-Register: flag letters
  (upper-case = set, lower-case = clear), record count, nonce,
  key/alg ids, auth length, xtr-id and site-id.
  """
  i11IIii = lisp_hex_string ( self . xtr_id )
  if 48 - 48: iII111i
  oooOo = ( "{} -> flags: {}{}{}{}{}{}{}{}{}, record-count: " +
  "{}, nonce: 0x{}, key/alg-id: {}/{}{}, auth-len: {}, xtr-id: " +
  "0x{}, site-id: {}" )
  if 26 - 26: I1ii11iIi11i . Ii1I % o0oOOo0O0Ooo
  lprint ( oooOo . format ( bold ( "Map-Register" , False ) , "P" if self . proxy_reply_requested else "p" ,
  # flag letters: upper-case means the bit is set, lower-case clear
  "S" if self . lisp_sec_present else "s" ,
  "I" if self . xtr_id_present else "i" ,
  "T" if self . use_ttl_for_timeout else "t" ,
  "R" if self . merge_register_requested else "r" ,
  "M" if self . mobile_node else "m" ,
  "N" if self . map_notify_requested else "n" ,
  "F" if self . map_register_refresh else "f" ,
  "E" if self . encrypt_bit else "e" ,
  self . record_count , lisp_hex_string ( self . nonce ) , self . key_id ,
  # NOTE(review): the "(sha1)"/"(sha2)" suffix tests self.key_id
  # against ALG-ID constants, while encode() sizes auth_len from
  # self.alg_id -- looks inconsistent; confirm before changing.
  self . alg_id , " (sha1)" if ( self . key_id == LISP_SHA_1_96_ALG_ID ) else ( " (sha2)" if ( self . key_id == LISP_SHA_256_128_ALG_ID ) else "" ) , self . auth_len , i11IIii , self . site_id ) )
if 92 - 92: iIii1I11I1II1 - Ii1I + OoooooooOO . o0oOOo0O0Ooo - o0oOOo0O0Ooo
if 82 - 82: iIii1I11I1II1 + I11i . I11i * OoooooooOO + i11iIiiIii
if 46 - 46: i1IIi + O0
if 5 - 5: o0oOOo0O0Ooo + I1IiiI / OoooooooOO % i11iIiiIii % OoooooooOO - o0oOOo0O0Ooo
 def encode ( self ) :
  """
  Build the fixed Map-Register header: 32-bit flags word (type in the
  top nibble), then nonce, key-id, alg-id and auth-len, with the
  auth-data field zero-filled ready for later signing.
  """
  # Assemble the first 32-bit word: type + flag bits.
  oOoOo00oo = ( LISP_MAP_REGISTER << 28 ) | self . record_count
  if ( self . proxy_reply_requested ) : oOoOo00oo |= 0x08000000
  if ( self . lisp_sec_present ) : oOoOo00oo |= 0x04000000
  if ( self . xtr_id_present ) : oOoOo00oo |= 0x02000000
  if ( self . map_register_refresh ) : oOoOo00oo |= 0x1000
  if ( self . use_ttl_for_timeout ) : oOoOo00oo |= 0x800
  if ( self . merge_register_requested ) : oOoOo00oo |= 0x400
  if ( self . mobile_node ) : oOoOo00oo |= 0x200
  if ( self . map_notify_requested ) : oOoOo00oo |= 0x100
  # E-bit plus 3-bit encryption key-id at bits 14-16.
  if ( self . encryption_key_id != None ) :
   oOoOo00oo |= 0x2000
   oOoOo00oo |= self . encryption_key_id << 14
  if 53 - 53: OoO0O00 + i11iIiiIii / iIii1I11I1II1
  if 1 - 1: IiII % i1IIi
  if 41 - 41: OoO0O00 * OoO0O00 / iII111i + I1ii11iIi11i . o0oOOo0O0Ooo
  if 84 - 84: i11iIiiIii + OoO0O00 * I1IiiI + I1ii11iIi11i / Ii1I
  if 80 - 80: I1ii11iIi11i
  # Auth-data length is dictated by the hash algorithm in alg_id.
  if ( self . alg_id == LISP_NONE_ALG_ID ) :
   self . auth_len = 0
  else :
   if ( self . alg_id == LISP_SHA_1_96_ALG_ID ) :
    self . auth_len = LISP_SHA1_160_AUTH_DATA_LEN
   if 67 - 67: II111iiii
   if ( self . alg_id == LISP_SHA_256_128_ALG_ID ) :
    self . auth_len = LISP_SHA2_256_AUTH_DATA_LEN
  if 2 - 2: o0oOOo0O0Ooo - O0 * Ii1I % IiII
  if 64 - 64: i1IIi . ooOoO0o
  if 7 - 7: oO0o . iII111i - iII111i / I1Ii111 % Oo0Ooo
  i1II1IiiIi = struct . pack ( "I" , socket . htonl ( oOoOo00oo ) )
  i1II1IiiIi += struct . pack ( "QBBH" , self . nonce , self . key_id , self . alg_id ,
  socket . htons ( self . auth_len ) )
  if 61 - 61: oO0o - I1ii11iIi11i / iII111i % I1ii11iIi11i + OoO0O00 / Oo0Ooo
  i1II1IiiIi = self . zero_auth ( i1II1IiiIi )
  return ( i1II1IiiIi )
if 10 - 10: i11iIiiIii / OoOoOO00
if 27 - 27: I1IiiI / OoooooooOO
def zero_auth ( self , packet ) :
I11iiIi1i1 = struct . calcsize ( "I" ) + struct . calcsize ( "QHH" )
OOO00Oo00o = ""
IiII1Iiii = 0
if ( self . alg_id == LISP_NONE_ALG_ID ) : return ( packet )
if ( self . alg_id == LISP_SHA_1_96_ALG_ID ) :
OOO00Oo00o = struct . pack ( "QQI" , 0 , 0 , 0 )
IiII1Iiii = struct . calcsize ( "QQI" )
if 16 - 16: iII111i . O0 - I1Ii111 * I1Ii111
if ( self . alg_id == LISP_SHA_256_128_ALG_ID ) :
OOO00Oo00o = struct . pack ( "QQQQ" , 0 , 0 , 0 , 0 )
IiII1Iiii = struct . calcsize ( "QQQQ" )
if 80 - 80: Ii1I % I1ii11iIi11i
packet = packet [ 0 : I11iiIi1i1 ] + OOO00Oo00o + packet [ I11iiIi1i1 + IiII1Iiii : : ]
return ( packet )
if 60 - 60: OoO0O00 % iIii1I11I1II1 . ooOoO0o * o0oOOo0O0Ooo % ooOoO0o - I1Ii111
if 51 - 51: ooOoO0o * IiII * iIii1I11I1II1 / OoOoOO00 % IiII
def encode_auth ( self , packet ) :
I11iiIi1i1 = struct . calcsize ( "I" ) + struct . calcsize ( "QHH" )
IiII1Iiii = self . auth_len
OOO00Oo00o = self . auth_data
packet = packet [ 0 : I11iiIi1i1 ] + OOO00Oo00o + packet [ I11iiIi1i1 + IiII1Iiii : : ]
return ( packet )
if 36 - 36: I1ii11iIi11i * o0oOOo0O0Ooo + i11iIiiIii + OoooooooOO
if 82 - 82: OoOoOO00 . OoOoOO00
 def decode ( self , packet ) :
  """
  Parse a received Map-Register header into this object's fields.

  Returns a 2-element list [orig_packet, remainder]: orig_packet is
  the full message with its authentication field zeroed out (so the
  caller can recompute the hash) and remainder points just past the
  fixed header and auth data.  Returns [None, None] on any parse or
  validation failure.
  """
  IIiIiIii11I1 = packet
  oOoOo000 = "I"
  O0OOoooO = struct . calcsize ( oOoOo000 )
  if ( len ( packet ) < O0OOoooO ) : return ( [ None , None ] )
  if 60 - 60: OoooooooOO * Oo0Ooo % I1Ii111
  # First 32-bit word: message type, flag bits, record count
  # (network byte order).
  oOoOo00oo = struct . unpack ( oOoOo000 , packet [ : O0OOoooO ] )
  oOoOo00oo = socket . ntohl ( oOoOo00oo [ 0 ] )
  packet = packet [ O0OOoooO : : ]
  if 68 - 68: O0 - Oo0Ooo . II111iiii % Ii1I % Oo0Ooo + i11iIiiIii
  # Fixed header: nonce (64 bits), key-id (8), alg-id (8), auth-len (16).
  oOoOo000 = "QBBH"
  O0OOoooO = struct . calcsize ( oOoOo000 )
  if ( len ( packet ) < O0OOoooO ) : return ( [ None , None ] )
  if 90 - 90: II111iiii / OOooOOo * I1IiiI - Oo0Ooo
  self . nonce , self . key_id , self . alg_id , self . auth_len = struct . unpack ( oOoOo000 , packet [ : O0OOoooO ] )
  if 11 - 11: IiII - oO0o - oO0o / I1Ii111 * II111iiii % oO0o
  if 39 - 39: oO0o / i11iIiiIii
  self . auth_len = socket . ntohs ( self . auth_len )
  # Extract the flag bits from the first long word.
  self . proxy_reply_requested = True if ( oOoOo00oo & 0x08000000 ) else False
  if 46 - 46: i11iIiiIii . I1ii11iIi11i
  self . lisp_sec_present = True if ( oOoOo00oo & 0x04000000 ) else False
  self . xtr_id_present = True if ( oOoOo00oo & 0x02000000 ) else False
  self . use_ttl_for_timeout = True if ( oOoOo00oo & 0x800 ) else False
  self . map_register_refresh = True if ( oOoOo00oo & 0x1000 ) else False
  self . merge_register_requested = True if ( oOoOo00oo & 0x400 ) else False
  self . mobile_node = True if ( oOoOo00oo & 0x200 ) else False
  self . map_notify_requested = True if ( oOoOo00oo & 0x100 ) else False
  self . record_count = oOoOo00oo & 0xff
  if 11 - 11: ooOoO0o
  if 36 - 36: OoO0O00 % iIii1I11I1II1 - I1ii11iIi11i - i1IIi % o0oOOo0O0Ooo
  if 54 - 54: IiII - II111iiii . ooOoO0o + Ii1I
  if 45 - 45: oO0o + II111iiii . iII111i / I1ii11iIi11i
  # E-bit: payload is encrypted; key-id occupies bits 14-16.
  self . encrypt_bit = True if oOoOo00oo & 0x2000 else False
  if ( self . encrypt_bit ) :
   self . encryption_key_id = ( oOoOo00oo >> 14 ) & 0x7
  if 76 - 76: Ii1I + iII111i - IiII * iIii1I11I1II1 % i1IIi
  if 72 - 72: ooOoO0o + II111iiii . O0 - iII111i / OoooooooOO . I1Ii111
  if 28 - 28: iIii1I11I1II1 . O0
  if 32 - 32: OoooooooOO
  if 29 - 29: I1ii11iIi11i
  # xtr-id/site-id trail the message, so decode them from the
  # original, full-length buffer.
  if ( self . xtr_id_present ) :
   if ( self . decode_xtr_id ( IIiIiIii11I1 ) == False ) : return ( [ None , None ] )
  if 41 - 41: Ii1I
  if 49 - 49: Ii1I % II111iiii . Ii1I - o0oOOo0O0Ooo - I11i * IiII
  packet = packet [ O0OOoooO : : ]
  if 47 - 47: O0 . o0oOOo0O0Ooo / Ii1I * iII111i
  if 63 - 63: I1Ii111 - oO0o - iII111i - ooOoO0o / oO0o + OoO0O00
  if 94 - 94: IiII / I1IiiI . II111iiii
  if 32 - 32: oO0o . OOooOOo % OOooOOo . OoOoOO00
  if ( self . auth_len != 0 ) :
   if ( len ( packet ) < self . auth_len ) : return ( [ None , None ] )
   if 37 - 37: OOooOOo + O0 + OOooOOo . iII111i . o0oOOo0O0Ooo
   if ( self . alg_id not in ( LISP_NONE_ALG_ID , LISP_SHA_1_96_ALG_ID ,
    LISP_SHA_256_128_ALG_ID ) ) :
    lprint ( "Invalid authentication alg-id: {}" . format ( self . alg_id ) )
    return ( [ None , None ] )
   if 78 - 78: I1IiiI / I11i + o0oOOo0O0Ooo . Oo0Ooo / O0
   if 49 - 49: I1ii11iIi11i
   IiII1Iiii = self . auth_len
   if ( self . alg_id == LISP_SHA_1_96_ALG_ID ) :
    O0OOoooO = struct . calcsize ( "QQI" )
    if ( IiII1Iiii < O0OOoooO ) :
     lprint ( "Invalid sha1-96 authentication length" )
     return ( [ None , None ] )
    if 66 - 66: o0oOOo0O0Ooo . I1ii11iIi11i
    iI111I , i1 , iiII1I1I1ii = struct . unpack ( "QQI" , packet [ : IiII1Iiii ] )
    Iii1I1111iI = ""
   elif ( self . alg_id == LISP_SHA_256_128_ALG_ID ) :
    O0OOoooO = struct . calcsize ( "QQQQ" )
    if ( IiII1Iiii < O0OOoooO ) :
     lprint ( "Invalid sha2-256 authentication length" )
     return ( [ None , None ] )
    if 71 - 71: Oo0Ooo
    iI111I , i1 , iiII1I1I1ii , Iii1I1111iI = struct . unpack ( "QQQQ" ,
     packet [ : IiII1Iiii ] )
   else :
    lprint ( "Unsupported authentication alg-id value {}" . format ( self . alg_id ) )
    if 34 - 34: O0 / OOooOOo % OoooooooOO . OoooooooOO
    return ( [ None , None ] )
   if 30 - 30: OoO0O00 % OOooOOo * OoO0O00 + oO0o % iIii1I11I1II1 + OoooooooOO
   # Save the received hash, then zero the auth field in the original
   # packet so the caller can recompute and compare the hash.
   self . auth_data = lisp_concat_auth_data ( self . alg_id , iI111I , i1 ,
    iiII1I1I1ii , Iii1I1111iI )
   IIiIiIii11I1 = self . zero_auth ( IIiIiIii11I1 )
   packet = packet [ self . auth_len : : ]
  if 71 - 71: Oo0Ooo
  return ( [ IIiIiIii11I1 , packet ] )
if 98 - 98: o0oOOo0O0Ooo * Oo0Ooo - Ii1I . ooOoO0o
if 2 - 2: Oo0Ooo - ooOoO0o % iIii1I11I1II1
def encode_xtr_id ( self , packet ) :
o0O0o0O0O = self . xtr_id >> 64
ii11iIi1IiI = self . xtr_id & 0xffffffffffffffff
o0O0o0O0O = byte_swap_64 ( o0O0o0O0O )
ii11iIi1IiI = byte_swap_64 ( ii11iIi1IiI )
ooO0OOoOooO = byte_swap_64 ( self . site_id )
packet += struct . pack ( "QQQ" , o0O0o0O0O , ii11iIi1IiI , ooO0OOoOooO )
return ( packet )
if 76 - 76: Oo0Ooo * ooOoO0o % OOooOOo . OoO0O00
if 31 - 31: I1IiiI - OoooooooOO . IiII
def decode_xtr_id ( self , packet ) :
O0OOoooO = struct . calcsize ( "QQQ" )
if ( len ( packet ) < O0OOoooO ) : return ( [ None , None ] )
packet = packet [ len ( packet ) - O0OOoooO : : ]
o0O0o0O0O , ii11iIi1IiI , ooO0OOoOooO = struct . unpack ( "QQQ" ,
packet [ : O0OOoooO ] )
o0O0o0O0O = byte_swap_64 ( o0O0o0O0O )
ii11iIi1IiI = byte_swap_64 ( ii11iIi1IiI )
self . xtr_id = ( o0O0o0O0O << 64 ) | ii11iIi1IiI
self . site_id = byte_swap_64 ( ooO0OOoOooO )
return ( True )
if 12 - 12: I11i . Ii1I + I11i - OOooOOo * iII111i - O0
if 44 - 44: i1IIi % oO0o / OoOoOO00 % IiII . I1ii11iIi11i
if 38 - 38: OoOoOO00 . I11i
if 66 - 66: iII111i
if 61 - 61: i11iIiiIii / oO0o / i11iIiiIii
if 61 - 61: I11i / iIii1I11I1II1 - i1IIi - IiII * i11iIiiIii
if 86 - 86: I11i % I11i - OoOoOO00 + I1Ii111 / I1IiiI * OoooooooOO
if 26 - 26: II111iiii * iII111i + o0oOOo0O0Ooo / O0 + i1IIi - I11i
if 56 - 56: OOooOOo
if 76 - 76: i1IIi % iIii1I11I1II1 - o0oOOo0O0Ooo + IiII - I11i
if 81 - 81: I1ii11iIi11i + OoooooooOO - OOooOOo * O0
if 100 - 100: iIii1I11I1II1 - OoOoOO00
if 28 - 28: Oo0Ooo . O0 . I11i
if 60 - 60: II111iiii + I1Ii111 / oO0o % OoooooooOO - i1IIi
if 57 - 57: ooOoO0o
if 99 - 99: Oo0Ooo + I1Ii111 % ooOoO0o - o0oOOo0O0Ooo
if 52 - 52: I1ii11iIi11i
if 93 - 93: iII111i . i11iIiiIii
if 24 - 24: OOooOOo . OoO0O00 + I1Ii111 . oO0o - I1ii11iIi11i % iII111i
if 49 - 49: O0 . Oo0Ooo / Ii1I
if 29 - 29: I1ii11iIi11i / oO0o * O0 - i11iIiiIii - OoO0O00 + Ii1I
if 86 - 86: I1IiiI / I1ii11iIi11i * Ii1I % i11iIiiIii
if 20 - 20: iII111i . OoooooooOO + iII111i + ooOoO0o * I1ii11iIi11i
if 44 - 44: i11iIiiIii
if 69 - 69: OOooOOo * O0 + i11iIiiIii
if 65 - 65: O0 / iII111i . i1IIi * iII111i / iIii1I11I1II1 - oO0o
if 93 - 93: OoOoOO00 % i11iIiiIii - Ii1I % OoO0O00
if 55 - 55: o0oOOo0O0Ooo . I1ii11iIi11i
if 63 - 63: oO0o
if 79 - 79: I1ii11iIi11i - oO0o - o0oOOo0O0Ooo . OOooOOo
if 65 - 65: i11iIiiIii . OoO0O00 % iII111i + IiII - i11iIiiIii
if 60 - 60: I1Ii111
if 14 - 14: Oo0Ooo % oO0o * iII111i - i11iIiiIii / I1ii11iIi11i * i11iIiiIii
class lisp_map_notify():
    """
    State for one LISP Map-Notify or Map-Notify-Ack message.

    Wire layout handled here: a 32-bit first word (type + record count),
    then nonce (64 bits), key-id (8), alg-id (8), auth-len (16), the
    authentication data, and finally the EID-records.
    """

    def __init__(self, lisp_sockets):
        self.etr = lisp_address(LISP_AFI_NONE, "", 0, 0)  # ETR the notify goes to
        self.etr_port = 0
        self.retransmit_timer = None
        self.lisp_sockets = lisp_sockets
        self.retry_count = 0
        self.record_count = 0
        self.alg_id = LISP_NONE_ALG_ID    # authentication algorithm
        self.key_id = 0
        self.auth_len = 0                 # auth-data length in bytes
        self.auth_data = ""
        self.nonce = 0
        self.nonce_key = ""               # hex string of nonce, used as dict key
        self.packet = None                # last encoded packet (for retransmit)
        self.site = ""
        self.map_notify_ack = False       # True when this is a Map-Notify-Ack
        self.eid_records = ""
        self.eid_list = []

    def print_notify(self):
        """Log a one-line summary of this Map-Notify/(-Ack) header."""
        auth_str = binascii.hexlify(self.auth_data)
        if (self.alg_id == LISP_SHA_1_96_ALG_ID and len(auth_str) != 40):
            auth_str = self.auth_data
        elif (self.alg_id == LISP_SHA_256_128_ALG_ID and len(auth_str) != 64):
            auth_str = self.auth_data

        line = ("{} -> record-count: {}, nonce: 0x{}, key/alg-id: " +
            "{}{}{}, auth-len: {}, auth-data: {}")

        # NOTE(review): the sha1/sha2 suffix checks compare self.key_id
        # against the *alg-id* constants; looks like it should compare
        # self.alg_id -- preserved as-is, confirm before changing.
        lprint(line.format(bold("Map-Notify-Ack", False) if self.map_notify_ack else bold("Map-Notify", False),
            self.record_count, lisp_hex_string(self.nonce), self.key_id,
            self.alg_id, " (sha1)" if (self.key_id == LISP_SHA_1_96_ALG_ID) else (" (sha2)" if (self.key_id == LISP_SHA_256_128_ALG_ID) else ""), self.auth_len, auth_str))

    def zero_auth(self, packet):
        """
        Append an all-zeros authentication field sized for self.alg_id
        (20 bytes for sha1-96, 32 for sha2-256) so the auth hash can be
        computed over the zero-filled message.

        Bug fix: the filler is now initialized to "" so an unrecognized
        alg-id returns the packet unchanged instead of raising NameError
        (matches the sibling zero_auth implementation).
        """
        if (self.alg_id == LISP_NONE_ALG_ID): return(packet)

        auth_filler = ""
        if (self.alg_id == LISP_SHA_1_96_ALG_ID):
            auth_filler = struct.pack("QQI", 0, 0, 0)
        if (self.alg_id == LISP_SHA_256_128_ALG_ID):
            auth_filler = struct.pack("QQQQ", 0, 0, 0, 0)

        packet += auth_filler
        return(packet)

    def encode(self, eid_records, password):
        """
        Build the wire message for this Map-Notify/(-Ack) and cache it
        in self.packet.

        eid_records: pre-encoded EID-record bytes to append.
        password: shared secret used to compute the auth hash.
        Returns the encoded packet.
        """
        if (self.map_notify_ack):
            first_long = (LISP_MAP_NOTIFY_ACK << 28) | self.record_count
        else:
            first_long = (LISP_MAP_NOTIFY << 28) | self.record_count

        packet = struct.pack("I", socket.htonl(first_long))
        packet += struct.pack("QBBH", self.nonce, self.key_id, self.alg_id,
            socket.htons(self.auth_len))

        # No authentication configured: header plus records as-is.
        if (self.alg_id == LISP_NONE_ALG_ID):
            self.packet = packet + eid_records
            return(self.packet)

        # Hash over the whole message with a zeroed auth field, then
        # splice the computed hash into that field.
        packet = self.zero_auth(packet)
        packet += eid_records

        hash_value = lisp_hash_me(packet, self.alg_id, password, False)

        auth_offset = struct.calcsize("I") + struct.calcsize("QHH")
        auth_size = self.auth_len
        self.auth_data = hash_value
        packet = packet[0:auth_offset] + hash_value + \
            packet[auth_offset + auth_size::]
        self.packet = packet
        return(packet)

    def decode(self, packet):
        """
        Parse a received Map-Notify/(-Ack).

        Returns the EID-records when no authentication is present, the
        auth-zeroed packet (for hash recomputation) when it is, or None
        on a malformed packet.

        Bug fix: an unsupported alg-id now logs and returns None instead
        of raising NameError on undefined auth variables (consistent
        with the Map-Register decoder's error handling).
        """
        orig_packet = packet
        packet_format = "I"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        first_long = struct.unpack(packet_format, packet[:format_size])
        first_long = socket.ntohl(first_long[0])
        self.map_notify_ack = ((first_long >> 28) == LISP_MAP_NOTIFY_ACK)
        self.record_count = first_long & 0xff
        packet = packet[format_size::]

        packet_format = "QBBH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        self.nonce, self.key_id, self.alg_id, self.auth_len = \
            struct.unpack(packet_format, packet[:format_size])

        self.nonce_key = lisp_hex_string(self.nonce)
        self.auth_len = socket.ntohs(self.auth_len)
        packet = packet[format_size::]
        self.eid_records = packet[self.auth_len::]

        if (self.auth_len == 0): return(self.eid_records)

        # Authentication present: pull out the received hash words.
        if (len(packet) < self.auth_len): return(None)

        auth_size = self.auth_len
        if (self.alg_id == LISP_SHA_1_96_ALG_ID):
            auth1, auth2, auth3 = struct.unpack("QQI", packet[:auth_size])
            auth4 = ""
        elif (self.alg_id == LISP_SHA_256_128_ALG_ID):
            auth1, auth2, auth3, auth4 = struct.unpack("QQQQ",
                packet[:auth_size])
        else:
            lprint("Unsupported authentication alg-id value {}".format(self.alg_id))
            return(None)

        self.auth_data = lisp_concat_auth_data(self.alg_id, auth1, auth2,
            auth3, auth4)

        # Rebuild the original message with a zeroed auth field so the
        # caller can recompute the hash and compare.
        format_size = struct.calcsize("I") + struct.calcsize("QHH")
        packet = self.zero_auth(orig_packet[:format_size])
        format_size += auth_size
        packet += orig_packet[format_size::]
        return(packet)
if 62 - 62: II111iiii
if 96 - 96: I11i % OoOoOO00 * I1ii11iIi11i
if 94 - 94: Oo0Ooo - i1IIi . O0 % Oo0Ooo . ooOoO0o
if 63 - 63: i11iIiiIii % I1ii11iIi11i % I1IiiI . IiII * o0oOOo0O0Ooo + OOooOOo
if 77 - 77: o0oOOo0O0Ooo
if 63 - 63: ooOoO0o * oO0o + ooOoO0o * Ii1I + Oo0Ooo / I1ii11iIi11i
if 15 - 15: O0 . I1ii11iIi11i * I1ii11iIi11i
if 65 - 65: I1Ii111 + O0 % o0oOOo0O0Ooo
if 72 - 72: OOooOOo . OoOoOO00 / II111iiii
if 69 - 69: OOooOOo * II111iiii - ooOoO0o - i1IIi + i11iIiiIii
if 50 - 50: OoooooooOO * i1IIi / oO0o
if 83 - 83: i1IIi
if 38 - 38: OoooooooOO * iIii1I11I1II1
if 54 - 54: OoooooooOO . I1Ii111
if 71 - 71: Ii1I
if 31 - 31: I11i . i11iIiiIii . OoO0O00 * Oo0Ooo % Ii1I . o0oOOo0O0Ooo
if 92 - 92: OoooooooOO / O0 * i1IIi + iIii1I11I1II1
if 93 - 93: ooOoO0o % I1Ii111
if 46 - 46: I1ii11iIi11i * OoOoOO00 * IiII * I1ii11iIi11i . I1ii11iIi11i
if 43 - 43: ooOoO0o . i1IIi
if 68 - 68: IiII % Oo0Ooo . O0 - OoOoOO00 + I1ii11iIi11i . i11iIiiIii
if 45 - 45: I1IiiI
if 17 - 17: OoooooooOO - ooOoO0o + Ii1I . OoooooooOO % Oo0Ooo
if 92 - 92: I1Ii111 - OOooOOo % OoO0O00 - o0oOOo0O0Ooo % i1IIi
if 38 - 38: I1ii11iIi11i . I11i / OoOoOO00 % I11i
if 10 - 10: O0 . I1IiiI * o0oOOo0O0Ooo / iII111i
if 61 - 61: Oo0Ooo - I1Ii111
if 51 - 51: iII111i * ooOoO0o / O0 / O0
if 52 - 52: OoooooooOO % O0
if 56 - 56: oO0o - i1IIi * OoooooooOO - II111iiii
if 28 - 28: i1IIi / I11i . o0oOOo0O0Ooo
if 11 - 11: Oo0Ooo * OoooooooOO - i11iIiiIii
if 13 - 13: i11iIiiIii . O0 / OOooOOo * i1IIi
if 14 - 14: IiII + IiII . I11i / Ii1I . iIii1I11I1II1
if 10 - 10: II111iiii . OOooOOo / iII111i
if 35 - 35: iII111i / Oo0Ooo + O0 * iIii1I11I1II1 - O0
if 3 - 3: I1ii11iIi11i
if 42 - 42: I11i % Oo0Ooo + IiII - I11i . iIii1I11I1II1 - Ii1I
if 27 - 27: iII111i % Oo0Ooo . I1ii11iIi11i . i1IIi % OoOoOO00 . o0oOOo0O0Ooo
if 37 - 37: iII111i + I1Ii111 * Ii1I + IiII
if 39 - 39: O0 * Oo0Ooo - I1IiiI + Ii1I / II111iiii
if 66 - 66: ooOoO0o + oO0o % OoooooooOO
if 23 - 23: oO0o . OoOoOO00 + iIii1I11I1II1
if 17 - 17: IiII
if 12 - 12: i1IIi . OoO0O00
if 14 - 14: OOooOOo + II111iiii % OOooOOo . oO0o * ooOoO0o
if 54 - 54: ooOoO0o * I11i - I1Ii111
if 15 - 15: iII111i / O0
if 61 - 61: i1IIi / i1IIi + ooOoO0o . I1Ii111 * ooOoO0o
if 19 - 19: o0oOOo0O0Ooo . II111iiii / i1IIi
class lisp_map_request ( ) :
def __init__ ( self ) :
self . auth_bit = False
self . map_data_present = False
self . rloc_probe = False
self . smr_bit = False
self . pitr_bit = False
self . smr_invoked_bit = False
self . mobile_node = False
self . xtr_id_present = False
self . local_xtr = False
self . dont_reply_bit = False
self . itr_rloc_count = 0
self . record_count = 0
self . nonce = 0
self . signature_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . source_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . target_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . target_group = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . itr_rlocs = [ ]
self . keys = None
self . privkey_filename = None
self . map_request_signature = None
self . subscribe_bit = False
self . xtr_id = None
if 82 - 82: O0 / iII111i * OoO0O00 - I11i + Oo0Ooo
if 47 - 47: I1ii11iIi11i * I1IiiI / I1ii11iIi11i + Ii1I * II111iiii
def print_prefix ( self ) :
if ( self . target_group . is_null ( ) ) :
return ( green ( self . target_eid . print_prefix ( ) , False ) )
if 78 - 78: I1Ii111 - i1IIi + OoOoOO00 + Oo0Ooo * I1ii11iIi11i * o0oOOo0O0Ooo
return ( green ( self . target_eid . print_sg ( self . target_group ) , False ) )
if 97 - 97: i1IIi
if 29 - 29: I1IiiI
 def print_map_request ( self ) :
  """Log a human-readable summary of this Map-Request and its ITR-RLOCs."""
  i11IIii = ""
  if ( self . xtr_id != None and self . subscribe_bit ) :
   i11IIii = "subscribe, xtr-id: 0x{}, " . format ( lisp_hex_string ( self . xtr_id ) )
  if 37 - 37: I1ii11iIi11i * I1Ii111 * I1IiiI * O0
  if 35 - 35: I1IiiI - I1ii11iIi11i * iII111i + IiII / i1IIi
  if 46 - 46: Oo0Ooo . ooOoO0o % Oo0Ooo / II111iiii * ooOoO0o * OOooOOo
  # Flag legend: uppercase letter means the flag is set, lowercase clear.
  oooOo = ( "{} -> flags: {}{}{}{}{}{}{}{}{}{}, itr-rloc-" +
   "count: {} (+1), record-count: {}, nonce: 0x{}, source-eid: " +
   "afi {}, {}{}, target-eid: afi {}, {}, {}ITR-RLOCs:" )
  if 59 - 59: I1Ii111 * iII111i
  lprint ( oooOo . format ( bold ( "Map-Request" , False ) , "A" if self . auth_bit else "a" ,
  # I1IiiI / I11i
  "D" if self . map_data_present else "d" ,
  "R" if self . rloc_probe else "r" ,
  "S" if self . smr_bit else "s" ,
  "P" if self . pitr_bit else "p" ,
  "I" if self . smr_invoked_bit else "i" ,
  "M" if self . mobile_node else "m" ,
  "X" if self . xtr_id_present else "x" ,
  "L" if self . local_xtr else "l" ,
  "D" if self . dont_reply_bit else "d" , self . itr_rloc_count ,
  self . record_count , lisp_hex_string ( self . nonce ) ,
  self . source_eid . afi , green ( self . source_eid . print_address ( ) , False ) ,
  " (with sig)" if self . map_request_signature != None else "" ,
  self . target_eid . afi , green ( self . print_prefix ( ) , False ) , i11IIii ) )
  if 6 - 6: Ii1I / ooOoO0o / i11iIiiIii % o0oOOo0O0Ooo
  # Only the first ITR-RLOC carries the lisp-crypto keys (if any), so
  # the key reference is cleared after the first loop iteration.
  o00OO0o0 = self . keys
  for o00ooOOo0ooO0 in self . itr_rlocs :
   lprint ( " itr-rloc: afi {} {}{}" . format ( o00ooOOo0ooO0 . afi ,
    red ( o00ooOOo0ooO0 . print_address_no_iid ( ) , False ) ,
    "" if ( o00OO0o0 == None ) else ", " + o00OO0o0 [ 1 ] . print_keys ( ) ) )
   o00OO0o0 = None
if 28 - 28: I1Ii111 + II111iiii % OOooOOo * i11iIiiIii % oO0o + OoooooooOO
if 65 - 65: o0oOOo0O0Ooo . IiII % i1IIi % OoOoOO00 + I1ii11iIi11i
if 41 - 41: OoOoOO00 / iIii1I11I1II1
def sign_map_request ( self , privkey ) :
O0O0o0OOOooo0 = self . signature_eid . print_address ( )
iiI1i = self . source_eid . print_address ( )
i1i11IIi11iiI = self . target_eid . print_address ( )
ii1Ii111I11 = lisp_hex_string ( self . nonce ) + iiI1i + i1i11IIi11iiI
self . map_request_signature = privkey . sign ( ii1Ii111I11 )
IiiiI1I1i = binascii . b2a_base64 ( self . map_request_signature )
IiiiI1I1i = { "source-eid" : iiI1i , "signature-eid" : O0O0o0OOOooo0 ,
"signature" : IiiiI1I1i }
return ( json . dumps ( IiiiI1I1i ) )
if 53 - 53: Oo0Ooo . OOooOOo + iII111i * Ii1I
if 23 - 23: o0oOOo0O0Ooo + ooOoO0o
def verify_map_request_sig ( self , pubkey ) :
i1i1iIi1IiI = green ( self . signature_eid . print_address ( ) , False )
if ( pubkey == None ) :
lprint ( "Public-key not found for signature-EID {}" . format ( i1i1iIi1IiI ) )
return ( False )
if 16 - 16: oO0o
if 96 - 96: ooOoO0o / oO0o % O0 / OOooOOo * OoO0O00 * I11i
iiI1i = self . source_eid . print_address ( )
i1i11IIi11iiI = self . target_eid . print_address ( )
ii1Ii111I11 = lisp_hex_string ( self . nonce ) + iiI1i + i1i11IIi11iiI
pubkey = binascii . a2b_base64 ( pubkey )
if 27 - 27: OoOoOO00 % Ii1I / i1IIi . i1IIi * OoooooooOO % ooOoO0o
O0o0O00O0 = True
try :
iii11 = ecdsa . VerifyingKey . from_pem ( pubkey )
except :
lprint ( "Invalid public-key in mapping system for sig-eid {}" . format ( self . signature_eid . print_address_no_iid ( ) ) )
if 67 - 67: OoooooooOO * OoO0O00 * iII111i + ooOoO0o - i1IIi
O0o0O00O0 = False
if 66 - 66: IiII / OoOoOO00 % O0 % o0oOOo0O0Ooo - OOooOOo / OoOoOO00
if 11 - 11: I1IiiI + IiII
if ( O0o0O00O0 ) :
try :
O0o0O00O0 = iii11 . verify ( self . map_request_signature , ii1Ii111I11 )
except :
O0o0O00O0 = False
if 95 - 95: I1IiiI - OOooOOo . Oo0Ooo / O0 + Ii1I
if 67 - 67: OoOoOO00 % Oo0Ooo
if 7 - 7: i11iIiiIii % I1ii11iIi11i / I1Ii111 % Oo0Ooo - OoO0O00
o0OOo0o0 = bold ( "passed" if O0o0O00O0 else "failed" , False )
lprint ( "Signature verification {} for EID {}" . format ( o0OOo0o0 , i1i1iIi1IiI ) )
return ( O0o0O00O0 )
if 60 - 60: o0oOOo0O0Ooo / Oo0Ooo
if 19 - 19: iIii1I11I1II1 . OoO0O00 / OoooooooOO
def encode ( self , probe_dest , probe_port ) :
oOoOo00oo = ( LISP_MAP_REQUEST << 28 ) | self . record_count
oOoOo00oo = oOoOo00oo | ( self . itr_rloc_count << 8 )
if ( self . auth_bit ) : oOoOo00oo |= 0x08000000
if ( self . map_data_present ) : oOoOo00oo |= 0x04000000
if ( self . rloc_probe ) : oOoOo00oo |= 0x02000000
if ( self . smr_bit ) : oOoOo00oo |= 0x01000000
if ( self . pitr_bit ) : oOoOo00oo |= 0x00800000
if ( self . smr_invoked_bit ) : oOoOo00oo |= 0x00400000
if ( self . mobile_node ) : oOoOo00oo |= 0x00200000
if ( self . xtr_id_present ) : oOoOo00oo |= 0x00100000
if ( self . local_xtr ) : oOoOo00oo |= 0x00004000
if ( self . dont_reply_bit ) : oOoOo00oo |= 0x00002000
if 2 - 2: O0 - O0 % I1Ii111 / I1ii11iIi11i
i1II1IiiIi = struct . pack ( "I" , socket . htonl ( oOoOo00oo ) )
i1II1IiiIi += struct . pack ( "Q" , self . nonce )
if 76 - 76: OoO0O00 * oO0o - OoO0O00
if 57 - 57: OoooooooOO / OoOoOO00 + oO0o . Ii1I
if 14 - 14: i11iIiiIii % OOooOOo * o0oOOo0O0Ooo * OoOoOO00
if 55 - 55: I1Ii111 * OOooOOo * I1Ii111
if 70 - 70: O0 . Ii1I
if 33 - 33: OOooOOo * Ii1I
oooIII1II1I1iI = False
oOOOO = self . privkey_filename
if ( oOOOO != None and os . path . exists ( oOOOO ) ) :
Oo0OO0o0oOO0 = open ( oOOOO , "r" ) ; iii11 = Oo0OO0o0oOO0 . read ( ) ; Oo0OO0o0oOO0 . close ( )
try :
iii11 = ecdsa . SigningKey . from_pem ( iii11 )
except :
return ( None )
if 48 - 48: I11i
O0OoOOo0o = self . sign_map_request ( iii11 )
oooIII1II1I1iI = True
elif ( self . map_request_signature != None ) :
IiiiI1I1i = binascii . b2a_base64 ( self . map_request_signature )
O0OoOOo0o = { "source-eid" : self . source_eid . print_address ( ) ,
"signature-eid" : self . signature_eid . print_address ( ) ,
"signature" : IiiiI1I1i }
O0OoOOo0o = json . dumps ( O0OoOOo0o )
oooIII1II1I1iI = True
if 21 - 21: I11i - I1IiiI / OoooooooOO . i1IIi + II111iiii
if ( oooIII1II1I1iI ) :
oOOi1I111II = LISP_LCAF_JSON_TYPE
O0OOOOO0O = socket . htons ( LISP_AFI_LCAF )
ii111 = socket . htons ( len ( O0OoOOo0o ) + 2 )
i1oO0o00oOo00oO = socket . htons ( len ( O0OoOOo0o ) )
i1II1IiiIi += struct . pack ( "HBBBBHH" , O0OOOOO0O , 0 , 0 , oOOi1I111II , 0 ,
ii111 , i1oO0o00oOo00oO )
i1II1IiiIi += O0OoOOo0o
i1II1IiiIi += struct . pack ( "H" , 0 )
else :
if ( self . source_eid . instance_id != 0 ) :
i1II1IiiIi += struct . pack ( "H" , socket . htons ( LISP_AFI_LCAF ) )
i1II1IiiIi += self . source_eid . lcaf_encode_iid ( )
else :
i1II1IiiIi += struct . pack ( "H" , socket . htons ( self . source_eid . afi ) )
i1II1IiiIi += self . source_eid . pack_address ( )
if 68 - 68: iIii1I11I1II1 - I1IiiI . oO0o + OoOoOO00
if 94 - 94: o0oOOo0O0Ooo % o0oOOo0O0Ooo % II111iiii * iIii1I11I1II1 / IiII . I1ii11iIi11i
if 13 - 13: OoOoOO00 . I1IiiI . o0oOOo0O0Ooo * oO0o / Ii1I
if 38 - 38: IiII - i1IIi . i11iIiiIii
if 28 - 28: I1Ii111 / oO0o . I1ii11iIi11i
if 83 - 83: I11i
if 36 - 36: iIii1I11I1II1
if ( probe_dest ) :
if ( probe_port == 0 ) : probe_port = LISP_DATA_PORT
I1iiIiiii1111 = probe_dest . print_address_no_iid ( ) + ":" + str ( probe_port )
if 74 - 74: IiII * I1ii11iIi11i - OoooooooOO
if ( lisp_crypto_keys_by_rloc_encap . has_key ( I1iiIiiii1111 ) ) :
self . keys = lisp_crypto_keys_by_rloc_encap [ I1iiIiiii1111 ]
if 59 - 59: ooOoO0o * OoO0O00 - I1Ii111 % oO0o
if 95 - 95: II111iiii + II111iiii
if 33 - 33: i1IIi . Oo0Ooo - IiII
if 30 - 30: OoooooooOO % OOooOOo
if 14 - 14: OoOoOO00 / OoO0O00 / i11iIiiIii - OoOoOO00 / o0oOOo0O0Ooo - OOooOOo
if 81 - 81: iII111i % Ii1I . ooOoO0o
if 66 - 66: I1ii11iIi11i * Ii1I / OoooooooOO * O0 % OOooOOo
for o00ooOOo0ooO0 in self . itr_rlocs :
if ( lisp_data_plane_security and self . itr_rlocs . index ( o00ooOOo0ooO0 ) == 0 ) :
if ( self . keys == None or self . keys [ 1 ] == None ) :
o00OO0o0 = lisp_keys ( 1 )
self . keys = [ None , o00OO0o0 , None , None ]
if 49 - 49: II111iiii . I1IiiI * O0 * Ii1I / I1Ii111 * OoooooooOO
o00OO0o0 = self . keys [ 1 ]
o00OO0o0 . add_key_by_nonce ( self . nonce )
i1II1IiiIi += o00OO0o0 . encode_lcaf ( o00ooOOo0ooO0 )
else :
i1II1IiiIi += struct . pack ( "H" , socket . htons ( o00ooOOo0ooO0 . afi ) )
i1II1IiiIi += o00ooOOo0ooO0 . pack_address ( )
if 82 - 82: Oo0Ooo / Ii1I / Ii1I % Ii1I
if 20 - 20: ooOoO0o
if 63 - 63: iIii1I11I1II1 . OoO0O00
ooooOo00OO0o = 0 if self . target_eid . is_binary ( ) == False else self . target_eid . mask_len
if 86 - 86: OoOoOO00
if 61 - 61: IiII / II111iiii . O0 + OoooooooOO * i1IIi
Oooo00oOO00 = 0
if ( self . subscribe_bit ) :
Oooo00oOO00 = 0x80
self . xtr_id_present = True
if ( self . xtr_id == None ) :
self . xtr_id = random . randint ( 0 , ( 2 ** 128 ) - 1 )
if 81 - 81: i11iIiiIii * OoooooooOO + Ii1I . IiII / O0
if 82 - 82: II111iiii * OoOoOO00 * iIii1I11I1II1 % oO0o * OOooOOo
if 33 - 33: Ii1I . oO0o
oOoOo000 = "BB"
i1II1IiiIi += struct . pack ( oOoOo000 , Oooo00oOO00 , ooooOo00OO0o )
if 87 - 87: Oo0Ooo . o0oOOo0O0Ooo - OoooooooOO * oO0o % IiII + O0
if ( self . target_group . is_null ( ) == False ) :
i1II1IiiIi += struct . pack ( "H" , socket . htons ( LISP_AFI_LCAF ) )
i1II1IiiIi += self . target_eid . lcaf_encode_sg ( self . target_group )
elif ( self . target_eid . instance_id != 0 or
self . target_eid . is_geo_prefix ( ) ) :
i1II1IiiIi += struct . pack ( "H" , socket . htons ( LISP_AFI_LCAF ) )
i1II1IiiIi += self . target_eid . lcaf_encode_iid ( )
else :
i1II1IiiIi += struct . pack ( "H" , socket . htons ( self . target_eid . afi ) )
i1II1IiiIi += self . target_eid . pack_address ( )
if 16 - 16: I1ii11iIi11i % Oo0Ooo % II111iiii % II111iiii
if 51 - 51: OoOoOO00 * OoOoOO00 - O0 % iIii1I11I1II1 / O0
if 5 - 5: i11iIiiIii * ooOoO0o % iII111i - I11i
if 5 - 5: O0 * IiII * OOooOOo + I1Ii111 % Oo0Ooo - I1ii11iIi11i
if 62 - 62: I1ii11iIi11i + I11i
if ( self . subscribe_bit ) : i1II1IiiIi = self . encode_xtr_id ( i1II1IiiIi )
return ( i1II1IiiIi )
if 90 - 90: iIii1I11I1II1
if 18 - 18: I11i * I1ii11iIi11i / i11iIiiIii / iIii1I11I1II1 * OoooooooOO . OOooOOo
def lcaf_decode_json ( self , packet ) :
oOoOo000 = "BBBBHH"
O0OOoooO = struct . calcsize ( oOoOo000 )
if ( len ( packet ) < O0OOoooO ) : return ( None )
if 69 - 69: Oo0Ooo * ooOoO0o
OOII1iI , Ooooo0OO , oOOi1I111II , o0o0OO0OO , ii111 , i1oO0o00oOo00oO = struct . unpack ( oOoOo000 , packet [ : O0OOoooO ] )
if 21 - 21: I1IiiI - OoooooooOO / OoOoOO00 * OoooooooOO % OoooooooOO + OoO0O00
if 89 - 89: iII111i . OOooOOo . I1ii11iIi11i
if ( oOOi1I111II != LISP_LCAF_JSON_TYPE ) : return ( packet )
if 93 - 93: II111iiii
if 8 - 8: Ii1I * OoooooooOO / Ii1I / OoO0O00 % OoOoOO00 + I11i
if 16 - 16: I11i % ooOoO0o - i11iIiiIii
if 38 - 38: o0oOOo0O0Ooo / I1ii11iIi11i - O0
ii111 = socket . ntohs ( ii111 )
i1oO0o00oOo00oO = socket . ntohs ( i1oO0o00oOo00oO )
packet = packet [ O0OOoooO : : ]
if ( len ( packet ) < ii111 ) : return ( None )
if ( ii111 != i1oO0o00oOo00oO + 2 ) : return ( None )
if 21 - 21: OOooOOo
if 77 - 77: II111iiii
if 54 - 54: OoooooooOO % O0 % O0 * Ii1I % II111iiii + OOooOOo
if 89 - 89: IiII - o0oOOo0O0Ooo - II111iiii * Ii1I . iIii1I11I1II1
try :
O0OoOOo0o = json . loads ( packet [ 0 : i1oO0o00oOo00oO ] )
except :
return ( None )
if 33 - 33: I1IiiI . iIii1I11I1II1 / i11iIiiIii * Ii1I
packet = packet [ i1oO0o00oOo00oO : : ]
if 18 - 18: OoOoOO00 * OoOoOO00 - o0oOOo0O0Ooo % ooOoO0o % II111iiii - IiII
if 75 - 75: OoO0O00 . II111iiii . oO0o / OoO0O00 % iIii1I11I1II1
if 8 - 8: O0 / II111iiii
if 62 - 62: iIii1I11I1II1 % I1Ii111 % I1ii11iIi11i * IiII
oOoOo000 = "H"
O0OOoooO = struct . calcsize ( oOoOo000 )
iioOO = struct . unpack ( oOoOo000 , packet [ : O0OOoooO ] ) [ 0 ]
packet = packet [ O0OOoooO : : ]
if ( iioOO != 0 ) : return ( packet )
if 87 - 87: IiII
if 45 - 45: oO0o + II111iiii * O0 % OOooOOo . iIii1I11I1II1
if 55 - 55: IiII
if 43 - 43: OOooOOo
if ( O0OoOOo0o . has_key ( "source-eid" ) == False ) : return ( packet )
i1OO0o = O0OoOOo0o [ "source-eid" ]
iioOO = LISP_AFI_IPV4 if i1OO0o . count ( "." ) == 3 else LISP_AFI_IPV6 if i1OO0o . count ( ":" ) == 7 else None
if 64 - 64: i1IIi / o0oOOo0O0Ooo
if ( iioOO == None ) :
lprint ( "Bad JSON 'source-eid' value: {}" . format ( i1OO0o ) )
return ( None )
if 24 - 24: I1ii11iIi11i * OoO0O00 . OoooooooOO % Ii1I % O0
if 46 - 46: iII111i + I1Ii111 % OoooooooOO * I1ii11iIi11i
self . source_eid . afi = iioOO
self . source_eid . store_address ( i1OO0o )
if 89 - 89: IiII - IiII % iII111i / I11i + oO0o - IiII
if ( O0OoOOo0o . has_key ( "signature-eid" ) == False ) : return ( packet )
i1OO0o = O0OoOOo0o [ "signature-eid" ]
if ( i1OO0o . count ( ":" ) != 7 ) :
lprint ( "Bad JSON 'signature-eid' value: {}" . format ( i1OO0o ) )
return ( None )
if 97 - 97: Ii1I % OoOoOO00 / I1ii11iIi11i / iIii1I11I1II1 * OoooooooOO * OOooOOo
if 80 - 80: oO0o / O0
self . signature_eid . afi = LISP_AFI_IPV6
self . signature_eid . store_address ( i1OO0o )
if 55 - 55: I1IiiI * I11i / O0 % OoOoOO00
if ( O0OoOOo0o . has_key ( "signature" ) == False ) : return ( packet )
IiiiI1I1i = binascii . a2b_base64 ( O0OoOOo0o [ "signature" ] )
self . map_request_signature = IiiiI1I1i
return ( packet )
if 71 - 71: i11iIiiIii * OoOoOO00 * OOooOOo + oO0o + Oo0Ooo
if 59 - 59: IiII
def decode ( self , packet , source , port ) :
oOoOo000 = "I"
O0OOoooO = struct . calcsize ( oOoOo000 )
if ( len ( packet ) < O0OOoooO ) : return ( None )
if 54 - 54: OOooOOo
oOoOo00oo = struct . unpack ( oOoOo000 , packet [ : O0OOoooO ] )
oOoOo00oo = oOoOo00oo [ 0 ]
packet = packet [ O0OOoooO : : ]
if 27 - 27: OoOoOO00 - OoO0O00 + o0oOOo0O0Ooo + ooOoO0o . OoO0O00
oOoOo000 = "Q"
O0OOoooO = struct . calcsize ( oOoOo000 )
if ( len ( packet ) < O0OOoooO ) : return ( None )
if 86 - 86: II111iiii - OoooooooOO - ooOoO0o % iII111i
oOo0 = struct . unpack ( oOoOo000 , packet [ : O0OOoooO ] )
packet = packet [ O0OOoooO : : ]
if 16 - 16: ooOoO0o + Oo0Ooo + OoooooooOO
oOoOo00oo = socket . ntohl ( oOoOo00oo )
self . auth_bit = True if ( oOoOo00oo & 0x08000000 ) else False
self . map_data_present = True if ( oOoOo00oo & 0x04000000 ) else False
self . rloc_probe = True if ( oOoOo00oo & 0x02000000 ) else False
self . smr_bit = True if ( oOoOo00oo & 0x01000000 ) else False
self . pitr_bit = True if ( oOoOo00oo & 0x00800000 ) else False
self . smr_invoked_bit = True if ( oOoOo00oo & 0x00400000 ) else False
self . mobile_node = True if ( oOoOo00oo & 0x00200000 ) else False
self . xtr_id_present = True if ( oOoOo00oo & 0x00100000 ) else False
self . local_xtr = True if ( oOoOo00oo & 0x00004000 ) else False
self . dont_reply_bit = True if ( oOoOo00oo & 0x00002000 ) else False
self . itr_rloc_count = ( ( oOoOo00oo >> 8 ) & 0x1f ) + 1
self . record_count = oOoOo00oo & 0xff
self . nonce = oOo0 [ 0 ]
if 87 - 87: I1IiiI . oO0o / IiII - OoooooooOO
if 33 - 33: oO0o % OoO0O00 . iIii1I11I1II1 / IiII
if 3 - 3: Ii1I + OoO0O00
if 60 - 60: OoO0O00 . OoOoOO00 - I1ii11iIi11i - I1IiiI - II111iiii % Oo0Ooo
if ( self . xtr_id_present ) :
if ( self . decode_xtr_id ( packet ) == False ) : return ( None )
if 62 - 62: O0 + iII111i - iII111i % iIii1I11I1II1
if 47 - 47: I1Ii111 + I1IiiI
O0OOoooO = struct . calcsize ( "H" )
if ( len ( packet ) < O0OOoooO ) : return ( None )
if 40 - 40: iIii1I11I1II1 % Ii1I + II111iiii - I1IiiI
iioOO = struct . unpack ( "H" , packet [ : O0OOoooO ] )
self . source_eid . afi = socket . ntohs ( iioOO [ 0 ] )
packet = packet [ O0OOoooO : : ]
if 80 - 80: oO0o
if ( self . source_eid . afi == LISP_AFI_LCAF ) :
Oo00o = packet
packet = self . source_eid . lcaf_decode_iid ( packet )
if ( packet == None ) :
packet = self . lcaf_decode_json ( Oo00o )
if ( packet == None ) : return ( None )
if 14 - 14: II111iiii + O0 - iII111i
elif ( self . source_eid . afi != LISP_AFI_NONE ) :
packet = self . source_eid . unpack_address ( packet )
if ( packet == None ) : return ( None )
if 18 - 18: o0oOOo0O0Ooo / i11iIiiIii % I1ii11iIi11i * OoooooooOO
self . source_eid . mask_len = self . source_eid . host_mask_len ( )
if 67 - 67: OoOoOO00
OOO0 = ( os . getenv ( "LISP_NO_CRYPTO" ) != None )
self . itr_rlocs = [ ]
while ( self . itr_rloc_count != 0 ) :
O0OOoooO = struct . calcsize ( "H" )
if ( len ( packet ) < O0OOoooO ) : return ( None )
if 75 - 75: I1IiiI
iioOO = struct . unpack ( "H" , packet [ : O0OOoooO ] ) [ 0 ]
if 99 - 99: ooOoO0o . Ii1I
o00ooOOo0ooO0 = lisp_address ( LISP_AFI_NONE , "" , 32 , 0 )
o00ooOOo0ooO0 . afi = socket . ntohs ( iioOO )
if 92 - 92: i1IIi
if 68 - 68: OoO0O00 % IiII - oO0o - ooOoO0o . Oo0Ooo
if 30 - 30: OoooooooOO % o0oOOo0O0Ooo + ooOoO0o * OoO0O00
if 57 - 57: I11i + iIii1I11I1II1 . OoO0O00 + oO0o
if 4 - 4: Ii1I
if ( o00ooOOo0ooO0 . afi != LISP_AFI_LCAF ) :
if ( len ( packet ) < o00ooOOo0ooO0 . addr_length ( ) ) : return ( None )
packet = o00ooOOo0ooO0 . unpack_address ( packet [ O0OOoooO : : ] )
if ( packet == None ) : return ( None )
if 43 - 43: i1IIi . I1IiiI * iIii1I11I1II1 * i11iIiiIii - OOooOOo + ooOoO0o
if ( OOO0 ) :
self . itr_rlocs . append ( o00ooOOo0ooO0 )
self . itr_rloc_count -= 1
continue
if 56 - 56: Oo0Ooo % i11iIiiIii / Ii1I . I1Ii111 . OoO0O00 - OoOoOO00
if 32 - 32: I1Ii111 / oO0o / I1IiiI
I1iiIiiii1111 = lisp_build_crypto_decap_lookup_key ( o00ooOOo0ooO0 , port )
if 22 - 22: OoO0O00 - OoOoOO00 . Oo0Ooo + o0oOOo0O0Ooo
if 69 - 69: oO0o - I1IiiI
if 10 - 10: i1IIi / iII111i . II111iiii * i1IIi % OoooooooOO
if 83 - 83: I11i . OOooOOo + I1Ii111 * I11i . I1Ii111 + oO0o
if 64 - 64: Ii1I . o0oOOo0O0Ooo - i1IIi
if ( lisp_nat_traversal and o00ooOOo0ooO0 . is_private_address ( ) and source ) : o00ooOOo0ooO0 = source
if 35 - 35: I1ii11iIi11i % OoooooooOO
oO0oO0oOoo = lisp_crypto_keys_by_rloc_decap
if ( oO0oO0oOoo . has_key ( I1iiIiiii1111 ) ) : oO0oO0oOoo . pop ( I1iiIiiii1111 )
if 34 - 34: IiII
if 5 - 5: OoO0O00 . I1IiiI
if 48 - 48: Oo0Ooo - OoO0O00 . I11i - iIii1I11I1II1 % Ii1I
if 47 - 47: iII111i / OoooooooOO - II111iiii
if 91 - 91: OoOoOO00 + o0oOOo0O0Ooo
if 23 - 23: i1IIi
lisp_write_ipc_decap_key ( I1iiIiiii1111 , None )
else :
IIiIiIii11I1 = packet
IiI11IiIIi = lisp_keys ( 1 )
packet = IiI11IiIIi . decode_lcaf ( IIiIiIii11I1 , 0 )
if ( packet == None ) : return ( None )
if 92 - 92: Ii1I
if 48 - 48: iII111i . I1IiiI + O0
if 19 - 19: I1IiiI / I1Ii111 - I11i
if 49 - 49: iIii1I11I1II1 - iIii1I11I1II1 - OoOoOO00 + IiII / OoOoOO00
I1IIiIIiiI1i = [ LISP_CS_25519_CBC , LISP_CS_25519_GCM ,
LISP_CS_25519_CHACHA ]
if ( IiI11IiIIi . cipher_suite in I1IIiIIiiI1i ) :
if ( IiI11IiIIi . cipher_suite == LISP_CS_25519_CBC or
IiI11IiIIi . cipher_suite == LISP_CS_25519_GCM ) :
iii11 = lisp_keys ( 1 , do_poly = False , do_chacha = False )
if 74 - 74: OoooooooOO + I1ii11iIi11i % O0
if ( IiI11IiIIi . cipher_suite == LISP_CS_25519_CHACHA ) :
iii11 = lisp_keys ( 1 , do_poly = True , do_chacha = True )
if 32 - 32: I1ii11iIi11i + I1ii11iIi11i
else :
iii11 = lisp_keys ( 1 , do_poly = False , do_curve = False ,
do_chacha = False )
if 89 - 89: ooOoO0o + oO0o + Ii1I - OOooOOo
packet = iii11 . decode_lcaf ( IIiIiIii11I1 , 0 )
if ( packet == None ) : return ( None )
if 12 - 12: OoOoOO00 - o0oOOo0O0Ooo - I1Ii111 / I11i
if ( len ( packet ) < O0OOoooO ) : return ( None )
iioOO = struct . unpack ( "H" , packet [ : O0OOoooO ] ) [ 0 ]
o00ooOOo0ooO0 . afi = socket . ntohs ( iioOO )
if ( len ( packet ) < o00ooOOo0ooO0 . addr_length ( ) ) : return ( None )
if 17 - 17: OoO0O00 - I1Ii111 - II111iiii / I1Ii111 / Ii1I
packet = o00ooOOo0ooO0 . unpack_address ( packet [ O0OOoooO : : ] )
if ( packet == None ) : return ( None )
if 30 - 30: OOooOOo * I1ii11iIi11i % I1ii11iIi11i + iII111i * IiII
if ( OOO0 ) :
self . itr_rlocs . append ( o00ooOOo0ooO0 )
self . itr_rloc_count -= 1
continue
if 33 - 33: o0oOOo0O0Ooo + I11i * O0 * OoO0O00 . I1ii11iIi11i
if 74 - 74: iII111i * iII111i * o0oOOo0O0Ooo / oO0o
I1iiIiiii1111 = lisp_build_crypto_decap_lookup_key ( o00ooOOo0ooO0 , port )
if 91 - 91: i11iIiiIii . I1ii11iIi11i / II111iiii
O00oO0OOOo0 = None
if ( lisp_nat_traversal and o00ooOOo0ooO0 . is_private_address ( ) and source ) : o00ooOOo0ooO0 = source
if 64 - 64: Ii1I - iII111i
if 12 - 12: i1IIi
if ( lisp_crypto_keys_by_rloc_decap . has_key ( I1iiIiiii1111 ) ) :
o00OO0o0 = lisp_crypto_keys_by_rloc_decap [ I1iiIiiii1111 ]
O00oO0OOOo0 = o00OO0o0 [ 1 ] if o00OO0o0 and o00OO0o0 [ 1 ] else None
if 99 - 99: II111iiii - I1ii11iIi11i * IiII
if 3 - 3: IiII - I1ii11iIi11i * iII111i * I1ii11iIi11i + Oo0Ooo
IIi1i1iI11I11 = True
if ( O00oO0OOOo0 ) :
if ( O00oO0OOOo0 . compare_keys ( iii11 ) ) :
self . keys = [ None , O00oO0OOOo0 , None , None ]
lprint ( "Maintain stored decap-keys for RLOC {}" . format ( red ( I1iiIiiii1111 , False ) ) )
if 67 - 67: i11iIiiIii % I11i
else :
IIi1i1iI11I11 = False
ii1I11iIi = bold ( "Remote decap-rekeying" , False )
lprint ( "{} for RLOC {}" . format ( ii1I11iIi , red ( I1iiIiiii1111 ,
False ) ) )
iii11 . copy_keypair ( O00oO0OOOo0 )
iii11 . uptime = O00oO0OOOo0 . uptime
O00oO0OOOo0 = None
if 13 - 13: O0 . iII111i - IiII % i11iIiiIii % I1IiiI
if 88 - 88: i1IIi % O0
if 35 - 35: OoOoOO00 % OoO0O00 + O0 * o0oOOo0O0Ooo % I1ii11iIi11i
if ( O00oO0OOOo0 == None ) :
self . keys = [ None , iii11 , None , None ]
if ( lisp_i_am_etr == False and lisp_i_am_rtr == False ) :
iii11 . local_public_key = None
lprint ( "{} for {}" . format ( bold ( "Ignoring decap-keys" ,
False ) , red ( I1iiIiiii1111 , False ) ) )
elif ( iii11 . remote_public_key != None ) :
if ( IIi1i1iI11I11 ) :
lprint ( "{} for RLOC {}" . format ( bold ( "New decap-keying" , False ) ,
# i1IIi / I11i - o0oOOo0O0Ooo - ooOoO0o
red ( I1iiIiiii1111 , False ) ) )
if 98 - 98: Oo0Ooo + OoOoOO00 * OOooOOo / iII111i * OoOoOO00 / OoooooooOO
iii11 . compute_shared_key ( "decap" )
iii11 . add_key_by_rloc ( I1iiIiiii1111 , False )
if 35 - 35: II111iiii . OOooOOo + iIii1I11I1II1 . i1IIi - OoOoOO00 + IiII
if 55 - 55: Oo0Ooo % I1Ii111 . II111iiii
if 53 - 53: O0 / OoO0O00 % i11iIiiIii
if 11 - 11: I1Ii111 + i1IIi - iII111i - OoO0O00 * ooOoO0o / ooOoO0o
self . itr_rlocs . append ( o00ooOOo0ooO0 )
self . itr_rloc_count -= 1
if 4 - 4: iIii1I11I1II1 - i11iIiiIii * OoO0O00 . I1Ii111 + o0oOOo0O0Ooo
if 11 - 11: OoOoOO00 % I1ii11iIi11i - Ii1I - I1Ii111
O0OOoooO = struct . calcsize ( "BBH" )
if ( len ( packet ) < O0OOoooO ) : return ( None )
if 58 - 58: OoOoOO00 . Ii1I / IiII * oO0o
Oooo00oOO00 , ooooOo00OO0o , iioOO = struct . unpack ( "BBH" , packet [ : O0OOoooO ] )
self . subscribe_bit = ( Oooo00oOO00 & 0x80 )
self . target_eid . afi = socket . ntohs ( iioOO )
packet = packet [ O0OOoooO : : ]
if 70 - 70: OoooooooOO
self . target_eid . mask_len = ooooOo00OO0o
if ( self . target_eid . afi == LISP_AFI_LCAF ) :
packet , OOOoo = self . target_eid . lcaf_decode_eid ( packet )
if ( packet == None ) : return ( None )
if ( OOOoo ) : self . target_group = OOOoo
else :
packet = self . target_eid . unpack_address ( packet )
if ( packet == None ) : return ( None )
packet = packet [ O0OOoooO : : ]
if 97 - 97: I11i
return ( packet )
if 84 - 84: IiII - OoOoOO00 . IiII + ooOoO0o . iII111i
if 96 - 96: Ii1I % iII111i * Ii1I % I1IiiI . o0oOOo0O0Ooo / o0oOOo0O0Ooo
def print_eid_tuple ( self ) :
return ( lisp_print_eid_tuple ( self . target_eid , self . target_group ) )
if 7 - 7: OoO0O00 - ooOoO0o % i1IIi
if 24 - 24: OoO0O00 % O0 % I11i
def encode_xtr_id ( self , packet ) :
o0O0o0O0O = self . xtr_id >> 64
ii11iIi1IiI = self . xtr_id & 0xffffffffffffffff
o0O0o0O0O = byte_swap_64 ( o0O0o0O0O )
ii11iIi1IiI = byte_swap_64 ( ii11iIi1IiI )
packet += struct . pack ( "QQ" , o0O0o0O0O , ii11iIi1IiI )
return ( packet )
if 61 - 61: ooOoO0o . iII111i / ooOoO0o * OoooooooOO
if 13 - 13: II111iiii
def decode_xtr_id ( self , packet ) :
O0OOoooO = struct . calcsize ( "QQ" )
if ( len ( packet ) < O0OOoooO ) : return ( None )
packet = packet [ len ( packet ) - O0OOoooO : : ]
o0O0o0O0O , ii11iIi1IiI = struct . unpack ( "QQ" , packet [ : O0OOoooO ] )
o0O0o0O0O = byte_swap_64 ( o0O0o0O0O )
ii11iIi1IiI = byte_swap_64 ( ii11iIi1IiI )
self . xtr_id = ( o0O0o0O0O << 64 ) | ii11iIi1IiI
return ( True )
if 17 - 17: II111iiii
if 66 - 66: IiII * oO0o
if 73 - 73: i11iIiiIii + O0 % O0
if 70 - 70: II111iiii * OoooooooOO - Ii1I + oO0o * O0
if 49 - 49: oO0o . Ii1I . OoOoOO00 - I1ii11iIi11i
if 74 - 74: ooOoO0o % I1ii11iIi11i * i1IIi
if 18 - 18: OoOoOO00
if 30 - 30: II111iiii
if 27 - 27: i1IIi - iIii1I11I1II1 + O0 % Oo0Ooo / OOooOOo + i1IIi
if 48 - 48: Oo0Ooo
if 70 - 70: OoooooooOO * i11iIiiIii
if 60 - 60: IiII / iIii1I11I1II1 + OoooooooOO - I1ii11iIi11i * i11iIiiIii
if 47 - 47: O0 . I1IiiI / ooOoO0o % i11iIiiIii
if 47 - 47: Ii1I . OoOoOO00 . iIii1I11I1II1 . o0oOOo0O0Ooo
if 39 - 39: o0oOOo0O0Ooo
if 89 - 89: OoooooooOO + iII111i . I1Ii111 / Ii1I
if 75 - 75: iIii1I11I1II1 * iII111i / OoOoOO00 * II111iiii . i1IIi
if 6 - 6: Ii1I % Ii1I / OoooooooOO * oO0o . I1IiiI . i1IIi
if 59 - 59: I11i . I11i * I1IiiI - Ii1I % OoOoOO00
if 19 - 19: OoooooooOO / Oo0Ooo - I1Ii111 . OoOoOO00
if 8 - 8: I11i % ooOoO0o . iIii1I11I1II1
if 95 - 95: o0oOOo0O0Ooo + i11iIiiIii . I1ii11iIi11i . ooOoO0o . o0oOOo0O0Ooo
if 93 - 93: iII111i
if 55 - 55: II111iiii % o0oOOo0O0Ooo - OoO0O00
if 48 - 48: ooOoO0o * iIii1I11I1II1 % OoOoOO00
if 100 - 100: II111iiii - i11iIiiIii + OoO0O00 % ooOoO0o - iIii1I11I1II1 * i11iIiiIii
if 30 - 30: OoO0O00 . OoO0O00 . Ii1I % Ii1I * i1IIi * oO0o
if 74 - 74: OoooooooOO
if 33 - 33: o0oOOo0O0Ooo - II111iiii
if 95 - 95: OoooooooOO
if 23 - 23: II111iiii + I11i / O0 . I11i . I1Ii111 + iIii1I11I1II1
if 2 - 2: i1IIi . O0 / o0oOOo0O0Ooo . II111iiii / OoO0O00 % i1IIi
class lisp_map_reply():
    """
    Encoder/decoder for the fixed LISP Map-Reply header (flags,
    hop-count, record-count, 64-bit nonce).  EID-records that follow
    the header are handled separately by lisp_eid_record.
    """

    def __init__(self):
        # Flag bits from the first 32-bit word.
        self.rloc_probe = False
        self.echo_nonce_capable = False
        self.security = False
        # Counts and the echoed request nonce.
        self.record_count = 0
        self.hop_count = 0
        self.nonce = 0
        # Crypto-key list matched by nonce in decode(), if any.
        self.keys = None

    def print_map_reply(self):
        """Log a one-line summary of this Map-Reply header."""
        line = ("{} -> flags: {}{}{}, hop-count: {}, record-count: {}, " +
            "nonce: 0x{}")
        lprint(line.format(bold("Map-Reply", False),
            "R" if self.rloc_probe else "r",
            "E" if self.echo_nonce_capable else "e",
            "S" if self.security else "s",
            self.hop_count, self.record_count,
            lisp_hex_string(self.nonce)))

    def encode(self):
        """Return the packed 12-byte Map-Reply header."""
        first_long = (LISP_MAP_REPLY << 28) | self.record_count
        first_long |= self.hop_count << 8
        if (self.rloc_probe): first_long |= 0x08000000
        if (self.echo_nonce_capable): first_long |= 0x04000000
        if (self.security): first_long |= 0x02000000

        header = struct.pack("I", socket.htonl(first_long))
        header += struct.pack("Q", self.nonce)
        return (header)

    def decode(self, packet):
        """
        Parse the Map-Reply header from 'packet'.  Returns the buffer
        past the header, or None when truncated.  Side effect: claims
        (and removes) any pending crypto keys stored by nonce.
        """
        fmt = "I"
        size = struct.calcsize(fmt)
        if (len(packet) < size): return (None)

        first_long = struct.unpack(fmt, packet[:size])[0]
        packet = packet[size::]

        fmt = "Q"
        size = struct.calcsize(fmt)
        if (len(packet) < size): return (None)

        nonce = struct.unpack(fmt, packet[:size])[0]
        packet = packet[size::]

        first_long = socket.ntohl(first_long)
        self.rloc_probe = True if (first_long & 0x08000000) else False
        self.echo_nonce_capable = True if (first_long & 0x04000000) else False
        self.security = True if (first_long & 0x02000000) else False
        self.hop_count = (first_long >> 8) & 0xff
        self.record_count = first_long & 0xff
        self.nonce = nonce

        # Pick up keys negotiated by the matching Map-Request nonce.
        if (lisp_crypto_keys_by_nonce.has_key(self.nonce)):
            self.keys = lisp_crypto_keys_by_nonce[self.nonce]
            self.keys[1].delete_key_by_nonce(self.nonce)

        return (packet)
if 100 - 100: II111iiii
if 16 - 16: Ii1I
if 96 - 96: o0oOOo0O0Ooo / I1Ii111 % Ii1I - ooOoO0o
if 35 - 35: OOooOOo
if 90 - 90: i11iIiiIii
if 47 - 47: OoO0O00 . i11iIiiIii
if 9 - 9: OoOoOO00 - I11i . OoooooooOO % ooOoO0o
if 13 - 13: OoO0O00 * iIii1I11I1II1 + II111iiii - Oo0Ooo - OoOoOO00
if 43 - 43: iII111i / I1Ii111 * I1IiiI % ooOoO0o % I1IiiI
if 18 - 18: OoO0O00
if 99 - 99: iII111i / oO0o . i11iIiiIii / I11i + i1IIi - I11i
if 50 - 50: i1IIi
if 56 - 56: OoO0O00 + I1Ii111 / Ii1I
if 75 - 75: OoOoOO00
if 96 - 96: o0oOOo0O0Ooo * I11i * Oo0Ooo
if 36 - 36: OoooooooOO + ooOoO0o . oO0o * ooOoO0o + IiII
if 45 - 45: oO0o / iII111i + I1ii11iIi11i - Oo0Ooo - ooOoO0o . iIii1I11I1II1
if 52 - 52: I1IiiI + i1IIi . iII111i * I1IiiI
if 31 - 31: Oo0Ooo % iIii1I11I1II1 . O0
if 80 - 80: I11i / Oo0Ooo + I1ii11iIi11i
if 18 - 18: II111iiii - iII111i / iIii1I11I1II1 % OoOoOO00 % I1ii11iIi11i / o0oOOo0O0Ooo
if 47 - 47: OOooOOo
if 24 - 24: Ii1I % o0oOOo0O0Ooo
if 87 - 87: o0oOOo0O0Ooo % iII111i / ooOoO0o - IiII + i11iIiiIii
if 85 - 85: OoooooooOO * IiII . OOooOOo / iII111i / OoooooooOO
if 87 - 87: OoO0O00
if 32 - 32: i11iIiiIii - OoOoOO00 * I11i . Oo0Ooo * ooOoO0o
if 21 - 21: OOooOOo
if 11 - 11: oO0o % i11iIiiIii * O0
if 28 - 28: I1Ii111 / iIii1I11I1II1 + OOooOOo . I1ii11iIi11i % OOooOOo + OoO0O00
if 79 - 79: oO0o
if 39 - 39: I1Ii111 % oO0o % O0 % O0 - iII111i - oO0o
class lisp_eid_record():
    """
    One EID-record as carried in Map-Reply/Map-Register/Map-Referral
    messages: record TTL, RLOC count, action/authoritative bits,
    map-version, signature count, and the (EID, group) prefix pair.
    """

    def __init__(self):
        self.record_ttl = 0
        self.rloc_count = 0
        self.action = 0
        self.authoritative = False
        self.ddt_incomplete = False
        self.signature_count = 0
        self.map_version = 0
        # EID-prefix and (for (S,G) entries) the multicast group.
        self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.record_ttl = 0

    def print_prefix(self):
        """Return the colorized EID or (S,G) prefix string."""
        if (self.group.is_null()):
            return (green(self.eid.print_prefix(), False))

        return (green(self.eid.print_sg(self.group), False))

    def print_ttl(self):
        """
        Render record_ttl for display.  The high bit marks a TTL in
        seconds; otherwise the value is minutes (shown as hours when
        evenly divisible by 60).
        """
        ttl = self.record_ttl
        if (ttl & 0x80000000):
            return (str(ttl & 0x7fffffff) + " secs")

        if ((ttl % 60) == 0):
            return (str(ttl / 60) + " hours")

        return (str(ttl) + " mins")

    def store_ttl(self):
        """Return record_ttl normalized to seconds."""
        if (self.record_ttl & 0x80000000):
            return (self.record_ttl & 0x7fffffff)

        return (self.record_ttl * 60)

    def print_record(self, indent, ddt):
        """
        Log this EID-record, using the Map-Referral action names when
        'ddt' is True, else the Map-Reply action names.  'indent' is a
        prefix string for alignment.
        """
        incomplete_str = ""
        sig_str = ""
        action_str = bold("invalid-action", False)
        if (ddt):
            if (self.action < len(lisp_map_referral_action_string)):
                action_str = lisp_map_referral_action_string[self.action]
                action_str = bold(action_str, False)
                incomplete_str = (", " + bold("ddt-incomplete", False)) if \
                    self.ddt_incomplete else ""
                sig_str = (", sig-count: " + str(self.signature_count)) if \
                    (self.signature_count != 0) else ""
        else:
            if (self.action < len(lisp_map_reply_action_string)):
                action_str = lisp_map_reply_action_string[self.action]
                if (self.action != LISP_NO_ACTION):
                    action_str = bold(action_str, False)

        afi = LISP_AFI_LCAF if (self.eid.afi < 0) else self.eid.afi
        line = ("{}EID-record -> record-ttl: {}, rloc-count: {}, action: " +
            "{}, {}{}{}, map-version: {}, afi: {}, [iid]eid/ml: {}")

        lprint(line.format(indent, self.print_ttl(), self.rloc_count,
            action_str, "auth" if (self.authoritative is True) else "non-auth",
            incomplete_str, sig_str, self.map_version, afi,
            green(self.print_prefix(), False)))

    def encode(self):
        """
        Return the packed EID-record header plus the encoded prefix
        ((S,G) LCAF, geo-coord, instance-id LCAF, or plain AFI address).
        """
        flags = self.action << 13
        if (self.authoritative): flags |= 0x1000
        if (self.ddt_incomplete): flags |= 0x800

        # Any non-zero instance-id (or negative distinguished-name AFI)
        # forces an LCAF encoding, as does an (S,G) entry.
        afi = self.eid.afi if (self.eid.instance_id == 0) else LISP_AFI_LCAF
        if (afi < 0): afi = LISP_AFI_LCAF
        sg_entry = (self.group.is_null() == False)
        if (sg_entry): afi = LISP_AFI_LCAF

        version_field = (self.signature_count << 12) | self.map_version
        mask_len = 0 if self.eid.is_binary() == False else self.eid.mask_len

        record = struct.pack("IBBHHH", socket.htonl(self.record_ttl),
            self.rloc_count, mask_len, socket.htons(flags),
            socket.htons(version_field), socket.htons(afi))

        if (sg_entry):
            record += self.eid.lcaf_encode_sg(self.group)
            return (record)

        if (self.eid.afi == LISP_AFI_GEO_COORD and self.eid.instance_id == 0):
            # Strip the placeholder AFI; encode_geo() supplies its own.
            record = record[0:-2]
            record += self.eid.address.encode_geo()
            return (record)

        if (afi == LISP_AFI_LCAF):
            record += self.eid.lcaf_encode_iid()
            return (record)

        record += self.eid.pack_address()
        return (record)

    def decode(self, packet):
        """
        Parse an EID-record from 'packet' into this instance.  Returns
        the remaining buffer, or None when truncated.
        """
        fmt = "IBBHHH"
        size = struct.calcsize(fmt)
        if (len(packet) < size): return (None)

        self.record_ttl, self.rloc_count, self.eid.mask_len, flags, \
            self.map_version, self.eid.afi = struct.unpack(fmt,
            packet[:size])

        self.record_ttl = socket.ntohl(self.record_ttl)
        flags = socket.ntohs(flags)
        self.action = (flags >> 13) & 0x7
        self.authoritative = True if ((flags >> 12) & 1) else False
        self.ddt_incomplete = True if ((flags >> 11) & 1) else False
        self.map_version = socket.ntohs(self.map_version)
        self.signature_count = self.map_version >> 12
        self.map_version = self.map_version & 0xfff
        self.eid.afi = socket.ntohs(self.eid.afi)
        self.eid.instance_id = 0
        packet = packet[size::]

        if (self.eid.afi == LISP_AFI_LCAF):
            packet, group = self.eid.lcaf_decode_eid(packet)
            if (group): self.group = group
            self.group.instance_id = self.eid.instance_id
            return (packet)

        packet = self.eid.unpack_address(packet)
        return (packet)

    def print_eid_tuple(self):
        """Return the printable (EID, group) tuple for this record."""
        return (lisp_print_eid_tuple(self.eid, self.group))
if 20 - 20: Oo0Ooo / I1Ii111 . Oo0Ooo
if 60 - 60: I1ii11iIi11i - I1IiiI * O0 * Oo0Ooo . i1IIi . OoOoOO00
if 24 - 24: IiII * I1IiiI / OOooOOo
if 51 - 51: iIii1I11I1II1 / I11i * OoO0O00 * Ii1I + I1ii11iIi11i . OoooooooOO
if 75 - 75: IiII / OoooooooOO / O0 % OOooOOo
if 87 - 87: II111iiii / iIii1I11I1II1 % I1ii11iIi11i
if 11 - 11: o0oOOo0O0Ooo * OoO0O00
if 92 - 92: OoOoOO00 . Oo0Ooo * I11i
if 86 - 86: O0
if 55 - 55: Ii1I / I1Ii111 / I1ii11iIi11i % ooOoO0o % I1IiiI
if 55 - 55: oO0o + OoooooooOO % i1IIi
if 24 - 24: I1ii11iIi11i - Oo0Ooo
if 36 - 36: I1IiiI . OOooOOo % II111iiii * IiII
if 34 - 34: I11i % iII111i - ooOoO0o - I1IiiI
if 44 - 44: Ii1I . o0oOOo0O0Ooo . iIii1I11I1II1 + OoooooooOO - I1IiiI
if 22 - 22: I11i * I1ii11iIi11i . OoooooooOO / Oo0Ooo / Ii1I
if 54 - 54: I1Ii111 % Ii1I + ooOoO0o
if 45 - 45: Ii1I / oO0o * I1Ii111 . Ii1I
if 25 - 25: I1ii11iIi11i / I1ii11iIi11i
if 79 - 79: Oo0Ooo - OoO0O00 % Oo0Ooo . II111iiii
if 84 - 84: ooOoO0o * OoooooooOO + O0
if 84 - 84: i1IIi . I11i . i1IIi . Oo0Ooo
if 21 - 21: II111iiii . O0 + Oo0Ooo - i11iIiiIii
if 5 - 5: iIii1I11I1II1 * i11iIiiIii + OoO0O00 + I11i * O0 % ooOoO0o
if 88 - 88: o0oOOo0O0Ooo / i11iIiiIii * I1ii11iIi11i
if 23 - 23: O0 / iII111i
if 66 - 66: i1IIi % OoooooooOO * i11iIiiIii + oO0o * O0 / OoO0O00
if 14 - 14: I1IiiI . IiII
if 29 - 29: OoooooooOO / IiII + OoOoOO00 - I1Ii111 + IiII . i1IIi
if 26 - 26: i11iIiiIii - II111iiii
if 43 - 43: I1IiiI
# IANA IP protocol number for UDP; used in the inner IP header of an ECM.
LISP_UDP_PROTOCOL = 17
# Default TTL written into the inner IP header of an ECM.
LISP_DEFAULT_ECM_TTL = 128
if 35 - 35: ooOoO0o + OoOoOO00 * OoooooooOO - II111iiii
class lisp_ecm ( ) :
def __init__ ( self , sport ) :
self . security = False
self . ddt = False
self . to_etr = False
self . to_ms = False
self . length = 0
self . ttl = LISP_DEFAULT_ECM_TTL
self . protocol = LISP_UDP_PROTOCOL
self . ip_checksum = 0
self . source = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . dest = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . udp_sport = sport
self . udp_dport = LISP_CTRL_PORT
self . udp_checksum = 0
self . udp_length = 0
self . afi = LISP_AFI_NONE
if 19 - 19: i1IIi / Ii1I / OoOoOO00 . I1IiiI / Ii1I % o0oOOo0O0Ooo
if 39 - 39: ooOoO0o - OoooooooOO
def print_ecm ( self ) :
oooOo = ( "{} -> flags: {}{}{}{}, " + "inner IP: {} -> {}, inner UDP: {} -> {}" )
if 88 - 88: i1IIi + iIii1I11I1II1 * i11iIiiIii - OoooooooOO % o0oOOo0O0Ooo
lprint ( oooOo . format ( bold ( "ECM" , False ) , "S" if self . security else "s" ,
"D" if self . ddt else "d" , "E" if self . to_etr else "e" ,
"M" if self . to_ms else "m" ,
green ( self . source . print_address ( ) , False ) ,
green ( self . dest . print_address ( ) , False ) , self . udp_sport ,
self . udp_dport ) )
if 74 - 74: ooOoO0o - i11iIiiIii
def encode ( self , packet , inner_source , inner_dest ) :
self . udp_length = len ( packet ) + 8
self . source = inner_source
self . dest = inner_dest
if ( inner_dest . is_ipv4 ( ) ) :
self . afi = LISP_AFI_IPV4
self . length = self . udp_length + 20
if 34 - 34: IiII + I1Ii111 + Oo0Ooo / II111iiii
if ( inner_dest . is_ipv6 ( ) ) :
self . afi = LISP_AFI_IPV6
self . length = self . udp_length
if 33 - 33: Ii1I . i1IIi - II111iiii - OoO0O00
if 31 - 31: I11i - OoOoOO00 / o0oOOo0O0Ooo * OoOoOO00 / Oo0Ooo + o0oOOo0O0Ooo
if 46 - 46: IiII * OoO0O00 / OOooOOo + Oo0Ooo
if 24 - 24: ooOoO0o % OOooOOo . O0 * Oo0Ooo
if 52 - 52: O0 . I1Ii111 + iII111i / i11iIiiIii
if 52 - 52: oO0o % Oo0Ooo * II111iiii
oOoOo00oo = ( LISP_ECM << 28 )
if ( self . security ) : oOoOo00oo |= 0x08000000
if ( self . ddt ) : oOoOo00oo |= 0x04000000
if ( self . to_etr ) : oOoOo00oo |= 0x02000000
if ( self . to_ms ) : oOoOo00oo |= 0x01000000
if 24 - 24: i11iIiiIii * i1IIi * i1IIi
iiIIIiI1 = struct . pack ( "I" , socket . htonl ( oOoOo00oo ) )
if 89 - 89: I1Ii111
i1I1i1i = ""
if ( self . afi == LISP_AFI_IPV4 ) :
i1I1i1i = struct . pack ( "BBHHHBBH" , 0x45 , 0 , socket . htons ( self . length ) ,
0 , 0 , self . ttl , self . protocol , socket . htons ( self . ip_checksum ) )
i1I1i1i += self . source . pack_address ( )
i1I1i1i += self . dest . pack_address ( )
i1I1i1i = lisp_ip_checksum ( i1I1i1i )
if 19 - 19: IiII + I1Ii111
if ( self . afi == LISP_AFI_IPV6 ) :
i1I1i1i = struct . pack ( "BBHHBB" , 0x60 , 0 , 0 , socket . htons ( self . length ) ,
self . protocol , self . ttl )
i1I1i1i += self . source . pack_address ( )
i1I1i1i += self . dest . pack_address ( )
if 65 - 65: Ii1I - oO0o + i1IIi + OOooOOo % iII111i
if 5 - 5: OoO0O00 / iII111i / OOooOOo
IiIIi1I1I11Ii = socket . htons ( self . udp_sport )
oOo0OOOOOO = socket . htons ( self . udp_dport )
IIi11I1i1I1I = socket . htons ( self . udp_length )
Oo0ooooO0o00 = socket . htons ( self . udp_checksum )
I1iIIIiI = struct . pack ( "HHHH" , IiIIi1I1I11Ii , oOo0OOOOOO , IIi11I1i1I1I , Oo0ooooO0o00 )
return ( iiIIIiI1 + i1I1i1i + I1iIIIiI )
if 70 - 70: OoOoOO00 - I11i + ooOoO0o / i11iIiiIii / I1IiiI % iIii1I11I1II1
if 83 - 83: oO0o . Ii1I - o0oOOo0O0Ooo % I11i + i11iIiiIii
def decode(self, packet):
    """
    Parse an Encapsulated-Control-Message from 'packet'.

    Consumes, in order: the 4-byte ECM header (flag bits), the inner
    IPv4 or IPv6 header, and the inner UDP header, storing the decoded
    fields on self.  Returns the remaining packet bytes (the inner
    control message) or None when the packet is too short to parse.
    """

    #
    # Parse the first long of the ECM header and extract the flag bits.
    #
    packet_format = "I"
    format_size = struct.calcsize(packet_format)
    if (len(packet) < format_size): return(None)

    first_long = struct.unpack(packet_format, packet[:format_size])[0]
    first_long = socket.ntohl(first_long)
    self.security = bool(first_long & 0x08000000)
    self.ddt = bool(first_long & 0x04000000)
    self.to_etr = bool(first_long & 0x02000000)
    self.to_ms = bool(first_long & 0x01000000)
    packet = packet[format_size::]

    #
    # The IP version is the high nibble of the first inner-header byte.
    #
    if (len(packet) < 1): return(None)
    version = struct.unpack("B", packet[0:1])[0]
    version = version >> 4

    if (version == 4):
        format_size = struct.calcsize("HHIBBH")
        if (len(packet) < format_size): return(None)

        short1, length, long1, ttl, protocol, checksum = \
            struct.unpack("HHIBBH", packet[:format_size])
        self.length = socket.ntohs(length)
        self.ttl = ttl
        self.protocol = protocol
        self.ip_checksum = socket.ntohs(checksum)
        self.source.afi = self.dest.afi = LISP_AFI_IPV4

        #
        # Zero out the embedded IPv4 header-checksum field in place so
        # a later re-checksum starts from zero.
        #
        zero_checksum = struct.pack("H", 0)
        offset = struct.calcsize("HHIBB")
        size = struct.calcsize("H")
        packet = packet[:offset] + zero_checksum + packet[offset + size:]

        packet = packet[format_size::]
        packet = self.source.unpack_address(packet)
        if (packet is None): return(None)
        packet = self.dest.unpack_address(packet)
        if (packet is None): return(None)

    if (version == 6):
        format_size = struct.calcsize("IHBB")
        if (len(packet) < format_size): return(None)

        long1, length, protocol, ttl = \
            struct.unpack("IHBB", packet[:format_size])
        self.length = socket.ntohs(length)
        self.protocol = protocol
        self.ttl = ttl
        self.source.afi = self.dest.afi = LISP_AFI_IPV6

        packet = packet[format_size::]
        packet = self.source.unpack_address(packet)
        if (packet is None): return(None)
        packet = self.dest.unpack_address(packet)
        if (packet is None): return(None)

    self.source.mask_len = self.source.host_mask_len()
    self.dest.mask_len = self.dest.host_mask_len()

    #
    # Finally, the inner UDP header.
    #
    format_size = struct.calcsize("HHHH")
    if (len(packet) < format_size): return(None)

    sport, dport, udp_length, udp_checksum = \
        struct.unpack("HHHH", packet[:format_size])
    self.udp_sport = socket.ntohs(sport)
    self.udp_dport = socket.ntohs(dport)
    self.udp_length = socket.ntohs(udp_length)
    self.udp_checksum = socket.ntohs(udp_checksum)
    packet = packet[format_size::]
    return(packet)
#
# Obfuscator filler removed: this region held only dead "if N - N:"
# statements whose conditions are always zero (false), so none of the
# bodies ever executed.  Nothing here had any runtime effect.
#
class lisp_rloc_record():
    """
    One RLOC-record from a LISP control message: priority/weight
    tuples, flag bits, and the RLOC itself, optionally carried in LCAF
    encodings (geo-coordinates, ELP, RLE, JSON, security keys, or a
    distinguished rloc-name).

    Fixes vs. the original: the Python-2-only dict.has_key() lookups
    in store_rloc_entry() are replaced with the 'in' operator
    (identical behavior, also valid on Python 3), and the dead
    obfuscation filler statements are removed.
    """

    def __init__(self):
        self.priority = 0
        self.weight = 0
        self.mpriority = 0
        self.mweight = 0
        self.local_bit = False
        self.probe_bit = False
        self.reach_bit = False
        self.rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.geo = None
        self.elp = None
        self.rle = None
        self.json = None
        self.rloc_name = None
        self.keys = None

    def print_rloc_name(self, cour=False):
        """
        Return "rloc-name: <name>" for display, or "" when no name is
        stored.  'cour' wraps the name in courier-font markup.
        """
        if (self.rloc_name == None): return("")
        rloc_name = self.rloc_name
        if (cour): rloc_name = lisp_print_cour(rloc_name)
        return('rloc-name: {}'.format(blue(rloc_name, cour)))

    def print_record(self, indent):
        """
        Log a one-line description of this RLOC-record, including any
        LCAF attachments (geo, elp, rle, json, security keys).
        """
        rloc_str = self.print_rloc_name()
        if (rloc_str != ""): rloc_str = ", " + rloc_str

        geo_str = ""
        if (self.geo):
            name = ""
            if (self.geo.geo_name): name = "'{}' ".format(self.geo.geo_name)
            geo_str = ", geo: {}{}".format(name, self.geo.print_geo())

        elp_str = ""
        if (self.elp):
            name = ""
            if (self.elp.elp_name): name = "'{}' ".format(self.elp.elp_name)
            elp_str = ", elp: {}{}".format(name, self.elp.print_elp(True))

        rle_str = ""
        if (self.rle):
            name = ""
            if (self.rle.rle_name): name = "'{}' ".format(self.rle.rle_name)
            rle_str = ", rle: {}{}".format(name, self.rle.print_rle(False))

        json_str = ""
        if (self.json):
            # NOTE(review): the json_name is computed here but, unlike
            # the geo/elp/rle cases, never included in the output
            # string.  Preserved as-is to keep the log format stable.
            name = ""
            if (self.json.json_name):
                name = "'{}' ".format(self.json.json_name)

            json_str = ", json: {}".format(self.json.print_json(False))

        sec_str = ""
        if (self.rloc.is_null() == False and self.keys and self.keys[1]):
            sec_str = ", " + self.keys[1].print_keys()

        line = ("{}RLOC-record -> flags: {}, {}/{}/{}/{}, afi: {}, rloc: "
            + "{}{}{}{}{}{}{}")
        lprint(line.format(indent, self.print_flags(), self.priority,
            self.weight, self.mpriority, self.mweight, self.rloc.afi,
            red(self.rloc.print_address_no_iid(), False), rloc_str,
            geo_str, elp_str, rle_str, json_str, sec_str))

    def print_flags(self):
        """
        Return the local/probe/reach flag bits as a 3-character string,
        uppercase when set (e.g. "LpR").
        """
        return("{}{}{}".format("L" if self.local_bit else "l",
            "P" if self.probe_bit else "p",
            "R" if self.reach_bit else "r"))

    def store_rloc_entry(self, rloc_entry):
        """
        Copy a configured RLOC entry into this record.  A translated
        (NATed) RLOC wins over the native one, and named geo/elp/rle/
        json references are resolved from their global tables when no
        inline object is attached.
        """
        rloc = rloc_entry.rloc if (rloc_entry.translated_rloc.is_null()) \
            else rloc_entry.translated_rloc

        self.rloc.copy_address(rloc)

        if (rloc_entry.rloc_name):
            self.rloc_name = rloc_entry.rloc_name

        if (rloc_entry.geo):
            self.geo = rloc_entry.geo
        else:
            name = rloc_entry.geo_name
            if (name and name in lisp_geo_list):
                self.geo = lisp_geo_list[name]

        if (rloc_entry.elp):
            self.elp = rloc_entry.elp
        else:
            name = rloc_entry.elp_name
            if (name and name in lisp_elp_list):
                self.elp = lisp_elp_list[name]

        if (rloc_entry.rle):
            self.rle = rloc_entry.rle
        else:
            name = rloc_entry.rle_name
            if (name and name in lisp_rle_list):
                self.rle = lisp_rle_list[name]

        if (rloc_entry.json):
            self.json = rloc_entry.json
        else:
            name = rloc_entry.json_name
            if (name and name in lisp_json_list):
                self.json = lisp_json_list[name]

        self.priority = rloc_entry.priority
        self.weight = rloc_entry.weight
        self.mpriority = rloc_entry.mpriority
        self.mweight = rloc_entry.mweight

    def encode_lcaf(self):
        """
        Encode this RLOC and its attachments as an LCAF AFI-List:
        the RLOC address and optional rloc-name, followed by the geo,
        elp, rle, security and json sub-encodings, in that order.
        Returns the packed bytes (without the record's fixed header).
        """
        lcaf_afi = socket.htons(LISP_AFI_LCAF)

        geo_packet = ""
        if (self.geo):
            geo_packet = self.geo.encode_geo()

        elp_packet = ""
        if (self.elp):
            elp_node_packet = ""
            for elp_node in self.elp.elp_nodes:
                afi = socket.htons(elp_node.address.afi)
                flags = 0
                if (elp_node.eid): flags |= 0x4
                if (elp_node.probe): flags |= 0x2
                if (elp_node.strict): flags |= 0x1
                flags = socket.htons(flags)
                elp_node_packet += struct.pack("HH", flags, afi)
                elp_node_packet += elp_node.address.pack_address()

            elp_length = socket.htons(len(elp_node_packet))
            elp_packet = struct.pack("HBBBBH", lcaf_afi, 0, 0,
                LISP_LCAF_ELP_TYPE, 0, elp_length)
            elp_packet += elp_node_packet

        rle_packet = ""
        if (self.rle):
            rle_node_packet = ""
            for rle_node in self.rle.rle_nodes:
                afi = socket.htons(rle_node.address.afi)
                rle_node_packet += struct.pack("HBBH", 0, 0,
                    rle_node.level, afi)
                rle_node_packet += rle_node.address.pack_address()
                if (rle_node.rloc_name):
                    rle_node_packet += struct.pack("H",
                        socket.htons(LISP_AFI_NAME))
                    rle_node_packet += rle_node.rloc_name + "\0"

            rle_length = socket.htons(len(rle_node_packet))
            rle_packet = struct.pack("HBBBBH", lcaf_afi, 0, 0,
                LISP_LCAF_RLE_TYPE, 0, rle_length)
            rle_packet += rle_node_packet

        json_packet = ""
        if (self.json):
            lcaf_length = socket.htons(len(self.json.json_string) + 2)
            json_length = socket.htons(len(self.json.json_string))
            json_packet = struct.pack("HBBBBHH", lcaf_afi, 0, 0,
                LISP_LCAF_JSON_TYPE, 0, lcaf_length, json_length)
            json_packet += self.json.json_string
            json_packet += struct.pack("H", 0)

        sec_packet = ""
        if (self.rloc.is_null() == False and self.keys and self.keys[1]):
            sec_packet = self.keys[1].encode_lcaf(self.rloc)

        name_packet = ""
        if (self.rloc_name):
            name_packet += struct.pack("H", socket.htons(LISP_AFI_NAME))
            name_packet += self.rloc_name + "\0"

        # The +2 accounts for the AFI field of the RLOC address itself.
        lcaf_length = len(geo_packet) + len(elp_packet) + \
            len(rle_packet) + len(sec_packet) + 2 + len(json_packet) + \
            self.rloc.addr_length() + len(name_packet)

        lcaf_length = socket.htons(lcaf_length)
        packet = struct.pack("HBBBBHH", lcaf_afi, 0, 0,
            LISP_LCAF_AFI_LIST_TYPE, 0, lcaf_length,
            socket.htons(self.rloc.afi))
        packet += self.rloc.pack_address()
        return(packet + name_packet + geo_packet + elp_packet +
            rle_packet + sec_packet + json_packet)

    def encode(self):
        """
        Encode this RLOC-record: the fixed header (priorities, weights,
        flags), followed by either a plain AFI-encoded address or an
        LCAF encoding when any attachment is present.
        """
        flags = 0
        if (self.local_bit): flags |= 0x0004
        if (self.probe_bit): flags |= 0x0002
        if (self.reach_bit): flags |= 0x0001

        packet = struct.pack("BBBBHH", self.priority, self.weight,
            self.mpriority, self.mweight, socket.htons(flags),
            socket.htons(self.rloc.afi))

        if (self.geo or self.elp or self.rle or self.keys or
            self.rloc_name or self.json):

            # Strip the trailing 2-byte AFI; encode_lcaf() supplies it.
            packet = packet[0:-2] + self.encode_lcaf()
        else:
            packet += self.rloc.pack_address()

        return(packet)

    def decode_lcaf(self, packet, nonce):
        """
        Decode one LCAF-encoded RLOC from 'packet'.  Supported LCAF
        types: AFI-List, Geo-Coord, JSON, ELP, RLE and Security (key
        exchange); unknown types are skipped.  Returns the remaining
        packet bytes or None on a malformed encoding.
        """
        packet_format = "HBBBBH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        afi, rsvd1, flags, lcaf_type, rsvd2, lcaf_len = \
            struct.unpack(packet_format, packet[:format_size])

        lcaf_len = socket.ntohs(lcaf_len)
        packet = packet[format_size::]
        if (lcaf_len > len(packet)): return(None)

        if (lcaf_type == LISP_LCAF_AFI_LIST_TYPE):

            #
            # An AFI-List carries the RLOC address and/or a
            # distinguished rloc-name, possibly in nested LCAFs.
            #
            while (lcaf_len > 0):
                packet_format = "H"
                format_size = struct.calcsize(packet_format)
                if (lcaf_len < format_size): return(None)

                packet_len = len(packet)
                afi = struct.unpack(packet_format,
                    packet[:format_size])[0]
                afi = socket.ntohs(afi)

                if (afi == LISP_AFI_LCAF):
                    packet = self.decode_lcaf(packet, nonce)
                    if (packet == None): return(None)
                else:
                    packet = packet[format_size::]
                    self.rloc_name = None
                    if (afi == LISP_AFI_NAME):
                        packet, rloc_name = lisp_decode_dist_name(packet)
                        self.rloc_name = rloc_name
                    else:
                        self.rloc.afi = afi
                        packet = self.rloc.unpack_address(packet)
                        if (packet == None): return(None)
                        self.rloc.mask_len = self.rloc.host_mask_len()

                lcaf_len -= packet_len - len(packet)

        elif (lcaf_type == LISP_LCAF_GEO_COORD_TYPE):

            #
            # Geo-coordinates for this RLOC.
            #
            geo = lisp_geo("")
            packet = geo.decode_geo(packet, lcaf_len, rsvd2)
            if (packet == None): return(None)
            self.geo = geo

        elif (lcaf_type == LISP_LCAF_JSON_TYPE):

            #
            # A JSON blob: 2-byte length followed by the string.
            #
            packet_format = "H"
            format_size = struct.calcsize(packet_format)
            if (lcaf_len < format_size): return(None)

            json_len = struct.unpack(packet_format,
                packet[:format_size])[0]
            json_len = socket.ntohs(json_len)
            if (lcaf_len < format_size + json_len): return(None)

            packet = packet[format_size::]
            self.json = lisp_json("", packet[0:json_len])
            packet = packet[json_len::]

        elif (lcaf_type == LISP_LCAF_ELP_TYPE):

            #
            # Explicit-Locator-Path: a list of (flags, afi, address).
            #
            elp = lisp_elp(None)
            elp.elp_nodes = []
            while (lcaf_len > 0):
                flags, afi = struct.unpack("HH", packet[:4])

                afi = socket.ntohs(afi)
                if (afi == LISP_AFI_LCAF): return(None)

                elp_node = lisp_elp_node()
                elp.elp_nodes.append(elp_node)

                flags = socket.ntohs(flags)
                elp_node.eid = (flags & 0x4)
                elp_node.probe = (flags & 0x2)
                elp_node.strict = (flags & 0x1)
                elp_node.address.afi = afi
                elp_node.address.mask_len = \
                    elp_node.address.host_mask_len()
                packet = elp_node.address.unpack_address(packet[4::])
                lcaf_len -= elp_node.address.addr_length() + 4

            elp.select_elp_node()
            self.elp = elp

        elif (lcaf_type == LISP_LCAF_RLE_TYPE):

            #
            # Replication-List-Entry: (level, afi, address) nodes,
            # each with an optional distinguished-name.
            #
            rle = lisp_rle(None)
            rle.rle_nodes = []
            while (lcaf_len > 0):
                x, y, level, afi = struct.unpack("HBBH", packet[:6])

                afi = socket.ntohs(afi)
                if (afi == LISP_AFI_LCAF): return(None)

                rle_node = lisp_rle_node()
                rle.rle_nodes.append(rle_node)

                rle_node.level = level
                rle_node.address.afi = afi
                rle_node.address.mask_len = \
                    rle_node.address.host_mask_len()
                packet = rle_node.address.unpack_address(packet[6::])

                lcaf_len -= rle_node.address.addr_length() + 6
                if (lcaf_len >= 2):
                    afi = struct.unpack("H", packet[:2])[0]
                    if (socket.ntohs(afi) == LISP_AFI_NAME):
                        packet = packet[2::]
                        packet, rle_node.rloc_name = \
                            lisp_decode_dist_name(packet)

                        if (packet == None): return(None)
                        lcaf_len -= len(rle_node.rloc_name) + 1 + 2

            self.rle = rle
            self.rle.build_forwarding_list()

        elif (lcaf_type == LISP_LCAF_SECURITY_TYPE):

            #
            # Security LCAF: the peer's public key followed by the
            # RLOC address.  Decode the key material first, then
            # negotiate the encap keying state below.
            #
            orig_packet = packet
            stored_key = lisp_keys(1)
            packet = stored_key.decode_lcaf(orig_packet, lcaf_len)
            if (packet == None): return(None)

            #
            # Re-decode with the advertised cipher-suite parameters.
            #
            cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_CHACHA]
            if (stored_key.cipher_suite in cs_list):
                if (stored_key.cipher_suite == LISP_CS_25519_CBC):
                    key = lisp_keys(1, do_poly=False, do_chacha=False)

                if (stored_key.cipher_suite == LISP_CS_25519_CHACHA):
                    key = lisp_keys(1, do_poly=True, do_chacha=True)
            else:
                key = lisp_keys(1, do_poly=False, do_chacha=False)

            packet = key.decode_lcaf(orig_packet, lcaf_len)
            if (packet == None): return(None)

            if (len(packet) < 2): return(None)
            afi = struct.unpack("H", packet[:2])[0]
            self.rloc.afi = socket.ntohs(afi)
            if (len(packet) < self.rloc.addr_length()): return(None)
            packet = self.rloc.unpack_address(packet[2::])
            if (packet == None): return(None)
            self.rloc.mask_len = self.rloc.host_mask_len()

            #
            # A security LCAF around a null RLOC carries no keying
            # state worth acting on.
            #
            if (self.rloc.is_null()): return(packet)

            name_str = self.rloc_name
            if (name_str): name_str = blue(self.rloc_name, False)

            #
            # No stored keys: either start new keying or note that the
            # peer supplied no public key.
            #
            stored_keys = self.keys[1] if self.keys else None
            if (stored_keys == None):
                if (key.remote_public_key == None):
                    string = bold("No remote encap-public-key supplied",
                        False)
                    lprint(" {} for {}".format(string, name_str))
                    key = None
                else:
                    string = bold("New encap-keying with new state",
                        False)
                    lprint(" {} for {}".format(string, name_str))
                    key.compute_shared_key("encap")

            #
            # Stored keys exist: unkey, keep, or rekey depending on
            # what the peer sent.
            #
            if (stored_keys):
                if (key.remote_public_key == None):
                    key = None
                    string = bold("Remote encap-unkeying occurred",
                        False)
                    lprint(" {} for {}".format(string, name_str))
                elif (stored_keys.compare_keys(key)):
                    key = stored_keys
                    lprint(" Maintain stored encap-keys for {}".format(
                        name_str))
                else:
                    if (stored_keys.remote_public_key == None):
                        string = "New encap-keying for existing state"
                    else:
                        string = "Remote encap-rekeying"

                    lprint(" {} for {}".format(bold(string, False),
                        name_str))
                    stored_keys.remote_public_key = key.remote_public_key
                    stored_keys.compute_shared_key("encap")
                    key = stored_keys

            self.keys = [None, key, None, None]

        else:

            #
            # Unsupported LCAF type -- skip over it.
            #
            packet = packet[lcaf_len::]

        return(packet)

    def decode(self, packet, nonce):
        """
        Decode one RLOC-record from 'packet'.  Handles both plain
        AFI-encoded addresses and LCAF encodings.  Returns the
        remaining packet bytes or None on error.
        """
        packet_format = "BBBBHH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        self.priority, self.weight, self.mpriority, self.mweight, \
            flags, afi = struct.unpack(packet_format,
            packet[:format_size])

        flags = socket.ntohs(flags)
        afi = socket.ntohs(afi)
        self.local_bit = True if (flags & 0x0004) else False
        self.probe_bit = True if (flags & 0x0002) else False
        self.reach_bit = True if (flags & 0x0001) else False

        if (afi == LISP_AFI_LCAF):
            # Back up over the AFI field; decode_lcaf() re-reads it.
            packet = packet[format_size - 2::]
            packet = self.decode_lcaf(packet, nonce)
        else:
            self.rloc.afi = afi
            packet = packet[format_size::]
            packet = self.rloc.unpack_address(packet)

        self.rloc.mask_len = self.rloc.host_mask_len()
        return(packet)

    def end_of_rlocs(self, packet, rloc_count):
        """
        Advance past 'rloc_count' RLOC-records in 'packet', returning
        what follows them, or None when parsing fails.
        """
        for i in range(rloc_count):
            packet = self.decode(packet, None)
            if (packet == None): return(None)

        return(packet)
#
# Obfuscator filler removed: this region held only dead "if N - N:"
# statements whose conditions are always zero (false), so nothing
# here ever executed.
#
class lisp_map_referral():
    """
    A LISP DDT Map-Referral control message: a 4-byte header holding
    the message type and record count, followed by a 64-bit nonce.
    """

    def __init__(self):
        self.record_count = 0
        self.nonce = 0

    def print_map_referral(self):
        """Log a one-line summary of this Map-Referral message."""
        header = bold("Map-Referral", False)
        nonce = lisp_hex_string(self.nonce)
        lprint("{} -> record-count: {}, nonce: 0x{}".format(header,
            self.record_count, nonce))

    def encode(self):
        """Return the packed header plus nonce for this message."""
        first_long = (LISP_MAP_REFERRAL << 28) | self.record_count
        packet = struct.pack("I", socket.htonl(first_long))
        packet += struct.pack("Q", self.nonce)
        return(packet)

    def decode(self, packet):
        """
        Parse the header and nonce out of 'packet', storing them on
        self.  Returns the remaining bytes, or None when 'packet' is
        too short.
        """
        header_size = struct.calcsize("I")
        if (len(packet) < header_size): return(None)

        first_long = struct.unpack("I", packet[:header_size])[0]
        self.record_count = socket.ntohl(first_long) & 0xff
        packet = packet[header_size::]

        nonce_size = struct.calcsize("Q")
        if (len(packet) < nonce_size): return(None)

        self.nonce = struct.unpack("Q", packet[:nonce_size])[0]
        return(packet[nonce_size::])
if 91 - 91: iIii1I11I1II1 + iII111i . I11i % i11iIiiIii - i11iIiiIii + I1IiiI
if 75 - 75: I1ii11iIi11i / I1IiiI - iIii1I11I1II1 / OoO0O00 * OOooOOo
if 73 - 73: OoooooooOO % IiII / I1Ii111 * I11i + i1IIi % i11iIiiIii
if 91 - 91: i11iIiiIii
if 6 - 6: O0 - iIii1I11I1II1 + I1Ii111 . o0oOOo0O0Ooo * i11iIiiIii
if 53 - 53: OOooOOo / I1IiiI / oO0o * OOooOOo / i1IIi - I1Ii111
if 71 - 71: O0 + Oo0Ooo % oO0o - o0oOOo0O0Ooo
if 82 - 82: iIii1I11I1II1
class lisp_ddt_entry():
    """
    One DDT cache entry: an (EID, group) prefix, its list of delegation
    nodes, and for multicast entries a per-source sub-cache.
    """
    def __init__(self):
        self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.uptime = lisp_get_timestamp()
        self.delegation_set = []
        self.source_cache = None
        self.map_referrals_sent = 0

    def is_auth_prefix(self):
        """An authoritative prefix has no delegations and is not (*,G)."""
        if (len(self.delegation_set) != 0): return(False)
        if (self.is_star_g()): return(False)
        return(True)

    def is_ms_peer_entry(self):
        """True when the first delegation node is a map-server peer."""
        if (len(self.delegation_set) == 0): return(False)
        return(self.delegation_set[0].is_ms_peer())

    def print_referral_type(self):
        """Printable node type of the first delegation, or "unknown"."""
        if (len(self.delegation_set) == 0): return("unknown")
        first_node = self.delegation_set[0]
        return(first_node.print_node_type())

    def print_eid_tuple(self):
        return(lisp_print_eid_tuple(self.eid, self.group))

    def add_cache(self):
        """
        Insert this entry into lisp_ddt_cache. Unicast entries key on the
        EID; multicast entries live in a per-group entry's source cache,
        which is created on demand.
        """
        if (self.group.is_null()):
            lisp_ddt_cache.add_cache(self.eid, self)
        else:
            group_entry = lisp_ddt_cache.lookup_cache(self.group, True)
            if (group_entry == None):
                group_entry = lisp_ddt_entry()
                group_entry.eid.copy_address(self.group)
                group_entry.group.copy_address(self.group)
                lisp_ddt_cache.add_cache(self.group, group_entry)

            # A null source EID means this is a (*,G) entry.
            if (self.eid.is_null()): self.eid.make_default_route(group_entry.group)
            group_entry.add_source_entry(self)

    def add_source_entry(self, source_ddt):
        """Add a (S,G) entry to this group entry's source cache."""
        if (self.source_cache == None): self.source_cache = lisp_cache()
        self.source_cache.add_cache(source_ddt.eid, source_ddt)

    def lookup_source_cache(self, source, exact):
        """Look up source in the per-source cache; None when absent."""
        if (self.source_cache == None): return(None)
        return(self.source_cache.lookup_cache(source, exact))

    def is_star_g(self):
        """True for a (*,G) entry, i.e. group set and EID equal to it."""
        if (self.group.is_null()): return(False)
        return(self.eid.is_exact_match(self.group))
if 15 - 15: o0oOOo0O0Ooo . i11iIiiIii * I1ii11iIi11i / ooOoO0o
if 41 - 41: ooOoO0o + IiII . i1IIi + iIii1I11I1II1
if 57 - 57: i11iIiiIii * oO0o * i11iIiiIii
class lisp_ddt_node():
    """
    One delegation target within a DDT entry: the delegate's address plus
    flags classifying it as a DDT child, map-server child, or map-server
    peer, and the priority/weight advertised in referrals.
    """
    def __init__(self):
        self.delegate_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.public_key = ""
        self.map_server_peer = False
        self.map_server_child = False
        self.priority = 0
        self.weight = 0

    def print_node_type(self):
        """Printable classification; the three predicates cover all cases."""
        if (self.is_ddt_child()): return("ddt-child")
        if (self.is_ms_child()): return("map-server-child")
        if (self.is_ms_peer()): return("map-server-peer")

    def is_ddt_child(self):
        """A DDT child is anything not flagged as map-server child/peer."""
        return(not (self.map_server_child or self.map_server_peer))

    def is_ms_child(self):
        return(self.map_server_child)

    def is_ms_peer(self):
        return(self.map_server_peer)
if 78 - 78: oO0o % iII111i / i1IIi / ooOoO0o
if 44 - 44: o0oOOo0O0Ooo + Ii1I + I1IiiI % O0
if 100 - 100: OoooooooOO
if 27 - 27: i11iIiiIii % II111iiii + I1Ii111
if 76 - 76: OOooOOo - I1Ii111 + iIii1I11I1II1 + I1IiiI * oO0o
if 93 - 93: i11iIiiIii * i11iIiiIii - I1IiiI + iIii1I11I1II1 * i11iIiiIii
if 14 - 14: ooOoO0o . OoooooooOO . I1IiiI - IiII + iIii1I11I1II1
class lisp_ddt_map_request():
    """
    A Map-Request queued while the DDT hierarchy is being walked. Entries
    are keyed by nonce in the global lisp_ddt_map_requestQ and retried by
    a threading.Timer until answered or given up on.
    """
    def __init__(self, lisp_sockets, packet, eid, group, nonce):
        self.uptime = lisp_get_timestamp()
        self.lisp_sockets = lisp_sockets
        self.packet = packet
        self.eid = eid
        self.group = group
        self.nonce = nonce
        self.mr_source = None
        self.sport = 0
        self.itr = None
        self.retry_count = 0
        self.send_count = 0
        self.retransmit_timer = None
        self.last_request_sent_to = None
        self.from_pitr = False
        self.tried_root = False
        # [EID-prefix, group-prefix] of the last referral cache hit.
        self.last_cached_prefix = [None, None]

    def print_ddt_map_request(self):
        """Log a one-line summary of the queued request via lprint()."""
        lprint("Queued Map-Request from {}ITR {}->{}, nonce 0x{}".format(
            "P" if self.from_pitr else "",
            red(self.itr.print_address(), False),
            green(self.eid.print_address(), False), self.nonce))

    def queue_map_request(self):
        """Start the retransmit timer and register this request by nonce."""
        self.retransmit_timer = threading.Timer(LISP_DDT_MAP_REQUEST_INTERVAL,
            lisp_retransmit_ddt_map_request, [self])
        self.retransmit_timer.start()
        lisp_ddt_map_requestQ[str(self.nonce)] = self

    def dequeue_map_request(self):
        """Cancel the retransmit timer and remove the queue entry, if any."""
        self.retransmit_timer.cancel()
        #
        # Fix: dict.has_key() was removed in Python 3; the "in" operator
        # is equivalent and also works on Python 2.
        #
        if (str(self.nonce) in lisp_ddt_map_requestQ):
            lisp_ddt_map_requestQ.pop(str(self.nonce))

    def print_eid_tuple(self):
        return(lisp_print_eid_tuple(self.eid, self.group))
if 45 - 45: iIii1I11I1II1 * I1IiiI . OoOoOO00
if 97 - 97: I11i % II111iiii % Ii1I . II111iiii . iIii1I11I1II1
if 98 - 98: i11iIiiIii + O0 - O0 - iII111i
if 25 - 25: oO0o / O0 + I1Ii111 % i11iIiiIii / I1IiiI
if 62 - 62: iII111i . I11i * i1IIi + iII111i
if 95 - 95: Ii1I / o0oOOo0O0Ooo % ooOoO0o - I1IiiI / OOooOOo * OOooOOo
if 6 - 6: OoO0O00 % IiII + iIii1I11I1II1
if 18 - 18: II111iiii . Ii1I + OoOoOO00 + O0 - I11i
if 30 - 30: II111iiii
if 26 - 26: I11i - i1IIi - Oo0Ooo * O0 * OOooOOo . OoooooooOO
if 99 - 99: oO0o . OoO0O00 / OOooOOo
if 12 - 12: iIii1I11I1II1 + ooOoO0o * I1Ii111 % OoooooooOO / iIii1I11I1II1
if 43 - 43: O0 . i1IIi - OoooooooOO - i1IIi - I1ii11iIi11i
if 8 - 8: OoOoOO00 / Ii1I
if 12 - 12: iIii1I11I1II1
if 52 - 52: oO0o . I1ii11iIi11i + oO0o
if 73 - 73: II111iiii / i11iIiiIii / ooOoO0o
if 1 - 1: iII111i + OoOoOO00 / IiII - I1IiiI % I1IiiI
if 6 - 6: OoOoOO00 - i1IIi + II111iiii % oO0o
if 72 - 72: OOooOOo + OOooOOo
# DDT Map-Referral action codes. The non-negative values (0..5) appear on
# the wire and index lisp_map_referral_action_string below; the two
# negative values are internal sentinels that are never encoded.
LISP_DDT_ACTION_SITE_NOT_FOUND = - 2
LISP_DDT_ACTION_NULL = - 1
LISP_DDT_ACTION_NODE_REFERRAL = 0
LISP_DDT_ACTION_MS_REFERRAL = 1
LISP_DDT_ACTION_MS_ACK = 2
LISP_DDT_ACTION_MS_NOT_REG = 3
LISP_DDT_ACTION_DELEGATION_HOLE = 4
LISP_DDT_ACTION_NOT_AUTH = 5
# Highest valid wire action value (used for range checks).
LISP_DDT_ACTION_MAX = LISP_DDT_ACTION_NOT_AUTH
if 30 - 30: I11i
# Printable names for the wire action codes, indexed by action value.
lisp_map_referral_action_string = [
 "node-referral" , "ms-referral" , "ms-ack" , "ms-not-registered" ,
 "delegation-hole" , "not-authoritative" ]
if 15 - 15: O0 - i1IIi . iIii1I11I1II1 - i11iIiiIii / Ii1I
if 11 - 11: iIii1I11I1II1 + I1IiiI
if 15 - 15: o0oOOo0O0Ooo
if 55 - 55: i11iIiiIii / OoooooooOO - I11i
if 89 - 89: I11i - i1IIi - i1IIi * OOooOOo - O0
if 94 - 94: Oo0Ooo / I11i . I1ii11iIi11i
if 31 - 31: i11iIiiIii + iIii1I11I1II1 . II111iiii
if 72 - 72: I1Ii111 * OoO0O00 + Oo0Ooo / Ii1I % OOooOOo
if 84 - 84: OoOoOO00 / o0oOOo0O0Ooo
if 9 - 9: Ii1I
if 76 - 76: I1IiiI % Oo0Ooo / iIii1I11I1II1 - Oo0Ooo
if 34 - 34: OoOoOO00 - i1IIi + OOooOOo + Ii1I . o0oOOo0O0Ooo
if 42 - 42: OoO0O00
if 59 - 59: OoO0O00 . I1Ii111 % OoO0O00
if 22 - 22: Oo0Ooo
if 21 - 21: o0oOOo0O0Ooo
if 86 - 86: ooOoO0o / iIii1I11I1II1 . OOooOOo
if 93 - 93: Oo0Ooo / II111iiii . Oo0Ooo + i1IIi + i1IIi
if 30 - 30: OoOoOO00 . OOooOOo % OOooOOo / II111iiii + i1IIi
if 61 - 61: i1IIi % II111iiii * II111iiii . o0oOOo0O0Ooo / I1ii11iIi11i - I1Ii111
if 93 - 93: Ii1I - i1IIi
if 3 - 3: oO0o + OoO0O00 - iII111i / Ii1I
if 58 - 58: Ii1I * I11i
if 95 - 95: oO0o
if 49 - 49: I1IiiI
if 23 - 23: I1Ii111
if 5 - 5: I1ii11iIi11i % OoOoOO00 . OoooooooOO . o0oOOo0O0Ooo + i11iIiiIii
if 54 - 54: ooOoO0o - O0 + iII111i
if 34 - 34: Ii1I - OOooOOo % iII111i
if 48 - 48: oO0o - O0
if 17 - 17: iIii1I11I1II1 . IiII / ooOoO0o % I11i + o0oOOo0O0Ooo - iIii1I11I1II1
if 95 - 95: OoOoOO00 + OOooOOo - I11i * i1IIi + i1IIi * O0
if 60 - 60: Oo0Ooo + I11i % iIii1I11I1II1 % oO0o - I1Ii111 / o0oOOo0O0Ooo
if 9 - 9: IiII / oO0o % O0 * I1Ii111 - iIii1I11I1II1 % i1IIi
if 83 - 83: OoOoOO00 + OOooOOo / OoooooooOO
if 39 - 39: OoO0O00 % iII111i . oO0o . II111iiii - i11iIiiIii
if 85 - 85: O0 - OoOoOO00
if 17 - 17: o0oOOo0O0Ooo / i1IIi / OOooOOo
if 91 - 91: I1ii11iIi11i / Ii1I - OoOoOO00 . I11i / oO0o
if 16 - 16: IiII % iII111i . oO0o . I1IiiI % O0 * I11i
if 99 - 99: OoOoOO00 / OoooooooOO + iII111i * I11i * i11iIiiIii + OOooOOo
if 40 - 40: II111iiii / I11i % I1IiiI - O0
if 39 - 39: i11iIiiIii - OoOoOO00 % OOooOOo + ooOoO0o + i11iIiiIii
if 59 - 59: IiII / OoOoOO00 - I1Ii111 - ooOoO0o . oO0o
if 87 - 87: oO0o + I1IiiI * I1Ii111 * o0oOOo0O0Ooo + O0
if 21 - 21: I1Ii111 + OoOoOO00 + OoOoOO00 . II111iiii / I1Ii111 . I1IiiI
if 66 - 66: I1Ii111 % oO0o . iII111i * i1IIi
if 81 - 81: OoooooooOO * I1IiiI / I1Ii111
if 10 - 10: I1IiiI - II111iiii / IiII * II111iiii
if 67 - 67: II111iiii . Ii1I % oO0o . Oo0Ooo + IiII
if 10 - 10: OOooOOo - OoO0O00 * oO0o / iIii1I11I1II1 - OoOoOO00
if 20 - 20: IiII % I1IiiI + iIii1I11I1II1 % iII111i
if 100 - 100: o0oOOo0O0Ooo - Oo0Ooo % I1Ii111 . i11iIiiIii % OoooooooOO
class lisp_info():
    """
    LISP NAT-traversal Info-Request / Info-Reply message. The info_reply
    flag selects which of the two encodings encode() and decode() handle:
    an Info-Request carries an optional hostname, an Info-Reply carries an
    LCAF NAT-Traversal record (ports, RLOCs, RTR list).
    """
    def __init__(self):
        self.info_reply = False
        self.nonce = 0
        self.private_etr_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.global_etr_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.global_ms_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.ms_port = 0
        self.etr_port = 0
        self.rtr_list = []
        self.hostname = lisp_hostname

    def print_info(self):
        """Log a one-line summary of this Info message via lprint()."""
        if (self.info_reply):
            msg_name = "Info-Reply"
            summary = (", ms-port: {}, etr-port: {}, global-rloc: {}, " +
                "ms-rloc: {}, private-rloc: {}, RTR-list: ").format(
                self.ms_port, self.etr_port,
                red(self.global_etr_rloc.print_address_no_iid(), False),
                red(self.global_ms_rloc.print_address_no_iid(), False),
                red(self.private_etr_rloc.print_address_no_iid(), False))
            if (len(self.rtr_list) == 0): summary += "empty, "
            for rtr in self.rtr_list:
                summary += red(rtr.print_address_no_iid(), False) + ", "

            # Strip the trailing ", " separator.
            summary = summary[0:-2]
        else:
            msg_name = "Info-Request"
            host = "<none>" if self.hostname == None else self.hostname
            summary = ", hostname: {}".format(blue(host, False))

        lprint("{} -> nonce: 0x{}{}".format(bold(msg_name, False),
            lisp_hex_string(self.nonce), summary))

    def encode(self):
        """
        Build the message: type/flags word, nonce, three zeroed words
        (key-id/auth-len/TTL/EID fields), then either a hostname AFI
        record (Info-Request) or an LCAF NAT-Traversal record
        (Info-Reply). Returns the packed byte string.
        """
        first_long = (LISP_NAT_INFO << 28)
        if (self.info_reply): first_long |= (1 << 27)

        packet = struct.pack("I", socket.htonl(first_long))
        packet += struct.pack("Q", self.nonce)
        packet += struct.pack("III", 0, 0, 0)

        #
        # Info-Request: append hostname as a distinguished-name AFI
        # record when configured, else a zero AFI.
        #
        if (self.info_reply == False):
            if (self.hostname == None):
                packet += struct.pack("H", 0)
            else:
                packet += struct.pack("H", socket.htons(LISP_AFI_NAME))
                packet += self.hostname + "\0"

            return(packet)

        #
        # Info-Reply: LCAF NAT-Traversal record with the translated
        # ports, global/private ETR RLOCs and the RTR list.
        #
        lcaf_afi = socket.htons(LISP_AFI_LCAF)
        lcaf_type = LISP_LCAF_NAT_TYPE
        lcaf_length = socket.htons(16)
        ms_port = socket.htons(self.ms_port)
        etr_port = socket.htons(self.etr_port)
        packet += struct.pack("HHBBHHHH", lcaf_afi, 0, lcaf_type, 0,
            lcaf_length, ms_port, etr_port,
            socket.htons(self.global_etr_rloc.afi))
        packet += self.global_etr_rloc.pack_address()
        packet += struct.pack("HH", 0, socket.htons(self.private_etr_rloc.afi))
        packet += self.private_etr_rloc.pack_address()
        if (len(self.rtr_list) == 0): packet += struct.pack("H", 0)

        for rtr in self.rtr_list:
            packet += struct.pack("H", socket.htons(rtr.afi))
            packet += rtr.pack_address()

        return(packet)

    def decode(self, packet):
        """
        Parse an Info-Request/Info-Reply into self. Returns the original
        buffer on success (parsed state lives on self) or None when the
        packet is malformed/truncated in a mandatory field.
        """
        orig_packet = packet
        packet_format = "I"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        first_long = struct.unpack(packet_format, packet[:format_size])
        first_long = first_long[0]
        packet = packet[format_size::]

        packet_format = "Q"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        nonce = struct.unpack(packet_format, packet[:format_size])

        first_long = socket.ntohl(first_long)
        self.nonce = nonce[0]
        self.info_reply = first_long & 0x08000000
        self.hostname = None
        packet = packet[format_size::]

        #
        # Key-id/auth-len word; no authentication data is supported, so a
        # non-zero auth length is an error.
        #
        packet_format = "HH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        key_id, auth_len = struct.unpack(packet_format, packet[:format_size])
        if (auth_len != 0): return(None)

        packet = packet[format_size::]
        packet_format = "IBBH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        ttl, rsvd, mask_len, eid_afi = struct.unpack(packet_format,
            packet[:format_size])

        if (eid_afi != 0): return(None)
        packet = packet[format_size::]

        #
        # Info-Request: an optional hostname follows as an AFI=name record.
        #
        if (self.info_reply == False):
            packet_format = "H"
            format_size = struct.calcsize(packet_format)
            if (len(packet) >= format_size):
                afi = struct.unpack(packet_format, packet[:format_size])[0]
                if (socket.ntohs(afi) == LISP_AFI_NAME):
                    packet = packet[format_size::]
                    packet, self.hostname = lisp_decode_dist_name(packet)

            return(orig_packet)

        #
        # Info-Reply: parse the LCAF NAT-Traversal record header.
        #
        packet_format = "HHBBHHH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        afi, rsvd1, lcaf_type, rsvd2, lcaf_length, ms_port, etr_port = \
            struct.unpack(packet_format, packet[:format_size])

        if (socket.ntohs(afi) != LISP_AFI_LCAF): return(None)

        self.ms_port = socket.ntohs(ms_port)
        self.etr_port = socket.ntohs(etr_port)
        packet = packet[format_size::]

        #
        # Global ETR RLOC; a zero AFI means the address is absent.
        #
        packet_format = "H"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        afi = struct.unpack(packet_format, packet[:format_size])[0]
        packet = packet[format_size::]
        if (afi != 0):
            self.global_etr_rloc.afi = socket.ntohs(afi)
            packet = self.global_etr_rloc.unpack_address(packet)
            if (packet == None): return(None)
            self.global_etr_rloc.mask_len = self.global_etr_rloc.host_mask_len()

        #
        # Optional global MS RLOC; from here on a short packet is treated
        # as complete rather than malformed.
        #
        if (len(packet) < format_size): return(orig_packet)

        afi = struct.unpack(packet_format, packet[:format_size])[0]
        packet = packet[format_size::]
        if (afi != 0):
            self.global_ms_rloc.afi = socket.ntohs(afi)
            packet = self.global_ms_rloc.unpack_address(packet)
            if (packet == None): return(orig_packet)
            self.global_ms_rloc.mask_len = self.global_ms_rloc.host_mask_len()

        #
        # Optional private ETR RLOC.
        #
        if (len(packet) < format_size): return(orig_packet)

        afi = struct.unpack(packet_format, packet[:format_size])[0]
        packet = packet[format_size::]
        if (afi != 0):
            self.private_etr_rloc.afi = socket.ntohs(afi)
            packet = self.private_etr_rloc.unpack_address(packet)
            if (packet == None): return(orig_packet)
            self.private_etr_rloc.mask_len = self.private_etr_rloc.host_mask_len()

        #
        # Any remaining AFI/address pairs form the RTR list; zero AFIs
        # are placeholders and skipped.
        #
        while (len(packet) >= format_size):
            afi = struct.unpack(packet_format, packet[:format_size])[0]
            packet = packet[format_size::]
            if (afi == 0): continue
            rtr = lisp_address(socket.ntohs(afi), "", 0, 0)
            packet = rtr.unpack_address(packet)
            if (packet == None): return(orig_packet)
            rtr.mask_len = rtr.host_mask_len()
            self.rtr_list.append(rtr)

        return(orig_packet)
if 5 - 5: o0oOOo0O0Ooo + iII111i / OoooooooOO + Ii1I . OoOoOO00 / oO0o
if 18 - 18: II111iiii . o0oOOo0O0Ooo
if 75 - 75: OoooooooOO - Oo0Ooo
class lisp_nat_info():
    """
    Records a NAT'ed xTR learned from an Info-Request: its translated
    address and port plus its hostname, stamped with the learn time.
    """
    def __init__(self, addr_str, hostname, port):
        self.address = addr_str
        self.hostname = hostname
        self.port = port
        self.uptime = lisp_get_timestamp()

    def timed_out(self):
        """Stale once two Info-Request intervals pass without a refresh."""
        elapsed = time.time() - self.uptime
        return(elapsed >= (LISP_INFO_INTERVAL * 2))
if 91 - 91: IiII . OoO0O00 * Ii1I / o0oOOo0O0Ooo
if 41 - 41: I1IiiI . OoO0O00 / i1IIi . Oo0Ooo . oO0o
if 44 - 44: iII111i * I11i + i11iIiiIii + i1IIi / IiII * II111iiii
class lisp_info_source():
    """
    The source of a received Info-Request: its IPv4 translated address,
    source port, and hostname. Instances are indexed both by address and
    by the nonce of the Info-Reply sent back.
    """
    def __init__(self, hostname, addr_str, port):
        self.address = lisp_address(LISP_AFI_IPV4, addr_str, 32, 0)
        self.port = port
        self.uptime = lisp_get_timestamp()
        self.nonce = None
        self.hostname = hostname
        self.no_timeout = False

    def cache_address_for_info_source(self):
        """Index this source by "address+hostname" in the global table."""
        key = self.address.print_address_no_iid() + self.hostname
        lisp_info_sources_by_address[key] = self

    def cache_nonce_for_info_source(self, nonce):
        """Remember the Info-Reply nonce and index this source by it."""
        self.nonce = nonce
        lisp_info_sources_by_nonce[nonce] = self
if 48 - 48: Oo0Ooo / oO0o + iII111i % iII111i
if 9 - 9: I1ii11iIi11i - o0oOOo0O0Ooo . Oo0Ooo + I1ii11iIi11i . OOooOOo
if 30 - 30: OoooooooOO - iIii1I11I1II1 / oO0o * Ii1I / Ii1I
if 52 - 52: OoOoOO00 - OoO0O00 + I1IiiI + IiII
if 49 - 49: oO0o / I11i - oO0o
if 31 - 31: OoOoOO00 + I1IiiI + I1ii11iIi11i + I11i * II111iiii % oO0o
if 90 - 90: OOooOOo * iIii1I11I1II1 / i1IIi
if 60 - 60: OOooOOo * I1Ii111 . oO0o
if 47 - 47: oO0o % OOooOOo / OOooOOo % OoOoOO00 % I1Ii111 / OoOoOO00
if 51 - 51: I1IiiI . I11i - OoOoOO00
if 10 - 10: Oo0Ooo * OOooOOo / IiII . o0oOOo0O0Ooo
def lisp_concat_auth_data(alg_id, auth1, auth2, auth3, auth4):
    """
    Concatenate the unpacked authentication words into one zero-filled
    hex string for the given hash algorithm. Words passed as "" are
    treated as absent. On x86 (little-endian) hosts each word is first
    swapped back to wire order; auth3 is a 32-bit word for SHA-1-96 and
    a 64-bit word otherwise.
    """
    if (lisp_is_x86()):
        if (auth1 != ""): auth1 = byte_swap_64(auth1)
        if (auth2 != ""): auth2 = byte_swap_64(auth2)
        if (auth3 != ""):
            if (alg_id == LISP_SHA_1_96_ALG_ID):
                auth3 = socket.ntohl(auth3)
            else:
                auth3 = byte_swap_64(auth3)
        if (auth4 != ""): auth4 = byte_swap_64(auth4)

    if (alg_id == LISP_SHA_1_96_ALG_ID):
        # SHA-1-96: two 64-bit words plus one 32-bit word.
        words = [(auth1, 16), (auth2, 16), (auth3, 8)]
        auth_data = ""
        for word, width in words:
            auth_data += lisp_hex_string(word).zfill(width)

    if (alg_id == LISP_SHA_256_128_ALG_ID):
        # SHA-256-128: four 64-bit words, 16 hex digits each.
        words = [(auth1, 16), (auth2, 16), (auth3, 16), (auth4, 16)]
        auth_data = ""
        for word, width in words:
            auth_data += lisp_hex_string(word).zfill(width)

    return(auth_data)
if 83 - 83: O0
if 35 - 35: i11iIiiIii - I11i . OoOoOO00 * II111iiii % i11iIiiIii
if 55 - 55: o0oOOo0O0Ooo / O0 / OoooooooOO * Oo0Ooo % iII111i
if 24 - 24: I1ii11iIi11i % OOooOOo + OoooooooOO + OoO0O00
if 100 - 100: Oo0Ooo % OoO0O00 - OoOoOO00
if 46 - 46: o0oOOo0O0Ooo
if 28 - 28: i1IIi
if 81 - 81: oO0o % OoooooooOO . I1Ii111 - OoOoOO00 / I1IiiI
if 62 - 62: I1Ii111 * I11i / I11i
if 42 - 42: ooOoO0o * ooOoO0o / Ii1I / OOooOOo * OOooOOo
def lisp_open_listen_socket(local_addr, port):
    """
    Open and bind a datagram listen socket. A numeric port string means a
    UDP socket bound to local_addr; anything else is taken as a
    unix-domain socket pathname (any stale file is removed first).
    Returns None for IPv6 on Raspbian, which lacks usable v6 support here.
    """
    if (port.isdigit()):
        # The IPv4 test runs first so a v4-mapped "::ffff:a.b.c.d"
        # address (which contains both "." and ":") ends up AF_INET6.
        if (local_addr.find(".") != -1):
            listen_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

        if (local_addr.find(":") != -1):
            if (lisp_is_raspbian()): return(None)
            listen_sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)

        listen_sock.bind((local_addr, int(port)))
    else:
        sock_name = port
        if (os.path.exists(sock_name)):
            os.system("rm " + sock_name)
            time.sleep(1)

        listen_sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
        listen_sock.bind(sock_name)

    return(listen_sock)
if 59 - 59: iII111i - I1IiiI . I1IiiI - Ii1I * OoOoOO00
if 27 - 27: Oo0Ooo + Oo0Ooo / II111iiii % I1Ii111
if 11 - 11: Ii1I
if 54 - 54: I1IiiI * I1Ii111 / ooOoO0o / iIii1I11I1II1 % iII111i / oO0o
if 11 - 11: ooOoO0o + I1IiiI + Ii1I . II111iiii
if 50 - 50: Oo0Ooo
if 14 - 14: O0
def lisp_open_send_socket(internal_name, afi):
    """
    Create a datagram send socket. An empty internal_name yields an
    unbound UDP socket for the given address family; otherwise a
    unix-domain socket bound to internal_name is returned (any stale
    file is removed first). Returns None for IPv6 on Raspbian.
    """
    if (internal_name == ""):
        if (afi == LISP_AFI_IPV4):
            send_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

        if (afi == LISP_AFI_IPV6):
            if (lisp_is_raspbian()): return(None)
            send_sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
    else:
        if (os.path.exists(internal_name)): os.system("rm " + internal_name)
        send_sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
        send_sock.bind(internal_name)

    return(send_sock)
if 67 - 67: o0oOOo0O0Ooo . Oo0Ooo % I11i
if 38 - 38: OOooOOo - OoO0O00 . ooOoO0o
if 50 - 50: o0oOOo0O0Ooo
if 85 - 85: II111iiii . iII111i - i1IIi
if 23 - 23: iII111i . Ii1I - OoO0O00 / I1ii11iIi11i / O0
if 4 - 4: i1IIi % Oo0Ooo % Ii1I * ooOoO0o - I11i
if 76 - 76: iIii1I11I1II1 / ooOoO0o % I1ii11iIi11i % OOooOOo
def lisp_close_socket(sock, internal_name):
    """
    Close an IPC socket and remove its unix-domain socket file, when one
    exists for internal_name.
    """
    sock.close()
    if (os.path.exists(internal_name)):
        os.system("rm " + internal_name)
    return
if 13 - 13: IiII
if 56 - 56: Oo0Ooo
if 55 - 55: i11iIiiIii + iIii1I11I1II1 / i1IIi / I1ii11iIi11i
if 64 - 64: IiII . OoO0O00 * i11iIiiIii
if 18 - 18: Ii1I % o0oOOo0O0Ooo - Oo0Ooo
if 28 - 28: IiII
if 93 - 93: Oo0Ooo % i1IIi
if 51 - 51: oO0o % O0
def lisp_is_running(node):
    """
    Return True when the named LISP component is running, i.e. its
    unix-domain socket file exists.
    """
    return(os.path.exists(node))
if 41 - 41: I1IiiI * I1IiiI . I1Ii111
if 38 - 38: I1IiiI % i11iIiiIii
if 17 - 17: i11iIiiIii
if 81 - 81: I1Ii111
if 25 - 25: I1IiiI
if 52 - 52: I1ii11iIi11i % i1IIi . IiII % OoOoOO00
if 50 - 50: OOooOOo * I1IiiI / o0oOOo0O0Ooo
if 91 - 91: iIii1I11I1II1 / OOooOOo * O0 . o0oOOo0O0Ooo + oO0o / I1ii11iIi11i
if 33 - 33: II111iiii + Ii1I
def lisp_packet_ipc(packet, source, sport):
    """
    Build a "packet@" IPC message: payload length, source component,
    source port, then the payload itself.
    """
    return("packet@{}@{}@{}@{}".format(len(packet), source, sport, packet))
if 46 - 46: IiII + O0 + i1IIi + ooOoO0o / iII111i
if 94 - 94: oO0o + iII111i * OoOoOO00 - i1IIi / OoooooooOO
if 59 - 59: I11i % Ii1I / OoOoOO00
if 99 - 99: Ii1I + II111iiii / i11iIiiIii - IiII / iII111i + iII111i
if 55 - 55: IiII + OoooooooOO * I1ii11iIi11i . IiII * I1ii11iIi11i + IiII
if 81 - 81: iIii1I11I1II1 . ooOoO0o + OoOoOO00
if 31 - 31: I11i / OoOoOO00 + o0oOOo0O0Ooo
if 80 - 80: Oo0Ooo
if 58 - 58: I1Ii111 + OOooOOo
def lisp_control_packet_ipc(packet, source, dest, dport):
    """
    Build a "control-packet@" IPC message: destination, destination port,
    then the payload. (source is accepted for signature symmetry with the
    other IPC builders but is not encoded.)
    """
    return("control-packet@{}@{}@{}".format(dest, dport, packet))
if 76 - 76: II111iiii - o0oOOo0O0Ooo % OoO0O00 + iII111i
if 38 - 38: I1Ii111 - I11i * i1IIi + iIii1I11I1II1
if 41 - 41: Ii1I . OoO0O00 + I1ii11iIi11i + OoOoOO00
if 76 - 76: iII111i - iIii1I11I1II1
if 23 - 23: I11i / OoO0O00 % OOooOOo
if 9 - 9: ooOoO0o % I1ii11iIi11i . OoooooooOO + OoO0O00 % OOooOOo * OoooooooOO
if 21 - 21: Ii1I % O0
def lisp_data_packet_ipc(packet, source):
    """
    Build a "data-packet@" IPC message: payload length, source component,
    an empty port field, then the payload.
    """
    return("data-packet@{}@{}@@{}".format(len(packet), source, packet))
if 15 - 15: II111iiii * Ii1I + IiII % iII111i
if 96 - 96: II111iiii * I1Ii111 / Oo0Ooo
if 35 - 35: I1IiiI
if 54 - 54: I1ii11iIi11i % o0oOOo0O0Ooo . i1IIi
if 72 - 72: Ii1I
if 87 - 87: iII111i - I1IiiI
if 54 - 54: iIii1I11I1II1 + oO0o * o0oOOo0O0Ooo % OoooooooOO . Oo0Ooo
if 32 - 32: iII111i
if 33 - 33: ooOoO0o + Oo0Ooo * OoOoOO00 % ooOoO0o * oO0o - OoO0O00
def lisp_command_ipc(packet, source):
    """
    Build a "command@" IPC message: command length, source component, an
    empty port field, then the command text.
    """
    return("command@{}@{}@@{}".format(len(packet), source, packet))
if 40 - 40: I11i . OoooooooOO * O0 / I1Ii111 + O0
if 97 - 97: ooOoO0o - ooOoO0o * OOooOOo % OoOoOO00 - OoOoOO00 - I1Ii111
if 52 - 52: O0 % iII111i
if 81 - 81: OoooooooOO % OoOoOO00 % Oo0Ooo - I1IiiI
if 43 - 43: o0oOOo0O0Ooo % o0oOOo0O0Ooo
if 48 - 48: O0
if 5 - 5: OOooOOo / i11iIiiIii . I11i % OOooOOo
if 1 - 1: II111iiii + O0 * OoOoOO00 / IiII . O0
if 87 - 87: IiII + I1IiiI
def lisp_api_ipc(source, data):
    """
    Build an "api@" IPC message: data length, source component, an empty
    port field, then the API data.
    """
    return("api@{}@{}@@{}".format(len(data), source, data))
if 74 - 74: OoO0O00 + OoO0O00 % iII111i / I11i / O0
if 54 - 54: o0oOOo0O0Ooo / OoooooooOO * ooOoO0o . OoOoOO00 - I1Ii111
if 69 - 69: oO0o - OoO0O00
if 80 - 80: ooOoO0o + iIii1I11I1II1 . II111iiii + I1IiiI - oO0o % OoOoOO00
if 10 - 10: iIii1I11I1II1
if 44 - 44: OoOoOO00 * oO0o . I1ii11iIi11i + i11iIiiIii
if 85 - 85: I11i
if 36 - 36: ooOoO0o % OoO0O00
if 1 - 1: OoooooooOO - OoOoOO00
def lisp_ipc(packet, send_socket, node):
    """
    Send an IPC message to another LISP component over its unix-domain
    socket, segmenting large messages and retrying failed sends with
    exponential backoff (up to 12 retries before declaring the peer down).

    Fix: the handler used the Python-2-only "except socket.error, e:"
    syntax; "except socket.error as e:" is equivalent and valid on
    Python 2.6+ and Python 3.
    """
    #
    # Suppress the send when the receiver is not running at all.
    #
    if (lisp_is_running(node) == False):
        lprint("Suppress sending IPC to {}".format(node))
        return

    # Control packets may be larger; everything else is segmented at 1500.
    segment_size = 1500 if (packet.find("control-packet") == -1) else 9000

    offset = 0
    length = len(packet)
    retry_count = 0
    sleep_time = .001
    while (length > 0):
        segment_len = min(length, segment_size)
        segment = packet[offset:segment_len + offset]

        try:
            send_socket.sendto(segment, node)
            lprint("Send IPC {}-out-of-{} byte to {} succeeded".format(len(segment), len(packet), node))

            retry_count = 0
            sleep_time = .001

        except socket.error as e:
            if (retry_count == 12):
                lprint("Giving up on {}, consider it down".format(node))
                break

            lprint("Send IPC {}-out-of-{} byte to {} failed: {}".format(len(segment), len(packet), node, e))

            retry_count += 1
            time.sleep(sleep_time)

            lprint("Retrying after {} ms ...".format(sleep_time * 1000))
            sleep_time *= 2
            continue

        offset += segment_len
        length -= segment_len

    return
if 83 - 83: i1IIi . OoOoOO00 . i1IIi / OOooOOo * O0
if 99 - 99: OoooooooOO . OoOoOO00 / II111iiii
if 64 - 64: iII111i / i1IIi . I1IiiI + O0
if 5 - 5: O0 . i11iIiiIii
if 71 - 71: o0oOOo0O0Ooo + iII111i + ooOoO0o
if 27 - 27: OoooooooOO . iII111i * I1Ii111 % O0 + OoooooooOO - iII111i
if 86 - 86: i1IIi
def lisp_format_packet(packet):
    """
    Return a printable hex dump of 'packet': the payload is hexlified
    and broken into 8-nibble words separated by single spaces.

    NOTE(review): the loop limit starts at twice the hexlified length
    and shrinks by 4 per pass while the offset grows by 8, so a few
    trailing separator spaces can be appended after the string is
    exhausted; that quirk is preserved intentionally.
    """
    hex_string = binascii.hexlify(packet)
    output = ""
    offset = 0
    limit = len(hex_string) * 2
    while (offset < limit):
        output += hex_string[offset:offset + 8] + " "
        offset += 8
        limit -= 4
    #endwhile
    return(output)
if 52 - 52: iII111i * IiII % I1IiiI * I11i
if 73 - 73: I1Ii111 * ooOoO0o
if 62 - 62: OOooOOo . I1IiiI * iIii1I11I1II1 + OoO0O00 * ooOoO0o / oO0o
if 14 - 14: iII111i / OoO0O00
if 75 - 75: IiII
if 68 - 68: IiII - i1IIi % IiII . OoO0O00 . i11iIiiIii . OoooooooOO
if 32 - 32: iII111i + OoO0O00 % IiII + I1IiiI
def lisp_send(lisp_sockets, dest, port, packet):
    """
    Transmit 'packet' to 'dest' on UDP 'port', choosing the IPv4 or
    IPv6 socket from 'lisp_sockets'. IPv4-mapped IPv6 destinations
    (::ffff:a.b.c.d) fall back to the IPv4 socket. For RLOC-probe
    Map-Requests/Map-Replies (first byte 0x12 or 0x28) the socket TTL
    is temporarily raised to LISP_RLOC_PROBE_TTL and restored to 64
    afterwards.
    """
    send_socket = lisp_sockets[0] if dest.is_ipv4() else lisp_sockets[1]

    #
    # An IPv4-mapped IPv6 address is really an IPv4 destination; strip
    # the ::ffff: prefix and use the IPv4 socket when needed.
    #
    address = dest.print_address_no_iid()
    if (address.find("::ffff:") != -1 and address.count(".") == 3):
        if (lisp_i_am_rtr): send_socket = lisp_sockets[0]
        if (send_socket == None):
            send_socket = lisp_sockets[0]
            address = address.split("::ffff:")[-1]
        #endif
    #endif

    lprint("{} {} bytes {} {}, packet: {}".format(bold("Send", False),
        len(packet), bold("to " + address, False), port,
        lisp_format_packet(packet)))

    #
    # Raise TTL only for RLOC-probe control messages so the probed hop
    # can validate it.
    #
    raise_ttl = (LISP_RLOC_PROBE_TTL == 255)
    if (raise_ttl):
        lisp_type = struct.unpack("B", packet[0])[0]
        raise_ttl = (lisp_type in [0x12, 0x28])
        if (raise_ttl): lisp_set_ttl(send_socket, LISP_RLOC_PROBE_TTL)
    #endif

    try: send_socket.sendto(packet, (address, port))
    except socket.error as e:
        lprint("socket.sendto() failed: {}".format(e))
    #endtry

    #
    # Restore the default TTL for subsequent packets on this socket.
    #
    if (raise_ttl): lisp_set_ttl(send_socket, 64)
    return
if 16 - 16: Ii1I
if 26 - 26: o0oOOo0O0Ooo / I11i + OoOoOO00 / OoOoOO00
if 31 - 31: I1Ii111
if 84 - 84: i11iIiiIii * OOooOOo . iII111i - Ii1I * i1IIi - I1ii11iIi11i
if 1 - 1: II111iiii
if 94 - 94: I1ii11iIi11i * iII111i % iII111i % I11i - iII111i
if 38 - 38: IiII - OoO0O00 % Ii1I - II111iiii
if 97 - 97: O0 . Ii1I
def lisp_receive_segments(lisp_socket, packet, source, total_length):
    """
    Reassemble a segmented internal IPC message. 'packet' holds the
    bytes received so far; keep reading from 'lisp_socket' until
    'total_length' bytes have accumulated.

    Returns [True, packet] when complete, [False, None] on a socket
    error, and [False, segment] when the head of a brand new message
    ("packet@...") interrupts reassembly.
    """
    remaining = total_length - len(packet)
    if (remaining == 0): return([True, packet])

    lprint("Received {}-out-of-{} byte segment from {}".format(len(packet),
        total_length, source))

    while (remaining > 0):
        try: segment = lisp_socket.recvfrom(9000)
        except: return([False, None])
        segment = segment[0]

        #
        # A segment beginning with "packet@" is a new message header,
        # so the partially assembled one is abandoned.
        #
        if (segment.find("packet@") == 0):
            header = segment.split("@")
            lprint("Received new message ({}-out-of-{}) while receiving " + "fragments, old message discarded", len(segment),
                header[1] if len(header) > 2 else "?")
            return([False, segment])
        #endif

        remaining -= len(segment)
        packet += segment

        lprint("Received {}-out-of-{} byte segment from {}".format(len(segment), total_length, source))
    #endwhile
    return([True, packet])
if 92 - 92: OoO0O00 * I1ii11iIi11i + iIii1I11I1II1
if 88 - 88: iIii1I11I1II1 + iIii1I11I1II1 * i11iIiiIii . I1ii11iIi11i % oO0o
if 94 - 94: I1IiiI / I1ii11iIi11i / OOooOOo
if 45 - 45: II111iiii
if 98 - 98: i11iIiiIii + I1ii11iIi11i * OOooOOo / OoOoOO00
if 84 - 84: o0oOOo0O0Ooo
if 40 - 40: OoooooooOO - oO0o / O0 * I1Ii111 . O0 + i11iIiiIii
if 9 - 9: OOooOOo % O0 % O0 / I1ii11iIi11i . II111iiii / II111iiii
def lisp_bit_stuff(payload):
    """
    Rejoin the segments of 'payload' with a 0x40 ('@') delimiter byte.
    Used when a payload that itself contained '@' characters was broken
    apart by an earlier split().
    """
    lprint("Bit-stuffing, found {} segments".format(len(payload)))
    return("\x40".join(payload))
if 78 - 78: iIii1I11I1II1 - i1IIi . I11i . o0oOOo0O0Ooo
if 66 - 66: OOooOOo * Oo0Ooo
if 58 - 58: OOooOOo
if 96 - 96: IiII % OoooooooOO + O0 * II111iiii / OOooOOo . I1Ii111
if 47 - 47: OoO0O00 - Oo0Ooo * OoO0O00 / oO0o
if 13 - 13: ooOoO0o
if 55 - 55: i1IIi . I11i . II111iiii + O0 + ooOoO0o - i1IIi
if 3 - 3: iIii1I11I1II1 / oO0o
if 61 - 61: I1Ii111 / O0 - iII111i
if 44 - 44: i1IIi
if 23 - 23: I1ii11iIi11i . OoooooooOO / Ii1I + o0oOOo0O0Ooo
if 89 - 89: OoOoOO00 + Oo0Ooo . OoOoOO00 - II111iiii
if 85 - 85: OoooooooOO * OoooooooOO / Ii1I - II111iiii
if 69 - 69: iII111i * I11i
if 43 - 43: o0oOOo0O0Ooo - IiII * Ii1I . i11iIiiIii / II111iiii
if 61 - 61: OoOoOO00 / I1IiiI . I1ii11iIi11i % OOooOOo
if 70 - 70: OOooOOo * OoOoOO00 / oO0o + Oo0Ooo / O0
if 16 - 16: Oo0Ooo / OoooooooOO / IiII + Oo0Ooo * i11iIiiIii
if 15 - 15: o0oOOo0O0Ooo / i11iIiiIii
if 63 - 63: I1ii11iIi11i - Ii1I + I11i
def lisp_receive(lisp_socket, internal):
    """
    Block on 'lisp_socket' and return one message as the array
    [opcode, source, port, packet]. External sockets (internal False)
    always return opcode "packet". Internal IPC messages are
    "@"-delimited ("opcode@length@source@port@payload...") and may
    arrive segmented; segments are reassembled before returning.
    Returns ["", "", "", ""] when the socket fails.
    """
    while (True):
        try: received = lisp_socket.recvfrom(9000)
        except: return(["", "", "", ""])

        #
        # External packets are returned as-is, with optional logging
        # (data-plane packets are only logged when enabled, truncated).
        #
        if (internal == False):
            packet = received[0]
            source = lisp_convert_6to4(received[1][0])
            port = received[1][1]

            if (port == LISP_DATA_PORT):
                log_it = lisp_data_plane_logging
                packet_str = lisp_format_packet(packet[0:60]) + " ..."
            else:
                log_it = True
                packet_str = lisp_format_packet(packet)
            #endif

            if (log_it):
                lprint("{} {} bytes {} {}, packet: {}".format(bold("Receive",
                    False), len(packet), bold("from " + source, False), port,
                    packet_str))
            #endif
            return(["packet", source, port, packet])
        #endif

        #
        # Internal IPC message: parse the "@"-separated header and
        # reassemble segments until the advertised length is reached.
        #
        assembled = False
        message = received[0]
        discard = False

        while (assembled == False):
            message = message.split("@")

            if (len(message) < 4):
                lprint("Possible fragment (length {}), from old message, " + "discarding", len(message[0]))
                discard = True
                break
            #endif

            opcode = message[0]
            try:
                total_length = int(message[1])
            except:
                error_str = bold("Internal packet reassembly error", False)
                lprint("{}: {}".format(error_str, received))
                discard = True
                break
            #endtry

            source = message[2]
            port = message[3]

            #
            # More than one payload element means the payload itself
            # contained "@" bytes; bit-stuff them back together.
            #
            if (len(message) > 5):
                packet = lisp_bit_stuff(message[4::])
            else:
                packet = message[4]
            #endif

            assembled, packet = lisp_receive_segments(lisp_socket, packet,
                source, total_length)
            if (packet == None): return(["", "", "", ""])

            #
            # A new message preempted reassembly; reparse it from the
            # top of the inner loop.
            #
            if (assembled == False):
                message = packet
                continue
            #endif

            if (port == ""): port = "no-port"

            if (opcode == "command" and lisp_i_am_core == False):
                index = packet.find(" {")
                command = packet if index == -1 else packet[:index]
                command = ": '" + command + "'"
            else:
                command = ""
            #endif

            lprint("{} {} bytes {} {}, {}{}".format(bold("Receive", False),
                len(packet), bold("from " + source, False), port, opcode,
                command if (opcode in ["command", "api"]) else ": ... " if (opcode == "data-packet") else ": " + lisp_format_packet(packet)))
        #endwhile

        if (discard): continue
        return([opcode, source, port, packet])
    #endwhile
if 96 - 96: O0 / i1IIi - i11iIiiIii / OoOoOO00 + OoooooooOO
if 12 - 12: oO0o . OOooOOo
if 76 - 76: oO0o - I11i * I1Ii111 . oO0o % iIii1I11I1II1
if 86 - 86: OoooooooOO + I1Ii111
if 5 - 5: I1ii11iIi11i
if 89 - 89: OoO0O00 - OoOoOO00 / II111iiii . I1ii11iIi11i
if 50 - 50: Ii1I * I1Ii111 * OoooooooOO . OoooooooOO
if 67 - 67: i11iIiiIii % ooOoO0o . I1ii11iIi11i + II111iiii . OoO0O00
def lisp_parse_packet(lisp_sockets, packet, source, udp_sport, ttl=-1):
    """
    Demultiplex a received LISP control message by header type and hand
    it to the matching processing routine. Returns the boolean produced
    by Info-Reply processing (False for every other message type).
    """
    trigger = False

    header = lisp_control_header()
    if (header.decode(packet) == None):
        lprint("Could not decode control header")
        return(trigger)
    #endif

    #
    # 'source' arrives as a string; convert it to a lisp_address unless
    # it names an internal lisp process (contains "lisp").
    #
    source_name = source
    if (source.find("lisp") == -1):
        addr = lisp_address(LISP_AFI_NONE, "", 0, 0)
        addr.string_to_afi(source)
        addr.store_address(source)
        source = addr
    #endif

    if (header.type == LISP_MAP_REQUEST):
        lisp_process_map_request(lisp_sockets, packet, None, 0, source,
            udp_sport, False, ttl)
    elif (header.type == LISP_MAP_REPLY):
        lisp_process_map_reply(lisp_sockets, packet, source, ttl)
    elif (header.type == LISP_MAP_REGISTER):
        lisp_process_map_register(lisp_sockets, packet, source, udp_sport)
    elif (header.type == LISP_MAP_NOTIFY):
        #
        # Map-Notify from the ETR process is a multicast one; otherwise
        # let a co-located RTR see it too, then process it normally.
        #
        if (source_name == "lisp-etr"):
            lisp_process_multicast_map_notify(packet, source)
        else:
            if (lisp_is_running("lisp-rtr")):
                lisp_process_multicast_map_notify(packet, source)
            #endif
            lisp_process_map_notify(lisp_sockets, packet, source)
        #endif
    elif (header.type == LISP_MAP_NOTIFY_ACK):
        lisp_process_map_notify_ack(packet, source)
    elif (header.type == LISP_MAP_REFERRAL):
        lisp_process_map_referral(lisp_sockets, packet, source)
    elif (header.type == LISP_NAT_INFO and header.is_info_reply()):
        unused_hostname, unused_rtrs, trigger = lisp_process_info_reply(source, packet, True)
    elif (header.type == LISP_NAT_INFO and header.is_info_reply() == False):
        addr_str = source.print_address_no_iid()
        lisp_process_info_request(lisp_sockets, packet, addr_str, udp_sport,
            None)
    elif (header.type == LISP_ECM):
        lisp_process_ecm(lisp_sockets, packet, source, udp_sport)
    else:
        lprint("Invalid LISP control packet type {}".format(header.type))
    #endif
    return(trigger)
if 78 - 78: o0oOOo0O0Ooo + I1Ii111 % i11iIiiIii % I1IiiI - Ii1I
if 81 - 81: i11iIiiIii - II111iiii + I11i
if 52 - 52: II111iiii
if 62 - 62: iII111i / OoO0O00 + i11iIiiIii / Oo0Ooo
if 26 - 26: I1ii11iIi11i - OoO0O00
if 19 - 19: iIii1I11I1II1 / I1ii11iIi11i + O0
if 12 - 12: I11i . OOooOOo + o0oOOo0O0Ooo . OoO0O00 + o0oOOo0O0Ooo
def lisp_process_rloc_probe_request(lisp_sockets, map_request, source, port,
    ttl):
    """
    Handle a received RLOC-probe Map-Request. ETRs and RTRs answer with
    an RLOC-probe Map-Reply; any other process ignores the probe.
    """
    probe = bold("RLOC-probe", False)

    if (lisp_i_am_etr):
        lprint("Received {} Map-Request, send RLOC-probe Map-Reply".format(probe))
        lisp_etr_process_map_request(lisp_sockets, map_request, source, port,
            ttl)
        return
    #endif

    if (lisp_i_am_rtr):
        lprint("Received {} Map-Request, send RLOC-probe Map-Reply".format(probe))
        lisp_rtr_process_map_request(lisp_sockets, map_request, source, port,
            ttl)
        return
    #endif

    lprint("Ignoring received {} Map-Request, not an ETR or RTR".format(probe))
    return
if 45 - 45: I1Ii111 + oO0o - o0oOOo0O0Ooo - OoOoOO00 + I1IiiI / II111iiii
if 46 - 46: II111iiii . iIii1I11I1II1
if 62 - 62: I1ii11iIi11i % i1IIi % I1Ii111 * ooOoO0o % OOooOOo + I1IiiI
if 100 - 100: II111iiii - o0oOOo0O0Ooo * OoooooooOO . ooOoO0o / II111iiii / oO0o
if 43 - 43: iIii1I11I1II1 + ooOoO0o * iII111i + iIii1I11I1II1 . I1Ii111
def lisp_process_smr(map_request):
    """Log receipt of an SMR-based Map-Request; no further action here."""
    lprint("Received SMR-based Map-Request")
    return
if 87 - 87: I1Ii111
if 47 - 47: II111iiii + I1IiiI . Oo0Ooo / iIii1I11I1II1
if 14 - 14: i1IIi / OoO0O00 / iII111i % I1Ii111
if 72 - 72: OoO0O00 . II111iiii - IiII + IiII + iIii1I11I1II1 % oO0o
if 21 - 21: iII111i + OoOoOO00 - i11iIiiIii % O0 + OOooOOo
def lisp_process_smr_invoked_request(map_request):
    """Log receipt of an SMR-invoked Map-Request; no further action here."""
    lprint("Received SMR-invoked Map-Request")
    return
if 30 - 30: o0oOOo0O0Ooo - Oo0Ooo + iII111i / O0
if 94 - 94: IiII
if 69 - 69: I1Ii111 . I1Ii111
if 53 - 53: i11iIiiIii + iII111i * Oo0Ooo - I1Ii111
if 61 - 61: o0oOOo0O0Ooo / OOooOOo . II111iiii - I1IiiI * i11iIiiIii
if 8 - 8: iII111i % o0oOOo0O0Ooo
if 87 - 87: Ii1I % I11i / I1Ii111
def lisp_build_map_reply(eid, group, rloc_set, nonce, action, ttl, rloc_probe,
    keys, enc, auth, mr_ttl=-1):
    """
    Encode and return a complete Map-Reply message carrying one EID
    record. 'rloc_set' supplies the RLOC records; 'keys' carries any
    security keys to advertise; 'enc' sets the echo-nonce-capable bit,
    'auth' the authoritative bit, and 'mr_ttl' (when not -1) seeds the
    reply's hop-count.
    """
    map_reply = lisp_map_reply()
    map_reply.rloc_probe = rloc_probe
    map_reply.echo_nonce_capable = enc
    map_reply.hop_count = 0 if (mr_ttl == -1) else mr_ttl
    map_reply.record_count = 1
    map_reply.nonce = nonce
    packet = map_reply.encode()
    map_reply.print_map_reply()

    eid_record = lisp_eid_record()
    eid_record.rloc_count = len(rloc_set)
    eid_record.authoritative = auth
    eid_record.record_ttl = ttl
    eid_record.action = action
    eid_record.eid = eid
    eid_record.group = group

    packet += eid_record.encode()
    eid_record.print_record("  ", False)

    #
    # Mark RLOCs that belong to this system as local; the requester uses
    # that for RLOC-probing decisions.
    #
    local_addresses = lisp_get_all_addresses() + lisp_get_all_translated_rlocs()

    for rloc_entry in rloc_set:
        rloc_record = lisp_rloc_record()
        addr_str = rloc_entry.rloc.print_address_no_iid()
        if (addr_str in local_addresses):
            rloc_record.local_bit = True
            rloc_record.probe_bit = rloc_probe
            rloc_record.keys = keys
            # Priority 254 identifies an RTR RLOC; name it for display.
            if (rloc_entry.priority == 254 and lisp_i_am_rtr):
                rloc_record.rloc_name = "RTR"
            #endif
        #endif
        rloc_record.store_rloc_entry(rloc_entry)
        rloc_record.reach_bit = True
        rloc_record.print_record("    ")
        packet += rloc_record.encode()
    #endfor
    return(packet)
if 29 - 29: OoOoOO00 + i11iIiiIii % OoO0O00 - OoooooooOO
if 68 - 68: iII111i / OOooOOo
if 28 - 28: II111iiii
if 49 - 49: I1ii11iIi11i
if 33 - 33: iIii1I11I1II1
if 72 - 72: I1ii11iIi11i * i11iIiiIii
if 12 - 12: O0 - iIii1I11I1II1 % Oo0Ooo / O0 - IiII
def lisp_build_map_referral(eid, group, ddt_entry, action, ttl, nonce):
    """
    Encode and return a complete Map-Referral message for one EID
    record. When 'ddt_entry' is None an empty referral for 'eid'/'group'
    is built; otherwise the entry's delegation set is encoded as RLOC
    records. A NULL 'action' is upgraded to a node or map-server
    referral based on the first delegation node's child type.

    Fix: the original referenced the delegation-set head in the
    MS-referral/MS-ack authority check even on paths where it was never
    assigned (explicit MS action with an empty delegation set), which
    raised NameError; the local is now initialized and guarded.
    """
    map_referral = lisp_map_referral()
    map_referral.record_count = 1
    map_referral.nonce = nonce
    packet = map_referral.encode()
    map_referral.print_map_referral()

    eid_record = lisp_eid_record()

    ds_count = 0
    ds_node = None                 # first node of the delegation set, if any
    if (ddt_entry == None):
        eid_record.eid = eid
        eid_record.group = group
    else:
        ds_count = len(ddt_entry.delegation_set)
        eid_record.eid = ddt_entry.eid
        eid_record.group = ddt_entry.group
        ddt_entry.map_referrals_sent += 1
        if (ds_count != 0): ds_node = ddt_entry.delegation_set[0]
    #endif
    eid_record.rloc_count = ds_count
    eid_record.authoritative = True

    #
    # Upgrade a NULL action based on what kind of child the delegation
    # points at.
    #
    if (action == LISP_DDT_ACTION_NULL):
        if (ds_count == 0):
            action = LISP_DDT_ACTION_NODE_REFERRAL
        else:
            if (ds_node.is_ddt_child()):
                action = LISP_DDT_ACTION_NODE_REFERRAL
            #endif
            if (ds_node.is_ms_child()):
                action = LISP_DDT_ACTION_MS_REFERRAL
            #endif
        #endif
    #endif

    #
    # Set the ddt-incomplete bit when this node is not authoritative for
    # the referral.
    #
    incomplete = False
    if (action == LISP_DDT_ACTION_NOT_AUTH): incomplete = True
    if (action in (LISP_DDT_ACTION_MS_REFERRAL, LISP_DDT_ACTION_MS_ACK)):
        incomplete = (lisp_i_am_ms and ds_node != None and
            ds_node.is_ms_peer() == False)
    #endif

    eid_record.action = action
    eid_record.ddt_incomplete = incomplete
    eid_record.record_ttl = ttl

    packet += eid_record.encode()
    eid_record.print_record("  ", True)

    if (ds_count == 0): return(packet)

    for ds_node in ddt_entry.delegation_set:
        rloc_record = lisp_rloc_record()
        rloc_record.rloc = ds_node.delegate_address
        rloc_record.priority = ds_node.priority
        rloc_record.weight = ds_node.weight
        rloc_record.mpriority = 255
        rloc_record.mweight = 0
        rloc_record.reach_bit = True
        packet += rloc_record.encode()
        rloc_record.print_record("    ")
    #endfor
    return(packet)
if 68 - 68: OoooooooOO . OoooooooOO % I1ii11iIi11i + i1IIi % OoooooooOO + Ii1I
if 89 - 89: ooOoO0o + I11i * O0 % OoOoOO00
if 2 - 2: I1Ii111 % iIii1I11I1II1 . Ii1I - II111iiii
if 33 - 33: I11i . i11iIiiIii % i1IIi * II111iiii * i11iIiiIii + OoOoOO00
if 26 - 26: I1IiiI % OoOoOO00 % I11i + Oo0Ooo
if 86 - 86: iII111i / i1IIi % Oo0Ooo
if 84 - 84: o0oOOo0O0Ooo * OOooOOo . I11i * Ii1I
def lisp_etr_process_map_request(lisp_sockets, map_request, source, sport,
    ttl):
    """
    ETR handling of a received Map-Request (typically an RLOC-probe).
    Look up the requested EID (and group, for multicast) in the local
    database-mappings and, when found, build and send a Map-Reply.
    Probes destined to a known RTR address, or arriving from source
    port 0, are data-encapsulated so they traverse NAT state correctly.

    Fix: replaced Python-2-only dict.has_key() with the 'in' operator
    (same behavior, also valid on Python 3).
    """
    if (map_request.target_group.is_null()):
        db = lisp_db_for_lookups.lookup_cache(map_request.target_eid, False)
    else:
        db = lisp_db_for_lookups.lookup_cache(map_request.target_group, False)
        if (db): db = db.lookup_source_cache(map_request.target_eid, False)
    #endif
    eid_str = map_request.print_prefix()

    if (db == None):
        lprint("Database-mapping entry not found for requested EID {}".format(green(eid_str, False)))
        return
    #endif

    db_str = db.print_eid_tuple()

    lprint("Found database-mapping EID-prefix {} for requested EID {}".format(green(db_str, False), green(eid_str, False)))

    #
    # Reply to the first ITR-RLOC; when it is a private address and NAT
    # traversal is configured, reply to the outer source instead.
    #
    itr_rloc = map_request.itr_rlocs[0]
    if (itr_rloc.is_private_address() and lisp_nat_traversal):
        itr_rloc = source
    #endif

    nonce = map_request.nonce
    enc = lisp_nonce_echoing
    keys = map_request.keys

    db.map_replies_sent += 1

    packet = lisp_build_map_reply(db.eid, db.group, db.rloc_set, nonce,
        LISP_NO_ACTION, 1440, map_request.rloc_probe, keys, enc, True, ttl)

    #
    # RLOC-probe replies to an RTR (or to a port-0 source) must be
    # data-encapsulated; all 4 sockets are needed to do that.
    #
    if (map_request.rloc_probe and len(lisp_sockets) == 4):
        public = (itr_rloc.is_private_address() == False)
        rtr_addr = itr_rloc.print_address_no_iid()
        if ((public and rtr_addr in lisp_rtr_list) or sport == 0):
            lisp_encapsulate_rloc_probe(lisp_sockets, itr_rloc, None, packet)
            return
        #endif
    #endif

    lisp_send_map_reply(lisp_sockets, packet, itr_rloc, sport)
    return
if 22 - 22: o0oOOo0O0Ooo % OOooOOo - I11i + ooOoO0o / OOooOOo
if 98 - 98: I11i * O0 + IiII - oO0o
if 35 - 35: OoooooooOO * Ii1I
if 73 - 73: ooOoO0o . OoO0O00 % I1ii11iIi11i - oO0o
if 67 - 67: o0oOOo0O0Ooo . I11i + i1IIi
if 100 - 100: Oo0Ooo - I1IiiI . OOooOOo % iIii1I11I1II1 . I11i
if 83 - 83: OoOoOO00 * iII111i
def lisp_rtr_process_map_request(lisp_sockets, map_request, source, sport,
    ttl):
    """
    RTR handling of a received Map-Request (RLOC-probe). Build a
    Map-Reply listing this RTR's own RLOCs at priority 254 and send it
    to the requesting ITR.
    """
    itr_rloc = map_request.itr_rlocs[0]
    if (itr_rloc.is_private_address()): itr_rloc = source
    nonce = map_request.nonce

    eid = map_request.target_eid
    group = map_request.target_group

    #
    # Advertise our IPv4 and IPv6 RLOCs (when configured) at RTR
    # priority 254.
    #
    rloc_set = []
    for addr in [lisp_myrlocs[0], lisp_myrlocs[1]]:
        if (addr == None): continue
        rloc_entry = lisp_rloc()
        rloc_entry.rloc.copy_address(addr)
        rloc_entry.priority = 254
        rloc_set.append(rloc_entry)
    #endfor

    enc = lisp_nonce_echoing
    keys = map_request.keys

    packet = lisp_build_map_reply(eid, group, rloc_set, nonce, LISP_NO_ACTION,
        1440, True, keys, enc, True, ttl)
    lisp_send_map_reply(lisp_sockets, packet, itr_rloc, sport)
    return
if 77 - 77: O0 * iII111i % Oo0Ooo * I1Ii111
if 41 - 41: OoOoOO00 / ooOoO0o / o0oOOo0O0Ooo . OoO0O00 + Ii1I % i1IIi
if 14 - 14: i1IIi * OoooooooOO % i1IIi % iII111i . I11i
if 83 - 83: O0 % I1ii11iIi11i - i1IIi . i11iIiiIii * I11i
if 2 - 2: Ii1I / OOooOOo
if 64 - 64: i1IIi % Oo0Ooo / O0 % Oo0Ooo
if 49 - 49: II111iiii * iIii1I11I1II1 / I11i - oO0o
if 76 - 76: I1Ii111 . Oo0Ooo - ooOoO0o . II111iiii - iII111i
if 36 - 36: iIii1I11I1II1 % Oo0Ooo
if 67 - 67: oO0o / II111iiii . I11i / oO0o
def lisp_get_private_rloc_set(target_site_eid, seid, group):
    """
    Decide whether the Map-Request source (registered as 'seid'/'group')
    appears to sit behind the same NAT as 'target_site_eid'. When both
    sites registered the same public RLOC (and no site-id is
    configured), or when they share the same configured site-id, return
    deep copies of the target's private RLOCs; otherwise return the
    registered RLOC-set unchanged.
    """
    rloc_set = target_site_eid.registered_rlocs

    source_site = lisp_site_eid_lookup(seid, group, False)
    if (source_site == None): return(rloc_set)

    #
    # Collect the target site's private RLOCs and find its first public
    # (non-RTR) one.
    #
    target_public = None
    private_rlocs = []
    for rloc_entry in rloc_set:
        if (rloc_entry.is_rtr()): continue
        if (rloc_entry.rloc.is_private_address()):
            private_rlocs.append(copy.deepcopy(rloc_entry))
            continue
        #endif
        target_public = rloc_entry
        break
    #endfor
    if (target_public == None): return(rloc_set)
    target_public = target_public.rloc.print_address_no_iid()

    #
    # Find the source site's first public (non-RTR, non-private) RLOC.
    #
    source_public = None
    for rloc_entry in source_site.registered_rlocs:
        if (rloc_entry.is_rtr()): continue
        if (rloc_entry.rloc.is_private_address()): continue
        source_public = rloc_entry
        break
    #endfor
    if (source_public == None): return(rloc_set)
    source_public = source_public.rloc.print_address_no_iid()

    #
    # No site-id configured: same translated address means both xTRs
    # are behind the same NAT, so hand out the private RLOCs.
    #
    site_id = target_site_eid.site_id
    if (site_id == 0):
        if (source_public == target_public):
            lprint("Return private RLOCs for sites behind {}".format(target_public))
            return(private_rlocs)
        #endif
        return(rloc_set)
    #endif

    #
    # Matching site-ids are trusted to reach each other privately.
    #
    if (site_id == source_site.site_id):
        lprint("Return private RLOCs for sites in site-id {}".format(site_id))
        return(private_rlocs)
    #endif
    return(rloc_set)
if 99 - 99: OOooOOo - OOooOOo
if 98 - 98: o0oOOo0O0Ooo + O0 * oO0o - i11iIiiIii
if 83 - 83: o0oOOo0O0Ooo
if 23 - 23: o0oOOo0O0Ooo . I11i
if 67 - 67: iII111i
if 52 - 52: IiII . OoooooooOO
if 34 - 34: o0oOOo0O0Ooo / IiII . OoooooooOO . Oo0Ooo / ooOoO0o + O0
if 38 - 38: I11i
if 66 - 66: II111iiii
def lisp_get_partial_rloc_set(registered_rloc_set, mr_source, multicast):
    """
    Build the RLOC-set a Map-Server returns when RTR RLOCs (priority
    254) are registered. A registered RTR asking on its own behalf gets
    the xTR RLOCs; any other requester gets the site's private RLOCs
    plus the RTR RLOCs. When no RTRs are registered the set is returned
    unchanged.
    """
    rtr_list = []
    rloc_set = []

    #
    # Determine whether any RTRs are registered, and whether the
    # requester is itself one of them.
    #
    rtr_is_requester = False
    rtrs_registered = False
    for rloc_entry in registered_rloc_set:
        if (rloc_entry.priority != 254): continue
        rtrs_registered |= True
        if (rloc_entry.rloc.is_exact_match(mr_source) == False): continue
        rtr_is_requester = True
        break
    #endfor

    if (rtrs_registered == False): return(registered_rloc_set)

    #
    # Operator signal that this deployment keeps its RTRs behind a NAT,
    # in which case private RTR addresses are pruned from the split.
    #
    rtr_behind_nat = (os.getenv("LISP_RTR_BEHIND_NAT") != None)

    #
    # Partition the registration into RTR RLOCs and xTR RLOCs, honoring
    # the "do not use" priority of 255 for the requested cast type.
    #
    for rloc_entry in registered_rloc_set:
        if (rtr_behind_nat and rloc_entry.rloc.is_private_address()): continue
        if (multicast == False and rloc_entry.priority == 255): continue
        if (multicast and rloc_entry.mpriority == 255): continue
        if (rloc_entry.priority == 254):
            rtr_list.append(rloc_entry)
        else:
            rloc_set.append(rloc_entry)
        #endif
    #endfor

    #
    # The RTR itself is told the xTR RLOCs.
    #
    if (rtr_is_requester): return(rloc_set)

    #
    # Everyone else is told the private RLOCs (useful for same-site
    # shortcuts) followed by the RTR RLOCs.
    #
    rloc_set = []
    for rloc_entry in registered_rloc_set:
        if (rloc_entry.rloc.is_private_address()): rloc_set.append(rloc_entry)
    #endfor
    rloc_set += rtr_list
    return(rloc_set)
if 66 - 66: iII111i
if 72 - 72: ooOoO0o / oO0o / iII111i . I1Ii111 . I1ii11iIi11i + IiII
if 39 - 39: I1IiiI % I1Ii111
if 22 - 22: OoOoOO00 - OOooOOo % i1IIi + i1IIi
if 28 - 28: oO0o + OoOoOO00 * Ii1I . I11i
if 80 - 80: I1ii11iIi11i / OoOoOO00
if 74 - 74: I1ii11iIi11i + O0 + o0oOOo0O0Ooo - iII111i
if 48 - 48: ooOoO0o * iIii1I11I1II1 % Oo0Ooo
if 60 - 60: OoOoOO00 / i1IIi * iIii1I11I1II1
if 91 - 91: I1Ii111 . OoooooooOO / IiII / I1IiiI
def lisp_store_pubsub_state(reply_eid, itr_rloc, mr_sport, nonce, ttl, xtr_id):

    #
    # Record a Map-Request subscription: build a lisp_pubsub entry for the
    # requesting ITR and file it under the EID-prefix being subscribed to,
    # so later RLOC-set changes can trigger Map-Notify messages to it.
    #
    subscription = lisp_pubsub(itr_rloc, mr_sport, nonce, ttl, xtr_id)
    subscription.add(reply_eid)
    return
if 2 - 2: ooOoO0o + OoO0O00 % i11iIiiIii
if 97 - 97: OoooooooOO % IiII * iIii1I11I1II1
if 97 - 97: iIii1I11I1II1 - I1Ii111 - o0oOOo0O0Ooo * o0oOOo0O0Ooo * OoOoOO00
if 80 - 80: II111iiii . I1ii11iIi11i % i11iIiiIii / Ii1I / oO0o
if 100 - 100: Ii1I . OoO0O00 * ooOoO0o
if 4 - 4: i1IIi + OoooooooOO
if 26 - 26: I1IiiI / II111iiii % I1ii11iIi11i * o0oOOo0O0Ooo . IiII / OoO0O00
if 10 - 10: i11iIiiIii / i1IIi + O0 - i11iIiiIii % I11i - i1IIi
if 38 - 38: O0 - I1IiiI + Oo0Ooo + ooOoO0o
if 56 - 56: I1Ii111 + oO0o / Ii1I + I1Ii111
if 21 - 21: OOooOOo / OoOoOO00 + OoOoOO00 + OoOoOO00 - i1IIi + Ii1I
if 43 - 43: O0 % II111iiii
if 60 - 60: iII111i / ooOoO0o - Ii1I - OoooooooOO
if 79 - 79: oO0o / iII111i . iIii1I11I1II1 * i11iIiiIii * i1IIi . iIii1I11I1II1
if 31 - 31: OoooooooOO / ooOoO0o / OoooooooOO + ooOoO0o . O0 - IiII
def lisp_convert_reply_to_notify(packet):

    #
    # Rewrite a Map-Reply packet in place as a Map-Notify. The record
    # payload is kept; only the header differs: Map-Notify carries the
    # record count in the first long, the same nonce, and a zeroed
    # key-id/alg-id/auth-length long.
    #

    #
    # Pull record-count and nonce out of the Map-Reply header.
    #
    first_long = struct.unpack("I", packet[0:4])[0]
    record_count = socket.ntohl(first_long) & 0xff
    nonce = packet[4:12]
    packet = packet[12::]

    #
    # Build the Map-Notify header words.
    #
    first_long = (LISP_MAP_NOTIFY << 28) | record_count
    header = struct.pack("I", socket.htonl(first_long))
    auth = struct.pack("I", 0)

    #
    # Prepend new header to the retained EID-records.
    #
    packet = header + nonce + auth + packet
    return(packet)
if 89 - 89: Oo0Ooo + II111iiii * OoO0O00 + Oo0Ooo % II111iiii
if 59 - 59: O0 + Oo0Ooo
if 63 - 63: OoO0O00 / I1IiiI / oO0o . Ii1I / i1IIi
if 50 - 50: I11i . I11i % I1IiiI - i1IIi
if 63 - 63: OoO0O00 . iII111i
if 28 - 28: ooOoO0o . Oo0Ooo - OoooooooOO - I1Ii111 - OoooooooOO - oO0o
if 25 - 25: I11i / I1Ii111 . i11iIiiIii % i1IIi
if 21 - 21: O0 * IiII . iII111i / iII111i % i11iIiiIii / I11i
def lisp_notify_subscribers(lisp_sockets, eid_record, eid, site):

    #
    # Send a Map-Notify to every subscriber that registered interest in
    # this EID-prefix (RFC 8111 style pubsub). Looks up the pubsub cache
    # keyed by the printed prefix; a miss means nobody subscribed.
    #
    # Fix: dict.has_key() is Python-2-only (removed in Python 3); the `in`
    # operator is equivalent on both and is used instead.
    #
    eid_str = eid.print_prefix()
    if (eid_str not in lisp_pubsub_cache): return

    for subscription in lisp_pubsub_cache[eid_str].values():
        subscriber_itr = subscription.itr
        port = subscription.port
        itr_str = red(subscriber_itr.print_address_no_iid(), False)
        sub_str = bold("subscriber", False)
        xtr_id = "0x" + lisp_hex_string(subscription.xtr_id)
        nonce = "0x" + lisp_hex_string(subscription.nonce)

        lprint("    Notify {} {}:{} xtr-id {} for {}, nonce {}".format(sub_str,
            itr_str, port, xtr_id, green(eid_str, False), nonce))

        #
        # One EID-record per Map-Notify, echoing the subscription nonce.
        #
        lisp_build_map_notify(lisp_sockets, eid_record, [eid_str], 1,
            subscriber_itr, port, subscription.nonce, 0, 0, 0, site, False)
        subscription.map_notify_count += 1
    #endfor
    return
if 38 - 38: I1ii11iIi11i + i11iIiiIii * I1IiiI % oO0o % OoooooooOO
if 4 - 4: OoO0O00 . I1IiiI - O0 % iII111i . OOooOOo
if 69 - 69: OoooooooOO
if 19 - 19: O0 + iIii1I11I1II1 / OoOoOO00 / oO0o + II111iiii - OOooOOo
if 70 - 70: i1IIi * o0oOOo0O0Ooo + I1Ii111 . ooOoO0o - O0 + i11iIiiIii
if 81 - 81: iIii1I11I1II1 - OoO0O00 . i11iIiiIii
if 4 - 4: o0oOOo0O0Ooo / OoO0O00 - I11i
def lisp_process_pubsub(lisp_sockets, packet, reply_eid, itr_rloc, port, nonce,
    ttl, xtr_id):

    #
    # Handle a Map-Request with the subscribe-bit set: remember the
    # subscription, then ack it by converting the already-built Map-Reply
    # into a Map-Notify and sending that to the requesting ITR.
    #
    lisp_store_pubsub_state(reply_eid, itr_rloc, port, nonce, ttl, xtr_id)

    eid_str = green(reply_eid.print_prefix(), False)
    itr_str = red(itr_rloc.print_address_no_iid(), False)
    notify_str = bold("Map-Notify", False)
    xtr_id_str = "0x" + lisp_hex_string(xtr_id)
    lprint("{} pubsub request for {} to ack ITR {} xtr-id: {}".format(
        notify_str, eid_str, itr_str, xtr_id_str))

    #
    # Reuse the Map-Reply payload as the Map-Notify body.
    #
    packet = lisp_convert_reply_to_notify(packet)
    lisp_send_map_notify(lisp_sockets, packet, itr_rloc, port)
    return
if 91 - 91: Ii1I * i11iIiiIii
if 24 - 24: I1IiiI * I11i - o0oOOo0O0Ooo / iII111i + IiII - I1ii11iIi11i
if 53 - 53: I11i / I1IiiI - iIii1I11I1II1 - o0oOOo0O0Ooo * OoOoOO00
if 86 - 86: iIii1I11I1II1 - I1Ii111
if 86 - 86: O0 * IiII + OoOoOO00 + OoO0O00
if 53 - 53: I1IiiI % i11iIiiIii + o0oOOo0O0Ooo . I1ii11iIi11i
if 73 - 73: iII111i - o0oOOo0O0Ooo / OOooOOo + iII111i + o0oOOo0O0Ooo % II111iiii
if 74 - 74: I11i * iIii1I11I1II1 - OoO0O00 / i1IIi / OoO0O00 / IiII
def lisp_ms_process_map_request(lisp_sockets, packet, map_request, mr_source,
    mr_sport, ecm_source):

    #
    # Map-Server handling of a received Map-Request. Verifies optional
    # crypto-EID / site-required signatures, looks the target EID up in the
    # site cache, and then either (a) sends a negative Map-Reply (site not
    # found / not registered), (b) proxy-replies on behalf of the site
    # (applying policy, NAT RLOC filtering and TTL overrides), or (c)
    # forwards the Map-Request inside an ECM to one of the site's ETRs.
    # Returns [eid, group, ddt-action] for the caller's Map-Referral logic.
    #
    # Fixes relative to previous revision:
    #  - the "no registered RLOCs" log used '"a" + "b".format(...)' so the
    #    placeholders were never substituted; the concatenation is now
    #    parenthesized before .format().
    #  - lisp_policies.has_key() replaced with `in` (Python-3 compatible,
    #    identical on Python 2 dicts).
    #
    eid = map_request.target_eid
    group = map_request.target_group
    eid_str = lisp_print_eid_tuple(eid, group)
    itr_rloc = map_request.itr_rlocs[0]
    xtr_id = map_request.xtr_id
    nonce = map_request.nonce
    action = LISP_NO_ACTION
    pubsub = map_request.subscribe_bit

    #
    # If the target is a crypto-hash EID, the Map-Request must carry a
    # verifiable signature.
    #
    sig_good = True
    is_crypto_eid = (lisp_get_eid_hash(eid) != None)
    if (is_crypto_eid):
        sig = map_request.map_request_signature
        if (sig == None):
            sig_good = False
            lprint(("EID-crypto-hash signature verification {}, " + \
                "no signature found").format(bold("failed", False)))
        else:
            sig_eid = map_request.signature_eid
            hash_eid, pubkey, sig_good = lisp_lookup_public_key(sig_eid)
            if (sig_good):
                sig_good = map_request.verify_map_request_sig(pubkey)
            else:
                lprint("Public-key lookup failed for sig-eid {}, hash-eid {}".\
                    format(sig_eid.print_address(), hash_eid.print_address()))
            #endif

            passfail = bold("passed", False) if sig_good else \
                bold("failed", False)
            lprint("EID-crypto-hash signature verification {}".format(
                passfail))
        #endif
    #endif

    if (pubsub and sig_good == False):
        pubsub = False
        lprint("Suppress creating pubsub state due to signature failure")
    #endif

    #
    # Reply to the ITR-RLOC when its address-family matches the ECM source,
    # otherwise to the ECM source itself (e.g. AFI mismatch through NAT).
    #
    reply_dest = itr_rloc if (itr_rloc.afi == ecm_source.afi) else ecm_source

    Iiii1IIIiIi = lisp_site_eid_lookup(eid, group, False)
    site_eid = Iiii1IIIiIi

    if (site_eid == None or site_eid.is_star_g()):
        not_found = bold("Site not found", False)
        lprint("{} for requested EID {}".format(not_found,
            green(eid_str, False)))

        #
        # Negative Map-Reply, 15-minute TTL.
        #
        lisp_send_negative_map_reply(lisp_sockets, eid, group, nonce, itr_rloc,
            mr_sport, 15, xtr_id, pubsub)

        return([eid, group, LISP_DDT_ACTION_SITE_NOT_FOUND])
    #endif

    eid_prefix_str = site_eid.print_eid_tuple()
    site_name = site_eid.site.site_name

    #
    # A site may insist on signed Map-Requests even for non-crypto EIDs.
    #
    if (is_crypto_eid == False and site_eid.require_signature):
        sig = map_request.map_request_signature
        sig_eid = map_request.signature_eid
        if (sig == None or sig_eid.is_null()):
            lprint("Signature required for site {}".format(site_name))
            sig_good = False
        else:
            sig_eid = map_request.signature_eid
            hash_eid, pubkey, sig_good = lisp_lookup_public_key(sig_eid)
            if (sig_good):
                sig_good = map_request.verify_map_request_sig(pubkey)
            else:
                lprint("Public-key lookup failed for sig-eid {}, hash-eid {}".\
                    format(sig_eid.print_address(), hash_eid.print_address()))
            #endif

            passfail = bold("passed", False) if sig_good else \
                bold("failed", False)
            lprint("Required signature verification {}".format(passfail))
        #endif
    #endif

    #
    # Site known but no current registration: negative Map-Reply with a
    # short TTL (or the site's forced TTL).
    #
    if (sig_good and site_eid.registered == False):
        lprint("Site '{}' with EID-prefix {} is not registered for EID {}".\
            format(site_name, green(eid_prefix_str, False),
            green(eid_str, False)))

        #
        # When more-specifics are not accepted, answer with the configured
        # prefix rather than the requested one.
        #
        if (site_eid.accept_more_specifics == False):
            eid = site_eid.eid
            group = site_eid.group
        #endif

        ttl = 1
        if (site_eid.force_ttl != None):
            ttl = site_eid.force_ttl | 0x80000000
        #endif

        lisp_send_negative_map_reply(lisp_sockets, eid, group, nonce, itr_rloc,
            mr_sport, ttl, xtr_id, pubsub)

        return([eid, group, LISP_DDT_ACTION_MS_NOT_REG])
    #endif

    #
    # Decide whether this Map-Server proxy-replies for the site.
    #
    nat_forced = False
    pr_str = ""
    proxy_reply = False
    if (site_eid.force_nat_proxy_reply):
        pr_str = ", nat-forced"
        nat_forced = True
        proxy_reply = True
    elif (site_eid.force_proxy_reply):
        pr_str = ", forced"
        proxy_reply = True
    elif (site_eid.proxy_reply_requested):
        pr_str = ", requested"
        proxy_reply = True
    elif (map_request.pitr_bit and site_eid.pitr_proxy_reply_drop):
        pr_str = ", drop-to-pitr"
        action = LISP_DROP_ACTION
    elif (site_eid.proxy_reply_action != ""):
        action = site_eid.proxy_reply_action
        pr_str = ", forced, action {}".format(action)
        action = LISP_DROP_ACTION if (action == "drop") else \
            LISP_NATIVE_FORWARD_ACTION
    #endif

    #
    # Apply a configured policy, if one matches this Map-Request.
    #
    policy_drop = False
    policy = None
    if (proxy_reply and site_eid.policy in lisp_policies):
        p = lisp_policies[site_eid.policy]
        if (p.match_policy_map_request(map_request, mr_source)): policy = p

        if (policy):
            ps = bold("matched", False)
            lprint("Map-Request {} policy '{}', set-action '{}'".format(ps,
                p.policy_name, p.set_action))
        else:
            ps = bold("no match", False)
            lprint("Map-Request {} for policy '{}', implied drop".format(ps,
                p.policy_name))
            policy_drop = True
        #endif
    #endif

    if (pr_str != ""):
        lprint("Proxy-replying for EID {}, found site '{}' EID-prefix {}{}".\
            format(green(eid_str, False), site_name,
            green(eid_prefix_str, False), pr_str))

        rloc_set = site_eid.registered_rlocs
        ttl = 1440
        if (nat_forced):
            if (site_eid.site_id != 0):
                seid = map_request.source_eid
                rloc_set = lisp_get_private_rloc_set(site_eid, seid, group)
            #endif
            if (rloc_set == site_eid.registered_rlocs):
                multicast = (site_eid.group.is_null() == False)
                new_set = lisp_get_partial_rloc_set(rloc_set, reply_dest,
                    multicast)
                if (new_set != rloc_set):
                    ttl = 15
                    rloc_set = new_set
                #endif
            #endif
        #endif

        #
        # Site-level TTL override.
        #
        if (site_eid.force_ttl != None):
            ttl = site_eid.force_ttl | 0x80000000
        #endif

        #
        # Policy overrides: record TTL and/or replacement RLOC, or drop.
        #
        if (policy):
            if (policy.set_record_ttl):
                ttl = policy.set_record_ttl
                lprint("Policy set-record-ttl to {}".format(ttl))
            #endif
            if (policy.set_action == "drop"):
                lprint("Policy set-action drop, send negative Map-Reply")
                action = LISP_POLICY_DENIED_ACTION
                rloc_set = []
            else:
                policy_rloc = policy.set_policy_map_reply()
                if (policy_rloc): rloc_set = [policy_rloc]
            #endif
        #endif

        if (policy_drop):
            lprint("Implied drop action, send negative Map-Reply")
            action = LISP_POLICY_DENIED_ACTION
            rloc_set = []
        #endif

        echo_nonce = site_eid.echo_nonce_capable

        #
        # On signature failure answer with an auth-failure action and an
        # empty RLOC-set for the requested (not registered) prefix.
        #
        if (sig_good):
            reply_eid = site_eid.eid
            reply_group = site_eid.group
        else:
            reply_eid = eid
            reply_group = group
            action = LISP_AUTH_FAILURE_ACTION
            rloc_set = []
        #endif

        packet = lisp_build_map_reply(reply_eid, reply_group, rloc_set,
            nonce, action, ttl, False, None, echo_nonce, False)

        if (pubsub):
            lisp_process_pubsub(lisp_sockets, packet, reply_eid, itr_rloc,
                mr_sport, nonce, ttl, xtr_id)
        else:
            lisp_send_map_reply(lisp_sockets, packet, itr_rloc, mr_sport)
        #endif

        return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
    #endif

    #
    # Not proxy-replying: forward the Map-Request to one of the site's
    # registered ETRs. Nothing to forward to when none are registered.
    #
    rloc_count = len(site_eid.registered_rlocs)
    if (rloc_count == 0):
        lprint(("Requested EID {} found site '{}' with EID-prefix {} with " + \
            "no registered RLOCs").format(green(eid_str, False), site_name,
            green(eid_prefix_str, False)))
        return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
    #endif

    #
    # Hash source/target EIDs to pick an ETR so a given flow is sticky.
    #
    hash_source = map_request.target_eid if \
        map_request.source_eid.is_null() else map_request.source_eid

    hash_value = map_request.target_eid.hash_address(hash_source)
    hash_value %= rloc_count
    etr = site_eid.registered_rlocs[hash_value]

    if (etr.rloc.is_null()):
        lprint(("Suppress forwarding Map-Request for EID {} at site '{}' " + \
            "EID-prefix {}, no RLOC address").format(green(eid_str, False),
            site_name, green(eid_prefix_str, False)))
    else:
        lprint(("Forwarding Map-Request for EID {} to ETR {} at site '{}' " + \
            "EID-prefix {}").format(green(eid_str, False),
            red(etr.rloc.print_address(), False), site_name,
            green(eid_prefix_str, False)))

        #
        # Encapsulate the original Map-Request in an ECM to the ETR.
        #
        lisp_send_ecm(lisp_sockets, packet, map_request.source_eid, mr_sport,
            map_request.target_eid, etr.rloc, to_etr=True)
    #endif
    return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
if 7 - 7: OoO0O00 + OoooooooOO % II111iiii % oO0o
if 48 - 48: OOooOOo . II111iiii * OOooOOo - I11i / iIii1I11I1II1 / i11iIiiIii
if 37 - 37: II111iiii % O0 + iIii1I11I1II1 - I1IiiI . I11i + I1ii11iIi11i
if 14 - 14: ooOoO0o % iIii1I11I1II1 % ooOoO0o / IiII + OOooOOo
if 14 - 14: Oo0Ooo
if 79 - 79: I1ii11iIi11i % I1Ii111 % I11i - iII111i * OoOoOO00
if 48 - 48: O0 + OoOoOO00 - O0
def lisp_ddt_process_map_request(lisp_sockets, map_request, ecm_source, port):

    #
    # DDT-node handling of a Map-Request: look the target EID up in the
    # delegation cache (or the site cache when this node is also a
    # Map-Server), compute the covering/negative prefix and referral
    # action, then answer with a Map-Referral.
    #
    eid = map_request.target_eid
    group = map_request.target_group
    eid_str = lisp_print_eid_tuple(eid, group)
    nonce = map_request.nonce
    action = LISP_DDT_ACTION_NULL

    ddt_entry = None
    if (lisp_i_am_ms):
        site_eid = lisp_site_eid_lookup(eid, group, False)
        if (site_eid == None): return

        if (site_eid.registered):
            action = LISP_DDT_ACTION_MS_ACK
            ttl = 1440
        else:
            #
            # Known but unregistered: report the computed negative prefix
            # with a short TTL.
            #
            eid, group, action = lisp_ms_compute_neg_prefix(eid, group)
            action = LISP_DDT_ACTION_MS_NOT_REG
            ttl = 1
        #endif
    else:
        ddt_entry = lisp_ddt_cache_lookup(eid, group, False)
        if (ddt_entry == None):
            action = LISP_DDT_ACTION_NOT_AUTH
            ttl = 0
            lprint("DDT delegation entry not found for EID {}".format(
                green(eid_str, False)))
        elif (ddt_entry.is_auth_prefix()):
            #
            # Authoritative for the space but no delegation exists: a
            # delegation hole. Compute the least-specific prefix that
            # covers the hole so the requester can cache it.
            #
            action = LISP_DDT_ACTION_DELEGATION_HOLE
            ttl = 15
            auth_prefix_str = ddt_entry.print_eid_tuple()
            lprint(("DDT delegation entry not found but auth-prefix {} " + \
                "found for EID {}").format(auth_prefix_str,
                green(eid_str, False)))

            if (group.is_null()):
                eid = lisp_ddt_compute_neg_prefix(eid, ddt_entry,
                    lisp_ddt_cache)
            else:
                group = lisp_ddt_compute_neg_prefix(group, ddt_entry,
                    lisp_ddt_cache)
                eid = lisp_ddt_compute_neg_prefix(eid, ddt_entry,
                    ddt_entry.source_cache)
            #endif
            ddt_entry = None
        else:
            auth_prefix_str = ddt_entry.print_eid_tuple()
            lprint("DDT delegation entry {} found for EID {}".format(
                auth_prefix_str, green(eid_str, False)))
            ttl = 1440
        #endif
    #endif

    #
    # Build and send the Map-Referral. Replies go to the LISP control port
    # unless the nonce carries the special echo value.
    #
    referral = lisp_build_map_referral(eid, group, ddt_entry, action, ttl,
        nonce)
    nonce = map_request.nonce >> 32
    if (map_request.nonce != 0 and nonce != 0xdfdf0e1d): port = LISP_CTRL_PORT
    lisp_send_map_referral(lisp_sockets, referral, ecm_source, port)
    return
if 26 - 26: iII111i . oO0o * i11iIiiIii . iIii1I11I1II1
if 74 - 74: Ii1I / iIii1I11I1II1 + OOooOOo . II111iiii
if 65 - 65: OOooOOo * I11i * Oo0Ooo
if 21 - 21: Ii1I . iIii1I11I1II1
if 84 - 84: OOooOOo
if 67 - 67: I1IiiI % OoO0O00 % o0oOOo0O0Ooo % IiII
if 33 - 33: ooOoO0o % I1IiiI
if 98 - 98: oO0o . o0oOOo0O0Ooo + II111iiii
if 62 - 62: ooOoO0o - OoooooooOO / I1ii11iIi11i / iII111i - o0oOOo0O0Ooo
if 70 - 70: oO0o % OoooooooOO * I1IiiI - OoOoOO00 * OoOoOO00 . OOooOOo
if 9 - 9: iII111i * Oo0Ooo % iII111i % Oo0Ooo * II111iiii
if 71 - 71: II111iiii + I1ii11iIi11i * II111iiii
if 59 - 59: OoO0O00
def lisp_find_negative_mask_len(eid, entry_prefix, neg_prefix):

    #
    # Widen neg_prefix.mask_len so it still distinguishes 'eid' from
    # 'entry_prefix': find the most-significant bit position where the two
    # addresses differ (hash_address returns the XOR of the two) and, if
    # that position is deeper than the current mask-len, adopt it.
    #
    diff_bits = eid.hash_address(entry_prefix)
    address_bits = eid.addr_length() * 8

    #
    # Scan from the most-significant bit down; stop at the first set bit.
    #
    first_diff = 0
    for first_diff in range(address_bits):
        if (diff_bits & (1 << (address_bits - first_diff - 1))): break
    #endfor

    if (first_diff > neg_prefix.mask_len): neg_prefix.mask_len = first_diff
    return
if 63 - 63: IiII % I11i / OoOoOO00 % OOooOOo * iII111i * OoO0O00
if 11 - 11: I1Ii111 * II111iiii
if 3 - 3: Oo0Ooo * OOooOOo
if 13 - 13: I1Ii111 + i11iIiiIii / OOooOOo
if 98 - 98: I1IiiI * Oo0Ooo
if 9 - 9: O0 / i11iIiiIii . iIii1I11I1II1 . IiII
if 14 - 14: OoOoOO00 . OOooOOo - Oo0Ooo + I1Ii111 % ooOoO0o
if 95 - 95: OoO0O00 * II111iiii + i1IIi
if 22 - 22: Ii1I / ooOoO0o % I11i + OoO0O00 . ooOoO0o
if 61 - 61: O0 - iIii1I11I1II1 * Oo0Ooo . Ii1I + O0
def lisp_neg_prefix_walk(entry, parms):

    #
    # Cache-walk callback used when computing a negative prefix. Skips
    # entries outside the scope of interest (different instance-id/AFI
    # when no auth-prefix is supplied, or not more-specific than the
    # auth-prefix when one is); otherwise widens the accumulated negative
    # prefix against this entry. Always returns [True, parms] so the walk
    # continues over the whole cache.
    #
    eid, auth_prefix, neg_prefix = parms

    if (auth_prefix == None):
        if (entry.eid.instance_id != eid.instance_id):
            return([True, parms])
        #endif
        if (entry.eid.afi != eid.afi): return([True, parms])
    else:
        if (entry.eid.is_more_specific(auth_prefix) == False):
            return([True, parms])
        #endif
    #endif

    #
    # Entry is in scope — tighten the negative prefix mask against it.
    #
    lisp_find_negative_mask_len(eid, entry.eid, neg_prefix)
    return([True, parms])
if 8 - 8: Ii1I - oO0o
if 73 - 73: Oo0Ooo . i11iIiiIii % i11iIiiIii / o0oOOo0O0Ooo * OoO0O00 . i11iIiiIii
if 61 - 61: i11iIiiIii + I11i * i1IIi . OoO0O00 . OoO0O00 - oO0o
if 52 - 52: OOooOOo / ooOoO0o + I1ii11iIi11i - I1IiiI . II111iiii
if 83 - 83: Oo0Ooo * OOooOOo - iIii1I11I1II1
if 18 - 18: o0oOOo0O0Ooo + Ii1I . iIii1I11I1II1
if 31 - 31: I1ii11iIi11i / I1IiiI % ooOoO0o . OoO0O00 / IiII . II111iiii
if 20 - 20: IiII * I1Ii111
def lisp_ddt_compute_neg_prefix(eid, ddt_entry, cache):

    #
    # Compute the least-specific prefix covering 'eid' that does not
    # overlap any entry in the given DDT cache, bounded by the entry's
    # auth-prefix. Returned in a fresh lisp_address; non-binary EIDs
    # (e.g. distinguished-names) are returned unchanged since they cannot
    # be aggregated.
    #
    if (eid.is_binary() == False): return(eid)

    neg_prefix = lisp_address(eid.afi, "", 0, 0)
    neg_prefix.copy_address(eid)
    neg_prefix.mask_len = 0

    auth_prefix_str = ddt_entry.print_eid_tuple()
    auth_prefix = ddt_entry.eid

    #
    # Walk every cache entry under the auth-prefix, widening the mask-len
    # as needed, then zero the host bits.
    #
    eid, auth_prefix, neg_prefix = cache.walk_cache(lisp_neg_prefix_walk,
        (eid, auth_prefix, neg_prefix))

    neg_prefix.mask_address(neg_prefix.mask_len)

    lprint(("Least specific prefix computed from ddt-cache for EID {} " + \
        "using auth-prefix {} is {}").format(green(eid.print_address(),
        False), auth_prefix_str, neg_prefix.print_prefix()))
    return(neg_prefix)
if 24 - 24: O0 . I1ii11iIi11i / OOooOOo % IiII * Oo0Ooo / OoO0O00
if 67 - 67: Oo0Ooo * I11i - IiII + I1Ii111
if 90 - 90: iII111i % II111iiii % o0oOOo0O0Ooo + o0oOOo0O0Ooo + II111iiii
if 54 - 54: OoooooooOO . IiII - oO0o
if 26 - 26: o0oOOo0O0Ooo - i1IIi / I1ii11iIi11i / OoooooooOO . i1IIi
if 22 - 22: o0oOOo0O0Ooo * I1Ii111 * I1ii11iIi11i . OoOoOO00 . i1IIi % ooOoO0o
if 67 - 67: I11i
if 95 - 95: OoO0O00 % I1Ii111
def lisp_ms_compute_neg_prefix(eid, group):

    #
    # Map-Server variant of negative-prefix computation. Returns
    # [neg-eid-prefix, neg-group-prefix, referral-action]. When no DDT
    # cache entry covers the request, host-masks both prefixes and reports
    # not-authoritative; otherwise walks the site cache (and, for (S,G)
    # requests, the entry's source cache) to find the least-specific
    # uncovered prefixes.
    #
    neg_prefix = lisp_address(eid.afi, "", 0, 0)
    neg_prefix.copy_address(eid)
    neg_prefix.mask_len = 0

    gneg_prefix = lisp_address(group.afi, "", 0, 0)
    gneg_prefix.copy_address(group)
    gneg_prefix.mask_len = 0

    auth_prefix = None

    if (group.is_null()):
        ddt_entry = lisp_ddt_cache.lookup_cache(eid, False)
        if (ddt_entry == None):
            neg_prefix.mask_len = neg_prefix.host_mask_len()
            gneg_prefix.mask_len = gneg_prefix.host_mask_len()
            return([neg_prefix, gneg_prefix, LISP_DDT_ACTION_NOT_AUTH])
        #endif
        cache = lisp_sites_by_eid
        if (ddt_entry.is_auth_prefix()): auth_prefix = ddt_entry.eid
    else:
        ddt_entry = lisp_ddt_cache.lookup_cache(group, False)
        if (ddt_entry == None):
            neg_prefix.mask_len = neg_prefix.host_mask_len()
            gneg_prefix.mask_len = gneg_prefix.host_mask_len()
            return([neg_prefix, gneg_prefix, LISP_DDT_ACTION_NOT_AUTH])
        #endif
        if (ddt_entry.is_auth_prefix()): auth_prefix = ddt_entry.group

        #
        # Widen the group's negative prefix across the site cache first.
        #
        group, auth_prefix, gneg_prefix = lisp_sites_by_eid.walk_cache(
            lisp_neg_prefix_walk, (group, auth_prefix, gneg_prefix))

        gneg_prefix.mask_address(gneg_prefix.mask_len)

        lprint(("Least specific prefix computed from site-cache for " + \
            "group EID {} using auth-prefix {} is {}").format(
            group.print_address(), auth_prefix.print_prefix() if \
            (auth_prefix != None) else "'not found'",
            gneg_prefix.print_prefix()))

        #
        # Source EIDs of an (S,G) are held in the entry's source cache.
        #
        cache = ddt_entry.source_cache
    #endif

    action = LISP_DDT_ACTION_DELEGATION_HOLE if (auth_prefix != None) else \
        LISP_DDT_ACTION_NOT_AUTH

    #
    # Now widen the (source-)EID negative prefix over the selected cache
    # and zero the host bits.
    #
    eid, auth_prefix, neg_prefix = cache.walk_cache(lisp_neg_prefix_walk,
        (eid, auth_prefix, neg_prefix))

    neg_prefix.mask_address(neg_prefix.mask_len)

    lprint(("Least specific prefix computed from site-cache for EID {} " + \
        "using auth-prefix {} is {}").format(green(eid.print_address(),
        False), auth_prefix.print_prefix() if (auth_prefix != None) else \
        "'not found'", neg_prefix.print_prefix()))

    return([neg_prefix, gneg_prefix, action])
if 90 - 90: i1IIi + I1ii11iIi11i * oO0o % i11iIiiIii - OoO0O00
if 12 - 12: OoO0O00 . I1ii11iIi11i - I1IiiI % OOooOOo
if 9 - 9: Ii1I / O0
if 95 - 95: iII111i / I11i
if 86 - 86: O0 / II111iiii . Oo0Ooo / Oo0Ooo * II111iiii
if 22 - 22: Ii1I
if 81 - 81: iIii1I11I1II1 . ooOoO0o % I11i
if 64 - 64: I1Ii111 . Oo0Ooo * o0oOOo0O0Ooo
def lisp_ms_send_map_referral ( lisp_sockets , map_request , ecm_source , port ,
    action , eid_prefix , group_prefix ) :
    """
    Build and send a Map-Referral message from a Map-Server in response to
    a DDT Map-Request. The referral carries one EID-record describing
    eid_prefix/group_prefix with the supplied action code, plus one
    RLOC-record per delegation found in the DDT cache.
    """
    target_eid = map_request . target_eid
    target_group = map_request . target_group
    nonce = map_request . nonce

    if ( action == LISP_DDT_ACTION_MS_ACK ) : ttl = 1440

    #
    # Build the Map-Referral header first; the EID/RLOC records are
    # appended to the encoded packet below.
    #
    map_referral = lisp_map_referral ( )
    map_referral . record_count = 1
    map_referral . nonce = nonce
    packet = map_referral . encode ( )
    map_referral . print_map_referral ( )

    incomplete = False

    #
    # No site matched — compute the least-specific negative prefix to
    # return, which also refines the action code.
    #
    if ( action == LISP_DDT_ACTION_SITE_NOT_FOUND ) :
        eid_prefix , group_prefix , action = lisp_ms_compute_neg_prefix ( target_eid ,
            target_group )
        ttl = 15

    # Per-action record TTLs (in minutes).
    # NOTE(review): actions other than the ones listed leave ttl unbound
    # and would raise NameError below — presumably callers never pass
    # such actions; confirm before relying on it.
    if ( action == LISP_DDT_ACTION_MS_NOT_REG ) : ttl = 1
    if ( action == LISP_DDT_ACTION_MS_ACK ) : ttl = 1440
    if ( action == LISP_DDT_ACTION_DELEGATION_HOLE ) : ttl = 15
    if ( action == LISP_DDT_ACTION_NOT_AUTH ) : ttl = 0

    is_ms_peer = False
    rloc_count = 0
    ddt_entry = lisp_ddt_cache_lookup ( target_eid , target_group , False )
    if ( ddt_entry != None ) :
        rloc_count = len ( ddt_entry . delegation_set )
        is_ms_peer = ddt_entry . is_ms_peer_entry ( )
        ddt_entry . map_referrals_sent += 1

    #
    # The incomplete bit is set when we are not authoritative, or when we
    # are referring but are not a map-server peer.
    #
    if ( action == LISP_DDT_ACTION_NOT_AUTH ) : incomplete = True
    if ( action in ( LISP_DDT_ACTION_MS_REFERRAL , LISP_DDT_ACTION_MS_ACK ) ) :
        incomplete = ( is_ms_peer == False )

    #
    # Append the single EID-record.
    #
    eid_record = lisp_eid_record ( )
    eid_record . rloc_count = rloc_count
    eid_record . authoritative = True
    eid_record . action = action
    eid_record . ddt_incomplete = incomplete
    eid_record . eid = eid_prefix
    eid_record . group = group_prefix
    eid_record . record_ttl = ttl

    packet += eid_record . encode ( )
    eid_record . print_record ( " " , True )

    #
    # Append one RLOC-record per delegation child.
    #
    if ( rloc_count != 0 ) :
        for ddt_node in ddt_entry . delegation_set :
            rloc_record = lisp_rloc_record ( )
            rloc_record . rloc = ddt_node . delegate_address
            rloc_record . priority = ddt_node . priority
            rloc_record . weight = ddt_node . weight
            rloc_record . mpriority = 255
            rloc_record . mweight = 0
            rloc_record . reach_bit = True
            packet += rloc_record . encode ( )
            rloc_record . print_record ( " " )

    #
    # A non-zero nonce means the requester listens on the LISP control
    # port rather than its ephemeral source port.
    #
    if ( map_request . nonce != 0 ) : port = LISP_CTRL_PORT
    lisp_send_map_referral ( lisp_sockets , packet , ecm_source , port )
    return
if 38 - 38: II111iiii
if 50 - 50: OoOoOO00 . IiII - OOooOOo
if 46 - 46: iIii1I11I1II1
if 97 - 97: O0 * OOooOOo - o0oOOo0O0Ooo % o0oOOo0O0Ooo * II111iiii % I11i
if 65 - 65: iIii1I11I1II1 / OOooOOo
if 2 - 2: I11i - OOooOOo / o0oOOo0O0Ooo
if 14 - 14: I11i + Oo0Ooo + i11iIiiIii - i1IIi . O0
if 47 - 47: o0oOOo0O0Ooo / i1IIi * IiII
def lisp_send_negative_map_reply ( sockets , eid , group , nonce , dest , port , ttl ,
    xtr_id , pubsub ) :
    """
    Build a Map-Reply with an empty RLOC-set for eid/group and send it to
    'dest' at 'port'. When pubsub is set, register the subscription state
    instead of sending a plain reply.
    """
    lprint ( "Build negative Map-Reply EID-prefix {}, nonce 0x{} to ITR {}" . format ( lisp_print_eid_tuple ( eid , group ) , lisp_hex_string ( nonce ) ,
        red ( dest . print_address ( ) , False ) ) )

    # Unicast EIDs natively forward; multicast EIDs drop.
    action = LISP_NATIVE_FORWARD_ACTION if group . is_null ( ) else LISP_DROP_ACTION

    #
    # Crypto-EIDs ask the requester to send a Map-Request instead.
    #
    if ( lisp_get_eid_hash ( eid ) != None ) :
        action = LISP_SEND_MAP_REQUEST_ACTION

    packet = lisp_build_map_reply ( eid , group , [ ] , nonce , action , ttl , False ,
        None , False , False )

    if ( pubsub ) :
        lisp_process_pubsub ( sockets , packet , eid , dest , port , nonce , ttl ,
            xtr_id )
    else :
        lisp_send_map_reply ( sockets , packet , dest , port )
    return
if 64 - 64: i1IIi . o0oOOo0O0Ooo * I1Ii111 - O0
if 76 - 76: I1IiiI % Ii1I + OoO0O00 + I1ii11iIi11i * II111iiii + Oo0Ooo
if 3 - 3: Ii1I - I1IiiI + O0
if 90 - 90: Ii1I + OoooooooOO . i11iIiiIii / Oo0Ooo % OoOoOO00 / IiII
if 45 - 45: OoooooooOO / oO0o . I1ii11iIi11i + OOooOOo
if 54 - 54: Ii1I - o0oOOo0O0Ooo + OoOoOO00 / OoooooooOO
if 61 - 61: I11i / IiII % OoooooooOO - i11iIiiIii * i1IIi % o0oOOo0O0Ooo
def lisp_retransmit_ddt_map_request ( mr ) :
    """
    Timer callback: retransmit a queued DDT Map-Request 'mr'. Charges a
    no-response against the referral-node the last attempt was sent to,
    gives up (dequeues) once the retry limit is hit, otherwise resends and
    re-arms the retransmit timer.
    """
    source = mr . mr_source . print_address ( )
    eid_str = mr . print_eid_tuple ( )
    nonce = mr . nonce

    #
    # The previous attempt got no answer — bump the no-response counter on
    # the referral-node it was sent to, if it is still in the cache.
    #
    if ( mr . last_request_sent_to ) :
        node_addr = mr . last_request_sent_to . print_address ( )
        ref = lisp_referral_cache_lookup ( mr . last_cached_prefix [ 0 ] ,
            mr . last_cached_prefix [ 1 ] , True )
        # Fix: dict.has_key() is removed in Python 3 (and deprecated in
        # 2.x); the 'in' operator is the equivalent membership test.
        if ( ref and node_addr in ref . referral_set ) :
            ref . referral_set [ node_addr ] . no_responses += 1

    #
    # Retry limit reached — stop retransmitting and drop the queue entry.
    #
    if ( mr . retry_count == LISP_MAX_MAP_NOTIFY_RETRIES ) :
        lprint ( "DDT Map-Request retry limit reached for EID {}, nonce 0x{}" . format ( green ( eid_str , False ) , lisp_hex_string ( nonce ) ) )
        mr . dequeue_map_request ( )
        return

    mr . retry_count += 1

    s = green ( source , False )
    e = green ( eid_str , False )
    lprint ( "Retransmit DDT {} from {}ITR {} EIDs: {} -> {}, nonce 0x{}" . format ( bold ( "Map-Request" , False ) , "P" if mr . from_pitr else "" ,
        red ( mr . itr . print_address ( ) , False ) , s , e ,
        lisp_hex_string ( nonce ) ) )

    lisp_send_ddt_map_request ( mr , False )

    #
    # Re-arm the retransmit timer for the next attempt.
    #
    mr . retransmit_timer = threading . Timer ( LISP_DDT_MAP_REQUEST_INTERVAL ,
        lisp_retransmit_ddt_map_request , [ mr ] )
    mr . retransmit_timer . start ( )
    return
if 22 - 22: iIii1I11I1II1 % i11iIiiIii
if 29 - 29: ooOoO0o - iII111i + IiII % Ii1I - oO0o - ooOoO0o
if 43 - 43: oO0o
if 22 - 22: I1Ii111 + i11iIiiIii
if 49 - 49: O0 % II111iiii . OOooOOo + iII111i + iIii1I11I1II1 / i11iIiiIii
if 79 - 79: II111iiii + ooOoO0o - i1IIi - i1IIi + II111iiii . i1IIi
if 78 - 78: I1IiiI * I11i % OOooOOo + Ii1I + OoOoOO00
if 23 - 23: iII111i / Oo0Ooo % OoooooooOO * OoooooooOO . iII111i / I1ii11iIi11i
def lisp_get_referral_node ( referral , source_eid , dest_eid ) :
    """
    Choose one referral-node from 'referral'. Only nodes that are up are
    considered; among them the numerically lowest priority wins, and ties
    are broken deterministically by hashing (source_eid, dest_eid) so load
    is spread across equal-priority nodes. Returns None when no node is up.
    """
    # Gather every up node tied for the best (lowest) priority.
    best_nodes = [ ]
    for node in referral . referral_set . values ( ) :
        if ( node . updown == False ) : continue
        if ( not best_nodes or best_nodes [ 0 ] . priority == node . priority ) :
            best_nodes . append ( node )
        elif ( best_nodes [ 0 ] . priority > node . priority ) :
            best_nodes = [ node ]

    if ( not best_nodes ) : return ( None )

    # Hash-based index selects which of the tied nodes handles this flow.
    index = dest_eid . hash_address ( source_eid ) % len ( best_nodes )
    return ( best_nodes [ index ] )
if 5 - 5: ooOoO0o
if 7 - 7: IiII
if 39 - 39: iII111i / i1IIi % Oo0Ooo - o0oOOo0O0Ooo / OoO0O00 / iII111i
if 43 - 43: oO0o % O0 * I1ii11iIi11i + i11iIiiIii
if 16 - 16: i1IIi . I11i + OoO0O00 % Ii1I * IiII + I1IiiI
if 96 - 96: II111iiii + O0 - II111iiii
if 97 - 97: I1IiiI
def lisp_send_ddt_map_request ( mr , send_to_root ) :
    """
    Forward queued Map-Request 'mr' to the best referral-node found in the
    referral cache. When send_to_root is True, look up the root (zero)
    prefix instead of the request's EID. Sends a negative Map-Reply when
    no referral entry or no reachable referral-node exists.
    """
    sockets = mr . lisp_sockets
    nonce = mr . nonce
    itr = mr . itr
    mr_source = mr . mr_source
    eid_str = mr . print_eid_tuple ( )

    #
    # Hard cap on forwarding attempts for this queue entry.
    #
    if ( mr . send_count == 8 ) :
        lprint ( "Giving up on map-request-queue entry {}, nonce 0x{}" . format ( green ( eid_str , False ) , lisp_hex_string ( nonce ) ) )
        mr . dequeue_map_request ( )
        return

    #
    # Either escalate to the DDT root (zero-prefix lookup) or look up the
    # request's own EID/group.
    #
    if ( send_to_root ) :
        lookup_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
        lookup_group = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
        mr . tried_root = True
        lprint ( "Jumping up to root for EID {}" . format ( green ( eid_str , False ) ) )
    else :
        lookup_eid = mr . eid
        lookup_group = mr . group

    ref = lisp_referral_cache_lookup ( lookup_eid , lookup_group , False )
    if ( ref == None ) :
        lprint ( "No referral cache entry found" )
        lisp_send_negative_map_reply ( sockets , lookup_eid , lookup_group ,
            nonce , itr , mr . sport , 15 , None , False )
        return

    ref_str = ref . print_eid_tuple ( )
    lprint ( "Found referral cache entry {}, referral-type: {}" . format ( ref_str ,
        ref . print_referral_type ( ) ) )

    node = lisp_get_referral_node ( ref , mr_source , mr . eid )
    if ( node == None ) :
        lprint ( "No reachable referral-nodes found" )
        mr . dequeue_map_request ( )
        lisp_send_negative_map_reply ( sockets , ref . eid ,
            ref . group , nonce , itr , mr . sport , 1 , None , False )
        return

    lprint ( "Send DDT Map-Request to {} {} for EID {}, nonce 0x{}" . format ( node . referral_address . print_address ( ) ,
        ref . print_referral_type ( ) , green ( eid_str , False ) ,
        lisp_hex_string ( nonce ) ) )

    #
    # Encapsulate toward the referral-node; flag whether the target is a
    # map-server so the ECM is marked accordingly.
    #
    to_ms = ( ref . referral_type == LISP_DDT_ACTION_MS_REFERRAL or
        ref . referral_type == LISP_DDT_ACTION_MS_ACK )
    lisp_send_ecm ( sockets , mr . packet , mr_source , mr . sport , mr . eid ,
        node . referral_address , to_ms = to_ms , ddt = True )

    #
    # Record where/when we sent this attempt for the retransmit logic.
    #
    mr . last_request_sent_to = node . referral_address
    mr . last_sent = lisp_get_timestamp ( )
    mr . send_count += 1
    node . map_requests_sent += 1
    return
if 12 - 12: i11iIiiIii % ooOoO0o / iII111i . IiII
if 68 - 68: OOooOOo / iIii1I11I1II1 + I1IiiI . ooOoO0o * IiII
if 72 - 72: I1Ii111
if 51 - 51: OoOoOO00
if 61 - 61: Oo0Ooo / i1IIi + I1Ii111 - OoooooooOO / O0
if 25 - 25: I1ii11iIi11i * i11iIiiIii / i1IIi
if 69 - 69: OOooOOo % ooOoO0o - i1IIi . Oo0Ooo
if 35 - 35: iIii1I11I1II1 - I11i / iIii1I11I1II1 % ooOoO0o % I1IiiI
def lisp_mr_process_map_request ( lisp_sockets , packet , map_request , ecm_source ,
    sport , mr_source ) :
    """
    Map-Resolver handling of a received Map-Request: log it, queue a DDT
    Map-Request state entry, and launch the first DDT lookup attempt.
    """
    target_eid = map_request . target_eid
    target_group = map_request . target_group
    eid_str = map_request . print_eid_tuple ( )
    source = mr_source . print_address ( )
    nonce = map_request . nonce

    s = green ( source , False )
    e = green ( eid_str , False )
    lprint ( "Received Map-Request from {}ITR {} EIDs: {} -> {}, nonce 0x{}" . format ( "P" if map_request . pitr_bit else "" ,
        red ( ecm_source . print_address ( ) , False ) , s , e ,
        lisp_hex_string ( nonce ) ) )

    #
    # Build queue entry so replies/retransmits can find this request.
    #
    mr = lisp_ddt_map_request ( lisp_sockets , packet , target_eid , target_group , nonce )
    mr . packet = packet
    mr . itr = ecm_source
    mr . mr_source = mr_source
    mr . sport = sport
    mr . from_pitr = map_request . pitr_bit
    mr . queue_map_request ( )

    lisp_send_ddt_map_request ( mr , False )
    return
if 35 - 35: ooOoO0o % I11i * I1ii11iIi11i
if 10 - 10: OoO0O00 + OoooooooOO + I1Ii111
if 57 - 57: Ii1I % Ii1I * Oo0Ooo % i11iIiiIii
if 12 - 12: oO0o . Oo0Ooo . I1IiiI - i11iIiiIii / o0oOOo0O0Ooo
if 54 - 54: i11iIiiIii + I1Ii111 . I1Ii111 * I1ii11iIi11i % I1Ii111 - OoooooooOO
if 76 - 76: IiII + i1IIi + i11iIiiIii . oO0o
if 23 - 23: ooOoO0o - OoO0O00 + oO0o . OOooOOo - I1IiiI
def lisp_process_map_request ( lisp_sockets , packet , ecm_source , ecm_port ,
    mr_source , mr_port , ddt_request , ttl ) :
    """
    Top-level Map-Request dispatcher. Decodes the packet, then routes it
    to the RLOC-probe, SMR, ETR, Map-Server, Map-Resolver, and/or DDT
    handlers depending on packet flags and which roles this node runs.
    """
    saved_packet = packet
    map_request = lisp_map_request ( )
    packet = map_request . decode ( packet , mr_source , mr_port )
    if ( packet == None ) :
        lprint ( "Could not decode Map-Request packet" )
        return

    map_request . print_map_request ( )

    #
    # RLOC-probes are handled entirely by the probe logic.
    #
    if ( map_request . rloc_probe ) :
        lisp_process_rloc_probe_request ( lisp_sockets , map_request ,
            mr_source , mr_port , ttl )
        return

    #
    # Solicit-Map-Request handling (may also fall through to lookups).
    #
    if ( map_request . smr_bit ) :
        lisp_process_smr ( map_request )

    if ( map_request . smr_invoked_bit ) :
        lisp_process_smr_invoked_request ( map_request )

    #
    # ETR role answers for its own registered EIDs.
    #
    if ( lisp_i_am_etr ) :
        lisp_etr_process_map_request ( lisp_sockets , map_request , mr_source ,
            mr_port , ttl )

    #
    # Map-Server role: process against the site registry and, for DDT
    # requests, answer with a Map-Referral.
    #
    if ( lisp_i_am_ms ) :
        packet = saved_packet
        eid , group , action = lisp_ms_process_map_request ( lisp_sockets ,
            saved_packet , map_request , mr_source , mr_port , ecm_source )
        if ( ddt_request ) :
            lisp_ms_send_map_referral ( lisp_sockets , map_request , ecm_source ,
                ecm_port , action , eid , group )
        return

    #
    # Map-Resolver role (non-DDT requests only).
    #
    if ( lisp_i_am_mr and not ddt_request ) :
        lisp_mr_process_map_request ( lisp_sockets , saved_packet , map_request ,
            ecm_source , mr_port , mr_source )

    #
    # DDT-node role.
    #
    if ( lisp_i_am_ddt or ddt_request ) :
        packet = saved_packet
        lisp_ddt_process_map_request ( lisp_sockets , map_request , ecm_source ,
            ecm_port )
    return
if 70 - 70: ooOoO0o . iIii1I11I1II1 / oO0o
if 18 - 18: Ii1I / OoooooooOO % i1IIi * o0oOOo0O0Ooo
if 70 - 70: IiII % i1IIi / IiII - o0oOOo0O0Ooo . Oo0Ooo / O0
if 54 - 54: o0oOOo0O0Ooo
if 53 - 53: II111iiii / IiII . i1IIi + I1Ii111 / OoO0O00 - OoooooooOO
if 67 - 67: ooOoO0o . Ii1I - Oo0Ooo * iII111i . I11i - OOooOOo
if 10 - 10: I11i
if 37 - 37: o0oOOo0O0Ooo / I1IiiI * oO0o / II111iiii
def lisp_store_mr_stats ( source , nonce ) :
    """
    Account a negative Map-Reply against the map-resolver it came from:
    bump reply counters, fold the round-trip time into the running total
    when the nonce matches the outstanding request, and periodically reset
    the accumulators.
    """
    mr = lisp_get_map_resolver ( source , None )
    if ( mr == None ) : return

    mr . neg_map_replies_received += 1
    mr . last_reply = lisp_get_timestamp ( )

    # Restart the RTT accumulation window every 100 replies.
    if ( ( mr . neg_map_replies_received % 100 ) == 0 ) : mr . total_rtt = 0

    #
    # Only the reply matching the outstanding nonce contributes RTT.
    #
    if ( mr . last_nonce == nonce ) :
        mr . total_rtt += ( time . time ( ) - mr . last_used )
        mr . last_nonce = 0
    # Periodically clear a stale outstanding nonce.
    if ( ( mr . neg_map_replies_received % 10 ) == 0 ) : mr . last_nonce = 0
    return
if 15 - 15: I1IiiI
if 88 - 88: IiII / I1ii11iIi11i % I11i + i11iIiiIii * O0 . I1Ii111
if 69 - 69: Oo0Ooo - OOooOOo / I1IiiI . i11iIiiIii * OoO0O00
if 45 - 45: I1Ii111 + OOooOOo
if 78 - 78: OoOoOO00 . Oo0Ooo % I11i
if 7 - 7: I1ii11iIi11i % Ii1I . OoooooooOO - iII111i
if 18 - 18: O0 * OoooooooOO % IiII - iIii1I11I1II1 % IiII * o0oOOo0O0Ooo
def lisp_process_map_reply ( lisp_sockets , packet , source , ttl ) :
    """
    Decode a received Map-Reply and install/refresh map-cache state.

    For each EID-record: decode its RLOC-records, merge them with any
    existing cache entry, apply NAT-traversal RLOC filtering, prune
    JSON-only (no-address) RLOCs, then add or replace the map-cache entry
    and trigger RLOC-probes for brand-new entries.
    """
    global lisp_map_cache

    map_reply = lisp_map_reply ( )
    packet = map_reply . decode ( packet )
    if ( packet == None ) :
        lprint ( "Could not decode Map-Reply packet" )
        return

    map_reply . print_map_reply ( )

    #
    # Process each EID-record in the reply.
    #
    rloc_key_change = None
    for _ in range ( map_reply . record_count ) :
        eid_record = lisp_eid_record ( )
        packet = eid_record . decode ( packet )
        if ( packet == None ) :
            lprint ( "Could not decode EID-record in Map-Reply packet" )
            return

        eid_record . print_record ( " " , False )

        #
        # A record with no RLOCs is a negative reply — account it against
        # the map-resolver it came from.
        #
        if ( eid_record . rloc_count == 0 ) :
            lisp_store_mr_stats ( source , map_reply . nonce )

        multicast = ( eid_record . group . is_null ( ) == False )

        #
        # In decent-push mode, ignore drop-action multicast records for
        # locally sourced groups.
        #
        if ( lisp_decent_push_configured ) :
            action = eid_record . action
            if ( multicast and action == LISP_DROP_ACTION ) :
                if ( eid_record . eid . is_local ( ) ) : continue

        if ( eid_record . eid . is_null ( ) ) : continue

        #
        # Find any existing map-cache entry for this EID (or (S,G)).
        #
        if ( multicast ) :
            mc = lisp_map_cache_lookup ( eid_record . eid , eid_record . group )
        else :
            mc = lisp_map_cache . lookup_cache ( eid_record . eid , True )

        new_mc = ( mc == None )

        #
        # Decode and store each RLOC-record.
        #
        rloc_set = [ ]
        for _ in range ( eid_record . rloc_count ) :
            rloc_record = lisp_rloc_record ( )
            rloc_record . keys = map_reply . keys
            packet = rloc_record . decode ( packet , map_reply . nonce )
            if ( packet == None ) :
                lprint ( "Could not decode RLOC-record in Map-Reply packet" )
                return

            rloc_record . print_record ( " " )

            # Reuse the cached RLOC object when we already know this RLOC.
            old_rloc = mc . get_rloc ( rloc_record . rloc ) if ( mc ) else None
            rloc = old_rloc if ( old_rloc ) else lisp_rloc ( )

            port = rloc . store_rloc_from_record ( rloc_record , map_reply . nonce ,
                source )
            rloc . echo_nonce_capable = map_reply . echo_nonce_capable

            #
            # Create echo-nonce state the first time we see a capable RLOC.
            #
            if ( rloc . echo_nonce_capable ) :
                addr_str = rloc . rloc . print_address_no_iid ( )
                if ( lisp_get_echo_nonce ( None , addr_str ) == None ) :
                    lisp_echo_nonce ( addr_str )

            #
            # Gleaned entries keep their gleaned RLOC/port rather than the
            # one from the reply.
            #
            if ( mc and mc . gleaned ) :
                rloc = mc . rloc_set [ 0 ]
                port = rloc . translated_port

            #
            # Probe replies update RLOC reachability state.
            #
            if ( map_reply . rloc_probe and rloc_record . probe_bit ) :
                if ( rloc . rloc . afi == source . afi ) :
                    lisp_process_rloc_probe_reply ( rloc . rloc , source , port ,
                        map_reply . nonce , map_reply . hop_count , ttl )

            rloc_set . append ( rloc )

            # Remember a rekeyed RLOC so keys can be pushed to the
            # data-plane after the cache update.
            if ( lisp_data_plane_security and rloc . rloc_recent_rekey ( ) ) :
                rloc_key_change = rloc

        #
        # NAT-traversal filtering (not for probe replies): keep private
        # RLOCs (forced best priority/unreachable state); an xTR keeps
        # non-RTR (priority != 254) RLOCs, an RTR keeps RTR ones.
        #
        if ( map_reply . rloc_probe == False and lisp_nat_traversal ) :
            kept_rlocs = [ ]
            kept_strs = [ ]
            for rloc in rloc_set :
                if ( rloc . rloc . is_private_address ( ) ) :
                    rloc . priority = 1
                    rloc . state = LISP_RLOC_UNREACH_STATE
                    kept_rlocs . append ( rloc )
                    kept_strs . append ( rloc . rloc . print_address_no_iid ( ) )
                    continue

                if ( rloc . priority == 254 and lisp_i_am_rtr == False ) :
                    kept_rlocs . append ( rloc )
                    kept_strs . append ( rloc . rloc . print_address_no_iid ( ) )

                if ( rloc . priority != 254 and lisp_i_am_rtr ) :
                    kept_rlocs . append ( rloc )
                    kept_strs . append ( rloc . rloc . print_address_no_iid ( ) )

            if ( kept_strs != [ ] ) :
                rloc_set = kept_rlocs
                lprint ( "NAT-traversal optimized RLOC-set: {}" . format ( kept_strs ) )

        #
        # Prune JSON-only RLOC-records (no usable address) from the set
        # that goes into the map-cache.
        #
        with_address = [ ]
        for rloc in rloc_set :
            if ( rloc . json != None ) : continue
            with_address . append ( rloc )

        if ( with_address != [ ] ) :
            pruned = len ( rloc_set ) - len ( with_address )
            lprint ( "Pruning {} no-address RLOC-records for map-cache" . format ( pruned ) )
            rloc_set = with_address

        #
        # Probe replies must not replace an existing entry's RLOC-set.
        #
        if ( map_reply . rloc_probe and mc != None ) : rloc_set = mc . rloc_set

        #
        # If the RLOC-set changed, old RLOCs must leave the probe list.
        #
        rloc_set_change = new_mc
        if ( mc and rloc_set != mc . rloc_set ) :
            mc . delete_rlocs_from_rloc_probe_list ( )
            rloc_set_change = True

        #
        # Add a new entry, or rebuild the entry unless it was gleaned
        # (gleaned entries keep their state). Preserve uptime on replace.
        #
        uptime = mc . uptime if ( mc ) else None
        if ( mc == None or mc . gleaned == False ) :
            mc = lisp_mapping ( eid_record . eid , eid_record . group , rloc_set )
            mc . mapping_source = source
            mc . map_cache_ttl = eid_record . store_ttl ( )
            mc . action = eid_record . action
            mc . add_cache ( rloc_set_change )

        add_or_replace = "Add"
        if ( uptime ) :
            mc . uptime = uptime
            add_or_replace = "Replace"

        lprint ( "{} {} map-cache with {} RLOCs" . format ( add_or_replace ,
            green ( mc . print_eid_tuple ( ) , False ) , len ( rloc_set ) ) )

        #
        # Push rekeyed keys to an external data-plane, if one is attached.
        #
        if ( lisp_ipc_dp_socket and rloc_key_change != None ) :
            lisp_write_ipc_keys ( rloc_key_change )

        #
        # Brand-new entry: immediately probe its best RLOCs.
        #
        if ( new_mc ) :
            probe = bold ( "RLOC-probe" , False )
            for rloc in mc . best_rloc_set :
                addr_str = red ( rloc . rloc . print_address_no_iid ( ) , False )
                lprint ( "Trigger {} to {}" . format ( probe , addr_str ) )
                lisp_send_map_request ( lisp_sockets , 0 , mc . eid , mc . group , rloc )
    return
if 70 - 70: iIii1I11I1II1 / I1IiiI * OoOoOO00 / IiII / II111iiii + I1IiiI
if 33 - 33: oO0o
if 1 - 1: OoOoOO00 . i11iIiiIii % I1Ii111 + OoooooooOO - Oo0Ooo . I1ii11iIi11i
if 46 - 46: i11iIiiIii + I11i - iIii1I11I1II1 / OoO0O00 - ooOoO0o / i1IIi
if 44 - 44: o0oOOo0O0Ooo + Oo0Ooo
if 46 - 46: OOooOOo % I1IiiI
if 66 - 66: iIii1I11I1II1 . o0oOOo0O0Ooo - ooOoO0o
if 27 - 27: Oo0Ooo - i1IIi * OoooooooOO - OoOoOO00 + OoOoOO00
def lisp_compute_auth(packet, map_register, password):
    """
    Compute the authentication hash for a Map-Register and encode it into
    the packet. Returns the packet untouched when no authentication
    algorithm is configured on the Map-Register.
    """
    if (map_register.alg_id == LISP_NONE_ALG_ID): return(packet)

    # The hash is computed over the packet with the auth field zeroed.
    zeroed_packet = map_register.zero_auth(packet)
    auth_hash = lisp_hash_me(zeroed_packet, map_register.alg_id, password,
        False)

    map_register.auth_data = auth_hash
    return(map_register.encode_auth(zeroed_packet))
if 41 - 41: Ii1I + IiII
if 37 - 37: I1Ii111 / o0oOOo0O0Ooo - ooOoO0o - OoooooooOO . I1ii11iIi11i % I1Ii111
if 53 - 53: I1IiiI % OOooOOo + Ii1I - Ii1I
if 99 - 99: i1IIi * OoOoOO00 - i1IIi
if 65 - 65: OoO0O00 / i11iIiiIii + I1ii11iIi11i + OoOoOO00
if 82 - 82: Ii1I * OOooOOo % ooOoO0o / OoO0O00 - Oo0Ooo . I1Ii111
if 90 - 90: I11i * i11iIiiIii % i1IIi + I1Ii111 / OoO0O00
def lisp_hash_me(packet, alg_id, password, do_hex):
    """
    HMAC the packet contents with the shared password. Returns True when
    no algorithm is configured, otherwise the digest -- a hex string when
    do_hex is set, raw bytes otherwise.
    """
    if (alg_id == LISP_NONE_ALG_ID): return(True)

    # Select the underlying hash function from the LISP alg-id.
    if (alg_id == LISP_SHA_1_96_ALG_ID):
        hash_function = hashlib.sha1
    if (alg_id == LISP_SHA_256_128_ALG_ID):
        hash_function = hashlib.sha256

    hasher = hmac.new(password, packet, hash_function)
    return(hasher.hexdigest() if do_hex else hasher.digest())
if 45 - 45: I1Ii111
if 49 - 49: i1IIi * iII111i - iIii1I11I1II1 % I11i * O0 / OoOoOO00
if 48 - 48: IiII
if 69 - 69: o0oOOo0O0Ooo % i11iIiiIii - OOooOOo - o0oOOo0O0Ooo
if 98 - 98: o0oOOo0O0Ooo * OoO0O00 . OoooooooOO
if 40 - 40: I1Ii111 + Oo0Ooo + I1Ii111
if 57 - 57: I1Ii111 / II111iiii % iII111i
if 32 - 32: IiII - OOooOOo + i11iIiiIii + I1IiiI . iII111i
def lisp_verify_auth(packet, alg_id, auth_data, password):
    """
    Recompute the authentication hash over the packet and compare it with
    the value carried in the message. Returns True on match or when no
    authentication algorithm is in use.
    """
    if (alg_id == LISP_NONE_ALG_ID): return(True)

    computed_hash = lisp_hash_me(packet, alg_id, password, True)
    matched = (computed_hash == auth_data)

    if (matched == False):
        lprint("Hashed value: {} does not match packet value: {}".format(
            computed_hash, auth_data))
    return(matched)
if 10 - 10: I11i
if 24 - 24: Ii1I
if 30 - 30: II111iiii / Ii1I - I11i - OoO0O00
if 25 - 25: I11i % i1IIi / I11i * i11iIiiIii
if 71 - 71: IiII % I11i - OoooooooOO + I1IiiI / Oo0Ooo % I11i
if 6 - 6: i1IIi * i11iIiiIii + ooOoO0o - IiII
if 97 - 97: iIii1I11I1II1 * i1IIi * II111iiii - OOooOOo - Oo0Ooo - iIii1I11I1II1
def lisp_retransmit_map_notify(map_notify):
    """
    Timer callback: retransmit a Map-Notify that has not been acked yet.

    After LISP_MAX_MAP_NOTIFY_RETRIES attempts the pending entry is
    removed from lisp_map_notify_queue and its timer cancelled; otherwise
    the stored packet is resent and the retransmit timer re-armed.
    """
    dest = map_notify.etr
    port = map_notify.etr_port

    #
    # Retry limit reached -- dequeue and give up.
    #
    if (map_notify.retry_count == LISP_MAX_MAP_NOTIFY_RETRIES):
        lprint("Map-Notify with nonce 0x{} retry limit reached for ETR {}".format(map_notify.nonce_key, red(dest.print_address(), False)))

        key = map_notify.nonce_key
        if (lisp_map_notify_queue.has_key(key)):
            map_notify.retransmit_timer.cancel()
            lprint("Dequeue Map-Notify from retransmit queue, key is: {}".format(key))

            # Fix: was a bare "except:" which also swallowed
            # KeyboardInterrupt/SystemExit; pop() only raises KeyError here.
            try:
                lisp_map_notify_queue.pop(key)
            except KeyError:
                lprint("Key not found in Map-Notify queue")
        return

    sockets = map_notify.lisp_sockets
    map_notify.retry_count += 1

    lprint("Retransmit {} with nonce 0x{} to xTR {}, retry {}".format(bold("Map-Notify", False), map_notify.nonce_key,
        red(dest.print_address(), False), map_notify.retry_count))

    lisp_send_map_notify(sockets, map_notify.packet, dest, port)
    if (map_notify.site): map_notify.site.map_notifies_sent += 1

    # Re-arm the timer for the next retry.
    map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
        lisp_retransmit_map_notify, [map_notify])
    map_notify.retransmit_timer.start()
    return
if 15 - 15: OoO0O00 . oO0o - o0oOOo0O0Ooo
if 28 - 28: OOooOOo * OoOoOO00 + OoooooooOO . OOooOOo / oO0o / OoOoOO00
if 94 - 94: OoO0O00 / i1IIi . OoO0O00 . I1Ii111 + OoO0O00
if 30 - 30: o0oOOo0O0Ooo + iIii1I11I1II1 - II111iiii - ooOoO0o + OoOoOO00 - II111iiii
if 69 - 69: oO0o / O0 / I1IiiI + OoooooooOO * I11i * IiII
if 41 - 41: ooOoO0o % i11iIiiIii
if 69 - 69: IiII - oO0o
def lisp_send_merged_map_notify(lisp_sockets, parent, map_register,
    eid_record):
    """
    Send a Map-Notify carrying the merged RLOC-set of 'parent' to every
    registered RLOC. One Map-Notify per ETR is built, queued for
    retransmission, authenticated, and sent.
    """
    #
    # Encode the EID-record once; all per-ETR notifies share it.
    #
    eid_record.rloc_count = len(parent.registered_rlocs)
    records = eid_record.encode()
    eid_record.print_record("Merged Map-Notify ", False)

    #
    # Append one RLOC-record per registered RLOC.
    #
    for rloc_entry in parent.registered_rlocs:
        rloc_record = lisp_rloc_record()
        rloc_record.store_rloc_entry(rloc_entry)
        records += rloc_record.encode()
        rloc_record.print_record("    ")
        del(rloc_record)

    #
    # Build, queue, and send a Map-Notify to each ETR in the RLOC-set.
    #
    for rloc_entry in parent.registered_rlocs:
        etr = rloc_entry.rloc
        map_notify = lisp_map_notify(lisp_sockets)
        map_notify.record_count = 1
        key_id = map_register.key_id
        map_notify.key_id = key_id
        map_notify.alg_id = map_register.alg_id
        map_notify.auth_len = map_register.auth_len
        map_notify.nonce = map_register.nonce
        map_notify.nonce_key = lisp_hex_string(map_notify.nonce)
        map_notify.etr.copy_address(etr)
        map_notify.etr_port = map_register.sport
        map_notify.site = parent.site
        packet = map_notify.encode(records, parent.site.auth_key[key_id])
        map_notify.print_notify()

        #
        # Replace any Map-Notify already pending for this nonce.
        #
        key = map_notify.nonce_key
        if (key in lisp_map_notify_queue):
            old_notify = lisp_map_notify_queue[key]
            old_notify.retransmit_timer.cancel()
            del(old_notify)
        lisp_map_notify_queue[key] = map_notify

        lprint("Send merged Map-Notify to ETR {}".format(red(etr.print_address(), False)))

        lisp_send(lisp_sockets, etr, LISP_CTRL_PORT, packet)

        parent.site.map_notifies_sent += 1

        # Arm the retransmission timer for this notify.
        map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
            lisp_retransmit_map_notify, [map_notify])
        map_notify.retransmit_timer.start()

    return
if 45 - 45: I1IiiI . OoOoOO00 . OoOoOO00
if 20 - 20: OoOoOO00
if 69 - 69: OoOoOO00 * Ii1I % ooOoO0o . OoOoOO00 / oO0o * I1Ii111
if 93 - 93: OoO0O00 % IiII % ooOoO0o . I1IiiI
if 96 - 96: II111iiii
if 73 - 73: II111iiii
if 81 - 81: I1IiiI + OoO0O00
def lisp_build_map_notify(lisp_sockets, eid_records, eid_list, record_count,
    source, port, nonce, key_id, alg_id, auth_len, site, map_register_ack):
    """
    Build and send a Map-Notify for the supplied EID-records. When it is
    not a plain Map-Register ack, the message is queued so it can be
    retransmitted until acknowledged.
    """
    key = lisp_hex_string(nonce) + source.print_address()

    #
    # Clear out any queued notify covering the same EIDs, then bail if a
    # notify for this nonce/source is already pending.
    #
    lisp_remove_eid_from_map_notify_queue(eid_list)
    if (key in lisp_map_notify_queue):
        map_notify = lisp_map_notify_queue[key]
        xtr_str = red(source.print_address_no_iid(), False)
        lprint("Map-Notify with nonce 0x{} pending for xTR {}".format(lisp_hex_string(map_notify.nonce), xtr_str))
        return

    map_notify = lisp_map_notify(lisp_sockets)
    map_notify.record_count = record_count
    map_notify.key_id = key_id
    map_notify.alg_id = alg_id
    map_notify.auth_len = auth_len
    map_notify.nonce = nonce
    map_notify.nonce_key = lisp_hex_string(nonce)
    map_notify.etr.copy_address(source)
    map_notify.etr_port = port
    map_notify.site = site
    map_notify.eid_list = eid_list

    #
    # Only RLOC-set-change notifies expect an ack, so only those are
    # queued for retransmission.
    #
    if (map_register_ack == False):
        key = map_notify.nonce_key
        lisp_map_notify_queue[key] = map_notify

    if (map_register_ack):
        lprint("Send Map-Notify to ack Map-Register")
    else:
        lprint("Send Map-Notify for RLOC-set change")

    packet = map_notify.encode(eid_records, site.auth_key[key_id])
    map_notify.print_notify()

    if (map_register_ack == False):
        eid_record = lisp_eid_record()
        eid_record.decode(eid_records)
        eid_record.print_record("    ", False)

    lisp_send_map_notify(lisp_sockets, packet, map_notify.etr, port)
    site.map_notifies_sent += 1

    # An ack needs no retransmission machinery.
    if (map_register_ack): return

    map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
        lisp_retransmit_map_notify, [map_notify])
    map_notify.retransmit_timer.start()
    return
if 80 - 80: oO0o * I1Ii111
if 87 - 87: iII111i + OoOoOO00 % ooOoO0o - oO0o
if 40 - 40: i1IIi / OoOoOO00 - I11i / ooOoO0o . Ii1I
if 8 - 8: I1IiiI . IiII . OOooOOo . O0
if 3 - 3: Ii1I + i11iIiiIii
if 87 - 87: ooOoO0o - iII111i % I11i
if 88 - 88: I11i . OoooooooOO
if 86 - 86: Ii1I - I1IiiI - iII111i % Ii1I . I1ii11iIi11i % i1IIi
def lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms):
    """
    Turn a received Map-Notify into a Map-Notify-Ack, authenticate it with
    the map-server password, and send it back to the map-server 'ms'.
    """
    map_notify.map_notify_ack = True

    # Re-encode with the ack bit set and the map-server's auth key.
    packet = map_notify.encode(eid_records, ms.password)
    map_notify.print_notify()

    dest = ms.map_server
    lprint("Send Map-Notify-Ack to {}".format(
        red(dest.print_address(), False)))
    lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
    return
if 3 - 3: OOooOOo / ooOoO0o / I1Ii111 . I11i
if 54 - 54: I1ii11iIi11i - I1IiiI . OoOoOO00
if 36 - 36: OoO0O00 * I1IiiI / iII111i
if 95 - 95: Ii1I . Oo0Ooo
if 42 - 42: IiII . i1IIi % O0 * ooOoO0o - OOooOOo % ooOoO0o
if 99 - 99: i1IIi + OoOoOO00 - iII111i % II111iiii
if 6 - 6: ooOoO0o - I1Ii111 . OoOoOO00
if 64 - 64: iII111i + I1ii11iIi11i
def lisp_send_multicast_map_notify(lisp_sockets, site_eid, eid_list, xtr):
    """
    Send an unauthenticated Map-Notify for a multicast (S,G) site-EID to
    the given xTR, queue it for retransmission, and arm the retry timer.
    When RTRs are present in the RLOC-set, only the RTR RLOCs are
    advertised (unless the target xTR is itself one of the RTRs).
    """
    map_notify = lisp_map_notify(lisp_sockets)
    map_notify.record_count = 1
    map_notify.nonce = lisp_get_control_nonce()
    map_notify.nonce_key = lisp_hex_string(map_notify.nonce)
    map_notify.etr.copy_address(xtr)
    map_notify.etr_port = LISP_CTRL_PORT
    map_notify.eid_list = eid_list
    key = map_notify.nonce_key

    #
    # Drop queued notifies for the same EIDs; bail if one is already
    # pending under this nonce.
    #
    lisp_remove_eid_from_map_notify_queue(map_notify.eid_list)
    if (key in lisp_map_notify_queue):
        map_notify = lisp_map_notify_queue[key]
        lprint("Map-Notify with nonce 0x{} pending for ITR {}".format(map_notify.nonce, red(xtr.print_address_no_iid(), False)))
        return

    lisp_map_notify_queue[key] = map_notify

    #
    # Advertise only RTR RLOCs when the set contains RTRs -- unless the
    # destination xTR is one of those RTRs.
    #
    notify_rtrs = site_eid.rtrs_in_rloc_set()
    if (notify_rtrs):
        if (site_eid.is_rtr_in_rloc_set(xtr)): notify_rtrs = False

    #
    # Build the EID-record; count only the RLOCs that pass the RTR filter.
    #
    eid_record = lisp_eid_record()
    eid_record.record_ttl = 1440
    eid_record.eid.copy_address(site_eid.eid)
    eid_record.group.copy_address(site_eid.group)
    eid_record.rloc_count = 0
    for rloc_entry in site_eid.registered_rlocs:
        if (notify_rtrs ^ rloc_entry.is_rtr()): continue
        eid_record.rloc_count += 1

    packet = eid_record.encode()

    map_notify.print_notify()
    eid_record.print_record("    ", False)

    #
    # Append the filtered RLOC-records.
    #
    for rloc_entry in site_eid.registered_rlocs:
        if (notify_rtrs ^ rloc_entry.is_rtr()): continue
        rloc_record = lisp_rloc_record()
        rloc_record.store_rloc_entry(rloc_entry)
        packet += rloc_record.encode()
        rloc_record.print_record("    ")

    # No authentication on multicast notifies (empty password).
    packet = map_notify.encode(packet, "")
    if (packet == None): return

    lisp_send_map_notify(lisp_sockets, packet, xtr, LISP_CTRL_PORT)

    map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
        lisp_retransmit_map_notify, [map_notify])
    map_notify.retransmit_timer.start()
    return
if 18 - 18: Oo0Ooo
if 8 - 8: O0 + iIii1I11I1II1 - O0
if 67 - 67: O0
if 22 - 22: I11i / i1IIi . II111iiii % ooOoO0o / I11i - Ii1I
if 28 - 28: O0 - Oo0Ooo
if 58 - 58: iIii1I11I1II1 - OoooooooOO - iII111i
if 43 - 43: ooOoO0o / o0oOOo0O0Ooo
def lisp_queue_multicast_map_notify(lisp_sockets, rle_list):
    """
    For each (source, group) in rle_list, figure out which ITRs/RTRs or
    existing RLE-nodes need to learn about the registration change and
    send each of them a multicast Map-Notify.
    """
    null_group = lisp_address(LISP_AFI_NONE, "", 0, 0)

    for source_group in rle_list:
        mc_site_eid = lisp_site_eid_lookup(source_group[0], source_group[1],
            True)
        if (mc_site_eid == None): continue

        #
        # If no merged RLOC-set is registered, merge the RTR RLOCs from
        # the individual registrations (deduped by printed address).
        #
        rloc_set = mc_site_eid.registered_rlocs
        if (len(rloc_set) == 0):
            merged_rtrs = {}
            for registration in mc_site_eid.individual_registrations.values():
                for rloc_entry in registration.registered_rlocs:
                    if (rloc_entry.is_rtr() == False): continue
                    merged_rtrs[rloc_entry.rloc.print_address()] = rloc_entry

            rloc_set = merged_rtrs.values()

        #
        # Choose the notification targets: for the (0.0.0.0/0, G) entry,
        # notify the existing RLE nodes; otherwise notify the RTRs.
        #
        notify_targets = []
        notify_rtrs = False
        if (mc_site_eid.eid.address == 0 and mc_site_eid.eid.mask_len == 0):
            printed = []
            rle_nodes = [] if len(rloc_set) == 0 else rloc_set[0].rle.rle_nodes

            for rle_node in rle_nodes:
                notify_targets.append(rle_node.address)
                printed.append(rle_node.address.print_address_no_iid())

            lprint("Notify existing RLE-nodes {}".format(printed))
        else:
            for rloc_entry in rloc_set:
                if (rloc_entry.is_rtr()): notify_targets.append(rloc_entry.rloc)

        #
        # No RTRs registered -- fall back to the ITRs registered for the
        # unicast source-EID.
        #
        notify_rtrs = (len(notify_targets) != 0)
        if (notify_rtrs == False):
            source_site = lisp_site_eid_lookup(source_group[0], null_group,
                False)
            if (source_site == None): continue

            for rloc_entry in source_site.registered_rlocs:
                if (rloc_entry.rloc.is_null()): continue
                notify_targets.append(rloc_entry.rloc)

        if (len(notify_targets) == 0):
            lprint("No ITRs or RTRs found for {}, Map-Notify suppressed".format(green(mc_site_eid.print_eid_tuple(), False)))

            continue

        #
        # Fan out one Map-Notify per target, pacing the sends slightly.
        #
        for target in notify_targets:
            lprint("Build Map-Notify to {}TR {} for {}".format("R" if notify_rtrs else "x", red(target.print_address_no_iid(), False),
                green(mc_site_eid.print_eid_tuple(), False)))

            eids = [mc_site_eid.print_eid_tuple()]
            lisp_send_multicast_map_notify(lisp_sockets, mc_site_eid, eids,
                target)
            time.sleep(.001)

    return
if 42 - 42: IiII - IiII - I1ii11iIi11i + i1IIi * Oo0Ooo
if 80 - 80: oO0o + O0
if 84 - 84: i1IIi - II111iiii
if 2 - 2: i11iIiiIii - OoO0O00 * Oo0Ooo
if 100 - 100: I1Ii111
if 5 - 5: IiII % oO0o . I1IiiI * II111iiii + o0oOOo0O0Ooo / Ii1I
if 55 - 55: Oo0Ooo / o0oOOo0O0Ooo
if 51 - 51: I1IiiI + i11iIiiIii / ooOoO0o % I1IiiI + Oo0Ooo
def lisp_find_sig_in_rloc_set(packet, rloc_count):
    """
    Decode rloc_count RLOC-records from the packet and return the first
    one whose JSON payload carries a "signature" key, or None when no
    record has one (or the JSON is unparsable).
    """
    for _ in range(rloc_count):
        rloc_record = lisp_rloc_record()
        packet = rloc_record.decode(packet, None)
        json_blob = rloc_record.json
        if (json_blob == None): continue

        try:
            parsed = json.loads(json_blob.json_string)
        except:
            lprint("Found corrupted JSON signature")
            continue

        if ("signature" not in parsed): continue
        return(rloc_record)

    return(None)
if 84 - 84: OoooooooOO - Oo0Ooo
if 86 - 86: O0 + OoO0O00 + O0 . I1IiiI
if 82 - 82: OoOoOO00
if 61 - 61: oO0o . o0oOOo0O0Ooo
if 82 - 82: Oo0Ooo * OoooooooOO / ooOoO0o / I1IiiI
if 70 - 70: I1IiiI
if 74 - 74: ooOoO0o * II111iiii
if 96 - 96: i11iIiiIii . I1IiiI - II111iiii . I11i
if 79 - 79: OoO0O00 . OoOoOO00 - i1IIi + Ii1I * i11iIiiIii . OoooooooOO
if 83 - 83: o0oOOo0O0Ooo / oO0o
if 24 - 24: Ii1I + oO0o / OoooooooOO % i11iIiiIii
if 1 - 1: iII111i / I1Ii111 * I1IiiI + OoOoOO00 . OoooooooOO
if 5 - 5: I1IiiI
if 74 - 74: i1IIi * Oo0Ooo - OoOoOO00 * o0oOOo0O0Ooo
if 85 - 85: iIii1I11I1II1 * IiII / i11iIiiIii - ooOoO0o - o0oOOo0O0Ooo
if 30 - 30: OoOoOO00 - OOooOOo . Oo0Ooo
if 11 - 11: IiII - I1Ii111 - OoO0O00 * o0oOOo0O0Ooo
if 99 - 99: O0 - OoO0O00
if 95 - 95: Ii1I . IiII * o0oOOo0O0Ooo
def lisp_get_eid_hash ( eid ) :
    """
    Return the hash portion of a crypto-EID as a printable string of
    colon-separated hex groups (IPv6-address style), or None when 'eid'
    does not fall under any prefix configured in lisp_eid_hashes.

    NOTE: Python-2 idioms below -- '/' is integer division and
    hex(...)[2:-1] strips the trailing 'L' of a Py2 long. Assumes
    eid.address is a Py2 long (128-bit address) -- TODO confirm.
    """
    oOo0oo0OOoOo = None
    for iiI11Iiii in lisp_eid_hashes :
        if 22 - 22: o0oOOo0O0Ooo . IiII * I1Ii111 / ooOoO0o
        if 56 - 56: iII111i
        if 95 - 95: OoooooooOO - Ii1I + I1Ii111 - I11i + O0 . O0
        if 8 - 8: OoO0O00 % I11i + I1IiiI / IiII - OOooOOo + i1IIi
        # An instance-id of -1 is a wildcard: borrow the EID's own
        # instance-id for the more-specific test, then restore it.
        o0OOoOO = iiI11Iiii . instance_id
        if ( o0OOoOO == - 1 ) : iiI11Iiii . instance_id = eid . instance_id
        if 3 - 3: iII111i - o0oOOo0O0Ooo / I1Ii111
        oooO0OOo0O0O0 = eid . is_more_specific ( iiI11Iiii )
        iiI11Iiii . instance_id = o0OOoOO
        if ( oooO0OOo0O0O0 ) :
            # Number of low-order hash bits = 128 minus the prefix length.
            oOo0oo0OOoOo = 128 - iiI11Iiii . mask_len
            break
        if 10 - 10: I11i + OoooooooOO / iII111i * OOooOOo
    if 39 - 39: OoOoOO00
    if ( oOo0oo0OOoOo == None ) : return ( None )
    if 61 - 61: OoooooooOO / ooOoO0o . i1IIi . Oo0Ooo % OoOoOO00 * OoO0O00
    # Emit the hash bits 16 at a time, least-significant group first,
    # prepending each group so the result reads most-significant first.
    oOoO0Oo0 = eid . address
    i1O00oOO = ""
    for Ii11 in range ( 0 , oOo0oo0OOoOo / 16 ) :
        o0o0O00 = oOoO0Oo0 & 0xffff
        o0o0O00 = hex ( o0o0O00 ) [ 2 : - 1 ]
        i1O00oOO = o0o0O00 . zfill ( 4 ) + ":" + i1O00oOO
        oOoO0Oo0 >>= 16
    if 39 - 39: oO0o
    # Hash length not a multiple of 16 bits: emit one final 8-bit group.
    if ( oOo0oo0OOoOo % 16 != 0 ) :
        o0o0O00 = oOoO0Oo0 & 0xff
        o0o0O00 = hex ( o0o0O00 ) [ 2 : - 1 ]
        i1O00oOO = o0o0O00 . zfill ( 2 ) + ":" + i1O00oOO
    if 49 - 49: I1IiiI * I1Ii111 . I1IiiI - II111iiii
    # Drop the trailing ":" left by the prepend loop.
    return ( i1O00oOO [ 0 : - 1 ] )
if 57 - 57: oO0o + O0 - OoOoOO00
if 14 - 14: II111iiii + i11iIiiIii + Ii1I / o0oOOo0O0Ooo . OoO0O00
if 93 - 93: o0oOOo0O0Ooo + i1IIi
if 24 - 24: i1IIi
if 54 - 54: iIii1I11I1II1 - IiII + o0oOOo0O0Ooo + I1ii11iIi11i + IiII
if 99 - 99: Oo0Ooo
if 38 - 38: I1ii11iIi11i - I1IiiI
if 50 - 50: iII111i % OoO0O00 - oO0o + Oo0Ooo . O0 . iII111i
if 42 - 42: iII111i + I1ii11iIi11i
if 44 - 44: I1ii11iIi11i % IiII
if 1 - 1: Oo0Ooo + IiII - I1Ii111 / I1Ii111
def lisp_lookup_public_key(eid):
    """
    For a crypto-EID, look up the distinguished-name registration
    "hash-<eid-hash>" in the EID's instance-id and pull the "public-key"
    value out of the registered RLOC JSON.

    Returns [hash_eid, public_key, lookup_succeeded].
    """
    iid = eid.instance_id

    # Non crypto-EIDs carry no hash, so there is nothing to look up.
    eid_hash = lisp_get_eid_hash(eid)
    if (eid_hash == None): return([None, None, False])

    eid_hash = "hash-" + eid_hash
    hash_eid = lisp_address(LISP_AFI_NAME, eid_hash, len(eid_hash), iid)
    group = lisp_address(LISP_AFI_NONE, "", 0, iid)

    site_eid = lisp_site_eid_lookup(hash_eid, group, True)
    if (site_eid == None): return([hash_eid, None, False])

    #
    # Scan the registered RLOCs for a JSON blob with a "public-key".
    #
    pubkey = None
    for rloc in site_eid.registered_rlocs:
        json_blob = rloc.json
        if (json_blob == None): continue
        try:
            json_blob = json.loads(json_blob.json_string)
        except:
            lprint("Registered RLOC JSON format is invalid for {}".format(eid_hash))

            return([hash_eid, None, False])

        if ("public-key" not in json_blob): continue
        pubkey = json_blob["public-key"]
        break

    return([hash_eid, pubkey, True])
if 8 - 8: I1ii11iIi11i + Oo0Ooo - iII111i
if 53 - 53: ooOoO0o / IiII
if 36 - 36: iIii1I11I1II1
if 78 - 78: II111iiii * I11i
if 47 - 47: Ii1I
if 42 - 42: I11i . oO0o - I1IiiI / OoO0O00
if 75 - 75: I1IiiI / OoOoOO00 . I11i * iIii1I11I1II1
if 53 - 53: iIii1I11I1II1
def lisp_verify_cga_sig ( eid , rloc_record ) :
    """
    Verify the ECDSA signature carried in an RLOC-record's JSON blob.
    The signed data is the printed signature-EID address; the public key
    comes from the "hash-..." site registration for that EID. Returns
    True only when the key is found and the signature verifies.
    """
    if 8 - 8: O0 - O0 - II111iiii
    if 77 - 77: i1IIi - ooOoO0o + O0 . OoO0O00 * I1Ii111 - I11i
    if 64 - 64: i1IIi + OoooooooOO + OOooOOo / ooOoO0o % I1IiiI . OoooooooOO
    if 96 - 96: II111iiii - OoOoOO00 + oO0o
    if 80 - 80: oO0o / OoOoOO00 - I11i / oO0o - iII111i - OoooooooOO
    # Parse the signature JSON carried in the RLOC-record.
    IiiiI1I1i = json . loads ( rloc_record . json . json_string )
    if 57 - 57: o0oOOo0O0Ooo
    # A crypto-EID signs itself; otherwise the JSON must name an
    # explicit "signature-eid" (an IPv6-formatted address).
    if ( lisp_get_eid_hash ( eid ) ) :
        O0O0o0OOOooo0 = eid
    elif ( IiiiI1I1i . has_key ( "signature-eid" ) ) :
        i1IiII11ii11 = IiiiI1I1i [ "signature-eid" ]
        O0O0o0OOOooo0 = lisp_address ( LISP_AFI_IPV6 , i1IiII11ii11 , 0 , 0 )
    else :
        lprint ( "  No signature-eid found in RLOC-record" )
        return ( False )
    if 2 - 2: iIii1I11I1II1 * I1ii11iIi11i - OoooooooOO
    if 93 - 93: iII111i % ooOoO0o * Oo0Ooo
    if 34 - 34: O0 * oO0o
    if 58 - 58: OOooOOo . iII111i - Oo0Ooo / iII111i . I11i
    if 86 - 86: iIii1I11I1II1 - iII111i % Ii1I
    # Look up the registered public key for the hashed signature-EID.
    iIIiI11I1 , OO000OO , III11 = lisp_lookup_public_key ( O0O0o0OOOooo0 )
    if ( iIIiI11I1 == None ) :
        oOoo0OooOOo00 = green ( O0O0o0OOOooo0 . print_address ( ) , False )
        lprint ( "  Could not parse hash in EID {}" . format ( oOoo0OooOOo00 ) )
        return ( False )
    if 68 - 68: O0 * iIii1I11I1II1 . I1IiiI . OOooOOo - IiII
    if 79 - 79: i11iIiiIii - I1Ii111
    OOoOO0 = "found" if III11 else bold ( "not found" , False )
    oOoo0OooOOo00 = green ( iIIiI11I1 . print_address ( ) , False )
    lprint ( "  Lookup for crypto-hashed EID {} {}" . format ( oOoo0OooOOo00 , OOoOO0 ) )
    if ( III11 == False ) : return ( False )
    if 69 - 69: OoOoOO00 * Ii1I % OoooooooOO % OOooOOo * OoOoOO00
    if ( OO000OO == None ) :
        lprint ( "  RLOC-record with public-key not found" )
        return ( False )
    if 20 - 20: IiII
    if 17 - 17: o0oOOo0O0Ooo % iIii1I11I1II1
    # Log an abbreviated first-8/last-8 view of the key.
    ooo0oOo = OO000OO [ 0 : 8 ] + "..." + OO000OO [ - 8 : : ]
    lprint ( "  RLOC-record with public-key '{}' found" . format ( ooo0oOo ) )
    if 79 - 79: I11i
    if 38 - 38: I1ii11iIi11i * ooOoO0o
    if 77 - 77: OOooOOo - i11iIiiIii - I1ii11iIi11i
    if 94 - 94: OoO0O00 % iII111i - I1Ii111 + OoO0O00 - I1IiiI
    if 65 - 65: OOooOOo
    # Base64-decode the signature string from the JSON.
    o00 = IiiiI1I1i [ "signature" ]
    if 94 - 94: o0oOOo0O0Ooo
    try :
        IiiiI1I1i = binascii . a2b_base64 ( o00 )
    except :
        lprint ( "  Incorrect padding in signature string" )
        return ( False )
    if 46 - 46: I1ii11iIi11i + iII111i / OoO0O00 + oO0o * I11i % OOooOOo
    if 80 - 80: O0 % II111iiii / O0 . Oo0Ooo * OoOoOO00 + OOooOOo
    # An ECDSA (r, s) signature splits in half, so it must be even-length.
    i11IIiiII = len ( IiiiI1I1i )
    if ( i11IIiiII & 1 ) :
        lprint ( "  Signature length is odd, length {}" . format ( i11IIiiII ) )
        return ( False )
    if 31 - 31: OoO0O00 + i11iIiiIii / I11i % O0 / Ii1I
    if 90 - 90: iIii1I11I1II1 % oO0o % IiII
    if 84 - 84: I1IiiI * IiII * iII111i / i1IIi . II111iiii * o0oOOo0O0Ooo
    if 1 - 1: oO0o - iIii1I11I1II1 % i1IIi
    if 94 - 94: Oo0Ooo + iIii1I11I1II1 . OoO0O00 * oO0o . i1IIi
    # The signed data is the printed signature-EID address.
    ii1Ii111I11 = O0O0o0OOOooo0 . print_address ( )
    if 85 - 85: O0 / OoOoOO00 . iII111i
    if 64 - 64: OoO0O00 + I1ii11iIi11i / OoO0O00 * I1Ii111 . Oo0Ooo
    if 5 - 5: iII111i - iIii1I11I1II1 * IiII
    if 52 - 52: OOooOOo
    # Build a verifying key from the base64/PEM-encoded public key.
    OO000OO = binascii . a2b_base64 ( OO000OO )
    try :
        iii11 = ecdsa . VerifyingKey . from_pem ( OO000OO )
    except :
        IIIIIiiIII = bold ( "Bad public-key" , False )
        lprint ( "  {}, not in PEM format" . format ( IIIIIiiIII ) )
        return ( False )
    if 40 - 40: OoO0O00 * o0oOOo0O0Ooo / i1IIi * I1Ii111 * I1ii11iIi11i
    if 45 - 45: iII111i / Oo0Ooo - ooOoO0o . iII111i * OoOoOO00 / OoooooooOO
    if 66 - 66: I1IiiI
    if 45 - 45: II111iiii * I1Ii111 - II111iiii / I1IiiI % oO0o
    if 83 - 83: oO0o % OoO0O00 + I1ii11iIi11i / OoooooooOO % iII111i
    if 22 - 22: I1Ii111
    if 41 - 41: O0 * i1IIi
    if 89 - 89: iIii1I11I1II1 . I11i % I1ii11iIi11i + II111iiii . OoO0O00
    if 5 - 5: I1ii11iIi11i / I1IiiI . iII111i
    if 7 - 7: Ii1I
    if 62 - 62: I1ii11iIi11i + IiII . O0 - OoooooooOO * o0oOOo0O0Ooo % O0
    # The ecdsa library raises on a bad signature; treat any failure as
    # verification failure rather than crashing the control-plane.
    try :
        O0o0O00O0 = iii11 . verify ( IiiiI1I1i , ii1Ii111I11 , hashfunc = hashlib . sha256 )
    except :
        lprint ( "  Signature library failed for signature data '{}'" . format ( ii1Ii111I11 ) )
        if 63 - 63: OOooOOo + iII111i - IiII - I1IiiI % IiII . OoO0O00
        lprint ( "  Signature used '{}'" . format ( o00 ) )
        return ( False )
    if 73 - 73: OoOoOO00
    return ( O0o0O00O0 )
if 47 - 47: oO0o
if 17 - 17: IiII
if 47 - 47: I11i . I1IiiI % ooOoO0o . i11iIiiIii
if 63 - 63: I1ii11iIi11i % I11i % OoooooooOO
if 100 - 100: O0
if 9 - 9: Ii1I
if 87 - 87: I1IiiI
if 56 - 56: OOooOOo % oO0o - OoOoOO00
if 27 - 27: I1ii11iIi11i - IiII * OoooooooOO * I1ii11iIi11i + i11iIiiIii . IiII
if 81 - 81: oO0o / iIii1I11I1II1
def lisp_remove_eid_from_map_notify_queue ( eid_list ) :
 """Remove pending Map-Notify retransmissions for the given EIDs.

 For every EID in eid_list, scan the global lisp_map_notify_queue for
 queued Map-Notify entries whose eid_list contains that EID, cancel
 their retransmit timers (if armed), and drop them from the queue.
 """
 expired_keys = [ ]
 for eid in eid_list :
  for queue_key in lisp_map_notify_queue :
   map_notify = lisp_map_notify_queue [ queue_key ]
   if ( eid not in map_notify . eid_list ) : continue

   expired_keys . append ( queue_key )

   timer = map_notify . retransmit_timer
   if ( timer ) : timer . cancel ( )

   lprint ( "Remove from Map-Notify queue nonce 0x{} for EID {}" . format ( map_notify . nonce_key , green ( eid , False ) ) )

 #
 # Pop outside the scan so the dict is not mutated while iterating.
 #
 for queue_key in expired_keys : lisp_map_notify_queue . pop ( queue_key )
 return
if 39 - 39: o0oOOo0O0Ooo . i1IIi * OoO0O00 / II111iiii / I1ii11iIi11i * OOooOOo
if 39 - 39: O0 . OOooOOo
if 95 - 95: I11i
if 58 - 58: I1ii11iIi11i / i11iIiiIii + iII111i + I11i / oO0o
if 8 - 8: I1ii11iIi11i
if 100 - 100: OoooooooOO / I11i - Ii1I
if 11 - 11: OoO0O00
if 20 - 20: Oo0Ooo
def lisp_decrypt_map_register ( packet ) :
 """Decrypt an encrypted Map-Register and return the plaintext packet.

 The first 4 bytes of the header are inspected: when the encrypt bit
 (bit 13) is clear the packet is returned unmodified. Otherwise the
 3-bit key-id (bits 14-16) selects a ChaCha key from the global
 lisp_ms_encryption_keys table. Returns None when no usable key is
 configured for that key-id.
 """
 header = socket . ntohl ( struct . unpack ( "I" , packet [ 0 : 4 ] ) [ 0 ] )

 encrypt_bit = ( header >> 13 ) & 0x1
 if ( encrypt_bit == 0 ) : return ( packet )

 key_id = ( header >> 14 ) & 0x7

 #
 # Look up key and zero-fill to ChaCha key length; nonce is fixed.
 #
 try :
  key = lisp_ms_encryption_keys [ key_id ] . zfill ( 32 )
  nonce = "0" * 8
 except :
  lprint ( "Cannot decrypt Map-Register with key-id {}" . format ( key_id ) )
  return ( None )

 lprint ( "{} Map-Register with key-id {}" . format ( bold ( "Decrypt" , False ) , key_id ) )

 plaintext = chacha . ChaCha ( key , nonce ) . decrypt ( packet [ 4 : : ] )
 return ( packet [ 0 : 4 ] + plaintext )
if 17 - 17: O0 - OOooOOo
if 96 - 96: OOooOOo * I1ii11iIi11i
if 85 - 85: O0 / II111iiii * O0 - iII111i % i11iIiiIii
if 47 - 47: OoOoOO00
if 4 - 4: OOooOOo + I1ii11iIi11i - iII111i + OOooOOo / IiII
if 23 - 23: iIii1I11I1II1 + OoooooooOO + ooOoO0o . iII111i . Oo0Ooo - iIii1I11I1II1
if 25 - 25: O0 + I1IiiI % OOooOOo / Oo0Ooo . IiII / I1Ii111
def lisp_process_map_register ( lisp_sockets , packet , source , sport ) :
 """Process a received Map-Register on the Map-Server.

 Decrypts and decodes the Map-Register, looks up the configured site
 for each EID-record, authenticates the registration, stores the
 registered RLOC-set, and triggers Map-Notify messages where needed.
 'lisp_sockets' are the send sockets, 'packet' the raw Map-Register,
 'source' the sending xTR address, 'sport' its UDP source port.
 """
 global lisp_registered_count
 if 84 - 84: ooOoO0o . O0 + I1IiiI * OoO0O00 - I1IiiI
 if 24 - 24: Ii1I
 if 23 - 23: Oo0Ooo * i1IIi / I1IiiI . I11i - I1ii11iIi11i . iIii1I11I1II1
 if 15 - 15: O0 + o0oOOo0O0Ooo / oO0o
 if 27 - 27: Ii1I * II111iiii / oO0o
 if 99 - 99: I11i + ooOoO0o % I11i + O0 - Ii1I - I1Ii111
 #
 # Decrypt packet when the encrypt bit is set (no-op otherwise).
 #
 packet = lisp_decrypt_map_register ( packet )
 if ( packet == None ) : return
 if 3 - 3: Oo0Ooo . I1IiiI
 OOoO00o0o = lisp_map_register ( )
 IIiIiIii11I1 , packet = OOoO00o0o . decode ( packet )
 if ( packet == None ) :
  lprint ( "Could not decode Map-Register packet" )
  return
 if 99 - 99: iII111i . oO0o + II111iiii % O0
 OOoO00o0o . sport = sport
 if 40 - 40: iIii1I11I1II1
 OOoO00o0o . print_map_register ( )
 if 64 - 64: ooOoO0o * OOooOOo % o0oOOo0O0Ooo + I11i
 if 64 - 64: Ii1I - iIii1I11I1II1 . iII111i . ooOoO0o * O0
 if 3 - 3: I1IiiI % II111iiii
 if 38 - 38: Ii1I / I11i
 #
 # Remember which auth flavor the register used (sha1 vs sha2).
 #
 O00 = True
 if ( OOoO00o0o . auth_len == LISP_SHA1_160_AUTH_DATA_LEN ) :
  O00 = True
 if 94 - 94: II111iiii . Oo0Ooo - ooOoO0o
 if ( OOoO00o0o . alg_id == LISP_SHA_256_128_ALG_ID ) :
  O00 = False
 if 97 - 97: oO0o
 if 90 - 90: Oo0Ooo % ooOoO0o + I1Ii111 + OoO0O00 . II111iiii . OoO0O00
 if 10 - 10: I1ii11iIi11i - II111iiii * o0oOOo0O0Ooo . OoO0O00 / i11iIiiIii / iII111i
 if 42 - 42: O0 . OoooooooOO + Oo0Ooo
 if 34 - 34: OOooOOo / I11i / OoooooooOO + i11iIiiIii / II111iiii - O0
 # [eid, group] pairs merged below; drives multicast Map-Notifies.
 Ii1oOOOOo00 = [ ]
 if 62 - 62: I1Ii111 . ooOoO0o % I1ii11iIi11i . ooOoO0o - iIii1I11I1II1 + iII111i
 if 79 - 79: II111iiii / I1Ii111 + II111iiii + Oo0Ooo - IiII / I1ii11iIi11i
 if 93 - 93: OOooOOo
 if 65 - 65: i1IIi * ooOoO0o * OoooooooOO - i11iIiiIii + IiII - o0oOOo0O0Ooo
 iI1iI1 = None
 iiOO0o0o00 = packet
 O000 = [ ]
 ii1i = OOoO00o0o . record_count
 # Process each EID-record in the Map-Register.
 for Ii11 in range ( ii1i ) :
  OoOO = lisp_eid_record ( )
  iI11iII1IiiI = lisp_rloc_record ( )
  packet = OoOO . decode ( packet )
  if ( packet == None ) :
   lprint ( "Could not decode EID-record in Map-Register packet" )
   return
  if 89 - 89: i11iIiiIii + i1IIi + OOooOOo . O0 / o0oOOo0O0Ooo - i11iIiiIii
  OoOO . print_record ( " " , False )
  if 74 - 74: OoO0O00 + OOooOOo . IiII . iIii1I11I1II1
  if 2 - 2: OoOoOO00
  if 11 - 11: ooOoO0o - I1Ii111 / I1IiiI
  if 94 - 94: I1ii11iIi11i * ooOoO0o
  #
  # Longest-match (non-exact) lookup for the registering EID-prefix.
  #
  Iiii1IIIiIi = lisp_site_eid_lookup ( OoOO . eid , OoOO . group ,
  False )
  if 12 - 12: Ii1I - OoOoOO00
  o0O = Iiii1IIIiIi . print_eid_tuple ( ) if Iiii1IIIiIi else None
  if 75 - 75: oO0o + OoOoOO00 - OoooooooOO
  if 38 - 38: I11i / ooOoO0o / OoOoOO00 * OOooOOo . oO0o
  if 8 - 8: OoO0O00 . OOooOOo % I1Ii111 * OOooOOo / I1IiiI
  if 3 - 3: IiII - I1ii11iIi11i . o0oOOo0O0Ooo
  if 39 - 39: oO0o . I1Ii111 + oO0o % OoOoOO00 - i11iIiiIii
  if 69 - 69: I11i / OoO0O00
  if 73 - 73: i11iIiiIii / i1IIi
  # Matched entry refuses more-specifics and record is not an exact
  # match: climb to the configured parent entry, if any.
  if ( Iiii1IIIiIi and Iiii1IIIiIi . accept_more_specifics == False ) :
   if ( Iiii1IIIiIi . eid_record_matches ( OoOO ) == False ) :
    IiI1 = Iiii1IIIiIi . parent_for_more_specifics
    if ( IiI1 ) : Iiii1IIIiIi = IiI1
  if 72 - 72: iIii1I11I1II1 % iIii1I11I1II1 . OoOoOO00 * OoooooooOO * OoO0O00
  if 26 - 26: Ii1I * I1IiiI % ooOoO0o / I1Ii111
  if 80 - 80: I1Ii111 / O0 * O0
  if 40 - 40: OoO0O00 - oO0o / o0oOOo0O0Ooo . oO0o
  if 89 - 89: i11iIiiIii - II111iiii
  if 67 - 67: IiII % I1Ii111 + i11iIiiIii
  if 53 - 53: OOooOOo
  if 95 - 95: oO0o - OOooOOo % I1Ii111 / OoooooooOO % OoooooooOO - O0
  # Accept-more-specifics parent: create a dynamic child site-EID
  # entry that inherits configuration from the AMS parent.
  I1o0oO0Oo00Oo = ( Iiii1IIIiIi and Iiii1IIIiIi . accept_more_specifics )
  if ( I1o0oO0Oo00Oo ) :
   oOo0O0 = lisp_site_eid ( Iiii1IIIiIi . site )
   oOo0O0 . dynamic = True
   oOo0O0 . eid . copy_address ( OoOO . eid )
   oOo0O0 . group . copy_address ( OoOO . group )
   oOo0O0 . parent_for_more_specifics = Iiii1IIIiIi
   oOo0O0 . add_cache ( )
   oOo0O0 . inherit_from_ams_parent ( )
   Iiii1IIIiIi . more_specific_registrations . append ( oOo0O0 )
   Iiii1IIIiIi = oOo0O0
  else :
   Iiii1IIIiIi = lisp_site_eid_lookup ( OoOO . eid , OoOO . group ,
   True )
  if 28 - 28: O0
  if 29 - 29: I11i - OOooOOo / OoO0O00
  oOoo0OooOOo00 = OoOO . print_eid_tuple ( )
  if 81 - 81: I11i / oO0o
  # No site configured for this EID: log and skip its RLOC-records.
  if ( Iiii1IIIiIi == None ) :
   iIiiiiiiI1II = bold ( "Site not found" , False )
   lprint ( " {} for EID {}{}" . format ( iIiiiiiiI1II , green ( oOoo0OooOOo00 , False ) ,
   ", matched non-ams {}" . format ( green ( o0O , False ) if o0O else "" ) ) )
   if 89 - 89: OoOoOO00
   if 70 - 70: Oo0Ooo - OOooOOo * OOooOOo / o0oOOo0O0Ooo
   if 4 - 4: OoOoOO00 / OoO0O00
   if 66 - 66: I1Ii111 / OoOoOO00
   if 53 - 53: OoOoOO00 . i11iIiiIii - OoooooooOO
   packet = iI11iII1IiiI . end_of_rlocs ( packet , OoOO . rloc_count )
   if ( packet == None ) :
    lprint ( " Could not decode RLOC-record in Map-Register packet" )
    return
   if 92 - 92: O0 - i11iIiiIii + OoO0O00 - OoooooooOO - o0oOOo0O0Ooo
   continue
  if 25 - 25: oO0o / oO0o / Ii1I / O0
  if 56 - 56: ooOoO0o
  iI1iI1 = Iiii1IIIiIi . site
  if 19 - 19: O0 * I1IiiI + I1ii11iIi11i
  if ( I1o0oO0Oo00Oo ) :
   ooo0OO = Iiii1IIIiIi . parent_for_more_specifics . print_eid_tuple ( )
   lprint ( " Found ams {} for site '{}' for registering prefix {}" . format ( green ( ooo0OO , False ) , iI1iI1 . site_name , green ( oOoo0OooOOo00 , False ) ) )
   if 25 - 25: I11i - ooOoO0o / OoO0O00 / iII111i - OoO0O00
  else :
   ooo0OO = green ( Iiii1IIIiIi . print_eid_tuple ( ) , False )
   lprint ( " Found {} for site '{}' for registering prefix {}" . format ( ooo0OO , iI1iI1 . site_name , green ( oOoo0OooOOo00 , False ) ) )
  if 86 - 86: OoO0O00
  if 89 - 89: OoooooooOO % iII111i * I1ii11iIi11i + I1ii11iIi11i . Oo0Ooo
  if 4 - 4: I11i
  if 8 - 8: IiII
  if 1 - 1: ooOoO0o . IiII
  if 4 - 4: iIii1I11I1II1 % I1IiiI - OoooooooOO / iII111i
  # Reject registrations for sites in admin-shutdown state.
  if ( iI1iI1 . shutdown ) :
   lprint ( ( " Rejecting registration for site '{}', configured in " +
   "admin-shutdown state" ) . format ( iI1iI1 . site_name ) )
   packet = iI11iII1IiiI . end_of_rlocs ( packet , OoOO . rloc_count )
   continue
  if 55 - 55: O0 + iII111i * OoOoOO00 . i11iIiiIii * Ii1I + oO0o
  if 66 - 66: i1IIi . I1ii11iIi11i
  if 86 - 86: Oo0Ooo
  if 48 - 48: OoO0O00
  if 55 - 55: OoO0O00 * i1IIi * I11i / iII111i
  if 42 - 42: IiII
  if 28 - 28: OoOoOO00 + OoOoOO00
  if 53 - 53: II111iiii % i1IIi + ooOoO0o . I1Ii111
  # Authenticate with the site's key for this key-id; fall back to
  # key-id 0 when the requested key-id is not configured.
  I1o0 = OOoO00o0o . key_id
  if ( iI1iI1 . auth_key . has_key ( I1o0 ) == False ) : I1o0 = 0
  Oo00oO0 = iI1iI1 . auth_key [ I1o0 ]
  if 4 - 4: II111iiii
  iIiiIII1IIiI = lisp_verify_auth ( IIiIiIii11I1 , OOoO00o0o . alg_id ,
  OOoO00o0o . auth_data , Oo00oO0 )
  i1ii = "dynamic " if Iiii1IIIiIi . dynamic else ""
  if 30 - 30: ooOoO0o % I11i
  o0OOo0o0 = bold ( "passed" if iIiiIII1IIiI else "failed" , False )
  I1o0 = "key-id {}" . format ( I1o0 ) if I1o0 == OOoO00o0o . key_id else "bad key-id {}" . format ( OOoO00o0o . key_id )
  if 4 - 4: oO0o / OoO0O00
  lprint ( " Authentication {} for {}EID-prefix {}, {}" . format ( o0OOo0o0 , i1ii , green ( oOoo0OooOOo00 , False ) , I1o0 ) )
  if 90 - 90: I11i . IiII / OoO0O00 . IiII
  if 62 - 62: i11iIiiIii * I11i + oO0o - i1IIi
  if 9 - 9: I1IiiI
  if 17 - 17: II111iiii + i11iIiiIii + IiII
  if 41 - 41: OoOoOO00 + i1IIi - iIii1I11I1II1
  if 8 - 8: I1Ii111
  # Verify the EID-crypto-hash (CGA) signature when the EID carries a
  # crypto-hash or the site requires signed registrations.
  II = True
  IIII1iiI1111I = ( lisp_get_eid_hash ( OoOO . eid ) != None )
  if ( IIII1iiI1111I or Iiii1IIIiIi . require_signature ) :
   OOo0ooOO = "Required " if Iiii1IIIiIi . require_signature else ""
   oOoo0OooOOo00 = green ( oOoo0OooOOo00 , False )
   OoOOo = lisp_find_sig_in_rloc_set ( packet , OoOO . rloc_count )
   if ( OoOOo == None ) :
    II = False
    lprint ( ( " {}EID-crypto-hash signature verification {} " + "for EID-prefix {}, no signature found" ) . format ( OOo0ooOO ,
    # O0 . Ii1I / iII111i * ooOoO0o * I1IiiI
    bold ( "failed" , False ) , oOoo0OooOOo00 ) )
   else :
    II = lisp_verify_cga_sig ( OoOO . eid , OoOOo )
    o0OOo0o0 = bold ( "passed" if II else "failed" , False )
    lprint ( ( " {}EID-crypto-hash signature verification {} " + "for EID-prefix {}" ) . format ( OOo0ooOO , o0OOo0o0 , oOoo0OooOOo00 ) )
  if 34 - 34: Ii1I / OoooooooOO + OoooooooOO % OoooooooOO . IiII
  if 55 - 55: I11i / I1ii11iIi11i * O0 + IiII % I11i
  if 69 - 69: o0oOOo0O0Ooo % iIii1I11I1II1 . OoooooooOO - ooOoO0o
  if 94 - 94: iIii1I11I1II1 / Oo0Ooo % IiII * IiII
  # Skip the record when authentication or signature check failed.
  if ( iIiiIII1IIiI == False or II == False ) :
   packet = iI11iII1IiiI . end_of_rlocs ( packet , OoOO . rloc_count )
   if ( packet == None ) :
    lprint ( " Could not decode RLOC-record in Map-Register packet" )
    return
   if 62 - 62: I11i . IiII - OOooOOo - I1Ii111 / OoooooooOO . Ii1I
   continue
  if 28 - 28: iII111i / I1ii11iIi11i - OoOoOO00 * Oo0Ooo + Ii1I * OoOoOO00
  if 94 - 94: oO0o
  if 95 - 95: ooOoO0o * O0 + OOooOOo
  if 11 - 11: i1IIi / OoOoOO00 + OoOoOO00 + I1ii11iIi11i + OOooOOo
  if 21 - 21: ooOoO0o
  if 28 - 28: OoOoOO00 + OoOoOO00 - OoOoOO00 / ooOoO0o
  # Merge semantics: track per-xTR individual registrations under the
  # parent entry, keyed by source address + xtr-id.
  if ( OOoO00o0o . merge_register_requested ) :
   IiI1 = Iiii1IIIiIi
   IiI1 . inconsistent_registration = False
   if 81 - 81: oO0o
   if 34 - 34: o0oOOo0O0Ooo * OOooOOo - i1IIi * o0oOOo0O0Ooo * Oo0Ooo
   if 59 - 59: iIii1I11I1II1 / Oo0Ooo % II111iiii
   if 55 - 55: ooOoO0o - IiII + o0oOOo0O0Ooo
   if 48 - 48: O0 - iIii1I11I1II1 * OOooOOo
   # A changed site-id resets previously merged state.
   if ( Iiii1IIIiIi . group . is_null ( ) ) :
    if ( IiI1 . site_id != OOoO00o0o . site_id ) :
     IiI1 . site_id = OOoO00o0o . site_id
     IiI1 . registered = False
     IiI1 . individual_registrations = { }
     IiI1 . registered_rlocs = [ ]
     lisp_registered_count -= 1
   if 33 - 33: I11i
   if 63 - 63: Ii1I % II111iiii / OoOoOO00 + Oo0Ooo
   if 28 - 28: OoO0O00 + I1IiiI . oO0o + II111iiii - O0
   iii11 = source . address + OOoO00o0o . xtr_id
   if ( Iiii1IIIiIi . individual_registrations . has_key ( iii11 ) ) :
    Iiii1IIIiIi = Iiii1IIIiIi . individual_registrations [ iii11 ]
   else :
    Iiii1IIIiIi = lisp_site_eid ( iI1iI1 )
    Iiii1IIIiIi . eid . copy_address ( IiI1 . eid )
    Iiii1IIIiIi . group . copy_address ( IiI1 . group )
    IiI1 . individual_registrations [ iii11 ] = Iiii1IIIiIi
   if 32 - 32: oO0o
  else :
   Iiii1IIIiIi . inconsistent_registration = Iiii1IIIiIi . merge_register_requested
  if 62 - 62: i11iIiiIii + OoooooooOO + IiII - OoO0O00 / oO0o * iIii1I11I1II1
  if 91 - 91: o0oOOo0O0Ooo - i11iIiiIii + Oo0Ooo % iIii1I11I1II1
  if 58 - 58: iII111i / ooOoO0o - I1Ii111 + I1Ii111 * ooOoO0o
  Iiii1IIIiIi . map_registers_received += 1
  if 48 - 48: iII111i % O0 % Ii1I * OoO0O00 . OoO0O00
  if 74 - 74: OoO0O00 * i1IIi + I1ii11iIi11i / o0oOOo0O0Ooo / i1IIi
  if 94 - 94: Ii1I
  if 13 - 13: OoO0O00 - II111iiii . iII111i + OoOoOO00 / i11iIiiIii
  if 32 - 32: ooOoO0o / II111iiii / I1ii11iIi11i
  # Ignore a deregister (TTL 0) from an RLOC that is not in the
  # currently registered RLOC-set.
  IIIIIiiIII = ( Iiii1IIIiIi . is_rloc_in_rloc_set ( source ) == False )
  if ( OoOO . record_ttl == 0 and IIIIIiiIII ) :
   lprint ( " Ignore deregistration request from {}" . format ( red ( source . print_address_no_iid ( ) , False ) ) )
   if 34 - 34: iIii1I11I1II1
   continue
  if 47 - 47: OOooOOo * iII111i
  if 71 - 71: IiII - OoooooooOO * i11iIiiIii . OoooooooOO % i1IIi . Oo0Ooo
  if 3 - 3: OoO0O00 + i11iIiiIii + oO0o * IiII
  if 19 - 19: iII111i / II111iiii . I1Ii111 * I1IiiI - OOooOOo
  if 70 - 70: OoO0O00
  if 42 - 42: OoooooooOO - I1Ii111 + I1ii11iIi11i * iII111i * iII111i / OoO0O00
  # Save the old RLOC-set to detect changes and notify old RLOCs.
  ooooo0000000oOoo = Iiii1IIIiIi . registered_rlocs
  Iiii1IIIiIi . registered_rlocs = [ ]
  if 54 - 54: OOooOOo
  if 88 - 88: OoooooooOO / iII111i + i1IIi
  if 64 - 64: IiII % I11i / iIii1I11I1II1
  if 66 - 66: Ii1I
  # Walk and store each RLOC-record of this EID-record.
  O0Ooo0 = packet
  for O0o0o00O in range ( OoOO . rloc_count ) :
   iI11iII1IiiI = lisp_rloc_record ( )
   packet = iI11iII1IiiI . decode ( packet , None )
   if ( packet == None ) :
    lprint ( " Could not decode RLOC-record in Map-Register packet" )
    return
   if 95 - 95: I11i - oO0o - OOooOOo * ooOoO0o % I1IiiI
   iI11iII1IiiI . print_record ( " " )
   if 82 - 82: oO0o / ooOoO0o
   if 43 - 43: IiII - oO0o % ooOoO0o + Ii1I . Ii1I
   if 100 - 100: Ii1I % iII111i
   if 25 - 25: OoOoOO00 % O0 / I1IiiI * IiII + IiII
   # Enforce the site's allowed RLOC-set, when one is configured.
   if ( len ( iI1iI1 . allowed_rlocs ) > 0 ) :
    I1iiIiiii1111 = iI11iII1IiiI . rloc . print_address ( )
    if ( iI1iI1 . allowed_rlocs . has_key ( I1iiIiiii1111 ) == False ) :
     lprint ( ( " Reject registration, RLOC {} not " + "configured in allowed RLOC-set" ) . format ( red ( I1iiIiiii1111 , False ) ) )
     if 14 - 14: OOooOOo % I1IiiI
     if 27 - 27: O0 . OOooOOo - iIii1I11I1II1 - Ii1I - I1IiiI
     Iiii1IIIiIi . registered = False
     packet = iI11iII1IiiI . end_of_rlocs ( packet ,
     OoOO . rloc_count - O0o0o00O - 1 )
     break
   if 60 - 60: I1IiiI + Ii1I
   if 24 - 24: I1IiiI / ooOoO0o
   if 60 - 60: Ii1I * o0oOOo0O0Ooo
   if 69 - 69: I1ii11iIi11i . OoooooooOO
   if 92 - 92: Oo0Ooo . ooOoO0o * i1IIi - I1IiiI * OoooooooOO
   if 3 - 3: Ii1I
   # Build the internal lisp_rloc from the wire record.
   OoOOo = lisp_rloc ( )
   OoOOo . store_rloc_from_record ( iI11iII1IiiI , None , source )
   if 64 - 64: OoooooooOO / IiII - IiII . Ii1I % Oo0Ooo
   if 35 - 35: iII111i * I1IiiI * Oo0Ooo + I1Ii111 + i1IIi - ooOoO0o
   if 23 - 23: II111iiii - O0
   if 58 - 58: o0oOOo0O0Ooo * OoO0O00 + OoO0O00
   if 93 - 93: IiII - I1ii11iIi11i % I11i + i1IIi % OoO0O00
   if 20 - 20: oO0o . Oo0Ooo + IiII - II111iiii % Ii1I
   # Only the registering RLOC itself gets Map-Notify messages.
   if ( source . is_exact_match ( OoOOo . rloc ) ) :
    OoOOo . map_notify_requested = OOoO00o0o . map_notify_requested
   if 64 - 64: Ii1I % OoO0O00 + OOooOOo % OoOoOO00 + IiII
   if 92 - 92: iII111i * Oo0Ooo - OoOoOO00
   if 33 - 33: i11iIiiIii - OoOoOO00 . OOooOOo * II111iiii . Ii1I
   if 59 - 59: OoOoOO00
   if 29 - 29: iII111i - II111iiii * OoooooooOO * OoooooooOO
   Iiii1IIIiIi . registered_rlocs . append ( OoOOo )
  if 15 - 15: IiII / OOooOOo / iIii1I11I1II1 / OoOoOO00
  if 91 - 91: i11iIiiIii % O0 . Oo0Ooo / I1Ii111
  OOoO0 = ( Iiii1IIIiIi . do_rloc_sets_match ( ooooo0000000oOoo ) == False )
  if 78 - 78: II111iiii - i11iIiiIii . OOooOOo
  if 22 - 22: Oo0Ooo + ooOoO0o
  if 71 - 71: OOooOOo . Ii1I * i11iIiiIii . I11i
  if 9 - 9: O0 / I1ii11iIi11i . iII111i . O0 + IiII % I11i
  if 27 - 27: i11iIiiIii - I1ii11iIi11i / O0 - i1IIi + I1IiiI * iII111i
  if 26 - 26: Oo0Ooo . Ii1I
  # A refresh registration must not change the RLOC-set.
  if ( OOoO00o0o . map_register_refresh and OOoO0 and
  Iiii1IIIiIi . registered ) :
   lprint ( " Reject registration, refreshes cannot change RLOC-set" )
   Iiii1IIIiIi . registered_rlocs = ooooo0000000oOoo
   continue
  if 7 - 7: OoOoOO00 - o0oOOo0O0Ooo + oO0o
  if 8 - 8: iIii1I11I1II1
  if 6 - 6: oO0o
  if 51 - 51: I1Ii111 - o0oOOo0O0Ooo
  if 5 - 5: O0
  if 7 - 7: OoOoOO00 + OoO0O00 * I1IiiI
  # Record registration state and timestamps.
  if ( Iiii1IIIiIi . registered == False ) :
   Iiii1IIIiIi . first_registered = lisp_get_timestamp ( )
   lisp_registered_count += 1
  if 63 - 63: I1ii11iIi11i + iII111i * i1IIi
  Iiii1IIIiIi . last_registered = lisp_get_timestamp ( )
  Iiii1IIIiIi . registered = ( OoOO . record_ttl != 0 )
  Iiii1IIIiIi . last_registerer = source
  if 63 - 63: I1ii11iIi11i / II111iiii % oO0o + ooOoO0o . Ii1I % I11i
  if 59 - 59: I1Ii111 % o0oOOo0O0Ooo - I1IiiI * i1IIi
  if 5 - 5: I1IiiI
  if 22 - 22: II111iiii / iII111i
  # Copy the register's flags into the site-EID state.
  Iiii1IIIiIi . auth_sha1_or_sha2 = O00
  Iiii1IIIiIi . proxy_reply_requested = OOoO00o0o . proxy_reply_requested
  Iiii1IIIiIi . lisp_sec_present = OOoO00o0o . lisp_sec_present
  Iiii1IIIiIi . map_notify_requested = OOoO00o0o . map_notify_requested
  Iiii1IIIiIi . mobile_node_requested = OOoO00o0o . mobile_node
  Iiii1IIIiIi . merge_register_requested = OOoO00o0o . merge_register_requested
  if 18 - 18: i11iIiiIii * ooOoO0o . I1IiiI + i1IIi + I11i
  Iiii1IIIiIi . use_register_ttl_requested = OOoO00o0o . use_ttl_for_timeout
  if ( Iiii1IIIiIi . use_register_ttl_requested ) :
   Iiii1IIIiIi . register_ttl = OoOO . store_ttl ( )
  else :
   Iiii1IIIiIi . register_ttl = LISP_SITE_TIMEOUT_CHECK_INTERVAL * 3
  if 62 - 62: O0 % o0oOOo0O0Ooo + iIii1I11I1II1 + iIii1I11I1II1 * ooOoO0o
  Iiii1IIIiIi . xtr_id_present = OOoO00o0o . xtr_id_present
  if ( Iiii1IIIiIi . xtr_id_present ) :
   Iiii1IIIiIi . xtr_id = OOoO00o0o . xtr_id
   Iiii1IIIiIi . site_id = OOoO00o0o . site_id
  if 21 - 21: o0oOOo0O0Ooo % O0
  if 81 - 81: i1IIi + i1IIi
  if 3 - 3: I1Ii111 . I1ii11iIi11i * iII111i * i11iIiiIii * IiII
  if 52 - 52: iIii1I11I1II1 % o0oOOo0O0Ooo % I1IiiI
  if 71 - 71: I1IiiI + iII111i
  # Merge this individual registration into the parent; send merged
  # Map-Notify when the xTR asked for it.
  if ( OOoO00o0o . merge_register_requested ) :
   if ( IiI1 . merge_in_site_eid ( Iiii1IIIiIi ) ) :
    Ii1oOOOOo00 . append ( [ OoOO . eid , OoOO . group ] )
   if 47 - 47: iIii1I11I1II1 . OoO0O00 . iIii1I11I1II1
   if ( OOoO00o0o . map_notify_requested ) :
    lisp_send_merged_map_notify ( lisp_sockets , IiI1 , OOoO00o0o ,
    OoOO )
  if 57 - 57: IiII * ooOoO0o * ooOoO0o * iIii1I11I1II1 * I1Ii111 + OoOoOO00
  if 83 - 83: OoOoOO00 . Oo0Ooo . OoO0O00
  if 65 - 65: iII111i * iIii1I11I1II1
  if ( OOoO0 == False ) : continue
  if ( len ( Ii1oOOOOo00 ) != 0 ) : continue
  if 48 - 48: iII111i * OoO0O00
  O000 . append ( Iiii1IIIiIi . print_eid_tuple ( ) )
  if 57 - 57: ooOoO0o + I1IiiI
  if 32 - 32: I1ii11iIi11i + OOooOOo - I11i
  if 82 - 82: Oo0Ooo % Oo0Ooo
  if 91 - 91: I11i
  if 98 - 98: I11i - II111iiii . IiII % Oo0Ooo
  if 65 - 65: OoO0O00
  if 65 - 65: oO0o
  # RLOC-set changed: Map-Notify the old RLOC-set with the new one.
  OoOO = OoOO . encode ( )
  OoOO += O0Ooo0
  OOoOO00000Oo = [ Iiii1IIIiIi . print_eid_tuple ( ) ]
  lprint ( " Changed RLOC-set, Map-Notifying old RLOC-set" )
  if 77 - 77: I11i * i1IIi - OOooOOo / OoOoOO00
  for OoOOo in ooooo0000000oOoo :
   if ( OoOOo . map_notify_requested == False ) : continue
   if ( OoOOo . rloc . is_exact_match ( source ) ) : continue
   lisp_build_map_notify ( lisp_sockets , OoOO , OOoOO00000Oo , 1 , OoOOo . rloc ,
   LISP_CTRL_PORT , OOoO00o0o . nonce , OOoO00o0o . key_id ,
   OOoO00o0o . alg_id , OOoO00o0o . auth_len , iI1iI1 , False )
  if 50 - 50: O0 - oO0o . oO0o
  if 98 - 98: IiII % Ii1I / Ii1I
  if 10 - 10: Ii1I
  if 69 - 69: I1Ii111 * OoooooooOO . o0oOOo0O0Ooo % I1IiiI
  if 70 - 70: iII111i . i11iIiiIii * I1Ii111
  # Tell pubsub subscribers about the changed RLOC-set.
  lisp_notify_subscribers ( lisp_sockets , OoOO , Iiii1IIIiIi . eid , iI1iI1 )
 if 54 - 54: o0oOOo0O0Ooo . i1IIi / iII111i
 if 21 - 21: O0 + ooOoO0o
 if 53 - 53: Ii1I - II111iiii * iIii1I11I1II1
 if 91 - 91: OoOoOO00 % iIii1I11I1II1
 if 81 - 81: i11iIiiIii / OoOoOO00 + iIii1I11I1II1
 #
 # Queue multicast Map-Notifies for (S,G) entries merged above.
 #
 if ( len ( Ii1oOOOOo00 ) != 0 ) :
  lisp_queue_multicast_map_notify ( lisp_sockets , Ii1oOOOOo00 )
 if 65 - 65: o0oOOo0O0Ooo
 if 73 - 73: I11i . I1ii11iIi11i - OoO0O00 + OoooooooOO
 if 71 - 71: I1IiiI
 if 27 - 27: OoO0O00 + i1IIi * OoooooooOO * iIii1I11I1II1 - Ii1I
 if 85 - 85: OoO0O00 + II111iiii / OoO0O00 . II111iiii * OoOoOO00 * I1IiiI
 if 19 - 19: iII111i / Ii1I + iIii1I11I1II1 * O0 - Oo0Ooo
 # Merged registrations already sent their Map-Notifies above.
 if ( OOoO00o0o . merge_register_requested ) : return
 if 47 - 47: iIii1I11I1II1 % I1ii11iIi11i
 if 33 - 33: oO0o . oO0o / IiII + II111iiii
 if 34 - 34: OoO0O00 . OoOoOO00 / i1IIi / OOooOOo
 if 12 - 12: o0oOOo0O0Ooo . Oo0Ooo / II111iiii
 if 18 - 18: I1Ii111 % II111iiii + Ii1I * Oo0Ooo - OoooooooOO . Oo0Ooo
 # Ack the registration with a Map-Notify when the xTR asked for it.
 if ( OOoO00o0o . map_notify_requested and iI1iI1 != None ) :
  lisp_build_map_notify ( lisp_sockets , iiOO0o0o00 , O000 ,
  OOoO00o0o . record_count , source , sport , OOoO00o0o . nonce ,
  OOoO00o0o . key_id , OOoO00o0o . alg_id , OOoO00o0o . auth_len ,
  iI1iI1 , True )
 if 25 - 25: OoO0O00
 return
if 83 - 83: II111iiii . iIii1I11I1II1
if 77 - 77: O0 . OoOoOO00 % oO0o / OOooOOo
if 8 - 8: iII111i - i1IIi
if 81 - 81: ooOoO0o / OOooOOo % OoOoOO00 . iIii1I11I1II1
if 45 - 45: I1IiiI . ooOoO0o - OoooooooOO
if 84 - 84: I1ii11iIi11i
if 69 - 69: I1Ii111 + II111iiii
if 92 - 92: OoooooooOO
if 80 - 80: I1ii11iIi11i % I1ii11iIi11i . OoO0O00 . oO0o % I1IiiI % I11i
if 4 - 4: OoO0O00 / iII111i / I1ii11iIi11i - o0oOOo0O0Ooo * I1Ii111
def lisp_process_multicast_map_notify ( packet , source ) :
 """Process a multicast Map-Notify and update the map-cache.

 Decodes the Map-Notify, then for each EID-record creates or updates
 the corresponding map-cache entry: clears the RLOC-set when the
 record carries none, otherwise installs the RLE-bearing RLOC-record
 (preferring RTR RLOCs when RTRs are already in the set). 'source' is
 the sender's address, or the string "lisp-etr" for internal IPC.
 """
 map_notify = lisp_map_notify ( "" )
 packet = map_notify . decode ( packet )
 if ( packet == None ) :
  lprint ( "Could not decode Map-Notify packet" )
  return

 map_notify . print_notify ( )
 if ( map_notify . record_count == 0 ) : return

 eid_records = map_notify . eid_records

 for i in range ( map_notify . record_count ) :
  eid_record = lisp_eid_record ( )
  eid_records = eid_record . decode ( eid_records )

  #
  # Bug fix: test the decode result ('eid_records'), not 'packet'.
  # 'packet' was already validated above and can never be None here,
  # so a truncated EID-record previously went undetected.
  #
  if ( eid_records == None ) : return
  eid_record . print_record ( " " , False )

  #
  # Create map-cache entry if this is the first Map-Notify for it.
  #
  mc = lisp_map_cache_lookup ( eid_record . eid , eid_record . group )
  if ( mc == None ) :
   mc = lisp_mapping ( eid_record . eid , eid_record . group , [ ] )
   mc . add_cache ( )

  # A Map-Notify from the lisp-etr process has no usable source.
  mc . mapping_source = None if source == "lisp-etr" else source
  mc . map_cache_ttl = eid_record . store_ttl ( )

  #
  # Record carries no RLOCs: flush the cached RLOC-set.
  #
  if ( len ( mc . rloc_set ) != 0 and eid_record . rloc_count == 0 ) :
   mc . rloc_set = [ ]
   mc . build_best_rloc_set ( )
   lisp_write_ipc_map_cache ( True , mc )
   lprint ( "Update {} map-cache entry with no RLOC-set" . format ( green ( mc . print_eid_tuple ( ) , False ) ) )
   continue

  rtr_in_set = mc . rtrs_in_rloc_set ( )

  #
  # Install the RLE-bearing RLOC-record for (S,G) entries.
  #
  for j in range ( eid_record . rloc_count ) :
   rloc_record = lisp_rloc_record ( )
   eid_records = rloc_record . decode ( eid_records , None )
   rloc_record . print_record ( " " )
   if ( eid_record . group . is_null ( ) ) : continue
   if ( rloc_record . rle == None ) : continue

   # Carry over stats from the existing first RLOC, if any.
   stats = mc . rloc_set [ 0 ] . stats if len ( mc . rloc_set ) != 0 else None

   rloc = lisp_rloc ( )
   rloc . store_rloc_from_record ( rloc_record , None , mc . mapping_source )
   if ( stats != None ) : rloc . stats = copy . deepcopy ( stats )

   # When RTRs are in the RLOC-set, only accept RTR RLOCs.
   if ( rtr_in_set and rloc . is_rtr ( ) == False ) : continue

   mc . rloc_set = [ rloc ]
   mc . build_best_rloc_set ( )
   lisp_write_ipc_map_cache ( True , mc )

   lprint ( "Update {} map-cache entry with RLE {}" . format ( green ( mc . print_eid_tuple ( ) , False ) , rloc . rle . print_rle ( False ) ) )
 return
if 3 - 3: iIii1I11I1II1 - OoO0O00
if 38 - 38: O0 + ooOoO0o * I1Ii111 - oO0o * o0oOOo0O0Ooo
if 97 - 97: Oo0Ooo - O0 * OoooooooOO
if 52 - 52: i1IIi + IiII
if 11 - 11: I1IiiI % iIii1I11I1II1 * Ii1I % ooOoO0o
if 33 - 33: iII111i / O0 % II111iiii % OoOoOO00 / I1Ii111
if 77 - 77: OoOoOO00 % I1IiiI % II111iiii * iII111i . OoOoOO00 / O0
if 21 - 21: ooOoO0o - I11i . i11iIiiIii
def lisp_process_map_notify ( lisp_sockets , orig_packet , source ) :
 """Process a Map-Notify received from a Map-Server.

 Authenticates the message against the matching configured
 Map-Server (when auth data is present), prints its records,
 forwards multicast (S,G) notifies to the lisp-itr process via IPC,
 and always answers with a Map-Notify-Ack.
 """
 iiIi11I = lisp_map_notify ( "" )
 i1II1IiiIi = iiIi11I . decode ( orig_packet )
 if ( i1II1IiiIi == None ) :
  lprint ( "Could not decode Map-Notify packet" )
  return
 if 39 - 39: Oo0Ooo * II111iiii % OOooOOo / oO0o . ooOoO0o
 if 75 - 75: I11i / O0 + OoooooooOO + OOooOOo % iII111i + I1IiiI
 iiIi11I . print_notify ( )
 if 10 - 10: II111iiii * I11i - IiII * iIii1I11I1II1 . OoooooooOO
 if 39 - 39: I11i . I1IiiI % Oo0Ooo + oO0o
 if 76 - 76: I1IiiI * OoooooooOO - i11iIiiIii / I11i / Oo0Ooo
 if 82 - 82: IiII % ooOoO0o
 if 100 - 100: Oo0Ooo . oO0o - iII111i + OoooooooOO
 # When the Map-Notify carries auth data, find the sending Map-Server
 # by address so its password can be used to verify it.
 IiIIi1I1I11Ii = source . print_address ( )
 if ( iiIi11I . alg_id != 0 or iiIi11I . auth_len != 0 ) :
  oooO0OOo0O0O0 = None
  for iii11 in lisp_map_servers_list :
   if ( iii11 . find ( IiIIi1I1I11Ii ) == - 1 ) : continue
   oooO0OOo0O0O0 = lisp_map_servers_list [ iii11 ]
  if 27 - 27: Oo0Ooo . I1Ii111 - i1IIi * I1IiiI
  if ( oooO0OOo0O0O0 == None ) :
   lprint ( ( " Could not find Map-Server {} to authenticate " + "Map-Notify" ) . format ( IiIIi1I1I11Ii ) )
   if 96 - 96: I1ii11iIi11i - Ii1I . I1ii11iIi11i
   return
  if 89 - 89: II111iiii % I1ii11iIi11i % IiII . I11i
  if 49 - 49: iII111i % i11iIiiIii * I11i - oO0o . OOooOOo . i11iIiiIii
  oooO0OOo0O0O0 . map_notifies_received += 1
  if 26 - 26: iIii1I11I1II1 + i11iIiiIii % iII111i + I1IiiI + oO0o - ooOoO0o
  iIiiIII1IIiI = lisp_verify_auth ( i1II1IiiIi , iiIi11I . alg_id ,
  iiIi11I . auth_data , oooO0OOo0O0O0 . password )
  if 4 - 4: Oo0Ooo - IiII - I11i
  lprint ( " Authentication {} for Map-Notify" . format ( "succeeded" if iIiiIII1IIiI else "failed" ) )
  if 72 - 72: OoooooooOO
  if ( iIiiIII1IIiI == False ) : return
 else :
  # Unauthenticated Map-Notify: build a placeholder Map-Server object
  # so the Map-Notify-Ack below still has a destination peer.
  oooO0OOo0O0O0 = lisp_ms ( IiIIi1I1I11Ii , None , "" , 0 , "" , False , False , False , False , 0 , 0 , 0 ,
  None )
 if 19 - 19: Oo0Ooo . OOooOOo
 if 58 - 58: IiII % iII111i + i1IIi % I1IiiI % OOooOOo . iII111i
 if 85 - 85: i11iIiiIii . o0oOOo0O0Ooo * iII111i . I1ii11iIi11i / I1Ii111 % Ii1I
 if 27 - 27: II111iiii . iIii1I11I1II1 / I1ii11iIi11i / i1IIi / iIii1I11I1II1
 if 70 - 70: i11iIiiIii . OoO0O00 / OoooooooOO * OoooooooOO - OOooOOo
 if 34 - 34: I1ii11iIi11i * i1IIi % OoooooooOO / I1IiiI
 # A record count of 0 is just an ack request.
 I1IiiI1 = iiIi11I . eid_records
 if ( iiIi11I . record_count == 0 ) :
  lisp_send_map_notify_ack ( lisp_sockets , I1IiiI1 , iiIi11I , oooO0OOo0O0O0 )
  return
 if 39 - 39: OoO0O00 + IiII - II111iiii % I11i
 if 80 - 80: o0oOOo0O0Ooo * ooOoO0o
 if 87 - 87: I1Ii111 + O0 / I1ii11iIi11i / OoOoOO00 . Oo0Ooo - IiII
 if 24 - 24: OoOoOO00
 if 19 - 19: ooOoO0o
 if 43 - 43: O0 . I1Ii111 % OoooooooOO / I1IiiI . o0oOOo0O0Ooo - OoOoOO00
 if 46 - 46: I11i - OoooooooOO % o0oOOo0O0Ooo
 if 7 - 7: OoooooooOO - I1Ii111 * IiII
 # Decode the first EID-record and its RLOC-records for display.
 OoOO = lisp_eid_record ( )
 i1II1IiiIi = OoOO . decode ( I1IiiI1 )
 if ( i1II1IiiIi == None ) : return
 if 20 - 20: o0oOOo0O0Ooo . OoooooooOO * I1IiiI . Oo0Ooo * OoOoOO00
 OoOO . print_record ( " " , False )
 if 3 - 3: I1Ii111 % i11iIiiIii % O0 % II111iiii
 for O0o0o00O in range ( OoOO . rloc_count ) :
  iI11iII1IiiI = lisp_rloc_record ( )
  i1II1IiiIi = iI11iII1IiiI . decode ( i1II1IiiIi , None )
  if ( i1II1IiiIi == None ) :
   lprint ( " Could not decode RLOC-record in Map-Notify packet" )
   return
  if 8 - 8: OoooooooOO * ooOoO0o
  iI11iII1IiiI . print_record ( " " )
 if 26 - 26: i11iIiiIii + oO0o - i1IIi
 if 71 - 71: I1IiiI % I1Ii111 / oO0o % oO0o / iIii1I11I1II1 + I1Ii111
 if 86 - 86: IiII % i1IIi * o0oOOo0O0Ooo - I1Ii111
 if 37 - 37: iII111i % I1IiiI - I1ii11iIi11i % I11i
 if 35 - 35: O0 - OoooooooOO % iII111i
 # Multicast (S,G) record: forward the Map-Notify to the lisp-itr
 # process via IPC so it can update its map-cache.
 if ( OoOO . group . is_null ( ) == False ) :
  if 48 - 48: OOooOOo % i11iIiiIii
  if 49 - 49: O0 * iII111i + II111iiii - OOooOOo
  if 29 - 29: OoooooooOO % II111iiii - Oo0Ooo / IiII - i11iIiiIii
  if 64 - 64: iII111i . I1Ii111 + I1Ii111
  if 1 - 1: OOooOOo % Oo0Ooo
  lprint ( "Send {} Map-Notify IPC message to ITR process" . format ( green ( OoOO . print_eid_tuple ( ) , False ) ) )
  if 81 - 81: oO0o / I11i % Ii1I . I11i + OoooooooOO
  if 31 - 31: OoO0O00
  IIi1IiIii = lisp_control_packet_ipc ( orig_packet , IiIIi1I1I11Ii , "lisp-itr" , 0 )
  lisp_ipc ( IIi1IiIii , lisp_sockets [ 2 ] , "lisp-core-pkt" )
 if 41 - 41: i11iIiiIii - I1ii11iIi11i - II111iiii
 if 5 - 5: OoOoOO00 + i1IIi
 if 43 - 43: iII111i * I1IiiI
 if 20 - 20: I1IiiI . I11i * OoO0O00 . ooOoO0o . II111iiii
 if 6 - 6: Ii1I * OoOoOO00 % IiII + I11i
 # Always acknowledge the Map-Notify.
 lisp_send_map_notify_ack ( lisp_sockets , I1IiiI1 , iiIi11I , oooO0OOo0O0O0 )
 return
if 20 - 20: oO0o
if 34 - 34: i1IIi + oO0o * Oo0Ooo * I1Ii111 % OoooooooOO % ooOoO0o
if 17 - 17: I1ii11iIi11i + o0oOOo0O0Ooo / OoO0O00 . Oo0Ooo - o0oOOo0O0Ooo / oO0o
if 87 - 87: ooOoO0o
if 74 - 74: i11iIiiIii . i11iIiiIii . iIii1I11I1II1
if 100 - 100: i11iIiiIii - oO0o + iIii1I11I1II1 * OoOoOO00 % OOooOOo % i11iIiiIii
if 26 - 26: O0
if 97 - 97: OOooOOo + I11i % I1Ii111 % i11iIiiIii / I1ii11iIi11i
def lisp_process_map_notify_ack(packet, source):
    """
    Process a received Map-Notify-Ack: decode it, optionally verify its
    authentication data against the site's configured auth-key, and dequeue
    the matching Map-Notify from the retransmission queue.
    """
    map_notify = lisp_map_notify("")
    packet = map_notify.decode(packet)
    if (packet == None):
        lprint("Could not decode Map-Notify-Ack packet")
        return

    map_notify.print_notify()

    #
    # Need at least one EID-record to locate the site entry whose auth-key
    # authenticates this Map-Notify-Ack.
    #
    if (map_notify.record_count < 1):
        lprint("No EID-prefix found, cannot authenticate Map-Notify-Ack")
        return

    eid_record = lisp_eid_record()
    if (eid_record.decode(map_notify.eid_records) == None):
        lprint("Could not decode EID-record, cannot authenticate Map-Notify-Ack")
        return

    eid_record.print_record(" ", False)
    eid_str = eid_record.print_eid_tuple()

    #
    # Verify authentication when the sender supplied it.
    #
    if (map_notify.alg_id != LISP_NONE_ALG_ID and map_notify.auth_len != 0):
        site_eid = lisp_sites_by_eid.lookup_cache(eid_record.eid, True)
        if (site_eid == None):
            notfound = bold("Site not found", False)
            lprint(("{} for EID {}, cannot authenticate Map-Notify-Ack"). \
                format(notfound, green(eid_str, False)))
            return
        site = site_eid.site

        site.map_notify_acks_received += 1

        # Fall back to key-id 0 when the advertised key-id is unknown.
        key_id = map_notify.key_id
        if (site.auth_key.has_key(key_id) == False): key_id = 0
        password = site.auth_key[key_id]

        auth_good = lisp_verify_auth(packet, map_notify.alg_id,
            map_notify.auth_data, password)

        key_id = "key-id {}".format(key_id) if key_id == map_notify.key_id \
            else "bad key-id {}".format(map_notify.key_id)

        lprint(" Authentication {} for Map-Notify-Ack, {}".format( \
            "succeeded" if auth_good else "failed", key_id))
        if (auth_good == False): return

    #
    # Stop retransmitting and remove the queued Map-Notify state.
    #
    if (map_notify.retransmit_timer): map_notify.retransmit_timer.cancel()

    source_str = source.print_address()
    key = map_notify.nonce_key

    if (lisp_map_notify_queue.has_key(key)):
        map_notify = lisp_map_notify_queue.pop(key)
        if (map_notify.retransmit_timer): map_notify.retransmit_timer.cancel()
        lprint("Dequeue Map-Notify from retransmit queue, key is: {}". \
            format(key))
    else:
        lprint("Map-Notify with nonce 0x{} queue entry not found for {}". \
            format(map_notify.nonce_key, red(source_str, False)))
    return
def lisp_map_referral_loop(mr, eid, group, action, s):
    """
    Detect a referral loop: for NODE/MS referral actions, return True when
    the referral's EID (and group, if any) is not more-specific than the
    prefix already cached on this pending Map-Request 'mr'.
    """
    if (action not in (LISP_DDT_ACTION_NODE_REFERRAL,
        LISP_DDT_ACTION_MS_REFERRAL)): return(False)

    # Nothing cached yet, so no loop is possible.
    if (mr.last_cached_prefix[0] == None): return(False)

    looping = False
    if (group.is_null() == False):
        looping = mr.last_cached_prefix[1].is_more_specific(group)
    if (looping == False):
        looping = mr.last_cached_prefix[0].is_more_specific(eid)

    if (looping):
        eid_str = lisp_print_eid_tuple(eid, group)
        cached_str = lisp_print_eid_tuple(mr.last_cached_prefix[0],
            mr.last_cached_prefix[1])
        lprint(("Map-Referral prefix {} from {} is not more-specific " + \
            "than cached prefix {}").format(green(eid_str, False), s,
            cached_str))
    return(looping)
def lisp_process_map_referral(lisp_sockets, packet, source):
    """
    Process a received Map-Referral. For each EID-record, match it to a
    queued DDT Map-Request (by nonce), update or create a referral-cache
    entry with the record's RLOC-set, then act on the referral action code
    (follow the referral, answer negatively, or finish the request).
    """
    map_referral = lisp_map_referral()
    packet = map_referral.decode(packet)
    if (packet == None):
        lprint("Could not decode Map-Referral packet")
        return

    map_referral.print_map_referral()

    source_str = source.print_address()
    nonce = map_referral.nonce

    #
    # Walk each EID-record in the Map-Referral.
    #
    for _ in range(map_referral.record_count):
        eid_record = lisp_eid_record()
        packet = eid_record.decode(packet)
        if (packet == None):
            lprint("Could not decode EID-record in Map-Referral packet")
            return
        eid_record.print_record(" ", True)

        #
        # The nonce must match a queued Map-Request we originated.
        #
        nonce_key = str(nonce)
        if (nonce_key not in lisp_ddt_map_requestQ):
            lprint(("Map-Referral nonce 0x{} from {} not found in " + \
                "Map-Request queue, EID-record ignored").format( \
                lisp_hex_string(nonce), source_str))
            continue

        mr = lisp_ddt_map_requestQ[nonce_key]
        if (mr == None):
            lprint(("No Map-Request queue entry found for Map-Referral " +
                "nonce 0x{} from {}, EID-record ignored").format( \
                lisp_hex_string(nonce), source_str))
            continue

        #
        # Stop chasing referrals that do not make forward progress.
        #
        if (lisp_map_referral_loop(mr, eid_record.eid, eid_record.group,
            eid_record.action, source_str)):
            mr.dequeue_map_request()
            continue

        mr.last_cached_prefix[0] = eid_record.eid
        mr.last_cached_prefix[1] = eid_record.group

        #
        # Find or create the referral-cache entry for this EID-prefix.
        #
        added = False
        referral = lisp_referral_cache_lookup(eid_record.eid, eid_record.group,
            True)
        if (referral == None):
            added = True
            referral = lisp_referral()
            referral.eid = eid_record.eid
            referral.group = eid_record.group
            if (eid_record.ddt_incomplete == False): referral.add_cache()
        elif (referral.referral_source.not_set()):
            # Statically configured entries are never overwritten.
            lprint("Do not replace static referral entry {}".format( \
                green(referral.print_eid_tuple(), False)))
            mr.dequeue_map_request()
            continue

        action = eid_record.action
        referral.referral_source = source
        referral.referral_type = action
        ttl = eid_record.store_ttl()
        referral.referral_ttl = ttl
        referral.expires = lisp_set_timestamp(ttl)

        #
        # Track up/down status of the referral node we heard from.
        #
        negative = referral.is_referral_negative()
        if (referral.referral_set.has_key(source_str)):
            ref_node = referral.referral_set[source_str]

            if (ref_node.updown == False and negative == False):
                ref_node.updown = True
                lprint("Change up/down status for referral-node {} to up". \
                    format(source_str))
            elif (ref_node.updown == True and negative == True):
                ref_node.updown = False
                lprint(("Change up/down status for referral-node {} " + \
                    "to down, received negative referral").format(source_str))

        #
        # Snapshot the current referral-set keys; any key not re-advertised
        # in this record's RLOC-set is removed afterwards.
        #
        stale = {}
        for k in referral.referral_set: stale[k] = None

        #
        # Merge each RLOC-record into the referral-set.
        #
        for _ in range(eid_record.rloc_count):
            rloc_record = lisp_rloc_record()
            packet = rloc_record.decode(packet, None)
            if (packet == None):
                lprint("Could not decode RLOC-record in Map-Referral packet")
                return
            rloc_record.print_record(" ")

            addr_str = rloc_record.rloc.print_address()
            if (referral.referral_set.has_key(addr_str) == False):
                ref_node = lisp_referral_node()
                ref_node.referral_address.copy_address(rloc_record.rloc)
                referral.referral_set[addr_str] = ref_node
                if (source_str == addr_str and negative): ref_node.updown = False
            else:
                ref_node = referral.referral_set[addr_str]
                if (stale.has_key(addr_str)): stale.pop(addr_str)

            ref_node.priority = rloc_record.priority
            ref_node.weight = rloc_record.weight

        #
        # Drop referral nodes that were not refreshed by this record.
        #
        for k in stale: referral.referral_set.pop(k)

        eid_str = referral.print_eid_tuple()

        if (added):
            if (eid_record.ddt_incomplete):
                lprint("Suppress add {} to referral-cache".format( \
                    green(eid_str, False)))
            else:
                lprint("Add {}, referral-count {} to referral-cache".format( \
                    green(eid_str, False), eid_record.rloc_count))
        else:
            lprint("Replace {}, referral-count: {} in referral-cache".format( \
                green(eid_str, False), eid_record.rloc_count))

        #
        # Act on the referral action code.
        #
        if (action == LISP_DDT_ACTION_DELEGATION_HOLE):
            lisp_send_negative_map_reply(mr.lisp_sockets, referral.eid,
                referral.group, mr.nonce, mr.itr, mr.sport, 15, None, False)
            mr.dequeue_map_request()

        if (action == LISP_DDT_ACTION_NOT_AUTH):
            if (mr.tried_root):
                # Already asked the root; give up with a negative reply.
                lisp_send_negative_map_reply(mr.lisp_sockets, referral.eid,
                    referral.group, mr.nonce, mr.itr, mr.sport, 0, None, False)
                mr.dequeue_map_request()
            else:
                lisp_send_ddt_map_request(mr, True)

        if (action == LISP_DDT_ACTION_MS_NOT_REG):
            if (referral.referral_set.has_key(source_str)):
                ref_node = referral.referral_set[source_str]
                ref_node.updown = False

            if (len(referral.referral_set) == 0):
                mr.dequeue_map_request()
            else:
                lisp_send_ddt_map_request(mr, False)

        if (action in (LISP_DDT_ACTION_NODE_REFERRAL,
            LISP_DDT_ACTION_MS_REFERRAL)):
            if (mr.eid.is_exact_match(eid_record.eid)):
                if (not mr.tried_root):
                    lisp_send_ddt_map_request(mr, True)
                else:
                    lisp_send_negative_map_reply(mr.lisp_sockets,
                        referral.eid, referral.group, mr.nonce, mr.itr,
                        mr.sport, 15, None, False)
                    mr.dequeue_map_request()
            else:
                lisp_send_ddt_map_request(mr, False)

        if (action == LISP_DDT_ACTION_MS_ACK): mr.dequeue_map_request()
    return
def lisp_process_ecm(lisp_sockets, packet, source, ecm_port):
    """
    Process an Encapsulated-Control-Message: strip the ECM header and hand
    the inner Map-Request to lisp_process_map_request(). Any other inner
    message type is rejected.
    """
    ecm = lisp_ecm(0)
    packet = ecm.decode(packet)
    if (packet == None):
        lprint("Could not decode ECM packet")
        return

    ecm.print_ecm()

    header = lisp_control_header()
    if (header.decode(packet) == None):
        lprint("Could not decode control header")
        return

    packet_type = header.type
    del(header)

    # Only a Map-Request is expected inside an ECM.
    if (packet_type != LISP_MAP_REQUEST):
        lprint("Received ECM without Map-Request inside")
        return

    # Pass the inner UDP source port so the reply goes back correctly.
    sport = ecm.udp_sport
    lisp_process_map_request(lisp_sockets, packet, source, ecm_port,
        ecm.source, sport, ecm.ddt, -1)
    return
def lisp_send_map_register(lisp_sockets, packet, map_register, ms):
    """
    Finish and transmit a Map-Register to map-server 'ms': compute the
    authentication data, optionally ChaCha-encrypt the body (when ms.ekey
    is configured), and send on the control port.
    """
    dest = ms.map_server

    #
    # LISP-Decent bootstrap case: when the configured map-server is a
    # multicast group and this is the first Map-Register sent, loop the
    # packet to ourselves (127.0.0.1) instead.
    #
    if (lisp_decent_push_configured and dest.is_multicast_address() and
        (ms.map_registers_multicast_sent == 1 or ms.map_registers_sent == 1)):
        dest = copy.deepcopy(dest)
        dest.address = 0x7f000001
        boot = bold("Bootstrap", False)
        group = ms.map_server.print_address_no_iid()
        lprint("{} mapping system for peer-group {}".format(boot, group))

    #
    # Authentication is computed over the cleartext packet.
    #
    packet = lisp_compute_auth(packet, map_register, ms.password)

    #
    # Encrypt everything after the first 4 bytes when an encryption key
    # is configured for this map-server.
    #
    if (ms.ekey != None):
        ekey = ms.ekey.zfill(32)
        nonce = "0" * 8
        ciphertext = chacha.ChaCha(ekey, nonce).encrypt(packet[4::])
        packet = packet[0:4] + ciphertext
        encrypt_str = bold("Encrypt", False)
        lprint("{} Map-Register with key-id {}".format(encrypt_str, ms.ekey_id))

    decent = ""
    if (lisp_decent_pull_xtr_configured()):
        decent = ", decent-index {}".format(bold(ms.dns_name, False))

    lprint("Send Map-Register to map-server {}{}{}".format( \
        dest.print_address(), ", ms-name '{}'".format(ms.ms_name), decent))
    lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
    return
def lisp_send_ipc_to_core(lisp_socket, packet, dest, port):
    """
    Wrap a control packet in an IPC message and forward it to the
    lisp-core process, which owns the control port.
    """
    source = lisp_socket.getsockname()
    dest = dest.print_address_no_iid()

    lprint("Send IPC {} bytes to {} {}, control-packet: {}".format( \
        len(packet), dest, port, lisp_format_packet(packet)))

    packet = lisp_control_packet_ipc(packet, source, dest, port)
    lisp_ipc(packet, lisp_socket, "lisp-core-pkt")
    return
def lisp_send_map_reply(lisp_sockets, packet, dest, port):
    """Log and relay a Map-Reply via the lisp-core process."""
    lprint("Send Map-Reply to {}".format(dest.print_address_no_iid()))
    lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
    return
def lisp_send_map_referral(lisp_sockets, packet, dest, port):
    """Log and relay a Map-Referral via the lisp-core process."""
    lprint("Send Map-Referral to {}".format(dest.print_address()))
    lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
    return
def lisp_send_map_notify(lisp_sockets, packet, dest, port):
    """Log and relay a Map-Notify to an xTR via the lisp-core process."""
    lprint("Send Map-Notify to xTR {}".format(dest.print_address()))
    lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
    return
def lisp_send_ecm(lisp_sockets, packet, inner_source, inner_sport, inner_dest,
    outer_dest, to_etr=False, to_ms=False, ddt=False):
    """
    Prepend an ECM header to 'packet' and send it to 'outer_dest' on the
    control port. The to-ETR/to-MS flags are set only when the respective
    local process is running.
    """
    if (inner_source == None or inner_source.is_null()):
        inner_source = inner_dest

    #
    # Behind a NAT, use the translated port as the inner source port so
    # returning packets traverse the NAT.
    #
    if (lisp_nat_traversal):
        nat_sport = lisp_get_any_translated_port()
        if (nat_sport != None): inner_sport = nat_sport

    ecm = lisp_ecm(inner_sport)

    ecm.to_etr = to_etr if lisp_is_running("lisp-etr") else False
    ecm.to_ms = to_ms if lisp_is_running("lisp-ms") else False
    ecm.ddt = ddt
    ecm_header = ecm.encode(packet, inner_source, inner_dest)
    if (ecm_header == None):
        lprint("Could not encode ECM message")
        return

    ecm.print_ecm()

    packet = ecm_header + packet

    addr_str = outer_dest.print_address_no_iid()
    lprint("Send Encapsulated-Control-Message to {}".format(addr_str))
    dest = lisp_convert_4to6(addr_str)
    lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
    return
#
# Address-Family Identifiers used in LISP address encodings. The negative
# values are internal pseudo-AFIs used only inside this implementation
# (e.g. as cache keys); the non-negative values match the names of the
# corresponding IANA address families.
#
LISP_AFI_GEO_COORD = - 3     # internal: geo-coordinate address
LISP_AFI_IID_RANGE = - 2     # internal: instance-id range
LISP_AFI_ULTIMATE_ROOT = - 1 # internal: root of the DDT hierarchy
LISP_AFI_NONE = 0            # no address present
LISP_AFI_IPV4 = 1
LISP_AFI_IPV6 = 2
LISP_AFI_MAC = 6
LISP_AFI_E164 = 8
LISP_AFI_NAME = 17           # DNS name
LISP_AFI_LCAF = 16387        # LISP Canonical Address Format
if 60 - 60: oO0o * I1Ii111
#
# RLOC reachability states.
#
LISP_RLOC_UNKNOWN_STATE = 0
LISP_RLOC_UP_STATE = 1
LISP_RLOC_DOWN_STATE = 2
LISP_RLOC_UNREACH_STATE = 3
LISP_RLOC_NO_ECHOED_NONCE_STATE = 4
LISP_RLOC_ADMIN_DOWN_STATE = 5
if 81 - 81: oO0o - OOooOOo - oO0o
#
# Authentication algorithm identifiers (names indicate HMAC digest used).
#
LISP_AUTH_NONE = 0
LISP_AUTH_MD5 = 1
LISP_AUTH_SHA1 = 2
LISP_AUTH_SHA2 = 3
if 54 - 54: oO0o % I11i
if 71 - 71: oO0o / I1ii11iIi11i . Ii1I % II111iiii
if 22 - 22: iIii1I11I1II1 - OoooooooOO
if 8 - 8: ooOoO0o % i11iIiiIii
if 41 - 41: I1Ii111 . ooOoO0o - i11iIiiIii + Ii1I . OOooOOo . OoOoOO00
if 70 - 70: i1IIi % OoOoOO00 / iII111i + i11iIiiIii % ooOoO0o + IiII
if 58 - 58: OOooOOo / i11iIiiIii . Oo0Ooo % iII111i
#
# Host (full-length) mask lengths per address family.
#
LISP_IPV4_HOST_MASK_LEN = 32
LISP_IPV6_HOST_MASK_LEN = 128
LISP_MAC_HOST_MASK_LEN = 48
LISP_E164_HOST_MASK_LEN = 60
if 92 - 92: OoOoOO00 / ooOoO0o % iII111i / iIii1I11I1II1
if 73 - 73: O0 % i11iIiiIii
if 16 - 16: O0
if 15 - 15: i1IIi % i11iIiiIii
if 18 - 18: Ii1I . OoO0O00 . iII111i * oO0o + O0
if 35 - 35: OoOoOO00 . oO0o / II111iiii
def byte_swap_64(address):
    """
    Reverse the byte order of a 64-bit value. Only the low-order eight
    bytes of 'address' contribute; the result is a non-negative integer
    that fits in 64 bits.
    """
    swapped = 0
    for _ in range(8):
        swapped = (swapped << 8) | (address & 0xff)
        address >>= 8
    return (swapped)
class lisp_cache_entries ( ) :

    # One mask-length bucket of a lisp_cache. 'entries' maps the cache key
    # string (built by lisp_cache.build_key) to the stored entry;
    # 'entries_sorted' is the sorted list of those keys used for ordered
    # longest-prefix-match walks.
    def __init__ ( self ) :
        self . entries = { }         # key-string -> cache entry
        self . entries_sorted = [ ]  # sorted key-strings
if 93 - 93: I1Ii111 % OOooOOo * I1IiiI % iII111i / iIii1I11I1II1 + OoO0O00
if 6 - 6: I11i
if 70 - 70: ooOoO0o + OoooooooOO % OoOoOO00 % oO0o / Ii1I . I11i
class lisp_cache():
    """
    Prefix cache bucketed by mask-length. Each bucket (lisp_cache_entries)
    maps a key built from [instance-id, afi, address] to an entry object.
    Supports exact and longest-prefix-match lookups, ordered walks, and
    printing.
    """
    def __init__(self):
        self.cache = {}          # mask-length -> lisp_cache_entries bucket
        self.cache_sorted = []   # sorted list of mask-lengths present
        self.cache_count = 0     # total entries across all buckets

    def cache_size(self):
        """Return total number of entries stored."""
        return(self.cache_count)

    def build_key(self, prefix):
        """
        Build the bucket index and lookup key for 'prefix'. Returns
        [mask-length, key-string] where the key concatenates instance-id,
        AFI and address, each as fixed-width hex (or printable form for
        non-binary AFIs).
        """
        if (prefix.afi == LISP_AFI_ULTIMATE_ROOT):
            ml = 0
        elif (prefix.afi == LISP_AFI_IID_RANGE):
            ml = prefix.mask_len
        else:
            # Offset by 48 so real AFIs sort after the pseudo-AFIs above.
            ml = prefix.mask_len + 48

        iid = lisp_hex_string(prefix.instance_id).zfill(8)
        afi = lisp_hex_string(prefix.afi).zfill(4)

        if (prefix.afi > 0):
            if (prefix.is_binary()):
                width = prefix.addr_length() * 2
                addr = lisp_hex_string(prefix.address).zfill(width)
            else:
                addr = prefix.address
        elif (prefix.afi == LISP_AFI_GEO_COORD):
            afi = "8003"
            addr = prefix.address.print_geo()
        else:
            afi = ""
            addr = ""

        return([ml, iid + afi + addr])

    def add_cache(self, prefix, entry):
        """Insert (or replace) 'entry' keyed by 'prefix'."""
        if (prefix.is_binary()): prefix.zero_host_bits()
        ml, key = self.build_key(prefix)
        if (self.cache.has_key(ml) == False):
            self.cache[ml] = lisp_cache_entries()
            self.cache[ml].entries = {}
            self.cache[ml].entries_sorted = []
            self.cache_sorted = sorted(self.cache)
        if (self.cache[ml].entries.has_key(key) == False):
            self.cache_count += 1
        self.cache[ml].entries[key] = entry
        self.cache[ml].entries_sorted = sorted(self.cache[ml].entries)

    def lookup_cache(self, prefix, exact):
        """
        Exact lookup returns the entry for 'prefix' or None. Non-exact
        lookup returns the longest (most-specific) covering entry, walking
        buckets in increasing mask-length order.
        """
        ml, key = self.build_key(prefix)
        if (exact):
            if (self.cache.has_key(ml) == False): return(None)
            if (self.cache[ml].entries.has_key(key) == False): return(None)
            return(self.cache[ml].entries[key])

        best = None
        for length in self.cache_sorted:
            # Buckets beyond the prefix's own mask-length cannot cover it.
            if (ml < length): return(best)
            for ekey in self.cache[length].entries_sorted:
                bucket = self.cache[length].entries
                if (ekey in bucket):
                    entry = bucket[ekey]
                    if (entry == None): continue
                    if (prefix.is_more_specific(entry.eid)): best = entry
        return(best)

    def delete_cache(self, prefix):
        """Remove the entry for 'prefix', if present."""
        ml, key = self.build_key(prefix)
        if (self.cache.has_key(ml) == False): return
        if (self.cache[ml].entries.has_key(key) == False): return
        self.cache[ml].entries.pop(key)
        self.cache[ml].entries_sorted.remove(key)
        self.cache_count -= 1

    def walk_cache(self, function, parms):
        """
        Call function(entry, parms) for each entry in bucket/key order.
        The callback returns (keep_going, parms); a False keep_going stops
        the walk early. Returns the final parms.
        """
        for ml in self.cache_sorted:
            for key in self.cache[ml].entries_sorted:
                entry = self.cache[ml].entries[key]
                keep_going, parms = function(entry, parms)
                if (keep_going == False): return(parms)
        return(parms)

    def print_cache(self):
        """Log every entry in the cache, or note that it is empty."""
        lprint("Printing contents of {}: ".format(self))
        if (self.cache_size() == 0):
            lprint(" Cache is empty")
            return
        for ml in self.cache_sorted:
            for key in self.cache[ml].entries_sorted:
                entry = self.cache[ml].entries[key]
                lprint(" Mask-length: {}, key: {}, entry: {}".format(ml, key,
                    entry))
#
# Global lisp_cache instances. lisp_sites_by_eid is consulted for site
# lookups when authenticating control messages; lisp_map_cache backs
# lisp_map_cache_lookup(); lisp_referral_cache holds DDT referral state.
# lisp_ddt_cache and lisp_db_for_lookups are presumably the DDT delegation
# and local database caches -- confirm against their users elsewhere.
#
lisp_referral_cache = lisp_cache ( )
lisp_ddt_cache = lisp_cache ( )
lisp_sites_by_eid = lisp_cache ( )
lisp_map_cache = lisp_cache ( )
lisp_db_for_lookups = lisp_cache ( )
if 25 - 25: I11i + i11iIiiIii + O0 - Ii1I
if 69 - 69: I11i . OoOoOO00 / OOooOOo / i1IIi . II111iiii
if 17 - 17: I1Ii111
if 2 - 2: O0 % OoOoOO00 + oO0o
if 24 - 24: iII111i + iII111i - OoooooooOO % OoooooooOO * O0
if 51 - 51: IiII
if 31 - 31: I11i - iIii1I11I1II1 * Ii1I + Ii1I
def lisp_map_cache_lookup(source, dest):
    """Longest-match lookup of (source, dest) in the global lisp_map_cache.

    For a unicast dest the destination lookup alone decides.  For a
    multicast dest, the group lookup is followed by a source-specific
    lookup inside the group entry.  Returns the map-cache entry or None;
    every outcome is logged via dprint().
    """
    multicast = dest.is_multicast_address()

    entry = lisp_map_cache.lookup_cache(dest, False)
    if entry == None:
        eid_str = source.print_sg(dest) if multicast else dest.print_address()
        eid_str = green(eid_str, False)
        dprint("Lookup for EID {} not found in map-cache".format(eid_str))
        return None

    # Unicast: the destination entry is the answer.
    if multicast == False:
        prefix_str = green(entry.eid.print_prefix(), False)
        dprint("Lookup for EID {} found map-cache entry {}".format(
            green(dest.print_address(), False), prefix_str))
        return entry

    # Multicast: drill into the (S,G) source cache of the group entry.
    entry = entry.lookup_source_cache(source, False)
    if entry == None:
        eid_str = source.print_sg(dest)
        dprint("Lookup for EID {} not found in map-cache".format(eid_str))
        return None

    prefix_str = green(entry.print_eid_tuple(), False)
    dprint("Lookup for EID {} found map-cache entry {}".format(
        green(source.print_sg(dest), False), prefix_str))
    return entry
if 100 - 100: II111iiii . OoooooooOO
if 32 - 32: I11i % OOooOOo * O0 / iIii1I11I1II1 / i1IIi
if 87 - 87: OoO0O00 . I1ii11iIi11i * I1IiiI
if 83 - 83: OOooOOo
if 86 - 86: I1Ii111 / oO0o
if 67 - 67: OoOoOO00 + Oo0Ooo / i11iIiiIii . I1IiiI
if 53 - 53: Oo0Ooo + IiII * ooOoO0o % OoooooooOO * oO0o . iII111i
def lisp_referral_cache_lookup(eid, group, exact):
    """Look up an EID (or (S,G) pair) in the global lisp_referral_cache.

    Unicast (null group): plain cache lookup on eid.  (S,G): look up the
    group first, then the source inside the group entry.  On an exact
    lookup a missing source entry means no match at all.
    """
    if group and group.is_null():
        return lisp_referral_cache.lookup_cache(eid, exact)

    # (S,G) path needs a usable source EID.
    if eid == None or eid.is_null(): return None

    group_entry = lisp_referral_cache.lookup_cache(group, exact)
    if group_entry == None: return None

    source_entry = group_entry.lookup_source_cache(eid, exact)
    if source_entry: return source_entry

    # Longest-match may fall back to the group entry; exact may not.
    return None if exact else group_entry
if 32 - 32: OoOoOO00 * o0oOOo0O0Ooo - OoOoOO00 * oO0o
if 80 - 80: iII111i - O0 + IiII + iIii1I11I1II1 * I1ii11iIi11i
if 8 - 8: OoO0O00
if 99 - 99: iII111i . I1ii11iIi11i . o0oOOo0O0Ooo
if 4 - 4: I11i * Oo0Ooo . i11iIiiIii / Ii1I . I1ii11iIi11i % I1Ii111
if 68 - 68: ooOoO0o
if 58 - 58: iII111i * I1IiiI
def lisp_ddt_cache_lookup(eid, group, exact):
    """Look up an EID (or (S,G) pair) in the global lisp_ddt_cache.

    Mirrors lisp_referral_cache_lookup(): null group means a unicast
    lookup on eid; otherwise the group entry is found first and the
    source is looked up inside it.  Exact lookups return None unless the
    source entry itself exists.
    """
    if group.is_null():
        return lisp_ddt_cache.lookup_cache(eid, exact)

    if eid.is_null(): return None

    group_entry = lisp_ddt_cache.lookup_cache(group, exact)
    if group_entry == None: return None

    source_entry = group_entry.lookup_source_cache(eid, exact)
    if source_entry: return source_entry

    return None if exact else group_entry
if 41 - 41: I1Ii111 % Oo0Ooo - iIii1I11I1II1
if 96 - 96: I1Ii111 / II111iiii . oO0o + oO0o
if 62 - 62: I1IiiI
if 22 - 22: i11iIiiIii . Ii1I . Oo0Ooo * Oo0Ooo - iII111i / I1ii11iIi11i
if 49 - 49: iII111i + I11i . Oo0Ooo
if 23 - 23: I1IiiI . Ii1I + ooOoO0o . OoooooooOO
if 57 - 57: OOooOOo / OoOoOO00 / i11iIiiIii - I11i - I11i . Ii1I
def lisp_site_eid_lookup(eid, group, exact):
    """Look up a registered site EID (or (S,G) pair) in lisp_sites_by_eid.

    Null group: unicast lookup on eid.  Otherwise the group entry is
    found first and the source looked up inside it.  On a non-exact miss
    of the source, the group entry's parent may stand in when it accepts
    more-specifics and the group falls inside the parent's group prefix.
    """
    if group.is_null():
        return lisp_sites_by_eid.lookup_cache(eid, exact)

    if eid.is_null(): return None

    site = lisp_sites_by_eid.lookup_cache(group, exact)
    if site == None: return None

    source_site = site.lookup_source_cache(eid, exact)
    if source_site: return source_site

    if exact:
        site = None
    else:
        parent = site.parent_for_more_specifics
        if parent and parent.accept_more_specifics:
            if group.is_more_specific(parent.group): site = parent
    return site
if 39 - 39: ooOoO0o * II111iiii
if 90 - 90: OoooooooOO * ooOoO0o
if 14 - 14: I1IiiI % i1IIi
if 35 - 35: ooOoO0o % o0oOOo0O0Ooo % ooOoO0o
if 77 - 77: OOooOOo % I1Ii111 / i11iIiiIii . i1IIi % OOooOOo
if 55 - 55: i1IIi
if 64 - 64: oO0o . OOooOOo * i11iIiiIii + I1Ii111
if 88 - 88: O0
if 75 - 75: iII111i - Oo0Ooo / OoooooooOO - O0
if 36 - 36: OoO0O00 % Ii1I . Oo0Ooo
if 90 - 90: i11iIiiIii - iII111i * oO0o
if 79 - 79: IiII
if 38 - 38: I1Ii111
if 56 - 56: i11iIiiIii
if 58 - 58: i11iIiiIii / OoOoOO00
if 23 - 23: I1IiiI % iIii1I11I1II1 - oO0o - iII111i - o0oOOo0O0Ooo
if 39 - 39: Oo0Ooo . OoO0O00
if 74 - 74: I1IiiI . O0 . IiII + IiII - IiII
if 100 - 100: ooOoO0o / OoooooooOO
if 73 - 73: i11iIiiIii - Oo0Ooo
if 100 - 100: iIii1I11I1II1 + I1Ii111
if 51 - 51: o0oOOo0O0Ooo * I11i
if 42 - 42: OOooOOo % I11i
if 84 - 84: Oo0Ooo * OoOoOO00 / Ii1I / IiII / o0oOOo0O0Ooo . I1ii11iIi11i
if 81 - 81: I1IiiI
if 82 - 82: I1Ii111 - OoooooooOO - Ii1I
class lisp_address ( ) :
    def __init__ ( self , afi , addr_str , mask_len , iid ) :
        """Build a LISP address.

        afi      -- LISP AFI code (IPv4/IPv6/MAC/E.164/dist-name/...).
        addr_str -- textual address; parsed immediately when non-empty.
        mask_len -- prefix length in bits.
        iid      -- LISP instance-id this address belongs to.
        """
        self . afi = afi
        self . mask_len = mask_len
        self . instance_id = iid
        # Extra instance-ids this prefix also appears in (printed by
        # print_address()); starts empty.
        self . iid_list = [ ]
        self . address = 0
        if ( addr_str != "" ) : self . store_address ( addr_str )
if 34 - 34: OOooOOo . iIii1I11I1II1 / I1IiiI . Oo0Ooo - iIii1I11I1II1
if 83 - 83: iII111i - I1ii11iIi11i + iII111i
def copy_address ( self , addr ) :
if ( addr == None ) : return
self . afi = addr . afi
self . address = addr . address
self . mask_len = addr . mask_len
self . instance_id = addr . instance_id
self . iid_list = addr . iid_list
if 4 - 4: o0oOOo0O0Ooo % iIii1I11I1II1 + I11i
if 60 - 60: I1ii11iIi11i / I1Ii111 % i11iIiiIii % oO0o % I1IiiI . Oo0Ooo
def make_default_route ( self , addr ) :
self . afi = addr . afi
self . instance_id = addr . instance_id
self . mask_len = 0
self . address = 0
if 20 - 20: IiII - OOooOOo + OoOoOO00
if 83 - 83: OoooooooOO / I1IiiI + iII111i - iIii1I11I1II1 % ooOoO0o
def make_default_multicast_route ( self , addr ) :
self . afi = addr . afi
self . instance_id = addr . instance_id
if ( self . afi == LISP_AFI_IPV4 ) :
self . address = 0xe0000000
self . mask_len = 4
if 74 - 74: OoO0O00
if ( self . afi == LISP_AFI_IPV6 ) :
self . address = 0xff << 120
self . mask_len = 8
if 13 - 13: I1ii11iIi11i / OoO0O00
if ( self . afi == LISP_AFI_MAC ) :
self . address = 0xffffffffffff
self . mask_len = 48
if 90 - 90: iIii1I11I1II1 - OoO0O00 . i1IIi / o0oOOo0O0Ooo + O0
if 94 - 94: IiII * i1IIi
if 90 - 90: O0 % I1IiiI . o0oOOo0O0Ooo % ooOoO0o % I1IiiI
def not_set ( self ) :
return ( self . afi == LISP_AFI_NONE )
if 16 - 16: OoO0O00 / OOooOOo / iIii1I11I1II1 / OoooooooOO . oO0o - I1Ii111
if 43 - 43: OoOoOO00 % OOooOOo / I1IiiI + I1IiiI
def is_private_address ( self ) :
if ( self . is_ipv4 ( ) == False ) : return ( False )
o0o0O00 = self . address
if ( ( ( o0o0O00 & 0xff000000 ) >> 24 ) == 10 ) : return ( True )
if ( ( ( o0o0O00 & 0xff000000 ) >> 24 ) == 172 ) :
i1I = ( o0o0O00 & 0x00ff0000 ) >> 16
if ( i1I >= 16 and i1I <= 31 ) : return ( True )
if 95 - 95: Oo0Ooo . iIii1I11I1II1 - iIii1I11I1II1 * I1IiiI % Oo0Ooo * I1IiiI
if ( ( ( o0o0O00 & 0xffff0000 ) >> 16 ) == 0xc0a8 ) : return ( True )
return ( False )
if 87 - 87: iII111i + i1IIi
if 10 - 10: Oo0Ooo . o0oOOo0O0Ooo - i11iIiiIii / iII111i + i11iIiiIii . I11i
def is_multicast_address ( self ) :
if ( self . is_ipv4 ( ) ) : return ( self . is_ipv4_multicast ( ) )
if ( self . is_ipv6 ( ) ) : return ( self . is_ipv6_multicast ( ) )
if ( self . is_mac ( ) ) : return ( self . is_mac_multicast ( ) )
return ( False )
if 66 - 66: i1IIi
if 98 - 98: Oo0Ooo / iIii1I11I1II1
def host_mask_len ( self ) :
if ( self . afi == LISP_AFI_IPV4 ) : return ( LISP_IPV4_HOST_MASK_LEN )
if ( self . afi == LISP_AFI_IPV6 ) : return ( LISP_IPV6_HOST_MASK_LEN )
if ( self . afi == LISP_AFI_MAC ) : return ( LISP_MAC_HOST_MASK_LEN )
if ( self . afi == LISP_AFI_E164 ) : return ( LISP_E164_HOST_MASK_LEN )
if ( self . afi == LISP_AFI_NAME ) : return ( len ( self . address ) * 8 )
if ( self . afi == LISP_AFI_GEO_COORD ) :
return ( len ( self . address . print_geo ( ) ) * 8 )
if 33 - 33: O0 - iII111i
return ( 0 )
if 40 - 40: iII111i * I11i
if 25 - 25: O0 * o0oOOo0O0Ooo % ooOoO0o % I1IiiI
def is_iana_eid ( self ) :
if ( self . is_ipv6 ( ) == False ) : return ( False )
o0o0O00 = self . address >> 96
return ( o0o0O00 == 0x20010005 )
if 87 - 87: OoOoOO00
if 30 - 30: IiII % OoOoOO00 + I1Ii111
def addr_length ( self ) :
if ( self . afi == LISP_AFI_IPV4 ) : return ( 4 )
if ( self . afi == LISP_AFI_IPV6 ) : return ( 16 )
if ( self . afi == LISP_AFI_MAC ) : return ( 6 )
if ( self . afi == LISP_AFI_E164 ) : return ( 8 )
if ( self . afi == LISP_AFI_LCAF ) : return ( 0 )
if ( self . afi == LISP_AFI_NAME ) : return ( len ( self . address ) + 1 )
if ( self . afi == LISP_AFI_IID_RANGE ) : return ( 4 )
if ( self . afi == LISP_AFI_GEO_COORD ) :
return ( len ( self . address . print_geo ( ) ) )
if 13 - 13: iII111i * Ii1I % o0oOOo0O0Ooo * i1IIi . IiII % i1IIi
return ( 0 )
if 79 - 79: OoooooooOO % I11i / o0oOOo0O0Ooo + IiII + O0 + iII111i
if 87 - 87: I11i
def afi_to_version ( self ) :
if ( self . afi == LISP_AFI_IPV4 ) : return ( 4 )
if ( self . afi == LISP_AFI_IPV6 ) : return ( 6 )
return ( 0 )
if 39 - 39: I1ii11iIi11i * i11iIiiIii % I1Ii111
if 72 - 72: OoO0O00 * Oo0Ooo - IiII
def packet_format ( self ) :
if 74 - 74: Ii1I
if 26 - 26: I11i . O0
if 68 - 68: Ii1I
if 26 - 26: o0oOOo0O0Ooo - I1ii11iIi11i / O0 % i11iIiiIii
if 7 - 7: I1Ii111 . Oo0Ooo + IiII / iIii1I11I1II1
if ( self . afi == LISP_AFI_IPV4 ) : return ( "I" )
if ( self . afi == LISP_AFI_IPV6 ) : return ( "QQ" )
if ( self . afi == LISP_AFI_MAC ) : return ( "HHH" )
if ( self . afi == LISP_AFI_E164 ) : return ( "II" )
if ( self . afi == LISP_AFI_LCAF ) : return ( "I" )
return ( "" )
if 22 - 22: iIii1I11I1II1 - O0 . iII111i - IiII - ooOoO0o
if 54 - 54: OoO0O00 . iII111i . OoOoOO00 * OoO0O00 + o0oOOo0O0Ooo . ooOoO0o
    def pack_address ( self ) :
        """Pack self.address into its on-the-wire byte string.

        Uses the AFI's struct format from packet_format(): IPv4 via
        htonl(); IPv6 as two byte-swapped 64-bit halves; MAC as three
        16-bit chunks; E.164 as two 32-bit words; a distinguished-name as
        the string plus a NUL terminator.  Unknown AFIs return "".
        """
        oOoOo000 = self . packet_format ( )
        i1II1IiiIi = ""
        if ( self . is_ipv4 ( ) ) :
            i1II1IiiIi = struct . pack ( oOoOo000 , socket . htonl ( self . address ) )
        elif ( self . is_ipv6 ( ) ) :
            # byte_swap_64 (defined elsewhere in this file) converts each
            # 64-bit half to wire byte order.
            OooO0O0Ooo = byte_swap_64 ( self . address >> 64 )
            oO0O = byte_swap_64 ( self . address & 0xffffffffffffffff )
            i1II1IiiIi = struct . pack ( oOoOo000 , OooO0O0Ooo , oO0O )
        elif ( self . is_mac ( ) ) :
            o0o0O00 = self . address
            OooO0O0Ooo = ( o0o0O00 >> 32 ) & 0xffff
            oO0O = ( o0o0O00 >> 16 ) & 0xffff
            i1iiIiiIiI11 = o0o0O00 & 0xffff
            i1II1IiiIi = struct . pack ( oOoOo000 , OooO0O0Ooo , oO0O , i1iiIiiIiI11 )
        elif ( self . is_e164 ( ) ) :
            o0o0O00 = self . address
            OooO0O0Ooo = ( o0o0O00 >> 32 ) & 0xffffffff
            oO0O = ( o0o0O00 & 0xffffffff )
            i1II1IiiIi = struct . pack ( oOoOo000 , OooO0O0Ooo , oO0O )
        elif ( self . is_dist_name ( ) ) :
            # Dist-names are stored as a string; wire form is NUL-terminated.
            i1II1IiiIi += self . address + "\0"
        if 41 - 41: O0 % i1IIi * i1IIi
        return ( i1II1IiiIi )
if 85 - 85: II111iiii + i1IIi / ooOoO0o . OOooOOo % OoO0O00
if 19 - 19: i1IIi + OOooOOo + IiII . I1IiiI * Ii1I
    def unpack_address ( self , packet ) :
        """Decode one address (in self's current AFI) from the front of packet.

        Sets self.address (and, for dist-names, self.mask_len) and returns
        the remaining packet bytes, or None when packet is too short.
        """
        oOoOo000 = self . packet_format ( )
        O0OOoooO = struct . calcsize ( oOoOo000 )
        if ( len ( packet ) < O0OOoooO ) : return ( None )
        if 43 - 43: i1IIi . OoooooooOO . I1IiiI . OoooooooOO - OoooooooOO
        o0o0O00 = struct . unpack ( oOoOo000 , packet [ : O0OOoooO ] )
        if 10 - 10: II111iiii * I1IiiI / II111iiii / OoOoOO00 . ooOoO0o
        if ( self . is_ipv4 ( ) ) :
            self . address = socket . ntohl ( o0o0O00 [ 0 ] )
            if 42 - 42: I1IiiI - I11i / I1IiiI + I11i
        elif ( self . is_ipv6 ( ) ) :
            if 54 - 54: iII111i
            if 86 - 86: I1ii11iIi11i - Ii1I / IiII
            if 91 - 91: ooOoO0o * i11iIiiIii / O0 % Ii1I
            if 35 - 35: Oo0Ooo % O0
            if 71 - 71: oO0o % OOooOOo * i1IIi
            if 50 - 50: OoOoOO00 + i1IIi
            if 9 - 9: iII111i / I1Ii111 * Ii1I
            if 25 - 25: OoO0O00 . iII111i % I11i . oO0o * iII111i + Oo0Ooo
            # Special case for the upper 64 bits: a value <= 0xffff with a
            # zero low byte is shifted into place directly instead of
            # byte-swapped.  NOTE(review): appears to compensate for how a
            # small leading hextet (e.g. "fe80::") was packed -- confirm
            # against store_address()'s "::" handling.
            if ( o0o0O00 [ 0 ] <= 0xffff and ( o0o0O00 [ 0 ] & 0xff ) == 0 ) :
                O00O00o0O0O = ( o0o0O00 [ 0 ] << 48 ) << 64
            else :
                O00O00o0O0O = byte_swap_64 ( o0o0O00 [ 0 ] ) << 64
                if 32 - 32: IiII
            OO000Oo = byte_swap_64 ( o0o0O00 [ 1 ] )
            self . address = O00O00o0O0O | OO000Oo
            if 68 - 68: I11i . Ii1I + I11i / IiII . I11i / iIii1I11I1II1
        elif ( self . is_mac ( ) ) :
            # Three 16-bit chunks, most-significant first.
            ooiI1IiII = o0o0O00 [ 0 ]
            OO0OO0O = o0o0O00 [ 1 ]
            oOo0o0Ooo0 = o0o0O00 [ 2 ]
            self . address = ( ooiI1IiII << 32 ) + ( OO0OO0O << 16 ) + oOo0o0Ooo0
            if 6 - 6: I1Ii111 % oO0o % I1ii11iIi11i
        elif ( self . is_e164 ( ) ) :
            self . address = ( o0o0O00 [ 0 ] << 32 ) + o0o0O00 [ 1 ]
            if 36 - 36: IiII
        elif ( self . is_dist_name ( ) ) :
            # Dist-names are variable length; helper consumes its own bytes,
            # so nothing further is stripped below (O0OOoooO = 0).
            packet , self . address = lisp_decode_dist_name ( packet )
            self . mask_len = len ( self . address ) * 8
            O0OOoooO = 0
            if 97 - 97: i1IIi % OoOoOO00 . Oo0Ooo - OoO0O00 - ooOoO0o
        packet = packet [ O0OOoooO : : ]
        return ( packet )
if 99 - 99: i11iIiiIii / I1Ii111 / I1IiiI * oO0o
if 100 - 100: II111iiii * Ii1I . OoO0O00 . iII111i + i1IIi * I1IiiI
def is_ipv4 ( self ) :
return ( True if ( self . afi == LISP_AFI_IPV4 ) else False )
if 84 - 84: OoO0O00 + i1IIi
if 99 - 99: OOooOOo + o0oOOo0O0Ooo * I1Ii111 % OoooooooOO % I11i
def is_ipv4_link_local ( self ) :
if ( self . is_ipv4 ( ) == False ) : return ( False )
return ( ( ( self . address >> 16 ) & 0xffff ) == 0xa9fe )
if 48 - 48: o0oOOo0O0Ooo / OoO0O00
if 45 - 45: OOooOOo
def is_ipv4_loopback ( self ) :
if ( self . is_ipv4 ( ) == False ) : return ( False )
return ( self . address == 0x7f000001 )
if 57 - 57: iIii1I11I1II1 + IiII - I1IiiI
if 64 - 64: II111iiii . IiII / I1IiiI
def is_ipv4_multicast ( self ) :
if ( self . is_ipv4 ( ) == False ) : return ( False )
return ( ( ( self . address >> 24 ) & 0xf0 ) == 0xe0 )
if 20 - 20: OoooooooOO - I1ii11iIi11i * I1ii11iIi11i * I1ii11iIi11i
if 87 - 87: OoooooooOO * ooOoO0o
def is_ipv4_string ( self , addr_str ) :
return ( addr_str . find ( "." ) != - 1 )
if 6 - 6: I1Ii111 / ooOoO0o / OoooooooOO . iIii1I11I1II1
if 68 - 68: OoO0O00
def is_ipv6 ( self ) :
return ( True if ( self . afi == LISP_AFI_IPV6 ) else False )
if 26 - 26: I11i % i1IIi / iIii1I11I1II1 % IiII . iII111i + I1ii11iIi11i
if 49 - 49: O0 . IiII + I1Ii111 - I11i % II111iiii
def is_ipv6_link_local ( self ) :
if ( self . is_ipv6 ( ) == False ) : return ( False )
return ( ( ( self . address >> 112 ) & 0xffff ) == 0xfe80 )
if 15 - 15: O0 - OoOoOO00 % II111iiii + O0 % O0 + OoOoOO00
if 34 - 34: I1Ii111
def is_ipv6_string_link_local ( self , addr_str ) :
return ( addr_str . find ( "fe80::" ) != - 1 )
if 69 - 69: iIii1I11I1II1 . OOooOOo % I11i
if 28 - 28: I1Ii111 . ooOoO0o % I1IiiI
def is_ipv6_loopback ( self ) :
if ( self . is_ipv6 ( ) == False ) : return ( False )
return ( self . address == 1 )
if 62 - 62: II111iiii + ooOoO0o + I1IiiI
if 70 - 70: o0oOOo0O0Ooo + Ii1I . OoO0O00 * Ii1I + OOooOOo + ooOoO0o
def is_ipv6_multicast ( self ) :
if ( self . is_ipv6 ( ) == False ) : return ( False )
return ( ( ( self . address >> 120 ) & 0xff ) == 0xff )
if 13 - 13: I1ii11iIi11i
if 97 - 97: oO0o - Oo0Ooo . i11iIiiIii % ooOoO0o * i11iIiiIii - OoooooooOO
def is_ipv6_string ( self , addr_str ) :
return ( addr_str . find ( ":" ) != - 1 )
if 44 - 44: I11i % OoooooooOO / iII111i - i11iIiiIii * i1IIi * o0oOOo0O0Ooo
if 51 - 51: Ii1I + IiII / I1ii11iIi11i + O0 % Ii1I
def is_mac ( self ) :
return ( True if ( self . afi == LISP_AFI_MAC ) else False )
if 55 - 55: iII111i % o0oOOo0O0Ooo - oO0o % OoooooooOO
if 18 - 18: OoooooooOO - I1ii11iIi11i
def is_mac_multicast ( self ) :
if ( self . is_mac ( ) == False ) : return ( False )
return ( ( self . address & 0x010000000000 ) != 0 )
if 94 - 94: OOooOOo . Oo0Ooo + Ii1I * o0oOOo0O0Ooo
if 79 - 79: OOooOOo + Oo0Ooo
def is_mac_broadcast ( self ) :
if ( self . is_mac ( ) == False ) : return ( False )
return ( self . address == 0xffffffffffff )
if 33 - 33: iIii1I11I1II1
if 75 - 75: I1Ii111 / iIii1I11I1II1 . OoooooooOO
def is_mac_string ( self , addr_str ) :
return ( len ( addr_str ) == 15 and addr_str . find ( "-" ) != - 1 )
if 98 - 98: iIii1I11I1II1 / I1IiiI + i1IIi
if 80 - 80: II111iiii . Oo0Ooo * oO0o % II111iiii / I1ii11iIi11i
def is_link_local_multicast ( self ) :
if ( self . is_ipv4 ( ) ) :
return ( ( 0xe0ffff00 & self . address ) == 0xe0000000 )
if 66 - 66: iII111i / OoO0O00 / i11iIiiIii
if ( self . is_ipv6 ( ) ) :
return ( ( self . address >> 112 ) & 0xffff == 0xff02 )
if 99 - 99: OOooOOo
return ( False )
if 51 - 51: i11iIiiIii . o0oOOo0O0Ooo / iII111i
if 53 - 53: oO0o / i1IIi - Oo0Ooo - i1IIi + IiII
def is_null ( self ) :
return ( True if ( self . afi == LISP_AFI_NONE ) else False )
if 79 - 79: oO0o % o0oOOo0O0Ooo / o0oOOo0O0Ooo % iII111i
if 56 - 56: Oo0Ooo % I1ii11iIi11i
def is_ultimate_root ( self ) :
return ( True if self . afi == LISP_AFI_ULTIMATE_ROOT else False )
if 53 - 53: OoO0O00 . I11i - ooOoO0o
if 11 - 11: I11i + i11iIiiIii / oO0o % oO0o * o0oOOo0O0Ooo / OoOoOO00
def is_iid_range ( self ) :
return ( True if self . afi == LISP_AFI_IID_RANGE else False )
if 74 - 74: oO0o . I1Ii111 . II111iiii
if 92 - 92: I1Ii111 % OoooooooOO * I1Ii111
def is_e164 ( self ) :
return ( True if ( self . afi == LISP_AFI_E164 ) else False )
if 78 - 78: Oo0Ooo . I11i . oO0o + O0 / O0
if 41 - 41: iII111i * OoO0O00 - OoO0O00
def is_dist_name ( self ) :
return ( True if ( self . afi == LISP_AFI_NAME ) else False )
if 72 - 72: o0oOOo0O0Ooo + oO0o . I1ii11iIi11i + OoO0O00 / I1Ii111
if 58 - 58: Oo0Ooo / II111iiii % OoooooooOO % II111iiii
def is_geo_prefix ( self ) :
return ( True if ( self . afi == LISP_AFI_GEO_COORD ) else False )
if 39 - 39: i1IIi
if 16 - 16: OoOoOO00 % iIii1I11I1II1 + Ii1I - o0oOOo0O0Ooo . Oo0Ooo + i1IIi
def is_binary ( self ) :
if ( self . is_dist_name ( ) ) : return ( False )
if ( self . is_geo_prefix ( ) ) : return ( False )
return ( True )
if 59 - 59: i1IIi
if 37 - 37: OoO0O00 / I1ii11iIi11i / OoOoOO00
    def store_address ( self , addr_str ) :
        """Parse addr_str (optionally "[iid]addr") into self.address.

        Infers the AFI from syntax when not already set, strips and stores
        a leading "[instance-id]", then parses per AFI.  mask_len is set
        to the AFI's host mask length.
        """
        if ( self . afi == LISP_AFI_NONE ) : self . string_to_afi ( addr_str )
        if 15 - 15: I1IiiI % iIii1I11I1II1 . I1Ii111
        if 71 - 71: I11i - Ii1I + i11iIiiIii % I1ii11iIi11i - OoO0O00 - OOooOOo
        if 71 - 71: OOooOOo
        if 27 - 27: OOooOOo * O0 * i11iIiiIii / OoOoOO00 - i1IIi
        # Peel off a leading "[instance-id]" if present.
        Ii11 = addr_str . find ( "[" )
        O0o0o00O = addr_str . find ( "]" )
        if ( Ii11 != - 1 and O0o0o00O != - 1 ) :
            self . instance_id = int ( addr_str [ Ii11 + 1 : O0o0o00O ] )
            addr_str = addr_str [ O0o0o00O + 1 : : ]
            if ( self . is_dist_name ( ) == False ) :
                # Whitespace is insignificant except inside quoted names.
                addr_str = addr_str . replace ( " " , "" )
                if 73 - 73: iII111i / I1IiiI * ooOoO0o
                if 85 - 85: I11i + I11i + oO0o - OoOoOO00
                if 15 - 15: OoO0O00
                if 88 - 88: Ii1I % i1IIi / I1Ii111
                if 2 - 2: Ii1I . IiII % OoOoOO00
                if 42 - 42: OoOoOO00 * OoO0O00 * IiII - IiII % Oo0Ooo . IiII
        if ( self . is_ipv4 ( ) ) :
            # Dotted-quad to 32-bit integer.
            I1O0o = addr_str . split ( "." )
            oOO = int ( I1O0o [ 0 ] ) << 24
            oOO += int ( I1O0o [ 1 ] ) << 16
            oOO += int ( I1O0o [ 2 ] ) << 8
            oOO += int ( I1O0o [ 3 ] )
            self . address = oOO
        elif ( self . is_ipv6 ( ) ) :
            if 35 - 35: i11iIiiIii
            if 62 - 62: O0 - o0oOOo0O0Ooo + I1Ii111 * I1ii11iIi11i / OOooOOo
            if 87 - 87: Oo0Ooo / OoooooooOO + O0 / o0oOOo0O0Ooo % II111iiii - O0
            if 63 - 63: OOooOOo - OoO0O00 * i1IIi - I1ii11iIi11i . I1IiiI
            if 59 - 59: i11iIiiIii . OOooOOo % Oo0Ooo + O0
            if 84 - 84: I1Ii111 / O0 - IiII . I11i / o0oOOo0O0Ooo
            if 12 - 12: i11iIiiIii / Ii1I + i1IIi
            if 54 - 54: I1IiiI
            if 55 - 55: I1ii11iIi11i % IiII % o0oOOo0O0Ooo + i1IIi * OoooooooOO % II111iiii
            if 37 - 37: Oo0Ooo
            if 33 - 33: OoooooooOO - O0 . O0 - o0oOOo0O0Ooo % o0oOOo0O0Ooo % OoO0O00
            if 27 - 27: ooOoO0o . i11iIiiIii / o0oOOo0O0Ooo * OoO0O00 * OoOoOO00 * oO0o
            if 19 - 19: O0 * II111iiii * OoOoOO00
            if 53 - 53: Oo0Ooo
            if 16 - 16: Ii1I
            if 73 - 73: i11iIiiIii + I1IiiI - IiII - IiII + IiII . Ii1I
            if 78 - 78: OoO0O00 + oO0o
            # Remember whether chars 2:4 are "::" (a short leading hextet,
            # e.g. "10::1"); the hexlify digits are reordered below.
            # NOTE(review): pairs with unpack_address()'s special case --
            # confirm the byte-order intent there.
            o0I11I1II1i = ( addr_str [ 2 : 4 ] == "::" )
            try :
                addr_str = socket . inet_pton ( socket . AF_INET6 , addr_str )
            except :
                # Unparseable IPv6 text falls back to the unspecified address.
                addr_str = socket . inet_pton ( socket . AF_INET6 , "0::0" )
                if 81 - 81: I1ii11iIi11i * I1IiiI * OoOoOO00 / IiII
            addr_str = binascii . hexlify ( addr_str )
            if 85 - 85: OoooooooOO
            if ( o0I11I1II1i ) :
                # Swap the first two bytes of the hex string into place.
                addr_str = addr_str [ 2 : 4 ] + addr_str [ 0 : 2 ] + addr_str [ 4 : : ]
                if 87 - 87: Ii1I
            self . address = int ( addr_str , 16 )
            if 78 - 78: iII111i * iIii1I11I1II1 . OoO0O00 . OoOoOO00 % I1Ii111
        elif ( self . is_geo_prefix ( ) ) :
            # Geo prefixes store a lisp_geo object, not an integer.
            o0oO0O = lisp_geo ( None )
            o0oO0O . name = "geo-prefix-{}" . format ( o0oO0O )
            o0oO0O . parse_geo_string ( addr_str )
            self . address = o0oO0O
        elif ( self . is_mac ( ) ) :
            # "xxxx-xxxx-xxxx" -> 48-bit integer.
            addr_str = addr_str . replace ( "-" , "" )
            oOO = int ( addr_str , 16 )
            self . address = oOO
        elif ( self . is_e164 ( ) ) :
            # Strip leading "+"; digits are BCD-shifted one nibble left.
            addr_str = addr_str [ 1 : : ]
            oOO = int ( addr_str , 16 )
            self . address = oOO << 4
        elif ( self . is_dist_name ( ) ) :
            # Stored as the raw string without surrounding quotes.
            self . address = addr_str . replace ( "'" , "" )
            if 77 - 77: OOooOOo / OoooooooOO
        self . mask_len = self . host_mask_len ( )
if 79 - 79: i11iIiiIii % o0oOOo0O0Ooo * II111iiii . i1IIi * Ii1I - i11iIiiIii
    def store_prefix ( self , prefix_str ) :
        """Parse a prefix string into self.address and self.mask_len.

        Handles three syntaxes: a geo-coordinate string (mask is the bit
        length of the text after "]"), "addr/len", and a quoted
        distinguished-name (mask is the bit length of the quoted text).
        """
        if ( self . is_geo_string ( prefix_str ) ) :
            iI11I = prefix_str . find ( "]" )
            ooooOo00OO0o = len ( prefix_str [ iI11I + 1 : : ] ) * 8
        elif ( prefix_str . find ( "/" ) != - 1 ) :
            prefix_str , ooooOo00OO0o = prefix_str . split ( "/" )
        else :
            # Distinguished-name: mask spans the text between the quotes;
            # silently return on malformed (unquoted) input.
            iIi1IiI = prefix_str . find ( "'" )
            if ( iIi1IiI == - 1 ) : return
            OoO00 = prefix_str . find ( "'" , iIi1IiI + 1 )
            if ( OoO00 == - 1 ) : return
            ooooOo00OO0o = len ( prefix_str [ iIi1IiI + 1 : OoO00 ] ) * 8
            if 31 - 31: IiII / o0oOOo0O0Ooo
            if 27 - 27: Oo0Ooo
        self . string_to_afi ( prefix_str )
        self . store_address ( prefix_str )
        self . mask_len = int ( ooooOo00OO0o )
if 35 - 35: o0oOOo0O0Ooo % iII111i / O0 * I1IiiI . o0oOOo0O0Ooo / OOooOOo
def zero_host_bits ( self ) :
oOoOO00O = ( 2 ** self . mask_len ) - 1
oOO0O = self . addr_length ( ) * 8 - self . mask_len
oOoOO00O <<= oOO0O
self . address &= oOoOO00O
if 76 - 76: iIii1I11I1II1 % OoO0O00 / I1ii11iIi11i . I1ii11iIi11i
if 26 - 26: IiII . Oo0Ooo + iII111i
def is_geo_string ( self , addr_str ) :
iI11I = addr_str . find ( "]" )
if ( iI11I != - 1 ) : addr_str = addr_str [ iI11I + 1 : : ]
if 92 - 92: Oo0Ooo - I1IiiI * I1IiiI
o0oO0O = addr_str . split ( "/" )
if ( len ( o0oO0O ) == 2 ) :
if ( o0oO0O [ 1 ] . isdigit ( ) == False ) : return ( False )
if 78 - 78: OoOoOO00 + OoO0O00 % oO0o + Oo0Ooo
o0oO0O = o0oO0O [ 0 ]
o0oO0O = o0oO0O . split ( "-" )
oo0oOoO0OoOOOo0O = len ( o0oO0O )
if ( oo0oOoO0OoOOOo0O < 8 or oo0oOoO0OoOOOo0O > 9 ) : return ( False )
if 24 - 24: I1Ii111 + OOooOOo
for ooo0oO000 in range ( 0 , oo0oOoO0OoOOOo0O ) :
if ( ooo0oO000 == 3 ) :
if ( o0oO0O [ ooo0oO000 ] in [ "N" , "S" ] ) : continue
return ( False )
if 17 - 17: II111iiii + iII111i + OoO0O00 % I11i
if ( ooo0oO000 == 7 ) :
if ( o0oO0O [ ooo0oO000 ] in [ "W" , "E" ] ) : continue
return ( False )
if 23 - 23: iIii1I11I1II1 % O0 % IiII % I1ii11iIi11i
if ( o0oO0O [ ooo0oO000 ] . isdigit ( ) == False ) : return ( False )
if 89 - 89: OOooOOo - I1Ii111 - iII111i
return ( True )
if 67 - 67: oO0o
if 76 - 76: I1IiiI % I1IiiI - IiII / OoOoOO00 / I1ii11iIi11i
def string_to_afi ( self , addr_str ) :
if ( addr_str . count ( "'" ) == 2 ) :
self . afi = LISP_AFI_NAME
return
if 42 - 42: I1IiiI + I1ii11iIi11i + Oo0Ooo * i1IIi - II111iiii
if ( addr_str . find ( ":" ) != - 1 ) : self . afi = LISP_AFI_IPV6
elif ( addr_str . find ( "." ) != - 1 ) : self . afi = LISP_AFI_IPV4
elif ( addr_str . find ( "+" ) != - 1 ) : self . afi = LISP_AFI_E164
elif ( self . is_geo_string ( addr_str ) ) : self . afi = LISP_AFI_GEO_COORD
elif ( addr_str . find ( "-" ) != - 1 ) : self . afi = LISP_AFI_MAC
else : self . afi = LISP_AFI_NONE
if 15 - 15: o0oOOo0O0Ooo
if 60 - 60: I1ii11iIi11i / I1Ii111
def print_address ( self ) :
o0o0O00 = self . print_address_no_iid ( )
o0OOoOO = "[" + str ( self . instance_id )
for Ii11 in self . iid_list : o0OOoOO += "," + str ( Ii11 )
o0OOoOO += "]"
o0o0O00 = "{}{}" . format ( o0OOoOO , o0o0O00 )
return ( o0o0O00 )
if 13 - 13: I1Ii111
if 52 - 52: II111iiii / OoO0O00 . Ii1I
    def print_address_no_iid ( self ) :
        """Render just the address (no "[iid]" prefix) as printable text,
        dispatching on AFI; unknown AFIs yield "unknown-afi:N"."""
        if ( self . is_ipv4 ( ) ) :
            # 32-bit integer -> dotted quad.
            o0o0O00 = self . address
            o0o0oo0O00o = o0o0O00 >> 24
            oO0 = ( o0o0O00 >> 16 ) & 0xff
            IIIiI1i1iiIIi = ( o0o0O00 >> 8 ) & 0xff
            iIIiIi = o0o0O00 & 0xff
            return ( "{}.{}.{}.{}" . format ( o0o0oo0O00o , oO0 , IIIiI1i1iiIIi , iIIiIi ) )
        elif ( self . is_ipv6 ( ) ) :
            # 128-bit integer -> 32 hex digits -> 16 bytes -> inet_ntop text.
            I1iiIiiii1111 = lisp_hex_string ( self . address ) . zfill ( 32 )
            I1iiIiiii1111 = binascii . unhexlify ( I1iiIiiii1111 )
            I1iiIiiii1111 = socket . inet_ntop ( socket . AF_INET6 , I1iiIiiii1111 )
            return ( "{}" . format ( I1iiIiiii1111 ) )
        elif ( self . is_geo_prefix ( ) ) :
            return ( "{}" . format ( self . address . print_geo ( ) ) )
        elif ( self . is_mac ( ) ) :
            # 48-bit integer -> "xxxx-xxxx-xxxx".
            I1iiIiiii1111 = lisp_hex_string ( self . address ) . zfill ( 12 )
            I1iiIiiii1111 = "{}-{}-{}" . format ( I1iiIiiii1111 [ 0 : 4 ] , I1iiIiiii1111 [ 4 : 8 ] ,
            I1iiIiiii1111 [ 8 : 12 ] )
            return ( "{}" . format ( I1iiIiiii1111 ) )
        elif ( self . is_e164 ( ) ) :
            I1iiIiiii1111 = lisp_hex_string ( self . address ) . zfill ( 15 )
            return ( "+{}" . format ( I1iiIiiii1111 ) )
        elif ( self . is_dist_name ( ) ) :
            return ( "'{}'" . format ( self . address ) )
        elif ( self . is_null ( ) ) :
            return ( "no-address" )
            if 36 - 36: OOooOOo * I1IiiI
        return ( "unknown-afi:{}" . format ( self . afi ) )
if 78 - 78: Oo0Ooo * IiII . Oo0Ooo / I11i
if 85 - 85: i1IIi - IiII - o0oOOo0O0Ooo + o0oOOo0O0Ooo
def print_prefix ( self ) :
if ( self . is_ultimate_root ( ) ) : return ( "[*]" )
if ( self . is_iid_range ( ) ) :
if ( self . mask_len == 32 ) : return ( "[{}]" . format ( self . instance_id ) )
OOoOo000Ooooo = self . instance_id + ( 2 ** ( 32 - self . mask_len ) - 1 )
return ( "[{}-{}]" . format ( self . instance_id , OOoOo000Ooooo ) )
if 68 - 68: iIii1I11I1II1 % oO0o
o0o0O00 = self . print_address ( )
if ( self . is_dist_name ( ) ) : return ( o0o0O00 )
if ( self . is_geo_prefix ( ) ) : return ( o0o0O00 )
if 5 - 5: o0oOOo0O0Ooo
iI11I = o0o0O00 . find ( "no-address" )
if ( iI11I == - 1 ) :
o0o0O00 = "{}/{}" . format ( o0o0O00 , str ( self . mask_len ) )
else :
o0o0O00 = o0o0O00 [ 0 : iI11I ]
if 24 - 24: OoooooooOO
return ( o0o0O00 )
if 64 - 64: iIii1I11I1II1 % OoooooooOO * i1IIi
if 50 - 50: I1IiiI - i1IIi / Oo0Ooo * I1ii11iIi11i . II111iiii
def print_prefix_no_iid ( self ) :
o0o0O00 = self . print_address_no_iid ( )
if ( self . is_dist_name ( ) ) : return ( o0o0O00 )
if ( self . is_geo_prefix ( ) ) : return ( o0o0O00 )
return ( "{}/{}" . format ( o0o0O00 , str ( self . mask_len ) ) )
if 24 - 24: OoooooooOO * Ii1I
if 66 - 66: O0 - Ii1I % IiII
def print_prefix_url ( self ) :
if ( self . is_ultimate_root ( ) ) : return ( "0--0" )
o0o0O00 = self . print_address ( )
iI11I = o0o0O00 . find ( "]" )
if ( iI11I != - 1 ) : o0o0O00 = o0o0O00 [ iI11I + 1 : : ]
if ( self . is_geo_prefix ( ) ) :
o0o0O00 = o0o0O00 . replace ( "/" , "-" )
return ( "{}-{}" . format ( self . instance_id , o0o0O00 ) )
if 97 - 97: Ii1I * O0 * I1IiiI % oO0o
return ( "{}-{}-{}" . format ( self . instance_id , o0o0O00 , self . mask_len ) )
if 44 - 44: O0
if 38 - 38: i11iIiiIii
def print_sg ( self , g ) :
IiIIi1I1I11Ii = self . print_prefix ( )
Ooo = IiIIi1I1I11Ii . find ( "]" ) + 1
g = g . print_prefix ( )
ii1i1I11II = g . find ( "]" ) + 1
o0 = "[{}]({}, {})" . format ( self . instance_id , IiIIi1I1I11Ii [ Ooo : : ] , g [ ii1i1I11II : : ] )
return ( o0 )
if 72 - 72: OoooooooOO * II111iiii + OoO0O00 % iIii1I11I1II1 . I1ii11iIi11i % OoooooooOO
if 19 - 19: OoOoOO00 + I1Ii111
def hash_address ( self , addr ) :
OooO0O0Ooo = self . address
oO0O = addr . address
if 19 - 19: I1ii11iIi11i / I1Ii111 + OoooooooOO - O0
if ( self . is_geo_prefix ( ) ) : OooO0O0Ooo = self . address . print_geo ( )
if ( addr . is_geo_prefix ( ) ) : oO0O = addr . address . print_geo ( )
if 49 - 49: I1ii11iIi11i / OoOoOO00 - I1IiiI + iII111i . OOooOOo % oO0o
if ( type ( OooO0O0Ooo ) == str ) :
OooO0O0Ooo = int ( binascii . hexlify ( OooO0O0Ooo [ 0 : 1 ] ) )
if 34 - 34: OoO0O00 - I1IiiI + OoOoOO00
if ( type ( oO0O ) == str ) :
oO0O = int ( binascii . hexlify ( oO0O [ 0 : 1 ] ) )
if 22 - 22: iIii1I11I1II1 . i1IIi . OOooOOo % Oo0Ooo - i1IIi
return ( OooO0O0Ooo ^ oO0O )
if 78 - 78: I1IiiI / i1IIi % II111iiii % I1IiiI % Ii1I
if 29 - 29: i1IIi % o0oOOo0O0Ooo + OOooOOo / Oo0Ooo
if 38 - 38: IiII . I1Ii111
if 69 - 69: ooOoO0o + OoOoOO00 + II111iiii % I1Ii111 + Ii1I . ooOoO0o
if 73 - 73: I11i % I11i . ooOoO0o + OoOoOO00
if 33 - 33: i11iIiiIii . i11iIiiIii * i11iIiiIii / iIii1I11I1II1 / I1ii11iIi11i . ooOoO0o
def is_more_specific(self, prefix):
    """Return True when self is equal to or more specific than 'prefix'.

    Ultimate-root matches everything; an IID-range prefix matches on the
    instance-id span; non-binary addresses (geo / distinguished-name)
    compare as string prefixes; binary addresses compare under the mask.
    """
    if prefix.afi == LISP_AFI_ULTIMATE_ROOT:
        return True

    prefix_mask_len = prefix.mask_len
    if prefix.afi == LISP_AFI_IID_RANGE:
        span = 2 ** (32 - prefix_mask_len)
        lower = prefix.instance_id
        return lower <= self.instance_id < lower + span

    if self.instance_id != prefix.instance_id:
        return False
    if self.afi != prefix.afi and prefix.afi != LISP_AFI_NONE:
        return False

    # Non-binary (string-form) addresses: prefix match on the rendered text.
    if self.is_binary() == False:
        if prefix.afi == LISP_AFI_NONE:
            return True
        if type(self.address) != type(prefix.address):
            return False

        mine = self.address
        theirs = prefix.address
        if self.is_geo_prefix():
            mine = self.address.print_geo()
            theirs = prefix.address.print_geo()

        if len(mine) < len(theirs):
            return False
        return mine.find(theirs) == 0

    # Binary addresses: self must be at least as long, and must equal the
    # prefix under the prefix's mask.
    if self.mask_len < prefix_mask_len:
        return False

    host_bits = (prefix.addr_length() * 8) - prefix_mask_len
    mask = ((2 ** prefix_mask_len) - 1) << host_bits
    return (self.address & mask) == prefix.address
if 15 - 15: I1ii11iIi11i + oO0o
if 99 - 99: oO0o - ooOoO0o - II111iiii * OoooooooOO / O0
def mask_address(self, mask_len):
    """Zero the host bits of self.address, keeping the top mask_len bits."""
    host_bits = (self.addr_length() * 8) - mask_len
    keep_mask = ((2 ** mask_len) - 1) << host_bits
    self.address &= keep_mask
if 57 - 57: iIii1I11I1II1 / IiII + OoO0O00 * oO0o + Ii1I
if 76 - 76: i11iIiiIii . OOooOOo / I11i * oO0o % iIii1I11I1II1 . ooOoO0o
def is_exact_match(self, prefix):
    """True when both prefixes share an instance-id and render identically."""
    if self.instance_id != prefix.instance_id:
        return False
    other_str = prefix.print_prefix() if prefix else ""
    return self.print_prefix() == other_str
if 81 - 81: OOooOOo . OOooOOo
if 70 - 70: I1IiiI / I11i - II111iiii . o0oOOo0O0Ooo / O0
def is_local(self):
    """True when this address equals one of our own RLOCs (lisp_myrlocs).

    Slot 0 of lisp_myrlocs holds our IPv4 RLOC, slot 1 our IPv6 RLOC;
    non-IP addresses are never considered local.
    """
    if self.is_ipv4():
        my_rloc = lisp_myrlocs[0]
        if my_rloc == None: return False
        return self.print_address_no_iid() == my_rloc.print_address_no_iid()

    if self.is_ipv6():
        my_rloc = lisp_myrlocs[1]
        if my_rloc == None: return False
        return self.print_address_no_iid() == my_rloc.print_address_no_iid()

    return False
if 11 - 11: Ii1I
if 77 - 77: IiII * o0oOOo0O0Ooo % oO0o
def store_iid_range(self, iid, mask_len):
    """Store an instance-id range (iid, mask_len) into this address.

    A (0, 0) range denotes the ultimate root; anything else is an
    IID-range pseudo-AFI. The instance-id and mask length are always
    recorded, whatever the current AFI.

    Bug fix: the original tested 'iid is 0 and mask_len is 0' — identity
    comparison with int literals only works by accident of CPython's
    small-int cache (and is a SyntaxWarning on Python >= 3.8); use '=='.
    """
    if self.afi == LISP_AFI_NONE:
        if iid == 0 and mask_len == 0:
            self.afi = LISP_AFI_ULTIMATE_ROOT
        else:
            self.afi = LISP_AFI_IID_RANGE

    self.instance_id = iid
    self.mask_len = mask_len
if 85 - 85: OoO0O00 . IiII / iII111i . I1IiiI
if 8 - 8: i1IIi - iIii1I11I1II1 + iII111i
def lcaf_length(self, lcaf_type):
    """Return the LCAF payload length for this address and LCAF type.

    Base cost is the packed address plus its 2-byte AFI; each LCAF type
    adds its own fixed header overhead. Multicast-Info carries two
    addresses (source and group), hence the doubled base.
    """
    base = self.addr_length() + 2

    if lcaf_type == LISP_LCAF_MCAST_INFO_TYPE:
        return base * 2 + 8

    if lcaf_type in (LISP_LCAF_AFI_LIST_TYPE, LISP_LCAF_INSTANCE_ID_TYPE,
        LISP_LCAF_ASN_TYPE, LISP_LCAF_NAT_TYPE, LISP_LCAF_NONCE_LOC_TYPE,
        LISP_LCAF_SOURCE_DEST_TYPE, LISP_LCAF_RLE_TYPE):
        return base + 4
    if lcaf_type == LISP_LCAF_APP_DATA_TYPE:
        return base + 8
    if lcaf_type == LISP_LCAF_GEO_COORD_TYPE:
        return base + 12
    if lcaf_type == LISP_LCAF_SECURITY_TYPE:
        return base + 6

    # LISP_LCAF_OPAQUE_TYPE and LISP_LCAF_ELP_TYPE add nothing.
    return base
if 90 - 90: i11iIiiIii - Oo0Ooo
if 31 - 31: OoOoOO00 + OoOoOO00 + OoooooooOO % O0
if 14 - 14: i1IIi / OoooooooOO . I1IiiI * I1Ii111 + OoO0O00
if 45 - 45: OoooooooOO * I1Ii111
if 7 - 7: O0
if 42 - 42: o0oOOo0O0Ooo / Ii1I
if 31 - 31: OOooOOo
if 20 - 20: i11iIiiIii * oO0o * ooOoO0o
if 65 - 65: I1ii11iIi11i / Oo0Ooo / I1IiiI + IiII
if 71 - 71: OoO0O00 . I1Ii111 + OoooooooOO
if 9 - 9: OoooooooOO / iIii1I11I1II1 % I1IiiI . I1IiiI / I11i - iII111i
if 60 - 60: I11i - OoO0O00 - OoOoOO00 * ooOoO0o - i1IIi
if 18 - 18: ooOoO0o + i11iIiiIii + O0 + OOooOOo / Ii1I
if 65 - 65: I1IiiI . ooOoO0o
if 51 - 51: I1Ii111
if 89 - 89: Oo0Ooo
if 15 - 15: OOooOOo * II111iiii - OOooOOo * iIii1I11I1II1
def lcaf_encode_iid(self):
    """Encode this EID as an Instance-ID LCAF; returns the packed bytes."""
    lcaf_type = LISP_LCAF_INSTANCE_ID_TYPE
    lcaf_len = socket.htons(self.lcaf_length(lcaf_type))
    iid = self.instance_id
    afi = self.afi
    mask_len = 0

    # Negative AFIs are this module's internal pseudo-AFIs; they need a
    # translated on-the-wire AFI and, for ranges, an explicit mask length.
    if afi < 0:
        if self.afi == LISP_AFI_GEO_COORD:
            afi = LISP_AFI_LCAF
            mask_len = 0
        else:
            afi = 0
            mask_len = self.mask_len

    packet = struct.pack("BBBBH", 0, 0, lcaf_type, mask_len, lcaf_len)
    packet += struct.pack("IH", socket.htonl(iid), socket.htons(afi))

    # AFI 0 means no address payload follows.
    if afi == 0:
        return packet

    if self.afi == LISP_AFI_GEO_COORD:
        # Drop the placeholder AFI just packed; encode_geo() emits its own.
        packet = packet[0:-2]
        packet += self.address.encode_geo()
        return packet

    return packet + self.pack_address()
if 94 - 94: OoooooooOO - ooOoO0o % I1ii11iIi11i + I1Ii111
if 51 - 51: I1ii11iIi11i . iII111i / i1IIi * ooOoO0o % I11i
def lcaf_decode_iid(self, packet):
    """Decode an Instance-ID LCAF into self.

    Returns the remaining packet bytes on success, None on any parse error.
    Handles three payload shapes: no address (AFI 0 — root or IID range),
    a distinguished-name, a nested Geo-Coordinates LCAF, or a plain
    AFI-encoded address.
    """
    header_format = "BBBBH"
    header_size = struct.calcsize(header_format)
    if len(packet) < header_size: return None

    rsvd, flags, lcaf_type, iid_mask_len, lcaf_len = struct.unpack(
        header_format, packet[:header_size])
    packet = packet[header_size::]

    if lcaf_type != LISP_LCAF_INSTANCE_ID_TYPE: return None

    iid_format = "IH"
    iid_size = struct.calcsize(iid_format)
    if len(packet) < iid_size: return None

    iid, afi = struct.unpack(iid_format, packet[:iid_size])
    packet = packet[iid_size::]

    self.instance_id = socket.ntohl(iid)
    afi = socket.ntohs(afi)
    self.afi = afi
    if iid_mask_len != 0 and afi == 0: self.mask_len = iid_mask_len

    # AFI 0 carries no address: it is either an IID range (mask length
    # supplied) or the ultimate root.
    if afi == 0:
        self.afi = LISP_AFI_IID_RANGE if iid_mask_len else \
            LISP_AFI_ULTIMATE_ROOT
        return packet

    # Distinguished-name payload.
    if self.is_dist_name():
        packet, self.address = lisp_decode_dist_name(packet)
        self.mask_len = len(self.address) * 8
        return packet

    # Nested LCAF: only Geo-Coordinates is accepted here.
    if afi == LISP_AFI_LCAF:
        geo_format = "BBBBH"
        geo_size = struct.calcsize(geo_format)
        if len(packet) < geo_size: return None

        rsvd2, flags2, lcaf_type, rloc_mask_len, geo_len = struct.unpack(
            geo_format, packet[:geo_size])

        if lcaf_type != LISP_LCAF_GEO_COORD_TYPE: return None

        geo_len = socket.ntohs(geo_len)
        packet = packet[geo_size::]
        if geo_len > len(packet): return None

        geo = lisp_geo("")
        self.afi = LISP_AFI_GEO_COORD
        self.address = geo
        packet = geo.decode_geo(packet, geo_len, rloc_mask_len)
        self.mask_len = self.host_mask_len()
        return packet

    # Plain AFI-encoded address.
    addr_len = self.addr_length()
    if len(packet) < addr_len: return None

    return self.unpack_address(packet)
if 70 - 70: oO0o / iII111i * i1IIi / II111iiii / OoOoOO00 + oO0o
if 30 - 30: i1IIi - iII111i - i11iIiiIii . OoOoOO00 . o0oOOo0O0Ooo
if 74 - 74: i11iIiiIii / II111iiii
if 62 - 62: O0
if 63 - 63: Oo0Ooo + Oo0Ooo
if 48 - 48: Oo0Ooo * I1ii11iIi11i % II111iiii
if 42 - 42: I1Ii111 - ooOoO0o % o0oOOo0O0Ooo * I1IiiI . o0oOOo0O0Ooo
if 84 - 84: iIii1I11I1II1
if 39 - 39: Ii1I . II111iiii / I1IiiI
if 44 - 44: Ii1I / Ii1I / OoO0O00 % ooOoO0o / I11i . I1ii11iIi11i
if 41 - 41: I1ii11iIi11i * ooOoO0o * I11i + O0 * O0 - O0
if 81 - 81: I1Ii111 % OoO0O00 / O0
if 55 - 55: i1IIi - I1Ii111 + I11i
if 93 - 93: I1IiiI % IiII . OoOoOO00 + iII111i
if 81 - 81: ooOoO0o / I1Ii111 + OOooOOo / Oo0Ooo / OoOoOO00
if 34 - 34: ooOoO0o * iIii1I11I1II1 % i11iIiiIii * OOooOOo - OOooOOo
if 63 - 63: Oo0Ooo / oO0o + iII111i % OoooooooOO * I11i
if 34 - 34: I1IiiI + I1Ii111 % ooOoO0o
if 24 - 24: Ii1I % II111iiii - i11iIiiIii
if 52 - 52: OoO0O00
if 76 - 76: ooOoO0o - iII111i % ooOoO0o / oO0o . OOooOOo
def lcaf_encode_sg(self, group):
    """Encode self (source) and 'group' as a Multicast-Info LCAF."""
    lcaf_type = LISP_LCAF_MCAST_INFO_TYPE
    iid = socket.htonl(self.instance_id)
    lcaf_len = socket.htons(self.lcaf_length(lcaf_type))

    packet = struct.pack("BBBBHIHBB", 0, 0, lcaf_type, 0, lcaf_len, iid,
        0, self.mask_len, group.mask_len)

    # Source EID then group address, each preceded by its AFI.
    packet += struct.pack("H", socket.htons(self.afi))
    packet += self.pack_address()
    packet += struct.pack("H", socket.htons(group.afi))
    packet += group.pack_address()
    return packet
if 22 - 22: i1IIi - II111iiii - OoOoOO00 . iII111i
if 43 - 43: I1Ii111 * OOooOOo - IiII . i11iIiiIii
def lcaf_decode_sg(self, packet):
    """Decode a Multicast-Info LCAF: source into self, group into a new
    lisp_address.

    Returns [remaining-packet, group] on success or [None, None] on any
    parse error.
    """
    header_format = "BBBBHIHBB"
    header_size = struct.calcsize(header_format)
    if len(packet) < header_size: return [None, None]

    rsvd1, flags, lcaf_type, rsvd2, lcaf_len, iid, rsvd3, source_ml, \
        group_ml = struct.unpack(header_format, packet[:header_size])

    packet = packet[header_size::]
    if lcaf_type != LISP_LCAF_MCAST_INFO_TYPE: return [None, None]

    self.instance_id = socket.ntohl(iid)
    lcaf_len = socket.ntohs(lcaf_len) - 8

    # Source EID: 2-byte AFI followed by the address.
    afi_format = "H"
    afi_size = struct.calcsize(afi_format)
    if len(packet) < afi_size: return [None, None]
    if lcaf_len < afi_size: return [None, None]

    afi = struct.unpack(afi_format, packet[:afi_size])[0]
    packet = packet[afi_size::]
    lcaf_len -= afi_size
    self.afi = socket.ntohs(afi)
    self.mask_len = source_ml

    addr_len = self.addr_length()
    if lcaf_len < addr_len: return [None, None]

    packet = self.unpack_address(packet)
    if packet == None: return [None, None]
    lcaf_len -= addr_len

    # Group address: 2-byte AFI followed by the address.
    if len(packet) < afi_size: return [None, None]
    if lcaf_len < afi_size: return [None, None]

    afi = struct.unpack(afi_format, packet[:afi_size])[0]
    packet = packet[afi_size::]
    lcaf_len -= afi_size

    group = lisp_address(LISP_AFI_NONE, "", 0, 0)
    group.afi = socket.ntohs(afi)
    group.mask_len = group_ml
    group.instance_id = self.instance_id

    # NOTE(review): the length check below uses self.addr_length(), not
    # group.addr_length(), exactly as the original did — only correct when
    # source and group share an AFI; verify intent before changing.
    addr_len = self.addr_length()
    if lcaf_len < addr_len: return [None, None]

    packet = group.unpack_address(packet)
    if packet == None: return [None, None]

    return [packet, group]
if 33 - 33: i1IIi . Oo0Ooo + I11i
if 97 - 97: OOooOOo / IiII / ooOoO0o / OoooooooOO
def lcaf_decode_eid(self, packet):
    """Peek the LCAF type of an EID record and dispatch to the right decoder.

    Returns [remaining-packet, group-or-None]; on errors in the geo branch
    it returns a bare None (preserving the original's asymmetric error
    returns). Unknown LCAF types fall through and return None.
    """
    peek_format = "BBB"
    peek_size = struct.calcsize(peek_format)
    if len(packet) < peek_size: return [None, None]

    # Peek only — the selected decoder re-reads the full header itself.
    rsvd, flags, lcaf_type = struct.unpack(peek_format, packet[:peek_size])

    if lcaf_type == LISP_LCAF_INSTANCE_ID_TYPE:
        return [self.lcaf_decode_iid(packet), None]

    if lcaf_type == LISP_LCAF_MCAST_INFO_TYPE:
        packet, group = self.lcaf_decode_sg(packet)
        return [packet, group]

    if lcaf_type == LISP_LCAF_GEO_COORD_TYPE:
        geo_format = "BBBBH"
        geo_size = struct.calcsize(geo_format)
        if len(packet) < geo_size: return None

        rsvd2, flags2, lcaf_type, rloc_mask_len, geo_len = struct.unpack(
            geo_format, packet[:geo_size])

        if lcaf_type != LISP_LCAF_GEO_COORD_TYPE: return None

        geo_len = socket.ntohs(geo_len)
        packet = packet[geo_size::]
        if geo_len > len(packet): return None

        geo = lisp_geo("")
        self.instance_id = 0
        self.afi = LISP_AFI_GEO_COORD
        self.address = geo
        packet = geo.decode_geo(packet, geo_len, rloc_mask_len)
        self.mask_len = self.host_mask_len()

        return [packet, None]
if 80 - 80: I11i + I1Ii111 / I1IiiI * OOooOOo % iII111i
if 48 - 48: iIii1I11I1II1 + i1IIi . I1IiiI % OoO0O00 - iIii1I11I1II1 / i1IIi
if 14 - 14: IiII . I11i
if 13 - 13: OoOoOO00 - I11i . OOooOOo % OoO0O00
if 79 - 79: iII111i / Ii1I % i11iIiiIii . I1IiiI % OoO0O00 / i11iIiiIii
if 100 - 100: OOooOOo + Oo0Ooo . iIii1I11I1II1 . ooOoO0o * Oo0Ooo
class lisp_elp_node():
    """One hop (re-encapsulation point) in an Explicit Locator Path."""

    def __init__(self):
        self.address = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.probe = False        # P-bit: RLOC-probe this hop
        self.strict = False       # S-bit: hop must be reachable
        self.eid = False          # hop address is an EID, not an RLOC
        self.we_are_last = False  # we are the final hop of the path

    def copy_elp_node(self):
        """Return a copy of this ELP node."""
        node = lisp_elp_node()
        # NOTE(review): copies via node.copy_address(...) exactly as the
        # original; lisp_rle_node.copy_rle_node uses .address.copy_address —
        # verify which is intended before changing.
        node.copy_address(self.address)
        node.probe = self.probe
        node.strict = self.strict
        node.eid = self.eid
        node.we_are_last = self.we_are_last
        return node
if 9 - 9: iIii1I11I1II1
if 75 - 75: I11i . II111iiii * I1IiiI * IiII
if 36 - 36: OOooOOo / I1ii11iIi11i / oO0o / ooOoO0o / I11i
class lisp_elp():
    """An Explicit Locator Path: an ordered list of lisp_elp_node hops."""

    def __init__(self, name):
        self.elp_name = name
        self.elp_nodes = []
        self.use_elp_node = None   # the hop we currently forward to
        self.we_are_last = False

    def copy_elp(self):
        """Return a copy of this ELP with per-node copies of every hop."""
        elp = lisp_elp(self.elp_name)
        elp.use_elp_node = self.use_elp_node
        elp.we_are_last = self.we_are_last
        for node in self.elp_nodes:
            elp.elp_nodes.append(node.copy_elp_node())
        return elp

    def print_elp(self, want_marker):
        """Render hops as 'addr(rRpPsS)' flag strings, comma separated.

        With want_marker set, '*' marks the hop in use and 'x' marks us
        as the last hop.
        """
        pieces = []
        for node in self.elp_nodes:
            marker = ""
            if want_marker:
                if node == self.use_elp_node:
                    marker = "*"
                elif node.we_are_last:
                    marker = "x"

            pieces.append("{}{}({}{}{})".format(marker,
                node.address.print_address_no_iid(),
                "r" if node.eid else "R", "P" if node.probe else "p",
                "S" if node.strict else "s"))

        return ", ".join(pieces)

    def select_elp_node(self):
        """Pick the hop to forward to, relative to our own RLOC's position.

        If one of our RLOCs appears in the path we forward to the next
        hop (or mark ourselves last); otherwise we use the first hop.
        """
        v4_rloc, v6_rloc, device = lisp_myrlocs
        position = None

        for node in self.elp_nodes:
            if v4_rloc and node.address.is_exact_match(v4_rloc):
                position = self.elp_nodes.index(node)
                break
            if v6_rloc and node.address.is_exact_match(v6_rloc):
                position = self.elp_nodes.index(node)
                break

        # We are not in the path — forward to the first hop. NOTE(review):
        # 'node' here is the last loop value (raises NameError on an empty
        # path), matching the original behavior.
        if position == None:
            self.use_elp_node = self.elp_nodes[0]
            node.we_are_last = False
            return

        # We are the final hop — nothing to forward to.
        if self.elp_nodes[-1] == self.elp_nodes[position]:
            self.use_elp_node = None
            node.we_are_last = True
            return

        # Forward to the hop that follows us.
        self.use_elp_node = self.elp_nodes[position + 1]
        return
if 20 - 20: Oo0Ooo
if 33 - 33: oO0o - OoOoOO00 - i11iIiiIii + I1Ii111 + iIii1I11I1II1
if 2 - 2: OoooooooOO + IiII / iII111i . iIii1I11I1II1 * OoOoOO00
class lisp_geo():
    """Geo-coordinates (lat/long/altitude/radius) per the Geo-Coordinates
    LCAF type (RFC 8060).

    Sign convention throughout this class: latitude is stored NEGATIVE for
    North, longitude NEGATIVE for East (the parse/print routines agree on
    this; dms_to_decimal() flips it back to standard decimal degrees).
    """

    def __init__(self, name):
        self.geo_name = name
        self.latitude = 0xffffffff   # degrees; 0xffffffff == unset
        self.lat_mins = 0
        self.lat_secs = 0
        self.longitude = 0xffffffff  # degrees; 0xffffffff == unset
        self.long_mins = 0
        self.long_secs = 0
        self.altitude = -1           # -1 == no altitude supplied
        self.radius = 0              # km; 0 == a point, not a circle

    def copy_geo(self):
        """Return a field-by-field copy of this geo object."""
        geo = lisp_geo(self.geo_name)
        geo.latitude = self.latitude
        geo.lat_mins = self.lat_mins
        geo.lat_secs = self.lat_secs
        geo.longitude = self.longitude
        geo.long_mins = self.long_mins
        geo.long_secs = self.long_secs
        geo.altitude = self.altitude
        geo.radius = self.radius
        return geo

    def no_geo_altitude(self):
        """True when no altitude was supplied."""
        return self.altitude == -1

    def parse_geo_string(self, geo_str):
        """Parse 'lat-mins-secs-[NS]-long-mins-secs-[EW][-alt][/radius]'.

        Returns True on success, False on malformed input. A leading
        '[iid]' bracket part, if present, is skipped.

        Robustness fix: non-numeric fields now return False instead of
        raising ValueError out of int().
        """
        index = geo_str.find("]")
        if index != -1: geo_str = geo_str[index + 1::]

        # Optional '/radius' suffix turns the point into a circle.
        if geo_str.find("/") != -1:
            geo_str, radius = geo_str.split("/")
            if radius.isdigit() == False: return False
            self.radius = int(radius)

        geo_str = geo_str.split("-")
        if len(geo_str) < 8: return False

        latitude = geo_str[0:4]
        longitude = geo_str[4:8]

        try:
            # Optional 9th element is the altitude.
            if len(geo_str) > 8: self.altitude = int(geo_str[8])

            self.latitude = int(latitude[0])
            self.lat_mins = int(latitude[1])
            self.lat_secs = int(latitude[2])
            if latitude[3] == "N": self.latitude = -self.latitude

            self.longitude = int(longitude[0])
            self.long_mins = int(longitude[1])
            self.long_secs = int(longitude[2])
            if longitude[3] == "E": self.longitude = -self.longitude
        except ValueError:
            return False
        return True

    def print_geo(self):
        """Render this geo-point in the format parse_geo_string() accepts."""
        ns = "N" if self.latitude < 0 else "S"
        ew = "E" if self.longitude < 0 else "W"

        geo_str = "{}-{}-{}-{}-{}-{}-{}-{}".format(abs(self.latitude),
            self.lat_mins, self.lat_secs, ns, abs(self.longitude),
            self.long_mins, self.long_secs, ew)

        if self.no_geo_altitude() == False:
            geo_str += "-" + str(self.altitude)

        # Append radius when this is a geo-circle rather than a point.
        if self.radius != 0: geo_str += "/{}".format(self.radius)
        return geo_str

    def geo_url(self):
        """Return a Google static-map URL centered on this geo-point.

        Bug fix: os.getenv() returns None when LISP_GEO_ZOOM_LEVEL is
        unset, and the original then crashed on None.isdigit(); default
        to "" so the zoom level falls back to 10.
        """
        zoom = os.getenv("LISP_GEO_ZOOM_LEVEL", "")
        zoom = "10" if (zoom == "" or zoom.isdigit() == False) else zoom
        lat, lon = self.dms_to_decimal()
        url = ("http://maps.googleapis.com/maps/api/staticmap?center={},{}" +
            "&markers=color:blue%7Clabel:lisp%7C{},{}" +
            "&zoom={}&size=1024x1024&sensor=false").format(lat, lon, lat,
            lon, zoom)
        return url

    def print_geo_url(self):
        """Return an HTML anchor for this geo-point.

        Points link straight to a map URL; circles link to the internal
        /lisp/geo-map page keyed by the dash-form geo string.
        """
        geo_str = self.print_geo()
        if self.radius == 0:
            url = self.geo_url()
            html = "<a href='{}'>{}</a>".format(url, geo_str)
        else:
            url = geo_str.replace("/", "-")
            html = "<a href='/lisp/geo-map/{}'>{}</a>".format(url, geo_str)
        return html

    def dms_to_decimal(self):
        """Convert degrees-minutes-seconds to (lat, long) decimal degrees.

        Flips the internal sign convention (negative == N/E) to standard
        decimal convention (positive == N/E).
        """
        degrees, mins, secs = self.latitude, self.lat_mins, self.lat_secs
        dd = float(abs(degrees))
        dd += float(mins * 60 + secs) / 3600
        if degrees > 0: dd = -dd
        latitude = dd

        degrees, mins, secs = self.longitude, self.long_mins, self.long_secs
        dd = float(abs(degrees))
        dd += float(mins * 60 + secs) / 3600
        if degrees > 0: dd = -dd
        longitude = dd
        return ((latitude, longitude))

    def get_distance(self, geo_point):
        """Great-circle distance in kilometers between self and geo_point."""
        us = self.dms_to_decimal()
        them = geo_point.dms_to_decimal()
        distance = vincenty(us, them)
        return distance.km

    def point_in_circle(self, geo_point):
        """True when geo_point lies within self.radius km of this point."""
        km = self.get_distance(geo_point)
        return km <= self.radius

    def encode_geo(self):
        """Encode as an AFI=LCAF Geo-Coordinates record; returns bytes."""
        lcaf_afi = socket.htons(LISP_AFI_LCAF)
        geo_len = socket.htons(20 + 2)
        flags = 0

        lat = abs(self.latitude)
        lat_msecs = ((self.lat_mins * 60) + self.lat_secs) * 1000
        if self.latitude < 0: flags |= 0x40   # N bit

        lon = abs(self.longitude)
        lon_msecs = ((self.long_mins * 60) + self.long_secs) * 1000
        if self.longitude < 0: flags |= 0x20  # E bit

        alt = 0
        if self.no_geo_altitude() == False:
            alt = socket.htonl(self.altitude)
            flags |= 0x10                      # A bit

        radius = socket.htons(self.radius)
        if radius != 0: flags |= 0x06          # R bit + kilometer units

        packet = struct.pack("HBBBBH", lcaf_afi, 0, 0,
            LISP_LCAF_GEO_COORD_TYPE, 0, geo_len)
        packet += struct.pack("BBHBBHBBHIHHH", flags, 0, 0, lat,
            lat_msecs >> 16, socket.htons(lat_msecs & 0x0ffff), lon,
            lon_msecs >> 16, socket.htons(lon_msecs & 0xffff), alt,
            radius, 0, 0)
        return packet

    def decode_geo(self, packet, lcaf_len, radius_hi):
        """Decode a Geo-Coordinates LCAF payload into self.

        Returns the remaining packet bytes, or None on a parse error.
        (radius_hi is accepted for interface compatibility but unused.)
        """
        packet_format = "BBHBBHBBHIHHH"
        format_size = struct.calcsize(packet_format)
        if lcaf_len < format_size: return None

        flags, rsvd1, rsvd2, lat, lat_hi, lat_lo, lon, lon_hi, lon_lo, \
            alt, radius, rsvd3, afi = struct.unpack(packet_format,
            packet[:format_size])

        afi = socket.ntohs(afi)
        if afi == LISP_AFI_LCAF: return None

        if flags & 0x40: lat = -lat            # N bit set -> North
        self.latitude = lat
        # Milliseconds-of-arc to minutes/seconds (integer division under
        # Python 2, matching the original).
        lat_secs = ((lat_hi << 16) | socket.ntohs(lat_lo)) / 1000
        self.lat_mins = lat_secs / 60
        self.lat_secs = lat_secs % 60

        if flags & 0x20: lon = -lon            # E bit set -> East
        self.longitude = lon
        lon_secs = ((lon_hi << 16) | socket.ntohs(lon_lo)) / 1000
        self.long_mins = lon_secs / 60
        self.long_secs = lon_secs % 60

        self.altitude = socket.ntohl(alt) if (flags & 0x10) else -1
        radius = socket.ntohs(radius)
        self.radius = radius if (flags & 0x02) else radius * 1000

        self.geo_name = None
        packet = packet[format_size::]

        # NOTE(review): self.rloc is never set by __init__; callers
        # apparently attach it before decoding an AFI-bearing record —
        # verify before relying on this path.
        if afi != 0:
            self.rloc.afi = afi
            packet = self.rloc.unpack_address(packet)
            self.rloc.mask_len = self.rloc.host_mask_len()
        return packet
if 97 - 97: ooOoO0o % II111iiii / Ii1I . iIii1I11I1II1
if 100 - 100: II111iiii / I11i * iIii1I11I1II1 / OOooOOo + i11iIiiIii - iIii1I11I1II1
if 32 - 32: o0oOOo0O0Ooo - Ii1I / ooOoO0o % I1Ii111
if 69 - 69: oO0o - I1IiiI . OOooOOo * OoooooooOO
if 83 - 83: IiII % I1Ii111 % IiII - O0 % I1ii11iIi11i
if 44 - 44: i11iIiiIii + oO0o * oO0o . i11iIiiIii % i1IIi + iII111i
class lisp_rle_node():
    """One replication target in a Replication List Entry (RLE)."""

    def __init__(self):
        self.address = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.level = 0               # replication level within the RLE
        self.translated_port = 0     # NAT-translated port, 0 == none
        self.rloc_name = None

    def copy_rle_node(self):
        """Return a copy of this RLE node."""
        rle_node = lisp_rle_node()
        rle_node.address.copy_address(self.address)
        rle_node.level = self.level
        rle_node.translated_port = self.translated_port
        rle_node.rloc_name = self.rloc_name
        return rle_node

    def store_translated_rloc(self, rloc, port):
        """Record the NAT-translated RLOC address and port for this node."""
        self.address.copy_address(rloc)
        self.translated_port = port

    def get_encap_keys(self):
        """Return (encrypt-key, icv-key) for this node's encap RLOC, or
        (None, None) when no keys are present.

        Bug fix: the original used a bare 'except:', which also swallows
        KeyboardInterrupt/SystemExit; catch Exception instead so the
        best-effort lookup still never raises to the caller.
        """
        port = "4341" if self.translated_port == 0 else \
            str(self.translated_port)
        addr_str = self.address.print_address_no_iid() + ":" + port

        try:
            keys = lisp_crypto_keys_by_rloc_encap[addr_str]
            if keys[1]: return (keys[1].encrypt_key, keys[1].icv_key)
            return (None, None)
        except Exception:
            return (None, None)
if 27 - 27: I1IiiI % oO0o - iIii1I11I1II1 - o0oOOo0O0Ooo - IiII - O0
if 46 - 46: II111iiii
if 24 - 24: i11iIiiIii * i1IIi - I11i + o0oOOo0O0Ooo
if 60 - 60: ooOoO0o
class lisp_rle():
    """A Replication List Entry: the RLOCs we replicate multicast to."""

    def __init__(self, name):
        self.rle_name = name
        self.rle_nodes = []
        self.rle_forwarding_list = []

    def copy_rle(self):
        """Return a copy of this RLE with a freshly built forwarding list."""
        rle = lisp_rle(self.rle_name)
        for node in self.rle_nodes:
            rle.rle_nodes.append(node.copy_rle_node())
        rle.build_forwarding_list()
        return rle

    def print_rle(self, html):
        """Render nodes as 'addr[-port](L<level>)[name]', comma separated.

        Local addresses are highlighted red; RLOC names blue (HTML mode).
        """
        pieces = []
        for node in self.rle_nodes:
            port = node.translated_port
            name_str = blue(node.rloc_name, html) if node.rloc_name != None \
                else ""

            addr_str = node.address.print_address_no_iid()
            if node.address.is_local(): addr_str = red(addr_str, html)

            pieces.append("{}{}(L{}){}".format(addr_str,
                "" if port == 0 else "-" + str(port), node.level,
                "" if node.rloc_name == None else name_str))

        return ", ".join(pieces)

    def build_forwarding_list(self):
        """Choose which replication level we forward to.

        If one of our local RLOCs appears at some level, forward to the
        next higher level; otherwise forward to the lowest (first) level.
        Local RLOCs themselves are excluded unless we are the RTR.
        """
        level = -1
        for node in self.rle_nodes:
            if level == -1:
                if node.address.is_local(): level = node.level
            else:
                if node.level > level: break

        # No local match -> level 0; otherwise the level we stopped at.
        level = 0 if level == -1 else node.level

        self.rle_forwarding_list = []
        for node in self.rle_nodes:
            if node.level == level or (level == 0 and node.level == 128):
                if lisp_i_am_rtr == False and node.address.is_local():
                    addr_str = node.address.print_address_no_iid()
                    lprint("Exclude local RLE RLOC {}".format(addr_str))
                    continue

                self.rle_forwarding_list.append(node)
if 10 - 10: Oo0Ooo / ooOoO0o / I1ii11iIi11i
if 98 - 98: O0 - I1Ii111 - i11iIiiIii
if 85 - 85: II111iiii - I1ii11iIi11i % I1IiiI . I1IiiI - OoooooooOO - I11i
class lisp_json():
    """A named JSON string stored in the global lisp_json_list table."""

    def __init__(self, name, string):
        self.json_name = name
        self.json_string = string

    def add(self):
        """Insert (or replace) this entry in the global lisp_json_list."""
        self.delete()
        lisp_json_list[self.json_name] = self

    def delete(self):
        """Remove this entry's object from the global lisp_json_list.

        Fix: dict.has_key() was removed in Python 3; the `in` membership
        test is the equivalent operation in both Python 2 and Python 3.
        """
        if (self.json_name in lisp_json_list):
            del(lisp_json_list[self.json_name])
            # NOTE(review): the key is deliberately re-created with value
            # None rather than removed outright — confirm callers depend on
            # key presence before changing this.
            lisp_json_list[self.json_name] = None

    def print_json(self, html):
        """Return the JSON string; invalid JSON is wrapped in "***" markers
        (colored red when html output is requested)."""
        good_string = self.json_string
        marker = "***"
        if (html): marker = red(marker, html)
        bad_string = marker + self.json_string + marker
        if (self.valid_json()): return(good_string)
        return(bad_string)

    def valid_json(self):
        """Return True when self.json_string parses as JSON."""
        try:
            json.loads(self.json_string)
        except Exception:
            return(False)
        return(True)
if 11 - 11: I1Ii111 - Oo0Ooo / iIii1I11I1II1 - OoooooooOO
if 71 - 71: Oo0Ooo + Ii1I - OoooooooOO + I11i - iIii1I11I1II1 / O0
if 76 - 76: i11iIiiIii % o0oOOo0O0Ooo . O0 * I11i
if 90 - 90: II111iiii + OOooOOo % I1Ii111 * iIii1I11I1II1 % iIii1I11I1II1
class lisp_stats():
    """Packet/byte counters with rate computation and display helpers."""

    def __init__(self):
        self.packet_count = 0
        self.byte_count = 0
        self.last_rate_check = 0
        self.last_packet_count = 0
        self.last_byte_count = 0
        self.last_increment = None

    def increment(self, octets):
        """Count one packet of 'octets' bytes and timestamp the event."""
        self.packet_count += 1
        self.byte_count += octets
        self.last_increment = lisp_get_timestamp()

    def recent_packet_sec(self):
        """True when a packet was counted within the last second."""
        if (self.last_increment == None): return(False)
        return(time.time() - self.last_increment <= 1)

    def recent_packet_min(self):
        """True when a packet was counted within the last minute."""
        if (self.last_increment == None): return(False)
        return(time.time() - self.last_increment <= 60)

    def stat_colors(self, c1, c2, html):
        """Colorize the two counter strings by traffic recency."""
        if (self.recent_packet_sec()):
            return(green_last_sec(c1), green_last_sec(c2))
        if (self.recent_packet_min()):
            return(green_last_min(c1), green_last_min(c2))
        return(c1, c2)

    def normalize(self, count):
        """Abbreviate a large counter with a T/B/M suffix."""
        count = str(count)
        digits = len(count)
        if (digits > 12):
            return(count[0:-10] + "." + count[-10:-7] + "T")
        if (digits > 9):
            return(count[0:-9] + "." + count[-9:-7] + "B")
        if (digits > 6):
            return(count[0:-6] + "." + count[-6] + "M")
        return(count)

    def get_stats(self, summary, html):
        """Return a display string of counters plus packet/bit rates
        computed over the interval since the previous call. Updates the
        last_* snapshot fields as a side effect."""
        previous_check = self.last_rate_check
        previous_packets = self.last_packet_count
        previous_bytes = self.last_byte_count
        self.last_rate_check = lisp_get_timestamp()
        self.last_packet_count = self.packet_count
        self.last_byte_count = self.byte_count

        elapsed = self.last_rate_check - previous_check
        if (elapsed == 0):
            packet_rate = 0
            bit_rate = 0
        else:
            packet_rate = int((self.packet_count - previous_packets) / elapsed)
            bit_rate = (self.byte_count - previous_bytes) / elapsed
            bit_rate = (bit_rate * 8) / 1000000
            bit_rate = round(bit_rate, 2)

        pc = self.normalize(self.packet_count)
        bc = self.normalize(self.byte_count)

        if (summary):
            # Compact form: counters (hover shows rates when html).
            newline = "<br>" if html else ""
            pc, bc = self.stat_colors(pc, bc, html)
            title = "packet-count: {}{}byte-count: {}".format(pc, newline, bc)
            stats = "packet-rate: {} pps\nbit-rate: {} Mbps".format(
                packet_rate, bit_rate)
            if (html != ""): stats = lisp_span(title, stats)
        else:
            # Long form: counters and rates on one line.
            prate = str(packet_rate)
            brate = str(bit_rate)
            if (html):
                pc = lisp_print_cour(pc)
                prate = lisp_print_cour(prate)
                bc = lisp_print_cour(bc)
                brate = lisp_print_cour(brate)
            newline = "<br>" if html else ", "
            stats = ("packet-count: {}{}packet-rate: {} pps{}byte-count: " + \
                "{}{}bit-rate: {} mbps").format(pc, newline, prate, newline,
                bc, newline, brate)
        return(stats)
if 41 - 41: I1ii11iIi11i . OoooooooOO * I1ii11iIi11i - oO0o
if 40 - 40: I1IiiI % OoO0O00 + i11iIiiIii / oO0o
if 98 - 98: oO0o + iIii1I11I1II1 . ooOoO0o / I1ii11iIi11i
if 77 - 77: OoOoOO00 / Oo0Ooo * OoOoOO00 % I1IiiI . II111iiii % OoO0O00
if 38 - 38: iII111i - OoO0O00 / i1IIi + ooOoO0o . ooOoO0o . iII111i
if 37 - 37: iIii1I11I1II1 * OoOoOO00 . OoOoOO00 + OoooooooOO + OoO0O00
if 25 - 25: I1IiiI / IiII . OOooOOo . I1ii11iIi11i % i1IIi
#
# Global decapsulation counters, keyed by packet disposition. Each value is
# a lisp_stats instance; the decap path increments the entry matching the
# outcome of processing a received packet.
#
lisp_decap_stats = {
    "good-packets" : lisp_stats(), "ICV-error" : lisp_stats(),
    "checksum-error" : lisp_stats(), "lisp-header-error" : lisp_stats(),
    "no-decrypt-key" : lisp_stats(), "bad-inner-version" : lisp_stats(),
    "outer-header-error" : lisp_stats()
}
if 12 - 12: O0 % O0
if 9 - 9: O0 . I1IiiI + I1ii11iIi11i / OOooOOo * I1ii11iIi11i
if 10 - 10: IiII % o0oOOo0O0Ooo / O0 / II111iiii
if 81 - 81: Ii1I / o0oOOo0O0Ooo % OoOoOO00 . I1ii11iIi11i
class lisp_rloc():
    def __init__(self, recurse=True):
        """Initialize an RLOC entry with default address, priority/weight,
        probe and key state.

        When 'recurse' is True and the system has multiple default-route
        next hops, clone this RLOC (via deepcopy) once per additional next
        hop and chain the clones through self.next_rloc so each next hop
        can be probed independently.
        """
        self.rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.rloc_name = None
        self.interface = None
        self.translated_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.translated_port = 0
        self.priority = 255
        self.weight = 0
        self.mpriority = 255
        self.mweight = 0
        self.uptime = 0
        self.state = LISP_RLOC_UP_STATE
        self.last_state_change = None
        self.rle_name = None
        self.elp_name = None
        self.geo_name = None
        self.json_name = None
        self.geo = None
        self.elp = None
        self.rle = None
        self.json = None
        self.stats = lisp_stats()
        self.last_rloc_probe = None
        self.last_rloc_probe_reply = None
        self.rloc_probe_rtt = -1
        self.recent_rloc_probe_rtts = [-1, -1, -1]
        self.rloc_probe_hops = "?/?"
        self.recent_rloc_probe_hops = ["?/?", "?/?", "?/?"]
        self.last_rloc_probe_nonce = 0
        self.echo_nonce_capable = False
        self.map_notify_requested = False
        self.rloc_next_hop = None
        self.next_rloc = None

        if (recurse == False): return

        #
        # Multihoming: one clone per extra default-route next hop.
        #
        next_hops = lisp_get_default_route_next_hops()
        if (next_hops == [] or len(next_hops) == 1): return

        self.rloc_next_hop = next_hops[0]
        tail = self
        for nh in next_hops[1::]:
            clone = lisp_rloc(False)
            # NOTE(review): the fresh instance above is immediately
            # replaced by a deepcopy of self (original behavior preserved).
            clone = copy.deepcopy(self)
            clone.rloc_next_hop = nh
            tail.next_rloc = clone
            tail = clone
if 15 - 15: OOooOOo . OoOoOO00 / oO0o . I1ii11iIi11i % OoO0O00 - oO0o
def up_state ( self ) :
return ( self . state == LISP_RLOC_UP_STATE )
if 21 - 21: ooOoO0o . o0oOOo0O0Ooo . oO0o . i1IIi
if 96 - 96: Ii1I % I11i * OoooooooOO . I1IiiI . iIii1I11I1II1
def unreach_state ( self ) :
return ( self . state == LISP_RLOC_UNREACH_STATE )
if 8 - 8: O0 + o0oOOo0O0Ooo / O0 - I1ii11iIi11i % I1ii11iIi11i
if 55 - 55: OoooooooOO * OoooooooOO % I1Ii111 / Ii1I / ooOoO0o
def no_echoed_nonce_state ( self ) :
return ( self . state == LISP_RLOC_NO_ECHOED_NONCE_STATE )
if 12 - 12: i11iIiiIii + Ii1I % iIii1I11I1II1 + I1Ii111
if 12 - 12: Ii1I + I1Ii111 / O0 * II111iiii
def down_state ( self ) :
return ( self . state in [ LISP_RLOC_DOWN_STATE , LISP_RLOC_ADMIN_DOWN_STATE ] )
if 67 - 67: iIii1I11I1II1 / I11i + ooOoO0o * I1Ii111 * oO0o
if 100 - 100: OoooooooOO % I1IiiI / OoOoOO00 % OoOoOO00 . o0oOOo0O0Ooo
if 81 - 81: Ii1I - II111iiii + I11i / Ii1I
def print_state ( self ) :
if ( self . state is LISP_RLOC_UNKNOWN_STATE ) :
return ( "unknown-state" )
if ( self . state is LISP_RLOC_UP_STATE ) :
return ( "up-state" )
if ( self . state is LISP_RLOC_DOWN_STATE ) :
return ( "down-state" )
if ( self . state is LISP_RLOC_ADMIN_DOWN_STATE ) :
return ( "admin-down-state" )
if ( self . state is LISP_RLOC_UNREACH_STATE ) :
return ( "unreach-state" )
if ( self . state is LISP_RLOC_NO_ECHOED_NONCE_STATE ) :
return ( "no-echoed-nonce-state" )
return ( "invalid-state" )
if 89 - 89: i11iIiiIii + I1ii11iIi11i - ooOoO0o . ooOoO0o + Oo0Ooo % Ii1I
if 96 - 96: I1Ii111 - I11i * I1Ii111
def print_rloc ( self , indent ) :
OOOO0O00o = lisp_print_elapsed ( self . uptime )
lprint ( "{}rloc {}, uptime {}, {}, parms {}/{}/{}/{}" . format ( indent ,
red ( self . rloc . print_address ( ) , False ) , OOOO0O00o , self . print_state ( ) ,
self . priority , self . weight , self . mpriority , self . mweight ) )
if 32 - 32: I1IiiI / i1IIi / I1ii11iIi11i % i1IIi . ooOoO0o % I1ii11iIi11i
if 97 - 97: OoO0O00 . OOooOOo % Ii1I + OoooooooOO * I1Ii111
def print_rloc_name ( self , cour = False ) :
if ( self . rloc_name == None ) : return ( "" )
OO000 = self . rloc_name
if ( cour ) : OO000 = lisp_print_cour ( OO000 )
return ( 'rloc-name: {}' . format ( blue ( OO000 , cour ) ) )
if 89 - 89: I11i
if 91 - 91: OoooooooOO - IiII - Ii1I
    def store_rloc_from_record(self, rloc_record, nonce, source):
        """Populate this RLOC entry from a received RLOC-record.

        Applies stored NAT-traversal state (translated address/port) when
        it exists for this RLOC/rloc-name, copies LCAF data (geo, ELP,
        JSON, RLE), priorities/weights, and stores encapsulation keys when
        the record's RLOC matches the packet source.

        Returns the encapsulation port to use for this RLOC
        (LISP_DATA_PORT unless NAT state overrides it).
        """
        IIiII = LISP_DATA_PORT
        self.rloc.copy_address(rloc_record.rloc)
        self.rloc_name = rloc_record.rloc_name
        if 36 - 36: OOooOOo
        if 76 - 76: OoO0O00 . i1IIi
        if 98 - 98: O0
        if 86 - 86: O0 * oO0o + Oo0Ooo / II111iiii + i1IIi
        #
        # Look for NAT state when the record carries a routable RLOC.
        #
        OoOOo = self.rloc
        if (OoOOo.is_null() == False):
            Iii111I = lisp_get_nat_info(OoOOo, self.rloc_name)
            if (Iii111I):
                IIiII = Iii111I.port
                # First entry is the youngest (most recently seen) state.
                I1II11Ii111Ii = lisp_nat_state_info[self.rloc_name][0]
                I1iiIiiii1111 = OoOOo.print_address_no_iid()
                ooOo = red(I1iiIiiii1111, False)
                Oooo0O0000o0O = "" if self.rloc_name == None else blue(self.rloc_name, False)
                if 20 - 20: I1Ii111 / OoooooooOO * i1IIi + i1IIi % I11i
                if 11 - 11: Ii1I - Ii1I . o0oOOo0O0Ooo - I1Ii111 * Ii1I - o0oOOo0O0Ooo
                if 71 - 71: OoOoOO00 * OoooooooOO . IiII - OoOoOO00
                if 4 - 4: i1IIi * OOooOOo % Oo0Ooo * IiII
                if 10 - 10: OoooooooOO
                if 28 - 28: OoO0O00 + i11iIiiIii / i1IIi
                #
                # Matched state timed out: fall back to the youngest state;
                # if that also timed out, proceed with no NAT state at all.
                #
                if (Iii111I.timed_out()):
                    lprint((" Matched stored NAT state timed out for " + "RLOC {}:{}, {}").format(ooOo, IIiII, Oooo0O0000o0O))
                    if 7 - 7: I1ii11iIi11i . Oo0Ooo / i11iIiiIii
                    if 65 - 65: I11i * iII111i * II111iiii / o0oOOo0O0Ooo . O0
                    Iii111I = None if (Iii111I == I1II11Ii111Ii) else I1II11Ii111Ii
                    if (Iii111I and Iii111I.timed_out()):
                        IIiII = Iii111I.port
                        ooOo = red(Iii111I.address, False)
                        lprint((" Youngest stored NAT state timed out " + " for RLOC {}:{}, {}").format(ooOo, IIiII,
                            Oooo0O0000o0O))
                        Iii111I = None
                    if 17 - 17: I1IiiI / i11iIiiIii + o0oOOo0O0Ooo . OoOoOO00 . I1IiiI
                if 31 - 31: OoooooooOO . I1Ii111 % OoooooooOO * iII111i % OOooOOo . iII111i
                if 17 - 17: I1Ii111 % i1IIi % I11i * O0 / Oo0Ooo
                if 96 - 96: OoOoOO00 . Ii1I
                if 80 - 80: OoOoOO00 + o0oOOo0O0Ooo - II111iiii
                if 3 - 3: ooOoO0o * I1Ii111
                if 34 - 34: Ii1I / Oo0Ooo . II111iiii - ooOoO0o - I1ii11iIi11i % OoOoOO00
                #
                # Surviving NAT state: adopt its address (logging any
                # conflict with the record's RLOC) and translated port.
                #
                if (Iii111I):
                    if (Iii111I.address != I1iiIiiii1111):
                        lprint("RLOC conflict, RLOC-record {}, NAT state {}".format(ooOo, red(Iii111I.address, False)))
                        if 43 - 43: Ii1I * oO0o
                        self.rloc.store_address(Iii111I.address)
                    if 57 - 57: OoooooooOO + I1IiiI % I1ii11iIi11i % ooOoO0o * I1Ii111
                    ooOo = red(Iii111I.address, False)
                    IIiII = Iii111I.port
                    lprint(" Use NAT translated RLOC {}:{} for {}".format(ooOo, IIiII, Oooo0O0000o0O))
                    if 9 - 9: i11iIiiIii
                    # NOTE(review): OoOOo aliases self.rloc, so after
                    # store_address() above this stores the NAT address.
                    self.store_translated_rloc(OoOOo, IIiII)
                if 85 - 85: IiII / o0oOOo0O0Ooo * ooOoO0o
            if 74 - 74: O0 - o0oOOo0O0Ooo
        if 68 - 68: I1Ii111
        if 19 - 19: o0oOOo0O0Ooo
        self.geo = rloc_record.geo
        self.elp = rloc_record.elp
        self.json = rloc_record.json
        if 63 - 63: OoooooooOO % ooOoO0o
        if 26 - 26: OOooOOo + Oo0Ooo
        if 97 - 97: I1Ii111 * I1Ii111 + iII111i % Ii1I / iII111i
        if 73 - 73: OoOoOO00 % I1Ii111 . I1ii11iIi11i
        #
        # For each RLE node, learn translated encap ports from NAT state.
        #
        self.rle = rloc_record.rle
        if (self.rle):
            for IIi1i1111i in self.rle.rle_nodes:
                OO000 = IIi1i1111i.rloc_name
                Iii111I = lisp_get_nat_info(IIi1i1111i.address, OO000)
                if (Iii111I == None): continue
                if 45 - 45: iIii1I11I1II1 % Ii1I . OoOoOO00 . o0oOOo0O0Ooo - OoooooooOO
                IIiII = Iii111I.port
                iI11Ii = OO000
                if (iI11Ii): iI11Ii = blue(OO000, False)
                if 46 - 46: I1ii11iIi11i
                lprint((" Store translated encap-port {} for RLE-" + "node {}, rloc-name '{}'").format(IIiII,
                    IIi1i1111i.address.print_address_no_iid(), iI11Ii))
                IIi1i1111i.translated_port = IIiII
            if 21 - 21: OoOoOO00
        if 86 - 86: O0 . O0 - I1Ii111
        if 95 - 95: Ii1I / Ii1I * OoO0O00 . OoooooooOO . OoooooooOO * I11i
        self.priority = rloc_record.priority
        self.mpriority = rloc_record.mpriority
        self.weight = rloc_record.weight
        self.mweight = rloc_record.mweight
        # A locally sourced, reachable, non-probe record marks the RLOC up.
        if (rloc_record.reach_bit and rloc_record.local_bit and
            rloc_record.probe_bit == False): self.state = LISP_RLOC_UP_STATE
        if 76 - 76: OoooooooOO - Ii1I + IiII % OoOoOO00 / OoooooooOO
        if 55 - 55: i11iIiiIii - IiII * OOooOOo + II111iiii . I1ii11iIi11i / O0
        if 16 - 16: II111iiii . Oo0Ooo * I1Ii111 + o0oOOo0O0Ooo - i11iIiiIii
        if 98 - 98: II111iiii - i1IIi - ooOoO0o
        #
        # Store encap keys only when the record's RLOC is the packet source.
        #
        i1I1Ii = source.is_exact_match(rloc_record.rloc) if source != None else None
        if 66 - 66: OoOoOO00 % ooOoO0o - II111iiii . oO0o / i11iIiiIii
        if (rloc_record.keys != None and i1I1Ii):
            iii11 = rloc_record.keys[1]
            if (iii11 != None):
                I1iiIiiii1111 = rloc_record.rloc.print_address_no_iid() + ":" + str(IIiII)
                if 73 - 73: OoO0O00 - i1IIi
                iii11.add_key_by_rloc(I1iiIiiii1111, True)
                lprint(" Store encap-keys for nonce 0x{}, RLOC {}".format(lisp_hex_string(nonce), red(I1iiIiiii1111, False)))
            if 52 - 52: I1ii11iIi11i
        if 4 - 4: Ii1I - iII111i + i1IIi - I1Ii111 / iII111i . Oo0Ooo
        if 18 - 18: oO0o % iIii1I11I1II1 + ooOoO0o
        return(IIiII)
if 34 - 34: I1IiiI - OoooooooOO . IiII - OOooOOo % IiII
if 19 - 19: IiII + I1ii11iIi11i % Oo0Ooo
def store_translated_rloc ( self , rloc , port ) :
self . rloc . copy_address ( rloc )
self . translated_rloc . copy_address ( rloc )
self . translated_port = port
if 32 - 32: OOooOOo
if 46 - 46: II111iiii . OoO0O00
def is_rloc_translated ( self ) :
return ( self . translated_rloc . is_null ( ) == False )
if 97 - 97: oO0o
if 45 - 45: i11iIiiIii / IiII + OoO0O00
def rloc_exists ( self ) :
if ( self . rloc . is_null ( ) == False ) : return ( True )
if ( self . rle_name or self . geo_name or self . elp_name or self . json_name ) :
return ( False )
if 55 - 55: Ii1I / II111iiii - oO0o
return ( True )
if 58 - 58: i1IIi . OoooooooOO % iIii1I11I1II1 * o0oOOo0O0Ooo + O0 / oO0o
if 77 - 77: I11i . I1ii11iIi11i
def is_rtr ( self ) :
return ( ( self . priority == 254 and self . mpriority == 255 and self . weight == 0 and self . mweight == 0 ) )
if 92 - 92: i11iIiiIii + I11i % I1IiiI / ooOoO0o
if 28 - 28: i1IIi . I1IiiI
if 41 - 41: I1ii11iIi11i . I1Ii111 * OoOoOO00 . I1Ii111 / o0oOOo0O0Ooo
def print_state_change ( self , new_state ) :
iIiI = self . print_state ( )
OO0o0o0oo = "{} -> {}" . format ( iIiI , new_state )
if ( new_state == "up" and self . unreach_state ( ) ) :
OO0o0o0oo = bold ( OO0o0o0oo , False )
if 40 - 40: i11iIiiIii
return ( OO0o0o0oo )
if 95 - 95: OOooOOo / Oo0Ooo . OoO0O00 / IiII + i11iIiiIii * OOooOOo
if 27 - 27: O0 * OoO0O00 * I1ii11iIi11i
def print_rloc_probe_rtt ( self ) :
if ( self . rloc_probe_rtt == - 1 ) : return ( "none" )
return ( self . rloc_probe_rtt )
if 40 - 40: O0 + oO0o - ooOoO0o + I1IiiI - IiII
if 60 - 60: I1Ii111 * OoO0O00 * oO0o + oO0o
def print_recent_rloc_probe_rtts ( self ) :
i1III1ii = str ( self . recent_rloc_probe_rtts )
i1III1ii = i1III1ii . replace ( "-1" , "?" )
return ( i1III1ii )
if 48 - 48: I1IiiI - II111iiii / OoOoOO00
if 69 - 69: i11iIiiIii
def compute_rloc_probe_rtt ( self ) :
oo = self . rloc_probe_rtt
self . rloc_probe_rtt = - 1
if ( self . last_rloc_probe_reply == None ) : return
if ( self . last_rloc_probe == None ) : return
self . rloc_probe_rtt = self . last_rloc_probe_reply - self . last_rloc_probe
self . rloc_probe_rtt = round ( self . rloc_probe_rtt , 3 )
OOOoOOO000 = self . recent_rloc_probe_rtts
self . recent_rloc_probe_rtts = [ oo ] + OOOoOOO000 [ 0 : - 1 ]
if 86 - 86: ooOoO0o / iII111i . OoooooooOO + I1Ii111 + I1Ii111
if 35 - 35: Oo0Ooo + oO0o * o0oOOo0O0Ooo - iIii1I11I1II1 % I1ii11iIi11i * i11iIiiIii
def print_rloc_probe_hops ( self ) :
return ( self . rloc_probe_hops )
if 56 - 56: iIii1I11I1II1 / I11i
if 78 - 78: i11iIiiIii * OoO0O00 * Ii1I / i1IIi * OOooOOo + o0oOOo0O0Ooo
def print_recent_rloc_probe_hops ( self ) :
oooOOOooOo0 = str ( self . recent_rloc_probe_hops )
return ( oooOOOooOo0 )
if 97 - 97: I1Ii111 . Oo0Ooo
if 44 - 44: OoO0O00 + OOooOOo
def store_rloc_probe_hops ( self , to_hops , from_ttl ) :
if ( to_hops == 0 ) :
to_hops = "?"
elif ( to_hops < LISP_RLOC_PROBE_TTL / 2 ) :
to_hops = "!"
else :
to_hops = str ( LISP_RLOC_PROBE_TTL - to_hops )
if 9 - 9: iII111i . i11iIiiIii * IiII . I11i
if ( from_ttl < LISP_RLOC_PROBE_TTL / 2 ) :
Ii111iI = "!"
else :
Ii111iI = str ( LISP_RLOC_PROBE_TTL - from_ttl )
if 71 - 71: ooOoO0o + OOooOOo * I1IiiI % I11i . I1Ii111 % OoooooooOO
if 7 - 7: iIii1I11I1II1
oo = self . rloc_probe_hops
self . rloc_probe_hops = to_hops + "/" + Ii111iI
OOOoOOO000 = self . recent_rloc_probe_hops
self . recent_rloc_probe_hops = [ oo ] + OOOoOOO000 [ 0 : - 1 ]
if 88 - 88: ooOoO0o
if 37 - 37: ooOoO0o * OoOoOO00 . ooOoO0o
    def process_rloc_probe_reply(self, nonce, eid, group, hop_count, ttl):
        """Process an RLOC-probe reply.

        Walks the next_rloc chain for the entry whose probe nonce matches,
        marks it up (refreshing RTR up/down state and checkpointing the
        map-cache over IPC on a state change), records RTT and hop counts,
        and logs the reply. When this RLOC is multihomed over several next
        hops, (re)installs a host route via the lowest-RTT reachable one.
        """
        OoOOo = self
        while (True):
            if (OoOOo.last_rloc_probe_nonce == nonce): break
            OoOOo = OoOOo.next_rloc
            if (OoOOo == None):
                lprint(" No matching nonce state found for nonce 0x{}".format(lisp_hex_string(nonce)))
                if 47 - 47: iIii1I11I1II1 + iIii1I11I1II1 / Ii1I
                return
            if 19 - 19: OOooOOo . OoOoOO00 % iIii1I11I1II1 % OoOoOO00
        if 92 - 92: o0oOOo0O0Ooo + II111iiii
        if 56 - 56: OoOoOO00 - OoOoOO00 / Ii1I
        OoOOo.last_rloc_probe_reply = lisp_get_timestamp()
        OoOOo.compute_rloc_probe_rtt()
        oooIIi1i = OoOOo.print_state_change("up")
        # On a transition to up, refresh RTR state and checkpoint map-cache.
        if (OoOOo.state != LISP_RLOC_UP_STATE):
            lisp_update_rtr_updown(OoOOo.rloc, True)
            OoOOo.state = LISP_RLOC_UP_STATE
            OoOOo.last_state_change = lisp_get_timestamp()
            IIII = lisp_map_cache.lookup_cache(eid, True)
            if (IIII): lisp_write_ipc_map_cache(True, IIII)
        if 64 - 64: i1IIi * II111iiii + I1ii11iIi11i + OOooOOo % I1ii11iIi11i - OoooooooOO
        if 96 - 96: IiII + oO0o / Oo0Ooo + OoooooooOO
        OoOOo.store_rloc_probe_hops(hop_count, ttl)
        if 53 - 53: Ii1I * IiII + Oo0Ooo + i11iIiiIii - iIii1I11I1II1
        Ooo0O = bold("RLOC-probe reply", False)
        I1iiIiiii1111 = OoOOo.rloc.print_address_no_iid()
        Oo0OOoo0 = bold(str(OoOOo.print_rloc_probe_rtt()), False)
        OoOoO = ":{}".format(self.translated_port) if self.translated_port != 0 else ""
        if 58 - 58: Ii1I . Oo0Ooo
        i11i1i = ""
        if (OoOOo.rloc_next_hop != None):
            oOo0OOOOOO, I1o0Ooo = OoOOo.rloc_next_hop
            i11i1i = ", nh {}({})".format(I1o0Ooo, oOo0OOOOOO)
        if 78 - 78: iIii1I11I1II1
        if 64 - 64: OoOoOO00 - oO0o
        ooo0OO = green(lisp_print_eid_tuple(eid, group), False)
        lprint((" Received {} from {}{} for {}, {}, rtt {}{}, " + "to-ttl/from-ttl {}").format(Ooo0O, red(I1iiIiiii1111, False), OoOoO, ooo0OO,
            oooIIi1i, Oo0OOoo0, i11i1i, str(hop_count) + "/" + str(ttl)))
        if 64 - 64: II111iiii
        if (OoOOo.rloc_next_hop == None): return
        if 14 - 14: I1Ii111
        if 81 - 81: II111iiii
        if 55 - 55: O0 + o0oOOo0O0Ooo * I1IiiI - OoooooooOO
        if 68 - 68: I11i + Oo0Ooo
        #
        # Multihomed: pick the up, measured next-hop clone with lowest RTT.
        #
        OoOOo = None
        i1iIi1Ii1I11I = None
        while (True):
            OoOOo = self if OoOOo == None else OoOOo.next_rloc
            if (OoOOo == None): break
            if (OoOOo.up_state() == False): continue
            if (OoOOo.rloc_probe_rtt == -1): continue
            if 58 - 58: ooOoO0o
            if (i1iIi1Ii1I11I == None): i1iIi1Ii1I11I = OoOOo
            if (OoOOo.rloc_probe_rtt < i1iIi1Ii1I11I.rloc_probe_rtt): i1iIi1Ii1I11I = OoOOo
        if 84 - 84: OoOoOO00 - I11i
        if 34 - 34: Ii1I % I1Ii111 % I1ii11iIi11i - IiII
        if (i1iIi1Ii1I11I != None):
            oOo0OOOOOO, I1o0Ooo = i1iIi1Ii1I11I.rloc_next_hop
            i11i1i = bold("nh {}({})".format(I1o0Ooo, oOo0OOOOOO), False)
            lprint(" Install host-route via best {}".format(i11i1i))
            # Remove any previous host route, then install via the best nh.
            lisp_install_host_route(I1iiIiiii1111, None, False)
            lisp_install_host_route(I1iiIiiii1111, I1o0Ooo, True)
if 64 - 64: OoOoOO00
if 3 - 3: i11iIiiIii / I1Ii111
def add_to_rloc_probe_list ( self , eid , group ) :
I1iiIiiii1111 = self . rloc . print_address_no_iid ( )
IIiII = self . translated_port
if ( IIiII != 0 ) : I1iiIiiii1111 += ":" + str ( IIiII )
if 40 - 40: OoooooooOO / o0oOOo0O0Ooo + OoOoOO00
if ( lisp_rloc_probe_list . has_key ( I1iiIiiii1111 ) == False ) :
lisp_rloc_probe_list [ I1iiIiiii1111 ] = [ ]
if 73 - 73: OOooOOo / Oo0Ooo
if 80 - 80: OoO0O00 + I1IiiI % i1IIi / I11i % i1IIi * i11iIiiIii
if ( group . is_null ( ) ) : group . instance_id = 0
for iIIIIIi11Ii , ooo0OO , O0ooO0oOO in lisp_rloc_probe_list [ I1iiIiiii1111 ] :
if ( ooo0OO . is_exact_match ( eid ) and O0ooO0oOO . is_exact_match ( group ) ) :
if ( iIIIIIi11Ii == self ) :
if ( lisp_rloc_probe_list [ I1iiIiiii1111 ] == [ ] ) :
lisp_rloc_probe_list . pop ( I1iiIiiii1111 )
if 27 - 27: OoOoOO00 / I1Ii111 * O0 / I1IiiI - IiII / o0oOOo0O0Ooo
return
if 70 - 70: I1ii11iIi11i
lisp_rloc_probe_list [ I1iiIiiii1111 ] . remove ( [ iIIIIIi11Ii , ooo0OO , O0ooO0oOO ] )
break
if 11 - 11: I1Ii111
if 70 - 70: Ii1I
lisp_rloc_probe_list [ I1iiIiiii1111 ] . append ( [ self , eid , group ] )
if 22 - 22: Ii1I
if 59 - 59: I1ii11iIi11i
if 90 - 90: OOooOOo / iII111i
if 70 - 70: o0oOOo0O0Ooo
if 49 - 49: OOooOOo - I1IiiI + OoooooooOO % iII111i + o0oOOo0O0Ooo + OoOoOO00
OoOOo = lisp_rloc_probe_list [ I1iiIiiii1111 ] [ 0 ] [ 0 ]
if ( OoOOo . state == LISP_RLOC_UNREACH_STATE ) :
self . state = LISP_RLOC_UNREACH_STATE
self . last_state_change = lisp_get_timestamp ( )
if 37 - 37: II111iiii % I1ii11iIi11i * OoOoOO00
if 35 - 35: i1IIi
if 81 - 81: OoO0O00
def delete_from_rloc_probe_list ( self , eid , group ) :
I1iiIiiii1111 = self . rloc . print_address_no_iid ( )
IIiII = self . translated_port
if ( IIiII != 0 ) : I1iiIiiii1111 += ":" + str ( IIiII )
if ( lisp_rloc_probe_list . has_key ( I1iiIiiii1111 ) == False ) : return
if 45 - 45: OoooooooOO . O0 * oO0o + IiII
IiIi11IIIIiii = [ ]
for iIIiI11iI1Ii1 in lisp_rloc_probe_list [ I1iiIiiii1111 ] :
if ( iIIiI11iI1Ii1 [ 0 ] != self ) : continue
if ( iIIiI11iI1Ii1 [ 1 ] . is_exact_match ( eid ) == False ) : continue
if ( iIIiI11iI1Ii1 [ 2 ] . is_exact_match ( group ) == False ) : continue
IiIi11IIIIiii = iIIiI11iI1Ii1
break
if 25 - 25: Oo0Ooo * ooOoO0o % I1Ii111
if ( IiIi11IIIIiii == [ ] ) : return
if 34 - 34: OoOoOO00 / I1Ii111 - ooOoO0o
try :
lisp_rloc_probe_list [ I1iiIiiii1111 ] . remove ( IiIi11IIIIiii )
if ( lisp_rloc_probe_list [ I1iiIiiii1111 ] == [ ] ) :
lisp_rloc_probe_list . pop ( I1iiIiiii1111 )
if 66 - 66: I11i * OoO0O00
except :
return
if 98 - 98: IiII . Oo0Ooo + I1Ii111
if 63 - 63: oO0o * I1IiiI * oO0o
if 56 - 56: oO0o - Ii1I % I1Ii111
def print_rloc_probe_state ( self , trailing_linefeed ) :
Oo0O = ""
OoOOo = self
while ( True ) :
O000o00O0OOoo = OoOOo . last_rloc_probe
if ( O000o00O0OOoo == None ) : O000o00O0OOoo = 0
i1IiIiII = OoOOo . last_rloc_probe_reply
if ( i1IiIiII == None ) : i1IiIiII = 0
Oo0OOoo0 = OoOOo . print_rloc_probe_rtt ( )
IiIIi1I1I11Ii = space ( 4 )
if 57 - 57: IiII % O0 * I1ii11iIi11i
if ( OoOOo . rloc_next_hop == None ) :
Oo0O += "RLOC-Probing:\n"
else :
oOo0OOOOOO , I1o0Ooo = OoOOo . rloc_next_hop
Oo0O += "RLOC-Probing for nh {}({}):\n" . format ( I1o0Ooo , oOo0OOOOOO )
if 61 - 61: O0
if 51 - 51: I1Ii111 - I11i % o0oOOo0O0Ooo * Oo0Ooo - oO0o + II111iiii
Oo0O += ( "{}RLOC-probe request sent: {}\n{}RLOC-probe reply " + "received: {}, rtt {}" ) . format ( IiIIi1I1I11Ii , lisp_print_elapsed ( O000o00O0OOoo ) ,
# i11iIiiIii
IiIIi1I1I11Ii , lisp_print_elapsed ( i1IiIiII ) , Oo0OOoo0 )
if 64 - 64: OoO0O00 % OoOoOO00 % I1IiiI - Ii1I / IiII * Ii1I
if ( trailing_linefeed ) : Oo0O += "\n"
if 74 - 74: IiII - O0 % OOooOOo % OoooooooOO - I11i
OoOOo = OoOOo . next_rloc
if ( OoOOo == None ) : break
Oo0O += "\n"
if 4 - 4: i1IIi + OoOoOO00 + iIii1I11I1II1 - i1IIi * i11iIiiIii
return ( Oo0O )
if 99 - 99: I1ii11iIi11i - O0 % II111iiii + ooOoO0o % OoO0O00 * Ii1I
if 8 - 8: OOooOOo
def get_encap_keys ( self ) :
IIiII = "4341" if self . translated_port == 0 else str ( self . translated_port )
if 85 - 85: O0 % OOooOOo . Ii1I
I1iiIiiii1111 = self . rloc . print_address_no_iid ( ) + ":" + IIiII
if 74 - 74: I1ii11iIi11i - I1Ii111 + i11iIiiIii / I1Ii111 / OoooooooOO + o0oOOo0O0Ooo
try :
o00OO0o0 = lisp_crypto_keys_by_rloc_encap [ I1iiIiiii1111 ]
if ( o00OO0o0 [ 1 ] ) : return ( o00OO0o0 [ 1 ] . encrypt_key , o00OO0o0 [ 1 ] . icv_key )
return ( None , None )
except :
return ( None , None )
if 23 - 23: Oo0Ooo
if 91 - 91: I1Ii111
if 59 - 59: i1IIi % OOooOOo
def rloc_recent_rekey ( self ) :
IIiII = "4341" if self . translated_port == 0 else str ( self . translated_port )
if 81 - 81: i11iIiiIii / OoO0O00 * OoOoOO00 % iII111i - iIii1I11I1II1 + I1ii11iIi11i
I1iiIiiii1111 = self . rloc . print_address_no_iid ( ) + ":" + IIiII
if 20 - 20: O0 . I1Ii111 * Ii1I * II111iiii
try :
iii11 = lisp_crypto_keys_by_rloc_encap [ I1iiIiiii1111 ] [ 1 ]
if ( iii11 == None ) : return ( False )
if ( iii11 . last_rekey == None ) : return ( True )
return ( time . time ( ) - iii11 . last_rekey < 1 )
except :
return ( False )
if 66 - 66: Ii1I % OoO0O00 % II111iiii - OOooOOo * o0oOOo0O0Ooo
if 33 - 33: OoooooooOO / I11i
if 98 - 98: I1ii11iIi11i . Ii1I . iIii1I11I1II1 * I1ii11iIi11i / Ii1I
if 74 - 74: Oo0Ooo * I1Ii111
class lisp_mapping():
    def __init__(self, eid, group, rloc_set):
        """Create an EID-to-RLOC mapping for (eid, group) with the supplied
        RLOC set. Empty-string eid/group arguments are replaced by null
        lisp_address instances."""
        self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0) if eid == "" else eid
        self.group = lisp_address(LISP_AFI_NONE, "", 0, 0) if group == "" \
            else group
        self.rloc_set = rloc_set
        self.best_rloc_set = []
        self.build_best_rloc_set()
        self.uptime = lisp_get_timestamp()
        self.action = LISP_NO_ACTION
        self.expires = None
        self.map_cache_ttl = None
        self.last_refresh_time = self.uptime
        self.source_cache = None
        self.map_replies_sent = 0
        self.mapping_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.use_mr_name = "all"
        self.use_ms_name = "all"
        self.stats = lisp_stats()
        self.dynamic_eids = None
        self.checkpoint_entry = False
        self.secondary_iid = None
        self.signature_eid = False
        self.gleaned = False
if 72 - 72: OoOoOO00 + O0 - IiII * ooOoO0o
if 20 - 20: II111iiii % OoOoOO00 * i11iIiiIii
def print_mapping ( self , eid_indent , rloc_indent ) :
OOOO0O00o = lisp_print_elapsed ( self . uptime )
O0oo0oo0 = "" if self . group . is_null ( ) else ", group {}" . format ( self . group . print_prefix ( ) )
if 68 - 68: IiII / ooOoO0o
lprint ( "{}eid {}{}, uptime {}, {} rlocs:" . format ( eid_indent ,
green ( self . eid . print_prefix ( ) , False ) , O0oo0oo0 , OOOO0O00o ,
len ( self . rloc_set ) ) )
for OoOOo in self . rloc_set : OoOOo . print_rloc ( rloc_indent )
if 100 - 100: ooOoO0o / I1IiiI
if 69 - 69: ooOoO0o + OoO0O00 * o0oOOo0O0Ooo - ooOoO0o
    def print_eid_tuple(self):
        """Return the printable (eid, group) tuple string for this mapping."""
        return(lisp_print_eid_tuple(self.eid, self.group))
if 66 - 66: OoooooooOO / iII111i / I1IiiI % ooOoO0o / OoO0O00 + OOooOOo
if 64 - 64: i1IIi
def print_ttl ( self ) :
iiI = self . map_cache_ttl
if ( iiI == None ) : return ( "forever" )
if 26 - 26: OoOoOO00 / o0oOOo0O0Ooo . OOooOOo + I1IiiI + Ii1I . iII111i
if ( iiI >= 3600 ) :
if ( ( iiI % 3600 ) == 0 ) :
iiI = str ( iiI / 3600 ) + " hours"
else :
iiI = str ( iiI * 60 ) + " mins"
if 89 - 89: I1Ii111 * I1IiiI . i1IIi - iIii1I11I1II1 * I1Ii111
elif ( iiI >= 60 ) :
if ( ( iiI % 60 ) == 0 ) :
iiI = str ( iiI / 60 ) + " mins"
else :
iiI = str ( iiI ) + " secs"
if 5 - 5: OoOoOO00 % i1IIi
else :
iiI = str ( iiI ) + " secs"
if 31 - 31: Oo0Ooo * O0 . OOooOOo . o0oOOo0O0Ooo + OoO0O00 + II111iiii
return ( iiI )
if 76 - 76: Oo0Ooo + I1IiiI - O0
if 58 - 58: IiII * i1IIi . I1IiiI - iII111i
def has_ttl_elapsed ( self ) :
if ( self . map_cache_ttl == None ) : return ( False )
i11IiIIi11I = time . time ( ) - self . last_refresh_time
return ( i11IiIIi11I >= self . map_cache_ttl )
if 73 - 73: Oo0Ooo . OoOoOO00
if 50 - 50: IiII / o0oOOo0O0Ooo
def is_active ( self ) :
if ( self . stats . last_increment == None ) : return ( False )
i11IiIIi11I = time . time ( ) - self . stats . last_increment
return ( i11IiIIi11I <= 60 )
if 9 - 9: Oo0Ooo - OoO0O00 + iII111i / OoooooooOO
if 52 - 52: O0
def match_eid_tuple ( self , db ) :
if ( self . eid . is_exact_match ( db . eid ) == False ) : return ( False )
if ( self . group . is_exact_match ( db . group ) == False ) : return ( False )
return ( True )
if 34 - 34: OoooooooOO + OoOoOO00 - Oo0Ooo . OOooOOo * iIii1I11I1II1
if 93 - 93: i11iIiiIii / Oo0Ooo * OoOoOO00 / ooOoO0o + OoO0O00 * OOooOOo
def sort_rloc_set ( self ) :
    """Order the rloc-set in place by each entry's RLOC address."""
    self . rloc_set . sort ( key = lambda entry : entry . rloc . address )
if 81 - 81: IiII * iII111i + i1IIi + I1Ii111 / OoO0O00
if 83 - 83: oO0o / OoO0O00
def delete_rlocs_from_rloc_probe_list ( self ) :
    """Remove every RLOC in the best-rloc set from the RLOC-probe list
    for this entry's (eid, group)."""
    eid , group = self . eid , self . group
    for entry in self . best_rloc_set :
        entry . delete_from_rloc_probe_list ( eid , group )
if 34 - 34: OoooooooOO - i1IIi * O0
if 83 - 83: I1IiiI + OoO0O00
if 41 - 41: Ii1I + II111iiii . OOooOOo * I1Ii111 / II111iiii
def build_best_rloc_set ( self ) :
    """Rebuild self.best_rloc_set from self.rloc_set.

    Keeps every RLOC whose priority is numerically <= the best (lowest)
    priority found among up-state RLOCs, then resynchronizes the global
    RLOC-probe list with the new membership.
    """
    # Remember the previous best set so stale members can be purged from
    # the probe list below.
    iI11Ii11 = self . best_rloc_set
    self . best_rloc_set = [ ]
    if ( self . rloc_set == None ) : return
    if 11 - 11: I1Ii111 - ooOoO0o
    if 76 - 76: oO0o - i1IIi - O0 % Oo0Ooo
    if 66 - 66: IiII % iII111i / o0oOOo0O0Ooo
    if 44 - 44: iIii1I11I1II1 + o0oOOo0O0Ooo + OoO0O00 * II111iiii
    # 256 is one beyond the largest LISP priority (255), so any up-state
    # RLOC lowers it.
    OOO00O000O0OO = 256
    for OoOOo in self . rloc_set :
        if ( OoOOo . up_state ( ) ) : OOO00O000O0OO = min ( OoOOo . priority , OOO00O000O0OO )
    if 96 - 96: Ii1I
    if 8 - 8: iII111i
    if 77 - 77: OOooOOo % I1IiiI - iII111i % I1Ii111
    if 29 - 29: iIii1I11I1II1 / i11iIiiIii + Oo0Ooo
    if 99 - 99: I1IiiI - iII111i * Ii1I - OoOoOO00 / i11iIiiIii - i1IIi
    if 46 - 46: I1ii11iIi11i * ooOoO0o
    if 4 - 4: I1Ii111 * II111iiii
    if 4 - 4: ooOoO0o * Oo0Ooo - I1ii11iIi11i % ooOoO0o % OoOoOO00
    if 18 - 18: OOooOOo / O0 . OoO0O00 - II111iiii * OOooOOo
    if 13 - 13: OoO0O00 % i1IIi . i11iIiiIii / iII111i
    # Keep every RLOC at (or better than) the best priority. An
    # unreachable member with no probe timestamp gets one now so the
    # probe logic has a base time.
    for OoOOo in self . rloc_set :
        if ( OoOOo . priority <= OOO00O000O0OO ) :
            if ( OoOOo . unreach_state ( ) and OoOOo . last_rloc_probe == None ) :
                OoOOo . last_rloc_probe = lisp_get_timestamp ( )
            if 28 - 28: i1IIi - iII111i + o0oOOo0O0Ooo / Oo0Ooo * oO0o
            self . best_rloc_set . append ( OoOOo )
    if 8 - 8: ooOoO0o + OOooOOo * ooOoO0o / i1IIi . I1ii11iIi11i
    if 4 - 4: Ii1I - Oo0Ooo . i1IIi + iIii1I11I1II1
    if 28 - 28: O0 / ooOoO0o / IiII - I11i + IiII + OoO0O00
    if 84 - 84: Oo0Ooo + OoOoOO00 / iII111i . I1ii11iIi11i
    if 26 - 26: Oo0Ooo
    if 61 - 61: Ii1I * oO0o * i11iIiiIii + OoO0O00
    if 43 - 43: OoO0O00 * OoO0O00 * oO0o
    if 24 - 24: oO0o
    # Purge old best-set members from the probe list. NOTE(review):
    # members with priority strictly better than the new best are
    # skipped here — presumably they stay in the new set; confirm.
    for OoOOo in iI11Ii11 :
        if ( OoOOo . priority < OOO00O000O0OO ) : continue
        OoOOo . delete_from_rloc_probe_list ( self . eid , self . group )
    if 77 - 77: i11iIiiIii - I1Ii111 - I1ii11iIi11i * Oo0Ooo / i11iIiiIii
    # Register the new best RLOCs (null addresses cannot be probed).
    for OoOOo in self . best_rloc_set :
        if ( OoOOo . rloc . is_null ( ) ) : continue
        OoOOo . add_to_rloc_probe_list ( self . eid , self . group )
if 79 - 79: Oo0Ooo % Oo0Ooo . oO0o + ooOoO0o * iII111i * I11i
if 87 - 87: o0oOOo0O0Ooo + OoOoOO00 % o0oOOo0O0Ooo + I1IiiI
if 89 - 89: II111iiii
def select_rloc ( self , lisp_packet , ipc_socket ) :
    """Choose the RLOC to encapsulate 'lisp_packet' to.

    Hashes the inner header to load-split across the best-RLOC set,
    fails over to the next up-state RLOC when the hashed one is down,
    and returns a 6-element list:
    [rloc-address, translated-port, nonce, action, rle, rloc-entry].
    """
    i1II1IiiIi = lisp_packet . packet
    iiIiiI1I1I = lisp_packet . inner_version
    o00OOo00 = len ( self . best_rloc_set )
    # NOTE(review): "is 0" relies on CPython small-int caching; it acts
    # like "== 0" here.
    if ( o00OOo00 is 0 ) :
        # No usable RLOC: count the packet against the entry and return
        # the map-cache action (e.g. natively forward / drop).
        self . stats . increment ( len ( i1II1IiiIi ) )
        return ( [ None , None , None , self . action , None , None ] )
    if 76 - 76: Ii1I - iII111i
    if 89 - 89: II111iiii . Ii1I
    # Fold the inner addresses (plus 4 extra bytes when load-splitting
    # pings) into a hash used to index the best-RLOC set. Byte-at-a-time
    # unpack on the py2 packet string for IPv4, 32-bit words otherwise.
    i1I11iII1 = 4 if lisp_load_split_pings else 0
    IiiiI1I1iI11 = lisp_packet . hash_ports ( )
    if ( iiIiiI1I1I == 4 ) :
        for Ii11 in range ( 8 + i1I11iII1 ) :
            IiiiI1I1iI11 = IiiiI1I1iI11 ^ struct . unpack ( "B" , i1II1IiiIi [ Ii11 + 12 ] ) [ 0 ]
        if 22 - 22: O0 * I1IiiI / I11i + I11i % I11i
    elif ( iiIiiI1I1I == 6 ) :
        for Ii11 in range ( 0 , 32 + i1I11iII1 , 4 ) :
            IiiiI1I1iI11 = IiiiI1I1iI11 ^ struct . unpack ( "I" , i1II1IiiIi [ Ii11 + 8 : Ii11 + 12 ] ) [ 0 ]
        if 63 - 63: i11iIiiIii + iIii1I11I1II1 / oO0o % IiII - O0
        # Fold the 32-bit hash down to a smaller value.
        IiiiI1I1iI11 = ( IiiiI1I1iI11 >> 16 ) + ( IiiiI1I1iI11 & 0xffff )
        IiiiI1I1iI11 = ( IiiiI1I1iI11 >> 8 ) + ( IiiiI1I1iI11 & 0xff )
    else :
        for Ii11 in range ( 0 , 12 + i1I11iII1 , 4 ) :
            IiiiI1I1iI11 = IiiiI1I1iI11 ^ struct . unpack ( "I" , i1II1IiiIi [ Ii11 : Ii11 + 4 ] ) [ 0 ]
        if 21 - 21: II111iiii
    if 89 - 89: OOooOOo % i11iIiiIii * OoOoOO00 % oO0o / O0 * i1IIi
    if 16 - 16: IiII
    if ( lisp_data_plane_logging ) :
        ii11I = [ ]
        for iIIIIIi11Ii in self . best_rloc_set :
            if ( iIIIIIi11Ii . rloc . is_null ( ) ) : continue
            ii11I . append ( [ iIIIIIi11Ii . rloc . print_address_no_iid ( ) , iIIIIIi11Ii . print_state ( ) ] )
        if 9 - 9: I11i % i1IIi / i1IIi / OoO0O00
        dprint ( "Packet hash {}, index {}, best-rloc-list: {}" . format ( hex ( IiiiI1I1iI11 ) , IiiiI1I1iI11 % o00OOo00 , red ( str ( ii11I ) , False ) ) )
    if 46 - 46: I1Ii111 * II111iiii + II111iiii * O0 % II111iiii
    if 37 - 37: OOooOOo . iIii1I11I1II1 / O0 . ooOoO0o + OOooOOo - OoooooooOO
    if 96 - 96: I1Ii111 / oO0o . I1ii11iIi11i % I1IiiI * OOooOOo
    if 99 - 99: i11iIiiIii - I1Ii111
    if 4 - 4: o0oOOo0O0Ooo - i11iIiiIii . iIii1I11I1II1 . OOooOOo % IiII
    if 68 - 68: I11i / iII111i - IiII . iIii1I11I1II1 / o0oOOo0O0Ooo
    # Hash-select the candidate RLOC.
    OoOOo = self . best_rloc_set [ IiiiI1I1iI11 % o00OOo00 ]
    if 54 - 54: II111iiii * I1IiiI
    if 49 - 49: I1ii11iIi11i
    if 31 - 31: o0oOOo0O0Ooo - OoOoOO00 + I1ii11iIi11i . oO0o - O0
    if 61 - 61: I1ii11iIi11i * II111iiii . i1IIi
    if 60 - 60: OoooooooOO % ooOoO0o * i11iIiiIii * OoooooooOO % IiII
    # Update echo-nonce state for the chosen RLOC, if one is tracked.
    oOOo00ooO = lisp_get_echo_nonce ( OoOOo . rloc , None )
    if ( oOOo00ooO ) :
        oOOo00ooO . change_state ( OoOOo )
        if ( OoOOo . no_echoed_nonce_state ( ) ) :
            oOOo00ooO . request_nonce_sent = None
    if 15 - 15: oO0o
    if 40 - 40: I1Ii111
    if 77 - 77: II111iiii - o0oOOo0O0Ooo . Ii1I
    if 47 - 47: o0oOOo0O0Ooo % OOooOOo + I1Ii111
    if 64 - 64: ooOoO0o / IiII . I1IiiI
    if 77 - 77: o0oOOo0O0Ooo % I1Ii111 . OOooOOo
    # If the hashed RLOC is down, walk the set circularly for the next
    # up-state one; when none is found, rebuild the best set and drop.
    if ( OoOOo . up_state ( ) == False ) :
        oO0OOooOoO = IiiiI1I1iI11 % o00OOo00
        iI11I = ( oO0OOooOoO + 1 ) % o00OOo00
        while ( iI11I != oO0OOooOoO ) :
            OoOOo = self . best_rloc_set [ iI11I ]
            if ( OoOOo . up_state ( ) ) : break
            iI11I = ( iI11I + 1 ) % o00OOo00
        if 17 - 17: I1IiiI * Ii1I . i11iIiiIii - oO0o . i11iIiiIii + Oo0Ooo
        if ( iI11I == oO0OOooOoO ) :
            self . build_best_rloc_set ( )
            return ( [ None , None , None , None , None , None ] )
    if 42 - 42: iII111i
    if 51 - 51: I1IiiI - OoOoOO00 * I1Ii111 * iIii1I11I1II1
    if 5 - 5: i11iIiiIii / o0oOOo0O0Ooo
    if 45 - 45: I1Ii111 + OoooooooOO + o0oOOo0O0Ooo * II111iiii
    if 12 - 12: I1ii11iIi11i / O0
    if 18 - 18: OoOoOO00 . i11iIiiIii + i1IIi / OoooooooOO - IiII % OoO0O00
    # Count the packet against the RLOC actually used.
    OoOOo . stats . increment ( len ( i1II1IiiIi ) )
    if 47 - 47: iII111i % IiII + I1Ii111 * o0oOOo0O0Ooo * OoooooooOO
    if 100 - 100: Oo0Ooo / I1IiiI / iII111i / I1Ii111 / oO0o % o0oOOo0O0Ooo
    if 16 - 16: I1IiiI + I11i
    if 66 - 66: OoooooooOO % II111iiii / I1Ii111 . i11iIiiIii
    # Late-bind a named RLE if it has since shown up in the global list.
    # (py2-only dict.has_key.)
    if ( OoOOo . rle_name and OoOOo . rle == None ) :
        if ( lisp_rle_list . has_key ( OoOOo . rle_name ) ) :
            OoOOo . rle = lisp_rle_list [ OoOOo . rle_name ]
    if 67 - 67: Ii1I + Oo0Ooo - I1IiiI - IiII + oO0o + Oo0Ooo
    if 84 - 84: I1ii11iIi11i % oO0o - OOooOOo * Ii1I
    # Replication (RLE) takes precedence over a unicast RLOC.
    if ( OoOOo . rle ) : return ( [ None , None , None , None , OoOOo . rle , None ] )
    if 78 - 78: i1IIi / ooOoO0o / oO0o
    if 21 - 21: IiII % Ii1I + OOooOOo + IiII
    if 90 - 90: o0oOOo0O0Ooo
    if 38 - 38: OoOoOO00 / OOooOOo % OoooooooOO * I1ii11iIi11i
    # An explicit-locator-path hop overrides the RLOC address.
    if ( OoOOo . elp and OoOOo . elp . use_elp_node ) :
        return ( [ OoOOo . elp . use_elp_node . address , None , None , None , None , None ] )
    if 7 - 7: I11i * O0 + Oo0Ooo / O0 * oO0o + i11iIiiIii
    if 74 - 74: OoOoOO00
    if 91 - 91: i11iIiiIii / Ii1I % OOooOOo % O0 - I11i . I11i
    if 78 - 78: i1IIi + I11i % OoooooooOO + i1IIi + iII111i % Ii1I
    if 87 - 87: ooOoO0o . iIii1I11I1II1
    # A null RLOC address means "no address": fall back to the entry's
    # action; a NAT-translated port rides along with the address.
    O00o00 = None if ( OoOOo . rloc . is_null ( ) ) else OoOOo . rloc
    IIiII = OoOOo . translated_port
    ooOOoo0 = self . action if ( O00o00 == None ) else None
    if 65 - 65: iIii1I11I1II1
    if 58 - 58: IiII % i1IIi . i11iIiiIii
    if 5 - 5: OoOoOO00
    if 75 - 75: OOooOOo
    if 60 - 60: ooOoO0o - II111iiii - iIii1I11I1II1
    # Attach a request/echo nonce unless a request-nonce recently timed out.
    oOo0 = None
    if ( oOOo00ooO and oOOo00ooO . request_nonce_timeout ( ) == False ) :
        oOo0 = oOOo00ooO . get_request_or_echo_nonce ( ipc_socket , O00o00 )
    if 23 - 23: I1ii11iIi11i
    if 68 - 68: OoO0O00 . oO0o / IiII - II111iiii % Oo0Ooo
    if 24 - 24: II111iiii / I1ii11iIi11i + oO0o / Ii1I + IiII % oO0o
    if 86 - 86: I1IiiI
    if 83 - 83: I11i % Ii1I + IiII % I11i / i1IIi . oO0o
    return ( [ O00o00 , IIiII , oOo0 , ooOOoo0 , None , OoOOo ] )
if 56 - 56: I1Ii111 - OOooOOo % o0oOOo0O0Ooo
if 30 - 30: I1Ii111 % i1IIi
def do_rloc_sets_match ( self , rloc_address_set ) :
    """Return True when 'rloc_address_set' (a list of addresses) matches
    this entry's rloc-set, element for element (same length and every
    entry's RLOC found in the address set)."""
    if ( len ( self . rloc_set ) != len ( rloc_address_set ) ) : return ( False )
    if 98 - 98: oO0o . i11iIiiIii / Ii1I - Ii1I
    if 23 - 23: iIii1I11I1II1
    if 30 - 30: I1ii11iIi11i + OoO0O00 - O0
    if 42 - 42: I11i - I1Ii111
    if 24 - 24: i1IIi
    for IiI1I1iii11 in self . rloc_set :
        # Sentinel trick: on a match the loop variable is set to None
        # before breaking; if it still equals the last element after the
        # scan, no match was found. NOTE(review): a match on the *last*
        # element leaves the sentinel None (!= last), which is the
        # intended "found" outcome.
        for OoOOo in rloc_address_set :
            if ( OoOOo . is_exact_match ( IiI1I1iii11 . rloc ) == False ) : continue
            OoOOo = None
            break
        if 93 - 93: OoOoOO00 - Oo0Ooo + iIii1I11I1II1 % iIii1I11I1II1 / I1ii11iIi11i - I1Ii111
        if ( OoOOo == rloc_address_set [ - 1 ] ) : return ( False )
    if 9 - 9: I1ii11iIi11i - o0oOOo0O0Ooo / i11iIiiIii * iII111i / OoOoOO00 . I1IiiI
    return ( True )
if 23 - 23: I1IiiI . iII111i % i1IIi
if 92 - 92: o0oOOo0O0Ooo % i1IIi / OoooooooOO * OoooooooOO / iIii1I11I1II1
def get_rloc ( self , rloc ) :
    """Return the rloc-set entry whose RLOC address exactly matches
    'rloc', or None when absent."""
    for entry in self . rloc_set :
        if rloc . is_exact_match ( entry . rloc ) :
            return entry
    return None
if 33 - 33: I1Ii111 + OoooooooOO
if 73 - 73: O0 . Oo0Ooo
def get_rloc_by_interface ( self , interface ) :
    """Return the first rloc-set entry configured on 'interface', else None."""
    matches = ( e for e in self . rloc_set if e . interface == interface )
    return next ( matches , None )
if 48 - 48: II111iiii % I1ii11iIi11i - II111iiii
if 29 - 29: I1Ii111 - I1Ii111 - I11i * iIii1I11I1II1 % OoO0O00 % IiII
def add_db ( self ) :
    """Insert this mapping into the database-mapping lookup cache.

    Unicast entries key directly on the EID; multicast entries hang off
    a per-group parent mapping, created on first use.
    """
    if self . group . is_null ( ) :
        lisp_db_for_lookups . add_cache ( self . eid , self )
        return
    parent = lisp_db_for_lookups . lookup_cache ( self . group , True )
    if parent == None :
        parent = lisp_mapping ( self . group , self . group , [ ] )
        lisp_db_for_lookups . add_cache ( self . group , parent )
    parent . add_source_entry ( self )
if 40 - 40: I1Ii111 - iIii1I11I1II1
if 88 - 88: OOooOOo * O0 * OoOoOO00
if 26 - 26: Ii1I
def add_cache ( self , do_ipc = True ) :
    """Add this mapping to the map-cache.

    (S,G) entries are stored under a parent entry keyed by the group;
    when do_ipc is set, the new state is also written over IPC.
    """
    if ( self . group . is_null ( ) ) :
        lisp_map_cache . add_cache ( self . eid , self )
        # Optionally push the route into VXLAN hardware.
        if ( lisp_program_hardware ) : lisp_program_vxlan_hardware ( self )
    else :
        IIII = lisp_map_cache . lookup_cache ( self . group , True )
        if ( IIII == None ) :
            # First source for this group: build the parent (G,G) entry.
            IIII = lisp_mapping ( self . group , self . group , [ ] )
            IIII . eid . copy_address ( self . group )
            IIII . group . copy_address ( self . group )
            lisp_map_cache . add_cache ( self . group , IIII )
        if 65 - 65: iII111i / iIii1I11I1II1 + I11i - iIii1I11I1II1 - Ii1I . I1Ii111
        # A null source EID becomes a (*,G) default route for the group.
        if ( self . eid . is_null ( ) ) : self . eid . make_default_route ( IIII . group )
        IIII . add_source_entry ( self )
    if 77 - 77: OoOoOO00 / I1IiiI + IiII
    if ( do_ipc ) : lisp_write_ipc_map_cache ( True , self )
if 66 - 66: i11iIiiIii * OoooooooOO + iII111i / Ii1I
if 42 - 42: Ii1I / iIii1I11I1II1 / Oo0Ooo . O0 . oO0o * I1IiiI
def delete_cache ( self ) :
    """Remove this mapping from the map-cache and the RLOC-probe list,
    announcing the removal over IPC first."""
    self . delete_rlocs_from_rloc_probe_list ( )
    lisp_write_ipc_map_cache ( False , self )
    if 21 - 21: OoooooooOO
    if ( self . group . is_null ( ) ) :
        lisp_map_cache . delete_cache ( self . eid )
        if ( lisp_program_hardware ) :
            # Mirror the deletion into the kernel routing table.
            OoooO00OO0OO = self . eid . print_prefix_no_iid ( )
            os . system ( "ip route delete {}" . format ( OoooO00OO0OO ) )
            if 50 - 50: OoO0O00 . o0oOOo0O0Ooo
    else :
        # (S,G): remove the source from the group's parent entry and
        # drop the parent when its last source goes away.
        IIII = lisp_map_cache . lookup_cache ( self . group , True )
        if ( IIII == None ) : return
        if 30 - 30: I1ii11iIi11i % iII111i
        O0o0O0OO00O = IIII . lookup_source_cache ( self . eid , True )
        if ( O0o0O0OO00O == None ) : return
        if 63 - 63: Oo0Ooo + I11i % I11i / iII111i + OoOoOO00
        IIII . source_cache . delete_cache ( self . eid )
        if ( IIII . source_cache . cache_size ( ) == 0 ) :
            lisp_map_cache . delete_cache ( self . group )
    if 9 - 9: oO0o - OoO0O00 . O0 + OoO0O00
if 65 - 65: I1ii11iIi11i * II111iiii % I11i + II111iiii . i1IIi / ooOoO0o
if 74 - 74: OoOoOO00 % OoO0O00 . OoOoOO00
def add_source_entry ( self , source_mc ) :
    """Record multicast source 'source_mc' in this entry's per-source
    cache, creating the cache on first use."""
    if self . source_cache is None :
        self . source_cache = lisp_cache ( )
    self . source_cache . add_cache ( source_mc . eid , source_mc )
if 16 - 16: OoO0O00 / Ii1I * i11iIiiIii / o0oOOo0O0Ooo + I1Ii111
if 21 - 21: I11i % I1ii11iIi11i
def lookup_source_cache ( self , source , exact ) :
    """Look 'source' up in the per-source cache; None when no cache exists."""
    cache = self . source_cache
    return None if cache is None else cache . lookup_cache ( source , exact )
if 8 - 8: OOooOOo % OoO0O00 + O0 - o0oOOo0O0Ooo
if 46 - 46: Oo0Ooo . ooOoO0o + OoOoOO00 - I11i / i11iIiiIii . iII111i
def dynamic_eid_configured ( self ) :
    """True when a dynamic-EID set was configured on this entry."""
    return self . dynamic_eids is not None
if 80 - 80: II111iiii + OoO0O00 % ooOoO0o + i11iIiiIii
if 30 - 30: Ii1I / I1ii11iIi11i % IiII - Oo0Ooo
def star_secondary_iid ( self , prefix ) :
    """Mark the secondary instance-ID inside 'prefix' with a '*'; the
    prefix is returned untouched when no secondary IID is configured."""
    iid = self . secondary_iid
    if iid is None : return prefix
    tag = "," + str ( iid )
    return prefix . replace ( tag , tag + "*" )
if 100 - 100: IiII . I1Ii111 * oO0o % OoO0O00 . iIii1I11I1II1 * Oo0Ooo
if 100 - 100: IiII - OoOoOO00 % iII111i
def increment_decap_stats ( self , packet ) :
    """Bump decapsulation statistics for 'packet' on both the matching
    RLOC (when one is found) and the mapping itself."""
    IIiII = packet . udp_dport
    if ( IIiII == LISP_DATA_PORT ) :
        # Standard data port: find the RLOC by outer destination address.
        OoOOo = self . get_rloc ( packet . outer_dest )
    else :
        if 24 - 24: Oo0Ooo / OoO0O00 + i11iIiiIii
        if 81 - 81: i11iIiiIii . iIii1I11I1II1 - OoooooooOO
        if 52 - 52: O0 - I1Ii111 + oO0o % ooOoO0o . oO0o
        if 60 - 60: oO0o + o0oOOo0O0Ooo - OOooOOo % o0oOOo0O0Ooo . I11i + OoO0O00
        # Non-standard port: credit the first NAT-translated RLOC.
        # NOTE(review): if rloc_set is empty this leaves the loop
        # variable unbound and the check below raises NameError —
        # presumably an rloc-set is never empty here; confirm.
        for OoOOo in self . rloc_set :
            if ( OoOOo . translated_port != 0 ) : break
    if 27 - 27: i11iIiiIii - I1ii11iIi11i * I1Ii111 . I1IiiI / OoO0O00 * ooOoO0o
    if 42 - 42: OOooOOo
    if ( OoOOo != None ) : OoOOo . stats . increment ( len ( packet . packet ) )
    self . stats . increment ( len ( packet . packet ) )
if 36 - 36: OoooooooOO + ooOoO0o + iII111i
if 30 - 30: i1IIi % Ii1I
def rtrs_in_rloc_set ( self ) :
    """True when any RLOC in the rloc-set is an RTR."""
    return any ( entry . is_rtr ( ) for entry in self . rloc_set )
if 45 - 45: OoO0O00 / I1ii11iIi11i * ooOoO0o * OOooOOo % i11iIiiIii * iII111i
if 33 - 33: oO0o . iII111i + Oo0Ooo
if 33 - 33: ooOoO0o
class lisp_dynamic_eid ( ) :
    """A dynamically discovered EID: the address, the interface it was
    learned on, and its idle timeout."""

    def __init__ ( self ) :
        self . dynamic_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
        self . uptime = lisp_get_timestamp ( )
        self . interface = None
        self . last_packet = None
        self . timeout = LISP_DEFAULT_DYN_EID_TIMEOUT

    def get_timeout ( self , interface ) :
        """Load the per-interface dynamic-EID timeout into self.timeout,
        falling back to the default when the interface is unknown or has
        no timeout configured."""
        try :
            self . timeout = lisp_myinterfaces [ interface ] . dynamic_eid_timeout
        except :
            self . timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
if 26 - 26: OoooooooOO % iIii1I11I1II1 - IiII
if 3 - 3: oO0o * II111iiii . O0
if 19 - 19: I1IiiI / I1IiiI / Oo0Ooo + oO0o + i1IIi
if 31 - 31: iII111i / OoooooooOO - I1Ii111 . iII111i
class lisp_group_mapping ( ) :
    """A configured multicast group-mapping: a named group prefix, its
    allowed sources, the map-server name to use, and an optional RLE
    address."""

    def __init__ ( self , group_name , ms_name , group_prefix , sources , rle_addr ) :
        self . group_name = group_name
        self . group_prefix = group_prefix
        self . use_ms_name = ms_name
        self . sources = sources
        self . rle_address = rle_addr

    def add_group ( self ) :
        """Register this mapping in the global group-mapping table."""
        lisp_group_mapping_list [ self . group_name ] = self
if 100 - 100: I11i - I1ii11iIi11i . i1IIi
if 85 - 85: II111iiii
if 58 - 58: i1IIi - OoO0O00 + ooOoO0o
#
# Legend strings for the site registration flags (see print_flags() in
# lisp_site_eid). Each format string receives "" when the flag is set
# or "not " when clear.
#
lisp_site_flags = {
    "P" : "ETR is {}Requesting Map-Server to Proxy Map-Reply" ,
    "S" : "ETR is {}LISP-SEC capable" ,
    "I" : "xTR-ID and site-ID are {}included in Map-Register" ,
    "T" : "Use Map-Register TTL field to timeout registration is {}set" ,
    "R" : "Merging registrations are {}requested" ,
    "M" : "ETR is {}a LISP Mobile-Node" ,
    "N" : "ETR is {}requesting Map-Notify messages from Map-Server"
}
if 6 - 6: IiII % I1IiiI + OoooooooOO * oO0o . iII111i + oO0o
class lisp_site ( ) :
    """A configured map-server site: identity, authentication material,
    allowed EID-prefixes/RLOCs, and Map-Notify bookkeeping."""

    def __init__ ( self ) :
        # Identity / admin state.
        self . site_name = ""
        self . description = ""
        self . shutdown = False
        # Authentication and encryption.
        self . auth_sha1_or_sha2 = False
        self . auth_key = { }
        self . encryption_key = None
        # Registration policy.
        self . allowed_prefixes = { }
        self . allowed_prefixes_sorted = [ ]
        self . allowed_rlocs = { }
        # Map-Notify counters.
        self . map_notifies_sent = 0
        self . map_notify_acks_received = 0
if 4 - 4: I11i % I1IiiI
if 72 - 72: I1IiiI % II111iiii % iII111i / OoOoOO00
if 96 - 96: OoOoOO00 % Ii1I
class lisp_site_eid ( ) :
    """One registered EID-prefix (optionally with a group) under a
    map-server site, including merged registration state."""

    def __init__ ( self , site ) :
        # Owning lisp_site and the (eid, group) this entry registers.
        self . site = site
        self . eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
        self . group = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
        # Registration bookkeeping.
        self . first_registered = 0
        self . last_registered = 0
        self . last_registerer = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
        # Default TTL is three site timeout-check intervals.
        self . register_ttl = LISP_SITE_TIMEOUT_CHECK_INTERVAL * 3
        self . registered = False
        self . registered_rlocs = [ ]
        self . auth_sha1_or_sha2 = False
        # Per-xTR registrations that get merged into this entry.
        self . individual_registrations = { }
        self . map_registers_received = 0
        # Flags copied from the Map-Register message.
        self . proxy_reply_requested = False
        self . force_proxy_reply = False
        self . force_nat_proxy_reply = False
        self . force_ttl = None
        self . pitr_proxy_reply_drop = False
        self . proxy_reply_action = ""
        self . lisp_sec_present = False
        self . map_notify_requested = False
        self . mobile_node_requested = False
        self . echo_nonce_capable = False
        self . use_register_ttl_requested = False
        self . merge_register_requested = False
        self . xtr_id_present = False
        self . xtr_id = 0
        self . site_id = 0
        # Accept-more-specifics (AMS) parent/child linkage.
        self . accept_more_specifics = False
        self . parent_for_more_specifics = None
        self . dynamic = False
        self . more_specific_registrations = [ ]
        # Per-source cache for (S,G) entries.
        self . source_cache = None
        self . inconsistent_registration = False
        self . policy = None
        self . require_signature = False
if 50 - 50: IiII - II111iiii
if 10 - 10: OoooooooOO % Ii1I * OOooOOo + IiII * oO0o
def print_eid_tuple ( self ) :
    """Return the printable EID/group string (delegates to lisp_print_eid_tuple)."""
    return lisp_print_eid_tuple(self.eid, self.group)
if 13 - 13: II111iiii
if 14 - 14: i11iIiiIii . IiII
def print_flags ( self , html ) :
    """Render the site-EID registration flags.

    html=False gives the plain "P-S-I-T-R-M-N" string, a lowercase
    letter meaning the flag is off; html=True wraps each letter of that
    string in a hover-span describing it (via lisp_site_flags).
    """
    if html == False :
        bits = [
            "P" if self . proxy_reply_requested else "p" ,
            "S" if self . lisp_sec_present else "s" ,
            "I" if self . xtr_id_present else "i" ,
            "T" if self . use_register_ttl_requested else "t" ,
            "R" if self . merge_register_requested else "r" ,
            "M" if self . mobile_node_requested else "m" ,
            "N" if self . map_notify_requested else "n" ,
        ]
        return "-" . join ( bits )

    output = ""
    for letter in self . print_flags ( False ) . split ( "-" ) :
        legend = lisp_site_flags [ letter . upper ( ) ]
        legend = legend . format ( "" if letter . isupper ( ) else "not " )
        output += lisp_span ( letter , legend )
        # No trailing dash after the final (N/n) flag.
        if letter . lower ( ) != "n" : output += "-"
    return output
if 49 - 49: iII111i + OoOoOO00
if 33 - 33: ooOoO0o
def copy_state_to_parent ( self , child ) :
    """Fold one child registration's state up into this merged parent
    site-EID entry."""
    self . xtr_id = child . xtr_id
    self . site_id = child . site_id
    self . first_registered = child . first_registered
    self . last_registered = child . last_registered
    self . last_registerer = child . last_registerer
    self . register_ttl = child . register_ttl
    # This reads the parent's *old* registered flag (it is overwritten
    # below): the first transition into the registered state stamps
    # first_registered with the current time.
    if ( self . registered == False ) :
        self . first_registered = lisp_get_timestamp ( )
    if 19 - 19: I1Ii111 % IiII
    self . auth_sha1_or_sha2 = child . auth_sha1_or_sha2
    self . registered = child . registered
    self . proxy_reply_requested = child . proxy_reply_requested
    self . lisp_sec_present = child . lisp_sec_present
    self . xtr_id_present = child . xtr_id_present
    self . use_register_ttl_requested = child . use_register_ttl_requested
    self . merge_register_requested = child . merge_register_requested
    self . mobile_node_requested = child . mobile_node_requested
    self . map_notify_requested = child . map_notify_requested
if 94 - 94: I1Ii111 * I1ii11iIi11i * I1ii11iIi11i - o0oOOo0O0Ooo . i11iIiiIii
if 16 - 16: i1IIi
def build_sort_key ( self ) :
    """Build a sortable string key for this (eid, group) entry.

    Uses a scratch lisp_cache to produce the raw key(s), then splices
    the mask-length in after the first 12 characters; a group key, when
    present, is appended the same way.
    """
    o0oOO0OOoO = lisp_cache ( )
    Iii11i1 , iii11 = o0oOO0OOoO . build_key ( self . eid )
    ooO0O = ""
    if ( self . group . is_null ( ) == False ) :
        Ii1II11I1iI11 , ooO0O = o0oOO0OOoO . build_key ( self . group )
        ooO0O = "-" + ooO0O [ 0 : 12 ] + "-" + str ( Ii1II11I1iI11 ) + "-" + ooO0O [ 12 : : ]
    if 68 - 68: I1IiiI - OoOoOO00 - iIii1I11I1II1 % i11iIiiIii * OoOoOO00 * OoO0O00
    iii11 = iii11 [ 0 : 12 ] + "-" + str ( Iii11i1 ) + "-" + iii11 [ 12 : : ] + ooO0O
    # Drop the scratch cache before returning.
    del ( o0oOO0OOoO )
    return ( iii11 )
if 97 - 97: OoO0O00 - IiII + ooOoO0o % iIii1I11I1II1 % iII111i
if 100 - 100: IiII - Ii1I * iIii1I11I1II1 . iII111i . i1IIi % Oo0Ooo
def merge_in_site_eid ( self , child ) :
    """Merge individual registrations into this entry.

    Unicast entries merge RLOC records; (S,G) entries merge RLEs and
    report whether the RLE set changed. The child's state (when given)
    is then copied up and the register counter bumped. Returns the
    RLE-changed flag.
    """
    i11I1i = False
    if ( self . group . is_null ( ) ) :
        self . merge_rlocs_in_site_eid ( )
    else :
        i11I1i = self . merge_rles_in_site_eid ( )
    if 8 - 8: oO0o % OOooOOo - i11iIiiIii - i1IIi / I1IiiI - OoooooooOO
    if 46 - 46: Oo0Ooo % i11iIiiIii * o0oOOo0O0Ooo
    if 33 - 33: oO0o * ooOoO0o * Ii1I * IiII
    if 39 - 39: i1IIi
    if 79 - 79: ooOoO0o - II111iiii - oO0o
    if 55 - 55: iII111i % iIii1I11I1II1 + Ii1I + oO0o . i11iIiiIii - OOooOOo
    if ( child != None ) :
        self . copy_state_to_parent ( child )
        self . map_registers_received += 1
    if 14 - 14: oO0o - i11iIiiIii / OoOoOO00 % o0oOOo0O0Ooo / IiII * I1IiiI
    return ( i11I1i )
if 2 - 2: i1IIi / I1Ii111 + I1IiiI + I1ii11iIi11i - o0oOOo0O0Ooo + iIii1I11I1II1
if 78 - 78: I1ii11iIi11i % i1IIi . I1Ii111 + Oo0Ooo . o0oOOo0O0Ooo % II111iiii
def copy_rloc_records ( self ) :
    """Return a deep-copied list of this entry's registered RLOC records."""
    return [ copy . deepcopy ( record ) for record in self . registered_rlocs ]
if 8 - 8: iII111i . i11iIiiIii . IiII . I1ii11iIi11i + I11i
if 24 - 24: I1IiiI - I1IiiI . Oo0Ooo * IiII + I1IiiI / i1IIi
def merge_rlocs_in_site_eid ( self ) :
    """Rebuild registered_rlocs from all individual registrations for
    this site-id, then de-duplicate by RLOC address. Clears the
    registered flag when the merged set ends up empty."""
    self . registered_rlocs = [ ]
    for Iiii1IIIiIi in self . individual_registrations . values ( ) :
        if ( self . site_id != Iiii1IIIiIi . site_id ) : continue
        if ( Iiii1IIIiIi . registered == False ) : continue
        self . registered_rlocs += Iiii1IIIiIi . copy_rloc_records ( )
    if 18 - 18: II111iiii / iIii1I11I1II1 * I1ii11iIi11i . ooOoO0o * ooOoO0o
    if 89 - 89: I1IiiI - Oo0Ooo
    if 28 - 28: OoooooooOO . i1IIi . I1Ii111
    if 53 - 53: OoO0O00 * Oo0Ooo + Oo0Ooo
    if 62 - 62: OOooOOo - i1IIi + i11iIiiIii * I11i / OoO0O00
    if 84 - 84: IiII * OOooOOo
    # De-duplicate: null RLOCs and the first record always pass; others
    # are scanned against what is already kept.
    O0ii1i = [ ]
    for IiI1I1iii11 in self . registered_rlocs :
        if ( IiI1I1iii11 . rloc . is_null ( ) or len ( O0ii1i ) == 0 ) :
            O0ii1i . append ( IiI1I1iii11 )
            continue
        if 1 - 1: iII111i * I1IiiI . o0oOOo0O0Ooo . IiII
        # Sentinel scan: break on a duplicate. NOTE(review): a duplicate
        # found at the *last* kept record still appends (loop variable
        # equals the last element either way) — presumably a rare,
        # tolerated case; confirm.
        for I1i in O0ii1i :
            if ( I1i . rloc . is_null ( ) ) : continue
            if ( IiI1I1iii11 . rloc . is_exact_match ( I1i . rloc ) ) : break
        if 63 - 63: o0oOOo0O0Ooo + i1IIi
        if ( I1i == O0ii1i [ - 1 ] ) : O0ii1i . append ( IiI1I1iii11 )
    if 31 - 31: OoooooooOO + o0oOOo0O0Ooo % OoooooooOO - II111iiii . OoooooooOO
    self . registered_rlocs = O0ii1i
    if 42 - 42: I11i * OOooOOo * OoOoOO00 % I1Ii111
    if 25 - 25: IiII
    if 60 - 60: oO0o - iIii1I11I1II1 / I1Ii111 * OoO0O00 . oO0o
    if 29 - 29: Oo0Ooo
    if ( len ( self . registered_rlocs ) == 0 ) : self . registered = False
    return
if 82 - 82: OoO0O00
if 93 - 93: Oo0Ooo
def merge_rles_in_site_eid ( self ) :
    """Merge the RLEs of all individual registrations into one RLE on
    the first registered RLOC. Returns True when the merged RLE node
    set differs from the previous one."""
    if 71 - 71: OoooooooOO - IiII . I1ii11iIi11i + OoooooooOO
    if 97 - 97: Ii1I - I1IiiI . OoooooooOO * IiII
    if 17 - 17: OoO0O00 / II111iiii / II111iiii / II111iiii
    if 70 - 70: OoO0O00 + O0 * OoO0O00
    # Snapshot the RLE node addresses currently held (first RLE found).
    Iii11I1i = { }
    for IiI1I1iii11 in self . registered_rlocs :
        if ( IiI1I1iii11 . rle == None ) : continue
        for IIi1i1111i in IiI1I1iii11 . rle . rle_nodes :
            o0o0O00 = IIi1i1111i . address . print_address_no_iid ( )
            Iii11I1i [ o0o0O00 ] = IIi1i1111i . address
        if 22 - 22: i1IIi % Oo0Ooo / oO0o % OoOoOO00 / OoOoOO00
        break
    if 79 - 79: IiII % OoooooooOO
    if 51 - 51: iII111i . oO0o % ooOoO0o % Ii1I . o0oOOo0O0Ooo
    if 43 - 43: II111iiii
    if 72 - 72: OoOoOO00 * oO0o - ooOoO0o / iII111i
    if 8 - 8: OoO0O00 * I1ii11iIi11i
    # Rebuild the merged RLOC list from the individual registrations.
    self . merge_rlocs_in_site_eid ( )
    if 18 - 18: O0 + I1Ii111 . I1ii11iIi11i
    if 48 - 48: Ii1I . o0oOOo0O0Ooo * O0 / OoooooooOO + I1Ii111 + Oo0Ooo
    if 92 - 92: Ii1I - o0oOOo0O0Ooo % I1IiiI + I1Ii111
    if 3 - 3: iIii1I11I1II1 + i11iIiiIii
    if 49 - 49: OoOoOO00 % iIii1I11I1II1 + I1Ii111
    if 38 - 38: i11iIiiIii
    if 75 - 75: iIii1I11I1II1 / OoO0O00 * OOooOOo % O0
    if 82 - 82: Oo0Ooo / i1IIi . i1IIi / oO0o
    # Keep only the first record plus any records without an RLE; the
    # merged RLE will be attached to the first record below.
    IIOO00 = [ ]
    for IiI1I1iii11 in self . registered_rlocs :
        if ( self . registered_rlocs . index ( IiI1I1iii11 ) == 0 ) :
            IIOO00 . append ( IiI1I1iii11 )
            continue
        if 14 - 14: iII111i . iII111i . I11i % I11i * oO0o
        if ( IiI1I1iii11 . rle == None ) : IIOO00 . append ( IiI1I1iii11 )
    if 77 - 77: I1ii11iIi11i
    self . registered_rlocs = IIOO00
    if 5 - 5: II111iiii . I1ii11iIi11i
    if 96 - 96: o0oOOo0O0Ooo + OoooooooOO - iII111i * O0
    if 12 - 12: OoO0O00 % i11iIiiIii - iII111i
    if 61 - 61: IiII / oO0o . I1Ii111 - IiII * IiII - iII111i
    if 49 - 49: Ii1I
    if 91 - 91: Ii1I / ooOoO0o % iII111i
    if 75 - 75: i1IIi
    # Union the RLE nodes of every registered child into one new RLE,
    # de-duplicated by address string. (py2-only dict.has_key.)
    iiiI1i1111II = lisp_rle ( "" )
    II1iiI = { }
    OO000 = None
    for Iiii1IIIiIi in self . individual_registrations . values ( ) :
        if ( Iiii1IIIiIi . registered == False ) : continue
        ii1I1 = Iiii1IIIiIi . registered_rlocs [ 0 ] . rle
        if ( ii1I1 == None ) : continue
        if 27 - 27: I11i + iIii1I11I1II1 * I1IiiI
        OO000 = Iiii1IIIiIi . registered_rlocs [ 0 ] . rloc_name
        for IIIiIII in ii1I1 . rle_nodes :
            o0o0O00 = IIIiIII . address . print_address_no_iid ( )
            if ( II1iiI . has_key ( o0o0O00 ) ) : break
            if 3 - 3: OoOoOO00 * OOooOOo - IiII - II111iiii * oO0o
            IIi1i1111i = lisp_rle_node ( )
            IIi1i1111i . address . copy_address ( IIIiIII . address )
            IIi1i1111i . level = IIIiIII . level
            IIi1i1111i . rloc_name = OO000
            iiiI1i1111II . rle_nodes . append ( IIi1i1111i )
            II1iiI [ o0o0O00 ] = IIIiIII . address
    if 23 - 23: I11i * I1ii11iIi11i . I11i
    if 70 - 70: i1IIi * I1ii11iIi11i . oO0o - I1IiiI * Ii1I * iII111i
    if 11 - 11: Oo0Ooo + I1ii11iIi11i
    if 92 - 92: iII111i / II111iiii + i1IIi / I1ii11iIi11i
    if 67 - 67: iII111i / IiII + I1IiiI + IiII % OoOoOO00 % I1ii11iIi11i
    if 7 - 7: I1ii11iIi11i % OoOoOO00 - O0 . I1Ii111
    # Attach the merged RLE (None when empty) to the first record.
    if ( len ( iiiI1i1111II . rle_nodes ) == 0 ) : iiiI1i1111II = None
    if ( len ( self . registered_rlocs ) != 0 ) :
        self . registered_rlocs [ 0 ] . rle = iiiI1i1111II
        if ( OO000 ) : self . registered_rlocs [ 0 ] . rloc_name = None
    if 9 - 9: Ii1I . OoooooooOO / ooOoO0o + i1IIi
    if 90 - 90: oO0o - OoOoOO00 % ooOoO0o
    if 83 - 83: OOooOOo - I1ii11iIi11i + OoO0O00
    if 99 - 99: iII111i - OoOoOO00 % ooOoO0o
    if 27 - 27: oO0o . oO0o * iII111i % iIii1I11I1II1
    # NOTE(review): py2 keys() returns lists, so this equality is
    # order-sensitive; insertion order makes it behave in practice.
    if ( Iii11I1i . keys ( ) == II1iiI . keys ( ) ) : return ( False )
    if 81 - 81: iII111i * II111iiii
    lprint ( "{} {} from {} to {}" . format ( green ( self . print_eid_tuple ( ) , False ) , bold ( "RLE change" , False ) ,
        Iii11I1i . keys ( ) , II1iiI . keys ( ) ) )
    if 11 - 11: OoooooooOO % oO0o - OoO0O00
    return ( True )
if 49 - 49: ooOoO0o + iII111i % OoooooooOO / Oo0Ooo % i1IIi
if 50 - 50: OoO0O00
def add_cache ( self ) :
    """Add this site-EID to the registered-sites cache; (S,G) entries
    hang off a per-group parent entry, created on first use."""
    if ( self . group . is_null ( ) ) :
        lisp_sites_by_eid . add_cache ( self . eid , self )
    else :
        ooOoOO0Oo = lisp_sites_by_eid . lookup_cache ( self . group , True )
        if ( ooOoOO0Oo == None ) :
            # First source for this group: build the (G,G) parent entry.
            ooOoOO0Oo = lisp_site_eid ( self . site )
            ooOoOO0Oo . eid . copy_address ( self . group )
            ooOoOO0Oo . group . copy_address ( self . group )
            lisp_sites_by_eid . add_cache ( self . group , ooOoOO0Oo )
            if 52 - 52: o0oOOo0O0Ooo + O0
            if 13 - 13: OoO0O00
            if 56 - 56: OoOoOO00 . ooOoO0o * oO0o - I11i
            if 47 - 47: oO0o . i1IIi * I1ii11iIi11i % OOooOOo % IiII / Oo0Ooo
            if 39 - 39: i11iIiiIii . OOooOOo + Oo0Ooo
            # The new parent inherits the AMS parent linkage.
            ooOoOO0Oo . parent_for_more_specifics = self . parent_for_more_specifics
        if 92 - 92: O0 * Oo0Ooo / o0oOOo0O0Ooo % OoO0O00
        # A null source EID becomes a (*,G) default route for the group.
        if ( self . eid . is_null ( ) ) : self . eid . make_default_route ( ooOoOO0Oo . group )
        ooOoOO0Oo . add_source_entry ( self )
if 87 - 87: OoooooooOO / I11i . O0
if 77 - 77: OOooOOo + oO0o * iIii1I11I1II1 / oO0o / OOooOOo . i11iIiiIii
if 92 - 92: Oo0Ooo . o0oOOo0O0Ooo % OoooooooOO * i11iIiiIii * OoO0O00 * o0oOOo0O0Ooo
def delete_cache ( self ) :
    """Remove this site-EID from the registered-sites cache; for (S,G)
    entries, remove the source from the group parent and drop the
    parent when its last source goes away."""
    if ( self . group . is_null ( ) ) :
        lisp_sites_by_eid . delete_cache ( self . eid )
    else :
        ooOoOO0Oo = lisp_sites_by_eid . lookup_cache ( self . group , True )
        if ( ooOoOO0Oo == None ) : return
        if 48 - 48: iII111i * I1ii11iIi11i * oO0o % O0 . OoO0O00
        Iiii1IIIiIi = ooOoOO0Oo . lookup_source_cache ( self . eid , True )
        if ( Iiii1IIIiIi == None ) : return
        if 11 - 11: OOooOOo / o0oOOo0O0Ooo
        if ( ooOoOO0Oo . source_cache == None ) : return
        if 98 - 98: oO0o + I11i . oO0o
        ooOoOO0Oo . source_cache . delete_cache ( self . eid )
        if ( ooOoOO0Oo . source_cache . cache_size ( ) == 0 ) :
            lisp_sites_by_eid . delete_cache ( self . group )
if 10 - 10: iII111i + i1IIi . I11i % ooOoO0o / ooOoO0o
if 86 - 86: Oo0Ooo
if 7 - 7: iIii1I11I1II1
if 86 - 86: IiII + iII111i * II111iiii - IiII - o0oOOo0O0Ooo
def add_source_entry ( self , source_se ) :
    """Record source site-EID 'source_se' in this entry's per-source
    cache, creating the cache on first use."""
    if self . source_cache is None :
        self . source_cache = lisp_cache ( )
    self . source_cache . add_cache ( source_se . eid , source_se )
if 8 - 8: OOooOOo . Ii1I
if 15 - 15: ooOoO0o / OOooOOo + i1IIi / Ii1I / OOooOOo
def lookup_source_cache ( self , source , exact ) :
    """Look 'source' up in the per-source cache; None when no cache exists."""
    cache = self . source_cache
    return None if cache is None else cache . lookup_cache ( source , exact )
if 47 - 47: Oo0Ooo + oO0o % OoooooooOO
if 23 - 23: I1Ii111 / i11iIiiIii - ooOoO0o * iII111i - Ii1I . iIii1I11I1II1
def is_star_g ( self ) :
    """True for a (*,G) entry: a group is present and the EID equals it."""
    g = self . group
    return ( not g . is_null ( ) ) and self . eid . is_exact_match ( g )
if 11 - 11: I11i % OoOoOO00 * Oo0Ooo
if 48 - 48: OOooOOo
def eid_record_matches ( self , eid_record ) :
    """True when 'eid_record' names this entry: the EIDs must match
    exactly, and when the record carries a group it must match too."""
    eids_match = self . eid . is_exact_match ( eid_record . eid )
    if eids_match == False : return False
    if eid_record . group . is_null ( ) : return True
    return eid_record . group . is_exact_match ( self . group )
if 66 - 66: iII111i - I1Ii111 - i11iIiiIii . o0oOOo0O0Ooo + Oo0Ooo
if 90 - 90: O0 - i11iIiiIii * ooOoO0o . I1ii11iIi11i . Ii1I - OoooooooOO
def inherit_from_ams_parent ( self ) :
    """Copy policy and proxy-reply knobs from the accept-more-specifics
    parent entry, when one exists."""
    parent = self . parent_for_more_specifics
    if parent is None : return
    for attr in ( "force_proxy_reply" , "force_nat_proxy_reply" ,
                  "force_ttl" , "pitr_proxy_reply_drop" ,
                  "proxy_reply_action" , "echo_nonce_capable" ,
                  "policy" , "require_signature" ) :
        setattr ( self , attr , getattr ( parent , attr ) )
if 23 - 23: o0oOOo0O0Ooo
if 88 - 88: I1Ii111 + iIii1I11I1II1 / o0oOOo0O0Ooo
def rtrs_in_rloc_set ( self ) :
    """True when any registered RLOC record is an RTR."""
    return any ( record . is_rtr ( ) for record in self . registered_rlocs )
if 63 - 63: I1ii11iIi11i / OOooOOo
if 28 - 28: I11i / I1Ii111 + IiII * OoooooooOO - iIii1I11I1II1
def is_rtr_in_rloc_set ( self , rtr_rloc ) :
    """True when 'rtr_rloc' appears in the registered set as an RTR RLOC."""
    for record in self . registered_rlocs :
        if record . rloc . is_exact_match ( rtr_rloc ) and record . is_rtr ( ) :
            return True
    return False
if 17 - 17: I1ii11iIi11i + OoooooooOO / iIii1I11I1II1 . II111iiii + Oo0Ooo
if 7 - 7: O0 - I1ii11iIi11i - iIii1I11I1II1
def is_rloc_in_rloc_set ( self , rloc ) :
    """True when 'rloc' matches a registered RLOC address or any node
    address inside a registered RLE."""
    for record in self . registered_rlocs :
        rle = record . rle
        if rle and any ( n . address . is_exact_match ( rloc ) for n in rle . rle_nodes ) :
            return True
        if record . rloc . is_exact_match ( rloc ) :
            return True
    return False
if 27 - 27: I11i - IiII - I1Ii111
if 90 - 90: OoO0O00 . oO0o * O0 / I11i % O0 + I1Ii111
def do_rloc_sets_match ( self , prev_rloc_set ) :
    """True when 'prev_rloc_set' has the same length as the registered
    set and every one of its RLOCs is present in the registered set."""
    if len ( prev_rloc_set ) != len ( self . registered_rlocs ) :
        return False
    return all ( self . is_rloc_in_rloc_set ( record . rloc )
        for record in prev_rloc_set )
if 92 - 92: I11i
if 64 - 64: iII111i / iII111i * iII111i % O0 / IiII . I1ii11iIi11i
if 23 - 23: i1IIi / I1ii11iIi11i + o0oOOo0O0Ooo
class lisp_mr ( ) :
    """A map-resolver: its address (literal or DNS-resolved) plus
    Map-Request/Map-Reply bookkeeping."""

    def __init__ ( self , addr_str , dns_name , mr_name ) :
        self . mr_name = mr_name if ( mr_name != None ) else "all"
        self . dns_name = dns_name
        self . map_resolver = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
        self . last_dns_resolve = None
        # Which A-record of a multi-address DNS answer this entry uses.
        self . a_record_index = 0
        # A literal address wins; otherwise resolve the DNS name.
        # NOTE: insert_mr()/resolve_dns_name() run before the counters
        # below exist, so they must not read them.
        if ( addr_str ) :
            self . map_resolver . store_address ( addr_str )
            self . insert_mr ( )
        else :
            self . resolve_dns_name ( )
        if 82 - 82: O0 * ooOoO0o * iIii1I11I1II1 . i1IIi
        # Request/reply statistics.
        self . last_used = 0
        self . last_reply = 0
        self . last_nonce = 0
        self . map_requests_sent = 0
        self . neg_map_replies_received = 0
        self . total_rtt = 0
if 47 - 47: I11i * I11i . OoOoOO00
if 68 - 68: OoooooooOO + OoOoOO00 + i11iIiiIii
def resolve_dns_name ( self ) :
if ( self . dns_name == None ) : return
if ( self . last_dns_resolve and
time . time ( ) - self . last_dns_resolve < 30 ) : return
if 89 - 89: Oo0Ooo + Ii1I * O0 - I1Ii111
try :
iI1 = socket . gethostbyname_ex ( self . dns_name )
self . last_dns_resolve = lisp_get_timestamp ( )
iiOOO0o = iI1 [ 2 ]
except :
return
if 59 - 59: OOooOOo * i1IIi
if 26 - 26: OOooOOo % ooOoO0o
if 80 - 80: o0oOOo0O0Ooo . iII111i . ooOoO0o + OOooOOo * I1IiiI / O0
if 61 - 61: I11i % OOooOOo + i11iIiiIii + I11i
if 69 - 69: OoOoOO00 + OoOoOO00 + o0oOOo0O0Ooo / iIii1I11I1II1 * OoO0O00
if 44 - 44: II111iiii / o0oOOo0O0Ooo
if ( len ( iiOOO0o ) <= self . a_record_index ) :
self . delete_mr ( )
return
if 81 - 81: I1Ii111 . Ii1I * ooOoO0o . IiII - OoOoOO00
if 79 - 79: ooOoO0o - O0
o0o0O00 = iiOOO0o [ self . a_record_index ]
if ( o0o0O00 != self . map_resolver . print_address_no_iid ( ) ) :
self . delete_mr ( )
self . map_resolver . store_address ( o0o0O00 )
self . insert_mr ( )
if 56 - 56: ooOoO0o
if 89 - 89: O0 % iIii1I11I1II1 / OoOoOO00 - I1Ii111 - I1IiiI
if 60 - 60: IiII % i11iIiiIii / OOooOOo
if 43 - 43: i11iIiiIii * II111iiii + ooOoO0o - OoooooooOO * II111iiii / OoO0O00
if 92 - 92: O0 - ooOoO0o % iII111i
if 83 - 83: I1ii11iIi11i / OoOoOO00 % OoooooooOO
if ( lisp_is_decent_dns_suffix ( self . dns_name ) == False ) : return
if ( self . a_record_index != 0 ) : return
if 54 - 54: I11i / I1IiiI * IiII - iII111i
for o0o0O00 in iiOOO0o [ 1 : : ] :
oOO0oo = lisp_address ( LISP_AFI_NONE , o0o0O00 , 0 , 0 )
oOO0O000OOo0 = lisp_get_map_resolver ( oOO0oo , None )
if ( oOO0O000OOo0 != None and oOO0O000OOo0 . a_record_index == iiOOO0o . index ( o0o0O00 ) ) :
continue
if 37 - 37: i1IIi * I1Ii111 / I11i * II111iiii + OoooooooOO . OoO0O00
oOO0O000OOo0 = lisp_mr ( o0o0O00 , None , None )
oOO0O000OOo0 . a_record_index = iiOOO0o . index ( o0o0O00 )
oOO0O000OOo0 . dns_name = self . dns_name
oOO0O000OOo0 . last_dns_resolve = lisp_get_timestamp ( )
if 22 - 22: OoOoOO00 + OoooooooOO - I1Ii111
if 82 - 82: Ii1I % I1Ii111 / ooOoO0o
if 86 - 86: II111iiii - iIii1I11I1II1 + oO0o + I1IiiI
if 29 - 29: Ii1I % OoooooooOO * II111iiii
if 88 - 88: I1Ii111 + I11i + I1Ii111 % OoO0O00 / I1ii11iIi11i - I11i
iIi11ii1 = [ ]
for oOO0O000OOo0 in lisp_map_resolvers_list . values ( ) :
if ( self . dns_name != oOO0O000OOo0 . dns_name ) : continue
oOO0oo = oOO0O000OOo0 . map_resolver . print_address_no_iid ( )
if ( oOO0oo in iiOOO0o ) : continue
iIi11ii1 . append ( oOO0O000OOo0 )
if 55 - 55: I1ii11iIi11i - I11i
for oOO0O000OOo0 in iIi11ii1 : oOO0O000OOo0 . delete_mr ( )
if 73 - 73: i11iIiiIii . OoO0O00 + OoO0O00 - OOooOOo % OOooOOo - OoO0O00
if 5 - 5: I1ii11iIi11i + i1IIi * I11i % iII111i
def insert_mr ( self ) :
iii11 = self . mr_name + self . map_resolver . print_address ( )
lisp_map_resolvers_list [ iii11 ] = self
if 96 - 96: ooOoO0o % I1ii11iIi11i % i11iIiiIii * I11i * iII111i . i11iIiiIii
if 65 - 65: i11iIiiIii / o0oOOo0O0Ooo % I1ii11iIi11i - O0 % OoooooooOO / o0oOOo0O0Ooo
def delete_mr ( self ) :
iii11 = self . mr_name + self . map_resolver . print_address ( )
if ( lisp_map_resolvers_list . has_key ( iii11 ) == False ) : return
lisp_map_resolvers_list . pop ( iii11 )
if 36 - 36: iII111i * OoO0O00 / OOooOOo * IiII * iIii1I11I1II1 / IiII
if 79 - 79: iIii1I11I1II1 - iIii1I11I1II1 * I1ii11iIi11i
if 96 - 96: iII111i / i11iIiiIii / oO0o + Oo0Ooo
class lisp_ddt_root():
    """Configuration state for a DDT root node: its RLOC address, its
    public key, and the priority/weight used when selecting referrals."""

    def __init__(self):
        self.root_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.public_key = ""
        self.priority = 0
        self.weight = 0
class lisp_referral():
    """A DDT referral cache entry: an (EID, group) tuple mapped to a set of
    referral nodes, with type, TTL, and expiry bookkeeping."""

    def __init__(self):
        self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.referral_set = {}
        self.referral_type = LISP_DDT_ACTION_NULL
        self.referral_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.referral_ttl = 0
        self.uptime = lisp_get_timestamp()
        self.expires = 0
        # Per-source sub-cache used for (S,G) entries; lazily allocated.
        self.source_cache = None

    def print_referral(self, eid_indent, referral_indent):
        """Log this referral entry and each node of its referral set."""
        uts = lisp_print_elapsed(self.uptime)
        ets = lisp_print_future(self.expires)
        lprint("{}Referral EID {}, uptime/expires {}/{}, {} referrals:".format(
            eid_indent, green(self.eid.print_prefix(), False), uts, ets,
            len(self.referral_set)))
        for ref_node in self.referral_set.values():
            ref_node.print_ref_node(referral_indent)

    def print_referral_type(self):
        """Return a short human-readable name for this referral's type."""
        if (self.eid.afi == LISP_AFI_ULTIMATE_ROOT): return "root"
        if (self.referral_type == LISP_DDT_ACTION_NULL):
            return "null-referral"
        if (self.referral_type == LISP_DDT_ACTION_SITE_NOT_FOUND):
            return "no-site-action"
        if (self.referral_type > LISP_DDT_ACTION_MAX):
            return "invalid-action"
        return lisp_map_referral_action_string[self.referral_type]

    def print_eid_tuple(self):
        return lisp_print_eid_tuple(self.eid, self.group)

    def print_ttl(self):
        """Return the referral TTL as a "<n> secs"/"<n> mins" string."""
        ttl = self.referral_ttl
        if (ttl < 60): return str(ttl) + " secs"
        if ((ttl % 60) == 0):
            # Bug fix: use floor division so the value stays an integer on
            # Python 3 (plain "/" would print e.g. "2.0 mins").
            ttl = str(ttl // 60) + " mins"
        else:
            ttl = str(ttl) + " secs"
        return ttl

    def is_referral_negative(self):
        """True for referral types that convey a negative answer."""
        return (self.referral_type in (LISP_DDT_ACTION_MS_NOT_REG,
            LISP_DDT_ACTION_DELEGATION_HOLE, LISP_DDT_ACTION_NOT_AUTH))

    def add_cache(self):
        """Insert into the global referral cache. Unicast entries are keyed
        by EID; (S,G) entries nest under a group entry's source cache."""
        if (self.group.is_null()):
            lisp_referral_cache.add_cache(self.eid, self)
        else:
            gentry = lisp_referral_cache.lookup_cache(self.group, True)
            if (gentry == None):
                gentry = lisp_referral()
                gentry.eid.copy_address(self.group)
                gentry.group.copy_address(self.group)
                lisp_referral_cache.add_cache(self.group, gentry)
            # A null source EID means (*,G); use the default route form.
            if (self.eid.is_null()): self.eid.make_default_route(gentry.group)
            gentry.add_source_entry(self)

    def delete_cache(self):
        """Remove from the global referral cache, deleting an (S,G) group
        entry entirely once its last source entry is gone."""
        if (self.group.is_null()):
            lisp_referral_cache.delete_cache(self.eid)
        else:
            gentry = lisp_referral_cache.lookup_cache(self.group, True)
            if (gentry == None): return
            sentry = gentry.lookup_source_cache(self.eid, True)
            if (sentry == None): return
            gentry.source_cache.delete_cache(self.eid)
            if (gentry.source_cache.cache_size() == 0):
                lisp_referral_cache.delete_cache(self.group)

    def add_source_entry(self, source_ref):
        """Add source_ref to this (group) entry's source cache."""
        if (self.source_cache == None): self.source_cache = lisp_cache()
        self.source_cache.add_cache(source_ref.eid, source_ref)

    def lookup_source_cache(self, source, exact):
        """Look up 'source' in the source cache; None when no cache."""
        if (self.source_cache == None): return None
        return self.source_cache.lookup_cache(source, exact)
class lisp_referral_node():
    """One RLOC inside a referral set, with priority/weight, up/down state
    and request statistics."""

    def __init__(self):
        self.referral_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.priority = 0
        self.weight = 0
        self.updown = True
        self.map_requests_sent = 0
        self.no_responses = 0
        self.uptime = lisp_get_timestamp()

    def print_ref_node(self, indent):
        """Log a one-line summary of this referral node."""
        elapsed = lisp_print_elapsed(self.uptime)
        state = "up" if self.updown else "down"
        lprint("{}referral {}, uptime {}, {}, priority/weight: {}/{}".format(
            indent, red(self.referral_address.print_address(), False),
            elapsed, state, self.priority, self.weight))
class lisp_ms():
    """A configured Map-Server peer, created from a literal address or a
    DNS name that is periodically re-resolved. Instances register
    themselves in the global lisp_map_servers_list."""

    def __init__(self, addr_str, dns_name, ms_name, alg_id, key_id, pw, pr,
                 mr, rr, wmn, site_id, ekey_id, ekey):
        self.ms_name = ms_name if (ms_name != None) else "all"
        self.dns_name = dns_name
        self.map_server = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.last_dns_resolve = None
        self.a_record_index = 0
        if (lisp_map_servers_list == {}):
            self.xtr_id = lisp_get_control_nonce()
        else:
            # All map-servers share one xtr-id. Bug fix: wrap in list() so
            # indexing works on Python 3, where dict.values() returns a
            # non-indexable view; harmless on Python 2.
            self.xtr_id = list(lisp_map_servers_list.values())[0].xtr_id
        self.alg_id = alg_id
        self.key_id = key_id
        self.password = pw
        self.proxy_reply = pr
        self.merge_registrations = mr
        self.refresh_registrations = rr
        self.want_map_notify = wmn
        self.site_id = site_id
        self.map_registers_sent = 0
        self.map_registers_multicast_sent = 0
        self.map_notifies_received = 0
        self.map_notify_acks_sent = 0
        self.ekey_id = ekey_id
        self.ekey = ekey
        if (addr_str):
            self.map_server.store_address(addr_str)
            self.insert_ms()
        else:
            self.resolve_dns_name()

    def resolve_dns_name(self):
        """Refresh self.map_server from DNS, throttled to one lookup every
        30 seconds. Removes this entry when its A-record is gone and, for
        LISP-Decent suffixes, keeps one lisp_ms per A-record."""
        if (self.dns_name == None): return
        if (self.last_dns_resolve and
            time.time() - self.last_dns_resolve < 30): return

        try:
            answer = socket.gethostbyname_ex(self.dns_name)
            self.last_dns_resolve = lisp_get_timestamp()
            a_records = answer[2]
        except:
            # Best-effort: DNS failure leaves the current address in place.
            return

        # Fewer A-records than our index means our address went away.
        if (len(a_records) <= self.a_record_index):
            self.delete_ms()
            return

        addr = a_records[self.a_record_index]
        if (addr != self.map_server.print_address_no_iid()):
            self.delete_ms()
            self.map_server.store_address(addr)
            self.insert_ms()

        # Only the first (index 0) entry of a LISP-Decent name tracks the
        # remaining A-records, creating one lisp_ms per extra address.
        if (lisp_is_decent_dns_suffix(self.dns_name) == False): return
        if (self.a_record_index != 0): return

        for addr in a_records[1::]:
            address = lisp_address(LISP_AFI_NONE, addr, 0, 0)
            ms = lisp_get_map_server(address)
            if (ms != None and ms.a_record_index == a_records.index(addr)):
                continue
            ms = copy.deepcopy(self)
            ms.map_server.store_address(addr)
            ms.a_record_index = a_records.index(addr)
            ms.last_dns_resolve = lisp_get_timestamp()
            ms.insert_ms()

        # Remove map-servers whose addresses no longer appear in DNS.
        delete_list = []
        for ms in lisp_map_servers_list.values():
            if (self.dns_name != ms.dns_name): continue
            address = ms.map_server.print_address_no_iid()
            if (address in a_records): continue
            delete_list.append(ms)
        for ms in delete_list: ms.delete_ms()

    def insert_ms(self):
        """Index this map-server in the global list."""
        key = self.ms_name + self.map_server.print_address()
        lisp_map_servers_list[key] = self

    def delete_ms(self):
        """Remove this map-server from the global list if present."""
        key = self.ms_name + self.map_server.print_address()
        # "in" replaces Python-2-only dict.has_key(); works on 2 and 3.
        if (key not in lisp_map_servers_list): return
        lisp_map_servers_list.pop(key)
class lisp_interface():
    """Per-interface state: device name, optional instance-id, dynamic-EID
    configuration, and raw/bridge sockets used for packet forwarding."""

    def __init__(self, device):
        self.interface_name = ""
        self.device = device
        self.instance_id = None
        self.bridge_socket = None
        self.raw_socket = None
        self.dynamic_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.dynamic_eid_device = None
        self.dynamic_eid_timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
        self.multi_tenant_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)

    def add_interface(self):
        """Register this interface in the global interface table."""
        lisp_myinterfaces[self.device] = self

    def get_instance_id(self):
        return self.instance_id

    def get_socket(self):
        return self.raw_socket

    def get_bridge_socket(self):
        return self.bridge_socket

    def does_dynamic_eid_match(self, eid):
        """True when 'eid' falls inside this interface's dynamic-EID
        prefix; always False when no dynamic-EID is configured."""
        if (self.dynamic_eid.is_null()): return False
        return eid.is_more_specific(self.dynamic_eid)

    def set_socket(self, device):
        """Open a raw IPv4 send socket bound to 'device'. On failure the
        socket is closed and self.raw_socket is left as None."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_RAW,
            socket.IPPROTO_RAW)
        sock.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
        try:
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_BINDTODEVICE, device)
        except:
            sock.close()
            sock = None
        self.raw_socket = sock

    def set_bridge_socket(self, device):
        """Open a PF_PACKET socket bound to 'device' for L2 bridging."""
        sock = socket.socket(socket.PF_PACKET, socket.SOCK_RAW)
        try:
            # Bug fix: socket.bind() returns None; the original assigned
            # its return value back to the variable, so self.bridge_socket
            # was always set to None and the socket object was lost.
            sock.bind((device, 0))
            self.bridge_socket = sock
        except:
            return
class lisp_datetime():
    """Wraps a datetime string of the form "yyyy-mm-dd-hh:mm:ss" and
    stores it as a comparable integer (yyyymmddhhmmss)."""

    def __init__(self, datetime_str):
        self.datetime_name = datetime_str
        self.datetime = None
        self.parse_datetime()

    def valid_datetime(self):
        """Syntactically validate self.datetime_name, including field
        ranges. Returns True/False."""
        ds = self.datetime_name
        if (ds.find(":") == -1): return False
        if (ds.find("-") == -1): return False
        year, month, day, time_str = ds[0:4], ds[5:7], ds[8:10], ds[11::]

        if ((year + month + day).isdigit() == False): return False
        # Bug fix: the original used "and" (e.g. month < "01" and
        # month > "12"), a condition that can never be true, so
        # out-of-range fields were silently accepted.
        if (month < "01" or month > "12"): return False
        if (day < "01" or day > "31"): return False

        hour, minute, second = time_str.split(":")
        if ((hour + minute + second).isdigit() == False): return False
        if (hour < "00" or hour > "23"): return False
        if (minute < "00" or minute > "59"): return False
        if (second < "00" or second > "59"): return False
        return True

    def parse_datetime(self):
        """Strip punctuation and store the integer form in self.datetime."""
        ds = self.datetime_name
        ds = ds.replace("-", "")
        ds = ds.replace(":", "")
        self.datetime = int(ds)

    def now(self):
        """Return a lisp_datetime for the current wall-clock time."""
        ts = datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S")
        return lisp_datetime(ts)

    def print_datetime(self):
        return self.datetime_name

    def future(self):
        """True when this datetime is later than now."""
        return self.datetime > self.now().datetime

    def past(self):
        return self.future() == False

    def now_in_range(self, upper):
        """True when now lies between this datetime and 'upper'."""
        return self.past() and upper.future()

    def this_year(self):
        current = str(self.now().datetime)[0:4]
        return str(self.datetime)[0:4] == current

    def this_month(self):
        current = str(self.now().datetime)[0:6]
        return str(self.datetime)[0:6] == current

    def today(self):
        current = str(self.now().datetime)[0:8]
        return str(self.datetime)[0:8] == current
class lisp_policy_match():
    """A single match clause of a lisp_policy. Every field defaults to
    None, which acts as a wildcard for that criterion."""

    def __init__(self):
        self.source_eid = None
        self.dest_eid = None
        self.source_rloc = None
        self.dest_rloc = None
        self.rloc_record_name = None
        self.geo_name = None
        self.elp_name = None
        self.rle_name = None
        self.json_name = None
        self.datetime_lower = None
        self.datetime_upper = None
class lisp_policy():
    """A named policy: a list of match clauses plus "set" actions that are
    applied to Map-Replies when a clause matches."""

    def __init__(self, policy_name):
        self.policy_name = policy_name
        self.match_clauses = []
        self.set_action = None
        self.set_record_ttl = None
        self.set_source_eid = None
        self.set_dest_eid = None
        self.set_rloc_address = None
        self.set_rloc_record_name = None
        self.set_geo_name = None
        self.set_elp_name = None
        self.set_rle_name = None
        self.set_json_name = None

    def match_policy_map_request(self, mr, srloc):
        """Return True when Map-Request 'mr' (with source RLOC 'srloc')
        satisfies any match clause. A clause criterion of None (or a None
        value on the request side) is a wildcard."""
        for clause in self.match_clauses:
            prefix = clause.source_eid
            value = mr.source_eid
            if (prefix and value and
                value.is_more_specific(prefix) == False): continue

            prefix = clause.dest_eid
            value = mr.target_eid
            if (prefix and value and
                value.is_more_specific(prefix) == False): continue

            prefix = clause.source_rloc
            value = srloc
            if (prefix and value and
                value.is_more_specific(prefix) == False): continue

            lower = clause.datetime_lower
            upper = clause.datetime_upper
            if (lower and upper and lower.now_in_range(upper) == False):
                continue
            return True
        return False

    def set_policy_map_reply(self):
        """Build a lisp_rloc carrying this policy's set-* values, or None
        when no RLOC-related set actions are configured."""
        nothing_to_set = (self.set_rloc_address == None and
            self.set_rloc_record_name == None and self.set_geo_name == None
            and self.set_elp_name == None and self.set_rle_name == None)
        if (nothing_to_set): return None

        rloc = lisp_rloc()
        if (self.set_rloc_address):
            rloc.rloc.copy_address(self.set_rloc_address)
            addr = rloc.rloc.print_address_no_iid()
            lprint("Policy set-rloc-address to {}".format(addr))
        if (self.set_rloc_record_name):
            rloc.rloc_name = self.set_rloc_record_name
            name = blue(rloc.rloc_name, False)
            lprint("Policy set-rloc-record-name to {}".format(name))
        if (self.set_geo_name):
            rloc.geo_name = self.set_geo_name
            name = rloc.geo_name
            # "in" replaces Python-2-only dict.has_key(); works on 2 and 3.
            not_found = "" if name in lisp_geo_list else "(not configured)"
            lprint("Policy set-geo-name '{}' {}".format(name, not_found))
        if (self.set_elp_name):
            rloc.elp_name = self.set_elp_name
            name = rloc.elp_name
            not_found = "" if name in lisp_elp_list else "(not configured)"
            lprint("Policy set-elp-name '{}' {}".format(name, not_found))
        if (self.set_rle_name):
            rloc.rle_name = self.set_rle_name
            name = rloc.rle_name
            not_found = "" if name in lisp_rle_list else "(not configured)"
            lprint("Policy set-rle-name '{}' {}".format(name, not_found))
        if (self.set_json_name):
            rloc.json_name = self.set_json_name
            name = rloc.json_name
            not_found = "" if name in lisp_json_list else "(not configured)"
            lprint("Policy set-json-name '{}' {}".format(name, not_found))
        return rloc

    def save_policy(self):
        """Register this policy in the global policy table by name."""
        lisp_policies[self.policy_name] = self
class lisp_pubsub():
    """Subscription state for one ITR (identified by xtr-id) interested in
    Map-Notify updates for an EID-prefix."""

    def __init__(self, itr, port, nonce, ttl, xtr_id):
        self.itr = itr
        self.port = port
        self.nonce = nonce
        self.uptime = lisp_get_timestamp()
        self.ttl = ttl
        self.xtr_id = xtr_id
        self.map_notify_count = 0

    def add(self, eid_prefix):
        """Add (or replace) this subscription under eid_prefix in the
        global pubsub cache, keyed by the subscriber's xtr-id."""
        ttl = self.ttl
        prefix = eid_prefix.print_prefix()
        # "in"/"not in" replace Python-2-only dict.has_key().
        if (prefix not in lisp_pubsub_cache):
            lisp_pubsub_cache[prefix] = {}
        entry = lisp_pubsub_cache[prefix]

        action = "Add"
        if (self.xtr_id in entry):
            action = "Replace"
            del(entry[self.xtr_id])
        entry[self.xtr_id] = self

        prefix = green(prefix, False)
        itr = red(self.itr.print_address_no_iid(), False)
        xtr_id = "0x" + lisp_hex_string(self.xtr_id)
        lprint("{} pubsub state {} for {}, xtr-id: {}, ttl {}".format(action,
            prefix, itr, xtr_id, ttl))

    def delete(self, eid_prefix):
        """Remove this subscriber's state for eid_prefix, if any."""
        prefix = eid_prefix.print_prefix()
        itr = red(self.itr.print_address_no_iid(), False)
        xtr_id = "0x" + lisp_hex_string(self.xtr_id)
        if (prefix in lisp_pubsub_cache):
            entry = lisp_pubsub_cache[prefix]
            if (self.xtr_id in entry):
                entry.pop(self.xtr_id)
                lprint("Remove pubsub state {} for {}, xtr-id: {}".format(
                    prefix, itr, xtr_id))
class lisp_trace():
    """State for one LISP-Trace message: a control nonce, the JSON trace
    records accumulated so far, and the local RLOC/port parsed from a
    received packet."""

    def __init__(self):
        self.nonce = lisp_get_control_nonce()
        self.packet_json = []
        self.local_rloc = None
        self.local_port = None
        self.lisp_socket = None

    def print_trace(self):
        """Log the accumulated JSON trace records."""
        lprint("LISP-Trace JSON: '{}'".format(self.packet_json))

    def encode(self):
        """Build a LISP-Trace packet: type word (0x90 in the high byte),
        a zero address word, the 64-bit nonce, then the JSON payload."""
        first_long = socket.htonl(0x90000000)
        packet = struct.pack("II", first_long, 0)
        packet += struct.pack("Q", self.nonce)
        packet += json.dumps(self.packet_json)
        return packet

    def decode(self, packet):
        """Parse a LISP-Trace packet into this instance. Returns True on
        success, False on any truncation, type mismatch, or bad JSON."""
        fmt = "I"
        size = struct.calcsize(fmt)
        if (len(packet) < size): return False
        first_long = struct.unpack(fmt, packet[:size])[0]
        packet = packet[size::]
        first_long = socket.ntohl(first_long)
        if ((first_long & 0xff000000) != 0x90000000): return False

        if (len(packet) < size): return False
        addr = struct.unpack(fmt, packet[:size])[0]
        packet = packet[size::]

        # Second word is a packed IPv4 address; low 16 bits of the first
        # word carry the local port.
        addr = socket.ntohl(addr)
        byte1 = addr >> 24
        byte2 = (addr >> 16) & 0xff
        byte3 = (addr >> 8) & 0xff
        byte4 = addr & 0xff
        self.local_rloc = "{}.{}.{}.{}".format(byte1, byte2, byte3, byte4)
        self.local_port = str(first_long & 0xffff)

        fmt = "Q"
        size = struct.calcsize(fmt)
        if (len(packet) < size): return False
        self.nonce = struct.unpack(fmt, packet[:size])[0]
        packet = packet[size::]
        if (len(packet) == 0): return True

        try:
            self.packet_json = json.loads(packet)
        except:
            return False
        return True

    def myeid(self, eid):
        """True when 'eid' is one of this system's EIDs."""
        return lisp_is_myeid(eid)

    def return_to_sender(self, lisp_socket, rts_rloc, packet):
        """Send 'packet' back to the originator, using the RTR NAT cache
        translation when one exists for rts_rloc ("addr:port")."""
        rloc, port = self.rtr_cache_nat_trace_find(rts_rloc)
        if (rloc == None):
            rloc, port = rts_rloc.split(":")
            port = int(port)
            lprint("Send LISP-Trace to address {}:{}".format(rloc, port))
        else:
            lprint("Send LISP-Trace to translated address {}:{}".format(rloc,
                port))

        if (lisp_socket == None):
            sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            sock.bind(("0.0.0.0", LISP_TRACE_PORT))
            sock.sendto(packet, (rloc, port))
            sock.close()
        else:
            lisp_socket.sendto(packet, (rloc, port))

    def packet_length(self):
        """Length of the trace packet: UDP header + trace header + JSON."""
        udp_header = 8
        trace_header = 4 + 4 + 8
        return udp_header + trace_header + len(json.dumps(self.packet_json))

    def rtr_cache_nat_trace(self, translated_rloc, translated_port):
        """Cache the NAT translation for our local RLOC:port."""
        key = self.local_rloc + ":" + self.local_port
        value = (translated_rloc, translated_port)
        lisp_rtr_nat_trace_cache[key] = value
        lprint("Cache NAT Trace addresses {} -> {}".format(key, value))

    def rtr_cache_nat_trace_find(self, local_rloc_and_port):
        """Return the cached (rloc, port) translation, or (None, None)."""
        try:
            value = lisp_rtr_nat_trace_cache[local_rloc_and_port]
        except:
            value = (None, None)
        return value
def lisp_get_map_server(address):
    """Return the configured map-server whose address exactly matches
    'address', or None when no configured map-server matches.
    """
    for map_server in lisp_map_servers_list.values():
        if map_server.map_server.is_exact_match(address):
            return map_server
    return None
def lisp_get_any_map_server():
    """Return an arbitrary configured map-server (the first one the
    table yields), or None when none are configured.
    """
    return next(iter(lisp_map_servers_list.values()), None)
def lisp_get_map_resolver(address, eid):
    """Select a map-resolver.

    When 'address' is supplied, return the map-resolver whose table key
    contains that address string (last match wins), or None.

    Otherwise select by EID: eid == "" matches any resolver (first one
    is returned); eid == None selects resolvers named "all"; a concrete
    EID is looked up in the database to obtain its 'use_mr_name'.
    Among matching resolvers the least-recently-used one is returned.
    """
    if address != None:
        addr_str = address.print_address()
        found = None
        for key in lisp_map_resolvers_list:
            if key.find(addr_str) == -1: continue
            found = lisp_map_resolvers_list[key]
        return found

    #
    # Map the EID argument to a map-resolver name.
    #
    if eid == "":
        mr_name = ""
    elif eid == None:
        mr_name = "all"
    else:
        db = lisp_db_for_lookups.lookup_cache(eid, False)
        mr_name = "all" if db == None else db.use_mr_name

    #
    # Pick the least-recently-used resolver with that name.
    #
    lru = None
    for mr in lisp_map_resolvers_list.values():
        if mr_name == "": return mr
        if mr.mr_name != mr_name: continue
        if lru == None or mr.last_used < lru.last_used: lru = mr
    return lru
def lisp_get_decent_map_resolver(eid):
    """LISP-Decent pull mode: hash the EID to an index, form the DNS
    name "<index>.<suffix>", and return the least-recently-used
    map-resolver configured with that dns_name (None if no match).
    """
    index = lisp_get_decent_index(eid)
    dns_name = str(index) + "." + lisp_decent_dns_suffix

    lprint("Use LISP-Decent map-resolver {} for EID {}".format(
        bold(dns_name, False), eid.print_prefix()))

    lru = None
    for mr in lisp_map_resolvers_list.values():
        if dns_name != mr.dns_name: continue
        if lru == None or mr.last_used < lru.last_used: lru = mr
    return lru
def lisp_ipv4_input(packet):
    """Validate an inner IPv4 header before encapsulation.

    Verifies the header checksum (a received checksum of 0 is accepted
    with a log message), drops packets whose TTL is 0 or would expire
    (TTL 1), otherwise decrements the TTL and recomputes the checksum.

    Returns the updated packet bytes, or None when the packet must be
    discarded.
    """

    #
    # Checksum the 20-byte header. Recomputing over a correct header
    # yields 0 in the checksum field; nonzero means corruption.
    #
    checksum = struct.unpack("H", packet[10:12])[0]
    if checksum == 0:
        dprint("Packet arrived with checksum of 0!")
    else:
        packet = lisp_ip_checksum(packet)
        checksum = struct.unpack("H", packet[10:12])[0]
        if checksum != 0:
            dprint("IPv4 header checksum failed for inner header")
            packet = lisp_format_packet(packet[0:20])
            dprint("Packet header: {}".format(packet))
            return None

    #
    # TTL checks: 0 is illegal on arrival, 1 would expire on decrement.
    #
    ttl = struct.unpack("B", packet[8:9])[0]
    if ttl == 0:
        dprint("IPv4 packet arrived with ttl 0, packet discarded")
        return None
    elif ttl == 1:
        dprint("IPv4 packet {}, packet discarded".format(bold("ttl expiry",
            False)))
        return None

    #
    # Decrement TTL, zero the checksum field, and recompute it.
    #
    ttl -= 1
    packet = packet[0:8] + struct.pack("B", ttl) + packet[9::]
    packet = packet[0:10] + struct.pack("H", 0) + packet[12::]
    return lisp_ip_checksum(packet)
def lisp_ipv6_input(packet):
    """Validate an inner IPv6 header before encapsulation.

    'packet' is a lisp_packet-like object carrying .inner_dest and the
    raw bytes in .packet. Drops packets whose hop-limit is 0 or would
    expire (1), and packets destined to an IPv6 link-local address;
    otherwise decrements the hop-limit.

    Returns the updated raw packet bytes, or None to discard.
    """
    dest = packet.inner_dest
    data = packet.packet

    #
    # Hop-limit checks: 0 is illegal on arrival, 1 would expire here.
    #
    hop_limit = struct.unpack("B", data[7:8])[0]
    if hop_limit == 0:
        dprint("IPv6 packet arrived with hop-limit 0, packet discarded")
        return None
    elif hop_limit == 1:
        dprint("IPv6 packet {}, packet discarded".format(bold("ttl expiry",
            False)))
        return None

    #
    # Link-local destinations must not be encapsulated.
    #
    if dest.is_ipv6_link_local():
        dprint("Do not encapsulate IPv6 link-local packets")
        return None

    hop_limit -= 1
    return data[0:7] + struct.pack("B", hop_limit) + data[8::]
def lisp_mac_input(packet):
    """MAC-layer input hook: L2 frames carry no TTL or header checksum
    to process, so the frame is passed through unchanged.
    """
    return packet
def lisp_rate_limit_map_request(source, dest):
    """Return True when a Map-Request for (source, dest) should be
    suppressed because the previous one was sent less than
    LISP_MAP_REQUEST_RATE_LIMIT seconds ago; False otherwise (including
    when no Map-Request has been sent yet).
    """
    if lisp_last_map_request_sent == None: return False

    elapsed = lisp_get_timestamp() - lisp_last_map_request_sent
    rate_limited = (elapsed < LISP_MAP_REQUEST_RATE_LIMIT)

    if rate_limited:
        if source != None: source = source.print_address()
        dest = dest.print_address()
        dprint("Rate-limiting Map-Request for {} -> {}".format(source, dest))
    return rate_limited
def lisp_send_map_request(lisp_sockets, lisp_ephem_port, seid, deid, rloc):
    """Build and send a Map-Request for 'deid' (sourced from 'seid').

    When 'rloc' is supplied this is an RLOC-probe: the request is sent
    directly to that RLOC (data-encapsulated when the RLOC is behind a
    NAT). Otherwise the request is ECM-encapsulated and sent to the
    least-recently-used map-resolver. Updates the module-level
    rate-limit timestamp on success.
    """
    global lisp_last_map_request_sent

    #
    # Probing a specific RLOC? Capture destination address and port.
    #
    probe_dest = probe_port = None
    if rloc:
        probe_dest = rloc.rloc
        probe_port = rloc.translated_port if lisp_i_am_rtr else LISP_DATA_PORT

    #
    # Need our own RLOCs to fill in the ITR-RLOCs list.
    #
    ipv4_rloc, ipv6_rloc, device = lisp_myrlocs
    if ipv4_rloc == None:
        lprint("Suppress sending Map-Request, IPv4 RLOC not found")
        return
    if ipv6_rloc == None and probe_dest != None and probe_dest.is_ipv6():
        lprint("Suppress sending Map-Request, IPv6 RLOC not found")
        return

    map_request = lisp_map_request()
    map_request.record_count = 1
    map_request.nonce = lisp_get_control_nonce()
    map_request.rloc_probe = (probe_dest != None)

    #
    # Remember the probe nonce so the Map-Reply can be matched.
    #
    if rloc: rloc.last_rloc_probe_nonce = map_request.nonce

    #
    # (S,G) requests put the source in target_eid and group in
    # target_group; unicast requests use target_eid only.
    #
    multicast = deid.is_multicast_address()
    if multicast:
        map_request.target_eid = seid
        map_request.target_group = deid
    else:
        map_request.target_eid = deid

    #
    # Non-probe requests may be signed with a configured signature-EID.
    #
    if map_request.rloc_probe == False:
        sig_db = lisp_get_signature_eid()
        if sig_db:
            map_request.signature_eid.copy_address(sig_db.eid)
            map_request.privkey_filename = "./lisp-sig.pem"

    if seid == None or multicast:
        map_request.source_eid.afi = LISP_AFI_NONE
    else:
        map_request.source_eid = seid

    #
    # NAT traversal (xTR case): probes to public RLOCs must carry our
    # translated RLOC as the ITR-RLOC.
    #
    if probe_dest != None and lisp_nat_traversal and lisp_i_am_rtr == False:
        if probe_dest.is_private_address() == False:
            ipv4_rloc = lisp_get_any_translated_rloc()
        if ipv4_rloc == None:
            lprint("Suppress sending Map-Request, translated RLOC not found")
            return

    #
    # Fill in ITR-RLOCs per address-family.
    #
    if probe_dest == None or probe_dest.is_ipv4():
        if lisp_nat_traversal and probe_dest == None:
            translated = lisp_get_any_translated_rloc()
            if translated != None: ipv4_rloc = translated
        map_request.itr_rlocs.append(ipv4_rloc)

    if probe_dest == None or probe_dest.is_ipv6():
        if ipv6_rloc == None or ipv6_rloc.is_ipv6_link_local():
            ipv6_rloc = None
        else:
            map_request.itr_rloc_count = 1 if (probe_dest == None) else 0
            map_request.itr_rlocs.append(ipv6_rloc)

    #
    # Choose the inner source for the ECM (first ITR-RLOC for probes,
    # else the RLOC matching the destination EID's family).
    #
    if probe_dest != None and map_request.itr_rlocs != []:
        inner_source = map_request.itr_rlocs[0]
    else:
        if deid.is_ipv4():
            inner_source = ipv4_rloc
        elif deid.is_ipv6():
            inner_source = ipv6_rloc
        else:
            inner_source = ipv4_rloc

    packet = map_request.encode(probe_dest, probe_port)
    map_request.print_map_request()

    #
    # RLOC-probe: send directly (data-encapsulated through the NAT if
    # the target RLOC is translated, including gleaned RLOCs with no
    # cached NAT state).
    #
    if probe_dest != None:
        if rloc.is_rloc_translated():
            nat_info = lisp_get_nat_info(probe_dest, rloc.rloc_name)

            if nat_info == None:
                addr_str = rloc.rloc.print_address_no_iid()
                hostname = "gleaned-{}".format(addr_str)
                nat_info = lisp_nat_info(addr_str, hostname,
                    rloc.translated_port)

            lisp_encapsulate_rloc_probe(lisp_sockets, probe_dest, nat_info,
                packet)
            return

        addr_str = probe_dest.print_address_no_iid()
        dest = lisp_convert_4to6(addr_str)
        lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
        return

    #
    # Regular request: pick a map-resolver (LISP-Decent hashes the EID).
    #
    mr_eid = None if lisp_i_am_rtr else seid
    if lisp_decent_pull_xtr_configured():
        mr = lisp_get_decent_map_resolver(deid)
    else:
        mr = lisp_get_map_resolver(None, mr_eid)

    if mr == None:
        lprint("Cannot find Map-Resolver for source-EID {}".format(
            green(seid.print_address(), False)))
        return

    mr.last_used = lisp_get_timestamp()
    mr.map_requests_sent += 1
    if mr.last_nonce == 0: mr.last_nonce = map_request.nonce

    #
    # ECM-encapsulate and send to the map-resolver.
    #
    if seid == None: seid = inner_source
    lisp_send_ecm(lisp_sockets, packet, seid, lisp_ephem_port, deid,
        mr.map_resolver)

    lisp_last_map_request_sent = lisp_get_timestamp()

    #
    # Refresh the resolver's DNS name for the next request.
    #
    mr.resolve_dns_name()
    return
def lisp_send_info_request(lisp_sockets, dest, port, device_name):
    """Build and send a NAT-traversal Info-Request to 'dest':'port'.

    When 'device_name' is given, a temporary host route to the
    destination is installed out that device so the request egresses
    the intended interface (multi-homing); the previous next-hop is
    restored afterwards. Control-port requests are sent natively;
    data-port requests are prepended with a LISP data header using
    instance-id 0xffffff.
    """
    info = lisp_info()
    info.nonce = lisp_get_control_nonce()
    if device_name: info.hostname += "-" + device_name

    dest_str = dest.print_address_no_iid()

    #
    # Steer the request out 'device_name' by pinning a host route,
    # remembering any existing next-hop so it can be restored.
    #
    installed_host_route = False
    if device_name:
        saved_nh = lisp_get_host_route_next_hop(dest_str)

        #
        # For the control port, wait for any lingering host route
        # (installed by a concurrent data-port exchange) to go away.
        #
        if port == LISP_CTRL_PORT and saved_nh != None:
            while True:
                time.sleep(.01)
                saved_nh = lisp_get_host_route_next_hop(dest_str)
                if saved_nh == None: break

        default_next_hops = lisp_get_default_route_next_hops()
        for device, next_hop in default_next_hops:
            if device != device_name: continue

            #
            # Replace any existing host route with one via this
            # device's default next-hop.
            #
            if saved_nh != next_hop:
                if saved_nh != None:
                    lisp_install_host_route(dest_str, saved_nh, False)
                lisp_install_host_route(dest_str, next_hop, True)
                installed_host_route = True
            break

    packet = info.encode()
    info.print_info()

    #
    # Log where this Info-Request is headed.
    #
    purpose = "(for control)" if port == LISP_CTRL_PORT else "(for data)"
    purpose = bold(purpose, False)
    port_str = bold("{}".format(port), False)
    dest_log = red(dest_str, False)
    node = "RTR " if port == LISP_DATA_PORT else "MS "
    lprint("Send Info-Request to {}{}, port {} {}".format(node, dest_log,
        port_str, purpose))

    #
    # Control port goes natively; data port is wrapped in a LISP data
    # header with instance-id 0xffffff so the NAT opens a data-plane
    # hole.
    #
    if port == LISP_CTRL_PORT:
        lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
    else:
        header = lisp_data_header()
        header.instance_id(0xffffff)
        header = header.encode()
        if header:
            packet = header + packet
            lisp_send(lisp_sockets, dest, LISP_DATA_PORT, packet)

    #
    # Undo the temporary host route and restore the saved next-hop.
    #
    if installed_host_route:
        lisp_install_host_route(dest_str, None, False)
        if saved_nh != None: lisp_install_host_route(dest_str, saved_nh, True)
    return
def lisp_process_info_request(lisp_sockets, packet, addr_str, sport, rtr_list):
    """Decode a received Info-Request and answer with an Info-Reply.

    The reply echoes the requester's global (translated) RLOC and
    source port, its private RLOC (the hostname carried in the
    request, when present), and the supplied RTR list. The requester
    is then cached as an info-source for later NAT-traversal lookups.
    """

    #
    # Parse the Info-Request.
    #
    info = lisp_info()
    packet = info.decode(packet)
    if packet == None: return
    info.print_info()

    #
    # Turn it into an Info-Reply carrying the sender's translated
    # address and port as seen by us.
    #
    info.info_reply = True
    info.global_etr_rloc.store_address(addr_str)
    info.etr_port = sport

    #
    # Echo the requester's hostname as its private (distinguished-name)
    # RLOC.
    #
    if info.hostname != None:
        info.private_etr_rloc.afi = LISP_AFI_NAME
        info.private_etr_rloc.store_address(info.hostname)

    if rtr_list != None: info.rtr_list = rtr_list
    packet = info.encode()
    info.print_info()

    #
    # Send the reply back to the requester's source port.
    #
    lprint("Send Info-Reply to {}".format(red(addr_str, False)))
    dest = lisp_convert_4to6(addr_str)
    lisp_send(lisp_sockets, dest, sport, packet)

    #
    # Remember this requester so data-plane state can find it later.
    #
    info_source = lisp_info_source(info.hostname, addr_str, sport)
    info_source.cache_address_for_info_source()
    return
def lisp_get_signature_eid():
    """Return the first database-mapping entry configured with a
    signature-EID, or None when none is configured.
    """
    for db in lisp_db_list:
        if db.signature_eid: return db
    return None
def lisp_get_any_translated_port():
    """Return the NAT-translated UDP port of the first database RLOC
    that has a translated address, or None when no RLOC is translated.
    """
    for db in lisp_db_list:
        for rloc_entry in db.rloc_set:
            if rloc_entry.translated_rloc.is_null(): continue
            return rloc_entry.translated_port
    return None
def lisp_get_any_translated_rloc():
    """Return the NAT-translated address of the first database RLOC
    that has one, or None when no RLOC is translated.
    """
    for db in lisp_db_list:
        for rloc_entry in db.rloc_set:
            if rloc_entry.translated_rloc.is_null(): continue
            return rloc_entry.translated_rloc
    return None
def lisp_get_all_translated_rlocs():
    """Return address strings (no instance-id) for every NAT-translated
    RLOC across all database-mapping entries.
    """
    addresses = []
    for db in lisp_db_list:
        for rloc_entry in db.rloc_set:
            if rloc_entry.is_rloc_translated() == False: continue
            addresses.append(rloc_entry.translated_rloc.print_address_no_iid())
    return addresses
def lisp_update_default_routes(map_resolver, iid, rtr_list):
    """Install default map-cache routes pointing at the given RTR set.

    For each address family (IPv4, IPv6, and MAC when the L2 overlay is
    enabled) the unicast and multicast default routes in instance 'iid'
    are refreshed: stale entries are removed (unless the RLOC set
    already matches) and new entries are added with the RTRs as a
    priority-254 RLOC set. When the LISP_RTR_BEHIND_NAT environment
    variable is set, private RTR addresses are excluded.
    """
    behind_nat = (os.getenv("LISP_RTR_BEHIND_NAT") != None)

    #
    # Optionally filter out private RTR addresses.
    #
    usable_rtrs = {}
    for rtr_key in rtr_list:
        if rtr_key == None: continue
        rtr_addr = rtr_list[rtr_key]
        if behind_nat and rtr_addr.is_private_address(): continue
        usable_rtrs[rtr_key] = rtr_addr
    rtr_list = usable_rtrs

    #
    # Build the list of default prefixes that need (re)installing.
    #
    prefixes = []
    for afi in [LISP_AFI_IPV4, LISP_AFI_IPV6, LISP_AFI_MAC]:
        if afi == LISP_AFI_MAC and lisp_l2_overlay == False: break

        #
        # Unicast default route: drop any stale entry first, but keep
        # an entry whose RLOC set already matches.
        #
        default_eid = lisp_address(afi, "", 0, iid)
        default_eid.make_default_route(default_eid)
        entry = lisp_map_cache.lookup_cache(default_eid, True)
        if entry:
            if entry.checkpoint_entry:
                lprint("Updating checkpoint entry for {}".format(
                    green(entry.print_eid_tuple(), False)))
            elif entry.do_rloc_sets_match(rtr_list.values()):
                continue
            entry.delete_cache()

        prefixes.append([default_eid, ""])

        #
        # Multicast default route: remove any stale (S,G) entry.
        #
        default_group = lisp_address(afi, "", 0, iid)
        default_group.make_default_multicast_route(default_group)
        group_entry = lisp_map_cache.lookup_cache(default_group, True)
        if group_entry:
            group_entry = group_entry.source_cache.lookup_cache(default_eid,
                True)
        if group_entry: group_entry.delete_cache()

        prefixes.append([default_eid, default_group])

    if len(prefixes) == 0: return

    #
    # Build the RTR RLOC set shared by all the default entries.
    #
    rloc_set = []
    for rtr_key in rtr_list:
        rtr_addr = rtr_list[rtr_key]
        rloc = lisp_rloc()
        rloc.rloc.copy_address(rtr_addr)
        rloc.priority = 254
        rloc.mpriority = 255
        rloc.rloc_name = "RTR"
        rloc_set.append(rloc)

    #
    # Install each default prefix; each map-cache entry gets its own
    # deep copy of the RLOC set.
    #
    for prefix in prefixes:
        entry = lisp_mapping(prefix[0], prefix[1], rloc_set)
        entry.mapping_source = map_resolver
        entry.map_cache_ttl = LISP_MR_TTL * 60
        entry.add_cache()
        lprint("Add {} to map-cache with RTR RLOC-set: {}".format(
            green(entry.print_eid_tuple(), False), rtr_list.keys()))

        rloc_set = copy.deepcopy(rloc_set)
    return
def lisp_process_info_reply(source, packet, store):
    """
    Process a received Info-Reply message.

    source: lisp_address of the sender.
    packet: raw Info-Reply payload to decode.
    store:  when True, store discovered NAT translations in the local
            database-mapping RLOC entries; when False only decode.

    Returns [global-etr-rloc, etr-port, new-rtr-set] on success or
    [None, None, False] if the packet could not be decoded.
    """
    info = lisp_info()
    packet = info.decode(packet)
    if (packet is None): return([None, None, False])

    info.print_info()

    #
    # Merge the Info-Reply RTR list into the global lisp_rtr_list. An
    # entry is only (re)stored when it is new, or when we register all
    # RTRs and the existing entry is a placeholder (None).
    #
    new_rtr_set = False
    for rtr in info.rtr_list:
        addr_str = rtr.print_address_no_iid()
        if (addr_str in lisp_rtr_list):
            if (lisp_register_all_rtrs == False): continue
            if (lisp_rtr_list[addr_str] != None): continue
        #endif
        new_rtr_set = True
        lisp_rtr_list[addr_str] = rtr
    #endfor

    #
    # An ITR refreshes its default map-cache routes when the RTR set
    # changed, once per configured instance-ID (or the default IID when
    # no per-interface IIDs are configured).
    #
    if (lisp_i_am_itr and new_rtr_set):
        if (lisp_iid_to_interface == {}):
            lisp_update_default_routes(source, lisp_default_iid,
                lisp_rtr_list)
        else:
            for iid in lisp_iid_to_interface.keys():
                lisp_update_default_routes(source, int(iid), lisp_rtr_list)
            #endfor
        #endif
    #endif

    #
    # Caller only wants the decoded values, skip the store step.
    #
    if (store == False):
        return([info.global_etr_rloc, info.etr_port, new_rtr_set])
    #endif

    #
    # Store the NAT-translated global RLOC and port in each matching
    # database-mapping RLOC entry.
    #
    for db in lisp_db_list:
        for rloc_entry in db.rloc_set:
            rloc = rloc_entry.rloc
            interface = rloc_entry.interface
            if (interface is None):
                if (rloc.is_null()): continue
                if (rloc.is_local() == False): continue
                if (info.private_etr_rloc.is_null() == False and
                    rloc.is_exact_match(info.private_etr_rloc) == False):
                    continue
                #endif
            elif (info.private_etr_rloc.is_dist_name()):
                #
                # A distinguished-name private RLOC matches on rloc-name.
                #
                rloc_name = info.private_etr_rloc.address
                if (rloc_name != rloc_entry.rloc_name): continue
            #endif

            eid_str = green(db.eid.print_prefix(), False)
            rloc_str = red(rloc.print_address_no_iid(), False)

            #
            # Global RLOC equals the local RLOC and no port was ever
            # translated: there is no NAT in the path.
            #
            same_rloc = info.global_etr_rloc.is_exact_match(rloc)
            if (rloc_entry.translated_port == 0 and same_rloc):
                lprint("No NAT for {} ({}), EID-prefix {}".format(rloc_str,
                    interface, eid_str))
                continue
            #endif

            #
            # Skip if the translation is already stored and unchanged.
            #
            translated = rloc_entry.translated_rloc
            if (translated.is_exact_match(info.global_etr_rloc) and
                info.etr_port == rloc_entry.translated_port): continue

            lprint("Store translation {}:{} for {} ({}), EID-prefix {}".format(
                red(info.global_etr_rloc.print_address_no_iid(), False),
                info.etr_port, rloc_str, interface, eid_str))

            rloc_entry.store_translated_rloc(info.global_etr_rloc,
                info.etr_port)
        #endfor
    #endfor
    return([info.global_etr_rloc, info.etr_port, new_rtr_set])
if 65 - 65: iII111i + OoO0O00 - iIii1I11I1II1 / OoooooooOO . ooOoO0o . o0oOOo0O0Ooo
if 94 - 94: ooOoO0o . oO0o * OoooooooOO % oO0o
if 77 - 77: ooOoO0o % I1IiiI
if 26 - 26: o0oOOo0O0Ooo
if 72 - 72: I1IiiI
if 90 - 90: ooOoO0o
if 67 - 67: iIii1I11I1II1 + i1IIi * I1IiiI * OoooooooOO
if 23 - 23: IiII
def lisp_test_mr(lisp_sockets, port):
    """
    Periodically probe the configured Map-Resolvers with test
    Map-Requests and rearm the probe timer.

    NOTE: the function is currently short-circuited by the unconditional
    return below; everything after it is intentionally disabled dead
    code, preserved for when the test is re-enabled.
    """
    return

    # ---- disabled probe logic (unreachable) ----
    lprint("Test Map-Resolvers")

    eid_v4 = lisp_address(LISP_AFI_IPV4, "", 0, 0)
    eid_v6 = lisp_address(LISP_AFI_IPV6, "", 0, 0)

    #
    # Probe an IPv4 EID expected in the mapping system and one that is
    # not.
    #
    eid_v4.store_address("10.0.0.1")
    lisp_send_map_request(lisp_sockets, port, None, eid_v4, None)
    eid_v4.store_address("192.168.0.1")
    lisp_send_map_request(lisp_sockets, port, None, eid_v4, None)

    #
    # Same pair of probes for IPv6.
    #
    eid_v6.store_address("0100::1")
    lisp_send_map_request(lisp_sockets, port, None, eid_v6, None)
    eid_v6.store_address("8000::1")
    lisp_send_map_request(lisp_sockets, port, None, eid_v6, None)

    #
    # Rearm the periodic test timer.
    #
    probe_timer = threading.Timer(LISP_TEST_MR_INTERVAL, lisp_test_mr,
        [lisp_sockets, port])
    probe_timer.start()
    return
if 10 - 10: I1ii11iIi11i
if 6 - 6: OoO0O00 + OoO0O00 * OOooOOo / IiII % ooOoO0o - I1IiiI
if 17 - 17: II111iiii
if 66 - 66: O0 % OoOoOO00 + IiII % I1Ii111
if 94 - 94: OoOoOO00 / OoooooooOO % Ii1I * i11iIiiIii
if 95 - 95: iIii1I11I1II1 % OOooOOo % O0
if 93 - 93: I1ii11iIi11i
if 61 - 61: o0oOOo0O0Ooo * ooOoO0o
if 82 - 82: O0 * O0 % I1IiiI / o0oOOo0O0Ooo
if 46 - 46: IiII . O0 . I11i % I1ii11iIi11i * oO0o - oO0o
if 92 - 92: I1IiiI - I1IiiI
if 28 - 28: oO0o * iII111i + IiII
if 73 - 73: OoooooooOO
def lisp_update_local_rloc(rloc):
    """
    Refresh 'rloc' from its bound interface. If the interface's current
    address differs from the stored one, overwrite the stored RLOC and
    cache the new address in lisp_myrlocs[0]. No-op when the RLOC has
    no interface binding or the interface address cannot be read.
    """
    if (rloc.interface is None): return

    addr = lisp_get_interface_address(rloc.interface)
    if (addr is None): return

    old_addr = rloc.rloc.print_address_no_iid()
    new_addr = addr.print_address_no_iid()

    if (old_addr == new_addr): return

    lprint("Local interface address changed on {} from {} to {}".format(
        rloc.interface, old_addr, new_addr))

    rloc.rloc.copy_address(addr)
    lisp_myrlocs[0] = addr
    return
if 21 - 21: II111iiii + i11iIiiIii + I11i % I1IiiI
if 65 - 65: IiII + I1ii11iIi11i / iII111i / I1IiiI + Ii1I
if 88 - 88: IiII % iIii1I11I1II1
if 3 - 3: ooOoO0o / I1Ii111 % iIii1I11I1II1 % I11i * oO0o / iIii1I11I1II1
if 75 - 75: i11iIiiIii . iII111i
if 68 - 68: OOooOOo . I1ii11iIi11i % I1ii11iIi11i . i11iIiiIii
if 45 - 45: oO0o % I1ii11iIi11i * I1Ii111
if 21 - 21: O0 + i11iIiiIii
def lisp_update_encap_port(mc):
    """
    For each RLOC of map-cache entry 'mc', refresh the translated
    (encapsulation) port from current NAT state, logging any change.
    RLOCs without NAT state or with an unchanged port are skipped.
    """
    for rloc in mc.rloc_set:
        nat_info = lisp_get_nat_info(rloc.rloc, rloc.rloc_name)
        if (nat_info is None): continue
        if (rloc.translated_port == nat_info.port): continue

        lprint(("Encap-port changed from {} to {} for RLOC {}, " +
            "EID-prefix {}").format(rloc.translated_port, nat_info.port,
            red(rloc.rloc.print_address_no_iid(), False),
            green(mc.print_eid_tuple(), False)))

        rloc.store_translated_rloc(rloc.rloc, nat_info.port)
    #endfor
    return
if 52 - 52: II111iiii * o0oOOo0O0Ooo
if 95 - 95: I1Ii111 - OoooooooOO
if 99 - 99: OoooooooOO % IiII . I11i + OoooooooOO
if 57 - 57: Ii1I / I1IiiI * i1IIi
if 21 - 21: I11i . O0 * OoooooooOO + ooOoO0o * oO0o % i11iIiiIii
if 30 - 30: ooOoO0o * I1Ii111 + OoO0O00
if 30 - 30: Ii1I / iII111i * Ii1I
if 11 - 11: OoOoOO00 - OoOoOO00 % oO0o
if 3 - 3: I1IiiI - OoooooooOO % iIii1I11I1II1 + I1Ii111 + OoOoOO00
if 71 - 71: i1IIi % O0 % ooOoO0o
if 24 - 24: O0
if 88 - 88: OoooooooOO / Oo0Ooo / oO0o
def lisp_timeout_map_cache_entry(mc, delete_list):
    """
    Decide whether map-cache entry 'mc' has expired. Entries with no
    TTL (static entries) and unexpired NO_ACTION entries are kept, with
    their encap ports refreshed; anything else is appended to
    delete_list for the caller to remove. Returns [True, delete_list]
    so it can serve as a walk_cache callback.
    """
    if (mc.map_cache_ttl is None):
        lisp_update_encap_port(mc)
        return([True, delete_list])
    #endif

    #
    # TTL not yet reached? Keep the entry and refresh its encap port.
    #
    if (mc.action == LISP_NO_ACTION):
        now = lisp_get_timestamp()
        if (mc.last_refresh_time + mc.map_cache_ttl > now):
            lisp_update_encap_port(mc)
            return([True, delete_list])
        #endif
    #endif

    #
    # Entry has timed out; queue it for deletion by the caller (it
    # cannot be removed while the cache is being walked).
    #
    elapsed = lisp_print_elapsed(mc.last_refresh_time)
    eid_str = mc.print_eid_tuple()
    lprint("Map-cache entry for EID-prefix {} has {}, had uptime of {}".format(
        green(eid_str, False), bold("timed out", False), elapsed))

    delete_list.append(mc)
    return([True, delete_list])
if 36 - 36: I11i % OoO0O00 * OoOoOO00 - I1Ii111
if 16 - 16: ooOoO0o % OOooOOo . OoO0O00 % II111iiii . iIii1I11I1II1
if 21 - 21: oO0o + II111iiii / OoOoOO00 * I11i
if 90 - 90: OoOoOO00 % OoOoOO00 + I11i
if 70 - 70: I1IiiI . ooOoO0o / I11i / OoO0O00
if 40 - 40: oO0o % iIii1I11I1II1 * iIii1I11I1II1 / Oo0Ooo * OoO0O00
if 61 - 61: OOooOOo
if 80 - 80: I1ii11iIi11i
def lisp_timeout_map_cache_walk(mc, parms):
    """
    walk_cache callback used to time out map-cache entries. 'parms' is
    [delete-list, checkpoint-list]; expired entries are appended to the
    delete-list and surviving entries to the checkpoint-list. (S,G)
    entries recurse into their per-source sub-cache.
    """
    delete_list = parms[0]
    checkpoint_list = parms[1]

    #
    # Unicast entry: time it out, and checkpoint it when it survived
    # (i.e. it is not the most recently deleted entry).
    #
    if (mc.group.is_null()):
        status, delete_list = lisp_timeout_map_cache_entry(mc, delete_list)
        if (delete_list == [] or mc != delete_list[-1]):
            checkpoint_list = lisp_write_checkpoint_entry(checkpoint_list, mc)
        #endif
        return([status, parms])
    #endif

    if (mc.source_cache is None): return([True, parms])

    #
    # Multicast (S,G) entry: apply the same timeout logic to every
    # source under this group.
    #
    parms = mc.source_cache.walk_cache(lisp_timeout_map_cache_entry, parms)
    return([True, parms])
if 93 - 93: oO0o / ooOoO0o - I1Ii111
if 70 - 70: OOooOOo / Ii1I - ooOoO0o + OoooooooOO / OoO0O00 - i11iIiiIii
if 26 - 26: O0 + Oo0Ooo
if 30 - 30: IiII
if 6 - 6: O0
if 92 - 92: I11i
if 76 - 76: I11i / iIii1I11I1II1 - i11iIiiIii / O0 / O0
def lisp_timeout_map_cache(lisp_map_cache):
    """
    Walk the supplied map-cache, deleting entries whose TTL has
    expired and writing a checkpoint of the entries that remain.
    """
    walk_state = [[], []]
    walk_state = lisp_map_cache.walk_cache(lisp_timeout_map_cache_walk,
        walk_state)

    #
    # Deletion is deferred to here because entries cannot be removed
    # from the cache while it is being walked.
    #
    expired = walk_state[0]
    for entry in expired: entry.delete_cache()

    #
    # Persist the entries that survived the walk.
    #
    lisp_checkpoint(walk_state[1])
    return
if 76 - 76: I11i % I1Ii111 % iII111i + IiII * iII111i + OoOoOO00
if 83 - 83: OOooOOo . ooOoO0o / IiII
if 80 - 80: I1Ii111 . I11i - I11i + I1ii11iIi11i
if 42 - 42: I11i / IiII % O0 - Oo0Ooo
if 33 - 33: I1Ii111
if 1 - 1: IiII - iIii1I11I1II1 % OoooooooOO
if 1 - 1: o0oOOo0O0Ooo - i11iIiiIii + I11i
if 47 - 47: O0 + IiII + ooOoO0o + OOooOOo / OoOoOO00
if 31 - 31: oO0o * iII111i % OoOoOO00
if 80 - 80: ooOoO0o % I1ii11iIi11i % I11i . I1Ii111
if 3 - 3: ooOoO0o - Oo0Ooo
if 2 - 2: iII111i . iII111i
if 77 - 77: OOooOOo
if 74 - 74: O0
if 86 - 86: OoOoOO00
if 4 - 4: OoooooooOO * OoO0O00
def lisp_store_nat_info(hostname, rloc, port):
    """
    Record NAT state (translated address and port) for 'hostname'. The
    most recent state is kept at the head of the
    lisp_nat_state_info[hostname] list.

    Returns True when state is new or changed (caller should act on
    it), False when this is just a refresh of the current head entry.
    """
    addr_str = rloc.print_address_no_iid()
    msg = "{} NAT state for {}, RLOC {}, port {}".format("{}",
        blue(hostname, False), red(addr_str, False), port)

    new_entry = lisp_nat_info(addr_str, hostname, port)

    if (hostname not in lisp_nat_state_info):
        lisp_nat_state_info[hostname] = [new_entry]
        lprint(msg.format("Store initial"))
        return(True)
    #endif

    #
    # Head entry already matches: just refresh its uptime.
    #
    nat_info = lisp_nat_state_info[hostname][0]
    if (nat_info.address == addr_str and nat_info.port == port):
        nat_info.uptime = lisp_get_timestamp()
        lprint(msg.format("Refresh existing"))
        return(False)
    #endif

    #
    # Address or port changed. If an older stored entry matches the new
    # state, remove it (the fresh entry goes to the head below);
    # otherwise this is brand-new state.
    #
    match = None
    for nat_info in lisp_nat_state_info[hostname]:
        if (nat_info.address == addr_str and nat_info.port == port):
            match = nat_info
            break
        #endif
    #endfor

    if (match is None):
        lprint(msg.format("Store new"))
    else:
        lisp_nat_state_info[hostname].remove(match)
        lprint(msg.format("Use previous"))
    #endif

    remainder = lisp_nat_state_info[hostname]
    lisp_nat_state_info[hostname] = [new_entry] + remainder
    return(True)
if 74 - 74: OoOoOO00
if 8 - 8: IiII . IiII - ooOoO0o
if 97 - 97: O0 % I1IiiI
if 69 - 69: ooOoO0o . OoooooooOO
if 17 - 17: ooOoO0o / OoO0O00 / I1IiiI / OOooOOo % IiII
if 88 - 88: i1IIi - OoOoOO00
if 66 - 66: OoooooooOO - OoooooooOO * I11i / II111iiii + oO0o / Ii1I
if 7 - 7: Ii1I / iIii1I11I1II1
def lisp_get_nat_info(rloc, hostname):
    """
    Return the stored lisp_nat_info entry for 'hostname' whose address
    matches 'rloc', or None when no matching NAT state is known.
    """
    if (hostname not in lisp_nat_state_info): return(None)

    addr_str = rloc.print_address_no_iid()
    for nat_info in lisp_nat_state_info[hostname]:
        if (nat_info.address == addr_str): return(nat_info)
    #endfor
    return(None)
if 64 - 64: i1IIi * OoOoOO00 / II111iiii * oO0o
if 35 - 35: i1IIi - Ii1I - Ii1I . O0 % iII111i * iII111i
if 15 - 15: OoooooooOO . Ii1I * I1Ii111 . ooOoO0o % OoO0O00 * Oo0Ooo
if 10 - 10: iII111i + i11iIiiIii . OOooOOo % iII111i - i1IIi
if 10 - 10: iIii1I11I1II1 * i11iIiiIii - O0
if 45 - 45: oO0o % OOooOOo - IiII + o0oOOo0O0Ooo + i11iIiiIii
if 79 - 79: IiII % I1Ii111 . I1IiiI + O0 * oO0o * ooOoO0o
if 38 - 38: IiII
if 78 - 78: Oo0Ooo * I1ii11iIi11i % OOooOOo / Oo0Ooo + I1ii11iIi11i * IiII
if 2 - 2: Oo0Ooo - OoOoOO00
if 22 - 22: OoO0O00 - oO0o - O0
if 49 - 49: iIii1I11I1II1 + I1Ii111 / i11iIiiIii
if 62 - 62: ooOoO0o . I1IiiI * i11iIiiIii
if 2 - 2: i11iIiiIii
if 86 - 86: I1Ii111 + o0oOOo0O0Ooo
if 17 - 17: iIii1I11I1II1
if 32 - 32: IiII - OoOoOO00
if 88 - 88: OOooOOo - II111iiii + i1IIi * Oo0Ooo
if 48 - 48: I1Ii111 + IiII % iII111i * iII111i + I1Ii111
if 83 - 83: OoO0O00 . I11i * I1ii11iIi11i - II111iiii
def lisp_build_info_requests(lisp_sockets, dest, port):
    """
    Build and send Info-Request messages to discover NAT traversal
    state. When 'dest' is None, requests go to all configured
    Map-Resolvers (falling back to Map-Servers when none resolve);
    otherwise only to 'dest'. One request is sent per distinct local
    interface-bound database-mapping RLOC. No-op unless NAT traversal
    is configured.
    """
    if (lisp_nat_traversal == False): return

    #
    # Choose destinations: explicit dest, else Map-Resolvers, else
    # Map-Servers.
    #
    dest_list = []
    mr_dest_list = []
    if (dest is None):
        for mr in lisp_map_resolvers_list.values():
            mr_dest_list.append(mr.map_resolver)
        #endfor
        dest_list = mr_dest_list
        if (dest_list == []):
            for ms in lisp_map_servers_list.values():
                dest_list.append(ms.map_server)
            #endfor
        #endif
        if (dest_list == []): return
    else:
        dest_list.append(dest)
    #endif

    #
    # Collect the distinct local interface-bound RLOC addresses from
    # the database-mappings, refreshing each from its interface first.
    #
    rloc_to_interface = {}
    for db in lisp_db_list:
        for rloc_entry in db.rloc_set:
            lisp_update_local_rloc(rloc_entry)
            if (rloc_entry.rloc.is_null()): continue
            if (rloc_entry.interface is None): continue

            addr = rloc_entry.rloc.print_address_no_iid()
            if (addr in rloc_to_interface): continue
            rloc_to_interface[addr] = rloc_entry.interface
        #endfor
    #endfor

    if (rloc_to_interface == {}):
        lprint('Suppress Info-Request, no "interface = <device>" RLOC ' +
            "found in any database-mappings")
        return
    #endif

    #
    # Send one Info-Request per local RLOC to every destination. The
    # device name is only passed when more than one interface is in
    # play.
    #
    for addr in rloc_to_interface:
        interface = rloc_to_interface[addr]
        addr_str = red(addr, False)
        lprint("Build Info-Request for private address {} ({})".format(
            addr_str, interface))
        device = interface if len(rloc_to_interface) > 1 else None
        for dest in dest_list:
            lisp_send_info_request(lisp_sockets, dest, port, device)
        #endfor
    #endfor

    #
    # Refresh DNS names of the Map-Resolvers we targeted.
    #
    if (mr_dest_list != []):
        for mr in lisp_map_resolvers_list.values():
            mr.resolve_dns_name()
        #endfor
    #endif
    return
if 7 - 7: i1IIi . I1IiiI
if 68 - 68: OoooooooOO
if 91 - 91: IiII . ooOoO0o * I11i
if 39 - 39: o0oOOo0O0Ooo + i11iIiiIii
if 69 - 69: iIii1I11I1II1 . II111iiii
if 36 - 36: I1IiiI * i1IIi + OoOoOO00
if 63 - 63: OoOoOO00 - iII111i
if 83 - 83: i1IIi / iII111i % ooOoO0o % i11iIiiIii + I1ii11iIi11i
def lisp_valid_address_format(kw, value):
    """
    Syntax-check an "address" value from a configuration clause.

    Accepted formats: a quoted distinguished-name ('name'), an IPv4
    dotted quad, a geo-coordinate string (dash-separated containing a
    N/S/W/E direction, at least 8 fields), a MAC address
    (xxxx-xxxx-xxxx), an IPv6 colon address, or an E.164 number
    (+digits). Any keyword other than "address" is not checked and
    returns True.
    """
    if (kw != "address"): return(True)

    #
    # Robustness: an empty value is invalid (and would otherwise crash
    # on the value[0] tests below).
    #
    if (value == ""): return(False)

    #
    # A quoted string is a distinguished-name.
    #
    if (value[0] == "'" and value[-1] == "'"): return(True)

    #
    # IPv4: exactly four dotted decimal bytes, each 0-255.
    #
    if (value.find(".") != -1):
        fields = value.split(".")
        if (len(fields) != 4): return(False)

        for byte_str in fields:
            if (byte_str.isdigit() == False): return(False)
            if (int(byte_str) > 255): return(False)
        #endfor
        return(True)
    #endif

    #
    # Geo-coordinates: dash-separated containing a N/S/W/E direction;
    # must have at least 8 components. Values without a direction
    # letter fall through to the MAC check.
    #
    if (value.find("-") != -1):
        fields = value.split("-")
        for direction in ["N", "S", "W", "E"]:
            if (direction in fields):
                if (len(fields) < 8): return(False)
                return(True)
            #endif
        #endfor
    #endif

    #
    # MAC address: exactly three dash-separated hex groups.
    #
    if (value.find("-") != -1):
        fields = value.split("-")
        if (len(fields) != 3): return(False)

        for hex_group in fields:
            try: int(hex_group, 16)
            except: return(False)
        #endfor
        return(True)
    #endif

    #
    # IPv6: colon-separated hex groups, at most one "::" run.
    #
    if (value.find(":") != -1):
        fields = value.split(":")
        if (len(fields) < 2): return(False)

        found_null = False
        field_count = 0
        for hex_group in fields:
            field_count += 1
            if (hex_group == ""):
                if (found_null):
                    if (len(fields) == field_count): break
                    if (field_count > 2): return(False)
                #endif
                found_null = True
                continue
            #endif
            try: int(hex_group, 16)
            except: return(False)
        #endfor
        return(True)
    #endif

    #
    # E.164: a "+" followed by digits only.
    #
    if (value[0] == "+"):
        digits = value[1::]
        for digit in digits:
            if (digit.isdigit() == False): return(False)
        #endfor
        return(True)
    #endif
    return(False)
if 100 - 100: II111iiii . IiII . I11i
if 60 - 60: OoOoOO00 % OOooOOo * i1IIi
if 3 - 3: OoooooooOO
if 75 - 75: OoooooooOO * I1Ii111 * o0oOOo0O0Ooo + I1ii11iIi11i . iIii1I11I1II1 / O0
if 23 - 23: oO0o - O0 * IiII + i11iIiiIii * Ii1I
if 8 - 8: ooOoO0o / II111iiii . I1ii11iIi11i * ooOoO0o % oO0o
if 36 - 36: I1ii11iIi11i % OOooOOo - ooOoO0o - I11i + I1IiiI
if 37 - 37: I1ii11iIi11i * IiII
if 65 - 65: OOooOOo / O0 . I1ii11iIi11i % i1IIi % Oo0Ooo
if 36 - 36: i11iIiiIii - OOooOOo + iII111i + iII111i * I11i * oO0o
if 14 - 14: O0 - iII111i * I1Ii111 - I1IiiI + IiII
if 46 - 46: OoooooooOO * OoO0O00 . I1Ii111
def lisp_process_api(process, lisp_socket, data_structure):
    """
    Handle an API request received over the lisp-core IPC socket. The
    request is "<api-name>%<json-parameters>"; the matching data is
    gathered, JSON-encoded, and returned to 'process' over the socket.
    """
    api_name, parms = data_structure.split("%")

    lprint("Process API request '{}', parameters: '{}'".format(api_name,
        parms))

    output = []
    if (api_name == "map-cache"):
        if (parms == ""):
            output = lisp_map_cache.walk_cache(lisp_process_api_map_cache,
                output)
        else:
            output = lisp_process_api_map_cache_entry(json.loads(parms))
        #endif
    #endif
    if (api_name == "site-cache"):
        if (parms == ""):
            output = lisp_sites_by_eid.walk_cache(lisp_process_api_site_cache,
                output)
        else:
            output = lisp_process_api_site_cache_entry(json.loads(parms))
        #endif
    #endif
    if (api_name == "map-server"):
        parms = {} if (parms == "") else json.loads(parms)
        output = lisp_process_api_ms_or_mr(True, parms)
    #endif
    if (api_name == "map-resolver"):
        parms = {} if (parms == "") else json.loads(parms)
        output = lisp_process_api_ms_or_mr(False, parms)
    #endif
    if (api_name == "database-mapping"):
        output = lisp_process_api_database_mapping()
    #endif

    #
    # JSON-encode the result and ship it back over the IPC socket.
    #
    output = json.dumps(output)
    ipc = lisp_api_ipc(process, output)
    lisp_ipc(ipc, lisp_socket, "lisp-core")
    return
if 1 - 1: I1ii11iIi11i . ooOoO0o
if 54 - 54: OoOoOO00 % I1IiiI . ooOoO0o + IiII / i11iIiiIii / o0oOOo0O0Ooo
if 51 - 51: OoOoOO00 / Ii1I . I1IiiI / Ii1I . II111iiii - iIii1I11I1II1
if 78 - 78: I11i
if 42 - 42: Ii1I
if 50 - 50: iIii1I11I1II1 / Ii1I . ooOoO0o / ooOoO0o * OoOoOO00 * iII111i
if 15 - 15: o0oOOo0O0Ooo % II111iiii + I1IiiI
def lisp_process_api_map_cache(mc, data):
    """
    walk_cache callback that appends API-formatted map-cache entries to
    'data'. (S,G) entries recurse into their per-source sub-cache.
    Returns [True, data] so the walk continues.
    """
    #
    # Unicast entry.
    #
    if (mc.group.is_null()): return(lisp_gather_map_cache_data(mc, data))

    if (mc.source_cache is None): return([True, data])

    #
    # Multicast (S,G) entry: gather each source entry under this group.
    #
    data = mc.source_cache.walk_cache(lisp_gather_map_cache_data, data)
    return([True, data])
if 20 - 20: I1Ii111 + iIii1I11I1II1 % I1IiiI + ooOoO0o
if 86 - 86: o0oOOo0O0Ooo * i11iIiiIii - I11i
if 71 - 71: OoO0O00 - I11i
if 96 - 96: I1Ii111 / Ii1I
if 65 - 65: I1ii11iIi11i * O0 . IiII
if 11 - 11: I11i / Ii1I % oO0o
if 50 - 50: i11iIiiIii
def lisp_gather_map_cache_data(mc, data):
    """
    Append a JSON-ready dictionary describing map-cache entry 'mc'
    (EID, TTL, action, and per-RLOC detail) to 'data'. Returns
    [True, data] so it can serve as a walk_cache callback.
    """
    entry = {}
    entry["instance-id"] = str(mc.eid.instance_id)
    entry["eid-prefix"] = mc.eid.print_prefix_no_iid()
    if (mc.group.is_null() == False):
        entry["group-prefix"] = mc.group.print_prefix_no_iid()
    #endif
    entry["uptime"] = lisp_print_elapsed(mc.uptime)
    # NOTE(review): "expires" mirrors uptime here — presumably a
    # placeholder; confirm against the web UI before changing.
    entry["expires"] = lisp_print_elapsed(mc.uptime)
    entry["action"] = lisp_map_reply_action_string[mc.action]
    entry["ttl"] = "--" if mc.map_cache_ttl is None else str(
        mc.map_cache_ttl / 60)

    #
    # Encode each RLOC of the rloc-set.
    #
    rloc_set = []
    for rloc in mc.rloc_set:
        rloc_entry = {}
        if (rloc.rloc_exists()):
            rloc_entry["address"] = rloc.rloc.print_address_no_iid()
        #endif

        if (rloc.translated_port != 0):
            rloc_entry["encap-port"] = str(rloc.translated_port)
        #endif

        rloc_entry["state"] = rloc.print_state()
        if (rloc.geo): rloc_entry["geo"] = rloc.geo.print_geo()
        if (rloc.elp): rloc_entry["elp"] = rloc.elp.print_elp(False)
        if (rloc.rle): rloc_entry["rle"] = rloc.rle.print_rle(False)
        if (rloc.json): rloc_entry["json"] = rloc.json.print_json(False)
        if (rloc.rloc_name): rloc_entry["rloc-name"] = rloc.rloc_name

        stats = rloc.stats.get_stats(False, False)
        if (stats): rloc_entry["stats"] = stats

        rloc_entry["uptime"] = lisp_print_elapsed(rloc.uptime)
        rloc_entry["upriority"] = str(rloc.priority)
        rloc_entry["uweight"] = str(rloc.weight)
        rloc_entry["mpriority"] = str(rloc.mpriority)
        rloc_entry["mweight"] = str(rloc.mweight)

        reply_time = rloc.last_rloc_probe_reply
        if (reply_time):
            rloc_entry["last-rloc-probe-reply"] = lisp_print_elapsed(
                reply_time)
            rloc_entry["rloc-probe-rtt"] = str(rloc.rloc_probe_rtt)
        #endif
        rloc_entry["rloc-hop-count"] = rloc.rloc_probe_hops
        rloc_entry["recent-rloc-hop-counts"] = rloc.recent_rloc_probe_hops

        rtts = []
        for rtt in rloc.recent_rloc_probe_rtts: rtts.append(str(rtt))
        rloc_entry["recent-rloc-probe-rtts"] = rtts

        rloc_set.append(rloc_entry)
    #endfor
    entry["rloc-set"] = rloc_set

    data.append(entry)
    return([True, data])
if 3 - 3: OOooOOo / OoOoOO00 % iIii1I11I1II1
if 47 - 47: ooOoO0o . i11iIiiIii / OoO0O00
if 48 - 48: O0
if 89 - 89: i11iIiiIii % OoO0O00 . OoOoOO00 + Oo0Ooo + OoOoOO00
if 53 - 53: Ii1I / OoOoOO00 % iII111i * OoooooooOO + Oo0Ooo
if 70 - 70: OoO0O00 % OoO0O00 * OoooooooOO
if 96 - 96: ooOoO0o * Ii1I + I11i + II111iiii * I1IiiI / iII111i
def lisp_process_api_map_cache_entry(parms):
    """
    Look up a single map-cache entry described by the API parameters
    dict ('instance-id', 'eid-prefix', optional 'group-prefix') and
    return it API-formatted in a list (empty list when not found).
    """
    iid = parms["instance-id"]
    iid = 0 if (iid == "") else int(iid)

    #
    # Source EID lookup key.
    #
    eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
    eid.store_prefix(parms["eid-prefix"])
    dest = eid
    source = eid

    #
    # For (S,G) entries the destination key is the group prefix.
    #
    group = lisp_address(LISP_AFI_NONE, "", 0, iid)
    if ("group-prefix" in parms):
        group.store_prefix(parms["group-prefix"])
        dest = group
    #endif

    data = []
    mc = lisp_map_cache_lookup(source, dest)
    if (mc): status, data = lisp_process_api_map_cache(mc, data)
    return(data)
if 21 - 21: i1IIi + OoO0O00 . I1IiiI - Oo0Ooo
if 99 - 99: OoOoOO00
if 46 - 46: I1ii11iIi11i / II111iiii / OoooooooOO / Ii1I
if 37 - 37: I1ii11iIi11i - Ii1I / oO0o . I1IiiI % I1Ii111
if 8 - 8: oO0o
if 46 - 46: I1Ii111 + IiII + II111iiii . o0oOOo0O0Ooo + i11iIiiIii
if 97 - 97: o0oOOo0O0Ooo % OoOoOO00 * O0 / iIii1I11I1II1 * OoO0O00 / i11iIiiIii
def lisp_process_api_site_cache(se, data):
    """
    Gather site-cache data for entry 'se' into list 'data'. Unicast
    entries are gathered directly; for an (S,G) group entry each
    source-cache entry underneath it is walked and gathered as well.
    Returns [True, data] per the cache-walker convention.
    """
    #
    # Unicast entry (no group): gather it directly.
    #
    if (se.group.is_null()):
        return (lisp_gather_site_cache_data(se, data))
    #endif

    #
    # (S,G) entry with no sources hanging off it: nothing to gather.
    #
    if (se.source_cache == None):
        return ([True, data])
    #endif

    #
    # Walk every source entry stored under the group entry.
    #
    data = se.source_cache.walk_cache(lisp_gather_site_cache_data, data)
    return ([True, data])
if 52 - 52: Oo0Ooo - I11i - IiII - OoOoOO00
if 21 - 21: oO0o % o0oOOo0O0Ooo + I1Ii111 . OOooOOo / OOooOOo
if 41 - 41: Oo0Ooo . ooOoO0o * oO0o
if 31 - 31: Oo0Ooo * IiII / IiII
if 3 - 3: I1Ii111
if 65 - 65: iIii1I11I1II1 % Oo0Ooo % I11i / OoooooooOO
if 82 - 82: o0oOOo0O0Ooo
def lisp_process_api_ms_or_mr(ms_or_mr, data):
    """
    Return status for one map-server (ms_or_mr True) or map-resolver
    (ms_or_mr False). 'data' selects the peer by "dns-name" or by
    "address". Returns a one-element list with the matching entry,
    or [] when no peer matches.
    """
    address = lisp_address(LISP_AFI_NONE, "", 0, 0)
    dns_name = data["dns-name"] if "dns-name" in data else None
    if ("address" in data):
        address.store_address(data["address"])
    #endif

    value = {}
    if (ms_or_mr):
        for ms in lisp_map_servers_list.values():
            #
            # Match by DNS name when supplied, otherwise by exact address.
            #
            if (dns_name):
                if (dns_name != ms.dns_name): continue
            else:
                if (address.is_exact_match(ms.map_server) == False): continue
            #endif

            value["dns-name"] = ms.dns_name
            value["address"] = ms.map_server.print_address_no_iid()
            value["ms-name"] = "" if ms.ms_name == None else ms.ms_name
            return ([value])
        #endfor
    else:
        for mr in lisp_map_resolvers_list.values():
            if (dns_name):
                if (dns_name != mr.dns_name): continue
            else:
                if (address.is_exact_match(mr.map_resolver) == False): continue
            #endif

            value["dns-name"] = mr.dns_name
            value["address"] = mr.map_resolver.print_address_no_iid()
            value["mr-name"] = "" if mr.mr_name == None else mr.mr_name
            return ([value])
        #endfor
    #endif
    return ([])
if 67 - 67: IiII - I1ii11iIi11i * i1IIi - ooOoO0o
if 91 - 91: I11i
if 54 - 54: I1ii11iIi11i / i1IIi
if 14 - 14: iIii1I11I1II1 * I11i . I11i * ooOoO0o * iII111i
if 60 - 60: iIii1I11I1II1 + i1IIi + oO0o - iIii1I11I1II1 . i11iIiiIii * OoooooooOO
if 23 - 23: iII111i - IiII % i11iIiiIii
if 81 - 81: OoooooooOO % OoOoOO00 / IiII / OoooooooOO + i1IIi - O0
if 60 - 60: OOooOOo - I1Ii111 * Oo0Ooo
def lisp_process_api_database_mapping():
    """
    Build and return a list of dictionaries describing every configured
    database-mapping entry: EID-prefix, optional group-prefix, and the
    RLOC set (address, rloc-name, interface, translated RLOC).
    """
    output = []

    for db in lisp_db_list:
        entry = {}
        entry["eid-prefix"] = db.eid.print_prefix()
        if (db.group.is_null() == False):
            entry["group-prefix"] = db.group.print_prefix()
        #endif

        #
        # Describe each RLOC configured for this database-mapping.
        #
        rlocs = []
        for rloc_entry in db.rloc_set:
            record = {}
            if (rloc_entry.rloc.is_null() == False):
                record["rloc"] = rloc_entry.rloc.print_address_no_iid()
            #endif
            if (rloc_entry.rloc_name != None):
                record["rloc-name"] = rloc_entry.rloc_name
            #endif
            if (rloc_entry.interface != None):
                record["interface"] = rloc_entry.interface
            #endif
            translated = rloc_entry.translated_rloc
            if (translated.is_null() == False):
                record["translated-rloc"] = translated.print_address_no_iid()
            #endif
            if (record != {}):
                rlocs.append(record)
            #endif
        #endfor

        entry["rlocs"] = rlocs
        output.append(entry)
    #endfor
    return (output)
if 44 - 44: I11i . I1Ii111 . I1ii11iIi11i . oO0o
if 1 - 1: I11i % II111iiii / OoO0O00 + OoO0O00
if 46 - 46: Oo0Ooo * Ii1I / IiII % O0 * iII111i
if 74 - 74: OoooooooOO + Ii1I
if 100 - 100: I1IiiI
if 59 - 59: I1IiiI - OoOoOO00 * ooOoO0o / O0
if 54 - 54: Oo0Ooo % iIii1I11I1II1 * Oo0Ooo
def lisp_gather_site_cache_data(se, data):
    """
    Append a dictionary describing site-EID entry 'se' to list 'data'.
    Returns [True, data] so this can be used as a walk_cache callback.
    """
    entry = {}
    entry["site-name"] = se.site.site_name
    entry["instance-id"] = str(se.eid.instance_id)
    entry["eid-prefix"] = se.eid.print_prefix_no_iid()
    if (se.group.is_null() == False):
        entry["group-prefix"] = se.group.print_prefix_no_iid()
    #endif

    entry["registered"] = "yes" if se.registered else "no"
    entry["first-registered"] = lisp_print_elapsed(se.first_registered)
    entry["last-registered"] = lisp_print_elapsed(se.last_registered)

    registerer = se.last_registerer
    registerer = "none" if registerer.is_null() else \
        registerer.print_address()
    entry["last-registerer"] = registerer
    entry["ams"] = "yes" if (se.accept_more_specifics) else "no"
    entry["dynamic"] = "yes" if (se.dynamic) else "no"
    entry["site-id"] = str(se.site_id)
    if (se.xtr_id_present):
        entry["xtr-id"] = "0x" + lisp_hex_string(se.xtr_id)
    #endif

    #
    # Describe each registered RLOC for this entry.
    #
    rlocs = []
    for rloc in se.registered_rlocs:
        record = {}
        record["address"] = rloc.rloc.print_address_no_iid() if \
            rloc.rloc_exists() else "none"

        if (rloc.geo): record["geo"] = rloc.geo.print_geo()
        if (rloc.elp): record["elp"] = rloc.elp.print_elp(False)
        if (rloc.rle): record["rle"] = rloc.rle.print_rle(False)
        if (rloc.json): record["json"] = rloc.json.print_json(False)
        if (rloc.rloc_name): record["rloc-name"] = rloc.rloc_name
        record["uptime"] = lisp_print_elapsed(rloc.uptime)
        record["upriority"] = str(rloc.priority)
        record["uweight"] = str(rloc.weight)
        record["mpriority"] = str(rloc.mpriority)
        record["mweight"] = str(rloc.mweight)

        rlocs.append(record)
    #endfor
    entry["registered-rlocs"] = rlocs

    data.append(entry)
    return ([True, data])
if 8 - 8: IiII * i1IIi * i1IIi * O0
if 69 - 69: Oo0Ooo
if 48 - 48: iII111i
if 11 - 11: i11iIiiIii * OoOoOO00 . OoO0O00
if 47 - 47: Oo0Ooo % I1Ii111 + ooOoO0o
if 89 - 89: iII111i
if 29 - 29: I1ii11iIi11i . ooOoO0o * II111iiii / iII111i . OoooooooOO - OoOoOO00
def lisp_process_api_site_cache_entry(parms):
    """
    Look up one site-cache entry described by API dictionary 'parms'
    (keys "instance-id", "eid-prefix", optional "group-prefix") and
    return its gathered data as a list (empty when not found).
    """
    iid = parms["instance-id"]
    iid = 0 if (iid == "") else int(iid)

    #
    # Build EID-prefix to look up.
    #
    eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
    eid.store_prefix(parms["eid-prefix"])

    #
    # Optional group-prefix for an (S,G) entry.
    #
    group = lisp_address(LISP_AFI_NONE, "", 0, iid)
    if ("group-prefix" in parms):
        group.store_prefix(parms["group-prefix"])
    #endif

    data = []
    se = lisp_site_eid_lookup(eid, group, False)
    if (se): lisp_gather_site_cache_data(se, data)
    return (data)
if 95 - 95: OOooOOo + OOooOOo - OoOoOO00
if 83 - 83: II111iiii * ooOoO0o - O0 - i11iIiiIii
if 62 - 62: I1IiiI + II111iiii * iIii1I11I1II1 % iII111i + IiII / ooOoO0o
if 14 - 14: iIii1I11I1II1 * I1ii11iIi11i + OOooOOo + O0
if 79 - 79: II111iiii - iII111i
if 89 - 89: O0 - OoO0O00
if 8 - 8: I1ii11iIi11i / oO0o - OoooooooOO + ooOoO0o + o0oOOo0O0Ooo % i11iIiiIii
def lisp_get_interface_instance_id(device, source_eid):
    """
    Return the instance-ID to use for a packet arriving on 'device'.
    When the device hosts multi-tenant EID-prefixes, 'source_eid'
    selects the longest-matching tenant interface's instance-ID.
    Falls back to lisp_default_iid for unknown/unconfigured devices.
    """
    interface = None
    if (device in lisp_myinterfaces):
        interface = lisp_myinterfaces[device]
    #endif

    #
    # Unknown interface, or one with no instance-ID configured: default.
    #
    if (interface == None or interface.instance_id == None):
        return (lisp_default_iid)
    #endif

    iid = interface.get_instance_id()
    if (source_eid == None): return (iid)

    #
    # Multi-tenant check: find the tenant interface on this device whose
    # multi-tenant EID-prefix is the longest match for the source EID.
    # The source EID's instance-id is borrowed for each comparison and
    # restored afterwards.
    #
    saved_iid = source_eid.instance_id
    best = None
    for interface in lisp_multi_tenant_interfaces:
        if (interface.device != device): continue
        prefix = interface.multi_tenant_eid
        source_eid.instance_id = prefix.instance_id
        if (source_eid.is_more_specific(prefix) == False): continue
        if (best == None or
            best.multi_tenant_eid.mask_len < prefix.mask_len):
            best = interface
        #endif
    #endfor
    source_eid.instance_id = saved_iid

    if (best == None): return (iid)
    return (best.get_instance_id())
if 90 - 90: OoooooooOO * ooOoO0o + IiII * OoOoOO00 - OoOoOO00
if 24 - 24: OoooooooOO / I1IiiI % iII111i . i11iIiiIii
if 14 - 14: O0 . IiII - Ii1I
if 39 - 39: O0 % I1Ii111
if 82 - 82: II111iiii . IiII - O0
if 18 - 18: oO0o * OOooOOo
if 19 - 19: iIii1I11I1II1 / I1ii11iIi11i - I1ii11iIi11i / iIii1I11I1II1
if 42 - 42: iIii1I11I1II1 / OOooOOo - O0 * OoooooooOO / i1IIi
if 33 - 33: OOooOOo . o0oOOo0O0Ooo % OoO0O00 - I1Ii111 . OoooooooOO
def lisp_allow_dynamic_eid(device, eid):
    """
    If 'eid' matches the dynamic-EID configuration of interface
    'device', return the device name dynamic-EID packets should use
    (the configured dynamic-eid-device, defaulting to 'device').
    Otherwise return None.
    """
    if (device not in lisp_myinterfaces): return (None)

    interface = lisp_myinterfaces[device]
    return_device = device if interface.dynamic_eid_device == None else \
        interface.dynamic_eid_device

    if (interface.does_dynamic_eid_match(eid)): return (return_device)
    return (None)
if 51 - 51: i11iIiiIii / ooOoO0o - OoooooooOO + OoOoOO00 + oO0o
if 57 - 57: iIii1I11I1II1
if 19 - 19: Ii1I / o0oOOo0O0Ooo + O0 / iIii1I11I1II1 + II111iiii
if 3 - 3: oO0o % OoO0O00 % OOooOOo
if 64 - 64: o0oOOo0O0Ooo . II111iiii * IiII % Oo0Ooo + I11i - OoooooooOO
if 58 - 58: ooOoO0o
if 15 - 15: O0 * OOooOOo * I11i + Ii1I * OoooooooOO + OOooOOo
def lisp_start_rloc_probe_timer(interval, lisp_sockets):
    """
    (Re)arm the global RLOC-probe timer so that
    lisp_process_rloc_probe_timer fires in 'interval' seconds,
    cancelling any timer that is already pending.
    """
    global lisp_rloc_probe_timer

    if (lisp_rloc_probe_timer != None):
        lisp_rloc_probe_timer.cancel()
    #endif

    timer = threading.Timer(interval, lisp_process_rloc_probe_timer,
        [lisp_sockets])
    lisp_rloc_probe_timer = timer
    timer.start()
    return
if 82 - 82: OoO0O00 . I1IiiI + o0oOOo0O0Ooo
if 52 - 52: oO0o . OOooOOo + iII111i * ooOoO0o + IiII / I1Ii111
if 88 - 88: OoO0O00 * I1ii11iIi11i - I1IiiI * IiII * Oo0Ooo % OoooooooOO
if 15 - 15: OOooOOo - I1Ii111 - OOooOOo
if 73 - 73: iII111i + o0oOOo0O0Ooo % iII111i . Ii1I + OoO0O00 - I1ii11iIi11i
if 47 - 47: OoO0O00 * O0 % iIii1I11I1II1
if 92 - 92: IiII
def lisp_show_rloc_probe_list():
    """
    Debug dump of the RLOC-probe list: for each RLOC address key, print
    every (rloc, eid, group) tuple stored under it along with the
    rloc object's id and translated port.
    """
    lprint(bold("----- RLOC-probe-list -----", False))
    for key in lisp_rloc_probe_list:
        lprint("RLOC {}:".format(key))
        for rloc, eid, group in lisp_rloc_probe_list[key]:
            lprint(" [{}, {}, {}, {}]".format(hex(id(rloc)),
                eid.print_prefix(), group.print_prefix(),
                rloc.translated_port))
        #endfor
    #endfor
    lprint(bold("---------------------------", False))
    return
if 73 - 73: O0 + i1IIi + iII111i
if 100 - 100: oO0o / OoooooooOO % ooOoO0o / i1IIi . oO0o - OoO0O00
if 32 - 32: IiII
if 2 - 2: iII111i / IiII % iIii1I11I1II1 . iII111i + iIii1I11I1II1 + O0
if 96 - 96: I1ii11iIi11i - IiII % OoooooooOO . iII111i
if 30 - 30: Oo0Ooo . OoooooooOO / Oo0Ooo / oO0o
if 44 - 44: I1ii11iIi11i % o0oOOo0O0Ooo / iIii1I11I1II1 - o0oOOo0O0Ooo / I11i * I1Ii111
if 49 - 49: iII111i / iII111i - OoOoOO00
if 89 - 89: ooOoO0o
def lisp_mark_rlocs_for_other_eids(eid_list):
    """
    The first (rloc, eid, group) tuple in 'eid_list' was already marked
    unreachable by the caller; mark the RLOCs of every remaining tuple
    unreachable too, log each affected EID, and write updated map-cache
    entries to the IPC data-plane.
    """
    rloc, eid, group = eid_list[0]
    eid_names = [lisp_print_eid_tuple(eid, group)]

    for rloc, eid, group in eid_list[1::]:
        rloc.state = LISP_RLOC_UNREACH_STATE
        rloc.last_state_change = lisp_get_timestamp()
        eid_names.append(lisp_print_eid_tuple(eid, group))
    #endfor

    unreach = bold("unreachable", False)
    rloc_str = red(rloc.rloc.print_address_no_iid(), False)

    for name in eid_names:
        lprint("RLOC {} went {} for EID {}".format(rloc_str, unreach,
            green(name, False)))
    #endfor

    #
    # Tell the external data-plane about each affected map-cache entry.
    #
    for rloc, eid, group in eid_list:
        mc = lisp_map_cache.lookup_cache(eid, True)
        if (mc): lisp_write_ipc_map_cache(True, mc)
    #endfor
    return
if 42 - 42: II111iiii
if 49 - 49: OoooooooOO
if 48 - 48: i1IIi . IiII - O0 + OoooooooOO
if 6 - 6: I1Ii111 * OOooOOo + o0oOOo0O0Ooo . I1ii11iIi11i * I1Ii111
if 6 - 6: oO0o / II111iiii
if 23 - 23: IiII - OoooooooOO / oO0o
if 69 - 69: O0 - OoooooooOO
if 31 - 31: o0oOOo0O0Ooo . i1IIi - i1IIi % i1IIi - iIii1I11I1II1
if 50 - 50: IiII - OOooOOo % OoOoOO00
if 66 - 66: IiII * i11iIiiIii
def lisp_process_rloc_probe_timer(lisp_sockets):
    """
    Periodic RLOC-probe handler. Re-arms the probe timer, then walks
    lisp_rloc_probe_list and sends an RLOC-probe Map-Request to every
    distinct probable RLOC. Probes are suppressed for: gleaned entries
    (when configured), admin-down RLOCs, duplicate RLOCs already probed
    this pass, RLOCs whose next-hop is not a default-route next-hop,
    RLOCs with a recent nonce-echo, and unreachable RLOCs still waiting
    for a probe-reply.

    Fix: the second return value of lisp_allow_gleaning() previously
    overwrote the bolded "RLOC-probe" log label, corrupting the
    "Send ..." log line; it is now kept in its own variable.
    """
    lisp_set_exception()

    #
    # Restart timer before doing the work so probing keeps running.
    #
    lisp_start_rloc_probe_timer(LISP_RLOC_PROBE_INTERVAL, lisp_sockets)
    if (lisp_rloc_probing == False): return

    #
    # Debug aid, enabled via lisp_print_rloc_probe_list.
    #
    if (lisp_print_rloc_probe_list): lisp_show_rloc_probe_list()

    #
    # Next-hops of the default route(s), used below to validate
    # multi-homed egress RLOC next-hops.
    #
    default_next_hops = lisp_get_default_route_next_hops()

    lprint("---------- Start RLOC Probing for {} entries ----------".format(
        len(lisp_rloc_probe_list)))

    count = 0
    probe_label = bold("RLOC-probe", False)
    for rloc_entries in lisp_rloc_probe_list.values():

        #
        # Probe each RLOC address once per pass; remember the previous
        # parent RLOC so duplicates (same translated port and rloc-name)
        # registered for other EIDs are skipped.
        #
        last_rloc = None
        for parent_rloc, eid, group in rloc_entries:
            addr_str = parent_rloc.rloc.print_address_no_iid()

            #
            # Suppress probes to gleaned RLOCs when configured to.
            #
            gleaned, probe_allowed = lisp_allow_gleaning(eid, parent_rloc)
            if (gleaned and probe_allowed == False):
                e = green(eid.print_address(), False)
                addr_str += ":{}".format(parent_rloc.translated_port)
                lprint("Suppress probe to RLOC {} for gleaned EID {}".format(
                    red(addr_str, False), e))
                continue
            #endif

            #
            # Don't probe RLOCs that are admin-down.
            #
            if (parent_rloc.down_state()): continue

            #
            # Share the probe nonce with the previous entry for this
            # RLOC and skip the probe when it is an exact duplicate.
            #
            if (last_rloc):
                parent_rloc.last_rloc_probe_nonce = \
                    last_rloc.last_rloc_probe_nonce

                if (last_rloc.translated_port == parent_rloc.translated_port
                    and last_rloc.rloc_name == parent_rloc.rloc_name):
                    e = green(lisp_print_eid_tuple(eid, group), False)
                    lprint("Suppress probe to duplicate RLOC {} for {}". \
                        format(red(addr_str, False), e))
                    continue
                #endif
            #endif

            original_nh = None
            rloc = None
            while (True):
                #
                # Follow the next_rloc chain starting at parent_rloc.
                #
                rloc = parent_rloc if rloc == None else rloc.next_rloc
                if (rloc == None): break

                #
                # Egress multi-homing: the RLOC's next-hop must be one
                # of the default-route next-hops, otherwise declare it
                # unreachable (log only on the up->unreachable edge).
                #
                if (rloc.rloc_next_hop != None):
                    if (rloc.rloc_next_hop not in default_next_hops):
                        if (rloc.up_state()):
                            device, nh = rloc.rloc_next_hop
                            rloc.state = LISP_RLOC_UNREACH_STATE
                            rloc.last_state_change = lisp_get_timestamp()
                            lisp_update_rtr_updown(rloc.rloc, False)

                            unreach = bold("unreachable", False)
                            lprint("Next-hop {}({}) for RLOC {} is {}".format(
                                nh, device, red(addr_str, False), unreach))
                        #endif
                        continue
                    #endif
                #endif

                #
                # An unreachable RLOC probed within the last interval:
                # keep waiting for its probe-reply.
                #
                last = rloc.last_rloc_probe
                delta = 0 if last == None else time.time() - last
                if (rloc.unreach_state() and
                    delta < LISP_RLOC_PROBE_INTERVAL):
                    lprint("Waiting for probe-reply from RLOC {}".format(
                        red(addr_str, False)))
                    continue
                #endif

                #
                # Nonce-echo: a timed-out echo request means the RLOC is
                # unreachable; a recent echo proves reachability so the
                # probe can be suppressed.
                #
                echo_nonce = lisp_get_echo_nonce(None, addr_str)
                if (echo_nonce and echo_nonce.request_nonce_timeout()):
                    rloc.state = LISP_RLOC_NO_ECHOED_NONCE_STATE
                    rloc.last_state_change = lisp_get_timestamp()
                    unreach = bold("unreachable", False)
                    lprint("RLOC {} went {}, nonce-echo failed".format(
                        red(addr_str, False), unreach))
                    lisp_update_rtr_updown(rloc.rloc, False)
                    continue
                #endif

                if (echo_nonce and echo_nonce.recently_echoed()):
                    lprint(("Suppress RLOC-probe to {}, nonce-echo " +
                        "received").format(red(addr_str, False)))
                    continue
                #endif

                #
                # No probe-reply within the wait period: declare the
                # RLOC unreachable and mark it for all other EIDs too.
                #
                if (rloc.last_rloc_probe != None):
                    last = rloc.last_rloc_probe_reply
                    if (last == None): last = 0
                    delta = time.time() - last
                    if (rloc.up_state() and
                        delta >= LISP_RLOC_PROBE_REPLY_WAIT):
                        rloc.state = LISP_RLOC_UNREACH_STATE
                        rloc.last_state_change = lisp_get_timestamp()
                        lisp_update_rtr_updown(rloc.rloc, False)
                        unreach = bold("unreachable", False)
                        lprint("RLOC {} went {}, probe it".format(
                            red(addr_str, False), unreach))

                        lisp_mark_rlocs_for_other_eids(rloc_entries)
                    #endif
                #endif

                rloc.last_rloc_probe = lisp_get_timestamp()

                reach = "" if rloc.unreach_state() == False else \
                    " unreachable"

                #
                # For an RLOC with a specific next-hop, temporarily
                # install a host route so the probe egresses the right
                # interface.
                #
                nh_str = ""
                nh = None
                if (rloc.rloc_next_hop != None):
                    device, nh = rloc.rloc_next_hop
                    lisp_install_host_route(addr_str, nh, True)
                    nh_str = ", send on nh {}({})".format(nh, device)
                #endif

                #
                # Log the send with the last measured RTT.
                #
                rtt = rloc.print_rloc_probe_rtt()
                rloc_str = addr_str
                if (rloc.translated_port != 0):
                    rloc_str += ":{}".format(rloc.translated_port)
                #endif
                rloc_str = red(rloc_str, False)
                if (rloc.rloc_name != None):
                    rloc_str += " (" + blue(rloc.rloc_name, False) + ")"
                #endif
                lprint("Send {}{} {}, last rtt: {}{}".format(probe_label,
                    reach, rloc_str, rtt, nh_str))

                #
                # Save any pre-existing host route so it can be restored
                # after the probe goes out.
                #
                if (rloc.rloc_next_hop != None):
                    original_nh = lisp_get_host_route_next_hop(addr_str)
                    if (original_nh):
                        lisp_install_host_route(addr_str, original_nh, False)
                    #endif
                #endif

                #
                # Chained next_rloc entries may not have an address yet.
                #
                if (rloc.rloc.is_null()):
                    rloc.rloc.copy_address(parent_rloc.rloc)
                #endif

                #
                # Send the RLOC-probe Map-Request ((S,G) aware).
                #
                seid = None if (group.is_null()) else eid
                deid = eid if (group.is_null()) else group
                lisp_send_map_request(lisp_sockets, 0, seid, deid, rloc)
                last_rloc = parent_rloc

                #
                # Remove the temporary host route and restore the one
                # that was there before.
                #
                if (nh): lisp_install_host_route(addr_str, nh, False)
                if (original_nh):
                    lisp_install_host_route(addr_str, original_nh, True)
                #endif

                #
                # Pace probes: brief pause every 10 sends.
                #
                count += 1
                if ((count % 10) == 0): time.sleep(0.020)
            #endwhile
        #endfor
    #endfor

    lprint("---------- End RLOC Probing ----------")
    return
if 47 - 47: OOooOOo * IiII % I1Ii111 . OoOoOO00 - OoooooooOO / OoooooooOO
if 79 - 79: I11i % i11iIiiIii % I1IiiI . OoooooooOO * oO0o . Ii1I
if 14 - 14: iIii1I11I1II1 / I11i - o0oOOo0O0Ooo / IiII / o0oOOo0O0Ooo . OoO0O00
if 2 - 2: I11i
if 12 - 12: i1IIi . I1Ii111
if 99 - 99: Oo0Ooo / i11iIiiIii
if 81 - 81: Ii1I . i1IIi % iII111i . OoO0O00 % IiII
if 42 - 42: iII111i / Oo0Ooo
def lisp_update_rtr_updown(rtr, updown):
    """
    ITR only: tell the lisp-etr process over IPC that RTR 'rtr' changed
    reachability ('updown' True for up, False for down) so it can adjust
    its registrations. No-op when all RTRs are registered regardless of
    state, or when the RTR is unknown.
    """
    global lisp_ipc_socket

    if (lisp_i_am_itr == False): return

    #
    # When configured to register all RTRs, state does not matter.
    #
    if (lisp_register_all_rtrs): return

    addr_str = rtr.print_address_no_iid()

    #
    # Ignore RTRs we don't know about.
    #
    if (addr_str not in lisp_rtr_list): return

    updown = "up" if updown else "down"
    lprint("Send ETR IPC message, RTR {} has done {}".format(
        red(addr_str, False), bold(updown, False)))

    #
    # Build the IPC message and send it to the lisp-etr process.
    #
    ipc = "rtr%{}%{}".format(addr_str, updown)
    ipc = lisp_command_ipc(ipc, "lisp-itr")
    lisp_ipc(ipc, lisp_ipc_socket, "lisp-etr")
    return
if 63 - 63: II111iiii . oO0o * IiII
if 73 - 73: iII111i . i1IIi + oO0o + OOooOOo + ooOoO0o - iIii1I11I1II1
if 47 - 47: I11i
if 88 - 88: OoO0O00 - OoooooooOO
if 93 - 93: Oo0Ooo * I1IiiI
if 60 - 60: I1Ii111 + OOooOOo % iII111i
if 40 - 40: I11i + oO0o . O0 % oO0o
def lisp_process_rloc_probe_reply(rloc, source, port, nonce, hop_count, ttl):
    """
    Process a received RLOC-probe Map-Reply. Locate the probe-list
    entry for the replying RLOC — keyed by RLOC address, RLOC
    address:port (NAT), or the packet's outer source address — then
    update probe state for every (rloc, eid, group) tuple stored there.
    """
    probe = bold("RLOC-probe reply", False)
    rloc_str = rloc.print_address_no_iid()
    source_str = source.print_address_no_iid()
    plist = lisp_rloc_probe_list

    #
    # Try keys in order: rloc, rloc:port, source. If none is present,
    # the reply is unsolicited.
    # NOTE(review): a reply stored only under source:port is also
    # reported as unsolicited here — confirm this matches intent.
    #
    key = rloc_str
    if (key not in plist):
        key += ":" + str(port)
        if (key not in plist):
            key = source_str
            if (key not in plist):
                key += ":" + str(port)
                lprint(" Received unsolicited {} from {}/{}, port {}". \
                    format(probe, red(rloc_str, False), red(source_str,
                    False), port))
                return
            #endif
        #endif
    #endif

    #
    # Update state for each EID-prefix using this RLOC. In the RTR,
    # only act on entries that match the translated port.
    #
    for rloc, eid, group in lisp_rloc_probe_list[key]:
        if (lisp_i_am_rtr and rloc.translated_port != 0 and
            rloc.translated_port != port): continue
        rloc.process_rloc_probe_reply(nonce, eid, group, hop_count, ttl)
    #endfor
    return
if 38 - 38: Oo0Ooo
if 64 - 64: ooOoO0o % i11iIiiIii
if 10 - 10: Ii1I % oO0o + oO0o * OoOoOO00 % iII111i / o0oOOo0O0Ooo
if 17 - 17: iII111i / I1IiiI . II111iiii - OoO0O00 + iII111i
if 22 - 22: Oo0Ooo - I1ii11iIi11i + I11i . oO0o
if 85 - 85: iIii1I11I1II1 / Ii1I
if 43 - 43: I1IiiI % I1Ii111 - oO0o . II111iiii / iIii1I11I1II1
if 97 - 97: I1Ii111 + I1ii11iIi11i
def lisp_db_list_length():
    """
    Return the number of database-mapping entries, where a dynamic-EID
    entry counts once per discovered dynamic EID and every IID-list
    element counts individually.
    """
    total = 0
    for db in lisp_db_list:
        if (db.dynamic_eid_configured()):
            total += len(db.dynamic_eids)
        else:
            total += 1
        #endif
        total += len(db.eid.iid_list)
    #endfor
    return (total)
if 80 - 80: I11i
if 28 - 28: OoOoOO00 * OoooooooOO * i11iIiiIii
if 88 - 88: ooOoO0o + ooOoO0o / I1Ii111
if 69 - 69: O0 * o0oOOo0O0Ooo + i1IIi * ooOoO0o . o0oOOo0O0Ooo
if 46 - 46: Oo0Ooo / Oo0Ooo * IiII
if 65 - 65: iIii1I11I1II1 * o0oOOo0O0Ooo - iII111i % II111iiii - I1ii11iIi11i
if 65 - 65: I11i
if 92 - 92: iII111i . IiII + i1IIi % i1IIi
def lisp_is_myeid(eid):
    """
    Return True when 'eid' falls within any locally configured
    database-mapping EID-prefix, False otherwise.
    """
    return (any(eid.is_more_specific(db.eid) for db in lisp_db_list))
if 6 - 6: I1Ii111 * OOooOOo + i1IIi - Ii1I / oO0o
if 81 - 81: I1Ii111 % oO0o * i1IIi * OoooooooOO / Oo0Ooo
if 70 - 70: I1IiiI
if 35 - 35: i11iIiiIii
if 59 - 59: ooOoO0o . iII111i - II111iiii
if 30 - 30: o0oOOo0O0Ooo % iII111i - i11iIiiIii
if 25 - 25: i11iIiiIii + OoOoOO00 + oO0o / Ii1I * Oo0Ooo + Oo0Ooo
if 26 - 26: I1IiiI % I1ii11iIi11i + o0oOOo0O0Ooo / I1ii11iIi11i - I1IiiI
if 55 - 55: OoooooooOO
def lisp_format_macs(sa, da):
    """Format two 12-hex-digit MAC strings as 'xxxx-xxxx-xxxx -> xxxx-xxxx-xxxx'."""
    def _dashed(mac):
        # Insert dashes between the three 16-bit groups.
        return "-".join([mac[0:4], mac[4:8], mac[8:12]])

    return("{} -> {}".format(_dashed(sa), _dashed(da)))
if 2 - 2: Oo0Ooo + I11i / OOooOOo + OOooOOo
if 62 - 62: OOooOOo . iIii1I11I1II1 + I1IiiI / OOooOOo
if 90 - 90: OOooOOo
if 29 - 29: OoOoOO00 - I1IiiI / oO0o + Oo0Ooo + I1Ii111 + O0
if 65 - 65: oO0o
if 38 - 38: iIii1I11I1II1 / I1Ii111 + ooOoO0o . II111iiii - iIii1I11I1II1
if 13 - 13: Ii1I
def lisp_get_echo_nonce(rloc, rloc_str):
    """Return the lisp_nonce_echo state for an RLOC, or None.

    If 'rloc' is supplied it takes precedence over 'rloc_str' and its
    printed (no instance-id) address is used as the lookup key. Returns
    None when nonce-echoing is disabled or no state exists for the RLOC.
    """
    if (lisp_nonce_echoing == False): return(None)

    if (rloc): rloc_str = rloc.print_address_no_iid()

    # dict.has_key() was removed in Python 3; 'in' behaves identically.
    echo_nonce = None
    if (rloc_str in lisp_nonce_echo_list):
        echo_nonce = lisp_nonce_echo_list[rloc_str]
    return(echo_nonce)
if 86 - 86: o0oOOo0O0Ooo . Oo0Ooo - Ii1I / i11iIiiIii
if 63 - 63: oO0o - O0 + I1ii11iIi11i + Ii1I / i1IIi
if 77 - 77: O0
if 49 - 49: o0oOOo0O0Ooo / i11iIiiIii
if 36 - 36: II111iiii
if 78 - 78: OoO0O00 + iIii1I11I1II1 * i1IIi
if 7 - 7: i11iIiiIii
if 49 - 49: I1IiiI - oO0o % OOooOOo / O0 / II111iiii
def lisp_decode_dist_name(packet):
    """Extract a null-terminated distinguished-name from the front of 'packet'.

    Returns (remaining-packet, name) on success. Returns [None, None]
    when no terminator is found within the first 256 bytes (i.e. the
    name would be longer than 255 characters) or the buffer runs out.
    """
    terminator = packet.find("\0")
    # Name must terminate and be at most 255 characters long.
    if (terminator == -1 or terminator > 255): return([None, None])

    dist_name = packet[0:terminator]
    packet = packet[terminator + 1::]
    return(packet, dist_name)
if 98 - 98: O0 % I1IiiI + O0 - iIii1I11I1II1 / I11i
if 22 - 22: OOooOOo * i11iIiiIii / oO0o / IiII / I1Ii111
if 84 - 84: Oo0Ooo / I1Ii111 * I1ii11iIi11i + O0 * Oo0Ooo
if 74 - 74: I11i . I11i
if 74 - 74: OoOoOO00 * ooOoO0o * I1Ii111
if 56 - 56: iIii1I11I1II1 * OoO0O00 - oO0o * Ii1I
if 62 - 62: i1IIi + I11i / OOooOOo - OoooooooOO % i1IIi . I1IiiI
if 13 - 13: O0 * iII111i
def lisp_write_flow_log(flow_log):
    """Append buffered flow entries to ./logs/lisp-flow.log and log a count.

    Each entry in 'flow_log' is a 4-element list whose last element is a
    packet object providing print_flow(); the first three elements are
    passed through to it. The buffer is deleted after writing.
    """
    count = 0
    # 'with' guarantees the log file is closed even if print_flow() raises.
    with open("./logs/lisp-flow.log", "a") as output:
        for entry in flow_log:
            packet = entry[3]
            flow = packet.print_flow(entry[0], entry[1], entry[2])
            output.write(flow)
            count += 1

    del(flow_log)

    count = bold(str(count), False)
    lprint("Wrote {} flow entries to ./logs/lisp-flow.log".format(count))
    return
if 70 - 70: iIii1I11I1II1 - I1Ii111 . oO0o . iII111i / o0oOOo0O0Ooo
if 8 - 8: O0 - I1Ii111
if 82 - 82: iII111i + II111iiii
if 29 - 29: O0 % Ii1I * ooOoO0o % O0
if 83 - 83: oO0o
if 95 - 95: Oo0Ooo * O0 % i1IIi / iII111i + oO0o
if 85 - 85: iIii1I11I1II1 / I11i
def lisp_policy_command(kv_pair):
    """Process a "lisp policy" configuration command.

    'kv_pair' maps each configured keyword to its value(s). The
    "datetime-range" entry carries one slot per "match" sub-clause, so
    its length fixes the number of match clauses; every other
    multi-valued match keyword is index-aligned with it, with "" meaning
    "not configured for this clause". "set-*" and "policy-name" keywords
    are policy-wide scalars. The finished policy is saved via
    lisp_policy.save_policy().

    NOTE(review): processing order follows dict iteration order, as in
    the original; "set-instance-id" must be seen before "set-source-eid"
    / "set-destination-eid" for the saved instance-id to be applied —
    presumed guaranteed by the caller's kv_pair construction.
    """
    policy = lisp_policy("")
    set_iid = None

    #
    # Allocate one match clause per configured sub-clause slot.
    #
    match_clauses = []
    for i in range(len(kv_pair["datetime-range"])):
        match_clauses.append(lisp_policy_match())

    for keyword in kv_pair.keys():
        value = kv_pair[keyword]

        #
        # Match instance-id. Applies to both source and dest EIDs of the
        # clause, allocating them if not yet present.
        #
        if (keyword == "instance-id"):
            for i in range(len(match_clauses)):
                v = value[i]
                if (v == ""): continue
                clause = match_clauses[i]
                if (clause.source_eid == None):
                    clause.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
                if (clause.dest_eid == None):
                    clause.dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
                clause.source_eid.instance_id = int(v)
                clause.dest_eid.instance_id = int(v)

        if (keyword == "source-eid"):
            for i in range(len(match_clauses)):
                v = value[i]
                if (v == ""): continue
                clause = match_clauses[i]
                if (clause.source_eid == None):
                    clause.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
                # store_prefix() clobbers the instance-id; preserve it.
                iid = clause.source_eid.instance_id
                clause.source_eid.store_prefix(v)
                clause.source_eid.instance_id = iid

        if (keyword == "destination-eid"):
            for i in range(len(match_clauses)):
                v = value[i]
                if (v == ""): continue
                clause = match_clauses[i]
                if (clause.dest_eid == None):
                    clause.dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
                # store_prefix() clobbers the instance-id; preserve it.
                iid = clause.dest_eid.instance_id
                clause.dest_eid.store_prefix(v)
                clause.dest_eid.instance_id = iid

        if (keyword == "source-rloc"):
            for i in range(len(match_clauses)):
                v = value[i]
                if (v == ""): continue
                clause = match_clauses[i]
                clause.source_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
                clause.source_rloc.store_prefix(v)

        if (keyword == "destination-rloc"):
            for i in range(len(match_clauses)):
                v = value[i]
                if (v == ""): continue
                clause = match_clauses[i]
                clause.dest_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
                clause.dest_rloc.store_prefix(v)

        if (keyword == "rloc-record-name"):
            for i in range(len(match_clauses)):
                v = value[i]
                if (v == ""): continue
                clause = match_clauses[i]
                clause.rloc_record_name = v

        if (keyword == "geo-name"):
            for i in range(len(match_clauses)):
                v = value[i]
                if (v == ""): continue
                clause = match_clauses[i]
                clause.geo_name = v

        if (keyword == "elp-name"):
            for i in range(len(match_clauses)):
                v = value[i]
                if (v == ""): continue
                clause = match_clauses[i]
                clause.elp_name = v

        if (keyword == "rle-name"):
            for i in range(len(match_clauses)):
                v = value[i]
                if (v == ""): continue
                clause = match_clauses[i]
                clause.rle_name = v

        if (keyword == "json-name"):
            for i in range(len(match_clauses)):
                v = value[i]
                if (v == ""): continue
                clause = match_clauses[i]
                clause.json_name = v

        #
        # Datetime range: value is two concatenated 19-char datetimes
        # ("YYYY-MM-DD-HH:MM:SS" lower then upper). Only stored when
        # both parse as valid datetimes.
        #
        if (keyword == "datetime-range"):
            for i in range(len(match_clauses)):
                v = value[i]
                clause = match_clauses[i]
                if (v == ""): continue
                lower = lisp_datetime(v[0:19])
                upper = lisp_datetime(v[19::])
                if (lower.valid_datetime() and upper.valid_datetime()):
                    clause.datetime_lower = lower
                    clause.datetime_upper = upper

        #
        # Policy-wide "set" parameters.
        #
        if (keyword == "set-action"):
            policy.set_action = value

        if (keyword == "set-record-ttl"):
            policy.set_record_ttl = int(value)

        if (keyword == "set-instance-id"):
            if (policy.set_source_eid == None):
                policy.set_source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
            if (policy.set_dest_eid == None):
                policy.set_dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
            set_iid = int(value)
            policy.set_source_eid.instance_id = set_iid
            policy.set_dest_eid.instance_id = set_iid

        if (keyword == "set-source-eid"):
            if (policy.set_source_eid == None):
                policy.set_source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
            policy.set_source_eid.store_prefix(value)
            if (set_iid != None): policy.set_source_eid.instance_id = set_iid

        if (keyword == "set-destination-eid"):
            if (policy.set_dest_eid == None):
                policy.set_dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
            policy.set_dest_eid.store_prefix(value)
            if (set_iid != None): policy.set_dest_eid.instance_id = set_iid

        if (keyword == "set-rloc-address"):
            policy.set_rloc_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
            policy.set_rloc_address.store_address(value)

        if (keyword == "set-rloc-record-name"):
            policy.set_rloc_record_name = value

        if (keyword == "set-elp-name"):
            policy.set_elp_name = value

        if (keyword == "set-geo-name"):
            policy.set_geo_name = value

        if (keyword == "set-rle-name"):
            policy.set_rle_name = value

        if (keyword == "set-json-name"):
            policy.set_json_name = value

        if (keyword == "policy-name"):
            policy.policy_name = value

    #
    # Attach match clauses and persist the policy.
    #
    policy.match_clauses = match_clauses
    policy.save_policy()
    return
if 89 - 89: oO0o . OoOoOO00 . IiII / iIii1I11I1II1 . iIii1I11I1II1 / OoOoOO00
if 86 - 86: OoooooooOO - iIii1I11I1II1 . OoO0O00 * Ii1I / I1Ii111 + I1Ii111
#
# lisp_policy_commands
#
# Command-parser dispatch table for the "lisp policy" configuration command.
# The value is [handler-function, {keyword: constraints}] where constraints
# are, per keyword: [value-required?] or [value-required?, min, max] for
# numeric ranges, or [False, choices...] for enumerated values.
#
lisp_policy_commands = {
    "lisp policy" : [ lisp_policy_command , {
        "policy-name" : [ True ] ,
        "match" : [ ] ,
        # Per-match-clause parameters.
        "instance-id" : [ True , 0 , 0xffffffff ] ,
        "source-eid" : [ True ] ,
        "destination-eid" : [ True ] ,
        "source-rloc" : [ True ] ,
        "destination-rloc" : [ True ] ,
        "rloc-record-name" : [ True ] ,
        "elp-name" : [ True ] ,
        "geo-name" : [ True ] ,
        "rle-name" : [ True ] ,
        "json-name" : [ True ] ,
        "datetime-range" : [ True ] ,
        # Policy-wide "set" parameters.
        "set-action" : [ False , "process" , "drop" ] ,
        "set-record-ttl" : [ True , 0 , 0x7fffffff ] ,
        "set-instance-id" : [ True , 0 , 0xffffffff ] ,
        "set-source-eid" : [ True ] ,
        "set-destination-eid" : [ True ] ,
        "set-rloc-address" : [ True ] ,
        "set-rloc-record-name" : [ True ] ,
        "set-elp-name" : [ True ] ,
        "set-geo-name" : [ True ] ,
        "set-rle-name" : [ True ] ,
        "set-json-name" : [ True ] } ]
}
if 52 - 52: iIii1I11I1II1 % OoO0O00 - IiII % i11iIiiIii - o0oOOo0O0Ooo
if 25 - 25: Oo0Ooo - OOooOOo . i1IIi * OoOoOO00 / I11i / o0oOOo0O0Ooo
if 54 - 54: OoOoOO00 / i1IIi + OOooOOo - I1ii11iIi11i - I1IiiI * I1Ii111
if 91 - 91: OoooooooOO * OoooooooOO
if 27 - 27: ooOoO0o / I1IiiI * I1ii11iIi11i . o0oOOo0O0Ooo
if 30 - 30: o0oOOo0O0Ooo / i11iIiiIii
if 33 - 33: OOooOOo % OoooooooOO
def lisp_send_to_arista(command, interface):
    """Run one CLI configuration command on the local Arista switch.

    'interface' of None means a global config command; otherwise the
    command is applied under "interface <name>". The command is pushed
    through the FastCli shell utility.
    """
    # Idiom: identity comparison with None.
    interface = "" if (interface is None) else "interface " + interface

    log_entry = command
    if (interface != ""): log_entry = interface + ": " + log_entry
    lprint("Send CLI command '{}' to hardware".format(log_entry))

    commands = '''
        enable
        configure
        {}
        {}
    '''.format(interface, command)

    os.system("FastCli -c '{}'".format(commands))
    return
if 18 - 18: OoO0O00 * ooOoO0o
if 32 - 32: oO0o . OoooooooOO - o0oOOo0O0Ooo + II111iiii
if 4 - 4: OOooOOo * I1IiiI - I11i - I11i
if 67 - 67: I1IiiI
if 32 - 32: oO0o * i11iIiiIii - I11i % Oo0Ooo * I1ii11iIi11i
if 79 - 79: II111iiii / Oo0Ooo / I1ii11iIi11i
if 30 - 30: I11i . o0oOOo0O0Ooo / II111iiii
def lisp_arista_is_alive(prefix):
    """Return True if Arista hardware reports route 'prefix' as active.

    Runs a FastCli show command and inspects the hardware-flag column
    ('Y' means programmed/active) of the first route line.
    """
    cli = "enable\nsh plat trident l3 software routes {}\n".format(prefix)
    output = commands.getoutput("FastCli -c '{}'".format(cli))

    #
    # Skip the header line; the flag is the last column of the route line.
    #
    route_line = output.split("\n")[1]
    flag = route_line.split(" ")[-1].replace("\r", "")

    return(flag == "Y")
if 34 - 34: I1ii11iIi11i . i1IIi * O0 / OoooooooOO
if 22 - 22: OOooOOo % o0oOOo0O0Ooo - i11iIiiIii
if 58 - 58: IiII . Ii1I + II111iiii
if 31 - 31: i11iIiiIii + i11iIiiIii + I11i * Oo0Ooo . I11i
if 28 - 28: OOooOOo * iIii1I11I1II1 * OoOoOO00
if 75 - 75: Oo0Ooo % IiII + II111iiii + oO0o
if 35 - 35: I1ii11iIi11i - oO0o - O0 / iII111i % IiII
if 10 - 10: OOooOOo + oO0o - I1Ii111 . I1IiiI
if 11 - 11: I1ii11iIi11i . I1Ii111 / o0oOOo0O0Ooo + IiII
if 73 - 73: OoO0O00 . i11iIiiIii * OoO0O00 * i1IIi + I11i
if 27 - 27: i11iIiiIii / OoOoOO00 % O0 / II111iiii . I11i - ooOoO0o
if 54 - 54: oO0o * II111iiii
if 79 - 79: o0oOOo0O0Ooo . ooOoO0o . Oo0Ooo * OoooooooOO
if 98 - 98: ooOoO0o
if 73 - 73: I1Ii111
if 97 - 97: OoO0O00 * Ii1I + Oo0Ooo
if 83 - 83: II111iiii - Oo0Ooo % II111iiii * o0oOOo0O0Ooo
if 51 - 51: iII111i * iIii1I11I1II1 % Ii1I * Ii1I + i11iIiiIii . OoooooooOO
if 54 - 54: i11iIiiIii . iIii1I11I1II1 * iIii1I11I1II1 + Ii1I % I11i - OoO0O00
if 16 - 16: IiII % iIii1I11I1II1 * i11iIiiIii + O0
if 76 - 76: iII111i * OOooOOo
if 7 - 7: ooOoO0o + o0oOOo0O0Ooo + o0oOOo0O0Ooo
if 73 - 73: IiII % I11i % i11iIiiIii + ooOoO0o
if 83 - 83: Ii1I * I1Ii111 * i11iIiiIii / iIii1I11I1II1 % I1ii11iIi11i
if 40 - 40: iII111i
if 21 - 21: I1Ii111 / iII111i + Oo0Ooo / I1ii11iIi11i / I1Ii111
if 33 - 33: OoooooooOO
if 59 - 59: i11iIiiIii - OoooooooOO . ooOoO0o / i11iIiiIii % iIii1I11I1II1 * I1ii11iIi11i
if 45 - 45: I1ii11iIi11i * I1ii11iIi11i
if 31 - 31: OoO0O00 - OOooOOo . iII111i * I1Ii111 * iII111i + I1ii11iIi11i
if 5 - 5: Oo0Ooo . I1Ii111
if 77 - 77: i11iIiiIii / I1Ii111 / I1ii11iIi11i % oO0o
if 83 - 83: Ii1I % iIii1I11I1II1 / I1ii11iIi11i + I11i
if 23 - 23: iIii1I11I1II1 - I1IiiI
if 51 - 51: OoooooooOO / IiII / I1ii11iIi11i . Oo0Ooo - o0oOOo0O0Ooo * OoooooooOO
if 40 - 40: OoO0O00 / IiII . O0 / I1IiiI + OoO0O00 . o0oOOo0O0Ooo
if 25 - 25: ooOoO0o * I1Ii111 * oO0o
if 64 - 64: Ii1I / I1ii11iIi11i
if 30 - 30: OoooooooOO + O0 / I1ii11iIi11i * o0oOOo0O0Ooo
if 11 - 11: O0 + OoO0O00 - Oo0Ooo - Oo0Ooo . i11iIiiIii
if 15 - 15: Ii1I % i11iIiiIii / OoOoOO00
if 85 - 85: ooOoO0o . i1IIi / iII111i % iIii1I11I1II1 / II111iiii / I1Ii111
if 60 - 60: iIii1I11I1II1 - iIii1I11I1II1 . I11i
if 55 - 55: OoO0O00
def lisp_program_vxlan_hardware(mc):
    """Program a map-cache entry's EID-prefix into VXLAN hardware (Arista).

    Strategy: pick a free address on vlan4094 to act as next-hop, give it
    a static ARP entry whose MAC encodes the RLOC's low three bytes, tie
    that MAC to the VXLAN vtep with a static mac address-table entry, and
    finally point a kernel route for the EID-prefix at the next-hop so
    traffic is VXLAN-encapsulated in hardware.
    """
    #
    # Only on lispers.net Arista installs.
    #
    if (os.path.exists("/persist/local/lispers.net") == False): return

    #
    # Need at least one usable RLOC to point the route at.
    #
    if (len(mc.best_rloc_set) == 0): return

    eid_prefix = mc.eid.print_prefix_no_iid()
    rloc_addr = mc.best_rloc_set[0].rloc.print_address_no_iid()

    #
    # Nothing to do if the route already points at vlan4094.
    #
    route = commands.getoutput("ip route get {} | egrep vlan4094".format(eid_prefix))
    if (route != ""):
        lprint("Route {} already in hardware: '{}'".format(green(eid_prefix, False), route))
        return

    #
    # Both a vxlan interface and vlan4094 (with an address) must exist.
    #
    interfaces = commands.getoutput("ifconfig | egrep 'vxlan|vlan4094'")
    if (interfaces.find("vxlan") == -1):
        lprint("No VXLAN interface found, cannot program hardware")
        return
    if (interfaces.find("vlan4094") == -1):
        lprint("No vlan4094 interface found, cannot program hardware")
        return

    vlan_addr = commands.getoutput("ip addr | egrep vlan4094 | egrep inet")
    if (vlan_addr == ""):
        lprint("No IP address found on vlan4094, cannot program hardware")
        return

    vlan_addr = vlan_addr.split("inet ")[1]
    vlan_addr = vlan_addr.split("/")[0]

    #
    # Gather vlan4094 addresses already present in the ARP table so our
    # fabricated next-hop does not collide. (NOTE(review): only lines
    # containing "(incomplete)" are collected — preserved long-standing
    # behavior; confirm intent before changing.)
    #
    in_use = []
    arp_lines = commands.getoutput("arp -i vlan4094").split("\n")
    for arp_line in arp_lines:
        if (arp_line.find("vlan4094") == -1): continue
        if (arp_line.find("(incomplete)") == -1): continue
        in_use.append(arp_line.split(" ")[0])

    #
    # Pick the first last-octet value in 1..254 that is neither in use
    # nor our own vlan4094 address.
    #
    nexthop = None
    our_addr = vlan_addr
    octets = vlan_addr.split(".")
    for i in range(1, 255):
        octets[3] = str(i)
        candidate = ".".join(octets)
        if (candidate in in_use): continue
        if (candidate == our_addr): continue
        nexthop = candidate
        break

    if (nexthop == None):
        lprint("Address allocation failed for vlan4094, cannot program " + "hardware")
        return

    #
    # Encode the RLOC's low three bytes into a MAC address and install a
    # static ARP entry for the fabricated next-hop.
    #
    rloc_bytes = rloc_addr.split(".")
    byte1 = lisp_hex_string(rloc_bytes[1]).zfill(2)
    byte2 = lisp_hex_string(rloc_bytes[2]).zfill(2)
    byte3 = lisp_hex_string(rloc_bytes[3]).zfill(2)
    mac = "00:00:00:{}:{}:{}".format(byte1, byte2, byte3)
    arista_mac = "0000.00{}.{}{}".format(byte1, byte2, byte3)
    arp_command = "arp -i vlan4094 -s {} {}".format(nexthop, mac)
    os.system(arp_command)

    #
    # Tie the MAC to the VXLAN vtep on the switch.
    #
    vxlan_command = ("mac address-table static {} vlan 4094 " + "interface vxlan 1 vtep {}").format(arista_mac, rloc_addr)
    lisp_send_to_arista(vxlan_command, None)

    #
    # Finally, route the EID-prefix at the fabricated next-hop.
    #
    route_command = "ip route add {} via {}".format(eid_prefix, nexthop)
    os.system(route_command)

    lprint("Hardware programmed with commands:")
    route_command = route_command.replace(eid_prefix, green(eid_prefix, False))
    lprint(" " + route_command)
    lprint(" " + arp_command)
    vxlan_command = vxlan_command.replace(rloc_addr, red(rloc_addr, False))
    lprint(" " + vxlan_command)
    return
if 9 - 9: o0oOOo0O0Ooo
if 92 - 92: i11iIiiIii + OoooooooOO + O0 % oO0o
if 90 - 90: Oo0Ooo * i11iIiiIii
if 95 - 95: I1Ii111 % i11iIiiIii . i11iIiiIii . i11iIiiIii . OoooooooOO - I1Ii111
if 69 - 69: iIii1I11I1II1 * oO0o
if 80 - 80: IiII - oO0o % Ii1I - iIii1I11I1II1 . OoO0O00
if 64 - 64: I1IiiI % i11iIiiIii / oO0o
def lisp_clear_hardware_walk(mc, parms):
    """Map-cache walker callback: delete the kernel route for one entry.

    Always returns [True, None] so the cache walk continues.
    """
    prefix = mc.eid.print_prefix_no_iid()
    os.system("ip route delete {}".format(prefix))
    return([True, None])
if 78 - 78: II111iiii - Oo0Ooo . iIii1I11I1II1 - ooOoO0o . oO0o
if 84 - 84: iII111i . ooOoO0o * I1IiiI * Oo0Ooo / I1Ii111
if 93 - 93: i1IIi * i11iIiiIii % OoOoOO00 % iII111i
if 31 - 31: OoO0O00
if 89 - 89: II111iiii
if 33 - 33: OOooOOo / oO0o % OoOoOO00 * O0
if 65 - 65: OoO0O00 % OoOoOO00 % I1ii11iIi11i / OoooooooOO
if 85 - 85: O0 * OOooOOo % I1Ii111
def lisp_clear_map_cache():
    """Flush the entire map-cache and all per-RLOC state derived from it.

    Clears the RLOC-probe list, encap/decap crypto keys and the RTR list,
    removes hardware routes when hardware programming is enabled, and
    tells any external data-plane to restart/flush as well.
    """
    global lisp_map_cache, lisp_rloc_probe_list
    global lisp_crypto_keys_by_rloc_encap, lisp_crypto_keys_by_rloc_decap
    global lisp_rtr_list

    banner = bold("User cleared", False)
    entry_count = lisp_map_cache.cache_count
    lprint("{} map-cache with {} entries".format(banner, entry_count))

    # Remove kernel/hardware routes before dropping the cache itself.
    if (lisp_program_hardware):
        lisp_map_cache.walk_cache(lisp_clear_hardware_walk, None)
    lisp_map_cache = lisp_cache()

    # State keyed by cached RLOCs is now stale — flush it all.
    lisp_rloc_probe_list = {}
    lisp_crypto_keys_by_rloc_encap = {}
    lisp_crypto_keys_by_rloc_decap = {}
    lisp_rtr_list = {}

    # Have the external data-plane (if any) flush too.
    lisp_process_data_plane_restart(True)
    return
if 56 - 56: ooOoO0o % OoO0O00 * i11iIiiIii % IiII % I1IiiI - oO0o
if 37 - 37: iII111i - Ii1I . oO0o
if 47 - 47: IiII / I1ii11iIi11i . o0oOOo0O0Ooo . ooOoO0o + OOooOOo . OOooOOo
if 25 - 25: oO0o
if 43 - 43: Ii1I - o0oOOo0O0Ooo % oO0o - O0
if 20 - 20: OoO0O00 . ooOoO0o / OoOoOO00 - OoOoOO00 . iII111i / OOooOOo
if 39 - 39: iIii1I11I1II1 % ooOoO0o
if 75 - 75: i1IIi * II111iiii * O0 * i11iIiiIii % iII111i / iII111i
if 36 - 36: IiII / I1IiiI % iII111i / iII111i
if 38 - 38: OOooOOo * I1ii11iIi11i * I1Ii111 + I11i
if 65 - 65: O0 + O0 * I1Ii111
def lisp_encapsulate_rloc_probe(lisp_sockets, rloc, nat_info, packet):
    """Data-encapsulate an RLOC-probe so it can traverse a NAT.

    Builds an inner IPv4+UDP header (destined to the LISP control port)
    around 'packet', wraps it in a lisp_packet addressed to 'rloc', and
    sends it on the data socket (lisp_sockets[3]). 'nat_info', when
    present, supplies the translated port and hostname of the NATed xTR.
    """
    if (len(lisp_sockets) != 4): return

    local_rloc = lisp_myrlocs[0]

    #
    # Inner IPv4 header: 20-byte header + 8-byte UDP header precede the
    # control message, TTL 64, protocol 17 (UDP). Checksum computed over
    # the packed header.
    #
    total_length = len(packet) + 28
    ip = struct.pack("BBHIBBHII", 0x45, 0, socket.htons(total_length), 0, 64,
        17, 0, socket.htonl(local_rloc.address), socket.htonl(rloc.address))
    ip = lisp_ip_checksum(ip)

    # Inner UDP header: dest port is the LISP control port; source port
    # and checksum left zero.
    udp = struct.pack("HHHH", 0, socket.htons(LISP_CTRL_PORT),
        socket.htons(total_length - 20), 0)

    #
    # Wrap in a lisp_packet and fill in inner/outer addressing.
    #
    packet = lisp_packet(ip + udp + packet)

    packet.inner_dest.copy_address(rloc)
    packet.inner_dest.instance_id = 0xffffff
    packet.inner_source.copy_address(local_rloc)
    packet.inner_ttl = 64
    packet.outer_dest.copy_address(rloc)
    packet.outer_source.copy_address(local_rloc)
    packet.outer_version = packet.outer_dest.afi_to_version()
    packet.outer_ttl = 64
    packet.encap_port = nat_info.port if nat_info else LISP_DATA_PORT

    rloc_string = red(rloc.print_address_no_iid(), False)
    if (nat_info):
        hostname = " {}".format(blue(nat_info.hostname, False))
        probe_type = bold("RLOC-probe request", False)
    else:
        hostname = ""
        probe_type = bold("RLOC-probe reply", False)

    lprint(("Data encapsulate {} to {}{} port {} for " + "NAT-traversal").format(probe_type, rloc_string, hostname, packet.encap_port))

    #
    # Encode and transmit; encode(None) returns None on failure.
    #
    if (packet.encode(None) == None): return
    packet.print_packet("Send", True)

    raw_socket = lisp_sockets[3]
    packet.send_packet(raw_socket, packet.outer_dest)
    del(packet)
    return
if 99 - 99: O0 / I1IiiI
if 11 - 11: I1IiiI
if 92 - 92: iIii1I11I1II1 - I11i - OOooOOo / Ii1I . o0oOOo0O0Ooo . OoO0O00
if 33 - 33: oO0o / I11i % ooOoO0o * I11i / oO0o - OoOoOO00
if 89 - 89: iIii1I11I1II1 . II111iiii + IiII
if 8 - 8: I1ii11iIi11i / II111iiii / II111iiii
if 62 - 62: I11i - iII111i . Ii1I
if 20 - 20: I1ii11iIi11i
def lisp_get_default_route_next_hops():
    """Return default-route next-hops as a list of [interface, gateway] pairs."""
    #
    # macOS: a single default route from "route -n get default".
    #
    if (lisp_is_macos()):
        lines = commands.getoutput("route -n get default").split("\n")
        gateway = interface = None
        for line in lines:
            if (line.find("gateway: ") != -1): gateway = line.split(": ")[1]
            if (line.find("interface: ") != -1): interface = line.split(": ")[1]
        return([[interface, gateway]])

    #
    # Linux: possibly multiple "default via <gw> dev <intf>" routes.
    # Metric'ed routes are skipped.
    #
    routes = commands.getoutput("ip route | egrep 'default via'").split("\n")

    next_hops = []
    for route in routes:
        if (route.find(" metric ") != -1): continue
        fields = route.split(" ")
        try:
            gw_index = fields.index("via") + 1
            if (gw_index >= len(fields)): continue
            intf_index = fields.index("dev") + 1
            if (intf_index >= len(fields)): continue
        except:
            continue
        next_hops.append([fields[intf_index], fields[gw_index]])
    return(next_hops)
if 21 - 21: OoooooooOO
if 63 - 63: I1IiiI / o0oOOo0O0Ooo - I1Ii111
if 49 - 49: iII111i . OoOoOO00
if 91 - 91: OOooOOo / Ii1I / IiII * OOooOOo
if 68 - 68: I11i
if 91 - 91: I11i
if 24 - 24: ooOoO0o . i1IIi - O0 + I11i
def lisp_get_host_route_next_hop(rloc):
    """Return the next-hop of the installed host route for 'rloc', or None."""
    cli = "ip route | egrep '{} via'".format(rloc)
    fields = commands.getoutput(cli).split(" ")

    try:
        nh_index = fields.index("via") + 1
    except:
        return(None)

    if (nh_index >= len(fields)): return(None)
    return(fields[nh_index])
if 51 - 51: IiII
if 53 - 53: O0
if 19 - 19: o0oOOo0O0Ooo / iII111i % OoOoOO00
if 65 - 65: o0oOOo0O0Ooo
if 89 - 89: iIii1I11I1II1 + OoooooooOO + i1IIi + OoooooooOO % IiII * OoO0O00
if 53 - 53: OOooOOo . IiII % I11i - OoO0O00 - Oo0Ooo
if 58 - 58: I1Ii111 / OoooooooOO . I11i % I1Ii111
def lisp_install_host_route(dest, nh, install):
    """Add (install=True) or delete (install=False) a /32 host route.

    A next-hop of None installs/deletes a route with no "via" clause.
    """
    action = "add" if install else "delete"
    nh_string = "none" if nh == None else nh

    lprint("{} host-route {}, nh {}".format(action.title(), dest, nh_string))

    if (nh == None):
        route_cmd = "ip route {} {}/32".format(action, dest)
    else:
        route_cmd = "ip route {} {}/32 via {}".format(action, dest, nh)
    os.system(route_cmd)
    return
if 10 - 10: I1ii11iIi11i
if 3 - 3: ooOoO0o * O0 / o0oOOo0O0Ooo
if 22 - 22: OoOoOO00 + OOooOOo . iII111i % iIii1I11I1II1 - I11i
if 23 - 23: OoOoOO00 * I1Ii111
if 18 - 18: o0oOOo0O0Ooo % i11iIiiIii . Ii1I . O0
if 85 - 85: I1ii11iIi11i * iIii1I11I1II1 + o0oOOo0O0Ooo * OoO0O00
if 25 - 25: o0oOOo0O0Ooo / Ii1I / Oo0Ooo . ooOoO0o - ooOoO0o * O0
if 14 - 14: O0 - Ii1I + iIii1I11I1II1 + II111iiii . ooOoO0o + Ii1I
def lisp_checkpoint(checkpoint_list):
    """Write map-cache checkpoint entries, one per line, to the checkpoint file.

    No-op unless checkpointing is enabled (lisp_checkpoint_map_cache).
    """
    if (lisp_checkpoint_map_cache == False): return

    # 'with' guarantees the checkpoint file is closed even on error.
    with open(lisp_checkpoint_filename, "w") as checkpoint_file:
        for entry in checkpoint_list:
            checkpoint_file.write(entry + "\n")

    lprint("{} {} entries to file '{}'".format(bold("Checkpoint", False),
        len(checkpoint_list), lisp_checkpoint_filename))
    return
if 2 - 2: i11iIiiIii % iIii1I11I1II1 * OOooOOo
if 45 - 45: oO0o + i1IIi + iII111i + o0oOOo0O0Ooo * OOooOOo + ooOoO0o
if 83 - 83: OoO0O00 - ooOoO0o / OoooooooOO % iIii1I11I1II1 - II111iiii
if 73 - 73: Oo0Ooo + II111iiii - IiII
if 60 - 60: i1IIi . i11iIiiIii / i1IIi . I11i % OOooOOo
if 47 - 47: oO0o + IiII * I1Ii111 % o0oOOo0O0Ooo - O0 % IiII
if 66 - 66: II111iiii * I1IiiI . Oo0Ooo * OoooooooOO % OoOoOO00 . II111iiii
if 4 - 4: iII111i + I1Ii111 % OoOoOO00 / Ii1I
def lisp_load_checkpoint():
    """
    Rebuild the map-cache from the checkpoint file written by
    lisp_checkpoint(). Each line reads "<eid-prefix> rloc <rloc-set>"
    where <rloc-set> is "addr priority weight" triples joined by ", ",
    or "native-forward"/empty when the entry has no RLOCs.
    """
    if (lisp_checkpoint_map_cache == False): return
    if (os.path.exists(lisp_checkpoint_filename) == False): return

    checkpoint_file = open(lisp_checkpoint_filename, "r")

    count = 0
    for line in checkpoint_file:
        count += 1
        eid_and_rlocs = line.split(" rloc ")
        no_rlocs = (eid_and_rlocs[1] in ["native-forward\n", "\n"])
        rloc_strings = [] if no_rlocs else eid_and_rlocs[1].split(", ")

        # Rebuild the rloc-set array from "addr priority weight" triples.
        rloc_set = []
        for rloc_string in rloc_strings:
            rloc_entry = lisp_rloc(False)
            fields = rloc_string.split(" ")
            rloc_entry.rloc.store_address(fields[0])
            rloc_entry.priority = int(fields[1])
            rloc_entry.weight = int(fields[2])
            rloc_set.append(rloc_entry)

        mc = lisp_mapping("", "", rloc_set)
        if (mc != None):
            mc.eid.store_prefix(eid_and_rlocs[0])
            mc.checkpoint_entry = True
            # Checkpointed entries get the negative-map-reply TTL.
            mc.map_cache_ttl = LISP_NMR_TTL * 60
            if (rloc_set == []): mc.action = LISP_NATIVE_FORWARD_ACTION
            mc.add_cache()
            continue

        # Mapping could not be built - don't count this line.
        count -= 1

    checkpoint_file.close()
    lprint("{} {} map-cache entries from file '{}'".format(
        bold("Loaded", False), count, lisp_checkpoint_filename))
    return
if 70 - 70: ooOoO0o - IiII - OoO0O00 / I11i
if 59 - 59: IiII % ooOoO0o . iII111i / Ii1I * Ii1I
if 73 - 73: I1ii11iIi11i . oO0o % I11i . I1ii11iIi11i / I1Ii111 / II111iiii
if 23 - 23: OoooooooOO . o0oOOo0O0Ooo
if 76 - 76: I1Ii111
if 91 - 91: iIii1I11I1II1 / Ii1I . I1IiiI
if 63 - 63: ooOoO0o . Ii1I - I1Ii111 - oO0o * I1Ii111 + ooOoO0o
if 85 - 85: II111iiii + I1ii11iIi11i
if 33 - 33: iII111i
if 14 - 14: O0 * Oo0Ooo / i1IIi
if 95 - 95: O0 % i1IIi % ooOoO0o % oO0o - I1IiiI
if 78 - 78: II111iiii % OOooOOo
if 6 - 6: OOooOOo
if 21 - 21: I1Ii111 - Ii1I - i1IIi % oO0o
def lisp_write_checkpoint_entry(checkpoint_list, mc):
    """
    Append a one-line textual form of map-cache entry 'mc' to
    'checkpoint_list': "<eid-prefix> rloc addr prio weight, ..." or
    "<eid-prefix> rloc native-forward". No-op when checkpointing is off.
    """
    if (lisp_checkpoint_map_cache == False): return

    line = "{} rloc ".format(mc.eid.print_prefix())

    for rloc_entry in mc.rloc_set:
        if (rloc_entry.rloc.is_null()): continue
        line += "{} {} {}, ".format(rloc_entry.rloc.print_address_no_iid(),
            rloc_entry.priority, rloc_entry.weight)

    if (mc.rloc_set != []):
        # Strip the trailing ", " left by the loop above.
        line = line[0:-2]
    elif (mc.action == LISP_NATIVE_FORWARD_ACTION):
        line += "native-forward"

    checkpoint_list.append(line)
    return
if 56 - 56: ooOoO0o
if 82 - 82: ooOoO0o . IiII . I1Ii111 - iIii1I11I1II1 + II111iiii . OoOoOO00
if 59 - 59: Oo0Ooo
if 98 - 98: I1Ii111 * II111iiii / Oo0Ooo . Oo0Ooo % I1Ii111
if 52 - 52: OoOoOO00
if 59 - 59: ooOoO0o / OoooooooOO
if 71 - 71: OOooOOo + I11i * O0 / o0oOOo0O0Ooo + I1IiiI + Ii1I
def lisp_check_dp_socket():
    """
    Return True when the data-plane IPC named socket exists on disk,
    else log the absence and return False.
    """
    if (os.path.exists(lisp_ipc_dp_socket_name)): return(True)

    lprint("Socket '{}' {}".format(lisp_ipc_dp_socket_name,
        bold("does not exist", False)))
    return(False)
if 93 - 93: I11i
if 2 - 2: i1IIi / I1IiiI
if 29 - 29: Ii1I * iIii1I11I1II1 * i1IIi
if 83 - 83: oO0o % O0 . I11i / I11i / I1IiiI - OoOoOO00
if 91 - 91: iIii1I11I1II1 - IiII + iIii1I11I1II1 % Oo0Ooo % I1IiiI
if 84 - 84: iIii1I11I1II1 . Oo0Ooo - OoooooooOO % Oo0Ooo
if 27 - 27: I1ii11iIi11i - ooOoO0o + I11i - I1ii11iIi11i
def lisp_write_to_dp_socket(entry):
    """
    Serialize dict 'entry' to JSON and send it on the data-plane IPC
    named socket. Best effort: failures are logged, never raised.
    """
    # Pre-bind the value the except clause formats. The original bound
    # it only inside the try, so when json.dumps() itself raised, the
    # handler hit an unbound name and a NameError escaped the function.
    record = entry
    try:
        record = json.dumps(entry)
        write_str = bold("Write IPC", False)
        lprint("{} record to named socket: '{}'".format(write_str, record))
        lisp_ipc_dp_socket.sendto(record, lisp_ipc_dp_socket_name)
    except:
        lprint("Failed to write IPC record to named socket: '{}'".format(
            record))
    return
if 4 - 4: o0oOOo0O0Ooo + o0oOOo0O0Ooo - Oo0Ooo
if 87 - 87: II111iiii + iII111i / I1Ii111 - I11i
if 90 - 90: Ii1I + Ii1I . O0 - I1ii11iIi11i
if 40 - 40: OoooooooOO
if 100 - 100: IiII - I11i
if 79 - 79: iII111i % O0
if 73 - 73: Oo0Ooo
if 13 - 13: OOooOOo - ooOoO0o
if 8 - 8: I1Ii111 % oO0o
def lisp_write_ipc_keys(rloc):
    """
    For every map-cache entry whose EID is RLOC-probed via 'rloc', push
    an "add" map-cache IPC record (with current keys) to the data plane.
    """
    probe_key = rloc.rloc.print_address_no_iid()
    if (rloc.translated_port != 0):
        probe_key += ":" + str(rloc.translated_port)
    if ((probe_key in lisp_rloc_probe_list) == False): return

    for r, eid, group in lisp_rloc_probe_list[probe_key]:
        mc = lisp_map_cache.lookup_cache(eid, True)
        if (mc == None): continue
        lisp_write_ipc_map_cache(True, mc)
    return
if 90 - 90: OoO0O00
if 54 - 54: OOooOOo + Oo0Ooo * o0oOOo0O0Ooo - iIii1I11I1II1 * ooOoO0o
if 76 - 76: i11iIiiIii * I1IiiI - IiII . o0oOOo0O0Ooo % iII111i . i11iIiiIii
if 69 - 69: O0 + o0oOOo0O0Ooo / ooOoO0o
if 7 - 7: Ii1I . Ii1I . iIii1I11I1II1 / ooOoO0o
if 70 - 70: O0
if 42 - 42: I1Ii111 + OoooooooOO + I11i
def lisp_write_ipc_map_cache(add_or_delete, mc, dont_send=False):
    """
    Build (and normally send) the "map-cache" add/delete IPC record for
    map-cache entry 'mc'. Multicast entries carry an "rles" array from
    the first RLOC's replication list, unicast entries an "rlocs" array.
    Returns the record dict; set 'dont_send' to only build it.
    """
    if (lisp_i_am_etr): return
    if (lisp_ipc_dp_socket == None): return
    if (lisp_check_dp_socket() == False): return

    ipc = {"type": "map-cache", "opcode": "add" if add_or_delete else "delete"}

    multicast = (mc.group.is_null() == False)
    if (multicast):
        ipc["eid-prefix"] = mc.group.print_prefix_no_iid()
        ipc["rles"] = []
    else:
        ipc["eid-prefix"] = mc.eid.print_prefix_no_iid()
        ipc["rlocs"] = []
    ipc["instance-id"] = str(mc.eid.instance_id)

    if (multicast):
        # Replication entries live on the first RLOC's RLE.
        if (len(mc.rloc_set) >= 1 and mc.rloc_set[0].rle):
            for rle_node in mc.rloc_set[0].rle.rle_forwarding_list:
                address = rle_node.address.print_address_no_iid()
                port = str(4341) if rle_node.translated_port == 0 else \
                    str(rle_node.translated_port)

                record = {"rle": address, "port": port}
                ekey, ikey = rle_node.get_encap_keys()
                record = lisp_build_json_keys(record, ekey, ikey,
                    "encrypt-key")
                ipc["rles"].append(record)
    else:
        for rloc_entry in mc.rloc_set:
            # Only usable IPv4/IPv6 RLOCs in up state are sent.
            if (rloc_entry.rloc.is_ipv4() == False and
                rloc_entry.rloc.is_ipv6() == False): continue
            if (rloc_entry.up_state() == False): continue

            port = str(4341) if rloc_entry.translated_port == 0 else \
                str(rloc_entry.translated_port)

            record = {"rloc": rloc_entry.rloc.print_address_no_iid(),
                "priority": str(rloc_entry.priority),
                "weight": str(rloc_entry.weight), "port": port}
            ekey, ikey = rloc_entry.get_encap_keys()
            record = lisp_build_json_keys(record, ekey, ikey, "encrypt-key")
            ipc["rlocs"].append(record)

    if (dont_send == False): lisp_write_to_dp_socket(ipc)
    return(ipc)
if 24 - 24: OoooooooOO . II111iiii
if 97 - 97: II111iiii . O0
if 18 - 18: iII111i
if 35 - 35: ooOoO0o / O0 / iIii1I11I1II1 - iIii1I11I1II1 + I11i
if 8 - 8: I1Ii111 . oO0o % Oo0Ooo * OoooooooOO
if 25 - 25: OoO0O00
if 54 - 54: O0
def lisp_write_ipc_decap_key(rloc_addr, keys):
    """
    Send a "decap-keys" IPC record for peer 'rloc_addr' (either "addr"
    or "addr:port") carrying the unicast crypto keys from slot 1 of
    'keys'. ITRs never decap, so they skip this.
    """
    if (lisp_i_am_itr): return
    if (lisp_ipc_dp_socket == None): return
    if (lisp_check_dp_socket() == False): return

    # Need key material in slot 1 of the key array.
    if (keys == None or len(keys) == 0 or keys[1] == None): return

    addr_and_port = rloc_addr.split(":")
    ipc = {"type": "decap-keys", "rloc": addr_and_port[0]}
    if (len(addr_and_port) != 1): ipc["port"] = addr_and_port[1]

    ipc = lisp_build_json_keys(ipc, keys[1].encrypt_key, keys[1].icv_key,
        "decrypt-key")
    lisp_write_to_dp_socket(ipc)
    return
if 17 - 17: I1ii11iIi11i + Ii1I * I1Ii111
if 98 - 98: OoOoOO00 . I1ii11iIi11i + oO0o
if 95 - 95: O0 + II111iiii / Ii1I % IiII . OoOoOO00
if 85 - 85: Ii1I * Oo0Ooo * ooOoO0o
if 48 - 48: i11iIiiIii
if 45 - 45: i1IIi + I1ii11iIi11i
if 49 - 49: i11iIiiIii . I1ii11iIi11i
if 91 - 91: ooOoO0o - OOooOOo - OOooOOo * o0oOOo0O0Ooo
def lisp_build_json_keys(entry, ekey, ikey, key_type):
    """
    Attach crypto key material to IPC record dict 'entry' as a "keys"
    array with a single key-id 1 element; 'key_type' names the encrypt
    key field ("encrypt-key" or "decrypt-key"). Returns 'entry'
    unchanged when there is no encryption key.
    """
    if (ekey == None): return(entry)

    entry["keys"] = [{"key-id": "1", key_type: ekey, "icv-key": ikey}]
    return(entry)
if 39 - 39: ooOoO0o + I11i
if 24 - 24: o0oOOo0O0Ooo
if 5 - 5: i11iIiiIii - oO0o + o0oOOo0O0Ooo % ooOoO0o
if 63 - 63: oO0o
if 7 - 7: IiII / i11iIiiIii - OOooOOo
if 9 - 9: II111iiii + i11iIiiIii % I1Ii111 - Oo0Ooo * OOooOOo
if 55 - 55: I1Ii111 + ooOoO0o
def lisp_write_ipc_database_mappings(ephem_port):
    """
    Send the data plane the ETR's IPv4/IPv6 database-mapping prefixes,
    followed by an "etr-nat-port" record carrying the ephemeral port
    used for NAT traversal. ETR-only.
    """
    if (lisp_i_am_etr == False): return
    if (lisp_ipc_dp_socket == None): return
    if (lisp_check_dp_socket() == False): return

    # Only address-family EIDs are forwarded in the data plane.
    records = []
    for db in lisp_db_list:
        if (db.eid.is_ipv4() == False and db.eid.is_ipv6() == False): continue
        records.append({"instance-id": str(db.eid.instance_id),
            "eid-prefix": db.eid.print_prefix_no_iid()})

    lisp_write_to_dp_socket({"type": "database-mappings",
        "database-mappings": records})

    # Tell the data plane what source port to use for encapsulation
    # when behind a NAT.
    lisp_write_to_dp_socket({"type": "etr-nat-port", "port": ephem_port})
    return
if 59 - 59: OoOoOO00 * I1ii11iIi11i / I1IiiI * II111iiii + OoOoOO00
if 6 - 6: OoOoOO00 % oO0o + I11i * Ii1I
if 13 - 13: I1ii11iIi11i / Oo0Ooo - I1Ii111 * OoOoOO00
if 47 - 47: IiII
if 76 - 76: iII111i / II111iiii / I11i
if 62 - 62: I1ii11iIi11i
if 100 - 100: iII111i / ooOoO0o / IiII % II111iiii
def lisp_write_ipc_interfaces():
    """
    Send the data plane an "interfaces" IPC record listing every
    configured interface that has an instance-id assigned. Not used on
    an ETR.
    """
    if (lisp_i_am_etr): return
    if (lisp_ipc_dp_socket == None): return
    if (lisp_check_dp_socket() == False): return

    interfaces = []
    for interface in lisp_myinterfaces.values():
        if (interface.instance_id == None): continue
        interfaces.append({"interface": interface.device,
            "instance-id": str(interface.instance_id)})

    lisp_write_to_dp_socket({"type": "interfaces", "interfaces": interfaces})
    return
if 52 - 52: OoooooooOO % oO0o - I11i % OoOoOO00 . II111iiii
if 62 - 62: Ii1I . I1ii11iIi11i . iII111i + I11i * o0oOOo0O0Ooo
if 56 - 56: oO0o * iIii1I11I1II1 . II111iiii - II111iiii + II111iiii - i11iIiiIii
if 79 - 79: iII111i
if 29 - 29: Ii1I * I1Ii111 / OoO0O00 - O0 - i11iIiiIii * I1IiiI
if 2 - 2: OoOoOO00 . I1ii11iIi11i * I1ii11iIi11i
if 42 - 42: OoO0O00 . OoO0O00 + II111iiii - IiII - OOooOOo * Oo0Ooo
if 47 - 47: oO0o - OoooooooOO + iII111i
if 69 - 69: I1ii11iIi11i - I1IiiI % oO0o + OOooOOo - I1Ii111
if 5 - 5: ooOoO0o . OoO0O00
if 40 - 40: iII111i
if 87 - 87: IiII / II111iiii
if 44 - 44: OoO0O00 . I1Ii111 - OoooooooOO * OoOoOO00 . OoO0O00
if 84 - 84: OOooOOo . OOooOOo . oO0o % iII111i * Oo0Ooo - iIii1I11I1II1
def lisp_parse_auth_key(value):
    """
    Parse an authentication-key configuration string. A plain string
    maps to {0: value}; a "[<id>]key[<id>]key..." string maps each
    numeric key-id to its key. Returns None when a key-id is not a
    number.
    """
    segments = value.split("[")
    if (len(segments) == 1): return({0: value})

    keys = {}
    for segment in segments:
        if (segment == ""): continue
        bracket = segment.find("]")
        try:
            key_id = int(segment[0:bracket])
        except:
            return
        keys[key_id] = segment[bracket + 1::]
    return(keys)
if 75 - 75: o0oOOo0O0Ooo . I1Ii111 - I1Ii111 % Ii1I * OoooooooOO
if 99 - 99: OOooOOo + o0oOOo0O0Ooo - OOooOOo . i1IIi
if 86 - 86: Ii1I % oO0o - i11iIiiIii - O0 + IiII + iII111i
if 100 - 100: OoO0O00 . Oo0Ooo
if 29 - 29: OoO0O00
if 34 - 34: O0 - o0oOOo0O0Ooo % OOooOOo . OoO0O00 % IiII
if 63 - 63: O0 % iIii1I11I1II1 . o0oOOo0O0Ooo . I1IiiI * Ii1I % i1IIi
if 47 - 47: II111iiii * I1ii11iIi11i
if 70 - 70: I1ii11iIi11i - o0oOOo0O0Ooo
if 71 - 71: I1ii11iIi11i * i1IIi
if 67 - 67: I1ii11iIi11i % OoOoOO00 . iII111i / Ii1I . I1IiiI
if 48 - 48: IiII + II111iiii . I1IiiI % o0oOOo0O0Ooo
if 57 - 57: OOooOOo . I11i % OoOoOO00
if 68 - 68: iIii1I11I1II1 % I1ii11iIi11i % II111iiii / O0 + iII111i
if 78 - 78: iII111i - OOooOOo / I1Ii111
if 38 - 38: I11i % i1IIi + o0oOOo0O0Ooo + I1ii11iIi11i + I1IiiI
def lisp_reassemble(packet):
    """
    Queue an IPv4 fragment of a LISP encapsulated packet and, once
    every fragment for its IPv4 ident has arrived, return the packet
    reassembled behind a rebuilt IPv4 header. An unfragmented packet is
    returned as-is; None is returned while fragments are outstanding or
    when the fragment set is not LISP traffic.
    """
    # Flags/fragment-offset field, bytes 6-7 of the IPv4 header.
    field = socket.ntohs(struct.unpack("H", packet[6:8])[0])

    # Not a fragment (offset 0, no more-fragments; or only DF set).
    if (field == 0 or field == 0x4000): return(packet)

    ident = socket.ntohs(struct.unpack("H", packet[4:6])[0])
    total_length = socket.ntohs(struct.unpack("H", packet[2:4])[0])

    # Last fragment: more-fragments clear with a non-zero offset. Queue
    # elements are [offset-bytes, payload-length, packet, is-last].
    last_fragment = (field & 0x2000 == 0 and (field & 0x1fff) != 0)
    fragment = [(field & 0x1fff) * 8, total_length - 20, packet,
        last_fragment]

    # Only the first fragment carries the UDP header - verify the ports
    # are LISP encapsulation (4341 data, 8472, 4789/VXLAN). If not,
    # remember the ident with a None packet so trailing fragments of
    # this ident get dropped too.
    if (field == 0x2000):
        sport, dport = struct.unpack("HH", packet[20:24])
        sport = socket.ntohs(sport)
        dport = socket.ntohs(dport)
        if (dport not in [4341, 8472, 4789] and sport != 4341):
            lisp_reassembly_queue[ident] = []
            fragment[2] = None

    # First fragment seen for this ident - create its queue.
    if ((ident in lisp_reassembly_queue) == False):
        lisp_reassembly_queue[ident] = []

    queue = lisp_reassembly_queue[ident]

    # Ident was flagged non-LISP above - drop the fragment.
    if (len(queue) == 1 and queue[0][2] == None):
        dprint("Drop non-LISP encapsulated fragment 0x{}".format(
            lisp_hex_string(ident).zfill(4)))
        return(None)

    queue.append(fragment)
    queue = sorted(queue)

    # Log the outer RLOC pair of this fragment.
    addr = lisp_address(LISP_AFI_IPV4, "", 32, 0)
    addr.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])
    source_rloc = addr.print_address_no_iid()
    addr.address = socket.ntohl(struct.unpack("I", packet[16:20])[0])
    dest_rloc = addr.print_address_no_iid()
    addr = red("{} -> {}".format(source_rloc, dest_rloc), False)

    dprint("{}{} fragment, RLOCs: {}, packet 0x{}, frag-offset: 0x{}".format(
        bold("Received", False),
        " non-LISP encapsulated" if fragment[2] == None else "", addr,
        lisp_hex_string(ident).zfill(4), lisp_hex_string(field).zfill(4)))

    # Wait until both the first and the last fragment are present.
    if (queue[0][0] != 0 or queue[-1][3] == False): return(None)

    # Any hole in the offset space means more fragments are coming.
    previous = queue[0]
    for frag in queue[1::]:
        offset = frag[0]
        prev_offset, prev_length = previous[0], previous[1]
        if (prev_offset + prev_length != offset): return(None)
        previous = frag

    lisp_reassembly_queue.pop(ident)

    # Concatenate payloads, stripping the 20-byte IPv4 header from every
    # fragment but the first.
    packet = queue[0][2]
    for frag in queue[1::]: packet += frag[2][20::]

    dprint("{} fragments arrived for packet 0x{}, length {}".format(
        bold("All", False), lisp_hex_string(ident).zfill(4), len(packet)))

    # Rebuild the header: new total-length, ident and fragment fields
    # cleared, checksum zeroed then recomputed.
    new_length = socket.htons(len(packet))
    header = packet[0:2] + struct.pack("H", new_length) + packet[4:6] + \
        struct.pack("H", 0) + packet[8:10] + struct.pack("H", 0) + \
        packet[12:20]

    header = lisp_ip_checksum(header)
    return(header + packet[20::])
if 13 - 13: OOooOOo / OoooooooOO
if 7 - 7: II111iiii - ooOoO0o
if 72 - 72: Ii1I
if 27 - 27: ooOoO0o / IiII + OoO0O00 + Ii1I % I1Ii111
if 86 - 86: O0 % i11iIiiIii - Ii1I * oO0o % OOooOOo * i1IIi
if 87 - 87: II111iiii
if 53 - 53: OoOoOO00 * i11iIiiIii / I1Ii111
if 100 - 100: ooOoO0o + I1IiiI * oO0o + ooOoO0o
def lisp_get_crypto_decap_lookup_key(addr, port):
    """
    Find the key string that indexes lisp_crypto_keys_by_rloc_decap for
    peer 'addr'/'port'. Tries "<addr>:<port>" then bare "<addr>"; as a
    last resort scans for an "<addr>:<other-port>" entry and aliases
    its keys under the bare address. Returns the key string or None.
    """
    with_port = addr.print_address_no_iid() + ":" + str(port)
    if (with_port in lisp_crypto_keys_by_rloc_decap): return(with_port)

    bare = addr.print_address_no_iid()
    if (bare in lisp_crypto_keys_by_rloc_decap): return(bare)

    # A NATed xTR may have changed its translated port - match on the
    # address part alone and cache its keys under the bare address.
    for key in lisp_crypto_keys_by_rloc_decap:
        pieces = key.split(":")
        if (len(pieces) == 1): continue

        # IPv6 addresses contain ":" themselves; everything up to the
        # last piece is the address part.
        key_addr = pieces[0] if len(pieces) == 2 else ":".join(pieces[0:-1])
        if (key_addr == bare):
            lisp_crypto_keys_by_rloc_decap[bare] = \
                lisp_crypto_keys_by_rloc_decap[key]
            return(bare)
    return(None)
if 83 - 83: iII111i % o0oOOo0O0Ooo * OoOoOO00
if 49 - 49: II111iiii / OoO0O00
if 69 - 69: Ii1I * II111iiii
if 24 - 24: I1Ii111 * I1ii11iIi11i . OOooOOo . I1IiiI - I1ii11iIi11i
if 56 - 56: I1IiiI * Oo0Ooo + OoO0O00 - oO0o * I1Ii111
if 68 - 68: ooOoO0o * i11iIiiIii * OOooOOo % iII111i
if 10 - 10: Ii1I / Oo0Ooo - i1IIi
if 11 - 11: I11i * iII111i
if 28 - 28: II111iiii + IiII / Oo0Ooo * I1IiiI - OOooOOo
if 2 - 2: oO0o + I11i / I1Ii111 . I11i
if 59 - 59: Ii1I
def lisp_build_crypto_decap_lookup_key(addr, port):
    """
    Choose the key format used to store decap crypto state for this
    peer: an RTR uses the bare "<addr>" for xTRs it RLOC-probes and
    "<addr>:<port>" only for xTRs known to be behind a NAT; every other
    role always uses "<addr>:<port>".
    """
    addr = addr.print_address_no_iid()
    with_port = "{}:{}".format(addr, port)

    if (lisp_i_am_rtr == False): return(with_port)

    if (addr in lisp_rloc_probe_list): return(addr)

    # Behind a NAT the translated port distinguishes xTRs that share an
    # address.
    for state_list in lisp_nat_state_info.values():
        for state in state_list:
            if (addr == state.address): return(with_port)
    return(addr)
if 2 - 2: i1IIi % iII111i . oO0o / II111iiii * I1IiiI
if 17 - 17: O0 + iII111i + oO0o / iIii1I11I1II1 % oO0o
if 81 - 81: iII111i * i11iIiiIii % O0 / iIii1I11I1II1 . OoO0O00
if 24 - 24: I1ii11iIi11i + OoOoOO00 % ooOoO0o % I1IiiI * I1Ii111 - o0oOOo0O0Ooo
if 95 - 95: Oo0Ooo * IiII - I1IiiI
if 37 - 37: Oo0Ooo - oO0o / I1ii11iIi11i . o0oOOo0O0Ooo * Ii1I
if 95 - 95: i11iIiiIii - ooOoO0o / I11i / I1Ii111
def lisp_set_ttl(lisp_socket, ttl):
    """
    Set the IPv4 TTL used for packets sent on 'lisp_socket'. Best
    effort: platforms without the option just log and continue.
    """
    try:
        lisp_socket.setsockopt(socket.SOL_IP, socket.IP_TTL, ttl)
    except:
        lprint("socket.setsockopt(IP_TTL) not supported")
    return
if 59 - 59: Oo0Ooo - IiII
if 6 - 6: OOooOOo - I1IiiI . IiII
if 40 - 40: II111iiii
if 13 - 13: OoOoOO00
if 23 - 23: Oo0Ooo / II111iiii % OOooOOo % iII111i - Oo0Ooo / OoO0O00
if 7 - 7: Ii1I / I11i / II111iiii % I11i * I11i + iIii1I11I1II1
if 6 - 6: iIii1I11I1II1 * oO0o - iIii1I11I1II1 . O0 . O0
def lisp_is_rloc_probe_request(lisp_type):
    """Return True when the one-byte LISP header 'lisp_type' is a
    Map-Request with the probe bit set (0x12)."""
    first_byte = struct.unpack("B", lisp_type)[0]
    return(first_byte == 0x12)
if 96 - 96: I1Ii111 * II111iiii % i11iIiiIii - oO0o
if 32 - 32: i11iIiiIii * o0oOOo0O0Ooo . OoooooooOO / O0
if 14 - 14: i11iIiiIii . I1Ii111 % I1ii11iIi11i . I1ii11iIi11i % IiII
if 93 - 93: iIii1I11I1II1 / IiII
if 91 - 91: i11iIiiIii % ooOoO0o - iII111i * I1Ii111 . i11iIiiIii
if 1 - 1: IiII + iIii1I11I1II1 * I1ii11iIi11i - IiII - i1IIi
if 75 - 75: II111iiii * o0oOOo0O0Ooo / I1ii11iIi11i
def lisp_is_rloc_probe_reply(lisp_type):
    """Return True when the one-byte LISP header 'lisp_type' is a
    Map-Reply with the probe bit set (0x28)."""
    first_byte = struct.unpack("B", lisp_type)[0]
    return(first_byte == 0x28)
if 46 - 46: OOooOOo
if 67 - 67: OoO0O00 . I11i % OOooOOo + Oo0Ooo
if 40 - 40: OoO0O00 / I11i % iIii1I11I1II1 - ooOoO0o
if 51 - 51: Oo0Ooo % iIii1I11I1II1 % oO0o + o0oOOo0O0Ooo
if 32 - 32: I1Ii111 * I1IiiI + Ii1I
if 30 - 30: OoooooooOO / I1IiiI . iIii1I11I1II1 / ooOoO0o
if 20 - 20: OoooooooOO * OOooOOo
if 77 - 77: Ii1I - OoooooooOO . OoOoOO00
if 93 - 93: OoooooooOO / I1Ii111
if 91 - 91: I1Ii111
if 18 - 18: ooOoO0o * I11i
if 53 - 53: I11i . i11iIiiIii - iIii1I11I1II1 / I1Ii111
if 86 - 86: i1IIi % OoO0O00 - OoooooooOO
if 63 - 63: o0oOOo0O0Ooo . iIii1I11I1II1 % IiII * i11iIiiIii
if 70 - 70: iIii1I11I1II1
if 12 - 12: OoOoOO00 / o0oOOo0O0Ooo - I1ii11iIi11i + oO0o + O0
if 9 - 9: I1ii11iIi11i * OoooooooOO . O0 . ooOoO0o * i11iIiiIii / i1IIi
if 38 - 38: OoOoOO00 . OoooooooOO % I1ii11iIi11i . oO0o % oO0o
if 80 - 80: i11iIiiIii / OoOoOO00 . OOooOOo . iIii1I11I1II1
def lisp_is_rloc_probe(packet, rr):
    """
    Examine a pcap'ed IPv4 packet and decide whether it is a LISP
    RLOC-probe Map-Request (rr == 0), Map-Reply (rr == 1), or either
    (rr == -1). Returns [inner-packet, source, port, ttl] for a probe,
    [packet, None, None, None] when it is not one, and [None, None,
    None, None] when the probe was sent by this very system.
    """
    # IPv4 protocol field (byte 9) must be UDP.
    if (struct.unpack("B", packet[9])[0] != 17):
        return([packet, None, None, None])

    # First LISP byte sits after the 20-byte IPv4 + 8-byte UDP headers.
    lisp_byte = packet[28]
    if (rr == 0):
        probe = lisp_is_rloc_probe_request(lisp_byte)
    elif (rr == 1):
        probe = lisp_is_rloc_probe_reply(lisp_byte)
    elif (rr == -1):
        probe = (lisp_is_rloc_probe_request(lisp_byte) or
            lisp_is_rloc_probe_reply(lisp_byte))
    else:
        probe = True
    if (probe == False): return([packet, None, None, None])

    source = lisp_address(LISP_AFI_IPV4, "", 32, 0)
    source.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])

    # We pcap our own transmissions too - ignore self-sent probes.
    if (source.is_local()): return([None, None, None, None])

    source = source.print_address_no_iid()
    port = socket.ntohs(struct.unpack("H", packet[20:22])[0])
    ttl = struct.unpack("B", packet[8])[0] - 1
    packet = packet[28::]

    lprint("{} {} bytes {} {}, packet: {}".format(
        bold("Receive(pcap)", False), len(packet),
        bold("from " + source, False), port, lisp_format_packet(packet)))
    return([packet, source, port, ttl])
if 55 - 55: OoOoOO00 . OoOoOO00 % o0oOOo0O0Ooo . I11i . I1ii11iIi11i - o0oOOo0O0Ooo
if 1 - 1: i11iIiiIii - i1IIi * oO0o - iIii1I11I1II1
if 75 - 75: i1IIi * i11iIiiIii
if 40 - 40: I1ii11iIi11i + OoO0O00
if 8 - 8: i11iIiiIii - iIii1I11I1II1
if 73 - 73: OoOoOO00
if 25 - 25: iII111i / oO0o
if 61 - 61: OoooooooOO . Ii1I . I11i + oO0o
if 73 - 73: II111iiii % i11iIiiIii * I1ii11iIi11i + O0
if 61 - 61: I1IiiI / OOooOOo
if 67 - 67: OoOoOO00
def lisp_ipc_write_xtr_parameters(cp, dp):
    """
    Send the external data plane an "xtr-parameters" IPC record with
    the control-plane ('cp') and data-plane ('dp') logging flags and
    whether this process runs as an RTR.
    """
    if (lisp_ipc_dp_socket == None): return

    lisp_write_to_dp_socket({"type": "xtr-parameters",
        "control-plane-logging": cp, "data-plane-logging": dp,
        "rtr": lisp_i_am_rtr})
    return
if 95 - 95: ooOoO0o % OOooOOo
if 17 - 17: i1IIi + Ii1I
if 35 - 35: iIii1I11I1II1 - Oo0Ooo - OoooooooOO % I1ii11iIi11i
if 27 - 27: Oo0Ooo * II111iiii - OOooOOo + o0oOOo0O0Ooo
if 26 - 26: oO0o / I1ii11iIi11i - oO0o
if 9 - 9: ooOoO0o * iIii1I11I1II1 * OoooooooOO
if 13 - 13: iII111i . i11iIiiIii * o0oOOo0O0Ooo . iII111i
if 96 - 96: Ii1I
def lisp_external_data_plane():
    """
    Return True when packet forwarding happens outside this process:
    either "ipc-data-plane = yes" appears in ./lisp.config or the
    LISP_RUN_LISP_XTR environment variable is set.
    """
    command = 'egrep "ipc-data-plane = yes" ./lisp.config'
    if (commands.getoutput(command) != ""): return(True)

    return(os.getenv("LISP_RUN_LISP_XTR") != None)
if 93 - 93: i11iIiiIii / Ii1I * Oo0Ooo . iII111i % iII111i / IiII
if 15 - 15: OoOoOO00 % I1Ii111 - iIii1I11I1II1
if 52 - 52: i11iIiiIii * ooOoO0o
if 15 - 15: OoooooooOO . oO0o . i11iIiiIii / o0oOOo0O0Ooo
if 91 - 91: ooOoO0o
if 47 - 47: II111iiii + I11i + ooOoO0o % Oo0Ooo / iII111i
if 9 - 9: O0 + IiII
if 69 - 69: I1IiiI
if 11 - 11: I11i % I1Ii111 + O0 . Ii1I . I1ii11iIi11i % I1Ii111
if 28 - 28: IiII . o0oOOo0O0Ooo + iII111i - OoOoOO00 / OOooOOo
if 86 - 86: ooOoO0o * OoOoOO00 + oO0o / II111iiii % OOooOOo
if 89 - 89: O0 * Ii1I / OoO0O00 / OoOoOO00 % iII111i * iIii1I11I1II1
if 72 - 72: iIii1I11I1II1 / iIii1I11I1II1 * I11i
if 19 - 19: I1ii11iIi11i
def lisp_process_data_plane_restart(do_clear=False):
    """
    Handle a restart of the external data plane: touch ./lisp.config so
    configuration is re-read, then push the entire map-cache over IPC
    (an empty one when 'do_clear' is set).
    """
    os.system("touch ./lisp.config")

    ipc = {"type": "entire-map-cache", "entries": []}
    if (do_clear == False):
        lisp_map_cache.walk_cache(lisp_ipc_walk_map_cache, ipc["entries"])

    lisp_write_to_dp_socket(ipc)
    return
if 6 - 6: ooOoO0o - i1IIi * I1IiiI
if 24 - 24: iIii1I11I1II1 / I1Ii111
if 16 - 16: OoOoOO00 * I1Ii111 - I1IiiI / I1Ii111
if 64 - 64: I1ii11iIi11i . i1IIi % II111iiii % Oo0Ooo + oO0o - I1IiiI
if 24 - 24: IiII . II111iiii . II111iiii . OoOoOO00 . i11iIiiIii
if 11 - 11: Ii1I
if 82 - 82: I11i - i1IIi . Oo0Ooo * I1Ii111
if 44 - 44: iII111i
if 56 - 56: II111iiii / Oo0Ooo % IiII * II111iiii - iIii1I11I1II1 + ooOoO0o
if 33 - 33: o0oOOo0O0Ooo . I11i / I1IiiI
if 29 - 29: o0oOOo0O0Ooo - ooOoO0o
if 59 - 59: I11i / IiII * OoO0O00 / IiII . I1Ii111
if 82 - 82: OOooOOo . iIii1I11I1II1 + I1Ii111
if 14 - 14: IiII . i11iIiiIii
def lisp_process_data_plane_stats(msg, lisp_sockets, lisp_port):
    """
    Process a "statistics" IPC message from an external data-plane and
    fold the reported per-RLOC packet/byte counters into the map-cache.

    msg:          decoded JSON dict with an "entries" array; each entry
                  carries "eid-prefix", "instance-id" and an "rlocs" array.
    lisp_sockets: sockets used to send a Map-Request when a refreshed
                  entry's TTL has elapsed.
    lisp_port:    port used for that Map-Request.
    """
    if "entries" not in msg:
        lprint("No 'entries' in stats IPC message")
        return

    if type(msg["entries"]) != list:
        lprint("'entries' in stats IPC message must be an array")
        return

    # NOTE: the original code rebound 'msg' as the loop variable here,
    # shadowing the parameter; a distinct name is used instead.
    for entry in msg["entries"]:
        if "eid-prefix" not in entry:
            lprint("No 'eid-prefix' in stats IPC message")
            continue
        eid_str = entry["eid-prefix"]

        if "instance-id" not in entry:
            lprint("No 'instance-id' in stats IPC message")
            continue
        iid = int(entry["instance-id"])

        #
        # Find the map-cache entry this stats record refers to.
        #
        eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
        eid.store_prefix(eid_str)
        mc = lisp_map_cache_lookup(None, eid)
        if mc is None:
            lprint("Map-cache entry for {} not found for stats update".format(eid_str))
            continue

        if "rlocs" not in entry:
            lprint("No 'rlocs' in stats IPC message for {}".format(eid_str))
            continue
        if type(entry["rlocs"]) != list:
            lprint("'rlocs' in stats IPC message must be an array")
            continue
        ipc_rlocs = entry["rlocs"]

        #
        # Add counters to each RLOC of the map-cache entry.
        #
        for ipc_rloc in ipc_rlocs:
            if "rloc" not in ipc_rloc: continue

            rloc_str = ipc_rloc["rloc"]
            if rloc_str == "no-address": continue

            rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
            rloc.store_address(rloc_str)

            rloc_entry = mc.get_rloc(rloc)
            if rloc_entry is None: continue

            # Missing counters default to 0 so a partial record still works.
            pc = 0 if "packet-count" not in ipc_rloc else ipc_rloc["packet-count"]
            bc = 0 if "byte-count" not in ipc_rloc else ipc_rloc["byte-count"]
            ts = 0 if "seconds-last-packet" not in ipc_rloc else \
                ipc_rloc["seconds-last-packet"]

            rloc_entry.stats.packet_count += pc
            rloc_entry.stats.byte_count += bc
            rloc_entry.stats.last_increment = lisp_get_timestamp() - ts

            lprint("Update stats {}/{}/{}s for {} RLOC {}".format(pc, bc,
                ts, eid_str, rloc_str))

        #
        # Unicast entry whose TTL has elapsed: refresh it with a new
        # Map-Request now that we know it is still passing traffic.
        #
        if (mc.group.is_null() and mc.has_ttl_elapsed()):
            eid_str = green(mc.print_eid_tuple(), False)
            lprint("Refresh map-cache entry {}".format(eid_str))
            lisp_send_map_request(lisp_sockets, lisp_port, None, mc.eid, None)
    return
def lisp_process_data_plane_decap_stats(msg, lisp_ipc_socket):
    """
    Process a "decap-statistics" IPC message and fold its per-category
    counters into the global lisp_decap_stats table.

    In the lisp-itr process the message is just forwarded to the lisp-etr
    process (which owns decap state).  In the ETR the message arrives as a
    JSON string and is decoded first; in the RTR it is already a dict.
    """
    if lisp_i_am_itr:
        lprint("Send decap-stats IPC message to lisp-etr process")
        ipc = "stats%{}".format(json.dumps(msg))
        ipc = lisp_command_ipc(ipc, "lisp-itr")
        lisp_ipc(ipc, lisp_ipc_socket, "lisp-etr")
        return

    #
    # Process stats message in the lisp-etr or lisp-rtr process.
    #
    ipc = bold("IPC", False)
    lprint("Process decap-stats {} message: '{}'".format(ipc, msg))

    if lisp_i_am_etr: msg = json.loads(msg)

    key_names = ["good-packets", "ICV-error", "checksum-error",
        "lisp-header-error", "no-decrypt-key", "bad-inner-version",
        "outer-header-error"]

    for key_name in key_names:
        # Missing categories default their counters to 0.
        pc = 0 if key_name not in msg else msg[key_name]["packet-count"]
        lisp_decap_stats[key_name].packet_count += pc

        bc = 0 if key_name not in msg else msg[key_name]["byte-count"]
        lisp_decap_stats[key_name].byte_count += bc

        ts = 0 if key_name not in msg else msg[key_name]["seconds-last-packet"]
        lisp_decap_stats[key_name].last_increment = lisp_get_timestamp() - ts
    return
def lisp_process_punt(punt_socket, lisp_send_sockets, lisp_ephem_port):
    """
    Receive and dispatch one punt IPC message from an external data-plane.

    Message "type" values handled:
      "statistics"       -> lisp_process_data_plane_stats()
      "decap-statistics" -> lisp_process_data_plane_decap_stats()
      "restart"          -> lisp_process_data_plane_restart()
      "discovery"        -> dynamic-EID discovery and/or map-cache
                            population via Map-Request (below)
    """
    message, source = punt_socket.recvfrom(4000)

    msg = json.loads(message)
    if type(msg) != dict:
        lprint("Invalid punt message from {}, not in JSON format".format(source))
        return

    punt = bold("Punt", False)
    lprint("{} message from '{}': '{}'".format(punt, source, msg))

    if "type" not in msg:
        lprint("Punt IPC message has no 'type' key")
        return

    #
    # Statistics and restart messages are handled by dedicated routines.
    #
    if msg["type"] == "statistics":
        lisp_process_data_plane_stats(msg, lisp_send_sockets, lisp_ephem_port)
        return

    if msg["type"] == "decap-statistics":
        lisp_process_data_plane_decap_stats(msg, punt_socket)
        return

    if msg["type"] == "restart":
        lisp_process_data_plane_restart()
        return

    #
    # Everything below is a "discovery" punt.
    #
    if msg["type"] != "discovery":
        lprint("Punt IPC message has wrong format")
        return

    if "interface" not in msg:
        lprint("Invalid punt message from {}, required keys missing".format(source))
        return

    #
    # Resolve the instance-ID: either supplied directly (no interface) or
    # derived from the interface name.
    #
    device = msg["interface"]
    if device == "":
        iid = int(msg["instance-id"])
        if iid == -1: return
    else:
        iid = lisp_get_interface_instance_id(device, None)

    #
    # Validate optional source/dest EIDs from the message.
    #
    seid = None
    if "source-eid" in msg:
        source_eid = msg["source-eid"]
        seid = lisp_address(LISP_AFI_NONE, source_eid, 0, iid)
        if seid.is_null():
            lprint("Invalid source-EID format '{}'".format(source_eid))
            return

    deid = None
    if "dest-eid" in msg:
        dest_eid = msg["dest-eid"]
        deid = lisp_address(LISP_AFI_NONE, dest_eid, 0, iid)
        if deid.is_null():
            lprint("Invalid dest-EID format '{}'".format(dest_eid))
            return

    #
    # A source-EID punt may discover a dynamic-EID on a configured
    # database-mapping.
    #
    if seid:
        e = green(seid.print_address(), False)
        db = lisp_db_for_lookups.lookup_cache(seid, False)
        if db is not None:
            if db.dynamic_eid_configured():
                interface = lisp_allow_dynamic_eid(device, seid)
                if interface is not None and lisp_i_am_itr:
                    # NOTE(review): lisp_itr_discover_eid() is declared with
                    # five parameters elsewhere in this file; this 4-argument
                    # call is preserved from the original -- confirm intent.
                    lisp_itr_discover_eid(db, seid, device, interface)
                else:
                    lprint(("Disallow dynamic source-EID {} " +
                        "on interface {}").format(e, device))
        else:
            lprint("Punt from non-EID source {}".format(e))

    #
    # A dest-EID punt triggers a (rate-limited) Map-Request unless a
    # usable map-cache entry already exists.
    #
    if deid:
        mc = lisp_map_cache_lookup(seid, deid)
        if mc is None or mc.action == LISP_SEND_MAP_REQUEST_ACTION:
            if lisp_rate_limit_map_request(seid, deid): return
            lisp_send_map_request(lisp_send_sockets, lisp_ephem_port,
                seid, deid, None)
        else:
            e = green(deid.print_address(), False)
            lprint("Map-cache entry for {} already exists".format(e))
    return
def lisp_ipc_map_cache_entry(mc, jdata):
    """
    walk_cache() callback: serialize one map-cache entry into IPC form
    and append it to the jdata array.  Always returns [True, jdata] so
    the walk continues.
    """
    serialized = lisp_write_ipc_map_cache(True, mc, dont_send=True)
    jdata.append(serialized)
    return([True, jdata])
def lisp_ipc_walk_map_cache(mc, jdata):
    """
    walk_cache() callback used to serialize the whole map-cache.  A
    unicast entry is appended directly; a group entry descends into its
    source-cache and walks each (S,G) source entry.
    """
    if not mc.group.is_null():
        # (S,G) group entry: walk the sources hanging off it, if any.
        if mc.source_cache == None:
            return([True, jdata])
        jdata = mc.source_cache.walk_cache(lisp_ipc_map_cache_entry, jdata)
        return([True, jdata])

    return(lisp_ipc_map_cache_entry(mc, jdata))
def lisp_itr_discover_eid(db, eid, input_interface, routed_interface,
    lisp_ipc_listen_socket):
    """
    Record a newly-seen dynamic-EID on database-mapping entry 'db'.

    When the EID is already known, just refresh its last-packet timestamp.
    Otherwise create a lisp_dynamic_eid entry keyed by the printed EID and
    tell the lisp-etr process via IPC so it can register the dynamic-EID.
    """
    eid_str = eid.print_address()
    if eid_str in db.dynamic_eids:
        db.dynamic_eids[eid_str].last_packet = lisp_get_timestamp()
        return

    #
    # First sighting: build and store the dynamic-EID state.
    #
    dyn_eid = lisp_dynamic_eid()
    dyn_eid.dynamic_eid.copy_address(eid)
    dyn_eid.interface = routed_interface
    dyn_eid.last_packet = lisp_get_timestamp()
    dyn_eid.get_timeout(routed_interface)
    db.dynamic_eids[eid_str] = dyn_eid

    routed = ""
    if input_interface != routed_interface:
        routed = ", routed-interface " + routed_interface

    eid_string = green(eid_str, False) + bold(" discovered", False)
    lprint("Dynamic-EID {} on interface {}{}, timeout {}".format(eid_string,
        input_interface, routed, dyn_eid.timeout))

    #
    # Tell the lisp-etr process so it can register the dynamic-EID.
    #
    ipc = "learn%{}%{}".format(eid_str, routed_interface)
    ipc = lisp_command_ipc(ipc, "lisp-itr")
    lisp_ipc(ipc, lisp_ipc_listen_socket, "lisp-etr")
    return
def lisp_retry_decap_keys(addr_str, packet, iv, packet_icv):
    """
    The ICV check failed with the decap key currently stored for
    addr_str.  Try the other decap keys stored under "<addr_str>:<port>"
    style entries; the first one whose ICV matches becomes the active
    key for addr_str.
    """
    if (lisp_search_decap_keys == False): return

    # Only plain (port-less) IPv4 address keys are retried here.
    if (addr_str.find(":") != -1): return

    current_entry = lisp_crypto_keys_by_rloc_decap[addr_str]

    for key in lisp_crypto_keys_by_rloc_decap:
        # Consider only other entries for this same address...
        if (key.find(addr_str) == -1): continue
        # ...skipping the entry we already failed with.
        if (key == addr_str): continue

        candidate = lisp_crypto_keys_by_rloc_decap[key]
        if (candidate == current_entry): continue

        #
        # Run the ICV check with the candidate key.
        #
        crypto_key = candidate[1]
        if (packet_icv != crypto_key.do_icv(packet, iv)):
            lprint("Test ICV with key {} failed".format(red(key, False)))
            continue

        #
        # Found one that verifies -- make it the active key.
        #
        lprint("Changing decap crypto key to {}".format(red(key, False)))
        lisp_crypto_keys_by_rloc_decap[addr_str] = candidate
    return
def lisp_decent_pull_xtr_configured():
    """
    True when the LISP-Decent pull-based mapping system is configured,
    i.e. both a modulus and a DNS suffix have been set.
    """
    if lisp_decent_modulus == 0: return(False)
    return(lisp_decent_dns_suffix != None)
def lisp_is_decent_dns_suffix(dns_name):
    """
    Return True when dns_name, with its first (leftmost) label removed,
    equals the configured LISP-Decent DNS suffix.
    """
    if lisp_decent_dns_suffix == None: return(False)

    # Strip the leading "<label>." and compare the remainder.
    remainder = dns_name.split(".", 1)[1] if "." in dns_name else ""
    return(remainder == lisp_decent_dns_suffix)
def lisp_get_decent_index(eid):
    """
    Map an EID-prefix to a LISP-Decent index: SHA-256 the printed prefix
    and reduce the digest modulo lisp_decent_modulus.
    """
    prefix_str = eid.print_prefix()
    digest = hashlib.sha256(prefix_str).hexdigest()
    return(int(digest, 16) % lisp_decent_modulus)
def lisp_get_decent_dns_name(eid):
    """
    Build the LISP-Decent DNS lookup name "<index>.<suffix>" for an EID.
    """
    decent_index = lisp_get_decent_index(eid)
    return(str(decent_index) + "." + lisp_decent_dns_suffix)
def lisp_get_decent_dns_name_from_str(iid, eid_str):
    """
    Same as lisp_get_decent_dns_name() but starting from an instance-ID
    and an EID string instead of a lisp_address.
    """
    eid = lisp_address(LISP_AFI_NONE, eid_str, 0, iid)
    decent_index = lisp_get_decent_index(eid)
    return(str(decent_index) + "." + lisp_decent_dns_suffix)
def lisp_trace_append ( packet , reason = None , ed = "encap" , lisp_socket = None ,
        rloc_entry = None ) :
    """
    Append this node's hop record to the JSON payload of a LISP-Trace
    packet carried in 'packet', then patch the inner UDP/IP headers for
    the new payload length.

    packet:      lisp_packet with inner (and possibly outer) headers plus
                 the LISP-Trace JSON after them.
    reason:      optional text recorded when the dest RLOC is unknown.
    ed:          "encap" or "decap" -- selects the timestamp key written.
    lisp_socket: socket used to return the trace to its originator when
                 no dest RLOC is known.
    rloc_entry:  when supplied, its recent RLOC-probe RTTs/hops are
                 copied into the hop record.

    Returns True when the caller should keep forwarding the packet,
    False when it was consumed (decode failure or returned to sender).
    """
    if 20 - 20: Ii1I * iII111i / ooOoO0o
    # Inner IP + UDP headers are 28 bytes (IPv4) or 48 bytes (IPv6);
    # the LISP-Trace JSON starts right after them.
    I11iiIi1i1 = 28 if packet . inner_version == 4 else 48
    IIi1iiIIii1Ii = packet . packet [ I11iiIi1i1 : : ]
    i11I1iII = lisp_trace ( )
    if ( i11I1iII . decode ( IIi1iiIIii1Ii ) == False ) :
        lprint ( "Could not decode JSON portion of a LISP-Trace packet" )
        return ( False )
    if 16 - 16: O0 . OoOoOO00 . iII111i + Oo0Ooo
    if 89 - 89: I11i - OoO0O00 . IiII - OoO0O00 - I1ii11iIi11i % I1IiiI
    # Dest RLOC string; "?" when the outer destination is not known yet.
    I1II111 = "?" if packet . outer_dest . is_null ( ) else packet . outer_dest . print_address_no_iid ( )
    if 94 - 94: OoO0O00
    if 78 - 78: iIii1I11I1II1 / I1IiiI + iIii1I11I1II1 . I1ii11iIi11i / I1ii11iIi11i + IiII
    if 92 - 92: i11iIiiIii * iII111i
    if 9 - 9: O0 * IiII / Ii1I + OoO0O00
    if 75 - 75: OOooOOo * OoOoOO00
    if 82 - 82: Ii1I
    # Record the encap port when it is not the well-known LISP data port.
    if ( I1II111 != "?" and packet . encap_port != LISP_DATA_PORT ) :
        if ( ed == "encap" ) : I1II111 += ":{}" . format ( packet . encap_port )
    if 83 - 83: I1IiiI
    if 22 - 22: IiII / Ii1I + I1Ii111 % iIii1I11I1II1
    if 75 - 75: OoOoOO00 % OoOoOO00 % o0oOOo0O0Ooo % I1ii11iIi11i + IiII
    if 45 - 45: I11i - iIii1I11I1II1
    if 20 - 20: OoOoOO00
    # Build this node's hop record for the "paths" array.
    iIIiI11iI1Ii1 = { }
    iIIiI11iI1Ii1 [ "node" ] = "ITR" if lisp_i_am_itr else "ETR" if lisp_i_am_etr else "RTR" if lisp_i_am_rtr else "?"
    if 84 - 84: OoOoOO00
    # Source RLOC: outer source if present, else our own first RLOC.
    o0O0iiiI111i1 = packet . outer_source
    if ( o0O0iiiI111i1 . is_null ( ) ) : o0O0iiiI111i1 = lisp_myrlocs [ 0 ]
    iIIiI11iI1Ii1 [ "srloc" ] = o0O0iiiI111i1 . print_address_no_iid ( )
    if 42 - 42: O0 . ooOoO0o + OOooOOo . iIii1I11I1II1 * OoO0O00 . iII111i
    if 35 - 35: II111iiii + I11i
    if 15 - 15: Oo0Ooo . i1IIi - o0oOOo0O0Ooo - oO0o / o0oOOo0O0Ooo
    if 97 - 97: oO0o - I1IiiI / Ii1I
    if 48 - 48: o0oOOo0O0Ooo % o0oOOo0O0Ooo - OoOoOO00
    # An ITR that is not the trace originator records the inner source port.
    if ( iIIiI11iI1Ii1 [ "node" ] == "ITR" and packet . inner_sport != LISP_TRACE_PORT ) :
        iIIiI11iI1Ii1 [ "srloc" ] += ":{}" . format ( packet . inner_sport )
    if 13 - 13: OoO0O00 - Ii1I . ooOoO0o / O0 * OoOoOO00
    if 57 - 57: O0 + OoooooooOO % o0oOOo0O0Ooo / I1Ii111 / OOooOOo - OoOoOO00
    iIIiI11iI1Ii1 [ "hn" ] = lisp_hostname
    iii11 = ed + "-ts"
    iIIiI11iI1Ii1 [ iii11 ] = lisp_get_timestamp ( )
    if 48 - 48: o0oOOo0O0Ooo - II111iiii + OoOoOO00
    if 54 - 54: II111iiii - OoO0O00 - o0oOOo0O0Ooo - O0 % I1Ii111
    if 9 - 9: i1IIi % iII111i / Ii1I
    if 83 - 83: oO0o
    if 1 - 1: oO0o * iIii1I11I1II1 % iIii1I11I1II1 % iIii1I11I1II1 / oO0o + IiII
    if 29 - 29: OoooooooOO
    # ETR with no outer header: use our own RLOC from the database entry.
    if ( I1II111 == "?" and iIIiI11iI1Ii1 [ "node" ] == "ETR" ) :
        I111I = lisp_db_for_lookups . lookup_cache ( packet . inner_dest , False )
        if ( I111I != None and len ( I111I . rloc_set ) >= 1 ) :
            I1II111 = I111I . rloc_set [ 0 ] . rloc . print_address_no_iid ( )
        if 55 - 55: O0 - o0oOOo0O0Ooo % I1ii11iIi11i * I11i * oO0o
        if 83 - 83: iIii1I11I1II1
    iIIiI11iI1Ii1 [ "drloc" ] = I1II111
    if 92 - 92: OoO0O00 - iII111i
    if 97 - 97: ooOoO0o / I11i . IiII + I1Ii111 . iIii1I11I1II1
    if 24 - 24: ooOoO0o - oO0o % OoOoOO00 * Oo0Ooo
    if 54 - 54: Ii1I - OoooooooOO % I1IiiI + oO0o
    # Note why the dest RLOC could not be determined.
    if ( I1II111 == "?" and reason != None ) :
        iIIiI11iI1Ii1 [ "drloc" ] += " ({})" . format ( reason )
    if 70 - 70: I1Ii111 % iIii1I11I1II1
    if 74 - 74: i1IIi % i11iIiiIii + oO0o
    if 94 - 94: OoO0O00 * I1IiiI / O0 + I1Ii111 / i11iIiiIii
    if 34 - 34: Oo0Ooo . i1IIi
    if 97 - 97: I11i
    # Include recent RLOC-probe RTT and hop history when available.
    if ( rloc_entry != None ) :
        iIIiI11iI1Ii1 [ "rtts" ] = rloc_entry . recent_rloc_probe_rtts
        iIIiI11iI1Ii1 [ "hops" ] = rloc_entry . recent_rloc_probe_hops
    if 89 - 89: iII111i % OoOoOO00 . Oo0Ooo
    if 20 - 20: oO0o % OoOoOO00
    if 93 - 93: I1ii11iIi11i - Ii1I % i1IIi / i1IIi
    if 82 - 82: OOooOOo
    if 27 - 27: I1Ii111 / IiII - i1IIi * Ii1I
    if 90 - 90: ooOoO0o
    # Seed the JSON with a path entry for this direction when empty.
    I1i1III1i = packet . inner_source . print_address ( )
    O000iI1ii1I = packet . inner_dest . print_address ( )
    if ( i11I1iII . packet_json == [ ] ) :
        oo00Oo = { }
        oo00Oo [ "seid" ] = I1i1III1i
        oo00Oo [ "deid" ] = O000iI1ii1I
        oo00Oo [ "paths" ] = [ ]
        i11I1iII . packet_json . append ( oo00Oo )
    if 100 - 100: iII111i * i1IIi . iII111i / O0 / OoO0O00 - oO0o
    if 65 - 65: OoOoOO00 + ooOoO0o * OoO0O00 % OoooooooOO + OoooooooOO * OoooooooOO
    if 49 - 49: o0oOOo0O0Ooo + i1IIi / iII111i
    if 43 - 43: i1IIi . OoO0O00 + I1ii11iIi11i
    if 88 - 88: OoooooooOO / I11i % II111iiii % OOooOOo - I11i
    if 55 - 55: Oo0Ooo - OOooOOo - O0
    # Append this hop record to the path entry for the current dest-EID.
    for oo00Oo in i11I1iII . packet_json :
        if ( oo00Oo [ "deid" ] != O000iI1ii1I ) : continue
        oo00Oo [ "paths" ] . append ( iIIiI11iI1Ii1 )
        break
    if 40 - 40: OoOoOO00 - OOooOOo
    if 3 - 3: IiII % I11i * I1Ii111 + iIii1I11I1II1 . oO0o
    if 35 - 35: II111iiii
    if 15 - 15: I11i * iIii1I11I1II1 + OOooOOo % IiII . o0oOOo0O0Ooo % Oo0Ooo
    if 96 - 96: O0
    if 15 - 15: i1IIi . iIii1I11I1II1
    if 3 - 3: II111iiii * i11iIiiIii * i1IIi - i1IIi
    if 11 - 11: I1IiiI % Ii1I * i11iIiiIii % OOooOOo + II111iiii
    # At the destination ETR, add the return-direction path entry and
    # remember that the packet must be turned around below.
    o0O0OO = False
    if ( len ( i11I1iII . packet_json ) == 1 and iIIiI11iI1Ii1 [ "node" ] == "ETR" and
            i11I1iII . myeid ( packet . inner_dest ) ) :
        oo00Oo = { }
        oo00Oo [ "seid" ] = O000iI1ii1I
        oo00Oo [ "deid" ] = I1i1III1i
        oo00Oo [ "paths" ] = [ ]
        i11I1iII . packet_json . append ( oo00Oo )
        o0O0OO = True
    if 9 - 9: OoooooooOO - Oo0Ooo - I1ii11iIi11i * o0oOOo0O0Ooo * I11i
    if 27 - 27: OoOoOO00 % OoO0O00 * oO0o . II111iiii - i11iIiiIii
    if 56 - 56: OOooOOo . IiII - OOooOOo / i11iIiiIii * I1ii11iIi11i
    if 66 - 66: oO0o + ooOoO0o
    if 1 - 1: ooOoO0o
    if 61 - 61: o0oOOo0O0Ooo / OoooooooOO . I1ii11iIi11i + Oo0Ooo
    i11I1iII . print_trace ( )
    IIi1iiIIii1Ii = i11I1iII . encode ( )
    if 75 - 75: Ii1I
    if 79 - 79: i1IIi . I1ii11iIi11i * o0oOOo0O0Ooo / I11i . I11i / ooOoO0o
    if 99 - 99: oO0o + I11i % i1IIi . iII111i
    if 58 - 58: Oo0Ooo % i11iIiiIii . Oo0Ooo / Oo0Ooo - I1IiiI . Ii1I
    if 65 - 65: OoO0O00
    if 16 - 16: IiII % I1IiiI % iIii1I11I1II1 . I1IiiI . I1ii11iIi11i - IiII
    if 6 - 6: I1Ii111 + OoO0O00 + O0 * OoOoOO00 . iIii1I11I1II1 . I1Ii111
    if 93 - 93: ooOoO0o % iIii1I11I1II1 + I1ii11iIi11i
    # No dest RLOC: return the trace to the originator's RLOC (the srloc
    # recorded in the very first hop of the very first path).
    oOO0ooO = i11I1iII . packet_json [ 0 ] [ "paths" ] [ 0 ] [ "srloc" ]
    if ( I1II111 == "?" ) :
        lprint ( "LISP-Trace return to sender RLOC {}" . format ( oOO0ooO ) )
        i11I1iII . return_to_sender ( lisp_socket , oOO0ooO , IIi1iiIIii1Ii )
        return ( False )
    if 91 - 91: I11i
    if 24 - 24: o0oOOo0O0Ooo
    if 78 - 78: OOooOOo + iIii1I11I1II1 - OoO0O00 + ooOoO0o + Oo0Ooo / Oo0Ooo
    if 15 - 15: iIii1I11I1II1 . I1Ii111 * OoooooooOO * O0 % OOooOOo
    if 53 - 53: Ii1I
    if 63 - 63: I11i % OoOoOO00
    # New UDP payload length of the (re-encoded) trace.
    IiI = i11I1iII . packet_length ( )
    if 46 - 46: iIii1I11I1II1 . II111iiii / OoooooooOO - ooOoO0o * iII111i
    if 52 - 52: I11i + iII111i
    if 9 - 9: OoOoOO00 % II111iiii . I11i * Oo0Ooo
    if 53 - 53: II111iiii / i1IIi + OoooooooOO * O0
    if 62 - 62: IiII . O0
    if 87 - 87: I1ii11iIi11i / oO0o / IiII . OOooOOo
    # Rewrite the inner UDP length field and zero the UDP checksum
    # (last 4 bytes of the inner headers).
    O0oOoOOOOo = packet . packet [ 0 : I11iiIi1i1 ]
    OoOoO = struct . pack ( "HH" , socket . htons ( IiI ) , 0 )
    O0oOoOOOOo = O0oOoOOOOo [ 0 : I11iiIi1i1 - 4 ] + OoOoO
    # IPv6 requires a valid UDP checksum; recompute on the final ETR hop.
    if ( packet . inner_version == 6 and iIIiI11iI1Ii1 [ "node" ] == "ETR" and
            len ( i11I1iII . packet_json ) == 2 ) :
        I1iIIIiI = O0oOoOOOOo [ I11iiIi1i1 - 8 : : ] + IIi1iiIIii1Ii
        I1iIIIiI = lisp_udp_checksum ( I1i1III1i , O000iI1ii1I , I1iIIIiI )
        O0oOoOOOOo = O0oOoOOOOo [ 0 : I11iiIi1i1 - 8 ] + I1iIIIiI [ 0 : 8 ]
    if 66 - 66: OoOoOO00 . Ii1I / i11iIiiIii / ooOoO0o
    if 76 - 76: OoO0O00 % OoO0O00 / I1ii11iIi11i * ooOoO0o * o0oOOo0O0Ooo - I1Ii111
    if 53 - 53: OoO0O00 % Oo0Ooo . i1IIi
    if 34 - 34: Ii1I - o0oOOo0O0Ooo * i1IIi
    if 7 - 7: OoO0O00 * I1ii11iIi11i / I1Ii111
    if 98 - 98: II111iiii % I1ii11iIi11i
    # Turning the packet around at the destination ETR: swap the IP
    # addresses and UDP ports in the raw inner header bytes, then swap
    # the packet's inner source/dest address objects.
    if ( o0O0OO ) :
        if ( packet . inner_version == 4 ) :
            O0oOoOOOOo = O0oOoOOOOo [ 0 : 12 ] + O0oOoOOOOo [ 16 : 20 ] + O0oOoOOOOo [ 12 : 16 ] + O0oOoOOOOo [ 22 : 24 ] + O0oOoOOOOo [ 20 : 22 ] + O0oOoOOOOo [ 24 : : ]
            if 48 - 48: iII111i % oO0o + oO0o - Oo0Ooo . OOooOOo
        else :
            O0oOoOOOOo = O0oOoOOOOo [ 0 : 8 ] + O0oOoOOOOo [ 24 : 40 ] + O0oOoOOOOo [ 8 : 24 ] + O0oOoOOOOo [ 42 : 44 ] + O0oOoOOOOo [ 40 : 42 ] + O0oOoOOOOo [ 44 : : ]
        if 38 - 38: iII111i
        if 66 - 66: iII111i + Oo0Ooo + i1IIi * Oo0Ooo
        oOo0OOOOOO = packet . inner_dest
        packet . inner_dest = packet . inner_source
        packet . inner_source = oOo0OOOOOO
    if 18 - 18: O0 - IiII
    if 5 - 5: I1ii11iIi11i * iII111i + II111iiii * Oo0Ooo * O0 - I1IiiI
    if 71 - 71: i11iIiiIii % I1IiiI + I1ii11iIi11i + II111iiii + OoooooooOO + oO0o
    if 12 - 12: I1IiiI + I1Ii111
    if 66 - 66: I1Ii111 + OOooOOo + I1Ii111 . OoooooooOO * oO0o / OoO0O00
    # Patch the IP length field: total-length at offset 2 for IPv4
    # (header + payload), payload-length at offset 4 for IPv6.
    I11iiIi1i1 = 2 if packet . inner_version == 4 else 4
    Oo0OoOOOO0 = 20 + IiI if packet . inner_version == 4 else IiI
    Ii11i1Iiii11 = struct . pack ( "H" , socket . htons ( Oo0OoOOOO0 ) )
    O0oOoOOOOo = O0oOoOOOOo [ 0 : I11iiIi1i1 ] + Ii11i1Iiii11 + O0oOoOOOOo [ I11iiIi1i1 + 2 : : ]
    if 24 - 24: IiII % Ii1I / ooOoO0o
    if 52 - 52: iII111i % iIii1I11I1II1 - Oo0Ooo - iIii1I11I1II1 * I1ii11iIi11i - OoO0O00
    if 26 - 26: i11iIiiIii % I11i % o0oOOo0O0Ooo % OoOoOO00 / iII111i - OOooOOo
    if 17 - 17: i1IIi - Ii1I . ooOoO0o % I1Ii111 . OoooooooOO / oO0o
    # Recompute the IPv4 header checksum (zero the field first).
    if ( packet . inner_version == 4 ) :
        Oo0ooooO0o00 = struct . pack ( "H" , 0 )
        O0oOoOOOOo = O0oOoOOOOo [ 0 : 10 ] + Oo0ooooO0o00 + O0oOoOOOOo [ 12 : : ]
        Ii11i1Iiii11 = lisp_ip_checksum ( O0oOoOOOOo [ 0 : 20 ] )
        O0oOoOOOOo = Ii11i1Iiii11 + O0oOoOOOOo [ 20 : : ]
    if 91 - 91: ooOoO0o % I1ii11iIi11i
    if 60 - 60: O0 * Oo0Ooo * IiII % OoOoOO00 . OoOoOO00
    if 4 - 4: I1Ii111 % I1Ii111 * O0
    if 54 - 54: I1ii11iIi11i - IiII . OoO0O00 + I1ii11iIi11i / I1IiiI
    if 91 - 91: OOooOOo % Oo0Ooo
    # Reassemble headers + new JSON payload and hand back to the caller.
    packet . packet = O0oOoOOOOo + IIi1iiIIii1Ii
    return ( True )
if 44 - 44: iIii1I11I1II1 . OOooOOo
if 57 - 57: II111iiii + I1Ii111
if 42 - 42: OoOoOO00 % O0
if 70 - 70: iIii1I11I1II1 * Oo0Ooo - I1IiiI / OoO0O00 + OoOoOO00
if 94 - 94: OoooooooOO + O0 * iIii1I11I1II1 * II111iiii
if 90 - 90: I11i + O0 / I1IiiI . oO0o / O0
if 46 - 46: O0 . O0 - oO0o . II111iiii * I1IiiI * Ii1I
if 10 - 10: i1IIi + i1IIi . i1IIi - I1IiiI - I1IiiI
if 26 - 26: Ii1I * I11i / I11i
if 79 - 79: ooOoO0o / oO0o - oO0o / OoooooooOO
def lisp_allow_gleaning(eid, rloc):
    """Decide whether gleaning is allowed for an EID/RLOC pair.

    Walks the configured ``lisp_glean_mappings`` entries; an entry matches
    only if every key it carries ("instance-id" range, "eid-prefix",
    "rloc-prefix") matches the supplied addresses.

    Returns a tuple ``(glean_allowed, rloc_probe_allowed)`` where the second
    element is the matching entry's "rloc-probe" flag.  Returns
    ``(False, False)`` when nothing matches or nothing is configured.
    """
    if (lisp_glean_mappings == []): return(False, False)

    for entry in lisp_glean_mappings:
        # Instance-id is stored as an inclusive (low, high) range.
        if ("instance-id" in entry):
            iid = eid.instance_id
            low, high = entry["instance-id"]
            if (iid < low or iid > high): continue

        if ("eid-prefix" in entry):
            # Deep-copy so the configured prefix's instance-id is not
            # clobbered by the comparison below.
            prefix = copy.deepcopy(entry["eid-prefix"])
            prefix.instance_id = eid.instance_id
            if (eid.is_more_specific(prefix) == False): continue

        if ("rloc-prefix" in entry):
            # A None rloc matches any rloc-prefix constraint.
            if (rloc != None and rloc.is_more_specific(entry["rloc-prefix"])
                == False): continue

        return(True, entry["rloc-probe"])

    return(False, False)
if 9 - 9: Ii1I / IiII * I11i - i11iIiiIii * I1ii11iIi11i / iII111i
if 61 - 61: O0 % iII111i
if 41 - 41: I1Ii111 * OoooooooOO
if 76 - 76: OoooooooOO * II111iiii . II111iiii / o0oOOo0O0Ooo - iII111i
if 49 - 49: O0 . I1ii11iIi11i . OoOoOO00 . I1Ii111 % O0 . iIii1I11I1II1
if 19 - 19: iIii1I11I1II1
if 97 - 97: Ii1I . I11i / ooOoO0o + Oo0Ooo
def lisp_glean_map_cache(eid, rloc, encap_port):
    """Add or refresh a gleaned entry in the LISP map-cache.

    If a map-cache entry already exists for *eid*, refresh its timestamp and
    return early when the stored RLOC/port already match; otherwise log the
    RLOC change and detach the stale RLOC from the probe list.  If no entry
    exists, build a new gleaned mapping.  In both cases the entry's RLOC-set
    is replaced with a single translated RLOC at low priority.
    """
    mc = lisp_map_cache.lookup_cache(eid, True)
    if (mc):
        mc.last_refresh_time = lisp_get_timestamp()

        current = mc.rloc_set[0]
        if (current.rloc.is_exact_match(rloc) and
            current.translated_port == encap_port): return

        eid_str = green(eid.print_address(), False)
        rloc_str = red(rloc.print_address_no_iid() + ":" +
            str(encap_port), False)
        lprint("Gleaned EID {} RLOC changed to {}".format(eid_str, rloc_str))

        # The old RLOC must stop being probed for this EID/group.
        current.delete_from_rloc_probe_list(mc.eid, mc.group)
    else:
        mc = lisp_mapping("", "", [])
        mc.eid.copy_address(eid)
        mc.mapping_source.copy_address(rloc)
        mc.map_cache_ttl = LISP_GLEAN_TTL
        mc.gleaned = True

        eid_str = green(eid.print_address(), False)
        rloc_str = red(rloc.print_address_no_iid() + ":" +
            str(encap_port), False)
        lprint("Add gleaned EID {} to map-cache with RLOC {}".format(
            eid_str, rloc_str))
        mc.add_cache()

    # Install the (possibly NAT-translated) RLOC as the sole entry, with
    # priorities that keep gleaned RLOCs below configured ones.
    new_rloc = lisp_rloc()
    new_rloc.store_translated_rloc(rloc, encap_port)
    new_rloc.add_to_rloc_probe_list(mc.eid, mc.group)
    new_rloc.priority = 253
    new_rloc.mpriority = 255
    mc.rloc_set = [new_rloc]
    mc.build_best_rloc_set()
if 52 - 52: oO0o % iII111i - I1IiiI - o0oOOo0O0Ooo
if 66 - 66: o0oOOo0O0Ooo - Oo0Ooo - OoooooooOO * o0oOOo0O0Ooo + I1Ii111
if 82 - 82: I11i * i1IIi / Ii1I + O0
# dd678faae9ac167bc83abf78e5cb2f3f0688d3a3
|
test_redundant_router.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from nose.plugins.attrib import attr
from marvin.lib.base import (Account,
Router,
NetworkOffering,
Network,
VirtualMachine,
ServiceOffering,
Host)
from marvin.lib.utils import cleanup_resources
from marvin.lib.common import (get_domain,
get_template,
get_zone,
get_process_status)
import time
import multiprocessing
# Import Local Modules
from marvin.cloudstackTestCase import cloudstackTestCase
class TestCreateRvRNetworkOffering(cloudstackTestCase):
    """Verify that a network offering with the RedundantRouter capability
    can be created, enabled, and listed back with the capability set."""

    @classmethod
    def setUpClass(cls):
        cls.testClient = super(
            TestCreateRvRNetworkOffering,
            cls).getClsTestClient()
        cls.api_client = cls.testClient.getApiClient()
        cls.testdata = cls.testClient.getParsedTestDataConfig()
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client)
        cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
        cls._cleanup = []
        return

    @classmethod
    def tearDownClass(cls):
        try:
            # Cleanup resources used
            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    def setUp(self):
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.cleanup = []
        return

    def tearDown(self):
        try:
            cleanup_resources(self.apiclient, self.cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    @attr(tags=["advanced", "advancedns", "ssh"])
    def test_createRvRNetworkOffering(self):
        """Test create RvR supported network offering
        """
        # Steps to validate
        # 1. create a network offering
        #    - all services by VirtualRouter
        #    - enable RedundantRouter servicecapability
        # 2. enable the network offering
        # Validate the following
        # 1. Redundant Router offering should be created successfully and
        #    listed in listNetworkOfferings response
        #    assert if RvR capability is enabled
        self.debug("Creating network offering with redundant VR capability")
        try:
            network_offering = NetworkOffering.create(
                self.apiclient,
                self.testdata["nw_off_isolated_RVR"],
                conservemode=True
            )
        except Exception as e:
            self.fail("Create network offering failed! - %s" % e)
        # Register for cleanup *before* enabling so the offering is removed
        # even if the enable call below fails.
        self.cleanup.append(network_offering)
        self.debug("Enabling network offering - %s" % network_offering.name)
        # Enable Network offering
        network_offering.update(self.apiclient, state='Enabled')
        self.debug("Checking if the network offering created successfully?")
        network_offs = NetworkOffering.list(
            self.apiclient,
            id=network_offering.id,
            listall=True
        )
        self.assertEqual(
            isinstance(network_offs, list),
            True,
            "List network offering should not return empty response"
        )
        self.assertEqual(
            len(network_offs),
            1,
            "List network off should have newly created network off"
        )
        # The RedundantRouter capability hangs off the SourceNat service.
        for service in network_offs[0].service:
            if service.name == 'SourceNat':
                self.debug("Verifying SourceNat capabilites")
                for capability in service.capability:
                    if capability.name == 'RedundantRouter':
                        self.assertEqual(
                            capability.value,
                            'true',
                            "RedundantRouter capability should be enabled"
                        )
                        self.debug("RedundantRouter is enabled")
        return
class TestCreateRvRNetwork(cloudstackTestCase):
    """Create an isolated network on a redundant-VR offering, deploy a VM,
    and verify the PRIMARY/BACKUP router pair: shared public IP, distinct
    guest IPs and guest MACs, correct redundant states."""

    @classmethod
    def setUpClass(cls):
        # One-time fixtures: zone/domain/template lookups, a service
        # offering for the test VM, and an enabled RvR network offering.
        cls.testClient = super(TestCreateRvRNetwork, cls).getClsTestClient()
        cls.api_client = cls.testClient.getApiClient()
        cls.testdata = cls.testClient.getParsedTestDataConfig()
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client)
        cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
        cls.template = get_template(
            cls.api_client,
            cls.zone.id,
            cls.testdata["ostype"]
        )
        cls.testdata["small"]["zoneid"] = cls.zone.id
        cls.testdata["small"]["template"] = cls.template.id
        cls._cleanup = []
        cls.service_offering = ServiceOffering.create(
            cls.api_client,
            cls.testdata["service_offering"]
        )
        cls._cleanup.append(cls.service_offering)
        cls.network_offering = NetworkOffering.create(
            cls.api_client,
            cls.testdata["nw_off_isolated_RVR"],
            conservemode=True
        )
        cls._cleanup.append(cls.network_offering)
        # Enable Network offering
        cls.network_offering.update(cls.api_client, state='Enabled')
        return

    @classmethod
    def tearDownClass(cls):
        try:
            # Cleanup resources used
            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    def setUp(self):
        # Per-test account; deleting it cascades to its networks/VMs/routers.
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.account = Account.create(
            self.apiclient,
            self.testdata["account"],
            admin=True,
            domainid=self.domain.id
        )
        self.cleanup = []
        self.cleanup.insert(0, self.account)
        return

    def tearDown(self):
        try:
            cleanup_resources(self.apiclient, self.cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    @attr(tags=["advanced", "advancedns", "ssh"])
    def test_createRvRNetwork(self):
        """Test create network with redundant routers
        """
        # Validate the following:
        # 1. listNetworkOfferings shows created offering
        # 2. listNetworks should show created network in Allocated state
        # 3. returns no Running routers in the network
        # 4. listVirtualmachines shows VM in Running state
        # 5. returns 2 routers
        #    - same public IP
        #    - same MAC address of public NIC
        #    - different guestip address
        #    - redundant state (PRIMARY or BACKUP)
        #    - same gateway for the public traffic
        # 6. all routers, networks and user VMs are cleaned up
        # Creating network using the network offering created
        self.debug("Creating network with network offering: %s" %
                   self.network_offering.id)
        network = Network.create(
            self.apiclient,
            self.testdata["network"],
            accountid=self.account.name,
            domainid=self.account.domainid,
            networkofferingid=self.network_offering.id,
            zoneid=self.zone.id
        )
        self.debug("Created network with ID: %s" % network.id)
        networks = Network.list(
            self.apiclient,
            id=network.id,
            listall=True
        )
        self.assertEqual(
            isinstance(networks, list),
            True,
            "List networks should return a valid response for created network"
        )
        nw_response = networks[0]
        self.debug("Network state: %s" % nw_response.state)
        self.assertEqual(
            nw_response.state,
            "Allocated",
            "The network should be in allocated state after creation"
        )
        # No VM yet, so no routers should have been spawned.
        self.debug("Listing routers for network: %s" % network.name)
        routers = Router.list(
            self.apiclient,
            networkid=network.id,
            listall=True
        )
        self.assertEqual(
            routers,
            None,
            "Routers should not be spawned when network is in allocated state"
        )
        self.debug("Deploying VM in account: %s" % self.account.name)
        # Spawn an instance in that network
        VirtualMachine.create(
            self.apiclient,
            self.testdata["small"],
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            networkids=[str(network.id)]
        )
        self.debug("Deployed VM in network: %s" % network.id)
        # wait for VR to update state
        time.sleep(self.testdata["sleep"])
        self.debug("Listing routers for network: %s" % network.name)
        routers = Router.list(
            self.apiclient,
            networkid=network.id,
            listall=True
        )
        self.assertEqual(
            isinstance(routers, list),
            True,
            "list router should return Primary and backup routers"
        )
        self.assertEqual(
            len(routers),
            2,
            "Length of the list router should be 2 (Backup & primary)"
        )
        # Order in the list response is not guaranteed; classify by state.
        if routers[0].redundantstate == 'PRIMARY':
            primary_router = routers[0]
            backup_router = routers[1]
        else:
            primary_router = routers[1]
            backup_router = routers[0]
        self.debug("Redundant states: %s, %s" % (
            primary_router.redundantstate,
            backup_router.redundantstate
        ))
        self.assertEqual(
            primary_router.publicip,
            backup_router.publicip,
            "Public Ip should be same for both(PRIMARY & BACKUP)"
        )
        self.assertEqual(
            primary_router.redundantstate,
            "PRIMARY",
            "Redundant state of router should be PRIMARY"
        )
        self.assertEqual(
            backup_router.redundantstate,
            "BACKUP",
            "Redundant state of router should be BACKUP"
        )
        self.assertNotEqual(
            primary_router.guestipaddress,
            backup_router.guestipaddress,
            "Both (PRIMARY & BACKUP) routers should not have same guest IP"
        )
        self.assertNotEqual(
            primary_router.guestmacaddress,
            backup_router.guestmacaddress,
            "Both (PRIMARY & BACKUP) routers should not have same guestMAC"
        )
        return
class TestCreateRvRNetworkNonDefaultGuestCidr(cloudstackTestCase):
    """Same redundant-router pair checks as TestCreateRvRNetwork, but the
    network is created with a non-default guest CIDR (192.168.2.0/23)."""

    @classmethod
    def setUpClass(cls):
        # One-time fixtures: zone/domain/template lookups, a service
        # offering for the test VM, and an enabled RvR network offering.
        cls.testClient = super(
            TestCreateRvRNetworkNonDefaultGuestCidr,
            cls).getClsTestClient()
        cls.api_client = cls.testClient.getApiClient()
        cls.testdata = cls.testClient.getParsedTestDataConfig()
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client)
        cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
        cls.template = get_template(
            cls.api_client,
            cls.zone.id,
            cls.testdata["ostype"]
        )
        cls.testdata["small"]["zoneid"] = cls.zone.id
        cls.testdata["small"]["template"] = cls.template.id
        cls._cleanup = []
        cls.service_offering = ServiceOffering.create(
            cls.api_client,
            cls.testdata["service_offering"]
        )
        cls._cleanup.append(cls.service_offering)
        cls.network_offering = NetworkOffering.create(
            cls.api_client,
            cls.testdata["nw_off_isolated_RVR"],
            conservemode=True
        )
        cls._cleanup.append(cls.network_offering)
        # Enable Network offering
        cls.network_offering.update(cls.api_client, state='Enabled')
        return

    @classmethod
    def tearDownClass(cls):
        try:
            # Cleanup resources used
            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    def setUp(self):
        # Per-test account; deleting it cascades to its networks/VMs/routers.
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.account = Account.create(
            self.apiclient,
            self.testdata["account"],
            admin=True,
            domainid=self.domain.id
        )
        self.cleanup = []
        self.cleanup.insert(0, self.account)
        return

    def tearDown(self):
        try:
            cleanup_resources(self.apiclient, self.cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    @attr(tags=["advanced", "advancedns"])
    def test_createRvRNetwork(self):
        """Test create network with non-default guest cidr with redundant routers
        """
        # Validate the following:
        # 1. listNetworkOfferings shows created offering
        # 2. listNetworks should show created network in Allocated state
        #    - gw = 192.168.2.1 and cidr = 192.168.2.0/23
        # 3. returns no Running routers in the network
        # 4. listVirtualmachines shows VM in Running state
        # 5. returns 2 routers
        #    - same public IP
        #    - same MAC address of public NIC
        #    - different guestip address
        #    - redundant state (PRIMARY or BACKUP)
        #    - same gateway for the public traffic
        # 6. all routers, networks and user VMs are cleaned up
        # Creating network using the network offering created
        self.debug("Creating network with network offering: %s" %
                   self.network_offering.id)
        # netmask 255.255.254.0 + gateway 192.168.2.1 => 192.168.2.0/23
        network = Network.create(
            self.apiclient,
            self.testdata["network"],
            accountid=self.account.name,
            domainid=self.account.domainid,
            networkofferingid=self.network_offering.id,
            zoneid=self.zone.id,
            netmask='255.255.254.0',
            gateway='192.168.2.1'
        )
        self.debug("Created network with ID: %s" % network.id)
        networks = Network.list(
            self.apiclient,
            id=network.id,
            listall=True
        )
        self.assertEqual(
            isinstance(networks, list),
            True,
            "List networks should return a valid response for created network"
        )
        nw_response = networks[0]
        self.debug("Network state: %s" % nw_response.state)
        self.assertEqual(
            nw_response.state,
            "Allocated",
            "The network should be in allocated state after creation"
        )
        self.assertEqual(
            nw_response.gateway,
            '192.168.2.1',
            "The gateway should be 192.168.2.1"
        )
        self.assertEqual(
            nw_response.cidr,
            '192.168.2.0/23',
            "Guest cidr should be 192.168.2.0/23 but is %s" % nw_response.cidr
        )
        # No VM yet, so no routers should have been spawned.
        self.debug("Listing routers for network: %s" % network.name)
        routers = Router.list(
            self.apiclient,
            networkid=network.id,
            listall=True
        )
        self.assertEqual(
            routers,
            None,
            "Routers should not be spawned when network is in allocated state"
        )
        self.debug("Deploying VM in account: %s" % self.account.name)
        # Spawn an instance in that network
        VirtualMachine.create(
            self.apiclient,
            self.testdata["small"],
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            networkids=[str(network.id)]
        )
        self.debug("Deployed VM in network: %s" % network.id)
        # wait for VR to update state
        time.sleep(self.testdata["sleep"])
        self.debug("Listing routers for network: %s" % network.name)
        routers = Router.list(
            self.apiclient,
            networkid=network.id,
            listall=True
        )
        self.assertEqual(
            isinstance(routers, list),
            True,
            "list router should return Primary and backup routers"
        )
        self.assertEqual(
            len(routers),
            2,
            "Length of the list router should be 2 (Backup & Primary)"
        )
        # Order in the list response is not guaranteed; classify by state.
        if routers[0].redundantstate == 'PRIMARY':
            primary_router = routers[0]
            backup_router = routers[1]
        else:
            primary_router = routers[1]
            backup_router = routers[0]
        self.assertEqual(
            primary_router.publicip,
            backup_router.publicip,
            "Public Ip should be same for both(PRIMARY & BACKUP)"
        )
        self.assertEqual(
            primary_router.redundantstate,
            "PRIMARY",
            "Redundant state of router should be PRIMARY"
        )
        self.assertEqual(
            backup_router.redundantstate,
            "BACKUP",
            "Redundant state of router should be BACKUP"
        )
        self.assertNotEqual(
            primary_router.guestipaddress,
            backup_router.guestipaddress,
            "Both (PRIMARY & BACKUP) routers should not have same guest IP"
        )
        self.assertNotEqual(
            primary_router.guestmacaddress,
            backup_router.guestmacaddress,
            "Both (PRIMARY & BACKUP) routers should not have same guestMAC"
        )
        return
class TestRVRInternals(cloudstackTestCase):
    """White-box checks on a redundant router pair: public NIC (eth2) state
    on PRIMARY vs BACKUP, and the guest VM's default gateway."""

    @classmethod
    def setUpClass(cls):
        cls.testClient = super(TestRVRInternals, cls).getClsTestClient()
        cls.api_client = cls.testClient.getApiClient()
        cls.testdata = cls.testClient.getParsedTestDataConfig()
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client)
        cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
        cls.template = get_template(
            cls.api_client,
            cls.zone.id,
            cls.testdata["ostype"]
        )
        cls.testdata["small"]["zoneid"] = cls.zone.id
        cls.testdata["small"]["template"] = cls.template.id
        cls._cleanup = []
        cls.service_offering = ServiceOffering.create(
            cls.api_client,
            cls.testdata["service_offering"]
        )
        cls._cleanup.append(cls.service_offering)
        cls.network_offering = NetworkOffering.create(
            cls.api_client,
            cls.testdata["nw_off_isolated_RVR"],
            conservemode=True
        )
        cls._cleanup.append(cls.network_offering)
        # Enable Network offering
        cls.network_offering.update(cls.api_client, state='Enabled')
        return

    @classmethod
    def tearDownClass(cls):
        try:
            # Cleanup resources used
            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    def setUp(self):
        self.apiclient = self.testClient.getApiClient()
        self.hypervisor = self.testClient.getHypervisorInfo()
        self.dbclient = self.testClient.getDbConnection()
        self.account = Account.create(
            self.apiclient,
            self.testdata["account"],
            admin=True,
            domainid=self.domain.id
        )
        self.cleanup = []
        # Deleting the account cascades to its networks/VMs/routers.
        self.cleanup.insert(0, self.account)
        return

    def tearDown(self):
        try:
            cleanup_resources(self.apiclient, self.cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    def _ip_addr_show_eth2(self, host, router):
        """Run 'ip addr show eth2' inside *router*.

        For vmware/hyperv the double hop goes through the management server;
        otherwise it goes through the hypervisor *host* directly.
        """
        if self.hypervisor.lower() in ('vmware', 'hyperv'):
            return get_process_status(
                self.apiclient.connection.mgtSvr,
                22,
                self.apiclient.connection.user,
                self.apiclient.connection.passwd,
                router.linklocalip,
                'ip addr show eth2',
                hypervisor=self.hypervisor
            )
        return get_process_status(
            host.ipaddress,
            22,
            self.testdata['configurableData']['host']["username"],
            self.testdata['configurableData']['host']["password"],
            router.linklocalip,
            "ip addr show eth2"
        )

    # @attr(tags=["advanced", "advancedns", "ssh"])
    @attr(tags=["TODO"])
    def test_redundantVR_internals(self):
        """Test redundant router internals
        """
        # Steps to validate
        # 1. createNetwork using network offering for redundant virtual router
        # 2. listRouters in above network
        # 3. deployVM in above user account in the created network
        # 4. login to both Redundant Routers
        # 5. login to user VM
        # 6. delete user account
        # Validate the following:
        # 1. listNetworks lists network in Allocated state
        # 2. listRouters lists no routers created yet
        # 3. listRouters returns Primary and Backup routers
        # 4. ssh in to both routers and verify:
        #    - PRIMARY router has eth2 with public Ip address
        #    - BACKUP router has only guest eth0 and link local eth1
        #    - Broadcast on PRIMARY eth2 is non-zero (0.0.0.0)
        # 5. DNS of the user VM is set to RedundantRouter Gateway
        #    Check that the default gateway for the guest is the rvr gateway
        #    and not the guestIp of either of the RvRs
        # Creating network using the network offering created
        self.debug("Creating network with network offering: %s" %
                   self.network_offering.id)
        network = Network.create(
            self.apiclient,
            self.testdata["network"],
            accountid=self.account.name,
            domainid=self.account.domainid,
            networkofferingid=self.network_offering.id,
            zoneid=self.zone.id
        )
        self.debug("Created network with ID: %s" % network.id)
        networks = Network.list(
            self.apiclient,
            id=network.id,
            listall=True
        )
        self.assertEqual(
            isinstance(networks, list),
            True,
            "List networks should return a valid response for created network"
        )
        nw_response = networks[0]
        self.debug("Network state: %s" % nw_response.state)
        self.assertEqual(
            nw_response.state,
            "Allocated",
            "The network should be in allocated state after creation"
        )
        # No VM yet, so no routers should have been spawned.
        self.debug("Listing routers for network: %s" % network.name)
        routers = Router.list(
            self.apiclient,
            networkid=network.id,
            listall=True
        )
        self.assertEqual(
            routers,
            None,
            "Routers should not be spawned when network is in allocated state"
        )
        self.debug("Deploying VM in account: %s" % self.account.name)
        # Spawn an instance in that network
        virtual_machine = VirtualMachine.create(
            self.apiclient,
            self.testdata["small"],
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            networkids=[str(network.id)]
        )
        self.debug("Deployed VM in network: %s" % network.id)
        # wait for VR to update state
        time.sleep(self.testdata["sleep"])
        self.debug("Listing routers for network: %s" % network.name)
        routers = Router.list(
            self.apiclient,
            networkid=network.id,
            listall=True
        )
        self.assertEqual(
            isinstance(routers, list),
            True,
            "list router should return Primary and backup routers"
        )
        self.assertEqual(
            len(routers),
            2,
            "Length of the list router should be 2 (Backup & Primary)"
        )
        # Order in the list response is not guaranteed; classify by state.
        if routers[0].redundantstate == 'PRIMARY':
            primary_router = routers[0]
            backup_router = routers[1]
        else:
            primary_router = routers[1]
            backup_router = routers[0]
        self.debug("Fetching the host details for double hop into router")
        hosts = Host.list(
            self.apiclient,
            id=primary_router.hostid
        )
        self.assertEqual(
            isinstance(hosts, list),
            True,
            "List hosts should return a valid list"
        )
        primary_host = hosts[0]
        self.debug("Host for primary router: %s" % primary_host.name)
        self.debug("Host for primary router: %s" % primary_host.ipaddress)
        hosts = Host.list(
            self.apiclient,
            id=backup_router.hostid
        )
        self.assertEqual(
            isinstance(hosts, list),
            True,
            "List hosts should return a valid list"
        )
        backup_host = hosts[0]
        self.debug("Host for backup router: %s" % backup_host.name)
        self.debug("Host for backup router: %s" % backup_host.ipaddress)
        self.debug(primary_router.linklocalip)
        # Check eth2 port for primary router
        result = self._ip_addr_show_eth2(primary_host, primary_router)
        res = str(result)
        self.debug("Command 'ip addr show eth2': %s" % result)
        self.debug("Router's public Ip: %s" % primary_router.publicip)
        self.assertEqual(
            res.count("state UP"),
            1,
            "PRIMARY router's public interface should be UP"
        )
        self.assertEqual(
            result.count('brd 0.0.0.0'),
            0,
            "Broadcast address of eth2 should not be 0.0.0.0"
        )
        # Check eth2 port for backup router
        result = self._ip_addr_show_eth2(backup_host, backup_router)
        res = str(result)
        self.debug("Command 'ip addr show eth2': %s" % result)
        self.assertEqual(
            res.count("state DOWN"),
            1,
            "BACKUP router's public interface should be DOWN"
        )
        self.assertEqual(
            result.count('brd 0.0.0.0'),
            0,
            "Broadcast address of eth2 should not be 0.0.0.0"
        )
        vms = VirtualMachine.list(
            self.apiclient,
            id=virtual_machine.id,
            listall=True
        )
        self.assertEqual(
            isinstance(vms, list),
            True,
            "List VMs should not return empty response"
        )
        vm = vms[0]
        # The guest's gateway must be the shared RvR gateway, not either
        # router's own address (messages fixed to match the assertions).
        self.assertNotEqual(
            vm.nic[0].gateway,
            primary_router.publicip,
            "The gateway of user VM should not be the primary router's IP"
        )
        self.assertNotEqual(
            vm.nic[0].gateway,
            backup_router.publicip,
            "The gateway of user VM should not be the backup router's IP"
        )
        return
class TestRvRRedundancy(cloudstackTestCase):
    @classmethod
    def setUpClass(cls):
        """One-time fixtures: zone/domain/template lookups, a service
        offering, and two enabled redundant-router network offerings (the
        second is reserved for tests that update a network's offering)."""
        cls.testClient = super(TestRvRRedundancy, cls).getClsTestClient()
        cls.api_client = cls.testClient.getApiClient()
        cls.testdata = cls.testClient.getParsedTestDataConfig()
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client)
        cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
        cls.template = get_template(
            cls.api_client,
            cls.zone.id,
            cls.testdata["ostype"]
        )
        cls.testdata["small"]["zoneid"] = cls.zone.id
        cls.testdata["small"]["template"] = cls.template.id
        cls._cleanup = []
        cls.service_offering = ServiceOffering.create(
            cls.api_client,
            cls.testdata["service_offering"]
        )
        cls._cleanup.append(cls.service_offering)
        cls.network_offering = NetworkOffering.create(
            cls.api_client,
            cls.testdata["nw_off_isolated_RVR"],
            conservemode=True
        )
        # Spare offering used by update-network tests.
        cls.network_offering_for_update = NetworkOffering.create(
            cls.api_client,
            cls.testdata["nw_off_isolated_RVR"],
            conservemode=True
        )
        cls._cleanup.append(cls.network_offering_for_update)
        cls._cleanup.append(cls.network_offering)
        # Enable Network offering
        cls.network_offering.update(cls.api_client, state='Enabled')
        cls.network_offering_for_update.update(cls.api_client, state='Enabled')
        return
@classmethod
def tearDownClass(cls):
try:
# Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
    def setUp(self):
        """Per-test fixture: fresh account, an RvR-backed network in it, and
        a deployed VM so the router pair is spawned before each test."""
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.cleanup = []
        self.account = Account.create(
            self.apiclient,
            self.testdata["account"],
            admin=True,
            domainid=self.domain.id
        )
        # Deleting the account cascades to its networks/VMs/routers.
        self.cleanup.insert(0, self.account)
        # Creating network using the network offering created
        self.debug("Creating network with network offering: %s" %
                   self.network_offering.id)
        self.network = Network.create(
            self.apiclient,
            self.testdata["network"],
            accountid=self.account.name,
            domainid=self.account.domainid,
            networkofferingid=self.network_offering.id,
            zoneid=self.zone.id
        )
        self.debug("Created network with ID: %s" % self.network.id)
        self.debug("Deploying VM in account: %s" % self.account.name)
        # Spawn an instance in that network
        self.virtual_machine = VirtualMachine.create(
            self.apiclient,
            self.testdata["small"],
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            networkids=[str(self.network.id)]
        )
        self.debug("Deployed VM in network: %s" % self.network.id)
        # wait for VR to update state
        time.sleep(self.testdata["sleep"])
        return
def tearDown(self):
try:
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags=["advanced", "advancedns", "ssh"])
def test_01_stopPrimaryRvR(self):
"""Test stop primary RVR
"""
# Steps to validate
# 1. createNetwork using network offering for redundant virtual router
# listNetworks returns the allocated network
# 2. listRouters in above network. Lists no routers in the created
# network
# 3. deployVM in above user account in the created network. VM is
# successfully Running
# 4. listRouters that has redundantstate=PRIMARY. only one router is
# returned with redundantstate = PRIMARY for this network
# 5. stopRouter that is Primary. Router goes to stopped state
# successfully
# 6. listRouters in the account and in the network. Lists old PRIMARY
# router in redundantstate=UNKNOWN, and the old BACKUP router as
# new PRIMARY
# 7. start the stopped router. Stopped rvr starts up successfully and
# is in Running state
# 8. listRouters in the account and in the network. Router shows up as
# BACKUP and NOT PRIMARY, should have only one BACKUP and one PRIMARY
# at the end, public IP of the SourceNAT should remain same after
# reboot
# 9. delete the account
self.debug("Listing routers for network: %s" % self.network.name)
routers = Router.list(
self.apiclient,
networkid=self.network.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Primary and backup routers"
)
self.assertEqual(
len(routers),
2,
"Length of the list router should be 2 (Backup & Primary)"
)
if routers[0].redundantstate == 'PRIMARY':
primary_router = routers[0]
backup_router = routers[1]
else:
primary_router = routers[1]
backup_router = routers[0]
self.debug("Stopping the PRIMARY router")
try:
Router.stop(self.apiclient, id=primary_router.id)
except Exception as e:
self.fail("Failed to stop primary router: %s" % e)
# wait for VR to update state
time.sleep(self.testdata["sleep"])
self.debug("Listing routers for network: %s" % self.network.name)
routers = Router.list(
self.apiclient,
id=primary_router.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Primary and backup routers"
)
self.assertIn(
routers[0].redundantstate, [
'UNKNOWN', 'FAULT'], "Redundant state of the primary router\
should be UNKNOWN/FAULT but is %s" %
routers[0].redundantstate)
self.debug(
"Checking state of the backup router in %s" %
self.network.name)
routers = Router.list(
self.apiclient,
id=backup_router.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return backup router"
)
self.assertEqual(
routers[0].redundantstate,
'PRIMARY',
"Redundant state of the router should be PRIMARY but is %s" %
routers[0].redundantstate)
self.debug("Starting the old PRIMARY router")
try:
Router.start(self.apiclient, id=primary_router.id)
self.debug("old PRIMARY router started")
except Exception as e:
self.fail("Failed to start primary router: %s" % e)
# wait for VR to update state
time.sleep(self.testdata["sleep"])
self.debug(
"Checking state of the primary router in %s" %
self.network.name)
routers = Router.list(
self.apiclient,
id=primary_router.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return backup router"
)
self.assertEqual(
routers[0].redundantstate,
'BACKUP',
"Redundant state of the router should be BACKUP but is %s" %
routers[0].redundantstate)
self.assertEqual(
primary_router.publicip,
routers[0].publicip,
"Public IP should be same after reboot"
)
return
@attr(tags=["advanced", "advancedns", "ssh"])
def test_02_stopBackupRvR(self):
    """Test stop backup RVR

    Stops the BACKUP router of a redundant VR pair, verifies the PRIMARY
    keeps its role while the stopped router reports UNKNOWN/FAULT, then
    restarts the stopped router and verifies it rejoins as BACKUP with
    the same public IP.
    """
    # Steps to validate
    # 1. createNetwork using network offering for redundant virtual router
    # listNetworks returns the allocated network
    # 2. listRouters in above network. Lists no routers in the created
    # network
    # 3. deployVM in above user account in the created network. VM is
    # successfully Running
    # 4. listRouters that has redundantstate=PRIMARY. only one router is
    # returned with redundantstate = PRIMARY for this network
    # 5. stopRouter that is BACKUP. Router goes to stopped state
    # successfully
    # 6. listRouters in the account and in the network. Lists old PRIMARY
    # router in redundantstate=UNKNOWN
    # 7. start the stopped router. Stopped rvr starts up successfully and
    # is in Running state
    # 8. listRouters in the account and in the network. Router shows up as
    # BACKUP and NOT PRIMARY, should have only one BACKUP and one PRIMARY
    # at the end, public IP of the SourceNAT should remain same after
    # reboot
    # 9. delete the account
    self.debug("Listing routers for network: %s" % self.network.name)
    routers = Router.list(
        self.apiclient,
        networkid=self.network.id,
        listall=True
    )
    self.assertEqual(
        isinstance(routers, list),
        True,
        "list router should return Primary and backup routers"
    )
    self.assertEqual(
        len(routers),
        2,
        "Length of the list router should be 2 (Backup & Primary)"
    )
    # Identify which router currently holds each role.
    if routers[0].redundantstate == 'PRIMARY':
        primary_router = routers[0]
        backup_router = routers[1]
    else:
        primary_router = routers[1]
        backup_router = routers[0]
    self.debug("Stopping the BACKUP router")
    try:
        Router.stop(self.apiclient, id=backup_router.id)
    except Exception as e:
        self.fail("Failed to stop backup router: %s" % e)
    # wait for VR update state
    time.sleep(self.testdata["sleep"])
    self.debug(
        "Checking state of the backup router in %s" %
        self.network.name)
    routers = Router.list(
        self.apiclient,
        id=backup_router.id,
        listall=True
    )
    self.assertEqual(
        isinstance(routers, list),
        True,
        "list router should return Primary and backup routers"
    )
    # A stopped router reports UNKNOWN (or FAULT) rather than BACKUP.
    self.assertIn(
        routers[0].redundantstate, [
            'UNKNOWN', 'FAULT'], "Redundant state of the backup router\
                should be UNKNOWN/FAULT but is %s" %
        routers[0].redundantstate)
    self.debug(
        "Checking state of the primary router in %s" %
        self.network.name)
    routers = Router.list(
        self.apiclient,
        id=primary_router.id,
        listall=True
    )
    self.assertEqual(
        isinstance(routers, list),
        True,
        "list router should return Primary and backup routers"
    )
    # The surviving router must still be PRIMARY.
    self.assertEqual(
        routers[0].redundantstate,
        'PRIMARY',
        "Redundant state of the router should be PRIMARY but is %s" %
        routers[0].redundantstate)
    self.debug("Starting the old BACKUP router")
    try:
        Router.start(self.apiclient, id=backup_router.id)
        self.debug("old BACKUP router started")
    except Exception as e:
        # BUG FIX: this message previously read "Failed to stop primary
        # router", but this branch handles a failure to START the BACKUP.
        self.fail("Failed to start backup router: %s" % e)
    # wait for VR to start and update state
    time.sleep(self.testdata["sleep"])
    self.debug(
        "Checking state of the backup router in %s" %
        self.network.name)
    routers = Router.list(
        self.apiclient,
        id=backup_router.id,
        listall=True
    )
    self.assertEqual(
        isinstance(routers, list),
        True,
        "list router should return backup router"
    )
    # Restarted router must rejoin as BACKUP, not take over as PRIMARY.
    self.assertEqual(
        routers[0].redundantstate,
        'BACKUP',
        "Redundant state of the router should be BACKUP but is %s" %
        routers[0].redundantstate)
    # Source NAT public IP must be preserved across the stop/start.
    self.assertEqual(
        backup_router.publicip,
        routers[0].publicip,
        "Public IP should be same after reboot"
    )
    return
@attr(tags=["advanced", "advancedns", "ssh"])
def test_03_rebootPrimaryRvR(self):
    """Test reboot primary RVR

    Reboots the PRIMARY router and verifies the roles swap: the old
    PRIMARY comes back as BACKUP, the old BACKUP is promoted to PRIMARY,
    and the source NAT public IP is preserved.
    """
    # Steps to validate
    # 1. createNetwork using network offering for redundant virtual router
    # listNetworks returns the allocated network
    # 2. listRouters in above network. Lists no routers in the created
    # network
    # 3. deployVM in above user account in the created network. VM is
    # successfully Running
    # 4. listRouters that has redundantstate=PRIMARY. only one router is
    # returned with redundantstate = PRIMARY for this network
    # 5. reboot router that is PRIMARY. Router reboots state
    # successfully
    # 6. lists old PRIMARY router in redundantstate=BACKUP and the old
    # BACKUP router as new PRIMARY + public IP of the SourceNAT should
    # remain same after the reboot
    self.debug("Listing routers for network: %s" % self.network.name)
    routers = Router.list(
        self.apiclient,
        networkid=self.network.id,
        listall=True
    )
    self.assertEqual(
        isinstance(routers, list),
        True,
        "list router should return Primary and backup routers"
    )
    self.assertEqual(
        len(routers),
        2,
        "Length of the list router should be 2 (Backup & Primary)"
    )
    # Identify which router currently holds each role.
    if routers[0].redundantstate == 'PRIMARY':
        primary_router = routers[0]
        backup_router = routers[1]
    else:
        primary_router = routers[1]
        backup_router = routers[0]
    self.debug("Rebooting the primary router")
    try:
        Router.reboot(self.apiclient, id=primary_router.id)
    except Exception as e:
        self.fail("Failed to reboot PRIMARY router: %s" % e)
    # wait for VR to update state
    time.sleep(self.testdata["sleep"])
    self.debug(
        "Checking state of the primary router in %s" %
        self.network.name)
    routers = Router.list(
        self.apiclient,
        id=primary_router.id,
        listall=True
    )
    self.assertEqual(
        isinstance(routers, list),
        True,
        "list router should return Primary and backup routers"
    )
    # After the reboot the old PRIMARY must have been demoted to BACKUP.
    self.assertEqual(
        routers[0].redundantstate,
        'BACKUP',
        "Redundant state of the router should be BACKUP but is %s" %
        routers[0].redundantstate)
    self.debug(
        "Checking state of the backup router in %s" %
        self.network.name)
    routers = Router.list(
        self.apiclient,
        id=backup_router.id,
        listall=True
    )
    self.assertEqual(
        isinstance(routers, list),
        True,
        "list router should return Primary and backup routers"
    )
    # ...and the old BACKUP must have taken over as PRIMARY.
    self.assertEqual(
        routers[0].redundantstate,
        'PRIMARY',
        "Redundant state of the router should be PRIMARY but is %s" %
        routers[0].redundantstate)
    # Source NAT public IP must survive the failover.
    self.assertEqual(
        primary_router.publicip,
        routers[0].publicip,
        "Public IP should be same after reboot"
    )
    return
@attr(tags=["advanced", "advancedns", "ssh"])
def test_04_rebootBackupRvR(self):
    """Test reboot backup RVR

    Reboots the BACKUP router and verifies that roles do NOT swap: the
    rebooted router comes back as BACKUP, the PRIMARY stays PRIMARY, and
    the source NAT public IP is preserved.
    """
    # Steps to validate
    # 1. createNetwork using network offering for redundant virtual router
    # listNetworks returns the allocated network
    # 2. listRouters in above network. Lists no routers in the created
    # network
    # 3. deployVM in above user account in the created network. VM is
    # successfully Running
    # 4. listRouters that has redundantstate=PRIMARY. only one router is
    # returned with redundantstate = PRIMARY for this network
    # 5. reboot router that is BACKUP. Router reboots state
    # successfully
    # 6. lists old BACKUP router in redundantstate=BACKUP, and the old
    # PRIMARY router is still PRIMARY+ public IP of the SourceNAT should
    # remain same after the reboot
    self.debug("Listing routers for network: %s" % self.network.name)
    routers = Router.list(
        self.apiclient,
        networkid=self.network.id,
        listall=True
    )
    self.assertEqual(
        isinstance(routers, list),
        True,
        "list router should return Primary and backup routers"
    )
    self.assertEqual(
        len(routers),
        2,
        "Length of the list router should be 2 (Backup & Primary)"
    )
    # Identify which router currently holds each role.
    if routers[0].redundantstate == 'PRIMARY':
        primary_router = routers[0]
        backup_router = routers[1]
    else:
        primary_router = routers[1]
        backup_router = routers[0]
    self.debug("Rebooting the backup router")
    try:
        Router.reboot(self.apiclient, id=backup_router.id)
    except Exception as e:
        self.fail("Failed to reboot BACKUP router: %s" % e)
    # wait for VR to update state
    time.sleep(self.testdata["sleep"])
    self.debug(
        "Checking state of the backup router in %s" %
        self.network.name)
    routers = Router.list(
        self.apiclient,
        id=backup_router.id,
        listall=True
    )
    self.assertEqual(
        isinstance(routers, list),
        True,
        "list router should return Primary and backup routers"
    )
    # Rebooted router must come back in its original BACKUP role.
    self.assertEqual(
        routers[0].redundantstate,
        'BACKUP',
        "Redundant state of the router should be BACKUP but is %s" %
        routers[0].redundantstate)
    self.debug(
        "Checking state of the Primary router in %s" %
        self.network.name)
    routers = Router.list(
        self.apiclient,
        id=primary_router.id,
        listall=True
    )
    self.assertEqual(
        isinstance(routers, list),
        True,
        "list router should return Primary and backup routers"
    )
    # The PRIMARY must have been unaffected by the backup's reboot.
    self.assertEqual(
        routers[0].redundantstate,
        'PRIMARY',
        "Redundant state of the router should be PRIMARY but is %s" %
        routers[0].redundantstate)
    # Source NAT public IP must be unchanged.
    self.assertEqual(
        primary_router.publicip,
        routers[0].publicip,
        "Public IP should be same after reboot"
    )
    return
@attr(tags=["advanced", "advancedns", "ssh"])
def test_05_stopBackupRvR_startInstance(self):
    """Test stop backup RVR and start instance

    Stops the BACKUP router, deploys a new VM into the network (which is
    expected to bring the stopped router back up), and verifies the
    router returns to the BACKUP state.
    """
    # Steps to validate
    # 1. createNetwork using network offering for redundant virtual router
    # listNetworks returns the allocated network
    # 2. listRouters in above network. Lists no routers in the created
    # network
    # 3. deployVM in above user account in the created network. VM is
    # successfully Running
    # 4. listRouters that has redundantstate=PRIMARY. only one router is
    # returned with redundantstate = PRIMARY for this network
    # 5. stop router that is BACKUP.
    # 6. listRouters in the account and in the network
    # 7. deployVM in the user account in the created network
    # 8. listRouters in the account and in the network
    # 9. delete the account
    self.debug("Listing routers for network: %s" % self.network.name)
    routers = Router.list(
        self.apiclient,
        networkid=self.network.id,
        listall=True
    )
    self.assertEqual(
        isinstance(routers, list),
        True,
        "list router should return Primary and backup routers"
    )
    self.assertEqual(
        len(routers),
        2,
        "Length of the list router should be 2 (Backup & Primary)"
    )
    # Only the backup router is needed for this scenario.
    if routers[0].redundantstate == 'PRIMARY':
        backup_router = routers[1]
    else:
        backup_router = routers[0]
    self.debug("Stopping the backup router")
    try:
        Router.stop(self.apiclient, id=backup_router.id)
    except Exception as e:
        self.fail("Failed to stop BACKUP router: %s" % e)
    # wait for VR to update state
    time.sleep(self.testdata["sleep"])
    self.debug(
        "Checking state of the backup router in %s" %
        self.network.name)
    routers = Router.list(
        self.apiclient,
        id=backup_router.id,
        listall=True
    )
    self.assertEqual(
        isinstance(routers, list),
        True,
        "list router should return Primary and backup routers"
    )
    # BUG FIX: the container was the *string* 'UNKNOWN', so assertIn
    # performed a substring test and 'FAULT' (a valid stopped-router
    # state, accepted by the sibling tests) would wrongly fail.
    # Use the same list container as test_01/test_02.
    self.assertIn(
        routers[0].redundantstate, [
            'UNKNOWN', 'FAULT'], "Redundant state of the backup router\
                should be UNKNOWN/FAULT but is %s" %
        routers[0].redundantstate)
    # Spawn an instance in that network
    vm_2 = VirtualMachine.create(
        self.apiclient,
        self.testdata["small"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
        networkids=[str(self.network.id)]
    )
    self.debug("Deployed VM in network: %s" % self.network.id)
    vms = VirtualMachine.list(
        self.apiclient,
        id=vm_2.id,
        listall=True
    )
    self.assertEqual(
        isinstance(vms, list),
        True,
        "List Vms should return a valid list"
    )
    vm = vms[0]
    self.assertEqual(
        vm.state,
        "Running",
        "Vm should be in running state after deployment"
    )
    # wait for VR to update state
    time.sleep(self.testdata["sleep"])
    self.debug(
        "Checking state of the backup router in %s" %
        self.network.name)
    routers = Router.list(
        self.apiclient,
        id=backup_router.id,
        listall=True
    )
    self.assertEqual(
        isinstance(routers, list),
        True,
        "list router should return Primary and backup routers"
    )
    # Deploying the VM should have restarted the stopped router, which
    # must rejoin in the BACKUP role.
    self.assertEqual(
        routers[0].redundantstate,
        'BACKUP',
        "Redundant state of the router should be BACKUP but is %s" %
        routers[0].redundantstate)
    return
def updateNetwork(self, conn):
    """Update self.network to the new offering; report status over *conn*.

    Runs in a worker process (see test_06_updateVRs_in_sequence); sends a
    single status string through the pipe end *conn*: either a failure
    message or "update Network Complete".
    """
    try:
        self.network.update(
            self.api_client,
            networkofferingid=self.network_offering_for_update.id,
            updateinsequence=True,
            forced=True,
            changecidr=False
        )
    except Exception as e:
        conn.send("Failed to update network: %s due to %s" % (self.network.name, e))
        # BUG FIX: previously fell through and also sent
        # "update Network Complete" after reporting the failure.
        return
    conn.send("update Network Complete")
    return
def get_primary_and_backupRouter(self):
    """Return the (primary, backup) router pair of self.network.

    Polls the router list up to four times (no sleep between attempts)
    until two routers exist and one of them reports PRIMARY; returns
    (None, None) when no PRIMARY shows up within the retry budget.
    """
    primary_router = None
    backup_router = None
    for _ in range(4):
        routers = Router.list(
            self.apiclient,
            networkid=self.network.id,
            listall=True
        )
        if len(routers) < 2:
            continue
        states = [routers[0].redundantstate, routers[1].redundantstate]
        if 'PRIMARY' not in states:
            continue
        primary_idx = states.index('PRIMARY')
        primary_router = routers[primary_idx]
        backup_router = routers[1 - primary_idx]
        break
    self.info("primary_router: %s, backup_router: %s" % (primary_router, backup_router))
    return primary_router, backup_router
def chek_for_new_backupRouter(self, old_backup_router):
    """Wait until the BACKUP router is a *different* router than *old_backup_router*.

    Polls get_primary_and_backupRouter up to four times, sleeping
    testdata["sleep"] between attempts; calls self.fail on timeout.
    (Name typo "chek" is kept: callers reference this exact name.)
    """
    primary_router, backup_router = self.get_primary_and_backupRouter()
    retry = 4
    self.info("Checking if new router is getting created.")
    self.info("old_backup_router:" + old_backup_router.name + " new_backup_router:" + backup_router.name)
    # A replacement router is detected by a change of router name.
    while old_backup_router.name == backup_router.name:
        self.debug("waiting for new router old router:" + backup_router.name)
        retry = retry - 1
        if retry == 0:
            break
        time.sleep(self.testdata["sleep"])
        primary_router, backup_router = self.get_primary_and_backupRouter()
    if retry == 0:
        self.fail("New router creation taking too long, timed out")
def wait_until_router_stabilises(self):
    """Block until neither router of self.network reports UNKNOWN state.

    Polls up to four times with testdata["sleep"] between attempts;
    calls self.fail on timeout.
    """
    retry = 4
    while retry > 0:
        routers = Router.list(
            self.apiclient,
            networkid=self.network.id,
            listall=True
        )
        retry = retry - 1
        self.info("waiting until state of the routers is stable")
        if routers[0].redundantstate != 'UNKNOWN' and routers[1].redundantstate != 'UNKNOWN':
            return
        elif retry == 0:
            self.fail("timedout while waiting for routers to stabilise")
            return
        time.sleep(self.testdata["sleep"])
@attr(tags=["bharat"])
def test_06_updateVRs_in_sequence(self):
    """Test update network and check if VRs are updated in sequence

    Kicks off a network-offering update in a worker process and, while
    it runs, verifies that the two redundant routers are replaced one at
    a time: the PRIMARY stays Running while the BACKUP is recreated,
    then roles swap and the old PRIMARY is recreated in turn.
    """
    # Steps to validate
    # update network to a new offering
    # check if the primary router is running while backup is starting.
    # check if the backup is running while primary is starting.
    # check if both the routers are running after the update is complete.
    # clean up the network to make sure it is in proper state.
    self.network.restart(self.apiclient, cleanup=True)
    time.sleep(self.testdata["sleep"])
    self.wait_until_router_stabilises()
    old_primary_router, old_backup_router = self.get_primary_and_backupRouter()
    self.info("old_primary_router:" + old_primary_router.name + " old_backup_router" + old_backup_router.name)
    # check if the network is in correct state before starting the test
    self.assertEqual(old_primary_router.state, "Running", "The primary router is not running, network is not in a correct state to start the test")
    self.assertEqual(old_backup_router.state, "Running", "The backup router is not running, network is not in a correct state to start the test")
    # Run the (blocking) network update in a separate process so router
    # states can be observed here while the update is in flight.
    worker, monitor = multiprocessing.Pipe()
    worker_process = multiprocessing.Process(target=self.updateNetwork, args=(worker,))
    worker_process.start()
    # NOTE(review): this only catches a worker that already exited;
    # while the worker is alive the pipe is not read at this point.
    if not worker_process.is_alive():
        message = monitor.recv()
        if "Complete" not in message:
            self.fail(message)
    self.info("Network update Started, the old backup router will get destroyed and a new router will be created")
    self.chek_for_new_backupRouter(old_backup_router)
    primary_router, new_backup_router = self.get_primary_and_backupRouter()
    # the state of the primary router should be running while the backup
    # is being updated
    self.assertEqual(primary_router.state, "Running", "State of the primary router is not running")
    self.assertEqual(primary_router.redundantstate, 'PRIMARY', "Redundant state of the primary router should be PRIMARY, but it is %s" % primary_router.redundantstate)
    self.info("Old backup router:" + old_backup_router.name + " is destroyed and new router:" + new_backup_router.name + " got created")
    # wait for the new backup to become primary.
    retry = 4
    while new_backup_router.name != primary_router.name:
        retry = retry - 1
        if retry == 0:
            break
        time.sleep(self.testdata["sleep"])
        self.info("wating for backup router to become primary router name:" + new_backup_router.name)
        primary_router, backup_router = self.get_primary_and_backupRouter()
    if retry == 0:
        self.fail("timed out while waiting for new backup router to change state to PRIMARY.")
    # new backup router has become primary.
    self.info("newly created router:" + new_backup_router.name + " has changed state to Primary")
    self.info("old primary router:" + old_primary_router.name + "is destroyed")
    # old primary will get destroyed and a new backup will be created.
    # wait until new backup changes state from unknown to backup
    primary_router, backup_router = self.get_primary_and_backupRouter()
    retry = 4
    while backup_router.redundantstate != 'BACKUP':
        retry = retry - 1
        self.info("waiting for router:" + backup_router.name + " to change state to Backup")
        if retry == 0:
            break
        time.sleep(self.testdata["sleep"])
        primary_router, backup_router = self.get_primary_and_backupRouter()
    self.assertEqual(primary_router.state, "Running", "State of the primary router is not running")
    self.assertEqual(primary_router.redundantstate, 'PRIMARY', "Redundant state of the primary router should be PRIMARY, but it is %s" % primary_router.redundantstate)
    if retry == 0:
        self.fail("timed out while waiting for new backup rotuer to change state to PRIMARY.")
    # the network update is complete. finally both routers should be running.
    new_primary_router, new_backup_router = self.get_primary_and_backupRouter()
    self.assertEqual(new_primary_router.state, "Running", "State of the primary router:" + new_primary_router.name + " is not running")
    self.assertEqual(new_backup_router.state, "Running", "State of the backup router:" + new_backup_router.name + " is not running")
    worker_process.join()
|
test_vyos_rebooting_create_vm.py | '''
Test for tracking http://dev.zstack.io/browse/ZSTAC-5595
@author: SyZhao
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.node_operations as node_ops
import zstackwoodpecker.zstack_test.zstack_test_vm as test_vm_header
import zstackwoodpecker.operations.vm_operations as vm_ops
import test_stub
import threading
vm = None
vm2 = None
def async_exec_reboot_vr(uuid):
    """Reboot the VM with *uuid*; intended as a threading.Thread target."""
    vm_ops.reboot_vm(uuid)
def test():
    """Create a VM while the vyos VR is rebooting (regression for ZSTAC-5595)."""
    global vm, vm2
    test_stub.skip_if_vr_not_vyos("vr")
    test_lib.clean_up_all_vr()
    # First VM forces the VR to be created so we can locate it.
    vm = test_stub.create_basic_vm()
    vm.check()
    vr_vm = test_lib.lib_find_vr_by_vm(vm.vm)[0]
    vm.destroy()
    # Reboot the VR asynchronously so the next VM creation overlaps it.
    t = threading.Thread(target=async_exec_reboot_vr, args=(vr_vm.uuid,))
    t.start()
    vm2 = test_stub.create_basic_vm(wait_vr_running=False)
    vm2.check()
    vm2.destroy()
    test_util.test_pass('Create VM when vyos is rebooting Test Success')
# Will be called whatever the test result is.
def env_recover():
    """No environment changes to undo for this test."""
    pass
# Will be called only if an exception happens in test().
def error_cleanup():
    """Best-effort teardown of any VMs left behind by test().

    Each VM is destroyed independently: previously vm2 was only
    destroyed when vm was set, and an error while destroying vm2
    silently skipped vm.destroy() because both calls shared one
    try/except.
    """
    global vm, vm2
    if vm2:
        try:
            vm2.destroy()
        except Exception:
            pass
    if vm:
        try:
            vm.destroy()
        except Exception:
            pass
|
example4-quees.py | import threading
import time
import queue
from datetime import datetime

# create the work queue
q = queue.Queue()


# consumer simulation: takes jobs off the queue forever
def consumer():
    while True:
        job = q.get()
        print(f'Starting consuming {job} [{datetime.utcnow().strftime("%H:%M:%S.%f")[:-3]}]')
        time.sleep(0.4)  # simulate slow processing
        print(f'Consumed {job} [{datetime.utcnow().strftime("%H:%M:%S.%f")[:-3]}]')
        q.task_done()


# start the consumer (daemon thread: it dies with the main program)
threading.Thread(target=consumer, daemon=True, name="první").start()

# create the jobs in the producer
for job in range(10):
    print(f'Producing {job}')
    q.put(job)

# wait until every queued job has been processed
q.join()
print('Done')
# from pprint import pprint
# pprint(numbers)
# (jverner@tacticus) - (~/GIT/EXPERIMENTS/threading-root-cz) $ python3.9eve example4-quees.py
# Producing 0
# Producing 1
# Starting consuming 0 [08:52:38.770]
# Producing 2
# Producing 3
# Producing 4
# Producing 5
# Producing 6
# Producing 7
# Producing 8
# Producing 9
# Consumed 0 [08:52:39.171]
# Starting consuming 1 [08:52:39.171]
# Consumed 1 [08:52:39.572]
# Starting consuming 2 [08:52:39.572]
# Consumed 2 [08:52:39.972]
# Starting consuming 3 [08:52:39.972]
# Consumed 3 [08:52:40.373]
# Starting consuming 4 [08:52:40.373]
# Consumed 4 [08:52:40.774]
# Starting consuming 5 [08:52:40.774]
# Consumed 5 [08:52:41.174]
# Starting consuming 6 [08:52:41.175]
# Consumed 6 [08:52:41.575]
# Starting consuming 7 [08:52:41.575]
# Consumed 7 [08:52:41.976]
# Starting consuming 8 [08:52:41.976]
# Consumed 8 [08:52:42.376]
# Starting consuming 9 [08:52:42.377]
# Consumed 9 [08:52:42.777]
# Done
|
periodic_auto_refresh.py | # Copyright 2020 Google LLC
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""
The code in this file provides a drop-in replacement for spanner_v1.Transaction,
but one that auto-refreshes every 8.5 seconds, to deal with Cloud Spanner's server's
max idle time of 10 seconds, per:
https://cloud.google.com/spanner/docs/reference/rest/v1/TransactionOptions#idle-transactions
It handles concurrency concerns by using an event loop, a queue and callbacks. If the queue
is empty for 8.5 seconds, it'll ping Cloud Spanner by sending the recommended:
execute_sql('SELECT 1')
and then reading the result back.
"""
import queue
import threading
import time
class PeriodicAutoRefresher:
    """Serializes operations onto a single background event-loop thread.

    Operations are queued via run_op and executed in order; if the queue
    stays empty for `period_secs`, `ping_fn` is invoked to keep the
    underlying resource (a Cloud Spanner transaction) from idling out.
    """

    def __init__(self, period_secs=10, ping_fn=None):
        # period_secs: maximum idle time before ping_fn is invoked.
        # ping_fn: zero-arg callable used to keep the connection warm.
        self.__period_secs = period_secs
        self.__Q = queue.Queue()
        self.__ping_fn = ping_fn
        self.__start_time = time.time()
        pth = threading.Thread(target=self.__event_loop, name='period-auto-refresh')
        pth.start()
        self.__pth = pth

    def __event_loop(self):
        # Runs on the background thread.  Blocking on the queue with a
        # timeout doubles as the idle detector: queue.Empty means no work
        # arrived within period_secs, so we ping instead.
        while True:
            try:
                head = self.__Q.get(block=True, timeout=self.__period_secs)
                if not head:
                    # None is the sentinel pushed by stop(): exit the loop.
                    return
                callback, fn, args, kwargs = head
                res, exc = None, None
                try:
                    res = fn(*args, **kwargs)
                except Exception as e:
                    exc = e
                finally:
                    # Deliver either the result or the captured exception.
                    callback(res, exc)
            except queue.Empty:
                self.__ping_fn()

    def stop(self):
        # Push the sentinel and block until the event-loop thread exits.
        self.__Q.put(None)
        self.__pth.join()

    def run_op(self, callback, fn, *args, **kwargs):
        # Enqueue fn(*args, **kwargs); callback(result, exception) fires
        # on the event-loop thread once it completes.
        self.__Q.put_nowait((callback, fn, args, kwargs))
class PeriodicAutoRefreshingTransaction:
    """
    PeriodicAutoRefreshingTransaction is the drop-in replacement for spanner_v1.Transaction
    but with a max-idle duration of 8.5 seconds, since the last use time of the underlying
    Transaction, else we'll perform a ping to Cloud Spanner with 'SELECT 1'.
    It becomes active after .begin() has been invoked.
    """

    def __init__(self, txn):
        # txn: the wrapped spanner_v1.Transaction.
        self.__txn = txn

    def begin(self):
        """Begin the wrapped transaction and start the auto-refresher."""
        res = self.__txn.begin()
        self.__par = PeriodicAutoRefresher(period_secs=8.5, ping_fn=self.__ping)
        return res

    def __ping(self):
        # Keep-alive callback: issue the recommended 'SELECT 1' and drain
        # the result, unless the transaction is already finished.
        if self.__txn.committed or self.__txn._rolled_back:
            print('Already committed or rolledback so cannot ping Cloud Spanner')
            return
        print('Pinging Cloud Spanner at %s' % time.time())
        res = self.__txn.execute_sql('SELECT 1')
        if res:
            for it in res:
                _ = it

    def execute_sql(self, *args, **kwargs):
        """Run execute_sql serialized on the refresher's event loop."""
        return self.__on_event_queue(self.__txn.execute_sql, *args, **kwargs)

    def execute_update(self, *args, **kwargs):
        """Run execute_update serialized on the refresher's event loop."""
        return self.__on_event_queue(self.__txn.execute_update, *args, **kwargs)

    def commit(self):
        """Commit the transaction and shut down the auto-refresher."""
        res = self.__on_event_queue(self.__txn.commit)
        self.stop()
        return res

    def rollback(self):
        """Roll back the transaction and shut down the auto-refresher."""
        res = self.__on_event_queue(self.__txn.rollback)
        self.stop()
        return res

    @property
    def committed(self):
        # Mirrors the wrapped Transaction's committed flag.
        return self.__txn and self.__txn.committed

    @property
    def _rolled_back(self):
        # For now it is alright to access Transaction._rolled_back
        # even though it is unexported. We've filed a follow-up issue:
        # https://github.com/googleapis/python-spanner/issues/13
        return self.__txn and self.__txn._rolled_back

    def stop(self):
        """Stop the background refresher thread (idempotent)."""
        if self.__par:
            self.__par.stop()
            self.__par = None

    def __on_event_queue(self, fn, *args, **kwargs):
        # Bridge the asynchronous callback-based run_op to a synchronous
        # call: wait on an Event, then re-raise or return what the
        # event-loop thread produced.
        ready = threading.Event()
        res_exc = {}
        # Using a lambda here because a defined closure would have scope/visibility
        # problems trying to set res_exc, even if we used 'global res_exc'. A lambda solves
        # the issue due to different scoping.
        # We have to propagate the underlying results and exceptions from
        # the asynchronously running callback, converting it to a synchronous call.
        callback = lambda in_res, in_exc: (res_exc.setdefault('res', in_res), res_exc.setdefault('exc', in_exc), ready.set())  # noqa
        self.__par.run_op(callback, fn, *args, **kwargs)
        ready.wait()
        res, exc = res_exc['res'], res_exc['exc']
        if exc:
            raise exc
        return res
|
supervisor.py | from .execute import *
from .constants import REMOTEBOT_PORT
from multiprocessing import Process
import bz2
import asyncio
import marshal
current_process = None
def __dummy__():
    """Placeholder whose __code__ is swapped for the received routine."""
    print("Dummy code, should not see")
    pass
def run_routine(payload):
    """Execute the routine carried by *payload* in this (child) process.

    payload keys used: "monitor_ip" / "monitor_port" (destination for
    echo output) and "code" (a code object spliced into __dummy__).
    Any exception from the routine is forwarded via echo().
    """
    try:
        echo.__ip__ = payload["monitor_ip"]
        echo.__port__ = payload["monitor_port"]
        __dummy__.__code__ = payload["code"]
        __dummy__()
    except Exception as e:
        echo(e)
async def handle_routine(reader, writer):
    """Accept one routine over TCP and run it in a fresh child process.

    Reads the entire stream, bz2-decompresses and unmarshals the payload,
    terminates any still-running routine (stopping both motors), then
    starts a new Process executing run_routine(payload).
    """
    global current_process
    global __dummy__
    data = await reader.read()
    payload = marshal.loads(bz2.decompress(data))
    # __dummy__.__code__ = marshal.loads(bz2.decompress(data))
    addr = writer.get_extra_info('peername')
    print("Received routine from", addr)
    # Default the monitor address to the sender's own address.
    if not payload["monitor_ip"]:
        payload["monitor_ip"] = addr[0]
    if current_process and current_process.is_alive():
        print("Terminating previous process")
        current_process.terminate()
        # Halt both motors so nothing keeps moving without a controller.
        run_motor(1, 0, "stop")
        run_motor(2, 0, "stop")
    print("Starting new process")
    current_process = Process(target=run_routine, args=(payload,))
    current_process.start()
def supervisor(bind="0.0.0.0", port=REMOTEBOT_PORT):
    """Run the asyncio TCP server that accepts routines until Ctrl+C.

    Listens on bind:port and dispatches each connection to
    handle_routine; shuts the server and loop down cleanly afterwards.
    """
    # NOTE(review): `loop` is a function-local name that is never bound
    # before this try, so the lookup always raises NameError
    # (UnboundLocalError) and the except branch runs; the else branch
    # appears unreachable — confirm intent.
    try:
        loop
    except NameError:
        loop = asyncio.get_event_loop()
    else:
        if loop and loop.is_closed():
            loop = asyncio.new_event_loop()
    coro = asyncio.start_server(handle_routine, bind, port, loop=loop)
    server = loop.run_until_complete(coro)
    # Serve requests until Ctrl+C is pressed
    print('Serving on {}'.format(server.sockets[0].getsockname()))
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        pass
    # Close the server
    server.close()
    loop.run_until_complete(server.wait_closed())
    loop.close()
if __name__ == "__main__":
supervisor()
|
jsonrpc.py | """
Copyright (c) 2016 Sourcegraph
Copyright (c) 2019 Seven Bridges. See LICENSE
This code is assembled and reorganized
Retrieved from: https://github.com/sourcegraph/python-langserver/blob/master/langserver/jsonrpc.py
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import json
import logging
import queue
import threading
from collections import deque
logger = logging.getLogger(__name__)
class JSONRPC2ProtocolError(Exception):
    """Raised when an incoming message violates JSON-RPC 2.0 framing."""
    pass
class ReadWriter:
    """Wrap a binary reader/writer pair behind a UTF-8 text interface.

    Every read decodes the underlying bytes to str; every write encodes
    the given str to UTF-8 and flushes the stream immediately.
    """

    def __init__(self, reader, writer):
        self.reader = reader
        self.writer = writer

    def readline(self, *args):
        """Read one line from the underlying stream, decoded as UTF-8."""
        raw = self.reader.readline(*args)
        return raw.decode("utf-8")

    def read(self, *args):
        """Read from the underlying stream, decoded as UTF-8."""
        raw = self.reader.read(*args)
        return raw.decode("utf-8")

    def write(self, out):
        """Encode *out* as UTF-8, write it, and flush."""
        encoded = out.encode("utf-8")
        self.writer.write(encoded)
        self.writer.flush()
class TCPReadWriter(ReadWriter):
    """ReadWriter variant used for TCP connections.

    Behaves like ReadWriter except that write() uses the default
    encoding of str.encode() rather than an explicit "utf-8" argument.
    """

    def readline(self, *args):
        return self.reader.readline(*args).decode("utf-8")

    def read(self, *args):
        data = self.reader.read(*args)
        return data.decode("utf-8")

    def write(self, out):
        self.writer.write(out.encode())
        self.writer.flush()
class JSONRPC2Connection:
    """A JSON-RPC 2.0 connection over a ReadWriter-style transport.

    Handles Content-Length framing, request/response correlation by id,
    and buffering of out-of-order messages.
    """

    def __init__(self, conn=None):
        # conn: transport with readline/read/write (see ReadWriter).
        self.conn = conn
        # Messages received while waiting for a specific id.
        self._msg_buffer = deque()
        # Monotonically increasing request id.
        self._next_id = 1

    def _read_header_content_length(self, line):
        """Parse a Content-Length header line; returns the int value.

        Raises JSONRPC2ProtocolError on bad line endings or a non-integer
        value.  NOTE: implicitly returns None when the line is some other
        header — callers rely on the first header being Content-Length.
        """
        if len(line) < 2 or line[-2:] != "\r\n":
            raise JSONRPC2ProtocolError("Line endings must be \\r\\n")
        if line.startswith("Content-Length: "):
            _, value = line.split("Content-Length: ")
            value = value.strip()
            try:
                return int(value)
            except ValueError:
                raise JSONRPC2ProtocolError(
                    "Invalid Content-Length header: {}".format(value))

    def _receive(self):
        """Read one framed JSON-RPC message; raises EOFError at stream end."""
        line = self.conn.readline()
        if line == "":
            raise EOFError()
        length = self._read_header_content_length(line)
        # Keep reading headers until we find the sentinel line for the JSON
        # request.
        while line != "\r\n":
            line = self.conn.readline()
        body = self.conn.read(length)
        logger.debug("RECV %s", body)
        return json.loads(body)

    def read_message(self, want=None):
        """Read a JSON RPC message sent over the current connection.

        If id is None, the next available message is returned.
        Otherwise *want* is a predicate; non-matching messages are
        buffered for future callers.
        """
        if want is None:
            if self._msg_buffer:
                return self._msg_buffer.popleft()
            return self._receive()
        # First check if our buffer contains something we want.
        msg = deque_find_and_pop(self._msg_buffer, want)
        if msg:
            return msg
        # We need to keep receiving until we find something we want.
        # Things we don't want are put into the buffer for future callers.
        while True:
            msg = self._receive()
            if want(msg):
                return msg
            self._msg_buffer.append(msg)

    def _send(self, body):
        """Serialize *body* and write it with Content-Length framing."""
        body = json.dumps(body, separators=(",", ":"))
        content_length = len(body)
        response = (
            "Content-Length: {}\r\n"
            "Content-Type: application/vscode-jsonrpc; charset=utf8\r\n\r\n"
            "{}".format(content_length, body))
        self.conn.write(response)
        logger.debug("SEND %s", body)

    def write_response(self, rid, result):
        """Send a successful response for request id *rid*."""
        body = {
            "jsonrpc": "2.0",
            "id": rid,
            "result": result,
        }
        self._send(body)

    def write_error(self, rid, code, message, data=None):
        """Send an error response for request id *rid*."""
        e = {
            "code": code,
            "message": message,
        }
        if data is not None:
            e["data"] = data
        body = {
            "jsonrpc": "2.0",
            "id": rid,
            "error": e,
        }
        self._send(body)

    def send_request(self, method: str, params):
        """Send a request and block until its matching response arrives."""
        rid = self._next_id
        self._next_id += 1
        body = {
            "jsonrpc": "2.0",
            "id": rid,
            "method": method,
            "params": params,
        }
        self._send(body)
        return self.read_message(want=lambda msg: msg.get("id") == rid)

    def send_notification(self, method: str, params):
        """Send a notification (no id, no response expected)."""
        body = {
            "jsonrpc": "2.0",
            "method": method,
            "params": params,
        }
        self._send(body)

    def send_request_batch(self, requests):
        """Pipelines requests and returns responses.

        The responses is a generator where the nth response corresponds
        with the nth request. Users must read the generator until the
        end, otherwise you will leak a thread.
        """
        # We communicate the request ids using a thread safe queue.
        # It also allows us to bound the number of concurrent requests.
        q = queue.Queue(100)

        def send():
            # Writer thread: sends each request and publishes its id.
            for method, params in requests:
                rid = self._next_id
                self._next_id += 1
                q.put(rid)
                body = {
                    "jsonrpc": "2.0",
                    "id": rid,
                    "method": method,
                    "params": params,
                }
                self._send(body)
            # Sentinel value to indicate we are done
            q.put(None)

        threading.Thread(target=send).start()
        while True:
            rid = q.get()
            if rid is None:
                break
            yield self.read_message(want=lambda msg: msg.get("id") == rid)
def deque_find_and_pop(d, f):
    """Remove and return the first element of deque *d* satisfying predicate *f*.

    Returns None (leaving *d* untouched) when no element matches.  The
    relative order of the remaining elements is preserved.
    """
    for position, item in enumerate(d):
        if f(item):
            # Rotate the match to the front, pop it, then rotate back so
            # the remaining elements keep their original order.
            d.rotate(-position)
            found = d.popleft()
            d.rotate(position)
            return found
    return None
|
artifact_service.py | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Implementation of an Artifact{Staging,Retrieval}Service.
The staging service here can be backed by any beam filesystem.
"""
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import concurrent.futures
import contextlib
import hashlib
import os
import queue
import sys
import tempfile
import threading
import typing
from io import BytesIO
from typing import Callable
import grpc
from future.moves.urllib.request import urlopen
from apache_beam.io import filesystems
from apache_beam.portability import common_urns
from apache_beam.portability.api import beam_artifact_api_pb2
from apache_beam.portability.api import beam_artifact_api_pb2_grpc
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.utils import proto_utils
if typing.TYPE_CHECKING:
from typing import BinaryIO # pylint: disable=ungrouped-imports
from typing import Iterable
from typing import MutableMapping
class ArtifactRetrievalService(
    beam_artifact_api_pb2_grpc.ArtifactRetrievalServiceServicer):
  """Serves artifact payloads (file, URL, or embedded bytes) as byte chunks."""
  _DEFAULT_CHUNK_SIZE = 2 << 20
  def __init__(
      self,
      file_reader,  # type: Callable[[str], BinaryIO],
      chunk_size=None,
  ):
    # file_reader maps a path string to a readable binary handle.
    self._file_reader = file_reader
    self._chunk_size = chunk_size or self._DEFAULT_CHUNK_SIZE
  def ResolveArtifacts(self, request, context=None):
    """Trivial resolution: every artifact resolves to itself."""
    return beam_artifact_api_pb2.ResolveArtifactsResponse(
        replacements=request.artifacts)
  def GetArtifact(self, request, context=None):
    """Streams the requested artifact's bytes in chunks of at most _chunk_size."""
    type_urn = request.artifact.type_urn
    if type_urn == common_urns.artifact_types.FILE.urn:
      payload = proto_utils.parse_Bytes(
          request.artifact.type_payload,
          beam_runner_api_pb2.ArtifactFilePayload)
      read_handle = self._file_reader(payload.path)
    elif type_urn == common_urns.artifact_types.URL.urn:
      payload = proto_utils.parse_Bytes(
          request.artifact.type_payload, beam_runner_api_pb2.ArtifactUrlPayload)
      # TODO(Py3): Remove the unneeded contextlib wrapper.
      read_handle = contextlib.closing(urlopen(payload.url))
    elif type_urn == common_urns.artifact_types.EMBEDDED.urn:
      payload = proto_utils.parse_Bytes(
          request.artifact.type_payload,
          beam_runner_api_pb2.EmbeddedFilePayload)
      read_handle = BytesIO(payload.data)
    else:
      raise NotImplementedError(type_urn)
    with read_handle as stream:
      # iter() with a sentinel stops exactly where `if not chunk: break` did.
      for chunk in iter(lambda: stream.read(self._chunk_size), b''):
        yield beam_artifact_api_pb2.GetArtifactResponse(data=chunk)
class ArtifactStagingService(
    beam_artifact_api_pb2_grpc.ArtifactStagingServiceServicer):
  """Stages artifacts for jobs via the reverse artifact retrieval protocol.

  Jobs register their dependency sets with register_job(); the client then
  drives ReverseArtifactRetrievalService, which pulls artifact bytes from the
  client's response stream and persists them with the provided file_writer.
  """
  def __init__(
      self,
      file_writer,  # type: Callable[[str, Optional[str]], Tuple[BinaryIO, str]]
  ):
    # Guards _jobs_to_stage, which is mutated from multiple servicer threads.
    self._lock = threading.Lock()
    # staging_token -> (dependency_sets dict, Event set once staging finishes).
    self._jobs_to_stage = {}
    self._file_writer = file_writer
  def register_job(
      self,
      staging_token,  # type: str
      dependency_sets  # type: MutableMapping[Any, List[beam_runner_api_pb2.ArtifactInformation]]
  ):
    """Registers a set of dependency sets to be staged under staging_token.

    Raises:
      ValueError: if staging_token is already registered.
    """
    # Fix: the membership check must happen atomically with the insert.
    # Previously it ran outside the lock, so two concurrent registrations of
    # the same token could both pass the check and silently overwrite.
    with self._lock:
      if staging_token in self._jobs_to_stage:
        raise ValueError('Already staging %s' % staging_token)
      self._jobs_to_stage[staging_token] = (
          dict(dependency_sets), threading.Event())
  def resolved_deps(self, staging_token, timeout=None):
    """Blocks until staging completes, then returns the file-based deps.

    Raises:
      concurrent.futures.TimeoutError: if staging does not finish in time.
      KeyError: if staging_token was never registered.
    """
    with self._lock:
      dependency_sets, event = self._jobs_to_stage[staging_token]
    try:
      if not event.wait(timeout):
        raise concurrent.futures.TimeoutError()
      return dependency_sets
    finally:
      # Tokens are single-use: drop the entry whether we succeeded or not.
      with self._lock:
        del self._jobs_to_stage[staging_token]
  def ReverseArtifactRetrievalService(self, responses, context=None):
    """gRPC handler: consumes the client's responses, yields our requests."""
    staging_token = next(responses).staging_token
    with self._lock:
      try:
        dependency_sets, event = self._jobs_to_stage[staging_token]
      except KeyError:
        if context:
          context.set_code(grpc.StatusCode.NOT_FOUND)
          context.set_details('No such staging token: %r' % staging_token)
        raise
    requests = _QueueIter()
    class ForwardingRetrievalService(object):
      # NOTE(review): 'ResolveArtifactss' (double s) is the name
      # resolve_as_files calls -- keep both sides in sync if renaming.
      def ResolveArtifactss(self, request):
        requests.put(
            beam_artifact_api_pb2.ArtifactRequestWrapper(
                resolve_artifact=request))
        return next(responses).resolve_artifact_response
      def GetArtifact(self, request):
        requests.put(
            beam_artifact_api_pb2.ArtifactRequestWrapper(get_artifact=request))
        while True:
          response = next(responses)
          yield response.get_artifact_response
          if response.is_last:
            break
    def resolve():
      # Runs on a daemon thread; feeds `requests` until all deps are staged,
      # then signals completion (or failure) via the queue and the event.
      try:
        for key, dependencies in dependency_sets.items():
          dependency_sets[key] = list(
              resolve_as_files(
                  ForwardingRetrievalService(),
                  lambda name: self._file_writer(
                      os.path.join(staging_token, name)),
                  dependencies))
        requests.done()
      except:  # pylint: disable=bare-except
        # Propagate the error to the consumer of `requests`, then re-raise.
        requests.abort()
        raise
      finally:
        event.set()
    t = threading.Thread(target=resolve)
    t.daemon = True
    t.start()
    return requests
def resolve_as_files(retrieval_service, file_writer, dependencies):
  """Translates a set of dependencies into file-based dependencies.

  Yields one FILE-typed ArtifactInformation per input dependency, after
  fetching its bytes through retrieval_service and persisting them with
  file_writer (which returns a (handle, path) pair).
  """
  # Resolve until nothing changes. This ensures that they can be fetched.
  # NOTE(review): 'ResolveArtifactss' (double s) matches the method defined
  # on ForwardingRetrievalService above -- do not rename one side only.
  resolution = retrieval_service.ResolveArtifactss(
      beam_artifact_api_pb2.ResolveArtifactsRequest(
          artifacts=dependencies,
          # Anything fetchable will do.
          # TODO(robertwb): Take advantage of shared filesystems, urls.
          preferred_urns=[],
      ))
  dependencies = resolution.replacements
  # Fetch each of the dependencies, using file_writer to store them as
  # file-based artifacts.
  # TODO(robertwb): Consider parallelizing the actual writes.
  for dep in dependencies:
    if dep.role_urn == common_urns.artifact_roles.STAGING_TO.urn:
      # Preserve the requested staged name (basename only, to avoid paths).
      base_name = os.path.basename(
          proto_utils.parse_Bytes(
              dep.role_payload,
              beam_runner_api_pb2.ArtifactStagingToRolePayload).staged_name)
    else:
      base_name = None
    # Content hash makes the name unique; the basename keeps it readable.
    unique_name = '-'.join(
        filter(
            None,
            [hashlib.sha256(dep.SerializeToString()).hexdigest(), base_name]))
    file_handle, path = file_writer(unique_name)
    with file_handle as fout:
      for chunk in retrieval_service.GetArtifact(
          beam_artifact_api_pb2.GetArtifactRequest(artifact=dep)):
        fout.write(chunk.data)
    # Re-describe the dependency as a local FILE artifact at the new path,
    # keeping the original role information intact.
    yield beam_runner_api_pb2.ArtifactInformation(
        type_urn=common_urns.artifact_types.FILE.urn,
        type_payload=beam_runner_api_pb2.ArtifactFilePayload(
            path=path).SerializeToString(),
        role_urn=dep.role_urn,
        role_payload=dep.role_payload)
def offer_artifacts(
    artifact_staging_service, artifact_retrieval_service, staging_token):
  """Offers a set of artifacts to an artifact staging service, via the
  ReverseArtifactRetrievalService API.
  The given artifact_retrieval_service should be able to resolve/get all
  artifacts relevant to this job.
  """
  # `responses` is a thread-fed iterator serving as the (reversed) stream
  # the staging service reads; the first item carries the staging token.
  responses = _QueueIter()
  responses.put(
      beam_artifact_api_pb2.ArtifactResponseWrapper(
          staging_token=staging_token))
  requests = artifact_staging_service.ReverseArtifactRetrievalService(responses)
  try:
    # Answer each incoming request by consulting the retrieval service.
    for request in requests:
      if request.HasField('resolve_artifact'):
        responses.put(
            beam_artifact_api_pb2.ArtifactResponseWrapper(
                resolve_artifact_response=artifact_retrieval_service.
                ResolveArtifacts(request.resolve_artifact)))
      elif request.HasField('get_artifact'):
        for chunk in artifact_retrieval_service.GetArtifact(
            request.get_artifact):
          responses.put(
              beam_artifact_api_pb2.ArtifactResponseWrapper(
                  get_artifact_response=chunk))
        # An explicit empty chunk with is_last=True terminates this
        # artifact's byte stream for the reader on the other side.
        responses.put(
            beam_artifact_api_pb2.ArtifactResponseWrapper(
                get_artifact_response=beam_artifact_api_pb2.GetArtifactResponse(
                    data=b''),
                is_last=True))
    responses.done()
  except:  # pylint: disable=bare-except
    # Surface the failure to the consumer of `responses`, then re-raise.
    responses.abort()
    raise
class BeamFilesystemHandler(object):
  """Reader/writer callbacks rooted at a directory, backed by Beam filesystems."""
  def __init__(self, root):
    # Directory (or filesystem prefix) under which artifacts are written.
    self._root = root
  def file_reader(self, path):
    """Open *path* for reading via the Beam filesystem registry."""
    return filesystems.FileSystems.open(path)
  def file_writer(self, name=None):
    """Create a file called *name* under the root; return (handle, full_path)."""
    destination = filesystems.FileSystems.join(self._root, name)
    return filesystems.FileSystems.create(destination), destination
def resolve_artifacts(artifacts, service, dest_dir):
  """Resolve artifacts via *service* and store each locally when needed.

  Returns the input unchanged when it is empty/falsy.
  """
  if not artifacts:
    return artifacts
  request = beam_artifact_api_pb2.ResolveArtifactsRequest(artifacts=artifacts)
  resolved = service.ResolveArtifacts(request).replacements
  return [
      maybe_store_artifact(artifact, service, dest_dir)
      for artifact in resolved
  ]
def maybe_store_artifact(artifact, service, dest_dir):
  """Fetch the artifact into dest_dir unless it is already locally available.

  URL and EMBEDDED artifacts are returned as-is (nothing to materialize).
  FILE artifacts are reused only when the file exists and its sha256 matches
  the payload's recorded hash; otherwise the bytes are fetched and stored.
  """
  if artifact.type_urn in (common_urns.artifact_types.URL.urn,
                           common_urns.artifact_types.EMBEDDED.urn):
    return artifact
  elif artifact.type_urn == common_urns.artifact_types.FILE.urn:
    payload = beam_runner_api_pb2.ArtifactFilePayload.FromString(
        artifact.type_payload)
    # Fix: the original condition ended in `and False`, which made this
    # cache-hit branch unreachable while still hashing the file on disk
    # (the sha256() call ran before the short-circuit) -- pure waste.
    if (os.path.exists(payload.path) and payload.sha256 and
        payload.sha256 == sha256(payload.path)):
      return artifact
    else:
      return store_artifact(artifact, service, dest_dir)
  else:
    return store_artifact(artifact, service, dest_dir)
def store_artifact(artifact, service, dest_dir):
  """Download an artifact's bytes into a new file under dest_dir.

  Returns a FILE-typed ArtifactInformation whose payload records the new
  path and the sha256 of the written content; the role is carried over.
  """
  digest = hashlib.sha256()
  # delete=False: the file must outlive the handle; its name is returned.
  with tempfile.NamedTemporaryFile(dir=dest_dir, delete=False) as sink:
    for chunk in service.GetArtifact(
        beam_artifact_api_pb2.GetArtifactRequest(artifact=artifact)):
      digest.update(chunk.data)
      sink.write(chunk.data)
  return beam_runner_api_pb2.ArtifactInformation(
      type_urn=common_urns.artifact_types.FILE.urn,
      type_payload=beam_runner_api_pb2.ArtifactFilePayload(
          path=sink.name, sha256=digest.hexdigest()).SerializeToString(),
      role_urn=artifact.role_urn,
      role_payload=artifact.role_payload)
def sha256(path):
  """Return the hex-encoded SHA-256 digest of the file at *path*.

  Reads in 4 MiB chunks so arbitrarily large files are hashed in
  constant memory.
  """
  digest = hashlib.sha256()
  chunk_size = 4 << 20
  with open(path, 'rb') as stream:
    while True:
      chunk = stream.read(chunk_size)
      if not chunk:
        break
      digest.update(chunk)
  return digest.hexdigest()
class _QueueIter(object):
_END = object()
def __init__(self):
self._queue = queue.Queue()
def put(self, item):
self._queue.put(item)
def done(self):
self._queue.put(self._END)
self._queue.put(StopIteration)
def abort(self, exn=None):
if exn is None:
exn = sys.exc_info()[1]
self._queue.put(self._END)
self._queue.put(exn)
def __iter__(self):
return self
def __next__(self):
item = self._queue.get()
if item is self._END:
raise self._queue.get()
else:
return item
if sys.version_info < (3, ):
next = __next__
|
tktasks.py | #!/usr/bin/env python
"""Desktop Tasks app using Tkinter.
This uses a background thread to be notified of incoming changes.
It demonstrates, among others:
- How to call await() in a loop in a background thread efficiently:
Use make_cursor_map() to feed the 'deltamap' return value back
into the 'datastores' parameter for the next await() call.
- How to communicate in a safe way between the background thread and
the Tk main loop: Use a Queue plus a virtual event.
- How to avoid duplicate screen updates: Keep track of the revision
that was displayed.
- How to save and restore a datastore to/from a disk file.
- How to detect whether the network goes offline or comes back online
(in approximation).
"""
import json
import os
import sys
import time
import random
from threading import Thread
from Queue import Queue, Empty
from Tkinter import Tk, Frame, Button, Checkbutton, Entry, Label, BooleanVar
from Tkconstants import W, E, BOTH, END
# We use HTTPError as an approximation for "no network", even though
# this isn't always true -- sometimes it means "bad request" and
# sometimes we may get other exceptions.
from urllib3.exceptions import HTTPError
from dropbox.client import (
DropboxClient,
ErrorResponse,
)
from dropbox.datastore import (
DatastoreManager, Date,
DatastoreError, DatastoreNotFoundError,
)
# Virtual event to wake up the Tk main loop.
REFRESH_EVENT = '<<refresh-datastore>>'
# Filename where to store the data.
SERIALIZED_DATASTORE = 'my_tasks.json'
class TaskList(Frame):
    """Tk frame showing a Dropbox-datastore-backed task list.

    A daemon background thread watches for remote changes and hands deltas
    to the Tk main loop through a Queue plus a virtual event (REFRESH_EVENT).
    """
    def __init__(self, master, client):
        Frame.__init__(self, master)
        # Connect to Dropbox and open datastore.
        self.manager = DatastoreManager(client)
        # Try to load serialized datastore first.
        datastore = self.load_serialized_datastore(SERIALIZED_DATASTORE)
        if datastore is not None:
            try:
                datastore.load_deltas()
            except DatastoreNotFoundError:
                print 'This datastore has been deleted. Exiting now.'
                sys.exit(1)
            except HTTPError:
                # Network failure is treated as "offline"; the locally
                # loaded snapshot still works.
                print 'We are offline. Proceed with caution.'
        else:
            datastore = self.manager.open_default_datastore()
        self.datastore = datastore
        self.table = self.datastore.get_table('tasks')
        # Set up communication with background thread.
        self.queue = Queue()  # Holds deltas sent from background thread.
        self.display_rev = 0  # Last revision displayed.
        self.refresh()  # Initial display update.
        self.bind(REFRESH_EVENT, self.refresh)  # Respond to background thread.
        # Create, configure and start background thread.
        self.bg_thread = Thread(name='bgthread', target=self.bg_run)
        self.bg_thread.setDaemon(True)
        self.bg_thread.start()
    def load_serialized_datastore(self, filename):
        """Load a saved datastore snapshot from disk; None if unavailable."""
        try:
            f = open(filename, 'rb')
        except IOError as exc:
            # Don't print an error if the file doesn't exist.
            if os.path.exists(filename):
                print 'Cannot load saved datastore:', exc
            return None
        with f:
            try:
                data = json.load(f)
                id, handle, rev, snapshot = data
            except ValueError as exc:
                print 'Bad JSON on %s: %s' % (filename, exc)
                return None
            datastore = self.manager.open_raw_datastore(id, handle)
            # If this fails, the save file is bad -- you must manually delete it.
            datastore.apply_snapshot(rev, snapshot)
            print 'Loaded datastore from', filename
            return datastore
    def save_serialized_datastore(self, datastore, filename):
        """Save the datastore's id/handle/rev/snapshot to disk as JSON."""
        id = datastore.get_id()
        handle = datastore.get_handle()
        rev = datastore.get_rev()
        snapshot = datastore.get_snapshot()
        data = [id, handle, rev, snapshot]
        try:
            f = open(filename, 'wb')
        except IOError as exc:
            print 'Cannot save datastore:', exc
            return
        with f:
            json.dump(data, f)
        print 'Saved datastore to', filename
    def bg_run(self):
        # This code runs in a background thread. No other code does.
        deltamap = None
        backoff = 0
        while True:
            # Feed the previous await()'s deltamap back in to advance cursors.
            cursor_map = DatastoreManager.make_cursor_map([self.datastore], deltamap)
            try:
                _, _, deltamap = self.manager.await(datastores=cursor_map)
            except Exception as exc:
                if isinstance(exc, HTTPError):
                    if not backoff:
                        print 'We have gone offline.'
                    else:
                        print 'We are still offline.'
                else:
                    print 'bg_run():', repr(exc), str(exc)
                # Randomized exponential backoff, clipped to 5 minutes.
                backoff = min(backoff*2, 300) + random.random()
                time.sleep(backoff)
                continue
            else:
                if backoff:
                    print 'We have come back online.'
                    backoff = 0
            if deltamap and self.datastore in deltamap:
                deltas = deltamap[self.datastore]
                if deltas is None:
                    # Stop the bg thread.
                    print 'This datastore has been deleted.'
                    print 'Please exit.'
                    break
                if deltas:
                    # Hand the deltas to the Tk main loop; never touch
                    # the datastore from this thread.
                    self.queue.put(deltas)
                    self.event_generate(REFRESH_EVENT, when='tail')
    def save(self, event=None):
        """Callback: write a local snapshot of the datastore to disk."""
        self.save_serialized_datastore(self.datastore, SERIALIZED_DATASTORE)
    def refresh(self, event=None):
        # This is called directly when we have made a change,
        # and when the background thread sends a REFRESH_EVENT.
        self.load_queue()  # Update the datastore.
        if self.datastore.get_rev() == self.display_rev:
            return  # Nothing to do.
        self.forget()  # Hide the frame to reduce flashing.
        for w in self.winfo_children():
            w.destroy()  # Delete the old widgets.
        self.redraw()  # Create new widgets.
        self.pack(fill=BOTH, expand=1)  # Show the frame.
        self.display_rev = self.datastore.get_rev()
        title = self.datastore.get_title()
        mtime = self.datastore.get_mtime()
        if not title:
            title = 'My Tasks'
        if mtime:
            fmtime = mtime.to_datetime_local().strftime('%H:%M, %d %b %Y')
            title = '%s (%s)' % (title, fmtime)
        self.master.title(title)
        self.input.focus_set()
    def load_queue(self):
        # Incorporate queued deltas into the datastore.
        while True:
            try:
                deltas = self.queue.get_nowait()
            except Empty:
                break
            else:
                self.datastore.apply_deltas(deltas)
    def redraw(self):
        """Rebuild all widgets from the current contents of the table."""
        # Even though there are never more than three widgets per row,
        # we have four columns, to allow the taskname label and the
        # input widget to stretch.
        self.grid_columnconfigure(2, weight=1)
        row = 0
        # Add a new row of widgets for each task.
        for rec in sorted(self.table.query(), key=lambda rec: rec.get('created')):
            # Extract the fields we need.
            completed = rec.get('completed')
            taskname = rec.get('taskname')
            # Create a button with an 'X' in it, to delete the task.
            close_btn = Button(self, text='X',
                               command=lambda rec=rec: self.delete_rec(rec))
            close_btn.grid(row=row, column=0)
            # Create a checkbox, to mark it completed (or not).
            var = BooleanVar(self, value=completed)
            completed_btn = Checkbutton(self, variable=var,
                                        command=lambda rec=rec, var=var:
                                            self.toggle_rec(rec, var))
            completed_btn.grid(row=row, column=1)
            # Create a label showing the task name.
            taskname_lbl = Label(self, text=taskname, anchor=W)
            taskname_lbl.grid(row=row, column=2, columnspan=2, sticky=W)
            row += 1  # Bump row index.
        # Add a final row with the input and button to add new tasks.
        self.input = Entry(self)
        self.input.bind('<Return>', self.add_rec)
        self.input.grid(row=row, column=0, columnspan=3, sticky=W+E)
        add_btn = Button(self, text='Add Task', command=self.add_rec)
        add_btn.grid(row=row, column=3)
        # Add save button. (Auto-save is left as an exercise.)
        save_btn = Button(self, text='Save local snapshot', command=self.save)
        save_btn.grid(row=row+1, column=0, columnspan=3, sticky=W)
    def add_rec(self, event=None):
        # Callback to add a new task.
        self.do_transaction(self.table.insert,
                            completed=False, taskname=self.input.get(), created=Date())
    def delete_rec(self, rec):
        # Callback to delete a task.
        self.do_transaction(rec.delete_record)
    def toggle_rec(self, rec, var):
        # Callback to toggle a task's completed flag.
        try:
            self.do_transaction(rec.set, 'completed', var.get())
        finally:
            # In case the transaction failed, flip the variable back.
            var.set(rec.get('completed'))
    def do_transaction(self, func, *args, **kwds):
        """Run func(*args, **kwds) in a datastore transaction, with retries."""
        self.update_idletasks()  # Refresh screen without handling more input.
        def call_func():
            func(*args, **kwds)
        try:
            self.datastore.transaction(call_func, max_tries=4)
        except Exception as exc:
            # Maybe the server is down, or we experience extreme conflicts.
            # NOTE: A more user-friendly way would be to show an error dialog.
            print 'do_transaction():', repr(exc)
        else:
            self.refresh()
def main():
    """Read the access token from argv, connect, and run the Tk main loop."""
    if not sys.argv[1:]:
        print >>sys.stderr, 'Usage: tktasks.py ACCESS_TOKEN'
        print >>sys.stderr, 'You can use shtasks.py to get an access token.'
        sys.exit(2)
    access_token = sys.argv[1]
    client = DropboxClient(access_token)
    root = Tk()
    root.title('My Tasks')
    root.geometry('250x300+10+10')
    # Keep a local reference to the TaskList for the lifetime of the loop.
    task_list = TaskList(root, client)
    root.mainloop()
if __name__ == '__main__':
main()
|
testMarshal.py | """Testing pasing object between multiple COM threads
Uses standard COM marshalling to pass objects between threads. Even
though Python generally seems to work when you just pass COM objects
between threads, it shouldn't.
This shows the "correct" way to do it.
It shows that although we create new threads to use the Python.Interpreter,
COM marshals back all calls to that object to the main Python thread,
which must be running a message loop (as this sample does).
When this test is run in "free threaded" mode (at this stage, you must
manually mark the COM objects as "ThreadingModel=Free", or run from a
service which has marked itself as free-threaded), then no marshalling
is done, and the Python.Interpreter object starts doing the "expected" thing
- i.e., it reports being on the same thread as its caller!
Python.exe needs a good way to mark itself as FreeThreaded - at the moment
this is quite painful!
"""
import threading, traceback
import win32com.client
import win32event, win32api
import pythoncom
import unittest
from .testServers import InterpCase
freeThreaded = 1
class ThreadInterpCase(InterpCase):
    """Exercises COM marshalling of Python.Interpreter across threads."""
    def _testInterpInThread(self, stopEvent, interp):
        # Thread entry point: run the test, then always signal stopEvent so
        # the main loop's wait can complete even on failure.
        try:
            self._doTestInThread(interp)
        finally:
            win32event.SetEvent(stopEvent)
    def _doTestInThread(self, interp):
        # Every thread must enter a COM apartment before using COM.
        pythoncom.CoInitialize()
        myThread = win32api.GetCurrentThreadId()
        if freeThreaded:
            # Unmarshal the interface stream created by the launching thread
            # into a usable IDispatch for this thread.
            interp = pythoncom.CoGetInterfaceAndReleaseStream(
                interp, pythoncom.IID_IDispatch
            )
            interp = win32com.client.Dispatch(interp)
        interp.Exec("import win32api")
        # print "The test thread id is %d, Python.Interpreter's thread ID is %d" % (myThread, interp.Eval("win32api.GetCurrentThreadId()"))
        pythoncom.CoUninitialize()
    def BeginThreadsSimpleMarshal(self, numThreads):
        """Creates multiple threads using simple (but slower) marshalling.
        Single interpreter object, but a new stream is created per thread.
        Returns the handles the threads will set when complete.
        """
        interp = win32com.client.Dispatch("Python.Interpreter")
        events = []
        threads = []
        for i in range(numThreads):
            hEvent = win32event.CreateEvent(None, 0, 0, None)
            events.append(hEvent)
            # One marshal stream per thread; the thread consumes it.
            interpStream = pythoncom.CoMarshalInterThreadInterfaceInStream(
                pythoncom.IID_IDispatch, interp._oleobj_
            )
            t = threading.Thread(
                target=self._testInterpInThread, args=(hEvent, interpStream)
            )
            t.setDaemon(1)  # so errors dont cause shutdown hang
            t.start()
            threads.append(t)
        interp = None
        return threads, events
    #
    # NOTE - this doesnt quite work - Im not even sure it should, but Greg reckons
    # you should be able to avoid the marshal per thread!
    # I think that refers to CoMarshalInterface though...
    def BeginThreadsFastMarshal(self, numThreads):
        """Creates multiple threads using fast (but complex) marshalling.
        The marshal stream is created once, and each thread uses the same stream
        Returns the handles the threads will set when complete.
        """
        interp = win32com.client.Dispatch("Python.Interpreter")
        if freeThreaded:
            interp = pythoncom.CoMarshalInterThreadInterfaceInStream(
                pythoncom.IID_IDispatch, interp._oleobj_
            )
        events = []
        threads = []
        for i in range(numThreads):
            hEvent = win32event.CreateEvent(None, 0, 0, None)
            t = threading.Thread(target=self._testInterpInThread, args=(hEvent, interp))
            t.setDaemon(1)  # so errors dont cause shutdown hang
            t.start()
            events.append(hEvent)
            threads.append(t)
        return threads, events
    def _DoTestMarshal(self, fn, bCoWait=0):
        # Launch the worker threads, then wait for their completion events
        # while pumping messages so cross-apartment calls can be serviced.
        # print "The main thread is %d" % (win32api.GetCurrentThreadId())
        threads, events = fn(2)
        numFinished = 0
        while 1:
            try:
                if bCoWait:
                    rc = pythoncom.CoWaitForMultipleHandles(0, 2000, events)
                else:
                    # Specifying "bWaitAll" here will wait for messages *and* all events
                    # (which is pretty useless)
                    rc = win32event.MsgWaitForMultipleObjects(
                        events, 0, 2000, win32event.QS_ALLINPUT
                    )
                if (
                    rc >= win32event.WAIT_OBJECT_0
                    and rc < win32event.WAIT_OBJECT_0 + len(events)
                ):
                    numFinished = numFinished + 1
                    if numFinished >= len(events):
                        break
                elif rc == win32event.WAIT_OBJECT_0 + len(events):  # a message
                    # This is critical - whole apartment model demo will hang.
                    pythoncom.PumpWaitingMessages()
                else:  # Timeout
                    print(
                        "Waiting for thread to stop with interfaces=%d, gateways=%d"
                        % (pythoncom._GetInterfaceCount(), pythoncom._GetGatewayCount())
                    )
            except KeyboardInterrupt:
                break
        for t in threads:
            t.join(2)
            self.assertFalse(t.is_alive(), "thread failed to stop!?")
        threads = None  # threads hold references to args
        # Seems to be a leak here I can't locate :(
        # self.assertEqual(pythoncom._GetInterfaceCount(), 0)
        # self.assertEqual(pythoncom._GetGatewayCount(), 0)
    def testSimpleMarshal(self):
        self._DoTestMarshal(self.BeginThreadsSimpleMarshal)
    def testSimpleMarshalCoWait(self):
        self._DoTestMarshal(self.BeginThreadsSimpleMarshal, 1)
    # def testFastMarshal(self):
    #     self._DoTestMarshal(self.BeginThreadsFastMarshal)
if __name__ == "__main__":
unittest.main("testMarshal")
|
dirichlet_distributions.py | """Statistical Manifold of Dirichlet distributions with the Fisher metric."""
import logging
import math
import multiprocessing
from scipy.integrate import odeint
from scipy.integrate import solve_bvp
from scipy.stats import dirichlet
import geomstats.backend as gs
import geomstats.errors
from geomstats.algebra_utils import from_vector_to_diagonal_matrix
from geomstats.geometry.embedded_manifold import EmbeddedManifold
from geomstats.geometry.euclidean import Euclidean
from geomstats.geometry.riemannian_metric import RiemannianMetric
N_STEPS = 100
MAX_TIME = 300
class DirichletDistributions(EmbeddedManifold):
    """Class for the manifold of Dirichlet distributions.
    This is :math: Dirichlet = `(R_+^*)^dim`, the positive quadrant of the
    dim-dimensional Euclidean space.
    Attributes
    ----------
    dim : int
        Dimension of the manifold of Dirichlet distributions.
    embedding_manifold : Manifold
        Embedding manifold.
    """
    def __init__(self, dim):
        super(DirichletDistributions, self).__init__(
            dim=dim,
            embedding_manifold=Euclidean(dim=dim))
        # Equip the manifold with the Fisher information metric.
        self.metric = DirichletMetric(dim=dim)
    def belongs(self, point):
        """Evaluate if a point belongs to the manifold of Dirichlet distributions.
        Check that point defines parameters for a Dirichlet distributions,
        i.e. belongs to the positive quadrant of the Euclidean space.
        Parameters
        ----------
        point : array-like, shape=[..., dim]
            Point to be checked.
        Returns
        -------
        belongs : array-like, shape=[...,]
            Boolean indicating whether point represents a Dirichlet
            distribution.
        """
        point_dim = point.shape[-1]
        # Valid parameters: correct length and strictly positive entries.
        belongs = point_dim == self.dim
        belongs = gs.logical_and(
            belongs, gs.all(gs.greater(point, 0.), axis=-1))
        return belongs
    def random_point(self, n_samples=1, bound=5.):
        """Sample parameters of Dirichlet distributions.
        The uniform distribution on [0, bound]^dim is used.
        Parameters
        ----------
        n_samples : int
            Number of samples.
            Optional, default: 1.
        bound : float
            Side of the square where the Dirichlet parameters are sampled.
            Optional, default: 5.
        Returns
        -------
        samples : array-like, shape=[..., dim]
            Sample of points representing Dirichlet distributions.
        """
        # A single sample is returned unbatched, with shape (dim,).
        size = (self.dim,) if n_samples == 1 else (n_samples, self.dim)
        return bound * gs.random.rand(*size)
    def sample(self, point, n_samples=1):
        """Sample from the Dirichlet distribution.
        Sample from the Dirichlet distribution with parameters provided
        by point. This gives n_samples points in the simplex.
        Parameters
        ----------
        point : array-like, shape=[..., dim]
            Point representing a Dirichlet distribution.
        n_samples : int
            Number of points to sample for each set of parameters in point.
            Optional, default: 1.
        Returns
        -------
        samples : array-like, shape=[..., n_samples]
            Sample from the Dirichlet distributions.
        """
        geomstats.errors.check_belongs(point, self)
        point = gs.to_ndarray(point, to_ndim=2)
        samples = []
        # Delegate the actual sampling to scipy, one parameter set at a time.
        for param in point:
            samples.append(gs.array(
                dirichlet.rvs(param, size=n_samples)))
        return samples[0] if len(point) == 1 else gs.stack(samples)
    def point_to_pdf(self, point):
        """Compute pdf associated to point.
        Compute the probability density function of the Dirichlet
        distribution with parameters provided by point.
        Parameters
        ----------
        point : array-like, shape=[..., dim]
            Point representing a Dirichlet distribution.
        Returns
        -------
        pdf : function
            Probability density function of the Dirichlet distribution with
            parameters provided by point.
        """
        geomstats.errors.check_belongs(point, self)
        def pdf(x):
            """Generate parameterized function for the Dirichlet pdf.
            Parameters
            ----------
            x : array-like, shape=[n_points, dim]
                Points of the simplex at which to compute the probability
                density function.
            Returns
            -------
            pdf_at_x : array-like, shape=[..., n_points]
                Values of pdf at x for each value of the parameters provided
                by point.
            """
            pdf_at_x = []
            # scipy evaluates one (point, parameter) pair at a time.
            for param in point:
                pdf_at_x.append([
                    gs.array(dirichlet.pdf(pt, param)) for pt in x])
            pdf_at_x = gs.stack(pdf_at_x, axis=0)
            return pdf_at_x
        return pdf
class DirichletMetric(RiemannianMetric):
"""Class for the Fisher information metric on Dirichlet distributions."""
    def __init__(self, dim):
        """Construct the metric on the dim-dimensional parameter manifold."""
        super(DirichletMetric, self).__init__(dim=dim)
    def metric_matrix(self, base_point=None):
        """Compute the inner-product matrix.
        Compute the inner-product matrix of the Fisher information metric
        at the tangent space at base point.
        Parameters
        ----------
        base_point : array-like, shape=[..., dim]
            Base point.
        Returns
        -------
        mat : array-like, shape=[..., dim, dim]
            Inner-product matrix.
        Raises
        ------
        ValueError
            If no base point is provided.
        """
        if base_point is None:
            raise ValueError('A base point must be given to compute the '
                             'metric matrix')
        base_point = gs.to_ndarray(base_point, to_ndim=2)
        n_points = base_point.shape[0]
        mat_ones = gs.ones((n_points, self.dim, self.dim))
        poly_sum = gs.polygamma(1, gs.sum(base_point, -1))
        mat_diag = from_vector_to_diagonal_matrix(
            gs.polygamma(1, base_point))
        # G = diag(polygamma(1, x)) - polygamma(1, sum(x)) * ones(dim, dim),
        # computed per base point via the batched einsum.
        mat = mat_diag - gs.einsum('i,ijk->ijk', poly_sum, mat_ones)
        # Drop the batch axis for a single input point.
        return gs.squeeze(mat)
    def christoffels(self, base_point):
        """Compute the Christoffel symbols.
        Compute the Christoffel symbols of the Fisher information metric.
        References
        ----------
        .. [LPP2021] A. Le Brigant, S. C. Preston, S. Puechmorel. Fisher-Rao
            geometry of Dirichlet Distributions. Differential Geometry
            and its Applications, 74, 101702, 2021.
        Parameters
        ----------
        base_point : array-like, shape=[..., dim]
            Base point.
        Returns
        -------
        christoffels : array-like, shape=[..., dim, dim, dim]
            Christoffel symbols.
        """
        base_point = gs.to_ndarray(base_point, to_ndim=2)
        n_points = base_point.shape[0]
        def coefficients(ind_k):
            # Builds the dim x dim matrix of symbols Gamma^{ind_k}_{ij}
            # (fixed upper index ind_k) for every base point in the batch.
            param_k = base_point[..., ind_k]
            param_sum = gs.sum(base_point, -1)
            c1 = 1 / gs.polygamma(1, param_k) / (
                1 / gs.polygamma(1, param_sum)
                - gs.sum(1 / gs.polygamma(1, base_point), -1))
            c2 = - c1 * gs.polygamma(2, param_sum) / gs.polygamma(1, param_sum)
            mat_ones = gs.ones((n_points, self.dim, self.dim))
            mat_diag = from_vector_to_diagonal_matrix(
                - gs.polygamma(2, base_point) / gs.polygamma(1, base_point))
            # One-hot vector selecting coordinate ind_k, tiled per point.
            arrays = [gs.zeros((1, ind_k)),
                      gs.ones((1, 1)),
                      gs.zeros((1, self.dim - ind_k - 1))]
            vec_k = gs.tile(gs.hstack(arrays), (n_points, 1))
            val_k = gs.polygamma(2, param_k) / gs.polygamma(1, param_k)
            vec_k = gs.einsum('i,ij->ij', val_k, vec_k)
            mat_k = from_vector_to_diagonal_matrix(vec_k)
            mat = gs.einsum('i,ijk->ijk', c2, mat_ones)\
                - gs.einsum('i,ijk->ijk', c1, mat_diag) + mat_k
            return 1 / 2 * mat
        christoffels = []
        for ind_k in range(self.dim):
            christoffels.append(coefficients(ind_k))
        # Stack along axis 1 so the upper index follows the batch axis;
        # squeeze removes the batch axis for a single input point.
        christoffels = gs.stack(christoffels, 1)
        return gs.squeeze(christoffels)
    def jacobian_christoffels(self, base_point):
        """Compute the Jacobian of the Christoffel symbols.
        Compute the Jacobian of the Christoffel symbols of the
        Fisher information metric.
        Parameters
        ----------
        base_point : array-like, shape=[..., dim]
            Base point.
        Returns
        -------
        jac : array-like, shape=[..., dim, dim, dim, dim]
            Jacobian of the Christoffel symbols.
            :math: 'jac[..., i, j, k, l] = dGamma^i_{jk} / dx_l'
        """
        n_dim = base_point.ndim
        param = gs.transpose(base_point)
        sum_param = gs.sum(param, 0)
        # Recurring building blocks: reciprocals and derivative ratios of
        # polygamma functions of the parameters and of their sum.
        term_1 = 1 / gs.polygamma(1, param)
        term_2 = 1 / gs.polygamma(1, sum_param)
        term_3 = - gs.polygamma(2, param) / gs.polygamma(1, param)**2
        term_4 = - gs.polygamma(2, sum_param) / gs.polygamma(1, sum_param)**2
        term_5 = term_3 / term_1
        term_6 = term_4 / term_2
        term_7 = (gs.polygamma(2, param)**2 - gs.polygamma(1, param) *
                  gs.polygamma(3, param)) / gs.polygamma(1, param)**2
        term_8 = (gs.polygamma(2, sum_param)**2 - gs.polygamma(1, sum_param) *
                  gs.polygamma(3, sum_param)) / gs.polygamma(1, sum_param)**2
        term_9 = term_2 - gs.sum(term_1, 0)
        # jac_1 .. jac_7 are the seven additive contributions to the
        # derivative; each is broadcast/tiled to the full 4-index shape.
        jac_1 = term_1 * term_8 / term_9
        jac_1_mat = gs.squeeze(
            gs.tile(jac_1, (self.dim, self.dim, self.dim, 1, 1)))
        jac_2 = - term_6 / term_9**2 * gs.einsum(
            'j...,i...->ji...', term_4 - term_3, term_1)
        jac_2_mat = gs.squeeze(
            gs.tile(jac_2, (self.dim, self.dim, 1, 1, 1)))
        jac_3 = term_3 * term_6 / term_9
        jac_3_mat = gs.transpose(
            from_vector_to_diagonal_matrix(gs.transpose(jac_3)))
        jac_3_mat = gs.squeeze(
            gs.tile(jac_3_mat, (self.dim, self.dim, 1, 1, 1)))
        jac_4 = 1 / term_9**2 * gs.einsum(
            'k...,j...,i...->kji...', term_5, term_4 - term_3, term_1)
        jac_4_mat = gs.transpose(
            from_vector_to_diagonal_matrix(gs.transpose(jac_4)))
        jac_5 = - gs.einsum('j...,i...->ji...', term_7, term_1) / term_9
        jac_5_mat = from_vector_to_diagonal_matrix(
            gs.transpose(jac_5))
        jac_5_mat = gs.transpose(from_vector_to_diagonal_matrix(
            jac_5_mat))
        jac_6 = - gs.einsum('k...,j...->kj...', term_5, term_3) / term_9
        jac_6_mat = gs.transpose(from_vector_to_diagonal_matrix(
            gs.transpose(jac_6)))
        # The batched (n_dim > 1) and single-point cases need different
        # axis bookkeeping when diagonal-embedding the last contribution.
        jac_6_mat = gs.transpose(from_vector_to_diagonal_matrix(
            gs.transpose(jac_6_mat, [0, 1, 3, 2])), [0, 1, 3, 4, 2]) \
            if n_dim > 1 else from_vector_to_diagonal_matrix(
                jac_6_mat)
        jac_7 = - from_vector_to_diagonal_matrix(gs.transpose(term_7))
        jac_7_mat = from_vector_to_diagonal_matrix(jac_7)
        jac_7_mat = gs.transpose(
            from_vector_to_diagonal_matrix(jac_7_mat))
        # Factor 1/2 matches the definition used in christoffels above.
        jac = 1 / 2 * (
            jac_1_mat + jac_2_mat + jac_3_mat +
            jac_4_mat + jac_5_mat + jac_6_mat + jac_7_mat)
        # Reorder axes to the documented [.., i, j, k, l] index layout,
        # with or without a leading batch axis.
        return gs.transpose(jac, [3, 1, 0, 2]) if n_dim == 1 else \
            gs.transpose(jac, [4, 3, 1, 0, 2])
    def _geodesic_ivp(self, initial_point, initial_tangent_vec,
                      n_steps=N_STEPS):
        """Solve geodesic initial value problem.
        Compute the parameterized function for the geodesic starting at
        initial_point with initial velocity given by initial_tangent_vec.
        This is achieved by integrating the geodesic equation.
        Parameters
        ----------
        initial_point : array-like, shape=[..., dim]
            Initial point.
        initial_tangent_vec : array-like, shape=[..., dim]
            Tangent vector at initial point.
        n_steps : int
            Number of integration steps.
            Optional, default: N_STEPS.
        Returns
        -------
        path : function
            Parameterized function for the geodesic curve starting at
            initial_point with velocity initial_tangent_vec.
        """
        initial_point = gs.to_ndarray(initial_point, to_ndim=2)
        initial_tangent_vec = gs.to_ndarray(initial_tangent_vec, to_ndim=2)
        n_initial_points = initial_point.shape[0]
        n_initial_tangent_vecs = initial_tangent_vec.shape[0]
        if n_initial_points > n_initial_tangent_vecs:
            raise ValueError('There cannot be more initial points than '
                             'initial tangent vectors.')
        if n_initial_tangent_vecs > n_initial_points:
            if n_initial_points > 1:
                raise ValueError('For several initial tangent vectors, '
                                 'specify either one or the same number of '
                                 'initial points.')
            # Broadcast the single initial point, one per tangent vector.
            initial_point = gs.tile(initial_point, (n_initial_tangent_vecs, 1))
        def ivp(state, _):
            """Reformat the initial value problem geodesic ODE."""
            # odeint requires a flat state vector: the first dim entries
            # are the position, the last dim entries the velocity.
            position, velocity = state[:self.dim], state[self.dim:]
            state = gs.stack([position, velocity])
            vel, acc = self.geodesic_equation(state, _)
            eq = (vel, acc)
            return gs.hstack(eq)
        def path(t):
            """Generate parameterized function for geodesic curve.
            Parameters
            ----------
            t : array-like, shape=[n_times,]
                Times at which to compute points of the geodesics.
            Returns
            -------
            geodesic : array-like, shape=[..., n_times, dim]
                Values of the geodesic at times t.
            """
            t = gs.to_ndarray(t, to_ndim=1)
            n_times = len(t)
            geod = []
            if n_times < n_steps:
                # Fewer requested times than integration steps: integrate on
                # a fine [0, 1] grid with the velocity scaled by each time t
                # and keep only the endpoint (one solve per requested time).
                t_int = gs.linspace(0, 1, n_steps + 1)
                tangent_vecs = gs.einsum(
                    'i,...k->...ik', t, initial_tangent_vec)
                for point, vec in zip(initial_point, tangent_vecs):
                    point = gs.tile(point, (n_times, 1))
                    exp = []
                    for pt, vc in zip(point, vec):
                        initial_state = gs.hstack([pt, vc])
                        solution = odeint(
                            ivp, initial_state, t_int, ())
                        exp.append(solution[-1, :self.dim])
                    exp = exp[0] if n_times == 1 else gs.stack(exp)
                    geod.append(exp)
            else:
                # Enough time samples: one integration over the given times.
                t_int = t
                for point, vec in zip(initial_point, initial_tangent_vec):
                    initial_state = gs.hstack([point, vec])
                    solution = odeint(
                        ivp, initial_state, t_int, ())
                    geod.append(solution[:, :self.dim])
            return geod[0] if len(initial_point) == 1 else \
                gs.stack(geod)
        return path
def exp(self, tangent_vec, base_point, n_steps=N_STEPS):
    """Compute the exponential map.

    Compute the exponential map associated to the Fisher information
    metric by solving the geodesic initial value problem (geodesic ODE
    via the Christoffel symbols) and evaluating the geodesic at time 1.

    Parameters
    ----------
    tangent_vec : array-like, shape=[..., dim]
        Tangent vector at base point.
    base_point : array-like, shape=[..., dim]
        Base point.
    n_steps : int
        Number of steps for integration.
        Optional, default: 100.

    Returns
    -------
    exp : array-like, shape=[..., dim]
        End point of the geodesic starting at base_point with
        initial velocity tangent_vec and stopping at time 1.
    """
    geodesic_path = self._geodesic_ivp(base_point, tangent_vec, n_steps)
    return geodesic_path(1.)
def _geodesic_bvp(self, initial_point, end_point, n_steps=N_STEPS,
                  jacobian=False, max_time=MAX_TIME):
    """Solve geodesic boundary problem.

    Compute the parameterized function for the geodesic starting at
    initial_point and ending at end_point. This is achieved by integrating
    the geodesic equation.

    Parameters
    ----------
    initial_point : array-like, shape=[..., dim]
        Initial point.
    end_point : array-like, shape=[..., dim]
        End point.
    n_steps : int
        Number of collocation nodes for the BVP solver.
        Optional, default: N_STEPS.
    jacobian : boolean.
        If True, the explicit value of the jacobian is used to solve
        the geodesic boundary value problem.
        Optional, default: False.
    max_time : float.
        Maximum time in which the boundary value problem should be
        solved, in seconds. If it takes longer, the process is terminated.
        Optional, default: 300 seconds i.e. 5 minutes.

    Returns
    -------
    path : function
        Parameterized function for the geodesic curve starting at
        initial_point and ending at end_point.
    """
    initial_point = gs.to_ndarray(initial_point, to_ndim=2)
    end_point = gs.to_ndarray(end_point, to_ndim=2)
    n_initial_points = initial_point.shape[0]
    n_end_points = end_point.shape[0]
    # Broadcast a single start (resp. end) point against a batch of end
    # (resp. start) points; mismatched batch sizes > 1 are rejected.
    if n_initial_points > n_end_points:
        if n_end_points > 1:
            raise ValueError('For several initial points, specify either'
                             'one or the same number of end points.')
        end_point = gs.tile(end_point, (n_initial_points, 1))
    elif n_end_points > n_initial_points:
        if n_initial_points > 1:
            raise ValueError('For several end points, specify either '
                             'one or the same number of initial points.')
        initial_point = gs.tile(initial_point, (n_end_points, 1))

    def bvp(_, state):
        """Reformat the boundary value problem geodesic ODE.

        Parameters
        ----------
        state : array-like, shape[2 * dim,]
            Vector of the state variables: position and speed.
        _ : unused
            Any (time).
        """
        position, velocity = state[:self.dim].T, state[self.dim:].T
        state = gs.stack([position, velocity])
        vel, acc = self.geodesic_equation(state, _)
        eq = (vel, acc)
        return gs.transpose(gs.hstack(eq))

    def boundary_cond(
            state_0, state_1, point_0, point_1):
        """Boundary condition for the geodesic ODE."""
        # Residuals: positions at both ends must match the given points.
        return gs.hstack((state_0[:self.dim] - point_0,
                          state_1[:self.dim] - point_1))

    def jac(_, state):
        """Jacobian of bvp function.

        Parameters
        ----------
        state : array-like, shape=[2*dim, ...]
            Vector of the state variables (position and speed)
        _ : unused
            Any (time).

        Returns
        -------
        jac : array-like, shape=[dim, dim, ...]
        """
        n_dim = state.ndim
        n_times = state.shape[1] if n_dim > 1 else 1
        position, velocity = state[:self.dim], state[self.dim:]
        # Derivative of the acceleration w.r.t. position comes from the
        # derivative of the Christoffel symbols.
        dgamma = self.jacobian_christoffels(gs.transpose(position))
        df_dposition = - gs.einsum(
            'j...,...ijkl,k...->il...', velocity, dgamma, velocity)
        gamma = self.christoffels(gs.transpose(position))
        df_dvelocity = - 2 * gs.einsum(
            '...ijk,k...->ij...', gamma, velocity)
        # Assemble the 2x2 block Jacobian: [[0, I], [df/dx, df/dv]].
        jac_nw = gs.zeros((self.dim, self.dim, state.shape[1])) \
            if n_dim > 1 else gs.zeros((self.dim, self.dim))
        jac_ne = gs.squeeze(gs.transpose(gs.tile(
            gs.eye(self.dim), (n_times, 1, 1))))
        jac_sw = df_dposition
        jac_se = df_dvelocity
        jac = gs.concatenate((
            gs.concatenate((jac_nw, jac_ne), axis=1),
            gs.concatenate((jac_sw, jac_se), axis=1)), axis=0)
        return jac

    def path(t):
        """Generate parameterized function for geodesic curve.

        Parameters
        ----------
        t : array-like, shape=[n_times,]
            Times at which to compute points of the geodesics.

        Returns
        -------
        geodesic : array-like, shape=[..., n_times, dim]
            Values of the geodesic at times t.
        """
        t = gs.to_ndarray(t, to_ndim=1)
        geod = []

        def initialize(point_0, point_1):
            """Initialize the solution of the boundary value problem."""
            # Initial guess: linear interpolation in position, constant
            # forward-difference estimate in velocity.
            lin_init = gs.zeros([2 * self.dim, n_steps])
            lin_init[:self.dim, :] = gs.transpose(
                gs.linspace(point_0, point_1, n_steps))
            lin_init[self.dim:, :-1] = n_steps * (
                lin_init[:self.dim, 1:] - lin_init[:self.dim, :-1])
            lin_init[self.dim:, -1] = lin_init[self.dim:, -2]
            return lin_init

        t_int = gs.linspace(0., 1., n_steps)
        fun_jac = jac if jacobian else None
        for ip, ep in zip(initial_point, end_point):
            # Bind ip/ep as defaults to avoid late-binding closure bugs.
            def bc(y0, y1, ip=ip, ep=ep):
                return boundary_cond(y0, y1, ip, ep)

            def process_function(return_dict, ip=ip, ep=ep):
                solution = solve_bvp(
                    bvp, bc, t_int, initialize(ip, ep), fun_jac=fun_jac)
                solution_at_t = solution.sol(t)
                geodesic = solution_at_t[:self.dim, :]
                geod.append(gs.squeeze(gs.transpose(geodesic)))
                return_dict[0] = geod

            # Run the solver in a separate process so it can be killed
            # if it exceeds max_time.
            manager = multiprocessing.Manager()
            return_dict = manager.dict()
            process = multiprocessing.Process(
                target=process_function, args=(return_dict,))
            process.start()
            process.join(max_time)
            if process.is_alive():
                process.terminate()
                logging.info('Maximum time of {} seconds reached. '
                             'Process terminated. '
                             'Result is inaccurate.'.format(max_time))
                # NaN placeholder keeps batch shapes consistent when the
                # solver times out.
                geod.append(math.nan * gs.zeros((n_steps, self.dim)))
            else:
                geod = return_dict[0]
        return geod[0] if len(initial_point) == 1 else gs.stack(geod)

    return path
def log(self, point, base_point, n_steps=N_STEPS, jacobian=False,
        max_time=MAX_TIME):
    """Compute the logarithm map.

    Compute logarithm map associated to the Fisher information metric by
    solving the boundary value problem associated to the geodesic ordinary
    differential equation (ODE) using the Christoffel symbols.

    Parameters
    ----------
    point : array-like, shape=[..., dim]
        Point.
    base_point : array-like, shape=[..., dim]
        Base point.
    n_steps : int
        Number of steps for integration.
        Optional, default: 100.
    jacobian : boolean.
        If True, the explicit value of the jacobian is used to solve
        the geodesic boundary value problem.
        Optional, default: False.
    max_time : float.
        Maximum time in which the boundary value problem should be
        solved, in seconds. If it takes longer, the process is terminated.
        Optional, default: 300 seconds i.e. 5 minutes.

    Returns
    -------
    tangent_vec : array-like, shape=[..., dim]
        Initial velocity of the geodesic starting at base_point and
        reaching point at time 1.
    """
    t = gs.linspace(0., 1., n_steps)
    geodesic = self._geodesic_bvp(
        initial_point=base_point, end_point=point, jacobian=jacobian,
        max_time=max_time)
    geodesic_at_t = geodesic(t)
    # Initial velocity estimated as the forward difference between the
    # first two sampled points, rescaled by the step count.
    log = n_steps * (geodesic_at_t[..., 1, :] - geodesic_at_t[..., 0, :])
    return gs.squeeze(gs.stack(log))
def geodesic(self, initial_point, end_point=None,
             initial_tangent_vec=None, n_steps=N_STEPS,
             jacobian=False, max_time=MAX_TIME):
    """Generate parameterized function for the geodesic curve.

    Geodesic curve defined by either:

    - an initial point and an initial tangent vector,
    - an initial point and an end point.

    Parameters
    ----------
    initial_point : array-like, shape=[..., dim]
        Point on the manifold, initial point of the geodesic.
    end_point : array-like, shape=[..., dim], optional
        Point on the manifold, end point of the geodesic. If None,
        an initial tangent vector must be given.
    initial_tangent_vec : array-like, shape=[..., dim],
        Tangent vector at base point, the initial speed of the geodesics.
        Optional, default: None.
        If None, an end point must be given and a logarithm is computed.
    n_steps : int
        Number of integration steps.
        Optional, default: N_STEPS.
    jacobian : boolean.
        If True, the explicit value of the jacobian is used to solve
        the geodesic boundary value problem.
        Optional, default: False.
    max_time : float.
        Maximum time in which the boundary value problem should be
        solved, in seconds. If it takes longer, the process is terminated.
        Optional, default: 300 seconds i.e. 5 minutes.

    Returns
    -------
    path : callable
        Time parameterized geodesic curve. If a batch of initial
        conditions is passed, the output array's first dimension
        represents time, and the second corresponds to the different
        initial conditions.
    """
    has_end_point = end_point is not None
    has_tangent_vec = initial_tangent_vec is not None

    # Exactly one of end_point / initial_tangent_vec must be supplied.
    if not has_end_point and not has_tangent_vec:
        raise ValueError('Specify an end point or an initial tangent '
                         'vector to define the geodesic.')
    if has_end_point and has_tangent_vec:
        raise ValueError('Cannot specify both an end point '
                         'and an initial tangent vector.')

    if has_tangent_vec:
        return self._geodesic_ivp(
            initial_point, initial_tangent_vec, n_steps)
    return self._geodesic_bvp(initial_point, end_point, n_steps,
                              jacobian=jacobian, max_time=max_time)
|
server_tests.py | #!/usr/bin/python2.7
# Copyright 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starts up an appserver and runs end-to-end tests against it.
Instead of running this script directly, use the 'server_tests' shell script,
which sets up the PYTHONPATH and other necessary environment variables.
The actual test cases reside in server_test_cases.py.
Use -k to select particular test classes or methods by a substring match:
tools/server_tests -k ConfigTests
tools/server_tests -k test_delete_and_restore
Specify -v to show the name of each test as it runs (rather than just dots).
Specify -s to see the messages printed by all tests as they run (by default,
stdout/stderr will be captured and then shown only for failing tests).
"""
import os
import pytest
import re
import signal
import smtpd
import subprocess
import sys
import tempfile
import threading
import time
from model import *
import remote_api
import setup_pf as setup
class ProcessRunner(threading.Thread):
    """A thread that starts a subprocess, collects its output, and stops it."""

    READY_RE = re.compile('')  # this output means the process is ready
    ERROR_RE = re.compile('ERROR|CRITICAL')  # output indicating failure
    OMIT_RE = re.compile('INFO |WARNING ')  # don't bother showing these lines
    # this output is for appserver's port error
    BIND_RE = re.compile('BindError: Unable to bind (.*):(\d+)')
    debug = False  # set to True to see all log messages, ignoring OMIT_RE

    def __init__(self, name, args):
        threading.Thread.__init__(self)
        self.name = name            # label used in status messages
        self.args = args            # argv list for subprocess.Popen
        self.process = None  # subprocess.Popen instance
        self.ready = False  # process is running and ready
        self.failed = False  # process emitted an error message in its output
        self.output = []

    def run(self):
        """Starts the subprocess and collects its output while it runs."""
        self.process = subprocess.Popen(
            self.args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
            close_fds=True)
        # Each subprocess needs a thread to be watching it and absorbing its
        # output; otherwise it will block when its stdout pipe buffer fills.
        self.start_watching_output(self.process.stdout)
        self.start_watching_output(self.process.stderr)
        self.process.wait()

    def start_watching_output(self, output):
        """Spawns a daemon thread that drains one output pipe."""
        stdout_thread = threading.Thread(target=self.watch_output, args=(output,))
        stdout_thread.setDaemon(True)
        stdout_thread.start()

    def watch_output(self, output):
        """Reads lines from a pipe, updating ready/failed flags as it goes."""
        while self.process.poll() is None:
            line = output.readline()
            if not line:  # process finished
                return
            if self.READY_RE.search(line):
                self.ready = True
            if not self.debug and self.OMIT_RE.search(line):  # omit these lines
                continue
            if self.ERROR_RE.search(line):  # something went wrong
                self.failed = True
            if line.strip():
                self.output.append(line.strip('\n'))

    def stop(self):
        """Terminates the subprocess and returns its status code."""
        if self.process:  # started
            if self.isAlive():  # still running
                os.kill(self.process.pid, signal.SIGINT)
            else:
                self.failed = self.process.returncode != 0
        self.clean_up()
        if self.failed:
            self.flush_output()
            print >>sys.stderr, '%s failed (status %s).\n' % (
                self.name, self.process.returncode)
        else:
            print >>sys.stderr, '%s stopped.' % self.name

    def flush_output(self):
        """Flushes the buffered output from this subprocess to stderr."""
        self.output, lines_to_print = [], self.output
        if lines_to_print:
            sys.stderr.write('\n--- output from %s ---\n' % self.name)
            sys.stderr.write('\n'.join(lines_to_print) + '\n\n')

    def wait_until_ready(self, timeout=10):
        """Waits until the subprocess has logged that it is ready."""
        fail_time = time.time() + timeout
        while self.isAlive() and not self.ready and time.time() < fail_time:
            for jiffy in range(10):  # wait one second, aborting early if ready
                if not self.ready:
                    time.sleep(0.1)
            if not self.ready:
                self.flush_output()  # after each second, show output
        if self.ready:
            print >>sys.stderr, '%s started.' % self.name
        else:
            raise RuntimeError('%s failed to start.' % self.name)

    def clean_up(self):
        # Hook for subclasses to release resources after the process stops.
        pass
class AppServerRunner(ProcessRunner):
    """Manages a dev_appserver subprocess."""

    READY_RE = re.compile('Starting module "default" running at|Running application')
    OMIT_RE = re.compile(
        'INFO |WARNING |DeprecationWarning: get_request_cpu_usage')

    def __init__(self, port, smtp_port):
        # Back the datastore with a throwaway temp file so each test run
        # starts from a clean slate.
        self.__datastore_file = tempfile.NamedTemporaryFile()
        ProcessRunner.__init__(self, 'appserver', [
            os.environ['PYTHON'],
            os.path.join(os.environ['APPENGINE_DIR'], 'dev_appserver.py'),
            os.environ['APP_DIR'],
            '--port=%s' % port,
            '--datastore_path=%s' % self.__datastore_file.name,
            '--require_indexes',
            '--smtp_host=localhost',
            '--smtp_port=%d' % smtp_port,
            # By default, if we perform a datastore write and a query in this
            # order, the query may see the data before the write is applied.
            # This is the behavior in the production, but it is inconvenient
            # to perform server tests, because we often perform a database
            # write then test if it's visible in the web page. This flag makes
            # sure that the query see the data after the write is applied.
            '--datastore_consistency_policy=consistent',
        ])

    def flush_output(self):
        """Flushes the buffered output from this subprocess to stderr."""
        self.output, original_output = [], self.output
        if original_output:
            original_output_text = '\n'.join(original_output)
            # BUG FIX: re.MULTILINE was previously passed as the second
            # positional argument of Pattern.search(), which is `pos` (a
            # start offset), not `flags` -- so the first 8 characters were
            # silently skipped. BIND_RE has no anchors, so no flag is needed.
            match = self.BIND_RE.search(original_output_text)
            if match:
                host = match.group(1)
                port = match.group(2)
                sys.stderr.write('%s failed %s port %s is already in use.\n' %
                                 (self.name, host, port))
                sys.stderr.write('Please turn down local Person Finder ' +
                                 'server or the server test if any.\n\n')
            else:
                sys.stderr.write('\n--- output from %s ---\n' % self.name)
                sys.stderr.write(original_output_text + '\n\n')
class MailThread(threading.Thread):
    """Runs an SMTP server and stores the incoming messages."""

    # Shared across instances: tests inspect and reset this list.
    messages = []

    def __init__(self, port):
        threading.Thread.__init__(self)
        self.port = port
        self.stop_requested = False

    def run(self):
        """Serves SMTP until stop() is called, recording each message."""
        class MailServer(smtpd.SMTPServer):
            def process_message(self, peer, mailfrom, rcpttos, data):
                print >>sys.stderr, 'mail from:', mailfrom, 'to:', rcpttos
                MailThread.messages.append(
                    {'from': mailfrom, 'to': rcpttos, 'data': data})
        try:
            server = MailServer(('localhost', self.port), None)
        except Exception, e:
            print >>sys.stderr, 'SMTP server failed: %s' % e
            sys.exit(-1)
        print >>sys.stderr, 'SMTP server started.'
        # Short asyncore timeouts let the loop notice stop_requested quickly.
        while not self.stop_requested:
            smtpd.asyncore.loop(timeout=0.5, count=1)
        print >>sys.stderr, 'SMTP server stopped.'

    def stop(self):
        # Signal run() to exit its serving loop.
        self.stop_requested = True

    def wait_until_ready(self, timeout=10):
        # The SMTP server is ready as soon as the thread starts; no-op.
        pass

    def flush_output(self):
        # No buffered output to show; kept for ProcessRunner-like interface.
        pass
class PyTestPlugin:
    """A plugin for pytest that does the setup and teardown for server tests."""

    def __init__(self):
        # Helper threads (appserver, SMTP) started for a local test run.
        self.threads = []

    def pytest_addoption(self, parser):
        """Registers the server-test command-line options with pytest."""
        group = parser.getgroup(
            'server_tests', 'App Engine server testing', after='general')
        group.addoption('--server',
                        help='appserver URL (default: localhost:8081)')
        group.addoption('--port', type='int', default=8081,
                        help='appserver port number (default: 8081)')
        group.addoption('--mailport', type='int', default=8025,
                        help='SMTP server port number (default: 8025)')

    def pytest_configure(self, config):
        """Starts the local servers and prepares the datastore."""
        options = config.option
        url = options.server or 'localhost:%d' % options.port
        secure, host, port, path = remote_api.parse_url(url)
        if host == 'localhost':
            # We need to start up a clean new appserver for testing.
            self.threads.append(AppServerRunner(options.port, options.mailport))
            self.threads.append(MailThread(options.mailport))
        for thread in self.threads:
            thread.start()
        for thread in self.threads:
            thread.wait_until_ready()
        # Connect to the datastore.
        remote_api.connect(url, server_type='local')
        # Reset the datastore for the first test.
        reset_data()
        # Give the tests access to configuration information.
        config.hostport = '%s:%d' % (host, port)
        config.mail_server = MailThread

    def pytest_unconfigure(self, config):
        """Shows buffered output and shuts the helper threads down."""
        for thread in self.threads:
            if hasattr(thread, 'flush_output'):
                thread.flush_output()
        for thread in self.threads:
            thread.stop()
            thread.join()

    def pytest_runtest_setup(self):
        # Each test starts with an empty mailbox.
        MailThread.messages = []
def reset_data():
    """Reset the datastore to a known state, populated with test data."""
    setup.reset_datastore()
    # Seed API keys covering the permission combinations the tests exercise.
    db.put([
        Authorization.create(
            'haiti', 'test_key', domain_write_permission='test.google.com'),
        Authorization.create(
            'haiti', 'domain_test_key',
            domain_write_permission='mytestdomain.com'),
        Authorization.create(
            'haiti', 'reviewed_test_key',
            domain_write_permission='test.google.com',
            mark_notes_reviewed=True),
        Authorization.create(
            'haiti', 'not_allow_believed_dead_test_key',
            domain_write_permission='test.google.com',
            believed_dead_permission=False),
        Authorization.create(
            'haiti', 'allow_believed_dead_test_key',
            domain_write_permission='test.google.com',
            believed_dead_permission=True),
        Authorization.create(
            'haiti', 'other_key', domain_write_permission='other.google.com'),
        Authorization.create(
            'haiti', 'read_key', read_permission=True),
        Authorization.create(
            'haiti', 'full_read_key', full_read_permission=True),
        Authorization.create(
            'haiti', 'search_key', search_permission=True),
        Authorization.create(
            'haiti', 'subscribe_key', subscribe_permission=True),
        Authorization.create(
            '*', 'global_test_key',
            domain_write_permission='globaltestdomain.com'),
        # An API key which can be used for SMS API.
        Authorization.create(
            '*',
            'sms_key',
            search_permission=True,
            domain_write_permission='*'),
    ])
def monkeypatch_pytest_terminal_reporter():
    """Improves the output produced by _pytest.terminal.TerminalReporter."""
    import _pytest.terminal

    def patched_write_sep(self, sep, title=None, **markup):
        # For failure-report separators ('_'), highlight the test name in
        # cyan and insert a blank line before the report.
        if sep == '_':
            markup['cyan'] = 1
            self._tw.line()
        self._tw.sep(sep, title, **markup)

    _pytest.terminal.TerminalReporter.write_sep = patched_write_sep
if __name__ == '__main__':
    # Patch pytest's reporter for nicer failure output before running.
    monkeypatch_pytest_terminal_reporter()
    # Run the tests, using sys.exit to set exit status (nonzero for failure).
    sys.exit(pytest.main(plugins=[PyTestPlugin()]))
|
main BETA.py | import serial # For Bluetooth
from nanpy import (ArduinoApi, SerialManager)  # For Arduino
import RPi.GPIO as GPIO  # For Raspberry Pi
from threading import Thread
from time import sleep, time
import os

# Bluetooth serial connection (non-blocking reads: timeout = 0).
ser = serial.Serial('/dev/ttyS0', 9600, timeout = 0)

# Arduino connection through nanpy's serial bridge.
connection = SerialManager()
a = ArduinoApi(connection = connection)

# Shared state mutated by the worker threads below.
sysMode = 0      # 0 = automatic (LDR-driven) lighting, 1 = manual ("9"/"0" over BT)
serialRead = 0   # last command string read from Bluetooth
check_mode = 1   # counts status reports sent after entering manual mode
buzz_delay = 0   # buzzer-active flag shared between smoke/motion threads

# Raspberry Pi GPIO pins (BOARD numbering).
smoke_in = 35    # smoke/gas sensor input
TRIG = 3         # ultrasonic trigger
ECHO = 5         # ultrasonic echo
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BOARD)
GPIO.setup(smoke_in, GPIO.IN)
GPIO.setup(TRIG,GPIO.OUT)
GPIO.setup(ECHO,GPIO.IN)

# Arduino pins: room light outputs and their LDR inputs (r_* prefix).
office = 13
r_office = 4
kitchen = 12
r_kitchen = 2
meeting_room = 3
r_meeting_room = 5
waiting_room = 7
r_waiting_room = 14
employee_room = 9
bath_room = 8
buzzer = 22
fan = 52

a.pinMode(office, a.OUTPUT)
a.pinMode(r_office, a.INPUT)
a.pinMode(kitchen, a.OUTPUT)
a.pinMode(r_kitchen, a.INPUT)
a.pinMode(meeting_room, a.OUTPUT)
a.pinMode(r_meeting_room, a.INPUT)
a.pinMode(waiting_room, a.OUTPUT)
a.pinMode(r_waiting_room, a.INPUT)
a.pinMode(employee_room, a.OUTPUT)
a.pinMode(bath_room, a.OUTPUT)
a.pinMode(buzzer, a.OUTPUT)
a.pinMode(fan, a.OUTPUT)
# Buzzer Delay
def buzzer_delay(delay):
    """Sound the buzzer for `delay` seconds.

    BUG FIX: the original assigned to a *local* `buzz_delay`, shadowing the
    module-level flag that the smoke/motion threads reset to 0 -- the guard
    was therefore always true and the flag was never published. Declaring
    the global restores the evident intent: skip re-triggering while a
    buzz from another thread is already in progress.
    """
    global buzz_delay
    if buzz_delay == 0:
        buzz_delay = 1
        a.digitalWrite(buzzer, a.HIGH)
        sleep(delay)
        a.digitalWrite(buzzer, a.LOW)
        buzz_delay = 0
# Light Resistor Function
def r_room(ldr, room):
    """Drive a room light opposite to its light sensor reading.

    When the LDR reads high (presumably ambient light present -- confirm
    against the sensor wiring) the light is switched off, and vice versa.
    """
    # Read the sensor once: the original called digitalRead twice, so the
    # value could change between the `if` and the `elif`.
    reading = a.digitalRead(ldr)
    if reading == True:
        a.digitalWrite(room, a.LOW)
    elif reading == False:
        a.digitalWrite(room, a.HIGH)
# Manual Control
def m_room(on, off, room):
    """Switch a room light according to the last Bluetooth command.

    `on`/`off` are the command strings that turn the given Arduino
    output pin high/low.
    """
    command = serialRead
    if command == on:
        a.digitalWrite(room, a.HIGH)
    elif command == off:
        a.digitalWrite(room, a.LOW)
# Mobile Check
def m_check(room, on, off):
    """Report a room light's state to the app over Bluetooth.

    Sends `on` when the pin reads high, `off` when it reads low.
    """
    # Read the pin once: the original called digitalRead twice, so the
    # state could change between the `if` and the `elif`.
    state = a.digitalRead(room)
    if state == True:
        ser.write(on)
    elif state == False:
        ser.write(off)
# Lights Thread
def lights():
    """Thread body: poll Bluetooth commands and drive the room lights.

    Command "9" switches to manual (app-driven) mode, "0" back to
    automatic LDR-based control.
    """
    while True:
        # Global Variables
        global sysMode
        global serialRead
        global office
        global r_office
        global kitchen
        global r_kitchen
        global meeting_room
        global r_meeting_room
        global waiting_room
        global r_waiting_room
        global employee_room
        global bath_room
        global check_mode
        # Bluetooth Serial Variable
        serialRead = ser.readline()
        serialRead = serialRead.decode().strip()
        # System Mode Check
        if serialRead == "9":
            sysMode = 1
        elif serialRead == "0":
            sysMode = 0
        # Automatic Mode
        if sysMode == 0:
            r_room(r_office, office)
            r_room(r_kitchen, kitchen)
            r_room(r_meeting_room, meeting_room)
            r_room(r_waiting_room, waiting_room)
            # Reset so the app is re-synced next time manual mode starts.
            check_mode = 0
        # Manual Mode
        elif sysMode == 1:
            # Check The Light Status: re-send the light states to the app
            # for (at most) the first 50 passes after entering manual mode.
            if check_mode < 50:
                m_check(waiting_room, b"D", b"d")
                m_check(kitchen, b"G", b"g")
                m_check(office, b"V", b"v")
                m_check(bath_room, b"H", b"h")
                m_check(employee_room, b"K", b"k")
                m_check(meeting_room, b"F\n", b"f\n")
                check_mode = check_mode + 1
            # Manual Mode Serial Check: map command chars to light toggles.
            m_room("1", "2", waiting_room)
            m_room("3", "4", kitchen)
            m_room("5", "6", office)
            m_room("7", "8", bath_room)
            m_room("e", "r", employee_room)
            m_room("m", "t", meeting_room)
# Smoke Thread
def smoke():
    """Thread body: watch the smoke sensor, buzz and send e-mail alerts.

    The sensor pin reads low when smoke/gas is detected (active-low --
    inferred from this branch; confirm against the sensor datasheet).
    """
    while True:
        # Global Variables
        global buzzer
        global buzz_delay
        # Get Smoke Status.
        # CONSISTENCY FIX: use the configured `smoke_in` pin constant
        # instead of the hard-coded duplicate value 35.
        if GPIO.input(smoke_in) == False:
            ser.write(b"S")
            buzzer_delay(2)
            a.digitalWrite(buzzer, a.LOW)
            os.system('echo "SmartX detected smoke or gas in your apartment\nPlease be safe" | mail -s "Smoke or gas detected in your apartment" moksha.elghabaty@hotmail.com')
            ser.write(b"s")
            sleep(15)
            os.system('echo "there is no smoke now in your apartment" | mail -s "No smoke or gas now in your apartment" moksha.elghabaty@hotmail.com')
        else:
            buzz_delay = 0
# Temprature Thread
def temp():
    """Thread body: read the 1-wire temperature sensor and drive the fan.

    Reads the w1_slave sysfs file of a DS18B20-style sensor, extracts the
    millidegree value, and turns the fan on above 30 degrees C, notifying
    the app with "Z"/"z".
    """
    while True:
        # Get The Temperature Value. Use a context manager so the file is
        # closed even if reading/parsing raises; also avoid shadowing the
        # stdlib `tempfile` module name.
        with open("/sys/bus/w1/devices/28-000002f53b3e/w1_slave") as sensor_file:
            thetext = sensor_file.read()
        # Second line ends with 't=NNNNN' (millidegrees) in field 9.
        tempdata = thetext.split("\n")[1].split(" ")[9]
        temprature = float(tempdata[2:])
        temprature = temprature / 1000
        temprature = int(temprature)
        if temprature > 30:
            a.digitalWrite(fan, a.HIGH)
            ser.write(b"Z")
        else:
            a.digitalWrite(fan, a.LOW)
            ser.write(b"z")
# Motion Thread
def motion():
    """Thread body: measure distance with an ultrasonic sensor.

    Triggers an HC-SR04-style pulse, times the echo, and buzzes/notifies
    the app ("W"/"w") when something is closer than 8 cm.
    """
    while True:
        # Global Variables
        global buzzer
        global buzz_delay
        # Trigger a ~10 microsecond ultrasonic pulse.
        GPIO.output(TRIG, True)
        sleep(0.00001)
        GPIO.output(TRIG, False)
        start = time()
        # ROBUSTNESS FIX: initialize `stop` so it is bound even when the
        # echo-high loop body never executes (otherwise `elapsed = stop-start`
        # raised NameError if the echo pulse was missed).
        stop = start
        while GPIO.input(ECHO)==0:
            start = time()
        while GPIO.input(ECHO)==1:
            stop = time()
        elapsed = stop-start
        # Speed of sound ~34300 cm/s; halve for the round trip.
        distance = (elapsed * 34300)/2
        sleep(0.5)
        int_distance = int(distance)
        if int_distance < 8:
            buzzer_delay(2)
            ser.write(b"W")
        else:
            buzz_delay = 0
            ser.write(b"w")
# Run The Threads
if __name__ == '__main__':
    # Start each subsystem in its own thread; every target loops forever.
    Thread(target = lights).start()
    Thread(target = smoke).start()
    Thread(target = temp).start()
    Thread(target = motion).start()
test_profile.py | from __future__ import annotations
import dataclasses
import sys
import threading
from collections.abc import Iterator, Sequence
from time import sleep
import pytest
from tlz import first
from distributed.compatibility import WINDOWS
from distributed.metrics import time
from distributed.profile import (
call_stack,
create,
identifier,
info_frame,
ll_get_stack,
llprocess,
merge,
plot_data,
process,
watch,
)
def test_basic():
    """Sample a busy thread and check the aggregated profile tree."""

    def test_g():
        sleep(0.01)

    def test_h():
        sleep(0.02)

    def test_f():
        for i in range(100):
            test_g()
            test_h()

    thread = threading.Thread(target=test_f)
    thread.daemon = True
    thread.start()

    state = create()
    # Take 100 samples of the worker thread's current frame.
    for i in range(100):
        sleep(0.02)
        frame = sys._current_frames()[thread.ident]
        process(frame, None, state)

    assert state["count"] == 100
    # Walk down the single-child spine until the tree branches (at test_f).
    d = state
    while len(d["children"]) == 1:
        d = first(d["children"].values())

    assert d["count"] == 100
    assert "test_f" in str(d["description"])
    g = [c for c in d["children"].values() if "test_g" in str(c["description"])][0]
    h = [c for c in d["children"].values() if "test_h" in str(c["description"])][0]

    # test_h sleeps twice as long, so it should collect more samples.
    assert g["count"] < h["count"]
    assert 95 < g["count"] + h["count"] <= 100

    pd = plot_data(state)
    assert len(set(map(len, pd.values()))) == 1  # all same length
    assert len(set(pd["color"])) > 1  # different colors
@pytest.mark.skipif(
    WINDOWS, reason="no low-level profiler support for Windows available"
)
def test_basic_low_level():
    """Sample low-level (C) stacks and check they land in the profile tree."""
    pytest.importorskip("stacktrace")

    state = create()
    for i in range(100):
        sleep(0.02)
        # Idiom fix: the unused `frame = sys._current_frames()[...]` local
        # was removed; only the low-level stacks feed the profile here.
        llframes = {threading.get_ident(): ll_get_stack(threading.get_ident())}
        for f in llframes.values():
            if f is not None:
                llprocess(f, None, state)

    assert state["count"] == 100
    children = state.get("children")
    assert children
    expected = "<low-level>"
    # Idiom fix: iterate items() instead of zip(keys(), values()).
    for k, v in children.items():
        desc = v.get("description")
        assert desc
        filename = desc.get("filename")
        assert expected in k and filename == expected
def test_merge():
    """Two profile trees merge by summing counts of same-identifier nodes."""
    a1 = {
        "count": 5,
        "identifier": "root",
        "description": "a",
        "children": {
            "b": {
                "count": 3,
                "description": "b-func",
                "identifier": "b",
                "children": {},
            },
            "c": {
                "count": 2,
                "description": "c-func",
                "identifier": "c",
                "children": {},
            },
        },
    }
    a2 = {
        "count": 4,
        "description": "a",
        "identifier": "root",
        "children": {
            "d": {
                "count": 2,
                "description": "d-func",
                "children": {},
                "identifier": "d",
            },
            "c": {
                "count": 2,
                "description": "c-func",
                "children": {},
                "identifier": "c",
            },
        },
    }
    # "b" and "d" appear once each; "c" appears in both and its counts add.
    expected = {
        "count": 9,
        "identifier": "root",
        "description": "a",
        "children": {
            "b": {
                "count": 3,
                "description": "b-func",
                "identifier": "b",
                "children": {},
            },
            "d": {
                "count": 2,
                "description": "d-func",
                "identifier": "d",
                "children": {},
            },
            "c": {
                "count": 4,
                "description": "c-func",
                "identifier": "c",
                "children": {},
            },
        },
    }
    assert merge(a1, a2) == expected
def test_merge_empty():
    """Merging zero, one, or two fresh states yields a fresh state."""
    fresh = create()
    assert merge() == fresh
    assert merge(create()) == fresh
    assert merge(create(), create()) == fresh
def test_call_stack():
    """call_stack yields a list of strings ending at the current function."""
    current_frame = sys._current_frames()[threading.get_ident()]
    stack = call_stack(current_frame)
    assert isinstance(stack, list)
    for entry in stack:
        assert isinstance(entry, str)
    # The innermost entry corresponds to this very test function.
    assert "test_call_stack" in str(stack[-1])
def test_identifier():
    """identifier() is deterministic, including for a None frame."""
    frame = sys._current_frames()[threading.get_ident()]
    assert identifier(frame) == identifier(frame)
    assert identifier(None) == identifier(None)
def test_watch():
    """watch() spawns a sampler thread that winds down when stop() fires."""
    start = time()

    def stop():
        return time() > start + 0.500

    start_threads = threading.active_count()

    log = watch(interval="10ms", cycle="50ms", stop=stop)

    start = time()  # wait until thread starts up
    while threading.active_count() <= start_threads:
        assert time() < start + 2
        sleep(0.01)

    sleep(0.5)
    assert 1 < len(log) < 10

    # After stop() returns True the watcher thread should exit.
    start = time()
    while threading.active_count() > start_threads:
        assert time() < start + 2
        sleep(0.01)
@dataclasses.dataclass(frozen=True)
class FakeCode:
    """Minimal stand-in for a code object, for line-number lookup tests."""

    co_filename: str
    co_name: str
    co_firstlineno: int
    co_lnotab: bytes
    # (start_offset, end_offset, line_number) triples yielded by co_lines().
    co_lines_seq: Sequence[tuple[int, int, int | None]]
    co_code: bytes

    def co_lines(self) -> Iterator[tuple[int, int, int | None]]:
        # Mirrors CodeType.co_lines() over the canned sequence.
        yield from self.co_lines_seq
# Shared fixture: a fake code object for the `example` function below,
# with both the legacy lnotab and the modern co_lines() line table.
FAKE_CODE = FakeCode(
    co_filename="<stdin>",
    co_name="example",
    co_firstlineno=1,
    # https://github.com/python/cpython/blob/b68431fadb3150134ac6ccbf501cdfeaf4c75678/Objects/lnotab_notes.txt#L84
    # generated from:
    # def example():
    #     for i in range(1):
    #         if i >= 0:
    #             pass
    # example.__code__.co_lnotab
    co_lnotab=b"\x00\x01\x0c\x01\x08\x01\x04\xfe",
    # generated with list(example.__code__.co_lines())
    co_lines_seq=[
        (0, 12, 2),
        (12, 20, 3),
        (20, 22, 4),
        (22, 24, None),
        (24, 28, 2),
    ],
    # used in dis.findlinestarts as bytecode_len = len(code.co_code)
    # https://github.com/python/cpython/blob/6f345d363308e3e6ecf0ad518ea0fcc30afde2a8/Lib/dis.py#L457
    co_code=bytes(28),
)
@dataclasses.dataclass(frozen=True)
class FakeFrame:
    """Minimal stand-in for a frame object paired with FakeCode."""

    f_lasti: int                 # last bytecode instruction offset
    f_code: FakeCode
    f_lineno: int | None = None  # explicit line number, if known
    f_back: FakeFrame | None = None
    f_globals: dict[str, object] = dataclasses.field(default_factory=dict)
# Each case maps a bytecode offset (f_lasti) to the line number that the
# FAKE_CODE line table should resolve it to.
@pytest.mark.parametrize(
    "f_lasti,f_lineno",
    [
        (-1, 1),
        (0, 2),
        (1, 2),
        (11, 2),
        (12, 3),
        (21, 4),
        (22, 4),
        (23, 4),
        (24, 2),
        (25, 2),
        (26, 2),
        (27, 2),
        (100, 2),
    ],
)
def test_info_frame_f_lineno(f_lasti: int, f_lineno: int) -> None:
    """info_frame derives the line number from f_lasti via the line table."""
    assert info_frame(FakeFrame(f_lasti=f_lasti, f_code=FAKE_CODE)) == {
        "filename": "<stdin>",
        "name": "example",
        "line_number": f_lineno,
        "line": "",
    }
# Same offset-to-line mapping as test_info_frame_f_lineno, checked through
# the call_stack rendering path.
@pytest.mark.parametrize(
    "f_lasti,f_lineno",
    [
        (-1, 1),
        (0, 2),
        (1, 2),
        (11, 2),
        (12, 3),
        (21, 4),
        (22, 4),
        (23, 4),
        (24, 2),
        (25, 2),
        (26, 2),
        (27, 2),
        (100, 2),
    ],
)
def test_call_stack_f_lineno(f_lasti: int, f_lineno: int) -> None:
    """call_stack formats the resolved line number into the stack entry."""
    assert call_stack(FakeFrame(f_lasti=f_lasti, f_code=FAKE_CODE)) == [
        f'  File "<stdin>", line {f_lineno}, in example\n\t'
    ]
|
__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Han Xiao <artex.xh@gmail.com> <https://hanxiao.github.io>
import sys
import threading
import time
import uuid
import warnings
from collections import namedtuple
from functools import wraps
import numpy as np
import zmq
from zmq.utils import jsonapi
__all__ = ['__version__', 'BertClient', 'ConcurrentBertClient']
# in the future client version must match with server version
__version__ = '1.8.6'

# Pull in Python-2/3-specific helper definitions.
if sys.version_info >= (3, 0):
    from ._py3_var import *
else:
    from ._py2_var import *

# _Response: raw wire reply (request id + payload);
# Response: decoded, user-facing result.
_Response = namedtuple('_Response', ['id', 'content'])
Response = namedtuple('Response', ['id', 'embedding', 'tokens'])
class BertClient(object):
def __init__(self, ip='localhost', port=5555, port_out=5556,
             output_fmt='ndarray', show_server_config=False,
             identity=None, check_version=True, check_length=True,
             check_token_info=True, ignore_all_checks=False,
             timeout=-1):
    """ A client object connected to a BertServer
    Create a BertClient that connects to a BertServer.
    Note, server must be ready at the moment you are calling this function.
    If you are not sure whether the server is ready, then please set `ignore_all_checks=True`
    You can also use it as a context manager:
    .. highlight:: python
    .. code-block:: python
        with BertClient() as bc:
            bc.encode(...)
        # bc is automatically closed out of the context
    :type timeout: int
    :type check_version: bool
    :type check_length: bool
    :type check_token_info: bool
    :type ignore_all_checks: bool
    :type identity: str
    :type show_server_config: bool
    :type output_fmt: str
    :type port_out: int
    :type port: int
    :type ip: str
    :param ip: the ip address of the server
    :param port: port for pushing data from client to server, must be consistent with the server side config
    :param port_out: port for publishing results from server to client, must be consistent with the server side config
    :param output_fmt: the output format of the sentence encodes, either in numpy array or python List[List[float]] (ndarray/list)
    :param show_server_config: whether to show server configs when first connected
    :param identity: the UUID of this client
    :param check_version: check if server has the same version as client, raise AttributeError if not the same
    :param check_length: check if server `max_seq_len` is less than the sentence length before sent
    :param check_token_info: check if server can return tokenization
    :param ignore_all_checks: ignore all checks, set it to True if you are not sure whether the server is ready when constructing BertClient()
    :param timeout: set the timeout (milliseconds) for receive operation on the client, -1 means no timeout and wait until result returns
    """
    # PUSH socket carries requests to the server; LINGER=0 so close() never blocks
    self.context = zmq.Context()
    self.sender = self.context.socket(zmq.PUSH)
    self.sender.setsockopt(zmq.LINGER, 0)
    # the identity doubles as the SUB topic, so this client only receives
    # responses addressed to it
    self.identity = identity or str(uuid.uuid4()).encode('ascii')
    self.sender.connect('tcp://%s:%d' % (ip, port))
    self.receiver = self.context.socket(zmq.SUB)
    self.receiver.setsockopt(zmq.LINGER, 0)
    self.receiver.setsockopt(zmq.SUBSCRIBE, self.identity)
    self.receiver.connect('tcp://%s:%d' % (ip, port_out))
    self.request_id = 0
    self.timeout = timeout
    # ids sent but not yet answered, and out-of-order answers parked by _recv
    self.pending_request = set()
    self.pending_response = {}
    if output_fmt == 'ndarray':
        self.formatter = lambda x: x
    elif output_fmt == 'list':
        self.formatter = lambda x: x.tolist()
    else:
        raise AttributeError('"output_fmt" must be "ndarray" or "list"')
    self.output_fmt = output_fmt
    self.port = port
    self.port_out = port_out
    self.ip = ip
    self.length_limit = 0
    self.token_info_available = False
    # optional startup handshake: fetch server config once and run the
    # requested sanity checks against it
    if not ignore_all_checks and (check_version or show_server_config or check_length or check_token_info):
        s_status = self.server_status
        if check_version and s_status['server_version'] != self.status['client_version']:
            raise AttributeError('version mismatch! server version is %s but client version is %s!\n'
                                 'consider "pip install -U bert-serving-server bert-serving-client"\n'
                                 'or disable version-check by "BertClient(check_version=False)"' % (
                                     s_status['server_version'], self.status['client_version']))
        if check_length:
            # None means the server imposes no max_seq_len; encode() warns then
            if s_status['max_seq_len'] is not None:
                self.length_limit = int(s_status['max_seq_len'])
            else:
                self.length_limit = None
        if check_token_info:
            self.token_info_available = bool(s_status['show_tokens_to_client'])
        if show_server_config:
            self._print_dict(s_status, 'server config:')
def close(self):
    """
    Gently shut down this client's sockets and terminate its ZeroMQ context.

    Not required when the client is used as a context manager.
    """
    for sock in (self.sender, self.receiver):
        sock.close()
    self.context.term()
def _send(self, msg, msg_len=0):
    """Push one request to the server and record its id as pending.

    The frame layout is [identity, payload, request_id, payload_length];
    returns the request id assigned to this message.
    """
    self.request_id += 1
    frames = [self.identity, msg, b'%d' % self.request_id, b'%d' % msg_len]
    self.sender.send_multipart(frames)
    self.pending_request.add(self.request_id)
    return self.request_id
def _recv(self, wait_for_req_id=None):
    """Receive one response from the server.

    Blocks on the SUB socket. When `wait_for_req_id` is given, responses
    belonging to other requests are parked in `self.pending_response` until
    the matching one arrives; otherwise the first response is returned.

    :param wait_for_req_id: request id to wait for, or None for "any"
    :return: a `_Response(id, content)` namedtuple
    """
    # NOTE: the original wrapped this in `except Exception as e: raise e`,
    # which is a no-op that only rewrote the traceback origin; removed.
    try:
        while True:
            # the awaited response arrived earlier and was parked
            if wait_for_req_id in self.pending_response:
                response = self.pending_response.pop(wait_for_req_id)
                return _Response(wait_for_req_id, response)
            # receive a response; the request id is the last frame
            response = self.receiver.recv_multipart()
            request_id = int(response[-1])
            # not waiting for a particular id, or this is the one we want
            if not wait_for_req_id or (wait_for_req_id == request_id):
                self.pending_request.remove(request_id)
                return _Response(request_id, response)
            else:
                # response for a different request: park it and keep waiting
                self.pending_response[request_id] = response
    finally:
        # ensure the awaited id is dropped from the pending set even when
        # recv raises (e.g. a zmq timeout converted by _timeout)
        if wait_for_req_id in self.pending_request:
            self.pending_request.remove(wait_for_req_id)
def _recv_ndarray(self, wait_for_req_id=None):
    # Receive one embedding response and decode it into a Response tuple.
    # response[1] is a JSON header (dtype, shape, optional tokens);
    # response[2] is the raw array buffer.
    request_id, response = self._recv(wait_for_req_id)
    arr_info, arr_val = jsonapi.loads(response[1]), response[2]
    # _buffer is the py2/py3 compatibility shim imported from _py{2,3}_var
    X = np.frombuffer(_buffer(arr_val), dtype=str(arr_info['dtype']))
    return Response(request_id, self.formatter(X.reshape(arr_info['shape'])), arr_info.get('tokens', ''))
@property
def status(self):
    """
    Snapshot of this client's configuration and request-tracking state.

    :rtype: dict[str, str]
    :return: a dictionary describing this BertClient instance
    """
    return dict(
        identity=self.identity,
        num_request=self.request_id,
        num_pending_request=len(self.pending_request),
        pending_request=self.pending_request,
        output_fmt=self.output_fmt,
        port=self.port,
        port_out=self.port_out,
        server_ip=self.ip,
        client_version=__version__,
        timeout=self.timeout,
    )
def _timeout(func):
    # Decorator (defined in the class body, applied to methods below).
    # Applies self.timeout to the SUB socket for the duration of the call and
    # converts a zmq receive timeout into a friendlier TimeoutError.
    @wraps(func)
    def arg_wrapper(self, *args, **kwargs):
        if 'blocking' in kwargs and not kwargs['blocking']:
            # override client timeout setting if `func` is called in non-blocking way
            self.receiver.setsockopt(zmq.RCVTIMEO, -1)
        else:
            self.receiver.setsockopt(zmq.RCVTIMEO, self.timeout)
        try:
            return func(self, *args, **kwargs)
        except zmq.error.Again as _e:
            t_e = TimeoutError(
                'no response from the server (with "timeout"=%d ms), please check the following:'
                'is the server still online? is the network broken? are "port" and "port_out" correct? '
                'are you encoding a huge amount of data whereas the timeout is too small for that?' % self.timeout)
            if _py2:
                raise t_e
            else:
                # py3: chain the zmq error as the cause (_raise is a version shim)
                _raise(t_e, _e)
        finally:
            # always restore "wait forever" on the socket afterwards
            self.receiver.setsockopt(zmq.RCVTIMEO, -1)
    return arg_wrapper
@property
@_timeout
def server_status(self):
    """
    Get the current status of the server connected to this client
    :return: a dictionary contains the current status of the server connected to this client
    :rtype: dict[str, str]
    """
    # full round-trip: send SHOW_CONFIG, then block (subject to _timeout)
    # for the JSON reply
    req_id = self._send(b'SHOW_CONFIG')
    return jsonapi.loads(self._recv(req_id).content[1])
@_timeout
def encode(self, texts, blocking=True, is_tokenized=False, show_tokens=False):
    """ Encode a list of strings to a list of vectors
    `texts` should be a list of strings, each of which represents a sentence.
    If `is_tokenized` is set to True, then `texts` should be list[list[str]],
    outer list represents sentence and inner list represent tokens in the sentence.
    Note that if `blocking` is set to False, then you need to fetch the result manually afterwards.
    .. highlight:: python
    .. code-block:: python
        with BertClient() as bc:
            # encode untokenized sentences
            bc.encode(['First do it',
                      'then do it right',
                      'then do it better'])
            # encode tokenized sentences
            bc.encode([['First', 'do', 'it'],
                       ['then', 'do', 'it', 'right'],
                       ['then', 'do', 'it', 'better']], is_tokenized=True)
    :type is_tokenized: bool
    :type show_tokens: bool
    :type blocking: bool
    :type timeout: bool
    :type texts: list[str] or list[list[str]]
    :param is_tokenized: whether the input texts is already tokenized
    :param show_tokens: whether to include tokenization result from the server. If true, the return of the function will be a tuple
    :param texts: list of sentence to be encoded. Larger list for better efficiency.
    :param blocking: wait until the encoded result is returned from the server. If false, will immediately return.
    :param timeout: throw a timeout error when the encoding takes longer than the predefined timeout.
    :return: encoded sentence/token-level embeddings, rows correspond to sentences
    :rtype: numpy.ndarray or list[list[float]]
    """
    # validate input shape/content before touching the network
    if is_tokenized:
        self._check_input_lst_lst_str(texts)
    else:
        self._check_input_lst_str(texts)
    # length_limit is None when the server enforces no max_seq_len (see __init__)
    if self.length_limit is None:
        warnings.warn('server does not put a restriction on "max_seq_len", '
                      'it will determine "max_seq_len" dynamically according to the sequences in the batch. '
                      'you can restrict the sequence length on the client side for better efficiency')
    elif self.length_limit and not self._check_length(texts, self.length_limit, is_tokenized):
        warnings.warn('some of your sentences have more tokens than "max_seq_len=%d" set on the server, '
                      'as consequence you may get less-accurate or truncated embeddings.\n'
                      'here is what you can do:\n'
                      '- disable the length-check by create a new "BertClient(check_length=False)" '
                      'when you do not want to display this warning\n'
                      '- or, start a new server with a larger "max_seq_len"' % self.length_limit)
    req_id = self._send(jsonapi.dumps(texts), len(texts))
    # non-blocking mode: caller retrieves results via fetch()/fetch_all()
    if not blocking:
        return None
    r = self._recv_ndarray(req_id)
    if self.token_info_available and show_tokens:
        return r.embedding, r.tokens
    elif not self.token_info_available and show_tokens:
        warnings.warn('"show_tokens=True", but the server does not support showing tokenization info to clients.\n'
                      'here is what you can do:\n'
                      '- start a new server with "bert-serving-start -show_tokens_to_client ..."\n'
                      '- or, use "encode(show_tokens=False)"')
    return r.embedding
def fetch(self, delay=.0):
""" Fetch the encoded vectors from server, use it with `encode(blocking=False)`
Use it after `encode(texts, blocking=False)`. If there is no pending requests, will return None.
Note that `fetch()` does not preserve the order of the requests! Say you have two non-blocking requests,
R1 and R2, where R1 with 256 samples, R2 with 1 samples. It could be that R2 returns first.
To fetch all results in the original sending order, please use `fetch_all(sort=True)`
:type delay: float
:param delay: delay in seconds and then run fetcher
:return: a generator that yields request id and encoded vector in a tuple, where the request id can be used to determine the order
:rtype: Iterator[tuple(int, numpy.ndarray)]
"""
time.sleep(delay)
while self.pending_request:
yield self._recv_ndarray()
def fetch_all(self, sort=True, concat=False):
""" Fetch all encoded vectors from server, use it with `encode(blocking=False)`
Use it `encode(texts, blocking=False)`. If there is no pending requests, it will return None.
:type sort: bool
:type concat: bool
:param sort: sort results by their request ids. It should be True if you want to preserve the sending order
:param concat: concatenate all results into one ndarray
:return: encoded sentence/token-level embeddings in sending order
:rtype: numpy.ndarray or list[list[float]]
"""
if self.pending_request:
tmp = list(self.fetch())
if sort:
tmp = sorted(tmp, key=lambda v: v.id)
tmp = [v.embedding for v in tmp]
if concat:
if self.output_fmt == 'ndarray':
tmp = np.concatenate(tmp, axis=0)
elif self.output_fmt == 'list':
tmp = [vv for v in tmp for vv in v]
return tmp
def encode_async(self, batch_generator, max_num_batch=None, delay=0.1, **kwargs):
    """Encode batches from a generator in the background.

    A producer thread submits each batch with `encode(..., blocking=False)`;
    results are streamed back through `fetch(delay)`.

    :param delay: seconds the fetcher sleeps before draining results
    :param batch_generator: yields list[str] (or list[list[str]] with `is_tokenized=True`)
    :param max_num_batch: stop after submitting this many batches (None = all)
    :param `**kwargs`: forwarded to `encode()`
    :return: generator over (request_id, embedding) tuples
    :rtype: Iterator[tuple(int, numpy.ndarray)]
    """
    def producer():
        for batch_idx, texts in enumerate(batch_generator, start=1):
            self.encode(texts, blocking=False, **kwargs)
            if max_num_batch and batch_idx == max_num_batch:
                break
    threading.Thread(target=producer).start()
    return self.fetch(delay)
@staticmethod
def _check_length(texts, len_limit, tokenized):
if tokenized:
# texts is already tokenized as list of str
return all(len(t) <= len_limit for t in texts)
else:
# do a simple whitespace tokenizer
return all(len(t.split()) <= len_limit for t in texts)
@staticmethod
def _check_input_lst_str(texts):
    # Validate that `texts` is a non-empty list of non-empty strings.
    # _str/_py2/_unicode are py2/py3 compatibility shims from _py{2,3}_var.
    if not isinstance(texts, list):
        raise TypeError('"%s" must be %s, but received %s' % (texts, type([]), type(texts)))
    if not len(texts):
        raise ValueError(
            '"%s" must be a non-empty list, but received %s with %d elements' % (texts, type(texts), len(texts)))
    for idx, s in enumerate(texts):
        if not isinstance(s, _str):
            raise TypeError('all elements in the list must be %s, but element %d is %s' % (type(''), idx, type(s)))
        if not s.strip():
            raise ValueError(
                'all elements in the list must be non-empty string, but element %d is %s' % (idx, repr(s)))
        if _py2:
            # python 2: normalize each element to unicode in place
            texts[idx] = _unicode(texts[idx])
@staticmethod
def _check_input_lst_lst_str(texts):
if not isinstance(texts, list):
raise TypeError('"texts" must be %s, but received %s' % (type([]), type(texts)))
if not len(texts):
raise ValueError(
'"texts" must be a non-empty list, but received %s with %d elements' % (type(texts), len(texts)))
for s in texts:
BertClient._check_input_lst_str(s)
@staticmethod
def _print_dict(x, title=None):
if title:
print(title)
for k, v in x.items():
print('%30s\t=\t%-30s' % (k, v))
def __enter__(self):
    # context-manager support: `with BertClient() as bc: ...`
    return self

def __exit__(self, exc_type, exc_val, exc_tb):
    # close sockets and terminate the zmq context on leaving the `with` block
    self.close()
class BCManager():
    """Context manager that borrows a BertClient from a shared pool.

    Entering pops a client from `available_bc` (raising IndexError when the
    pool is exhausted); exiting returns the same client to the pool.
    """

    def __init__(self, available_bc):
        self.available_bc = available_bc
        self.bc = None

    def __enter__(self):
        # take the most recently returned client from the pool
        self.bc = self.available_bc.pop()
        return self.bc

    def __exit__(self, *args):
        # hand the client back, even when the body raised
        self.available_bc.append(self.bc)
class ConcurrentBertClient(BertClient):
    # Thread-safe facade: keeps a pool of real BertClient instances and
    # borrows one per call via BCManager.
    def __init__(self, max_concurrency=10, **kwargs):
        """ A thread-safe client object connected to a BertServer
        Create a BertClient that connects to a BertServer.
        Note, server must be ready at the moment you are calling this function.
        If you are not sure whether the server is ready, then please set `check_version=False` and `check_length=False`
        :type max_concurrency: int
        :param max_concurrency: the maximum number of concurrent connections allowed
        """
        # re-import shadows the module-level BertClient with the installed
        # package's class, and doubles as an availability check
        try:
            from bert_serving.client import BertClient
        except ImportError:
            raise ImportError('BertClient module is not available, it is required for serving HTTP requests.'
                              'Please use "pip install -U bert-serving-client" to install it.'
                              'If you do not want to use it as an HTTP server, '
                              'then remove "-http_port" from the command line.')
        # NOTE: BertClient.__init__ is deliberately not called on self; this
        # object owns no sockets, only the pooled clients below.
        self.available_bc = [BertClient(**kwargs) for _ in range(max_concurrency)]
        self.max_concurrency = max_concurrency

    def close(self):
        # close every pooled client
        for bc in self.available_bc:
            bc.close()

    def _concurrent(func):
        # Decorator: run the wrapped call on a client borrowed from the pool.
        @wraps(func)
        def arg_wrapper(self, *args, **kwargs):
            try:
                with BCManager(self.available_bc) as bc:
                    f = getattr(bc, func.__name__)
                    # properties (e.g. `status`) resolve to plain dict values
                    # via getattr, hence the isinstance check before calling
                    r = f if isinstance(f, dict) else f(*args, **kwargs)
                    return r
            except IndexError:
                raise RuntimeError('Too many concurrent connections!'
                                   'Try to increase the value of "max_concurrency", '
                                   'currently =%d' % self.max_concurrency)
        return arg_wrapper

    # bodies are `pass`: the decorator delegates by method *name* to a pooled client
    @_concurrent
    def encode(self, **kwargs):
        pass

    @property
    @_concurrent
    def server_status(self):
        pass

    @property
    @_concurrent
    def status(self):
        pass

    def fetch(self, **kwargs):
        raise NotImplementedError('Async encoding of "ConcurrentBertClient" is not implemented yet')

    def fetch_all(self, **kwargs):
        raise NotImplementedError('Async encoding of "ConcurrentBertClient" is not implemented yet')

    def encode_async(self, **kwargs):
        raise NotImplementedError('Async encoding of "ConcurrentBertClient" is not implemented yet')
|
ssh_utils.py | #!/usr/bin/env python
#
# Copyright (c) Greenplum Inc 2008. All Rights Reserved.
#
# This file contains ssh Session class and support functions/classes.
import cmd
import os
import sys
import socket
import threading
from gppylib.commands.base import WorkerPool, REMOTE, ExecutionError
from gppylib.commands.unix import Hostname, Echo
sys.path.insert(1, sys.path[0] + '/lib')
from pexpect import pxssh
class HostNameError(Exception):
    """Raised for an invalid host name; records the offending input line when known."""

    def __init__(self, msg, lineno=0):
        if lineno:
            self.msg = '%s at line %d' % (msg, lineno)
        else:
            self.msg = msg

    def __str__(self):
        return self.msg
class SSHError(Exception):
    """Raised when an ssh connectivity check against a host fails."""

    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return self.msg
# Utility Functions
def ssh_prefix(host):
    """Return the non-interactive ssh command prefix for `host`."""
    return 'ssh -o "BatchMode yes" -o "StrictHostKeyChecking no" ' + host
def get_hosts(hostsfile):
    """Parse `hostsfile` and return the list of host names it contains."""
    hosts = HostList()
    hosts.parseFile(hostsfile)
    return hosts.get()
class HostList():
    # Accumulates validated host names; helpers check reachability and
    # collapse multi-homed hosts.  (Python 2 module: note the
    # `except socket.error, e` syntax below.)
    def __init__(self):
        self.list = []

    def get(self):
        # return the accumulated host list
        return self.list

    def add(self, host, lineno=0):
        '''Add a host to the hostlist.'''
        # we don't allow the user@ syntax here
        if host.find('@') >= 0:
            raise HostNameError(host, lineno)
        # MPP-13617 - check for ipv6
        if host.find(':') >= 0:
            try:
                socket.inet_pton(socket.AF_INET6, host)
            except socket.error, e:
                raise HostNameError(str(e), lineno)
        # MPP-13617 - check for ipv4
        if host.find('.') >= 0:
            octs = host.split('.')
            # only validate as IPv4 when it is four all-digit octets;
            # otherwise treat it as a dotted host name
            if len(octs) == 4 and False not in [o.isdigit() for o in octs]:
                try:
                    socket.inet_pton(socket.AF_INET, host)
                except socket.error, e:
                    raise HostNameError(str(e), lineno)
        self.list.append(host)
        return self.list

    def parseFile(self, path):
        '''Add lines in a file to the hostlist.'''
        with open(path) as fp:
            for i, line in enumerate(fp):
                line = line.strip()
                # skip blank lines and comments
                if not line or line[0] == '#':
                    continue
                self.add(line, i+1)
        return self.list

    def checkSSH(self):
        '''Check that ssh to hostlist is okay.'''
        pool = WorkerPool()
        for h in self.list:
            cmd = Echo('ssh test', '', ctxt=REMOTE, remoteHost=h)
            pool.addCommand(cmd)
        pool.join()
        pool.haltWork()
        for cmd in pool.getCompletedItems():
            if not cmd.get_results().wasSuccessful():
                raise SSHError("Unable to ssh to '%s'" % cmd.remoteHost)
        return True

    def filterMultiHomedHosts(self):
        '''For multiple host that is of the same node, keep only one in the hostlist.'''
        unique = {}
        pool = WorkerPool()
        for h in self.list:
            cmd = Hostname('hostname', ctxt=REMOTE, remoteHost=h)
            pool.addCommand(cmd)
        pool.join()
        pool.haltWork()
        for finished_cmd in pool.getCompletedItems():
            hostname = finished_cmd.get_hostname()
            if (not hostname):
                # hostname lookup failed: keep the raw address as its own key
                unique[finished_cmd.remoteHost] = finished_cmd.remoteHost
            elif not unique.get(hostname):
                unique[hostname] = finished_cmd.remoteHost
            elif hostname == finished_cmd.remoteHost:
                # prefer the entry whose address equals the canonical hostname
                unique[hostname] = finished_cmd.remoteHost
        self.list = unique.values()
        return self.list
# Session is a command session, derived from a base class cmd.Cmd
class Session(cmd.Cmd):
    '''Implements a list of open ssh sessions ready to execute commands'''
    # NOTE: these are class-level attributes, shared by all Session instances
    verbose=False
    hostList=[]
    userName=None
    echoCommand=False
    # SessionError: internal failures; SessionCmdExit: signals interactive exit
    class SessionError(StandardError): pass
    class SessionCmdExit(StandardError): pass
def __init__(self, hostList=None, userName=None):
    # Does not open any connections; call login() to establish ssh sessions.
    cmd.Cmd.__init__(self)
    self.pxssh_list = []
    self.prompt = '=> '
    self.peerStringFormatRaw = None
    if hostList:
        for host in hostList:
            # NOTE(review): appends to the shared class-level hostList, so
            # hosts accumulate across instances -- confirm intended
            self.hostList.append(host)
    if userName: self.userName=userName
def peerStringFormat(self):
    # Build (and cache) a '[%Ns]' format string sized to the longest peer
    # name, so per-host output prefixes line up in columns.
    if self.peerStringFormatRaw: return self.peerStringFormatRaw
    cnt = 0
    for p in self.pxssh_list:
        if cnt < len(p.x_peer): cnt = len(p.x_peer)
    self.peerStringFormatRaw = "[%%%ds]" % cnt
    return self.peerStringFormatRaw
def login(self, hostList=None, userName=None):
'''This is the normal entry point used to add host names to the object and log in to each of them'''
if self.verbose: print '\n[Reset ...]'
if not (self.hostList or hostList):
raise self.SessionError('No host list available to Login method')
if not (self.userName or userName):
raise self.SessionError('No user name available to Login method')
#Cleanup
self.clean()
if hostList: #We have a new hostlist to use, initialize it
self.hostList=[]
for host in hostList:
self.hostList.append(host)
if userName: self.userName=userName #We have a new userName to use
# MPP-6583. Save off term type and set to nothing before creating ssh process
origTERM = os.getenv('TERM', None)
os.putenv('TERM', '')
good_list = []
print_lock = threading.Lock()
def connect_host(host):
self.hostList.append(host)
p = pxssh.pxssh(options={"StrictHostKeyChecking": "no",
"BatchMode": "yes"})
try:
# The sync_multiplier value is passed onto pexpect.pxssh which is used to determine timeout
# values for prompt verification after an ssh connection is established.
p.login(host, self.userName, sync_multiplier=1.0)
p.x_peer = host
p.x_pid = p.pid
good_list.append(p)
if self.verbose:
with print_lock:
print '[INFO] login %s' % host
except:
with print_lock:
print '[ERROR] unable to login to %s' % host
print 'hint: use gpssh-exkeys to setup public-key authentication between hosts'
thread_list = []
for host in hostList:
t = threading.Thread(target=connect_host, args=(host,))
t.start()
thread_list.append(t)
for t in thread_list:
t.join()
# Restore terminal type
if origTERM:
os.putenv('TERM', origTERM)
self.pxssh_list = good_list
def close(self):
    # alias for clean(); returns the worst exit status observed on logout
    return self.clean()

def reset(self):
    '''reads from all the ssh connections to make sure we dont have any pending cruft'''
    for s in self.pxssh_list:
        s.readlines()

def clean(self):
    # log out of every open session and forget them
    net_return_code = self.closePxsshList(self.pxssh_list)
    self.pxssh_list = []
    return net_return_code

def emptyline(self):
    # cmd.Cmd hook: make an empty input line a no-op
    # (the default behavior would repeat the previous command)
    pass
def escapeLine(self, line):
    '''Escape backslash, double-quote and dollar characters in `line`, then
    wrap it as an `eval "..." < /dev/null` shell command.  Raises
    SessionCmdExit for the interactive exit commands (EOF/exit/quit).'''
    line = line.strip()
    if line in ('EOF', 'exit', 'quit'):
        raise self.SessionCmdExit()
    # replace each special character with its backslash-escaped form,
    # in the same order as before: backslash first, then quote, then dollar
    for raw, escaped in (('\\', '\\\\'), ('"', '\\"'), ('$', '\\$')):
        line = escaped.join(line.split(raw))
    return 'eval "' + line + '" < /dev/null'
def executeCommand(self,command):
    # Send `command` to every open session, wait for all of them to finish,
    # then return an iterator of per-host output-line lists (session order).
    commandoutput=[]
    if self.echoCommand:
        # prefix with an echo of the command so it shows up in the output
        escapedCommand = command.replace('"', '\\"')
        command = 'echo "%s"; %s' % (escapedCommand, command)
    #Execute the command in all of the ssh sessions
    for s in self.pxssh_list:
        s.sendline(command)
    #Wait for each command and retrieve the output
    for s in self.pxssh_list:
        #Wait for each command to finish
        #!! TODO verify that this is a tight wait loop and find another way to do this
        while not s.prompt(120) and s.isalive() and not s.eof(): pass
    for s in self.pxssh_list:
        #Split the output into an array of lines so that we can add text to the beginning of
        # each line
        output = s.before.split('\n')
        # drop the echoed command line (first) and the prompt fragment (last)
        output = output[1:-1]
        commandoutput.append(output)
    return commandoutput.__iter__()
# Interactive command line handler
# Override of base class, handles commands that aren't recognized as part of a predefined set
# The "command" argument is a command line to be executed on all available command sessions
# The output of the command execution is printed to the standard output, prepended with
# the hostname of each machine from which the output came
def default(self, command):
line = self.escapeLine(command)
if self.verbose: print command
#Execute the command on our ssh sessions
commandoutput=self.executeCommand(command)
self.writeCommandOutput(commandoutput)
def writeCommandOutput(self,commandoutput):
    '''Takes a list of output lists as an iterator and writes them to standard output,
    formatted with the hostname from which each output array was obtained'''
    # commandoutput entries correspond 1:1 with self.pxssh_list sessions
    for s in self.pxssh_list:
        output = commandoutput.next()
        #Write the output
        if len(output) == 0:
            print (self.peerStringFormat() % s.x_peer)
        else:
            for line in output:
                # python 2 print with a parenthesized value then a comma:
                # prints "[host] <line>" on one row
                print (self.peerStringFormat() % s.x_peer), line
def closePxsshList(self,list):
    # Log out of every pxssh session in parallel and return the maximum
    # exit status seen (0 when the list is empty).
    lock = threading.Lock()
    return_codes = [0]
    def closePxsshOne(p, return_codes):
        p.logout()
        # the lock guards the shared return_codes list across threads
        with lock:
            return_codes.append(p.exitstatus)
    th = []
    for p in list:
        t = threading.Thread(target=closePxsshOne, args=(p, return_codes))
        t.start()
        th.append(t)
    for t in th:
        t.join()
    return max(return_codes)
|
test_deadlock.py | from fast_map import fast_map
from threading import Thread, Lock
import time
def f(x):
    """Worker: blocks on the module-level lock_G, then returns x unchanged."""
    global lock_G
    print('f entered with x =', x)
    lock_G.acquire()
    try:
        print('lock_G was acquired, x =', x)
        time.sleep(1)
    finally:
        lock_G.release()
    return x
def release_lock_after_delay(lock):
    """Release `lock` after a one-second delay (intended for a helper thread)."""
    delay_seconds = 1
    time.sleep(delay_seconds)
    lock.release()
# Hold the global lock so fast_map workers block inside f() until we release it.
lock_G = Lock()
lock_G.acquire()
# Thread(target=release_lock_after_delay, daemon=True, args=[lock_G]).start()
gen = fast_map(f, range(3))
time.sleep(1)
print('releasing lock')
lock_G.release()
for i in gen:
    print(i)
# Second run after releasing the lock in this process.
# NOTE(review): per the message below, workers presumably operate on a copy
# of the module state captured while lock_G was held, so they still see it
# locked -- confirm against fast_map's process-spawning behavior.
print('checking global lock (will not recognize that the lock is released)')
for i in fast_map(f, range(3)):
    print(i)
print('global lock done')
|
Action.py | import time
from modules.base.Configuration import *
from modules.base.Instances import *
from plugins.dfplayer.Platform import Platform
import threading
@configuration
class DFPlayerActionConfiguration(ActionConfiguration):
    '''Configuration settings for the DFPlayer'''

    @validator('platform')
    def check_platform(cls, v):
        # reject configurations that target any platform other than "dfplayer"
        platform_name = "dfplayer"
        if v != platform_name:
            raise ValueError("wrong script platform: " + platform_name + ", is: " + v)
        return v
class DfPlayerState(BaseState):
    '''Represents the state of the DFPlayer'''
    # True while a track is assumed to be playing; cleared by Action's
    # delayed thread after a fixed 5 s (the device link is TX-only, so
    # actual playback state cannot be read back)
    is_playing = False
    '''Actually, there is only a TX connection implemented, paytime is hardcoded to 5s'''
class Action(BaseAction, Logging):
    '''To invoke this action, pass this values:
    values:
    - command: set_vol
    - volume: 30
    or, values:
    - command: next_track
    '''

    def __init__(self, parent: Platform, config: DFPlayerActionConfiguration) -> None:
        super().__init__(parent, config)
        self.platform = parent
        # driver object exposing the player commands (set_vol, next_track, ...)
        self.player = parent.player
        self.state = DfPlayerState()

    def invoke(self, call_stack: CallStack):
        if self.configuration.variables is not None:
            # NOTE(review): the return value of with_keys() is discarded --
            # confirm it mutates call_stack in place, otherwise the
            # configured variables are lost here
            call_stack.with_keys(self.configuration.variables)
        # mark as playing immediately; cleared by the delayed thread below
        self.state.is_playing = True
        self.on_state_changed(call_stack)
        command_name = call_stack.get("{{command}}")
        if type(command_name) is str:
            # dispatch by name onto the player driver; commands that take an
            # argument read it from the call stack
            if hasattr(self.player, command_name):
                method = getattr(self.player, command_name)
                if command_name == "set_vol":
                    method(call_stack.get("{{volume}}"))
                elif command_name == "set_eq":
                    method(call_stack.get("{{equalizer}}"))
                elif command_name == "set_mode":
                    method(call_stack.get("{{mode}}"))
                elif command_name == "set_folder":
                    method(call_stack.get("{{folder_index}}"))
                else:
                    method()
            else:
                self.log_error("Unknown command: " + command_name)
        else:
            self.log_error("Unknown command: " + str(command_name))
        super().invoke(call_stack)

        def update_state_delayed():
            # playback duration is not reported by the device; assume 5 s
            time.sleep(5)
            self.state.is_playing = False
            self.on_state_changed(call_stack)

        loop_thread = threading.Thread(target=update_state_delayed)
        loop_thread.start()
|
generate_tolerance_label.py | """ Tolerance label generation.
Author: chenxi-wang
"""
import os
import sys
import numpy as np
import time
import argparse
import multiprocessing as mp
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
from data_utils import compute_point_dists
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_root', required=True, help='Dataset root')
parser.add_argument('--pos_ratio_thresh', type=float, default=0.8, help='Threshold of positive neighbor ratio[default: 0.8]')
parser.add_argument('--mu_thresh', type=float, default=0.55, help='Threshold of friction coefficient[default: 0.55]')
parser.add_argument('--num_workers', type=int, default=50, help='Worker number[default: 50]')
cfgs = parser.parse_args()
save_path = 'tolerance'
V = 300
A = 12
D = 4
radius_list = [0.001 * x for x in range(51)]
def manager(obj_name, pool_size=8):
    """Compute and save the tolerance label for one object.

    Loads the object's grasp labels, then distributes per-point tolerance
    computation over up to `pool_size` worker processes, starting a new
    worker whenever one finishes.  Per-point results are collected in a
    shared dict and saved to '<save_path>/<obj_name>_tolerance.npy'.

    :param obj_name: object id string, e.g. '000'
    :param pool_size: maximum number of concurrent worker processes
    """
    # load models
    label_path = '{}_labels.npz'.format(obj_name)
    label = np.load(os.path.join(cfgs.dataset_root, 'grasp_label', label_path))
    points = label['points']
    scores = label['scores']
    # shared dict {point_index: tolerance array} visible across processes
    tolerance = mp.Manager().dict()
    dists = compute_point_dists(points, points)
    # FIX: was a duplicated `params = params = (scores, dists)` assignment
    params = (scores, dists)
    # assign works
    pool = []
    process_cnt = 0
    work_list = [x for x in range(len(points))]
    # FIX: bound the initial fill by len(work_list) -- popping pool_size
    # items unconditionally raised IndexError when there were fewer points
    # than workers
    for _ in range(min(pool_size, len(work_list))):
        point_ind = work_list.pop(0)
        pool.append(mp.Process(target=worker, args=(obj_name, point_ind, params, tolerance)))
    [p.start() for p in pool]
    # refill: whenever a worker exits, start a new one on the next point
    while len(work_list) > 0:
        for ind, p in enumerate(pool):
            if not p.is_alive():
                pool.pop(ind)
                point_ind = work_list.pop(0)
                p = mp.Process(target=worker, args=(obj_name, point_ind, params, tolerance))
                p.start()
                pool.append(p)
                process_cnt += 1
                print('{}/{}'.format(process_cnt, len(points)))
                break
    # drain the remaining workers
    while len(pool) > 0:
        for ind, p in enumerate(pool):
            if not p.is_alive():
                pool.pop(ind)
                process_cnt += 1
                print('{}/{}'.format(process_cnt, len(points)))
                break
    # save tolerance
    if not os.path.exists(save_path):
        os.mkdir(save_path)
    saved_tolerance = np.array([tolerance[i] for i in range(len(points))])
    np.save('{}/{}_tolerance.npy'.format(save_path, obj_name), saved_tolerance)
def worker(obj_name, point_ind, params, tolerance):
    # Compute the tolerance entry for one point: for growing radii r, each
    # (view, angle, depth) bin keeps tolerance r as long as the fraction of
    # positive grasps among points within distance r stays above the
    # configured threshold.
    # assumes scores has shape (num_points, V, A, D) -- TODO confirm
    scores, dists = params
    tmp_tolerance = np.zeros([V, A, D], dtype=np.float32)
    tic = time.time()
    for r in radius_list:
        dist_mask = (dists[point_ind] <= r)
        scores_in_ball = scores[dist_mask]
        # positive = graspable with friction coefficient in (0, mu_thresh]
        pos_ratio = ((scores_in_ball > 0) & (scores_in_ball <= cfgs.mu_thresh)).mean(axis=0)
        tolerance_mask = (pos_ratio >= cfgs.pos_ratio_thresh)
        if tolerance_mask.sum() == 0:
            # no bin satisfies the ratio threshold any more: stop growing r
            break
        tmp_tolerance[tolerance_mask] = r
    tolerance[point_ind] = tmp_tolerance
    toc = time.time()
    print("{}: point {} time".format(obj_name, point_ind), toc - tic)
if __name__ == '__main__':
    # Process objects one at a time (start + join inside the loop runs them
    # sequentially); each manager fans out to cfgs.num_workers processes.
    obj_list = ['%03d' % x for x in range(88)]
    for obj_name in obj_list:
        p = mp.Process(target=manager, args=(obj_name, cfgs.num_workers))
        p.start()
        p.join()
client.py | #!/usr/bin/env python3
# Hydrus is released under WTFPL
# You just DO WHAT THE FUCK YOU WANT TO.
# https://github.com/sirkris/WTFPL/blob/master/WTFPL.md
try:
    if True:  # kept from upstream: preserves the original extra indent level
        import sys
        print(f"In venv?: {hasattr(sys, 'real_prefix') or sys.base_prefix != sys.prefix}")
        import locale
        # best-effort: switch to the user's default locale
        try: locale.setlocale( locale.LC_ALL, '' )
        except: pass
        # initialise Qt here, important it is done early
        from hydrus import QtPorting as QP
        from hydrus import HydrusConstants as HC
        from hydrus import HydrusPaths
        from hydrus import HydrusGlobals as HG
        import os
        import argparse
        argparser = argparse.ArgumentParser( description = 'hydrus network client (console)' )
        argparser.add_argument( '-d', '--db_dir', help = 'set an external db location' )
        argparser.add_argument( '--temp_dir', help = 'override the program\'s temporary directory' )
        argparser.add_argument( '--no_daemons', action='store_true', help = 'run without background daemons' )
        argparser.add_argument( '--no_wal', action='store_true', help = 'run without WAL db journaling' )
        argparser.add_argument( '--db_memory_journaling', action='store_true', help = 'run db journaling entirely in memory (DANGEROUS)' )
        argparser.add_argument( '--db_synchronous_override', help = 'override SQLite Synchronous PRAGMA (range 0-3, default=2)' )
        argparser.add_argument( '--no_db_temp_files', action='store_true', help = 'run db temp operations entirely in memory' )
        result = argparser.parse_args()
        # resolve the db directory: explicit flag > default dir > user dir
        if result.db_dir is None:
            db_dir = HC.DEFAULT_DB_DIR
            if not HydrusPaths.DirectoryIsWritable( db_dir ) or HC.RUNNING_FROM_MACOS_APP:
                db_dir = HC.USERPATH_DB_DIR
        else:
            db_dir = result.db_dir
        db_dir = HydrusPaths.ConvertPortablePathToAbsPath( db_dir, HC.BASE_DIR )
        try:
            HydrusPaths.MakeSureDirectoryExists( db_dir )
        except:
            raise Exception( 'Could not ensure db path "{}" exists! Check the location is correct and that you have permission to write to it!'.format( db_dir ) )
        if not os.path.isdir( db_dir ):
            raise Exception( 'The given db path "{}" is not a directory!'.format( db_dir ) )
        if not HydrusPaths.DirectoryIsWritable( db_dir ):
            raise Exception( 'The given db path "{}" is not a writable-to!'.format( db_dir ) )
        # copy command-line flags onto the global state module
        HG.no_daemons = result.no_daemons
        HG.no_wal = result.no_wal
        HG.db_memory_journaling = result.db_memory_journaling
        if result.db_synchronous_override is not None:
            try:
                db_synchronous_override = int( result.db_synchronous_override )
            except ValueError:
                raise Exception( 'db_synchronous_override must be an integer in the range 0-3' )
            if db_synchronous_override not in range( 4 ):
                raise Exception( 'db_synchronous_override must be in the range 0-3' )
            # NOTE(review): the parsed value is validated but never assigned
            # to HG here -- confirm it is picked up elsewhere
        HG.no_db_temp_files = result.no_db_temp_files
        if result.temp_dir is not None:
            HydrusPaths.SetEnvTempDir( result.temp_dir )
        HydrusPaths.AddBaseDirToEnvPath()
        from hydrus import HydrusPy2To3
        HydrusPy2To3.do_2to3_test()
        from hydrus import HydrusData
        from hydrus import HydrusLogger
        import traceback
        # twisted is optional; flag its absence rather than failing boot
        try:
            from twisted.internet import reactor
        except:
            HG.twisted_is_broke = True
except Exception as e:
    # boot failed: best-effort logging to console and to a crash file
    import traceback
    import os
    try:
        from hydrus import HydrusData
        HydrusData.DebugPrint( 'Critical boot error occurred! Details written to crash.log!' )
        HydrusData.PrintException( e )
    except:
        pass
    error_trace = traceback.format_exc()
    print( error_trace )
    # pick a writable place for the crash log: db dir, else Desktop, else home
    if 'db_dir' in locals() and os.path.exists( db_dir ):
        emergency_dir = db_dir
    else:
        emergency_dir = os.path.expanduser( '~' )
    possible_desktop = os.path.join( emergency_dir, 'Desktop' )
    if os.path.exists( possible_desktop ) and os.path.isdir( possible_desktop ):
        emergency_dir = possible_desktop
    dest_path = os.path.join( emergency_dir, 'hydrus_crash.log' )
    with open( dest_path, 'w', encoding = 'utf-8' ) as f:
        f.write( error_trace )
    print( 'Critical boot error occurred! Details written to hydrus_crash.log in either db dir or user dir!' )
    import sys
    sys.exit( 1 )
# Main run loop: everything below executes only after the boot block above
# succeeded (db_dir resolved, twisted probed, HydrusLogger importable).
controller = None

# All output from here on is captured by the hydrus logger for the 'client' domain.
with HydrusLogger.HydrusLogger( db_dir, 'client' ) as logger:
    try:
        HydrusData.Print( 'hydrus client started' )
        if not HG.twisted_is_broke:
            import threading
            # Run the twisted reactor on a background thread; signal handlers are
            # disabled because the reactor does not own the main thread here.
            threading.Thread( target = reactor.run, name = 'twisted', kwargs = { 'installSignalHandlers' : 0 } ).start()
        from hydrus import ClientController
        controller = ClientController.Controller( db_dir )
        controller.Run()
    except:
        # Catch-all: any controller failure is logged rather than propagated so
        # the shutdown bookkeeping in finally still runs.
        HydrusData.Print( 'hydrus client failed' )
        HydrusData.Print( traceback.format_exc() )
    finally:
        # Signal every daemon/loop that we are going down, whether or not Run()
        # completed cleanly.
        HG.view_shutdown = True
        HG.model_shutdown = True
        if controller is not None:
            controller.pubimmediate( 'wake_daemons' )
        if not HG.twisted_is_broke:
            # reactor.stop must be scheduled on the reactor's own thread.
            reactor.callFromThread( reactor.stop )
        HydrusData.Print( 'hydrus client shut down' )

HG.shutdown_complete = True
if HG.restart:
    # The user requested an in-place restart from the UI; re-exec the process.
    HydrusData.RestartProcess()
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import core
import sys
import six
import numpy as np
import threading
import paddle
import time
from .framework import Program, Variable, program_guard, default_main_program, default_startup_program, _non_static_mode, cpu_places, _current_expected_place, _in_eager_without_dygraph_check
from .executor import global_scope
from .data_feeder import DataFeeder, BatchedTensorProvider
from .multiprocess_utils import multiprocess_queue_set, CleanupFuncRegistrar, _cleanup_mmap, _cleanup, _set_SIGCHLD_handler
from .dataloader import BatchSampler, Dataset, IterableDataset, Subset
from .dataloader.dataloader_iter import _DataLoaderIterSingleProcess, _DataLoaderIterMultiProcess, _DatasetKind, default_collate_fn
from .dataloader.batch_sampler import _InfiniteIterableSampler
from .layers.io import monkey_patch_reader_methods, _copy_reader_var_, double_buffer
from .unique_name import UniqueNameGenerator
from .framework import _get_paddle_place, _get_paddle_place_list
from paddle.fluid.framework import _set_expected_place, _current_expected_place
import logging
import warnings
### Dygraph DataLoader configs ###
import os
import multiprocessing
import signal
# NOTE: queue has a different name in python2 and python3
import queue
# NOTE: [ avoid hanging & failed quickly ] These value is used in getting data from another process
QUEUE_GET_TIMEOUT = 60
__all__ = ['PyReader', 'DataLoader', 'default_collate_fn']
data_loader_unique_name_generator = UniqueNameGenerator()
KEEP_DATA_LOADER_ORDER = True
USE_PINNED_MEMORY = None
# AutoTune Flags
USE_AUTOTUNE = False
TUNING_STEPS = 500
def set_autotune_config(use_autotune, tuning_steps=500):
    """Configure DataLoader num_workers auto-tuning.

    Args:
        use_autotune (bool): whether `AuToTune` should search for a good
            ``num_workers`` value when a DataLoader is constructed.
        tuning_steps (int): number of batches sampled per probe run.
    """
    global USE_AUTOTUNE, TUNING_STEPS
    USE_AUTOTUNE = use_autotune
    TUNING_STEPS = tuning_steps
def keep_data_loader_order(*args):
    """Get or set the global KEEP_DATA_LOADER_ORDER flag.

    Called with no argument it returns the current flag; called with a
    single bool it updates the flag (and returns None).
    """
    global KEEP_DATA_LOADER_ORDER
    if args:
        assert len(args) == 1 and isinstance(args[0], bool)
        KEEP_DATA_LOADER_ORDER = args[0]
    else:
        return KEEP_DATA_LOADER_ORDER
def use_pinned_memory(*args):
    """Get or set the global USE_PINNED_MEMORY flag.

    With no argument, returns the current value (may be None, meaning
    "use the framework default"); with a single bool, updates the flag.
    """
    global USE_PINNED_MEMORY
    if args:
        assert len(args) == 1 and isinstance(args[0], bool)
        USE_PINNED_MEMORY = args[0]
    else:
        return USE_PINNED_MEMORY
def _convert_places(places):
    """Normalize *places* to a list of ``core.Place`` objects.

    A single place is first wrapped into a one-element list; any entry
    that is not already a ``core.Place`` is wrapped via ``set_place``.
    """
    if not isinstance(places, (list, tuple)):
        places = [places]

    converted = []
    for place in places:
        if isinstance(place, core.Place):
            converted.append(place)
        else:
            wrapped = core.Place()
            wrapped.set_place(place)
            converted.append(wrapped)
    return converted
# NOTE(chenweihang): _reader_process_loop must be top level method to be pickled
def _reader_process_loop(batch_reader, data_queue):
    """Entry point of the data-loading child process.

    Pulls batches from ``batch_reader`` and pushes them, converted to
    tensor lists, into the inter-process ``data_queue``. A trailing
    ``None`` is pushed as an end-of-stream sentinel.
    """
    try:
        # set signal handler
        core._set_process_signal_handler()

        # NOTE: [ mmap files clear ] When the child process exits unexpectedly,
        # some shared memory objects may have been applied for but have not yet
        # been put into the inter-process Queue. This part of the object needs
        # to be cleaned up when the process ends.
        CleanupFuncRegistrar.register(_cleanup_mmap)

        for batch in batch_reader():
            tensor_list = core._convert_to_tensor_list(batch)
            data_queue.put(tensor_list)
            # NOTE(review): presumably releases this process's handles on the
            # shared-memory fds once the batch is queued — confirm against core.
            core._remove_tensor_list_mmap_fds(tensor_list)
        # End-of-stream sentinel for the consumer thread in the parent.
        data_queue.put(None)
    except KeyboardInterrupt:
        # NOTE: Main process will raise KeyboardInterrupt anyways, ignore it in child process
        pass
    except:
        # Re-raise with the original traceback (six keeps py2 compatibility).
        six.reraise(*sys.exc_info())
class DataLoaderBase(object):
    """Common base for DataLoader iterator wrappers.

    Subclasses implement the iterator protocol (``__iter__``/``__next__``);
    this base provides the callable-returns-self convention and shared
    input validation.
    """

    def __init__(self):
        self._places = None

    def __call__(self):
        # Loader instances are used as ``for data in loader():`` — calling
        # the loader simply yields the loader itself.
        return self

    def next(self):
        '''
        Get the next item in the DataLoader object. This method
        should not be called by users directly. It is used for
        implementing iterator protocol of Python 2.x inside
        PaddlePaddle framework.
        '''
        return self.__next__()

    def __iter__(self):
        raise NotImplementedError()

    def __next__(self):
        raise NotImplementedError()

    @classmethod
    def _check_input_array(cls, item):
        """Convert *item* to an ndarray, raising TypeError for ragged input.

        Raises:
            TypeError: if *item* cannot form a regular (non-object) ndarray,
                e.g. nested lists of differing lengths.
        """
        try:
            arr = np.asarray(item)
        except ValueError:
            # NumPy >= 1.24 raises ValueError for ragged nested sequences
            # instead of building an object array; map it to the TypeError
            # callers expect.
            arr = None
        # NOTE: ``np.object`` was removed in NumPy 1.24; the builtin ``object``
        # is the documented equivalent for this dtype comparison.
        if arr is None or arr.dtype == object:
            raise TypeError(
                "\n\tFaild to convert input data to a regular ndarray :\n\t* Usually "
                "this means the input data contains nested lists with different lengths. "
                "\n\t* Check the reader function passed to 'decorate_batch_generator'"
                " to locate the data causes this issue.\n\t* Please consider using "
                "'fluid.create_lod_tensor' to convert it to a LoD-Tensor.")
        return arr
class AuToTune(object):
    """Searches a good ``num_workers`` value for a DataLoader.

    When the module-level ``USE_AUTOTUNE`` flag is set, ``__call__``
    probes increasing worker counts against a small subset of the
    dataset and returns the cheapest configuration found; otherwise the
    loader's configured ``num_workers`` is returned unchanged.
    """

    def __init__(self, loader):
        self.loader = loader
        # Probe at most half of the CPU cores. Use integer floor division:
        # the original ``/ 2`` produced a float, which only worked because
        # the value is used solely in ``<`` comparisons.
        self.max_num_worker = multiprocessing.cpu_count() // 2

    def __call__(self):
        """Return the tuned (or pass-through) ``num_workers`` value."""
        # use default loader
        if (not USE_AUTOTUNE) or (not self.need_autotune()):
            return self.loader.num_workers

        # get autotune loader
        auto_tune_loader = self.get_autotune_loader()
        if auto_tune_loader is None:
            return self.loader.num_workers

        # pick the best num_workers
        auto_tune_start = time.time()
        logging.debug("========= DataLoader Auto Tune =========")
        logging.debug("User config for DataLoader: " + str(
            self.loader.num_workers))
        best_num_workers = 0
        min_cost = float("inf")
        logging.debug("Tuning Range for num_workers: 0 ~ " + str(
            self.max_num_worker))
        num_workers = 0
        while num_workers < self.max_num_worker:
            auto_tune_loader.num_workers = num_workers
            avg_cost = self.evaluate_reader_cost(auto_tune_loader)
            # Accept only improvements of at least 25% over the best so far.
            if min_cost * 0.75 > avg_cost:
                min_cost = avg_cost
                best_num_workers = num_workers
            else:
                update_num = self.is_best(auto_tune_loader, best_num_workers,
                                          min_cost, self.max_num_worker)
                if update_num == best_num_workers:
                    break
                else:
                    best_num_workers = update_num
            logging.debug("num_workers: " + str(num_workers) + " avg_cost: " +
                          str(avg_cost))
            num_workers += 2
        logging.info("auto_tune dataLoader best_num_workers: " + str(
            best_num_workers))
        logging.debug("AutoTuning Cost for DataLoader: " + str(time.time(
        ) - auto_tune_start) + ' seconds')

        # tune the default loader's num_workers
        return best_num_workers

    def need_autotune(self):
        """Auto-tuning uses multi-process loading, unsupported on macOS/Windows."""
        return sys.platform not in ('darwin', 'win32')

    def get_sub_dataset(self, dataset, batch_size):
        """Return a prefix Subset large enough for ``TUNING_STEPS`` batches."""
        num_samples = min(batch_size * TUNING_STEPS, len(dataset))
        sub_dataset = Subset(dataset, indices=list(range(num_samples)))
        return sub_dataset

    def get_autotune_loader(self):
        """Build a probe loader over a small sub-dataset.

        Returns None when the loader's batch sampler type is not supported.
        NOTE(review): this rebinds ``self.loader.batch_sampler`` to the
        sub-dataset sampler on the user's loader — confirm this mutation is
        intended before reuse.
        """
        loader = self.loader
        batch_size = self.loader.batch_sampler.batch_size
        if isinstance(self.loader.batch_sampler,
                      paddle.io.DistributedBatchSampler):
            dataset = self.loader.batch_sampler.dataset
            sub_dataset = self.get_sub_dataset(dataset, batch_size)
            loader.batch_sampler = paddle.io.DistributedBatchSampler(
                dataset=sub_dataset,
                batch_size=batch_size,
                num_replicas=self.loader.batch_sampler.nranks,
                rank=self.loader.batch_sampler.local_rank,
                shuffle=self.loader.batch_sampler.shuffle,
                drop_last=self.loader.batch_sampler.drop_last)
        elif isinstance(self.loader.batch_sampler, paddle.io.BatchSampler):
            dataset = self.loader.batch_sampler.sampler.data_source
            sub_dataset = self.get_sub_dataset(dataset, batch_size)
            loader.batch_sampler = paddle.io.BatchSampler(
                dataset=sub_dataset,
                batch_size=batch_size,
                drop_last=self.loader.batch_sampler.drop_last)
        else:
            loader = None
        return loader

    def evaluate_reader_cost(self, reader):
        """Average per-batch fetch time over *reader*.

        The first two batches are treated as warm-up and excluded when
        enough samples exist. Returns 0 for an empty reader (the original
        code raised ZeroDivisionError in that case).
        """
        costs = []
        avg_cost = 0
        start = time.time()
        for _ in reader:
            costs.append(time.time() - start)
            start = time.time()
        if not costs:
            return avg_cost
        if len(costs) > 2:
            avg_cost = sum(costs[2:]) / len(costs[2:])
        else:
            avg_cost = sum(costs[0:]) / len(costs[0:])
        return avg_cost

    def is_best(self, reader, best_workers, best_time, num_work_boundary):
        """Probe a few worker counts above *best_workers*.

        Returns the first count whose cost beats *best_time* by a shrinking
        margin, or *best_workers* when no candidate qualifies.
        """
        step = 0
        num_workers = best_workers + 1
        boundary = 1
        while num_workers < num_work_boundary and step < 5:
            self.loader.num_workers = num_workers
            # Named ``cost`` (not ``time``) to avoid shadowing the time module.
            cost = self.evaluate_reader_cost(reader)
            logging.debug("for back num_workers: " + str(num_workers) +
                          " avg_cost: " + str(cost))
            step += 1
            if cost < best_time * 0.70 * boundary:
                return num_workers
            num_workers += 1
            # Require progressively larger wins the further we probe.
            boundary *= 0.80
        return best_workers
class DataLoader(object):
    """
    DataLoader provides an iterator which iterates given dataset
    once by the batch_sampler.

    DataLoader supports single-process and multi-process data loading,
    multi-process workers will be used to load data asynchronously if
    :attr:`num_workers` is set as a positive number.

    DataLoader supports map-style dataset and iterable-style dataset.

    For map-style dataset(can get a sample from dataset with a given
    index), please see :code:`paddle.io.Dataset`.

    For iterable-style dataset(get samples from dataset iteratively,
    like a Python iterator), please see :code:`paddle.io.IterableDataset`.

    For :code:`batch_sampler` please see :code:`paddle.io.BatchSampler`

    .. note::
        GPU tensor operation is not supported in subprocess currently,
        please don't use GPU tensor operations in pipeline which will
        be performed in subprocess, such as dataset transforms, collate_fn,
        etc. Numpy array and CPU tensor operation is supported.

    **Disable automatic batching**

    In certain cases such as some NLP tasks, instead of automatic batching,
    handling batching manually in dataset is needed by users. For these
    cases, automatic batching is disabled if both :attr:`batch_size` and
    :attr:`batch_sampler` is set as None, each data got from :attr:`dataset`
    should be batched data and will be processed with function define by
    :attr:`collate_fn` or :attr:`default_collate_fn`.

    .. note::
        When automatic batching is disabled, :attr:`default_collate_fn` will
        do nothing to data from dataset.

    Args:
        dataset(Dataset): the dataset to load data from, should be an
            instance of subclass of :code:`paddle.io.Dataset` or
            :code:`paddle.io.IterableDataset`.
        feed_list (list(Tensor)|tuple(Tensor)): feed Tensor list.
            The Tensors should be created by :code:`paddle.static.data()`.
            :attr:`feed_list` must be set if :attr:`return_list` is
            False. Default None.
        places(list(Place)|tuple(Place)|list(str)|optional): a list of Place,
            to put data onto, :attr:`places` can be None, if
            :attr:`places` is None, default place(CPUPlace or CUDAPlace(0))
            will be used. Default None. If ``places`` is list of string,
            the string in the list can be ``cpu``, ``gpu:x`` and ``gpu_pinned``,
            where ``x`` is the index of the GPUs.
        return_list (bool): whether the return value on each device is
            presented as a list. If :attr:`return_list=False`, the return
            value on each device would be a dict of str -> Tensor, where
            the key of the dict is the name of each fed Tensors. If
            :attr:`return_list=True`, the return value on each device would
            be a list(Tensor). :attr:`return_list` can only be True
            in dynamic graph mode. Default True.
        batch_sampler(BatchSampler): an instance of `paddle.io.BatchSampler`
            to generate batch indices to draw samples from :attr:`dataset`
            and combine a batch. Default None.
        batch_size(int|None): sample number in a mini-batch, a substitution
            parameter for :attr:`batch_sampler`, if :attr:`batch_sampler`
            is not set, a default `paddle.io.BatchSampler` will be used
            and initialize by :attr:`batch_size`, :attr:`shuffle` and
            :attr:`drop_last`. Default 1.
        shuffle(bool): whether to shuffle indices order before generate
            batch indices, a substitution parameter for :attr:`batch_sampler`
            see :attr:`batch_size`. Default False.
        drop_last(bool): whether drop the last incomplete batch dataset size
            is not divisible by the batch size, a substitution parameter
            for :attr:`batch_sampler`, see :attr:`batch_size`. Default False
        collate_fn(callable): function to generate mini-batch data by merging
            the sample list, None for only stack each fields of sample in axis
            0(same as :attr::`np.stack(..., axis=0)`). Default None
        num_workers(int): the number of subprocess to load data, 0 for no
            subprocess used and loading data in main process. Default 0
        use_buffer_reader (bool): whether to use buffered reader.
            If use_buffer_reader=True, the DataLoader would prefetch next
            batch data asynchronously, so it would speed up data feeding
            and occupies a little more CPU or GPU memory, i.e., the memory
            of one batch input data. Default True.
        use_shared_memory (bool): whether to use shared memory to speed up
            putting data into inter-process queue, set :attr:`use_shared_memory`
            as True only when the shared memory space on your machine(e.g.
            space of '/dev/shm' on Linux operating system) is large enough.
            Shared memory will only be enabled in multi-process mode(num_workers
            > 0). Default True.
        timeout(int): the timeout value for getting data form output queue
            of subprocesses. Default 0.
        worker_init_fn(callable): init function which will be called with
            worker id on each subprocess starting if not set as None. Default
            None.

    Returns:
        DataLoader: an iterable object for data iterating, each element of the generated data is a Tensor.

    Examples:

        .. code-block:: python

            import numpy as np

            import paddle
            import paddle.nn as nn
            import paddle.nn.functional as F
            from paddle.io import Dataset, BatchSampler, DataLoader

            BATCH_NUM = 20
            BATCH_SIZE = 16
            EPOCH_NUM = 4

            IMAGE_SIZE = 784
            CLASS_NUM = 10

            # define a random dataset
            class RandomDataset(Dataset):
                def __init__(self, num_samples):
                    self.num_samples = num_samples

                def __getitem__(self, idx):
                    image = np.random.random([IMAGE_SIZE]).astype('float32')
                    label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')
                    return image, label

                def __len__(self):
                    return self.num_samples

            dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)

            class SimpleNet(nn.Layer):
                def __init__(self):
                    super(SimpleNet, self).__init__()
                    self.fc = nn.Linear(IMAGE_SIZE, CLASS_NUM)

                def forward(self, image, label=None):
                    return self.fc(image)

            simple_net = SimpleNet()
            opt = paddle.optimizer.SGD(learning_rate=1e-3,
                                       parameters=simple_net.parameters())

            loader = DataLoader(dataset,
                                batch_size=BATCH_SIZE,
                                shuffle=True,
                                drop_last=True,
                                num_workers=2)

            for e in range(EPOCH_NUM):
                for i, (image, label) in enumerate(loader()):
                    out = simple_net(image)
                    loss = F.cross_entropy(out, label)
                    avg_loss = paddle.mean(loss)
                    avg_loss.backward()
                    opt.minimize(avg_loss)
                    simple_net.clear_gradients()
                    print("Epoch {} batch {}: loss = {}".format(e, i, np.mean(loss.numpy())))


    .. note::
        For reading iterable dataset with multiprocess Dataloader,
        please see :code:`paddle.io.IterableDataset`

    """

    def __init__(self,
                 dataset,
                 feed_list=None,
                 places=None,
                 return_list=True,
                 batch_sampler=None,
                 batch_size=1,
                 shuffle=False,
                 drop_last=False,
                 collate_fn=None,
                 num_workers=0,
                 use_buffer_reader=True,
                 use_shared_memory=True,
                 timeout=0,
                 worker_init_fn=None,
                 persistent_workers=False):
        self.return_list = return_list
        self.collate_fn = collate_fn
        self.use_buffer_reader = use_buffer_reader
        self.worker_init_fn = worker_init_fn

        self.dataset = dataset

        # In static graph mode with dict-style results, callers must name the
        # fed tensors, hence feed_list is mandatory.
        if not return_list and not _non_static_mode():
            assert feed_list is not None, \
                "feed_list should be set when return_list=False"
        self.feed_list = feed_list

        if places is None:
            places = _current_expected_place()
        if isinstance(places, (list, tuple)):
            places = _get_paddle_place_list(places)
        else:
            places = _get_paddle_place(places)
        self.places = _convert_places(places)

        assert num_workers >= 0, "num_workers should be a non-negative value"
        # Multi-process loading is only supported on Linux; silently fall back
        # to single-process elsewhere.
        if num_workers > 0 and (sys.platform == 'darwin' or
                                sys.platform == 'win32'):
            warnings.warn(
                "DataLoader with multi-process mode is not supported on MacOs and Windows currently." \
                " Please use signle-process mode with num_workers = 0 instead")
            num_workers = 0
        self.num_workers = num_workers

        self.use_shared_memory = use_shared_memory
        # Shared memory is pointless without worker subprocesses.
        if use_shared_memory and num_workers == 0:
            self.use_shared_memory = False

        assert timeout >= 0, "timeout should be a non-negative value"
        self.timeout = timeout

        if isinstance(dataset, IterableDataset):
            self.dataset_kind = _DatasetKind.ITER
            # Iterable datasets have no index space, so neither shuffling nor
            # an explicit batch sampler makes sense.
            if shuffle:
                raise ValueError(
                    "IterableDataset not support shuffle, but got shuffle={}".
                    format(shuffle))
            if batch_sampler is not None:
                raise ValueError(
                    "IterableDataset expect unspecified batch_sampler")
        else:
            self.dataset_kind = _DatasetKind.MAP

        if batch_sampler is not None:
            # Explicit sampler wins; the substitution parameters must stay at
            # their defaults to avoid silent conflicts.
            assert batch_size == 1 and not shuffle and not drop_last, \
                "batch_size/shuffle/drop_last should not be set when " \
                "batch_sampler is given"
            self.batch_sampler = batch_sampler
            self.batch_size = None
        elif batch_size is None:
            # batch_size=None together with batch_sampler=None disables
            # automatic batching entirely (see class docstring).
            self.batch_sampler = None
            self.batch_size = None
        else:
            assert batch_size > 0, \
                "batch_size should be None or a positive value when " \
                "batch_sampler is not given"
            self.batch_size = batch_size
            if isinstance(dataset, IterableDataset):
                self.batch_sampler = _InfiniteIterableSampler(dataset,
                                                              batch_size)
            else:
                self.batch_sampler = BatchSampler(
                    dataset=dataset,
                    batch_size=batch_size,
                    shuffle=shuffle,
                    drop_last=drop_last)

        self.drop_last = drop_last
        self.auto_collate_batch = self.batch_sampler is not None

        self.pin_memory = False
        if _non_static_mode():
            # Respect the module-level use_pinned_memory() override; pin by
            # default in dygraph mode when unset.
            self.pin_memory = True if use_pinned_memory(
            ) is None else use_pinned_memory()

        self._persistent_workers = persistent_workers
        self._iterator = None
        # May replace num_workers with an auto-tuned value (no-op unless
        # set_autotune_config enabled tuning).
        self.num_workers = AuToTune(self).__call__()

    def __len__(self):
        if self.dataset_kind == _DatasetKind.ITER:
            raise ValueError("length of IterableDataset not supported")
        else:
            if self.auto_collate_batch:
                # Length in batches when auto-batching.
                return len(self.batch_sampler)
            else:
                # Length in samples when auto-batching is disabled.
                return len(self.dataset)

    def __iter__(self):
        if self.num_workers == 0:
            return _DataLoaderIterSingleProcess(self)
        elif self._persistent_workers:
            # Reuse one multi-process iterator across epochs, resetting its
            # state instead of respawning workers.
            if self._iterator is None:
                self._iterator = _DataLoaderIterMultiProcess(self)
            else:
                self._iterator._reset()
            return self._iterator
        else:
            return _DataLoaderIterMultiProcess(self)

    def __call__(self):
        return self.__iter__()

    @staticmethod
    def from_generator(feed_list=None,
                       capacity=None,
                       use_double_buffer=True,
                       iterable=True,
                       return_list=False,
                       use_multiprocess=False,
                       drop_last=True):
        """
        .. warning::
            This API will be deprecated in the future, it is recommended to use
            :code:`paddle.io.DataLoader` which supports multi-processes acceleration.

        .. note::
            **The framework ensures that the data loading order of DataLoader is exactly the same as the user-defined data source.**

        Create a DataLoader object for loading data from Python generator.
        Data would be prefetched using Python thread and be pushed
        into a queue asynchronously.

        The created DataLoader object provides 3 methods to set the data source
        :code:`set_sample_generator` , :code:`set_sample_list_generator` and
        :code:`set_batch_generator` . Please see the following example codes
        to know their usages.

        If iterable = True, the created DataLoader object is a Python generator
        object, which is iterable using for-range loop.

        If iterable = False, the created DataLoader object provides
        :code:`start()` and :code:`reset()` method to control the data reading
        process.

        Args:
            feed_list (list(Tensor)|tuple(Tensor)): feed Tensor list.
                The Tensors should be created by :code:`fluid.data()`.
            capacity (int): capacity of the queue maintained in DataLoader.
                The unit is batch number. Set larger capacity if your reader
                is fast.
            use_double_buffer (bool): whether to use double_buffer_reader.
                If use_double_buffer=True, the DataLoader would prefetch next
                batch data asynchronously, so it would speed up data feeding
                and occupies a little more CPU or GPU memory, i.e., the memory
                of one batch input data.
            iterable (bool): whether the created DataLoader is iterable.
            return_list (bool): whether the return value on each device is
                presented as a list. It is only valid when iterable=True.
                If return_list=False, the return value on each device would
                be a dict of str -> LoDTensor, where the key of the dict is
                the name of each fed Tensors. If return_list=True, the
                return value on each device would be a list(LoDTensor). It is
                recommended to use return_list=False in static graph mode and
                use return_list=True in dygraph mode.
            use_multiprocess (bool): whether to use multi-process to speed up
                the data loading process in dygraph. Note: this parameter only
                can be used in the dygraph mode. In the static graph mode,
                whether this parameter is set or not has no effect.
                The Default value is False.
            drop_last (bool): whether to drop the last batches whose number is
                less than the CPU core/GPU card number. The default value is
                True. In training phase, users should not set drop_last=False,
                because all CPU cores/GPU cards must read data from DataLoader.
                In inference phase, users can set drop_last=False, so that the
                last batches whose number is less than the CPU core/GPU card
                number can be tested.

        Returns:
            loader (DataLoader): the created DataLoader object.

        Examples 1:

            .. code-block:: python

                '''
                Example in static graph mode
                '''
                import numpy as np

                import paddle
                import paddle.static as static
                import paddle.nn.functional as F


                BATCH_NUM = 10
                BATCH_SIZE = 16
                EPOCH_NUM = 4

                CLASS_NUM = 10

                ITERABLE = True # whether the created DataLoader object is iterable
                USE_GPU = False # whether to use GPU

                DATA_FORMAT = 'batch_generator' # data format of data source user provides

                paddle.enable_static()

                def simple_net(image, label):
                    fc_tmp = static.nn.fc(image, size=CLASS_NUM)
                    cross_entropy = F.softmax_with_cross_entropy(image, label)
                    loss = paddle.mean(cross_entropy)
                    sgd = paddle.optimizer.SGD(learning_rate=1e-3)
                    sgd.minimize(loss)
                    return loss

                def get_random_images_and_labels(image_shape, label_shape):
                    image = np.random.random(size=image_shape).astype('float32')
                    label = np.random.random(size=label_shape).astype('int64')
                    return image, label

                # If the data generator yields one sample each time,
                # use DataLoader.set_sample_generator to set the data source.
                def sample_generator_creator():
                    def __reader__():
                        for _ in range(BATCH_NUM * BATCH_SIZE):
                            image, label = get_random_images_and_labels([784], [1])
                            yield image, label

                    return __reader__

                # If the data generator yield list of samples each time,
                # use DataLoader.set_sample_list_generator to set the data source.
                def sample_list_generator_creator():
                    def __reader__():
                        for _ in range(BATCH_NUM):
                            sample_list = []
                            for _ in range(BATCH_SIZE):
                                image, label = get_random_images_and_labels([784], [1])
                                sample_list.append([image, label])

                            yield sample_list

                    return __reader__

                # If the data generator yields a batch each time,
                # use DataLoader.set_batch_generator to set the data source.
                def batch_generator_creator():
                    def __reader__():
                        for _ in range(BATCH_NUM):
                            batch_image, batch_label = get_random_images_and_labels([BATCH_SIZE, 784], [BATCH_SIZE, 1])
                            yield batch_image, batch_label

                    return __reader__

                # If DataLoader is iterable, use for loop to train the network
                def train_iterable(exe, prog, loss, loader):
                    for _ in range(EPOCH_NUM):
                        for data in loader():
                            exe.run(prog, feed=data, fetch_list=[loss])

                # If DataLoader is not iterable, use start() and reset() method to control the process
                def train_non_iterable(exe, prog, loss, loader):
                    for _ in range(EPOCH_NUM):
                        loader.start() # call DataLoader.start() before each epoch starts
                        try:
                            while True:
                                exe.run(prog, fetch_list=[loss])
                        except paddle.core.EOFException:
                            loader.reset() # call DataLoader.reset() after catching EOFException

                def set_data_source(loader, places):
                    if DATA_FORMAT == 'sample_generator':
                        loader.set_sample_generator(sample_generator_creator(), batch_size=BATCH_SIZE, drop_last=True, places=places)
                    elif DATA_FORMAT == 'sample_list_generator':
                        loader.set_sample_list_generator(sample_list_generator_creator(), places=places)
                    elif DATA_FORMAT == 'batch_generator':
                        loader.set_batch_generator(batch_generator_creator(), places=places)
                    else:
                        raise ValueError('Unsupported data format')

                image = static.data(name='image', shape=[None, 784], dtype='float32')
                label = static.data(name='label', shape=[None, 1], dtype='int64')

                # Define DataLoader
                loader = paddle.io.DataLoader.from_generator(feed_list=[image, label], capacity=16, iterable=ITERABLE)

                # Define network
                loss = simple_net(image, label)

                # Set data source of DataLoader
                #
                # If DataLoader is iterable, places must be given and the number of places must be the same with device number.
                #  - If you are using GPU, call `paddle.static.cuda_places()` to get all GPU places.
                #  - If you are using CPU, call `paddle.static.cpu_places()` to get all CPU places.
                #
                # If DataLoader is not iterable, places can be None.
                places = static.cuda_places() if USE_GPU else static.cpu_places()
                set_data_source(loader, places)

                exe = static.Executor(places[0])
                exe.run(static.default_startup_program())

                prog = static.CompiledProgram(static.default_main_program()).with_data_parallel(loss_name=loss.name)

                if loader.iterable:
                    train_iterable(exe, prog, loss, loader)
                else:
                    train_non_iterable(exe, prog, loss, loader)


        Examples 2:

            .. code-block:: python

                '''
                Example in dynamic graph mode.
                '''
                import numpy as np

                import paddle
                import paddle.nn as nn
                import paddle.optimizer as opt
                import paddle.distributed as dist

                BATCH_SIZE = 16
                BATCH_NUM = 4
                EPOCH_NUM = 4

                IMAGE_SIZE = 784
                CLASS_NUM = 10

                USE_GPU = False # whether to use GPU

                def _get_random_images_and_labels(image_shape, label_shape):
                        image = np.random.random(size=image_shape).astype('float32')
                        label = np.random.random(size=label_shape).astype('int64')
                        return image, label

                def __reader__():
                        for _ in range(BATCH_NUM):
                            batch_image, batch_label = _get_random_images_and_labels(
                                [BATCH_SIZE, IMAGE_SIZE], [BATCH_SIZE, CLASS_NUM])
                            yield batch_image, batch_label

                def random_batch_reader():
                    return __reader__

                class LinearNet(nn.Layer):
                    def __init__(self):
                        super(LinearNet, self).__init__()
                        self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM)

                    @paddle.jit.to_static
                    def forward(self, x):
                        return self._linear(x)

                # set device
                paddle.set_device('gpu' if USE_GPU else 'cpu')

                # create network
                layer = LinearNet()
                dp_layer = paddle.DataParallel(layer)
                loss_fn = nn.CrossEntropyLoss()
                adam = opt.Adam(learning_rate=0.001, parameters=dp_layer.parameters())

                # create data loader
                loader = paddle.io.DataLoader.from_generator(capacity=5)
                loader.set_batch_generator(random_batch_reader())

                for epoch_id in range(EPOCH_NUM):
                    for batch_id, (image, label) in enumerate(loader()):
                        out = layer(image)
                        loss = loss_fn(out, label)

                        loss.backward()

                        adam.step()
                        adam.clear_grad()
                        print("Epoch {} batch {}: loss = {}".format(
                            epoch_id, batch_id, np.mean(loss.numpy())))

        Examples 3:

            .. code-block:: python

                '''
                Example of `drop_last` using in static graph multi-cards mode
                '''
                import paddle
                import paddle.static as static
                import numpy as np
                import os

                # We use 2 CPU cores to run inference network
                os.environ['CPU_NUM'] = '2'

                paddle.enable_static()

                # The data source has only 3 batches, which can not be
                # divided evenly to each CPU core
                def batch_generator():
                    for i in range(3):
                        yield np.array([i+1]).astype('float32'),

                x = static.data(name='x', shape=[None], dtype='float32')
                y = x * x

                def run_inference(drop_last):
                    loader = paddle.io.DataLoader.from_generator(feed_list=[x],
                            capacity=8, drop_last=drop_last)
                    loader.set_batch_generator(batch_generator, static.cpu_places())

                    exe = static.Executor(paddle.CPUPlace())
                    prog = static.CompiledProgram(static.default_main_program())
                    prog = prog.with_data_parallel()

                    result = []
                    for data in loader():
                        each_ret, = exe.run(prog, feed=data, fetch_list=[y])
                        result.extend(each_ret)
                    return result

                # Set drop_last to True, so that the last batch whose
                # number is less than CPU core number would be discarded.
                print(run_inference(drop_last=True)) # [1.0, 4.0]

                # Set drop_last to False, so that the last batch whose
                # number is less than CPU core number can be tested.
                print(run_inference(drop_last=False)) # [1.0, 4.0, 9.0]
        """
        # Dispatch on execution mode: the dygraph loader ignores drop_last,
        # the static-graph loader ignores use_multiprocess.
        if _non_static_mode():
            return DygraphGeneratorLoader(feed_list, capacity,
                                          use_double_buffer, iterable,
                                          return_list, use_multiprocess)
        else:
            return GeneratorLoader(feed_list, capacity, use_double_buffer,
                                   iterable, return_list, drop_last)

    @staticmethod
    def from_dataset(dataset, places, drop_last=True):
        """
        .. warning::
            This API will be deprecated in the future, it is recommended to use
            :code:`paddle.io.DataLoader` which supports multi-processes acceleration.

        Create an iterable DataLoader object for loading data from Dataset.
        Dataset is only supported in Linux system currently.

        Args:
            dataset (InMemoryDataset|QueueDataset): the dataset object.
            places (list(CUDAPlace)|list(CPUPlace)|list(str)): places where the result
                data should be converted. If places is list of string, the string in the list
                can be ``cpu``, ``gpu:x`` and ``gpu_pinned``, where x is the index of the GPUs.
            drop_last (bool): whether to drop the last batch whose sample
                number is less than batch size. If drop_last = True, they
                would be dropped. If drop_last = False, they would be kept.

        Returns:
            loader (DataLoader): the created DataLoader object, which can be
                treated as a Python generator.

        Examples:

            .. code-block:: python

                import paddle
                import paddle.static as static

                paddle.enable_static()

                image = static.data(name='image', shape=[None, 784], dtype='float32')
                label = static.data(name='label', shape=[None, 1], dtype='int64')

                dataset = paddle.distributed.QueueDataset()
                dataset.init(
                    batch_size=32,
                    pipe_command='cat',
                    use_var=[image, label])
                dataset.set_filelist(['a.txt', 'b.txt', 'c.txt'])

                loader = paddle.io.DataLoader.from_dataset(dataset, static.cpu_places())
        """
        return DatasetLoader(dataset, places, drop_last)
class DygraphGeneratorLoader(DataLoaderBase):
"""
The GeneratorLoader of dygraph
The multiprocess dygraph GeneratorLoader's most functions are different from
static graph GeneratorLoader, Separate implementation to keep code readable.
"""
def __init__(self,
             feed_list=None,
             capacity=None,
             use_double_buffer=True,
             iterable=True,
             return_list=True,
             use_multiprocess=False):
    """Set up a dygraph generator loader.

    Note: the dygraph loader supports only iterable mode with list
    results; other requested modes are coerced with a warning.
    """
    self._batch_reader = None
    self._places = None
    self._feed_list = feed_list

    # capacity (in batches) is mandatory — it sizes the blocking queue.
    if not capacity:
        raise ValueError("Please give value to capacity.")
    self._capacity = capacity
    self._use_double_buffer = use_double_buffer

    if not iterable:
        warnings.warn(
            "Please NOTE: DygraphGeneratorLoader supports iterable mode only. Change to iterable mode."
        )
    self._iterable = True
    if not return_list:
        warnings.warn(
            "Please NOTE: DygraphGeneratorLoader supports returning as list only. Change to return as list."
        )
    self._return_list = True

    # NOTE: the multiprocessing in different platform is incompatible, we will solve it later
    self._use_multiprocess = use_multiprocess
    if self._use_multiprocess and (sys.platform == 'darwin' or
                                   sys.platform == 'win32'):
        warnings.warn(
            "NOTE: DygraphGeneratorLoader with multiprocess mode is not currently supported on MacOs and Windows."
        )
        self._use_multiprocess = False

    if self._use_multiprocess:
        # NOTE: the multiprocessing.Queue used to save loading data in self._process
        self._data_queue = None
        # NOTE: this process is used to load data asynchronously from self._batch_reader
        self._process = None

    # NOTE: the C++ LoDTensorBlockingQueue instance
    self._blocking_queue = None
    # NOTE: 1. In multiprocess mode, this thread is used to get next batch data from
    # self._data_queue, then push it into self._blocking_queue; 2. In singleprocess
    # mode, this thread is used to get next batch data from self._batch_reader, then
    # push it into self._blocking_queue
    self._thread = None
    # Pin memory by default unless the user overrode use_pinned_memory().
    self._pin_memory = True if use_pinned_memory(
    ) is None else use_pinned_memory()
@property
def queue(self):
return self._blocking_queue
@property
def iterable(self):
return self._iterable
def _clear_and_remove_data_queue(self):
if self._data_queue is not None:
while True:
try:
self._data_queue.get_nowait()
except queue.Empty:
break
global multiprocess_queue_set
multiprocess_queue_set.remove(self._data_queue)
def _wait_thread_ends(self):
thread = self._thread
if thread is not None:
self._blocking_queue.close()
thread.join()
def _wait_process_ends(self):
process = self._process
if process is not None:
process.join()
# erase process id
core._erase_process_pids(id(self))
def _init_iterable(self):
self._wait_thread_ends()
if self._use_multiprocess:
self._wait_process_ends()
self._var_names = []
self._shapes = []
self._dtypes = []
self._need_check_feed = []
self._blocking_queue = core.init_lod_tensor_blocking_queue(
core.Variable(), self._capacity, False)
self._reader = None
self._reader = core.create_py_reader(
self.queue, self._var_names, self._shapes, self._dtypes,
self._need_check_feed, self._places, self._use_double_buffer, True,
self._pin_memory)
def _start(self):
if self._use_multiprocess:
# clear old _data_queue and remove it from multiprocess_queue_set
self._clear_and_remove_data_queue()
# set data_queue and process
self._data_queue = multiprocessing.Queue(self._capacity)
# add _data_queue into global queue set
global multiprocess_queue_set
multiprocess_queue_set.add(self._data_queue)
self._process = multiprocessing.Process(
target=_reader_process_loop,
args=(self._batch_reader, self._data_queue))
self._process.daemon = True
self._process.start()
# Set child process signal handler
# NOTE: [ avoiding hang ] 1. if the child process dies due to bus error/segfault
# or just hang, the main process will hang waiting for data, so here need to deal
# with SIGSEGV and SIGBUS of child process; 2. if the main process end before child
# process, it shuts the all its daemonic children down with a SIGTERM (instead of
# joining them without a timeout), so here nedd to deal with SIGTERM.
core._set_process_pids(id(self), [self._process.pid])
_set_SIGCHLD_handler()
# Set reader_thread
self._thread_done_event = threading.Event()
self._thread = threading.Thread(
target=self._reader_thread_loop_for_multiprocess,
args=(_current_expected_place(), ))
self._thread.daemon = True
self._thread.start()
else:
self._thread = threading.Thread(
target=self._reader_thread_loop_for_singleprocess,
args=(_current_expected_place(), ))
self._thread.daemon = True
self._thread.start()
def _reset(self):
self._reader.reset()
self._wait_thread_ends()
if self._use_multiprocess:
self._wait_process_ends()
def __iter__(self):
assert self.iterable, "DataLoader is not iterable"
assert self._batch_reader is not None, \
"Data source of DataLoader has not set yet"
self._init_iterable()
self._start()
return self
def __next__(self):
try:
if _in_eager_without_dygraph_check():
return core.eager.read_next_tensor_list(
self._reader.read_next_list()[0])
else:
return self._reader.read_next_var_list()
except StopIteration:
self._reset()
six.reraise(*sys.exc_info())
def _exit_thread_expectedly(self):
self._thread_done_event.set()
self._blocking_queue.close()
def _exit_thread_unexpectedly(self):
self._thread_done_event.set()
self._blocking_queue.kill()
logging.error("DataLoader reader thread raised an exception!")
def _reader_thread_loop_for_multiprocess(self, legacy_expected_place):
# See _DataLoaderIterSingleProcess._thread_loop() for why set expected place here.
_set_expected_place(legacy_expected_place)
while not self._thread_done_event.is_set():
try:
# NOTE: [ avoid hanging ] Even with carefully designed data dependencies
# (i.e., a put() always corresponding to a get()), hanging on get() can
# still happen when data in queue is corrupted (e.g., due to
# Queue.cancel_join_thread or unexpected exit). So we set a timeout whenever
# we try to get data from `data_queue`
# NOTE: [ avoid failed quickly ] Here, the time setting of QUEUE_GET_TIMEOUT
# is relatively long, currently it is 60 seconds, because in some models,
# if the reader child process starts with a heavy burden, the child process
# has no enough time to put the data in the queue when the main process
# start trying to get data from queue. At this time, the child thread needs
# to wait slightly longer
tensor_list = self._data_queue.get(timeout=QUEUE_GET_TIMEOUT)
except:
# NOTE [ avoid handing ] After adding the shared memory mechanism, not only
# the queue.Empty exception will occur here, but other exceptions will also
# occur, such as mmap failure. If it is not handled here, it will hang.
self._exit_thread_unexpectedly()
logging.error(
"DataLoader reader thread failed to read data from the multiprocessing.Queue."
)
six.reraise(*sys.exc_info())
if not self._thread_done_event.is_set():
if tensor_list is not None:
try:
array = core.LoDTensorArray()
for tensor in tensor_list:
array.append(tensor)
if not self._blocking_queue.push(array):
self._blocking_queue.close()
except:
self._exit_thread_unexpectedly()
six.reraise(*sys.exc_info())
else:
self._exit_thread_expectedly()
def _reader_thread_loop_for_singleprocess(self, legacy_expected_place):
try:
# See _DataLoaderIterSingleProcess._thread_loop() for why set expected place here.
_set_expected_place(legacy_expected_place)
for sample in self._batch_reader():
array = core.LoDTensorArray()
for item in sample:
if not isinstance(item, core.LoDTensor):
item = self._check_input_array(item)
tmp = core.LoDTensor()
tmp.set(item, core.CPUPlace())
item = tmp
array.append(item)
if not self._blocking_queue.push(array):
break
self._blocking_queue.close()
self._thread = None
except Exception:
self._blocking_queue.kill()
self._thread = None
logging.warning(
"DygraphDataLoader reader thread raised an exception.")
six.reraise(*sys.exc_info())
def set_sample_generator(self,
reader,
batch_size,
drop_last=True,
places=None):
assert batch_size > 0, "batch_size must be larger than 0"
if isinstance(places, (list, tuple)):
places = _get_paddle_place_list(places)
else:
places = _get_paddle_place(places)
self.set_sample_list_generator(
paddle.batch(
reader, batch_size=batch_size, drop_last=drop_last),
places=places)
return self
def set_sample_list_generator(self, reader, places=None):
if isinstance(places, (list, tuple)):
places = _get_paddle_place_list(places)
else:
places = _get_paddle_place(places)
def __batch_reader_impl__():
for batch in reader():
slots = []
for items in batch:
for i, item in enumerate(items):
if len(slots) < len(items):
slots.append([item])
else:
slots[i].append(item)
yield slots
self.set_batch_generator(__batch_reader_impl__, places)
return self
def set_batch_generator(self, reader, places=None):
if isinstance(places, (list, tuple)):
places = _get_paddle_place_list(places)
else:
places = _get_paddle_place(places)
self._batch_reader = reader
if places is None:
places = _current_expected_place()
self._places = _convert_places(places)
assert len(self._places) == 1, \
"Number of places must be 1 in imperative mode"
return self
class GeneratorLoader(DataLoaderBase):
    """Static-graph data loader fed by a Python generator.

    In iterable mode a daemon thread pushes LoDTensors into a C++ blocking
    queue that a py_reader consumes; in non-iterable mode `create_py_reader`
    and `read` ops are inserted into the program at construction time.
    """

    def __init__(self,
                 feed_list=None,
                 capacity=None,
                 use_double_buffer=True,
                 iterable=True,
                 return_list=False,
                 drop_last=True):
        self._tensor_reader = None
        self._places = None
        self._thread = None
        self._queue = None
        self._feed_list = feed_list
        self._exited = False
        self._drop_last = drop_last
        self._keep_order = keep_data_loader_order()
        if not capacity:
            raise ValueError("Please give value to capacity.")
        self._iterable = iterable
        self._return_list = return_list
        if not self._feed_list:
            raise Exception("Feed list must be given under static mode.")
        self._use_double_buffer = use_double_buffer
        self._capacity = capacity
        if not self._iterable:
            # Non-iterable mode wires reader ops into the program right away.
            self._init_non_iterable()

    def _wait_thread_ends(self):
        # Get self._thread first to prevent data race, because __thread_main__
        # would set self._thread be None at the end
        thread = self._thread
        if thread is not None and self._iterable:
            self._queue.close()
            thread.join()

    def _init_iterable(self):
        """Rebuild the blocking queue and C++ py_reader from the feed list
        metadata for a new epoch."""
        self._wait_thread_ends()
        self._var_names = [v.name for v in self._feed_list]
        self._shapes = [v.shape for v in self._feed_list]
        self._dtypes = [v.dtype for v in self._feed_list]
        self._need_check_feed = [
            v.desc.need_check_feed() for v in self._feed_list
        ]
        self._queue = core.init_lod_tensor_blocking_queue(
            core.Variable(), self._capacity, self._keep_order)
        self._reader = None
        self._reader = core.create_py_reader(
            self.queue, self._var_names, self._shapes, self._dtypes,
            self._need_check_feed, self._places, self._use_double_buffer,
            self._drop_last, False)

    def _init_non_iterable(self):
        """Insert `create_py_reader` and `read` ops into the program so that
        `Executor.run()` pulls data implicitly (non-iterable mode)."""
        lod_levels = []
        dtypes = []
        shape_concat = []
        ranks = []
        shapes = []
        need_check_feed = []
        # Flatten feed-variable metadata into the op-attribute layout that
        # create_py_reader expects (concatenated shapes plus per-var ranks).
        for feed_data in self._feed_list:
            dtypes.append(feed_data.dtype)
            shape_concat.extend(feed_data.shape)
            ranks.append(len(feed_data.shape))
            shapes.append(feed_data.shape)
            lod_levels.append(feed_data.lod_level)
            need_check_feed.append(int(feed_data.desc.need_check_feed()))
        queue_name = data_loader_unique_name_generator(
            'lod_tensor_blocking_queue')
        reader_name = data_loader_unique_name_generator('create_py_reader')
        double_buffer_name = data_loader_unique_name_generator('double_buffer')
        var = global_scope().var(queue_name)
        self._queue = core.init_lod_tensor_blocking_queue(var, self._capacity,
                                                          self._keep_order)
        # keep_order places the reader in the main program; otherwise it goes
        # to the startup program and is copied into the main program below.
        if self._keep_order:
            block = default_main_program().current_block()
        else:
            block = default_startup_program().current_block()
        reader_var = block.create_var(name=reader_name)
        dtype_int = [int(t) for t in dtypes]
        block.append_op(
            type='create_py_reader',
            inputs={'blocking_queue': [queue_name]},
            outputs={'Out': [reader_var]},
            attrs={
                'shape_concat': shape_concat,
                'lod_levels': lod_levels,
                'dtypes': dtype_int,
                'need_check_feed': need_check_feed,
                'ranks': ranks
            })
        reader_var.desc.set_dtypes(dtypes)
        reader_var.persistable = True
        reader_var.stop_gradient = True
        if self._keep_order:
            main_prog_var = reader_var
            reader = main_prog_var
            reader.reset = self._queue.reset
        else:
            main_prog_var = _copy_reader_var_(
                default_main_program().current_block(), reader_var)
            main_prog_var.stop_gradient = True
            main_prog_var.persistable = True
            reader = monkey_patch_reader_methods(main_prog_var)
        if self._use_double_buffer:
            double_buffer_reader = double_buffer(
                reader, name=double_buffer_name)
            # we return a double buffer reader. However, the reset method comes from
            # py_reader.
            double_buffer_reader.reset = reader.reset
            reader = double_buffer_reader
        self._reader = reader
        default_main_program().current_block().append_op(
            type='read',
            inputs={'Reader': [self._reader]},
            outputs={'Out': self._feed_list},
            attrs={'drop_last': self._drop_last})

    @property
    def queue(self):
        # The C++ LoDTensorBlockingQueue feeding the py_reader.
        return self._queue

    @property
    def iterable(self):
        return self._iterable

    def __iter__(self):
        assert self.iterable, "DataLoader is not iterable"
        assert self._tensor_reader is not None, \
            "Data source of DataLoader has not set yet"
        self._init_iterable()
        self._start()
        return self

    def __next__(self):
        try:
            if self._return_list:
                data = self._reader.read_next_list()
                for i in range(len(data)):
                    data[i] = data[i]._move_to_list()
                return data
            else:
                return self._reader.read_next()
        except StopIteration:
            self._queue.close()
            self._reset()
            six.reraise(*sys.exc_info())

    def start(self):
        """Start the feeding thread; non-iterable mode only."""
        assert not self._iterable, "start() cannot be called when DataLoader is iterable"
        self._start()

    def reset(self):
        """Reset after EOF; non-iterable mode only."""
        assert not self._iterable, "reset() cannot be called when DataLoader is iterable"
        self._reset()

    def _start(self):
        def __thread_main__(legacy_expected_place):
            try:
                # See _DataLoaderIterSingleProcess._thread_loop() for why set expected place here.
                _set_expected_place(legacy_expected_place)
                # Wait for the C++ queue to be initialized by the consumer,
                # bailing out if the loader was reset in the meantime.
                while not self._queue.wait_for_inited(1):
                    if self._exited:
                        return
                for tensors in self._tensor_reader():
                    array = core.LoDTensorArray()
                    for item in tensors:
                        if not isinstance(item, core.LoDTensor):
                            item = self._check_input_array(item)
                            tmp = core.LoDTensor()
                            tmp.set(item, core.CPUPlace())
                            item = tmp
                        array.append(item)
                    # push() returns False once the queue is closed.
                    if not self._queue.push(array):
                        break
                self._queue.close()
                self._thread = None
            except Exception as ex:
                self._queue.kill()
                self._thread = None
                logging.warning('Your reader has raised an exception!')
                six.reraise(*sys.exc_info())

        self._thread = threading.Thread(
            target=__thread_main__, args=(_current_expected_place(), ))
        self._thread.daemon = True
        self._thread.start()

    def _reset(self):
        self._queue.close()
        # _exited tells a thread still waiting in wait_for_inited() to leave.
        self._exited = True
        thread = self._thread
        if thread is not None:
            thread.join()
        self._exited = False
        self._reader.reset()

    def set_sample_generator(self,
                             reader,
                             batch_size,
                             drop_last=True,
                             places=None):
        """Use a per-sample generator as the data source. LoD inputs go
        through the list-generator path; dense inputs use the faster
        BatchedTensorProvider. Returns self for chaining."""
        assert batch_size > 0, "batch_size must be larger than 0"
        if isinstance(places, (list, tuple)):
            places = _get_paddle_place_list(places)
        else:
            places = _get_paddle_place(places)
        has_lod = False
        for f in self._feed_list:
            if f.lod_level != 0:
                has_lod = True
                break
        if has_lod:
            self.set_sample_list_generator(
                paddle.batch(
                    reader, batch_size=batch_size, drop_last=drop_last),
                places=places)
        else:
            reader = BatchedTensorProvider(
                feed_list=self._feed_list,
                place=core.CPUPlace(),
                batch_size=batch_size,
                generator=reader,
                drop_last=drop_last)
            self.set_batch_generator(reader, places=places)
        return self

    def set_sample_list_generator(self, reader, places=None):
        """Use a batched sample-list generator; a DataFeeder (built in a
        throwaway program) converts rows to tensors. Returns self."""
        if isinstance(places, (list, tuple)):
            places = _get_paddle_place_list(places)
        else:
            places = _get_paddle_place(places)
        with program_guard(Program(), Program()):
            feeder = DataFeeder(
                feed_list=self._feed_list, place=core.CPUPlace())
            paddle_reader = feeder.decorate_reader(reader, multi_devices=False)

        def __tensor_reader_impl__():
            for slots in paddle_reader():
                yield [slots[var.name] for var in self._feed_list]

        self.set_batch_generator(__tensor_reader_impl__, places)
        return self

    def set_batch_generator(self, reader, places=None):
        """Use an already-batched tensor generator as the data source.
        Places are required only in iterable mode. Returns self."""
        if isinstance(places, (list, tuple)):
            places = _get_paddle_place_list(places)
        else:
            places = _get_paddle_place(places)
        self._tensor_reader = reader
        if self._iterable:
            assert places is not None, "Places cannot be None when DataLoader is iterable"
            self._places = _convert_places(places)
        else:
            if places is not None:
                logging.info(
                    'places would be ommited when DataLoader is not iterable')
        return self
class PyReader(DataLoaderBase):
    r"""
    Create a reader object for data feeding in Python.
    Data would be prefetched using Python thread and be pushed
    into a queue asynchronously. Data in the queue would be extracted
    automatically when `Executor.run(...)` is called.

    Args:
        feed_list (list(Variable)|tuple(Variable)): feed variable list.
            The variables should be created by :code:`fluid.layers.data()`.
        capacity (int): capacity of the queue maintained in PyReader.
            The unit is batch number. Set larger capacity if your reader
            is fast.
        use_double_buffer (bool): whether to use double_buffer_reader.
            If use_double_buffer=True, PyReader would prefetch next
            batch data asynchronously, so it would speed up data feeding
            and occupies a little more CPU or GPU memory, i.e., the memory
            of one batch input data.
        iterable (bool): whether the created PyReader is iterable.
        return_list (bool): whether the return value on each device is
            presented as a list. It is only valid when iterable=True.
            If return_list=False, the return value on each device would
            be a dict of str -> LoDTensor, where the key of the dict is
            the name of each fed variables. If return_list=True, the
            return value on each device would be a list(LoDTensor). It is
            recommended to use return_list=False in static graph mode and
            use return_list=True in dygraph mode.

    Returns:
        the created reader object.

    Return type:
        reader(Reader)

    Examples:
        1. If iterable = False, the created PyReader object is almost the
           same as :code:`fluid.layers.py_reader()`. Operators would be
           inserted into the program. User should call :code:`start()`
           before each epoch and catch :code:`fluid.core.EOFException`
           thrown by :code:`Executor.run()` when epoch ends. Once the
           exception is caught, user should call :code:`reset()` to reset
           the reader manually.

        .. code-block:: python

           import paddle
           import paddle.fluid as fluid
           import numpy as np

           EPOCH_NUM = 3
           ITER_NUM = 5
           BATCH_SIZE = 3

           def network(image, label):
               # User-defined network, here is an example of softmax regression.
               predict = fluid.layers.fc(input=image, size=10, act='softmax')
               return fluid.layers.cross_entropy(input=predict, label=label)

           def reader_creator_random_image_and_label(height, width):
               def reader():
                   for i in range(ITER_NUM):
                       fake_image = np.random.uniform(low=0,
                                                      high=255,
                                                      size=[height, width])
                       fake_label = np.ones([1])
                       yield fake_image, fake_label
               return reader

           image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
           label = fluid.data(name='label', shape=[None, 1], dtype='int64')

           reader = fluid.io.PyReader(feed_list=[image, label],
                                      capacity=4,
                                      iterable=False)

           user_defined_reader = reader_creator_random_image_and_label(784, 784)
           reader.decorate_sample_list_generator(
               paddle.batch(user_defined_reader, batch_size=BATCH_SIZE))
           loss = network(image, label)
           executor = fluid.Executor(fluid.CPUPlace())
           executor.run(fluid.default_startup_program())
           for i in range(EPOCH_NUM):
               reader.start()
               while True:
                   try:
                       executor.run(feed=None)
                   except fluid.core.EOFException:
                       reader.reset()
                       break

        2. If iterable=True, the created PyReader object is decoupled with
           the program. No operator would be inserted into the program.
           In this case, the created reader is a Python generator, which
           is iterable. User should feed the data yielded from PyReader
           object into :code:`Executor.run(feed=...)`.

        .. code-block:: python

           import paddle
           import paddle.fluid as fluid
           import numpy as np

           EPOCH_NUM = 3
           ITER_NUM = 5
           BATCH_SIZE = 10

           def network(image, label):
               # User-defined network, here is an example of softmax regression.
               predict = fluid.layers.fc(input=image, size=10, act='softmax')
               return fluid.layers.cross_entropy(input=predict, label=label)

           def reader_creator_random_image(height, width):
               def reader():
                   for i in range(ITER_NUM):
                       fake_image = np.random.uniform(low=0, high=255, size=[height, width])
                       fake_label = np.ones([1])
                       yield fake_image, fake_label
               return reader

           image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
           label = fluid.data(name='label', shape=[None, 1], dtype='int64')

           reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True, return_list=False)

           user_defined_reader = reader_creator_random_image(784, 784)
           reader.decorate_sample_list_generator(
               paddle.batch(user_defined_reader, batch_size=BATCH_SIZE),
                   fluid.core.CPUPlace())

           loss = network(image, label)
           executor = fluid.Executor(fluid.CPUPlace())
           executor.run(fluid.default_startup_program())

           for _ in range(EPOCH_NUM):
               for data in reader():
                   executor.run(feed=data, fetch_list=[loss])

        3. If return_list=True, the return values would be presented as list instead of dict.
           This is usually used in dygraph mode.

        .. code-block:: python

           import paddle
           import paddle.fluid as fluid
           import numpy as np

           ITER_NUM = 5
           BATCH_SIZE = 10

           def reader_creator_random_image(height, width):
               def reader():
                   for i in range(ITER_NUM):
                       yield np.random.uniform(low=0, high=255, size=[height, width]), \
                           np.random.random_integers(low=0, high=9, size=[1])
               return reader

           place = fluid.CPUPlace()
           with fluid.dygraph.guard(place):
               py_reader = fluid.io.PyReader(capacity=2, return_list=True)
               user_defined_reader = reader_creator_random_image(784, 784)
               py_reader.decorate_sample_list_generator(
                   paddle.batch(user_defined_reader, batch_size=BATCH_SIZE),
                   place)
               for image, label in py_reader():
                   relu = fluid.layers.relu(image)
    """

    def __init__(self,
                 feed_list=None,
                 capacity=None,
                 use_double_buffer=True,
                 iterable=True,
                 return_list=False):
        # PyReader is a thin compatibility wrapper: every operation is
        # delegated to a DataLoader built by DataLoader.from_generator.
        self._loader = DataLoader.from_generator(
            feed_list, capacity, use_double_buffer, iterable, return_list)

    @property
    def queue(self):
        # Blocking queue of the wrapped loader.
        return self._loader.queue

    @property
    def iterable(self):
        return self._loader.iterable

    def __iter__(self):
        return self._loader.__iter__()

    def __next__(self):
        return self._loader.__next__()

    def start(self):
        '''
        Start the data feeding thread.
        Can only call when the reader object is not iterable.

        Example:
            .. code-block:: python

                import paddle
                import paddle.fluid as fluid
                import numpy as np

                BATCH_SIZE = 10

                def generator():
                    for i in range(5):
                        yield np.random.uniform(low=0, high=255, size=[784, 784]),

                image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
                reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=False)
                reader.decorate_sample_list_generator(
                    paddle.batch(generator, batch_size=BATCH_SIZE))

                executor = fluid.Executor(fluid.CPUPlace())
                executor.run(fluid.default_startup_program())
                for i in range(3):
                    reader.start()
                    while True:
                        try:
                            executor.run(feed=None)
                        except fluid.core.EOFException:
                            reader.reset()
                            break
        '''
        self._loader.start()

    def reset(self):
        '''
        Reset the reader object when :code:`fluid.core.EOFException` raises.
        Can only call when the reader object is not iterable.

        Example:
            .. code-block:: python

                import paddle
                import paddle.fluid as fluid
                import numpy as np

                BATCH_SIZE = 10

                def generator():
                    for i in range(5):
                        yield np.random.uniform(low=0, high=255, size=[784, 784]),

                image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
                reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=False)
                reader.decorate_sample_list_generator(
                    paddle.batch(generator, batch_size=BATCH_SIZE))

                executor = fluid.Executor(fluid.CPUPlace())
                executor.run(fluid.default_startup_program())
                for i in range(3):
                    reader.start()
                    while True:
                        try:
                            executor.run(feed=None)
                        except fluid.core.EOFException:
                            reader.reset()
                            break
        '''
        self._loader.reset()

    def decorate_sample_generator(self,
                                  sample_generator,
                                  batch_size,
                                  drop_last=True,
                                  places=None):
        '''
        Set the data source of the PyReader object.

        The provided :code:`sample_generator` should be a Python generator,
        which yields list(numpy.ndarray)-typed data of each sample.

        :code:`places` must be set when the PyReader object is iterable.

        If all inputs have no lods, this method is faster than
        :code:`decorate_sample_list_generator(paddle.batch(sample_generator, ...))` .

        Args:
            sample_generator (generator): Python generator that yields
                list(numpy.ndarray)-typed sample data.
            batch_size (int): batch size. Must be larger than 0.
            drop_last (bool): Whether to drop the last batch when sample number
                is less than batch_size.
            places (None|list(CUDAPlace)|list(CPUPlace)): place list. Must
                be provided when PyReader is iterable.

        Example:
            .. code-block:: python

                import paddle.fluid as fluid
                import numpy as np

                EPOCH_NUM = 3
                ITER_NUM = 15
                BATCH_SIZE = 3

                def network(image, label):
                    # User-defined network, here is an example of softmax regression.
                    predict = fluid.layers.fc(input=image, size=10, act='softmax')
                    return fluid.layers.cross_entropy(input=predict, label=label)

                def random_image_and_label_generator(height, width):
                    def generator():
                        for i in range(ITER_NUM):
                            fake_image = np.random.uniform(low=0,
                                                           high=255,
                                                           size=[height, width])
                            fake_label = np.array([1])
                            yield fake_image, fake_label
                    return generator

                image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
                label = fluid.data(name='label', shape=[None, 1], dtype='int64')

                reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)

                user_defined_generator = random_image_and_label_generator(784, 784)
                reader.decorate_sample_generator(user_defined_generator,
                                                 batch_size=BATCH_SIZE,
                                                 places=[fluid.CPUPlace()])
                loss = network(image, label)
                executor = fluid.Executor(fluid.CPUPlace())
                executor.run(fluid.default_startup_program())

                for _ in range(EPOCH_NUM):
                    for data in reader():
                        executor.run(feed=data, fetch_list=[loss])
        '''
        self._loader.set_sample_generator(sample_generator, batch_size,
                                          drop_last, places)

    def decorate_sample_list_generator(self, reader, places=None):
        '''
        Set the data source of the PyReader object.

        The provided :code:`reader` should be a Python generator,
        which yields list(numpy.ndarray) typed batched data.

        :code:`places` must be set when the PyReader object is iterable.

        Args:
            reader (generator): Python generator that yields
                list(numpy.ndarray)-typed batched data.
            places (None|list(CUDAPlace)|list(CPUPlace)): place list. Must
                be provided when PyReader is iterable.

        Example:
            .. code-block:: python

                import paddle
                import paddle.fluid as fluid
                import numpy as np

                EPOCH_NUM = 3
                ITER_NUM = 15
                BATCH_SIZE = 3

                def network(image, label):
                    # User-defined network, here is an example of softmax regression.
                    predict = fluid.layers.fc(input=image, size=10, act='softmax')
                    return fluid.layers.cross_entropy(input=predict, label=label)

                def random_image_and_label_generator(height, width):
                    def generator():
                        for i in range(ITER_NUM):
                            fake_image = np.random.uniform(low=0,
                                                           high=255,
                                                           size=[height, width])
                            fake_label = np.ones([1])
                            yield fake_image, fake_label
                    return generator

                image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
                label = fluid.data(name='label', shape=[None, 1], dtype='int64')

                reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)

                user_defined_generator = random_image_and_label_generator(784, 784)
                reader.decorate_sample_list_generator(
                    paddle.batch(user_defined_generator, batch_size=BATCH_SIZE),
                    fluid.core.CPUPlace())

                loss = network(image, label)
                executor = fluid.Executor(fluid.core.CPUPlace())
                executor.run(fluid.default_startup_program())

                for _ in range(EPOCH_NUM):
                    for data in reader():
                        executor.run(feed=data, fetch_list=[loss])
        '''
        self._loader.set_sample_list_generator(reader, places)

    def decorate_batch_generator(self, reader, places=None):
        '''
        Set the data source of the PyReader object.

        The provided :code:`reader` should be a Python generator,
        which yields numpy.ndarray-typed or LoDTensor-typed batched data.

        :code:`places` must be set when the PyReader object is iterable.

        Args:
            reader (generator): Python generator that yields LoDTensor-typed
                batched data.
            places (None|list(CUDAPlace)|list(CPUPlace)): place list. Must
                be provided when PyReader is iterable.

        Example:
            .. code-block:: python

                import paddle.fluid as fluid
                import numpy as np

                EPOCH_NUM = 3
                ITER_NUM = 15
                BATCH_SIZE = 3

                def network(image, label):
                    # User-defined network, here is an example of softmax regression.
                    predict = fluid.layers.fc(input=image, size=10, act='softmax')
                    return fluid.layers.cross_entropy(input=predict, label=label)

                def random_image_and_label_generator(height, width):
                    def generator():
                        for i in range(ITER_NUM):
                            batch_image = np.random.uniform(low=0,
                                                            high=255,
                                                            size=[BATCH_SIZE, height, width])
                            batch_label = np.ones([BATCH_SIZE, 1])
                            batch_image = batch_image.astype('float32')
                            batch_label = batch_label.astype('int64')
                            yield batch_image, batch_label
                    return generator

                image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
                label = fluid.data(name='label', shape=[None, 1], dtype='int64')

                reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)

                user_defined_generator = random_image_and_label_generator(784, 784)
                reader.decorate_batch_generator(user_defined_generator, fluid.CPUPlace())

                loss = network(image, label)
                executor = fluid.Executor(fluid.CPUPlace())
                executor.run(fluid.default_startup_program())

                for _ in range(EPOCH_NUM):
                    for data in reader():
                        executor.run(feed=data, fetch_list=[loss])
        '''
        self._loader.set_batch_generator(reader, places)
class DatasetLoader(DataLoaderBase):
    """Iterable loader that feeds batches from a fleet ``DatasetBase``.

    Wraps ``core.IterableDatasetWrapper`` so a dataset can be consumed
    batch-by-batch under static graph mode. One dataset worker thread is
    used per place; conflicting ``thread_num``/``queue_num`` settings on
    the dataset are overridden (with a warning).
    """

    def __init__(self, dataset, places, drop_last):
        """
        Args:
            dataset: a ``paddle.distributed.fleet.dataset.DatasetBase``.
            places: place or list of places; one worker thread per place.
            drop_last (bool): whether to drop the final incomplete batch.

        Raises:
            AssertionError: if ``dataset`` has the wrong type, dygraph mode
                is active, or the filelist is smaller than the place count.
        """
        assert isinstance(dataset, paddle.distributed.fleet.dataset.
                          DatasetBase), "dataset must be type of DatasetBase"
        assert not _non_static_mode(
        ), "DatasetLoader is not supported in dygraph mode yet"
        if isinstance(places, (list, tuple)):
            places = _get_paddle_place_list(places)
        else:
            places = _get_paddle_place(places)
        thread_num = len(places)

        # Each worker thread needs at least one file to read.
        assert len(dataset.filelist) >= thread_num, \
            "Filelist number of dataset {} must be not less than place number {}".format(len(dataset.filelist), thread_num)

        if dataset.thread_num != 0 and dataset.thread_num != thread_num:
            # FIX: logging.warn is a deprecated alias of logging.warning.
            logging.warning('thread_num {} which is set in Dataset is ignored'.
                            format(dataset.thread_num))

        dataset._set_thread(thread_num)

        if isinstance(dataset, paddle.distributed.fleet.dataset.
                      InMemoryDataset) and dataset.queue_num > thread_num:
            logging.warning("queue_num {} which is set in Dataset is ignored".
                            format(dataset.queue_num))
            dataset._set_queue_num(thread_num)

        self._dataset = dataset
        # Only slots marked as used in the dataset proto are fed.
        use_slots = [
            slot.name for slot in dataset.proto_desc.multi_slot_desc.slots
            if slot.is_used
        ]

        self._iterable_dataset = core.IterableDatasetWrapper(
            dataset.dataset, use_slots,
            _convert_places(places), dataset.proto_desc.batch_size, drop_last)

    def __iter__(self):
        # Restart the underlying dataset pipeline before each epoch.
        self._dataset._finish_to_run()
        self._dataset._prepare_to_run()
        self._iterable_dataset._start()
        return self

    def __next__(self):
        # Raises StopIteration from the wrapper when the epoch is exhausted.
        return self._iterable_dataset._next()
|
PiVideoStream.py | """
Additional driver intallation (V4L2)
Execute in the terminal:
sudo modprobe bcm2835-v4l2
"""
# import the necessary packages
from picamera.array import PiRGBArray
from picamera import PiCamera
from threading import Thread
import cv2
class PiVideoStream:
    """Threaded frame grabber for the Raspberry Pi camera.

    A background thread continuously reads BGR frames from the camera so
    that read() always returns the most recent frame without blocking.

    Fixes over the original: the reader thread handle is now kept
    (so callers can join it), and start() is idempotent so repeated calls
    cannot spawn multiple threads racing over the same capture stream.
    """

    def __init__(self, resolution=(480, 320), framerate=32):
        # initialize the camera and stream
        self.camera = PiCamera()
        self.camera.resolution = resolution
        self.camera.framerate = framerate
        self.rawCapture = PiRGBArray(self.camera, size=resolution)
        self.stream = self.camera.capture_continuous(self.rawCapture,
            format="bgr", use_video_port=True)

        # initialize the frame and the variable used to indicate
        # if the thread should be stopped
        self.frame = None
        self.stopped = False
        # handle on the reader thread; None until start() is called
        self.thread = None

    def start(self):
        """Start the background frame-reading thread (idempotent).

        Returns:
            self, to allow ``stream = PiVideoStream().start()`` chaining.
        """
        # guard against spawning a second reader over the same stream
        if self.thread is None or not self.thread.is_alive():
            self.thread = Thread(target=self.update, args=())
            self.thread.start()
        return self

    def update(self):
        # keep looping infinitely until the thread is stopped
        for f in self.stream:
            # grab the frame from the stream and clear the stream in
            # preparation for the next frame
            self.frame = f.array
            self.rawCapture.truncate(0)

            # if the thread indicator variable is set, stop the thread
            # and release camera resources
            if self.stopped:
                self.stream.close()
                self.rawCapture.close()
                self.camera.close()
                return

    def read(self):
        """Return the most recently captured frame (None before the first
        frame arrives)."""
        return self.frame

    def stop(self):
        """Signal the reader thread to exit; the camera is released by the
        thread itself on its next iteration."""
        self.stopped = True
|
learner.py | from typing import Tuple
import glob
import os
import shutil
import signal
import threading
import time
from collections import OrderedDict, deque
from os.path import join
from queue import Empty, Queue, Full
from threading import Thread
import numpy as np
import psutil
import torch
from torch.nn.utils.rnn import PackedSequence, invert_permutation
from torch.multiprocessing import Process, Event as MultiprocessingEvent
if os.name == 'nt':
from sample_factory.utils import Queue as MpQueue
else:
from faster_fifo import Queue as MpQueue
from sample_factory.algorithms.appo.appo_utils import TaskType, list_of_dicts_to_dict_of_lists, memory_stats, cuda_envvars_for_policy, \
TensorBatcher, iter_dicts_recursively, copy_dict_structure, ObjectPool
from sample_factory.algorithms.appo.model import create_actor_critic
from sample_factory.algorithms.appo.aux_losses import CPCA
from sample_factory.algorithms.appo.population_based_training import PbtTask
from sample_factory.algorithms.utils.action_distributions import get_action_distribution, is_continuous_action_space
from sample_factory.algorithms.utils.algo_utils import calculate_gae, EPS
from sample_factory.algorithms.utils.pytorch_utils import to_scalar
from sample_factory.utils.decay import LinearDecay
from sample_factory.utils.timing import Timing
from sample_factory.utils.utils import log, AttrDict, experiment_dir, ensure_dir_exists, join_or_kill, safe_get, safe_put
# noinspection PyPep8Naming
def _build_pack_info_from_dones(dones: torch.Tensor, T: int) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
    """
    Create the indexing info needed to make the PackedSequence based on the dones.

    PackedSequences are PyTorch's way of supporting a single RNN forward
    call where each input in the batch can have an arbitrary sequence length
    They work as follows: Given the sequences [c], [x, y, z], [a, b],
    we generate data [x, a, c, y, b, z] and batch_sizes [3, 2, 1].  The
    data is a flattened out version of the input sequences (the ordering in
    data is determined by sequence length).  batch_sizes tells you that
    for each index, how many sequences have a length of (index + 1) or greater.

    This method will generate the new index ordering such that you can
    construct the data for a PackedSequence from a (N*T, ...) tensor
    via x.index_select(0, select_inds)

    :param dones: flat (N*T,) tensor of done flags (1.0 at the last step of an episode)
    :param T: rollout length; every T-th step is forced to be a sequence boundary
    :return: (rollout_starts_orig, is_new_episode, select_inds, batch_sizes, sorted_indices)
    """
    num_samples = len(dones)

    rollout_boundaries = dones.clone().detach()
    rollout_boundaries[T - 1::T] = 1  # end of each rollout is the boundary
    # +1 turns boundary positions into exclusive end indices of each sequence
    rollout_boundaries = rollout_boundaries.nonzero(as_tuple=False).squeeze(dim=1) + 1

    first_len = rollout_boundaries[0].unsqueeze(0)

    if len(rollout_boundaries) <= 1:
        log.debug('Only one rollout boundary. This can happen if batch size is 1, probably not during the real training.')
        rollout_lengths = first_len
    else:
        rollout_lengths = rollout_boundaries[1:] - rollout_boundaries[:-1]
        rollout_lengths = torch.cat([first_len, rollout_lengths])

    rollout_starts_orig = rollout_boundaries - rollout_lengths

    # done=True for the last step in the episode, so done flags rolled 1 step to the right will indicate
    # first frames in the episodes
    is_new_episode = dones.clone().detach().view((-1, T))
    is_new_episode = is_new_episode.roll(1, 1)

    # roll() is cyclical, so done=True in the last position in the rollout will roll to 0th position
    # we want to avoid it here. (note to self: is there a function that does two of these things at once?)
    is_new_episode[:, 0] = 0
    is_new_episode = is_new_episode.view((-1, ))

    # PackedSequence wants sequences ordered longest-first
    lengths, sorted_indices = torch.sort(rollout_lengths, descending=True)
    # We will want these on the CPU for torch.unique_consecutive,
    # so move now.
    cpu_lengths = lengths.to(device='cpu', non_blocking=True)

    # We need to keep the original unpermuted rollout_starts, because the permutation is later applied
    # internally in the RNN implementation.
    # From modules/rnn.py:
    #       Each batch of the hidden state should match the input sequence that
    #       the user believes he/she is passing in.
    #       hx = self.permute_hidden(hx, sorted_indices)
    rollout_starts_sorted = rollout_starts_orig.index_select(0, sorted_indices)

    select_inds = torch.empty(num_samples, device=dones.device, dtype=torch.int64)

    max_length = int(cpu_lengths[0].item())
    # batch_sizes is *always* on the CPU
    batch_sizes = torch.empty((max_length,), device='cpu', dtype=torch.int64)

    offset = 0
    prev_len = 0
    num_valid_for_length = lengths.size(0)

    unique_lengths = torch.unique_consecutive(cpu_lengths)
    # Iterate over all unique lengths in reverse as they sorted
    # in decreasing order
    for i in range(len(unique_lengths) - 1, -1, -1):
        # sequences longer than prev_len still contribute steps in this length band
        valids = lengths[0:num_valid_for_length] > prev_len
        num_valid_for_length = int(valids.float().sum().item())

        next_len = int(unique_lengths[i])

        batch_sizes[prev_len:next_len] = num_valid_for_length

        new_inds = (
            rollout_starts_sorted[0:num_valid_for_length].view(1, num_valid_for_length)
            + torch.arange(prev_len, next_len, device=rollout_starts_sorted.device).view(next_len - prev_len, 1)
        ).view(-1)

        # for a set of sequences [1, 2, 3], [4, 5], [6, 7], [8]
        # these indices will be 1,4,6,8,2,5,7,3
        # (all first steps in all trajectories, then all second steps, etc.)
        select_inds[offset:offset + new_inds.numel()] = new_inds

        offset += new_inds.numel()

        prev_len = next_len

    # Make sure we have an index for all elements
    assert offset == num_samples
    assert is_new_episode.shape[0] == num_samples

    return rollout_starts_orig, is_new_episode, select_inds, batch_sizes, sorted_indices
def build_rnn_inputs(x, dones_cpu, rnn_states, T: int):
    """
    Pack rollout data into a PackedSequence so a single RNN forward call can
    process every episode fragment, while zeroing hidden states on episode starts.

    Use the returned select_inds and build_core_out_from_seq to invert this.

    :param x: A (N*T, -1) tensor of the data to build the PackedSequence out of
    :param dones_cpu: A (N*T) tensor where dones[i] == 1.0 indicates an episode is done, a CPU-bound tensor
    :param rnn_states: A (N*T, -1) tensor of the rnn_hidden_states
    :param T: The length of the rollout
    :return: tuple(x_seq, rnn_states, inverted_select_inds)
        x_seq: PackedSequence version of x to feed to the RNN
        rnn_states: initial hidden state per sequence, zeroed at episode boundaries
        inverted_select_inds: pass to build_core_out_from_seq to restore the original order
    """
    rollout_starts, is_new_episode, select_inds, batch_sizes, sorted_indices = _build_pack_info_from_dones(dones_cpu, T)
    inverted_select_inds = invert_permutation(select_inds)

    # move all index tensors onto the same device as the data
    target_device = x.device
    select_inds = select_inds.to(device=target_device)
    inverted_select_inds = inverted_select_inds.to(device=target_device)
    sorted_indices = sorted_indices.to(device=target_device)
    rollout_starts = rollout_starts.to(device=target_device)
    is_new_episode = is_new_episode.to(device=target_device)

    x_seq = PackedSequence(x.index_select(0, select_inds), batch_sizes, sorted_indices)

    # rollout_starts indexes the first step of every sequence (episode start OR rollout boundary).
    # Multiplying the corresponding hidden state by (1 - is_new_episode) zeroes it exactly for
    # sequences that begin a new episode, so no information leaks across episode boundaries.
    episode_continues = (1 - is_new_episode.view(-1, 1)).index_select(0, rollout_starts)
    rnn_states = rnn_states.index_select(0, rollout_starts) * episode_continues

    return x_seq, rnn_states, inverted_select_inds
def build_core_out_from_seq(x_seq: PackedSequence, inverted_select_inds):
    """Restore the original (N*T, ...) row order of RNN outputs packed by build_rnn_inputs."""
    flat_outputs = x_seq.data
    return flat_outputs.index_select(0, inverted_select_inds)
class LearnerWorker:
def __init__(
    self, worker_idx, policy_id, cfg, obs_space, action_space, report_queue, policy_worker_queues, shared_buffers,
    policy_lock, resume_experience_collection_cv,
):
    """
    Set up learner state; heavy/process-local objects are created later in
    deferred_initialization() inside the spawned process, not here.
    """
    log.info('Initializing the learner %d for policy %d', worker_idx, policy_id)

    self.worker_idx = worker_idx
    self.policy_id = policy_id

    self.cfg = cfg

    # PBT-related stuff
    self.should_save_model = True  # set to true if we need to save the model to disk on the next training iteration
    self.load_policy_id = None  # non-None when we need to replace our parameters with another policy's parameters
    self.pbt_mutex = None  # deferred initialization
    self.new_cfg = None  # non-None when we need to update the learning hyperparameters

    self.terminate = False
    self.num_batches_processed = 0

    self.obs_space = obs_space
    self.action_space = action_space

    self.shared_buffers = shared_buffers

    # deferred initialization
    self.rollout_tensors = None
    self.policy_versions = None
    self.stop_experience_collection = None

    self.stop_experience_collection_num_msgs = self.resume_experience_collection_num_msgs = 0

    self.device = None
    self.actor_critic = None
    self.aux_loss_module = None
    self.optimizer = None
    self.policy_lock = policy_lock
    self.resume_experience_collection_cv = resume_experience_collection_cv

    self.task_queue = MpQueue()
    self.report_queue = report_queue

    self.initialized_event = MultiprocessingEvent()
    self.initialized_event.clear()

    self.model_saved_event = MultiprocessingEvent()
    self.model_saved_event.clear()

    # queues corresponding to policy workers using the same policy
    # we send weight updates via these queues
    self.policy_worker_queues = policy_worker_queues

    self.experience_buffer_queue = None  # deferred initialization

    self.tensor_batch_pool = self.tensor_batcher = None

    self.with_training = True  # set to False for debugging no-training regime
    self.train_in_background = self.cfg.train_in_background_thread  # set to False for debugging

    self.training_thread = None
    self.train_thread_initialized = None

    self.is_training = False

    self.train_step = self.env_steps = 0

    # decay rate at which summaries are collected
    # save summaries every 20 seconds in the beginning, but decay to every 4 minutes in the limit, because we
    # do not need frequent summaries for longer experiments
    self.summary_rate_decay_seconds = LinearDecay([(0, 20), (100000, 120), (1000000, 240)])
    self.last_summary_time = 0

    self.last_saved_time = self.last_milestone_time = 0

    self.discarded_experience_over_time = deque([], maxlen=30)
    self.discarded_experience_timer = time.time()
    self.num_discarded_rollouts = 0

    self.process = Process(target=self._run, daemon=True)

    # symmetric KL needs a closed-form uniform prior, which continuous distributions here don't provide
    if is_continuous_action_space(self.action_space) and self.cfg.exploration_loss == 'symmetric_kl':
        raise NotImplementedError('KL-divergence exploration loss is not supported with '
                                  'continuous action spaces. Use entropy exploration loss')

    self.exploration_loss_func = None  # deferred initialization
def start_process(self):
    """Launch the learner's dedicated daemon process (runs self._run)."""
    self.process.start()
def deferred_initialization(self):
    """
    Finish construction inside the learner process: attach shared buffers,
    create process-local synchronization objects, and pick the exploration loss.
    """
    self.rollout_tensors = self.shared_buffers.tensors
    self.policy_versions = self.shared_buffers.policy_versions
    self.stop_experience_collection = self.shared_buffers.stop_experience_collection

    self.pbt_mutex = threading.Lock()
    self.experience_buffer_queue = Queue()

    self.tensor_batch_pool = ObjectPool()
    self.tensor_batcher = TensorBatcher(self.tensor_batch_pool)

    if self.train_in_background:
        self.training_thread = Thread(target=self._train_loop)
    else:
        self.training_thread = None
    self.train_thread_initialized = threading.Event()

    if self.cfg.exploration_loss_coeff == 0.0:
        # exploration disabled entirely — loss is a constant zero
        self.exploration_loss_func = lambda action_distr, valids: 0.0
        return

    # dispatch table instead of an if/elif chain
    exploration_losses = {
        'entropy': self.entropy_exploration_loss,
        'symmetric_kl': self.symmetric_kl_exploration_loss,
    }
    if self.cfg.exploration_loss not in exploration_losses:
        raise NotImplementedError(f'{self.cfg.exploration_loss} not supported!')
    self.exploration_loss_func = exploration_losses[self.cfg.exploration_loss]
def _init(self):
    # Block until the training thread finishes one-time setup, then signal
    # readiness to the rest of the system via the multiprocessing event.
    log.info('Waiting for the learner to initialize...')
    self.train_thread_initialized.wait()
    log.info('Learner %d initialized', self.worker_idx)
    self.initialized_event.set()
def _terminate(self):
    # Cooperative shutdown flag, polled by the run/training loops.
    self.terminate = True
def _broadcast_model_weights(self):
    """Send the current parameters (tagged with the policy version) to every policy worker."""
    weights = self.actor_critic.state_dict()
    version = self.train_step
    log.debug('Broadcast model weights for model version %d', version)
    payload = (version, weights)
    for worker_queue in self.policy_worker_queues:
        worker_queue.put((TaskType.INIT_MODEL, payload))
def _calculate_gae(self, buffer):
    """
    Calculate advantages using Generalized Advantage Estimation.
    This is leftover from the previous version of the algorithm.
    Perhaps should be re-implemented in PyTorch tensors, similar to V-trace for uniformity.

    :param buffer: dict-like with per-trajectory lists 'rewards', 'dones', 'values'
    :return: the same buffer with 'advantages' and 'returns' added (flattened tensors)
    """
    rewards = np.stack(buffer.rewards).squeeze()  # [E, T]
    dones = np.stack(buffer.dones).squeeze()  # [E, T]
    values_arr = np.stack(buffer.values).squeeze()  # [E, T]

    # calculating fake values for the last step in the rollout
    # this will make sure that advantage of the very last action is always zero
    values = []
    for i in range(len(values_arr)):
        last_value, last_reward = values_arr[i][-1], rewards[i, -1]
        next_value = (last_value - last_reward) / self.cfg.gamma
        values.append(list(values_arr[i]))
        values[i].append(float(next_value))  # [T] -> [T+1]

    # calculating returns and GAE
    rewards = rewards.transpose((1, 0))  # [E, T] -> [T, E]
    dones = dones.transpose((1, 0))  # [E, T] -> [T, E]
    values = np.asarray(values).transpose((1, 0))  # [E, T+1] -> [T+1, E]

    advantages, returns = calculate_gae(rewards, dones, values, self.cfg.gamma, self.cfg.gae_lambda)

    # transpose tensors back to [E, T] before creating a single experience buffer
    buffer.advantages = advantages.transpose((1, 0))  # [T, E] -> [E, T]
    buffer.returns = returns.transpose((1, 0))  # [T, E] -> [E, T]
    buffer.returns = buffer.returns[:, :, np.newaxis]  # [E, T] -> [E, T, 1]

    # flatten to match the layout of the rest of the training buffer
    buffer.advantages = [torch.tensor(buffer.advantages).reshape(-1)]
    buffer.returns = [torch.tensor(buffer.returns).reshape(-1)]

    return buffer
def _prepare_train_buffer(self, rollouts, macro_batch_size, timing):
    """
    Merge individual rollouts into one device-resident macro-batch buffer.

    :param rollouts: list of rollout dicts, each with trajectory data under 't'
    :param macro_batch_size: total number of samples across all rollouts
    :param timing: Timing object for profiling the sub-stages
    :return: buffer of batched tensors already copied to the training device
    """
    trajectories = [AttrDict(r['t']) for r in rollouts]

    with timing.add_time('buffers'):
        buffer = AttrDict()

        # by the end of this loop the buffer is a dictionary containing lists of numpy arrays
        for i, t in enumerate(trajectories):
            for key, x in t.items():
                if key not in buffer:
                    buffer[key] = []
                buffer[key].append(x)

        # convert lists of dict observations to a single dictionary of lists
        for key, x in buffer.items():
            if isinstance(x[0], (dict, OrderedDict)):
                buffer[key] = list_of_dicts_to_dict_of_lists(x)

    # GAE is only needed when V-trace is off; V-trace computes targets during training
    if not self.cfg.with_vtrace:
        with timing.add_time('calc_gae'):
            buffer = self._calculate_gae(buffer)

    with timing.add_time('batching'):
        # concatenate rollouts from different workers into a single batch efficiently
        # that is, if we already have memory for the buffers allocated, we can just copy the data into
        # existing cached tensors instead of creating new ones. This is a performance optimization.
        use_pinned_memory = self.cfg.device == 'gpu'
        buffer = self.tensor_batcher.cat(buffer, macro_batch_size, use_pinned_memory, timing)

    with timing.add_time('buff_ready'):
        # the shared trajectory buffers can be reused by rollout workers now
        self.shared_buffers.free_trajectory_buffers([r.traj_buffer_idx for r in rollouts])

    with timing.add_time('tensors_gpu_float'):
        device_buffer = self._copy_train_data_to_device(buffer)

    with timing.add_time('squeeze'):
        # will squeeze actions only in simple categorical case
        tensors_to_squeeze = [
            'actions', 'log_prob_actions', 'policy_version', 'policy_id', 'values',
            'rewards', 'dones', 'rewards_cpu', 'dones_cpu',
        ]
        for tensor_name in tensors_to_squeeze:
            device_buffer[tensor_name].squeeze_()

    # we no longer need the cached buffer, and can put it back into the pool
    self.tensor_batch_pool.put(buffer)
    return device_buffer
def _macro_batch_size(self, batch_size):
return self.cfg.num_batches_per_iteration * batch_size
def _process_macro_batch(self, rollouts, batch_size, timing):
    """
    Turn a group of rollouts into a training-ready macro batch and queue it for the trainer.

    :param rollouts: exactly enough rollouts to fill one macro batch
    :param batch_size: minibatch size (passed explicitly; may differ from cfg under PBT)
    :param timing: Timing object for profiling
    """
    macro_batch_size = self._macro_batch_size(batch_size)

    # macro batch must be evenly divisible into rollouts and BPTT segments
    assert macro_batch_size % self.cfg.rollout == 0
    assert self.cfg.rollout % self.cfg.recurrence == 0
    assert macro_batch_size % self.cfg.recurrence == 0

    samples = env_steps = 0
    for rollout in rollouts:
        samples += rollout['length']
        env_steps += rollout['env_steps']

    with timing.add_time('prepare'):
        buffer = self._prepare_train_buffer(rollouts, macro_batch_size, timing)
        self.experience_buffer_queue.put((buffer, batch_size, samples, env_steps))

    if not self.cfg.benchmark and self.cfg.train_in_background_thread:
        # in PyTorch 1.4.0 there is an intense memory spike when the very first batch is being processed
        # we wait here until this is over so we can continue queueing more batches onto a GPU without having
        # a risk to run out of GPU memory
        while self.num_batches_processed < 1:
            log.debug('Waiting for the first batch to be processed')
            time.sleep(0.5)
def _process_rollouts(self, rollouts, timing):
    """
    Filter out stale rollouts and dispatch a macro batch when enough rollouts accumulated.

    :param rollouts: accumulated rollouts not yet processed
    :param timing: Timing object for profiling
    :return: rollouts that remain unprocessed (kept for the next call)
    """
    # batch_size can potentially change through PBT, so we should keep it the same and pass it around
    # using function arguments, instead of using global self.cfg
    batch_size = self.cfg.batch_size
    rollouts_in_macro_batch = self._macro_batch_size(batch_size) // self.cfg.rollout

    if len(rollouts) < rollouts_in_macro_batch:
        return rollouts

    to_discard = 0
    to_process = []
    policy_version = self.train_step
    for r in rollouts:
        # transitions in a rollout can come from different policies (e.g. after PBT policy change)
        mask = r.t['policy_id'] == self.policy_id
        if np.any(mask):
            rollout_newest_version = r.t['policy_version'][mask].max().item()
        else:
            log.error(
                'Learner %d got a rollout without any transitions produced by policy %d. This must be a bug.',
                self.policy_id, self.policy_id,
            )
            log.error('Rollout policy ids: %r', r.t['policy_id'])
            # force-discard by pretending the rollout is exactly at the lag threshold
            rollout_newest_version = policy_version - self.cfg.max_policy_lag

        if policy_version - rollout_newest_version >= self.cfg.max_policy_lag:
            # the entire rollout is too old, discard it!
            to_discard += 1
            self.shared_buffers.free_trajectory_buffers([r.traj_buffer_idx])
        else:
            # There is some experience in the rollout that we can learn from.
            # Old experience (older than max policy lag), experience from other policies (in case of policy
            # change on episode boundary), and experience from inactive agents (policy id = -1) will be masked
            # out during loss calculations.
            to_process.append(r)

    if to_discard > 0:
        log.warning(
            'Discarding %d old rollouts, cut by policy lag threshold %d (learner %d)',
            to_discard, self.cfg.max_policy_lag, self.policy_id,
        )

    rollouts = to_process
    self.num_discarded_rollouts += to_discard

    if len(rollouts) >= rollouts_in_macro_batch:
        # process newest rollouts
        rollouts_to_process = rollouts[:rollouts_in_macro_batch]
        rollouts = rollouts[rollouts_in_macro_batch:]

        self._process_macro_batch(rollouts_to_process, batch_size, timing)
        # log.info('Unprocessed rollouts: %d (%d samples)', len(rollouts), len(rollouts) * self.cfg.rollout)

    return rollouts
def _get_minibatches(self, batch_size, experience_size):
"""Generating minibatches for training."""
assert self.cfg.rollout % self.cfg.recurrence == 0
assert experience_size % batch_size == 0, f'experience size: {experience_size}, batch size: {batch_size}'
if self.cfg.num_batches_per_iteration == 1:
return [None] # single minibatch is actually the entire buffer, we don't need indices
# indices that will start the mini-trajectories from the same episode (for bptt)
indices = np.arange(0, experience_size, self.cfg.recurrence)
indices = np.random.permutation(indices)
# complete indices of mini trajectories, e.g. with recurrence==4: [4, 16] -> [4, 5, 6, 7, 16, 17, 18, 19]
indices = [np.arange(i, i + self.cfg.recurrence) for i in indices]
indices = np.concatenate(indices)
assert len(indices) == experience_size
num_minibatches = experience_size // batch_size
minibatches = np.split(indices, num_minibatches)
return minibatches
@staticmethod
def _get_minibatch(buffer, indices):
if indices is None:
# handle the case of a single batch, where the entire buffer is a minibatch
return buffer
mb = AttrDict()
for item, x in buffer.items():
if isinstance(x, (dict, OrderedDict)):
mb[item] = AttrDict()
for key, x_elem in x.items():
mb[item][key] = x_elem[indices]
else:
mb[item] = x[indices]
return mb
def _should_save_summaries(self):
summaries_every_seconds = self.summary_rate_decay_seconds.at(self.train_step)
if time.time() - self.last_summary_time < summaries_every_seconds:
return False
return True
def _after_optimizer_step(self):
    """A hook to be called after each optimizer step: bumps the policy version and maybe checkpoints."""
    self.train_step += 1
    self._maybe_save()
def _maybe_save(self):
    # Save when the periodic timer expires or when a save was explicitly requested
    # (should_save_model, e.g. by PBT); waiters are notified via model_saved_event.
    if time.time() - self.last_saved_time >= self.cfg.save_every_sec or self.should_save_model:
        self._save()
        self.model_saved_event.set()
        self.should_save_model = False
        self.last_saved_time = time.time()
@staticmethod
def checkpoint_dir(cfg, policy_id):
    # Per-policy checkpoint directory under the experiment dir, created if missing.
    checkpoint_dir = join(experiment_dir(cfg=cfg), f'checkpoint_p{policy_id}')
    return ensure_dir_exists(checkpoint_dir)
@staticmethod
def get_checkpoints(checkpoints_dir):
checkpoints = glob.glob(join(checkpoints_dir, 'checkpoint_*'))
return sorted(checkpoints)
def _get_checkpoint_dict(self):
    # Everything needed to resume training: progress counters plus model/optimizer
    # (and optional auxiliary loss module) state dicts.
    checkpoint = {
        'train_step': self.train_step,
        'env_steps': self.env_steps,
        'model': self.actor_critic.state_dict(),
        'optimizer': self.optimizer.state_dict(),
    }
    if self.aux_loss_module is not None:
        checkpoint['aux_loss_module'] = self.aux_loss_module.state_dict()
    return checkpoint
def _save(self):
    """
    Atomically write the latest checkpoint to disk and prune old ones.

    The checkpoint is written to a temp file first and then renamed, so a crash
    mid-write never leaves a corrupt "latest" checkpoint. Optionally copies
    periodic milestone snapshots when cfg.save_milestones_sec is enabled.
    """
    checkpoint = self._get_checkpoint_dict()
    assert checkpoint is not None

    checkpoint_dir = self.checkpoint_dir(self.cfg, self.policy_id)
    tmp_filepath = join(checkpoint_dir, '.temp_checkpoint')
    checkpoint_name = f'checkpoint_{self.train_step:09d}_{self.env_steps}.pth'
    filepath = join(checkpoint_dir, checkpoint_name)
    log.info('Saving %s...', tmp_filepath)
    torch.save(checkpoint, tmp_filepath)
    log.info('Renaming %s to %s', tmp_filepath, filepath)
    os.rename(tmp_filepath, filepath)

    # Prune the oldest checkpoints beyond the retention limit. We iterate over a
    # snapshot of the directory listing: the previous version re-globbed the
    # directory on every loop iteration and would spin forever if an entry
    # matching 'checkpoint_*' was not a regular file (isfile() False -> nothing
    # removed -> the count never shrinks).
    checkpoints = self.get_checkpoints(checkpoint_dir)
    while len(checkpoints) > self.cfg.keep_checkpoints:
        oldest_checkpoint = checkpoints.pop(0)
        if os.path.isfile(oldest_checkpoint):
            log.debug('Removing %s', oldest_checkpoint)
            os.remove(oldest_checkpoint)

    if self.cfg.save_milestones_sec > 0:
        # milestones enabled
        if time.time() - self.last_milestone_time >= self.cfg.save_milestones_sec:
            milestones_dir = ensure_dir_exists(join(checkpoint_dir, 'milestones'))
            milestone_path = join(milestones_dir, f'{checkpoint_name}.milestone')
            log.debug('Saving a milestone %s', milestone_path)
            shutil.copy(filepath, milestone_path)
            self.last_milestone_time = time.time()
@staticmethod
def _policy_loss(ratio, adv, clip_ratio_low, clip_ratio_high, valids):
clipped_ratio = torch.clamp(ratio, clip_ratio_low, clip_ratio_high)
loss_unclipped = ratio * adv
loss_clipped = clipped_ratio * adv
loss = torch.min(loss_unclipped, loss_clipped)
loss = torch.masked_select(loss, valids)
loss = -loss.mean()
return loss
def _value_loss(self, new_values, old_values, target, clip_value, valids):
value_clipped = old_values + torch.clamp(new_values - old_values, -clip_value, clip_value)
value_original_loss = (new_values - target).pow(2)
value_clipped_loss = (value_clipped - target).pow(2)
value_loss = torch.max(value_original_loss, value_clipped_loss)
value_loss = torch.masked_select(value_loss, valids)
value_loss = value_loss.mean()
value_loss *= self.cfg.value_loss_coeff
return value_loss
def entropy_exploration_loss(self, action_distribution, valids):
entropy = action_distribution.entropy()
entropy = torch.masked_select(entropy, valids)
entropy_loss = -self.cfg.exploration_loss_coeff * entropy.mean()
return entropy_loss
def symmetric_kl_exploration_loss(self, action_distribution, valids):
kl_prior = action_distribution.symmetric_kl_with_uniform_prior()
kl_prior = torch.masked_select(kl_prior, valids).mean()
if not torch.isfinite(kl_prior):
kl_prior = torch.zeros(kl_prior.shape)
kl_prior = torch.clamp(kl_prior, max=30)
kl_prior_loss = self.cfg.exploration_loss_coeff * kl_prior
return kl_prior_loss
def _prepare_observations(self, obs_tensors, gpu_buffer_obs):
    # Copy each observation tensor into the (possibly nested) destination dict,
    # converting to the device/dtype the model declares for that input.
    for d, gpu_d, k, v, _ in iter_dicts_recursively(obs_tensors, gpu_buffer_obs):
        device, dtype = self.actor_critic.device_and_type_for_input_tensor(k)
        tensor = v.detach().to(device, copy=True).type(dtype)
        gpu_d[k] = tensor
def _copy_train_data_to_device(self, buffer):
    """
    Copy the batched training buffer to the training device as floats;
    keep extra CPU copies of dones/rewards for the (CPU-bound) V-trace computation.
    """
    device_buffer = copy_dict_structure(buffer)

    for key, item in buffer.items():
        if key == 'obs':
            # observations may need per-input device/dtype handling
            self._prepare_observations(item, device_buffer['obs'])
        else:
            device_tensor = item.detach().to(self.device, copy=True, non_blocking=True)
            device_buffer[key] = device_tensor.float()

    device_buffer['dones_cpu'] = buffer.dones.to('cpu', copy=True, non_blocking=True).float()
    device_buffer['rewards_cpu'] = buffer.rewards.to('cpu', copy=True, non_blocking=True).float()

    return device_buffer
def _train(self, gpu_buffer, batch_size, experience_size, timing):
    """
    Run PPO epochs (optionally with V-trace off-policy value targets) over one macro batch.

    :param gpu_buffer: batched training data already copied to the training device
    :param batch_size: minibatch size (passed explicitly; may change under PBT)
    :param experience_size: total number of samples in gpu_buffer
    :param timing: Timing object for profiling the training sub-stages
    :return: stats dict for reporting, or None if no summaries were collected this call
    """
    with torch.no_grad():
        policy_version_before_train = self.train_step

        early_stopping_tolerance = 1e-6
        early_stop = False
        prev_epoch_actor_loss = 1e9
        epoch_actor_losses = []

        # V-trace parameters
        # noinspection PyArgumentList
        rho_hat = torch.Tensor([self.cfg.vtrace_rho])
        # noinspection PyArgumentList
        c_hat = torch.Tensor([self.cfg.vtrace_c])

        clip_ratio_high = 1.0 + self.cfg.ppo_clip_ratio  # e.g. 1.1
        # this still works with e.g. clip_ratio = 2, while PPO's 1-r would give negative ratio
        clip_ratio_low = 1.0 / clip_ratio_high

        clip_value = self.cfg.ppo_clip_value
        gamma = self.cfg.gamma
        recurrence = self.cfg.recurrence

        if self.cfg.with_vtrace:
            assert recurrence == self.cfg.rollout and recurrence > 1, \
                'V-trace requires to recurrence and rollout to be equal'

        num_sgd_steps = 0

        stats_and_summaries = None
        if not self.with_training:
            return stats_and_summaries

    for epoch in range(self.cfg.ppo_epochs):
        with timing.add_time('epoch_init'):
            if early_stop or self.terminate:
                break

            summary_this_epoch = force_summaries = False

            minibatches = self._get_minibatches(batch_size, experience_size)

        for batch_num in range(len(minibatches)):
            with timing.add_time('minibatch_init'):
                indices = minibatches[batch_num]

                # current minibatch consisting of short trajectory segments with length == recurrence
                mb = self._get_minibatch(gpu_buffer, indices)

            # calculate policy head outside of recurrent loop
            with timing.add_time('forward_head'):
                head_outputs = self.actor_critic.forward_head(mb.obs)

            # initial rnn states
            with timing.add_time('bptt_initial'):
                if self.cfg.use_rnn:
                    head_output_seq, rnn_states, inverted_select_inds = build_rnn_inputs(
                        head_outputs, mb.dones_cpu, mb.rnn_states, recurrence,
                    )
                else:
                    rnn_states = mb.rnn_states[::recurrence]

            # calculate RNN outputs for each timestep in a loop
            with timing.add_time('bptt'):
                if self.cfg.use_rnn:
                    with timing.add_time('bptt_forward_core'):
                        core_output_seq, _ = self.actor_critic.forward_core(head_output_seq, rnn_states)
                    core_outputs = build_core_out_from_seq(core_output_seq, inverted_select_inds)
                else:
                    core_outputs, _ = self.actor_critic.forward_core(head_outputs, rnn_states)

            num_trajectories = head_outputs.size(0) // recurrence

            with timing.add_time('tail'):
                assert core_outputs.shape[0] == head_outputs.shape[0]

                # calculate policy tail outside of recurrent loop
                result = self.actor_critic.forward_tail(core_outputs, with_action_distribution=True)

                action_distribution = result.action_distribution
                log_prob_actions = action_distribution.log_prob(mb.actions)
                ratio = torch.exp(log_prob_actions - mb.log_prob_actions)  # pi / pi_old

                # super large/small values can cause numerical problems and are probably noise anyway
                ratio = torch.clamp(ratio, 0.05, 20.0)

                values = result.values.squeeze()

            with torch.no_grad():  # these computations are not the part of the computation graph
                # ignore experience from other agents (i.e. on episode boundary) and from inactive agents
                valids = mb.policy_id == self.policy_id
                # ignore experience that was older than the threshold even before training started
                valids = valids & (policy_version_before_train - mb.policy_version < self.cfg.max_policy_lag)

                if self.cfg.with_vtrace:
                    # V-trace runs on the CPU (lots of small sequential slicing ops)
                    ratios_cpu = ratio.cpu()
                    values_cpu = values.cpu()
                    rewards_cpu = mb.rewards_cpu
                    dones_cpu = mb.dones_cpu

                    # truncated importance weights (rho and c from the IMPALA paper)
                    vtrace_rho = torch.min(rho_hat, ratios_cpu)
                    vtrace_c = torch.min(c_hat, ratios_cpu)

                    vs = torch.zeros((num_trajectories * recurrence))
                    adv = torch.zeros((num_trajectories * recurrence))

                    # bootstrap values for the step after the rollout end (inverse of the
                    # fake-last-value trick: advantage of the very last action is zero)
                    next_values = (values_cpu[recurrence - 1::recurrence] - rewards_cpu[recurrence - 1::recurrence]) / gamma
                    next_vs = next_values

                    with timing.add_time('vtrace'):
                        # backward recursion over timesteps; [i::recurrence] selects
                        # step i of every trajectory at once
                        for i in reversed(range(self.cfg.recurrence)):
                            rewards = rewards_cpu[i::recurrence]
                            dones = dones_cpu[i::recurrence]
                            not_done = 1.0 - dones
                            not_done_times_gamma = not_done * gamma

                            curr_values = values_cpu[i::recurrence]
                            curr_vtrace_rho = vtrace_rho[i::recurrence]
                            curr_vtrace_c = vtrace_c[i::recurrence]

                            delta_s = curr_vtrace_rho * (rewards + not_done_times_gamma * next_values - curr_values)
                            adv[i::recurrence] = curr_vtrace_rho * (rewards + not_done_times_gamma * next_vs - curr_values)
                            next_vs = curr_values + delta_s + not_done_times_gamma * curr_vtrace_c * (next_vs - next_values)
                            vs[i::recurrence] = next_vs

                            next_values = curr_values

                    targets = vs
                else:
                    # using regular GAE
                    adv = mb.advantages
                    targets = mb.returns

                adv_mean = adv.mean()
                adv_std = adv.std()
                adv = (adv - adv_mean) / max(1e-3, adv_std)  # normalize advantage
                adv = adv.to(self.device)

            with timing.add_time('losses'):
                policy_loss = self._policy_loss(ratio, adv, clip_ratio_low, clip_ratio_high, valids)
                exploration_loss = self.exploration_loss_func(action_distribution, valids)

                actor_loss = policy_loss + exploration_loss
                epoch_actor_losses.append(actor_loss.item())

                targets = targets.to(self.device)
                old_values = mb.values
                value_loss = self._value_loss(values, old_values, targets, clip_value, valids)
                critic_loss = value_loss

                loss = actor_loss + critic_loss

                if self.aux_loss_module is not None:
                    with timing.add_time('aux_loss'):
                        aux_loss = self.aux_loss_module(
                            mb.actions.view(num_trajectories, recurrence, -1),
                            (1.0 - mb.dones).view(num_trajectories, recurrence, 1),
                            valids.view(num_trajectories, recurrence, -1),
                            head_outputs.view(num_trajectories, recurrence, -1),
                            core_outputs.view(num_trajectories, recurrence, -1),
                        )

                    loss = loss + aux_loss

                high_loss = 30.0
                if abs(to_scalar(policy_loss)) > high_loss or abs(to_scalar(value_loss)) > high_loss or abs(to_scalar(exploration_loss)) > high_loss:
                    log.warning(
                        'High loss value: %.4f %.4f %.4f %.4f (recommended to adjust the --reward_scale parameter)',
                        to_scalar(loss), to_scalar(policy_loss), to_scalar(value_loss), to_scalar(exploration_loss),
                    )
                    force_summaries = True

            # update the weights
            with timing.add_time('update'):
                # following advice from https://youtu.be/9mS1fIYj1So set grad to None instead of optimizer.zero_grad()
                for p in self.actor_critic.parameters():
                    p.grad = None
                if self.aux_loss_module is not None:
                    for p in self.aux_loss_module.parameters():
                        p.grad = None

                loss.backward()

                if self.cfg.max_grad_norm > 0.0:
                    with timing.add_time('clip'):
                        torch.nn.utils.clip_grad_norm_(self.actor_critic.parameters(), self.cfg.max_grad_norm)
                        if self.aux_loss_module is not None:
                            torch.nn.utils.clip_grad_norm_(self.aux_loss_module.parameters(), self.cfg.max_grad_norm)

                curr_policy_version = self.train_step  # policy version before the weight update
                # policy workers read the weights concurrently; the lock keeps the update atomic
                with self.policy_lock:
                    self.optimizer.step()

                num_sgd_steps += 1

            with torch.no_grad():
                with timing.add_time('after_optimizer'):
                    self._after_optimizer_step()

                    # collect and report summaries
                    with_summaries = self._should_save_summaries() or force_summaries
                    if with_summaries and not summary_this_epoch:
                        # AttrDict(locals()) snapshots all loop variables for _record_summaries
                        stats_and_summaries = self._record_summaries(AttrDict(locals()))
                        summary_this_epoch = True
                        force_summaries = False

        # end of an epoch
        # this will force policy update on the inference worker (policy worker)
        self.policy_versions[self.policy_id] = self.train_step

        new_epoch_actor_loss = np.mean(epoch_actor_losses)
        loss_delta_abs = abs(prev_epoch_actor_loss - new_epoch_actor_loss)
        if loss_delta_abs < early_stopping_tolerance:
            early_stop = True
            log.debug(
                'Early stopping after %d epochs (%d sgd steps), loss delta %.7f',
                epoch + 1, num_sgd_steps, loss_delta_abs,
            )
            break

        prev_epoch_actor_loss = new_epoch_actor_loss
        epoch_actor_losses = []

    return stats_and_summaries
def _record_summaries(self, train_loop_vars):
    """
    Build the stats dict for reporting from a snapshot of the training loop's locals.

    :param train_loop_vars: AttrDict(locals()) captured inside _train
    :return: stats dict with every value converted to a plain scalar
    """
    var = train_loop_vars

    self.last_summary_time = time.time()

    stats = AttrDict()
    stats.valids_fraction = var.valids.float().mean()
    stats.same_policy_fraction = (var.mb.policy_id == self.policy_id).float().mean()

    # total gradient L2 norm across all parameters
    grad_norm = sum(
        p.grad.data.norm(2).item() ** 2
        for p in self.actor_critic.parameters()
        if p.grad is not None
    ) ** 0.5
    stats.grad_norm = grad_norm
    stats.loss = var.loss
    stats.value = var.result.values.mean()
    stats.entropy = var.action_distribution.entropy().mean()
    stats.policy_loss = var.policy_loss
    stats.value_loss = var.value_loss
    stats.exploration_loss = var.exploration_loss
    if self.aux_loss_module is not None:
        stats.aux_loss = var.aux_loss
    stats.adv_min = var.adv.min()
    stats.adv_max = var.adv.max()
    stats.adv_std = var.adv_std
    stats.max_abs_logprob = torch.abs(var.mb.action_logits).max()

    if hasattr(var.action_distribution, 'summaries'):
        stats.update(var.action_distribution.summaries())

    if var.epoch == self.cfg.ppo_epochs - 1 and var.batch_num == len(var.minibatches) - 1:
        # we collect these stats only for the last PPO batch, or every time if we're only doing one batch, IMPALA-style
        ratio_mean = torch.abs(1.0 - var.ratio).mean().detach()
        ratio_min = var.ratio.min().detach()
        ratio_max = var.ratio.max().detach()
        # log.debug('Learner %d ratio mean min max %.4f %.4f %.4f', self.policy_id, ratio_mean.cpu().item(), ratio_min.cpu().item(), ratio_max.cpu().item())

        value_delta = torch.abs(var.values - var.old_values)
        value_delta_avg, value_delta_max = value_delta.mean(), value_delta.max()

        # calculate KL-divergence with the behaviour policy action distribution
        old_action_distribution = get_action_distribution(
            self.actor_critic.action_space, var.mb.action_logits,
        )
        kl_old = var.action_distribution.kl_divergence(old_action_distribution)
        kl_old_mean = kl_old.mean()

        stats.kl_divergence = kl_old_mean
        stats.value_delta = value_delta_avg
        stats.value_delta_max = value_delta_max
        stats.fraction_clipped = ((var.ratio < var.clip_ratio_low).float() + (var.ratio > var.clip_ratio_high).float()).mean()
        stats.ratio_mean = ratio_mean
        stats.ratio_min = ratio_min
        stats.ratio_max = ratio_max
        stats.num_sgd_steps = var.num_sgd_steps

    # this caused numerical issues on some versions of PyTorch with second moment reaching infinity
    adam_max_second_moment = 0.0
    for key, tensor_state in self.optimizer.state.items():
        adam_max_second_moment = max(tensor_state['exp_avg_sq'].max().item(), adam_max_second_moment)
    stats.adam_max_second_moment = adam_max_second_moment

    # policy-lag statistics, computed only over this policy's own transitions
    version_diff = (var.curr_policy_version - var.mb.policy_version)[var.mb.policy_id == self.policy_id]
    stats.version_diff_avg = version_diff.mean()
    stats.version_diff_min = version_diff.min()
    stats.version_diff_max = version_diff.max()

    for key, value in stats.items():
        stats[key] = to_scalar(value)

    return stats
def _update_pbt(self):
    """To be called from the training loop, same thread that updates the model!

    Applies any pending PBT requests: loading another policy's weights and/or
    replacing hyperparameters that were updated by the PBT controller.
    """
    # pbt_mutex guards load_policy_id / new_cfg, which are written by the
    # task-handling code in _process_pbt_task()
    with self.pbt_mutex:
        if self.load_policy_id is not None:
            assert self.cfg.with_pbt
            log.debug('Learner %d loads policy from %d', self.policy_id, self.load_policy_id)
            self.load_from_checkpoint(self.load_policy_id)
            # request handled, clear it so we don't reload every iteration
            self.load_policy_id = None

        if self.new_cfg is not None:
            for key, value in self.new_cfg.items():
                if self.cfg[key] != value:
                    log.debug('Learner %d replacing cfg parameter %r with new value %r', self.policy_id, key, value)
                    self.cfg[key] = value

            # push the (possibly) new lr/betas into the existing optimizer in place
            for param_group in self.optimizer.param_groups:
                param_group['lr'] = self.cfg.learning_rate
                param_group['betas'] = (self.cfg.adam_beta1, self.cfg.adam_beta2)
                log.debug('Updated optimizer lr to value %.7f, betas: %r', param_group['lr'], param_group['betas'])

            self.new_cfg = None
@staticmethod
def load_checkpoint(checkpoints, device):
    """Load the latest checkpoint from *checkpoints* onto *device*.

    Retries a few times to recover from spurious filesystem errors.
    Returns the checkpoint dict, or None if there is nothing to load or
    every attempt failed.
    """
    if not checkpoints:
        log.warning('No checkpoints found')
        return None

    latest_checkpoint = checkpoints[-1]

    # extra safety mechanism to recover from spurious filesystem errors
    num_attempts = 3
    for attempt in range(num_attempts):
        try:
            log.warning('Loading state from checkpoint %s...', latest_checkpoint)
            checkpoint_dict = torch.load(latest_checkpoint, map_location=device)
            return checkpoint_dict
        except Exception:
            log.exception(f'Could not load from checkpoint, attempt {attempt}')

    # bug fix: previously the function fell off the end of the loop and returned
    # None implicitly, with no indication that all retries were exhausted
    log.error('Failed to load checkpoint %s after %d attempts', latest_checkpoint, num_attempts)
    return None
def _load_state(self, checkpoint_dict, load_progress=True):
    """Restore model/optimizer (and optionally progress counters) from a checkpoint dict.

    load_progress=False is used when adopting another policy's weights (PBT),
    so this learner keeps its own train_step/env_steps counters.
    """
    if load_progress:
        self.train_step = checkpoint_dict['train_step']
        self.env_steps = checkpoint_dict['env_steps']
    self.actor_critic.load_state_dict(checkpoint_dict['model'])
    self.optimizer.load_state_dict(checkpoint_dict['optimizer'])
    if self.aux_loss_module is not None:
        self.aux_loss_module.load_state_dict(checkpoint_dict['aux_loss_module'])
    log.info('Loaded experiment state at training iteration %d, env step %d', self.train_step, self.env_steps)
def init_model(self, timing):
    """Create the actor-critic model (and the optional CPC|A auxiliary loss) on this learner's device."""
    self.actor_critic = create_actor_critic(self.cfg, self.obs_space, self.action_space, timing)
    self.actor_critic.model_to_device(self.device)
    # weights live in shared memory so other processes (policy workers) can access them
    self.actor_critic.share_memory()

    if self.cfg.use_cpc:
        self.aux_loss_module = CPCA(self.cfg, self.action_space)

    # NOTE(review): assumes self.aux_loss_module is pre-initialized (e.g. to None in
    # __init__) when use_cpc is disabled -- confirm against the constructor
    if self.aux_loss_module is not None:
        self.aux_loss_module.to(device=self.device)
def load_from_checkpoint(self, policy_id):
    """Restore learner state from the latest checkpoint of *policy_id*, if one exists."""
    checkpoint_dir = self.checkpoint_dir(self.cfg, policy_id)
    checkpoint_dict = self.load_checkpoint(self.get_checkpoints(checkpoint_dir), self.device)
    if checkpoint_dict is None:
        log.debug('Did not load from checkpoint, starting from scratch!')
        return

    log.debug('Loading model from checkpoint')
    # if we're replacing our policy with another policy (under PBT), let's not reload the env_steps
    self._load_state(checkpoint_dict, load_progress=(policy_id == self.policy_id))
def initialize(self, timing):
    """One-time training-thread setup: seeding, device selection, model + optimizer creation, checkpoint restore."""
    with timing.timeit('init'):
        # initialize the Torch modules
        if self.cfg.seed is None:
            log.info('Starting seed is not provided')
        else:
            log.info('Setting fixed seed %d', self.cfg.seed)
            torch.manual_seed(self.cfg.seed)
            np.random.seed(self.cfg.seed)

        # this does not help with a single experiment
        # but seems to do better when we're running more than one experiment in parallel
        torch.set_num_threads(1)

        if self.cfg.device == 'gpu':
            torch.backends.cudnn.benchmark = True
            # we should already see only one CUDA device, because of env vars
            assert torch.cuda.device_count() == 1
            self.device = torch.device('cuda', index=0)
        else:
            self.device = torch.device('cpu')

        self.init_model(timing)

        # the auxiliary loss parameters are optimized jointly with the model
        params = list(self.actor_critic.parameters())
        if self.aux_loss_module is not None:
            params += list(self.aux_loss_module.parameters())

        self.optimizer = torch.optim.Adam(
            params,
            self.cfg.learning_rate,
            betas=(self.cfg.adam_beta1, self.cfg.adam_beta2),
            eps=self.cfg.adam_eps,
        )

        self.load_from_checkpoint(self.policy_id)

        self._broadcast_model_weights()  # sync the very first version of the weights

        # unblock whoever is waiting for the training thread to come up
        self.train_thread_initialized.set()
def _process_training_data(self, data, timing, wait_stats=None):
    """Run one training iteration on a prepared experience buffer and report stats.

    *data* is a (buffer, batch_size, samples, env_steps) tuple assembled by the
    experience-processing code; *wait_stats* are optional (avg, min, max) wait
    times of the training thread.
    """
    self.is_training = True

    buffer, batch_size, samples, env_steps = data
    assert samples == batch_size * self.cfg.num_batches_per_iteration

    self.env_steps += env_steps
    experience_size = buffer.rewards.shape[0]

    stats = dict(learner_env_steps=self.env_steps, policy_id=self.policy_id)

    with timing.add_time('train'):
        # measure discarding rate before PBT/training can change any state
        discarding_rate = self._discarding_rate()

        self._update_pbt()

        train_stats = self._train(buffer, batch_size, experience_size, timing)

        if train_stats is not None:
            stats['train'] = train_stats

            if wait_stats is not None:
                wait_avg, wait_min, wait_max = wait_stats
                stats['train']['wait_avg'] = wait_avg
                stats['train']['wait_min'] = wait_min
                stats['train']['wait_max'] = wait_max

            stats['train']['discarded_rollouts'] = self.num_discarded_rollouts
            stats['train']['discarding_rate'] = discarding_rate

            stats['stats'] = memory_stats('learner', self.device)

    self.is_training = False

    try:
        safe_put(self.report_queue, stats, queue_name='report')
    except Full:
        # best effort: dropping one stats report is preferable to blocking training
        log.warning('Could not report training stats, the report queue is full!')
def _train_loop(self):
    """Main loop of the background training thread: consume experience buffers until terminated."""
    timing = Timing()
    self.initialize(timing)

    wait_times = deque([], maxlen=self.cfg.num_workers)
    last_cache_cleanup = time.time()

    while not self.terminate:
        with timing.timeit('train_wait'):
            # blocks until the main thread queues a prepared experience buffer
            data = safe_get(self.experience_buffer_queue)

        if self.terminate:
            break

        wait_stats = None
        wait_times.append(timing.train_wait)

        # only report wait stats once we have a full window of samples
        if len(wait_times) >= wait_times.maxlen:
            wait_times_arr = np.asarray(wait_times)
            wait_avg = np.mean(wait_times_arr)
            wait_min, wait_max = wait_times_arr.min(), wait_times_arr.max()
            # log.debug(
            #     'Training thread had to wait %.5f s for the new experience buffer (avg %.5f)',
            #     timing.train_wait, wait_avg,
            # )
            wait_stats = (wait_avg, wait_min, wait_max)

        self._process_training_data(data, timing, wait_stats)
        self.num_batches_processed += 1

        # clean up CUDA caches every 5 minutes, and aggressively during the first
        # ~50 batches while memory usage is still settling (skipped in benchmark mode)
        if time.time() - last_cache_cleanup > 300.0 or (not self.cfg.benchmark and self.num_batches_processed < 50):
            if self.cfg.device == 'gpu':
                torch.cuda.empty_cache()
                torch.cuda.ipc_collect()
            last_cache_cleanup = time.time()

    # give in-flight operations a moment to finish before tearing down
    time.sleep(0.3)
    log.info('Train loop timing: %s', timing)
    del self.actor_critic
    del self.device
def _experience_collection_rate_stats(self):
now = time.time()
if now - self.discarded_experience_timer > 1.0:
self.discarded_experience_timer = now
self.discarded_experience_over_time.append((now, self.num_discarded_rollouts))
def _discarding_rate(self):
    """Rollouts discarded per second, computed over the recorded history window."""
    history = self.discarded_experience_over_time
    if len(history) <= 1:
        # not enough samples to measure a rate yet
        return 0
    (t_first, n_first), (t_last, n_last) = history[0], history[-1]
    # EPS guards against division by zero when both samples share a timestamp
    return (n_last - n_first) / (t_last - t_first + EPS)
def _extract_rollouts(self, data):
    """Attach the shared trajectory tensors to each rollout descriptor and wrap it as an AttrDict."""
    extracted = []
    for descriptor in data:
        # look up the shared-memory tensors for this trajectory buffer slot
        descriptor['t'] = self.rollout_tensors.index(descriptor['traj_buffer_idx'])
        extracted.append(AttrDict(descriptor))
    return extracted
def _process_pbt_task(self, pbt_task):
    """Record a PBT request (save model / load model / update cfg) under the PBT lock.

    The actual work happens later in _update_pbt(), on the training thread.
    """
    task_type, payload = pbt_task

    with self.pbt_mutex:
        if task_type == PbtTask.SAVE_MODEL:
            assert payload == self.policy_id
            self.should_save_model = True
        elif task_type == PbtTask.LOAD_MODEL:
            policy_id, new_policy_id = payload
            assert policy_id == self.policy_id
            assert new_policy_id is not None
            self.load_policy_id = new_policy_id
        elif task_type == PbtTask.UPDATE_CFG:
            policy_id, new_cfg = payload
            assert policy_id == self.policy_id
            self.new_cfg = new_cfg
def _accumulated_too_much_experience(self, rollouts):
max_minibatches_to_accumulate = self.cfg.num_minibatches_to_accumulate
if max_minibatches_to_accumulate == -1:
# default value
max_minibatches_to_accumulate = 2 * self.cfg.num_batches_per_iteration
# allow the max batches to accumulate, plus the minibatches we're currently training on
max_minibatches_on_learner = max_minibatches_to_accumulate + self.cfg.num_batches_per_iteration
minibatches_currently_training = int(self.is_training) * self.cfg.num_batches_per_iteration
rollouts_per_minibatch = self.cfg.batch_size / self.cfg.rollout
# count contribution from unprocessed rollouts
minibatches_currently_accumulated = len(rollouts) / rollouts_per_minibatch
# count minibatches ready for training
minibatches_currently_accumulated += self.experience_buffer_queue.qsize() * self.cfg.num_batches_per_iteration
total_minibatches_on_learner = minibatches_currently_training + minibatches_currently_accumulated
return total_minibatches_on_learner >= max_minibatches_on_learner
def _run(self):
    """Learner process entry point: consume tasks, gate experience collection, prepare and train on rollouts."""
    self.deferred_initialization()
    log.info(f'LEARNER\tpid {os.getpid()}\tparent {os.getppid()}')

    # workers should ignore Ctrl+C because the termination is handled in the event loop by a special msg
    signal.signal(signal.SIGINT, signal.SIG_IGN)

    try:
        psutil.Process().nice(self.cfg.default_niceness)
    except psutil.AccessDenied:
        log.error('Low niceness requires sudo!')

    if self.cfg.device == 'gpu':
        cuda_envvars_for_policy(self.policy_id, 'learner')
    torch.multiprocessing.set_sharing_strategy('file_system')
    torch.set_num_threads(self.cfg.learner_main_loop_num_cores)

    timing = Timing()

    rollouts = []

    if self.train_in_background:
        self.training_thread.start()
    else:
        # synchronous mode: this thread does its own initialization and training
        self.initialize(timing)
        log.error(
            'train_in_background set to False on learner %d! This is slow, use only for testing!', self.policy_id,
        )

    while not self.terminate:
        # drain all pending tasks before doing any work this iteration
        while True:
            try:
                tasks = self.task_queue.get_many(timeout=0.005)

                for task_type, data in tasks:
                    if task_type == TaskType.TRAIN:
                        with timing.add_time('extract'):
                            rollouts.extend(self._extract_rollouts(data))
                            # log.debug('Learner %d has %d rollouts', self.policy_id, len(rollouts))
                    elif task_type == TaskType.INIT:
                        self._init()
                    elif task_type == TaskType.TERMINATE:
                        time.sleep(0.3)
                        log.info('GPU learner timing: %s', timing)
                        self._terminate()
                        break
                    elif task_type == TaskType.PBT:
                        self._process_pbt_task(data)
            except Empty:
                break

        if self._accumulated_too_much_experience(rollouts):
            # if we accumulated too much experience, signal the policy workers to stop experience collection
            if not self.stop_experience_collection[self.policy_id]:
                self.stop_experience_collection_num_msgs += 1
                # TODO: add a logger function for this
                if self.stop_experience_collection_num_msgs >= 50:
                    log.info(
                        'Learner %d accumulated too much experience, stop experience collection! '
                        'Learner is likely a bottleneck in your experiment (%d times)',
                        self.policy_id, self.stop_experience_collection_num_msgs,
                    )
                    self.stop_experience_collection_num_msgs = 0

            self.stop_experience_collection[self.policy_id] = True
        elif self.stop_experience_collection[self.policy_id]:
            # otherwise, resume the experience collection if it was stopped
            self.stop_experience_collection[self.policy_id] = False
            with self.resume_experience_collection_cv:
                self.resume_experience_collection_num_msgs += 1
                if self.resume_experience_collection_num_msgs >= 50:
                    log.debug('Learner %d is resuming experience collection!', self.policy_id)
                    self.resume_experience_collection_num_msgs = 0
                self.resume_experience_collection_cv.notify_all()

        # batch preparation needs no gradients
        with torch.no_grad():
            rollouts = self._process_rollouts(rollouts, timing)

        if not self.train_in_background:
            # synchronous mode: train right here on whatever was queued
            while not self.experience_buffer_queue.empty():
                training_data = self.experience_buffer_queue.get()
                self._process_training_data(training_data, timing)

        self._experience_collection_rate_stats()

    if self.train_in_background:
        # sentinel None unblocks the training thread so it can observe self.terminate
        self.experience_buffer_queue.put(None)
        self.training_thread.join()
def init(self):
    """Ask the learner process to initialize, blocking until it signals readiness."""
    self.task_queue.put((TaskType.INIT, None))
    self.initialized_event.wait()
def save_model(self, timeout=None):
    """Request a model checkpoint from the learner process and wait for confirmation.

    Logs a warning if the learner does not confirm within *timeout* seconds
    (None means wait forever).
    """
    self.model_saved_event.clear()
    self.task_queue.put((TaskType.PBT, (PbtTask.SAVE_MODEL, self.policy_id)))
    log.debug('Wait while learner %d saves the model...', self.policy_id)
    saved = self.model_saved_event.wait(timeout=timeout)
    if saved:
        log.debug('Learner %d saved the model!', self.policy_id)
    else:
        log.warning('Model saving request timed out!')
    self.model_saved_event.clear()
def close(self):
    """Request termination of the learner process."""
    self.task_queue.put((TaskType.TERMINATE, None))
    # release policy workers that may still be paused by a stop-collection flag we set
    self.shared_buffers._stop_experience_collection[self.policy_id] = False
def join(self):
    """Wait for the learner process to exit, killing it if it hangs."""
    join_or_kill(self.process)
|
base_test.py | # -*- coding: utf-8 -*-
import copy
import datetime
import json
import threading
import elasticsearch
import mock
import pytest
from elasticsearch.exceptions import ConnectionError
from elasticsearch.exceptions import ElasticsearchException
from elastalert.enhancements import BaseEnhancement
from elastalert.enhancements import DropMatchException
from elastalert.kibana import dashboard_temp
from elastalert.util import dt_to_ts
from elastalert.util import dt_to_unix
from elastalert.util import dt_to_unixms
from elastalert.util import EAException
from elastalert.util import ts_now
from elastalert.util import ts_to_dt
from elastalert.util import unix_to_dt
# Fixed one-day query window shared by most tests in this module.
START_TIMESTAMP = '2014-09-26T12:34:45Z'
END_TIMESTAMP = '2014-09-27T12:34:45Z'
# The same boundaries as parsed datetime objects (via elastalert's ts_to_dt).
START = ts_to_dt(START_TIMESTAMP)
END = ts_to_dt(END_TIMESTAMP)
def _set_hits(ea_inst, hits):
res = {'hits': {'total': len(hits), 'hits': hits}}
ea_inst.client_es.return_value = res
def generate_hits(timestamps, **kwargs):
    """Build a fake Elasticsearch search response with one hit per timestamp.

    Extra keyword arguments become fields of every hit's _source. Mirrors
    process_hits() by copying the hit metadata (_id/_type/_index) into _source.
    """
    hits = []
    for idx, ts in enumerate(timestamps):
        source = {'@timestamp': ts}
        source.update(kwargs)
        hit = {'_id': 'id{}'.format(idx),
               '_type': 'logs',
               '_index': 'idx',
               '_source': source}
        # emulate process_hits(): metadata is duplicated into _source
        for meta in ('_id', '_type', '_index'):
            source[meta] = hit[meta]
        hits.append(hit)
    return {'hits': {'total': len(hits), 'hits': hits}}
def assert_alerts(ea_inst, calls):
    """ Takes a list of lists of timestamps. Asserts that an alert was called for each list, containing those timestamps. """
    alerter = ea_inst.rules[0]['alert'][0].alert
    assert alerter.call_count == len(calls)
    for call_num, call_args in enumerate(alerter.call_args_list):
        matches = call_args[0][0]
        expected = calls[call_num]
        # every alerted match must carry one of the expected timestamps...
        assert not any([match['@timestamp'] not in expected for match in matches])
        # ...and the match count must be exact
        assert len(matches) == len(expected)
def test_starttime(ea):
    """ts_to_dt must reject strings that are not valid timestamps."""
    for bad_ts in ('2014-13-13',
                   '2014-11-24T30:00:00',
                   'Not A Timestamp'):
        with pytest.raises((TypeError, ValueError)):
            ts_to_dt(bad_ts)
def test_init_rule(ea):
    """init_rule must copy run-state properties into a reloaded rule, or reset them for a brand-new rule."""
    # Simulate state of a rule just loaded from a file
    ea.rules[0]['minimum_starttime'] = datetime.datetime.now()
    new_rule = copy.copy(ea.rules[0])
    list(map(new_rule.pop, ['agg_matches', 'current_aggregate_id', 'processed_hits', 'minimum_starttime']))

    # Properties are copied from ea.rules[0]
    ea.rules[0]['starttime'] = '2014-01-02T00:11:22'
    ea.rules[0]['processed_hits'] = ['abcdefg']
    new_rule = ea.init_rule(new_rule, False)
    for prop in ['starttime', 'agg_matches', 'current_aggregate_id', 'processed_hits', 'minimum_starttime']:
        assert new_rule[prop] == ea.rules[0][prop]

    # Properties are fresh
    new_rule = ea.init_rule(new_rule, True)
    new_rule.pop('starttime')
    assert 'starttime' not in new_rule
    assert new_rule['processed_hits'] == {}
def test_query(ea):
    """run_query issues an ES-5-style 'filtered' range query over (START, END], sorted ascending by timestamp."""
    ea.thread_data.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}}
    ea.run_query(ea.rules[0], START, END)
    ea.thread_data.current_es.search.assert_called_with(body={
        'query': {'filtered': {
            'filter': {'bool': {'must': [{'range': {'@timestamp': {'lte': END_TIMESTAMP, 'gt': START_TIMESTAMP}}}]}}}},
        'sort': [{'@timestamp': {'order': 'asc'}}]}, index='idx', _source_include=['@timestamp'],
        ignore_unavailable=True,
        size=ea.rules[0]['max_query_size'], scroll=ea.conf['scroll_keepalive'])
def test_query_sixsix(ea_sixsix):
    """Same query as test_query, but the ES 6.6 client must use the 'bool' query syntax instead of 'filtered'."""
    ea_sixsix.thread_data.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}}
    ea_sixsix.run_query(ea_sixsix.rules[0], START, END)
    ea_sixsix.thread_data.current_es.search.assert_called_with(body={
        'query': {'bool': {
            'filter': {'bool': {'must': [{'range': {'@timestamp': {'lte': END_TIMESTAMP, 'gt': START_TIMESTAMP}}}]}}}},
        'sort': [{'@timestamp': {'order': 'asc'}}]}, index='idx', _source_include=['@timestamp'],
        ignore_unavailable=True,
        size=ea_sixsix.rules[0]['max_query_size'], scroll=ea_sixsix.conf['scroll_keepalive'])
def test_query_with_fields(ea):
    """With _source disabled, the query must request 'fields' instead of _source_include."""
    ea.rules[0]['_source_enabled'] = False
    ea.thread_data.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}}
    ea.run_query(ea.rules[0], START, END)
    ea.thread_data.current_es.search.assert_called_with(body={
        'query': {'filtered': {
            'filter': {'bool': {'must': [{'range': {'@timestamp': {'lte': END_TIMESTAMP, 'gt': START_TIMESTAMP}}}]}}}},
        'sort': [{'@timestamp': {'order': 'asc'}}], 'fields': ['@timestamp']}, index='idx', ignore_unavailable=True,
        size=ea.rules[0]['max_query_size'], scroll=ea.conf['scroll_keepalive'])
def test_query_sixsix_with_fields(ea_sixsix):
    """With _source disabled on ES 6.6, the query must request 'stored_fields'."""
    ea_sixsix.rules[0]['_source_enabled'] = False
    ea_sixsix.thread_data.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}}
    ea_sixsix.run_query(ea_sixsix.rules[0], START, END)
    ea_sixsix.thread_data.current_es.search.assert_called_with(body={
        'query': {'bool': {
            'filter': {'bool': {'must': [{'range': {'@timestamp': {'lte': END_TIMESTAMP, 'gt': START_TIMESTAMP}}}]}}}},
        'sort': [{'@timestamp': {'order': 'asc'}}], 'stored_fields': ['@timestamp']}, index='idx',
        ignore_unavailable=True,
        size=ea_sixsix.rules[0]['max_query_size'], scroll=ea_sixsix.conf['scroll_keepalive'])
def test_query_with_unix(ea):
    """Range bounds must be serialized with the rule's dt_to_ts (unix seconds here)."""
    ea.rules[0]['timestamp_type'] = 'unix'
    ea.rules[0]['dt_to_ts'] = dt_to_unix
    ea.thread_data.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}}
    ea.run_query(ea.rules[0], START, END)
    # expected numeric bounds after conversion
    start_unix = dt_to_unix(START)
    end_unix = dt_to_unix(END)
    ea.thread_data.current_es.search.assert_called_with(
        body={'query': {'filtered': {
            'filter': {'bool': {'must': [{'range': {'@timestamp': {'lte': end_unix, 'gt': start_unix}}}]}}}},
        'sort': [{'@timestamp': {'order': 'asc'}}]}, index='idx', _source_include=['@timestamp'],
        ignore_unavailable=True,
        size=ea.rules[0]['max_query_size'], scroll=ea.conf['scroll_keepalive'])
def test_query_sixsix_with_unix(ea_sixsix):
    """Unix-second range bounds combined with the ES 6.6 'bool' query syntax."""
    ea_sixsix.rules[0]['timestamp_type'] = 'unix'
    ea_sixsix.rules[0]['dt_to_ts'] = dt_to_unix
    ea_sixsix.thread_data.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}}
    ea_sixsix.run_query(ea_sixsix.rules[0], START, END)
    start_unix = dt_to_unix(START)
    end_unix = dt_to_unix(END)
    ea_sixsix.thread_data.current_es.search.assert_called_with(
        body={'query': {'bool': {
            'filter': {'bool': {'must': [{'range': {'@timestamp': {'lte': end_unix, 'gt': start_unix}}}]}}}},
        'sort': [{'@timestamp': {'order': 'asc'}}]}, index='idx', _source_include=['@timestamp'],
        ignore_unavailable=True,
        size=ea_sixsix.rules[0]['max_query_size'], scroll=ea_sixsix.conf['scroll_keepalive'])
def test_query_with_unixms(ea):
    """Range bounds must be serialized with the rule's dt_to_ts (unix milliseconds here)."""
    ea.rules[0]['timestamp_type'] = 'unixms'
    ea.rules[0]['dt_to_ts'] = dt_to_unixms
    ea.thread_data.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}}
    ea.run_query(ea.rules[0], START, END)
    start_unix = dt_to_unixms(START)
    end_unix = dt_to_unixms(END)
    ea.thread_data.current_es.search.assert_called_with(
        body={'query': {'filtered': {
            'filter': {'bool': {'must': [{'range': {'@timestamp': {'lte': end_unix, 'gt': start_unix}}}]}}}},
        'sort': [{'@timestamp': {'order': 'asc'}}]}, index='idx', _source_include=['@timestamp'],
        ignore_unavailable=True,
        size=ea.rules[0]['max_query_size'], scroll=ea.conf['scroll_keepalive'])
def test_query_sixsix_with_unixms(ea_sixsix):
    """Unix-millisecond range bounds combined with the ES 6.6 'bool' query syntax."""
    ea_sixsix.rules[0]['timestamp_type'] = 'unixms'
    ea_sixsix.rules[0]['dt_to_ts'] = dt_to_unixms
    ea_sixsix.thread_data.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}}
    ea_sixsix.run_query(ea_sixsix.rules[0], START, END)
    start_unix = dt_to_unixms(START)
    end_unix = dt_to_unixms(END)
    ea_sixsix.thread_data.current_es.search.assert_called_with(
        body={'query': {'bool': {
            'filter': {'bool': {'must': [{'range': {'@timestamp': {'lte': end_unix, 'gt': start_unix}}}]}}}},
        'sort': [{'@timestamp': {'order': 'asc'}}]}, index='idx', _source_include=['@timestamp'],
        ignore_unavailable=True,
        size=ea_sixsix.rules[0]['max_query_size'], scroll=ea_sixsix.conf['scroll_keepalive'])
def test_no_hits(ea):
    """An empty search result must not feed any data into the rule type."""
    ea.thread_data.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}}
    ea.run_query(ea.rules[0], START, END)
    assert ea.rules[0]['type'].add_data.call_count == 0
def test_no_terms_hits(ea):
    """A terms query with zero hits must not call add_terms_data on the rule type."""
    ea.rules[0]['use_terms_query'] = True
    ea.rules[0]['query_key'] = 'QWERTY'
    ea.rules[0]['doc_type'] = 'uiop'
    # terms queries go through the deprecated_search API
    ea.thread_data.current_es.deprecated_search.return_value = {'hits': {'total': 0, 'hits': []}}
    ea.run_query(ea.rules[0], START, END)
    assert ea.rules[0]['type'].add_terms_data.call_count == 0
def test_some_hits(ea):
    """Hits returned by the search are passed to the rule type as parsed _source documents."""
    hits = generate_hits([START_TIMESTAMP, END_TIMESTAMP])
    # same hits with datetime timestamps, i.e. what they look like after parsing
    hits_dt = generate_hits([START, END])
    ea.thread_data.current_es.search.return_value = hits
    ea.run_query(ea.rules[0], START, END)
    assert ea.rules[0]['type'].add_data.call_count == 1
    ea.rules[0]['type'].add_data.assert_called_with([x['_source'] for x in hits_dt['hits']['hits']])
def test_some_hits_unix(ea):
    """Unix-epoch hit timestamps must be converted to datetimes before reaching the rule type."""
    ea.rules[0]['timestamp_type'] = 'unix'
    ea.rules[0]['dt_to_ts'] = dt_to_unix
    ea.rules[0]['ts_to_dt'] = unix_to_dt
    hits = generate_hits([dt_to_unix(START), dt_to_unix(END)])
    hits_dt = generate_hits([START, END])
    # deepcopy: presumably run_query mutates the hits in place during parsing,
    # so keep the local `hits` fixture pristine
    ea.thread_data.current_es.search.return_value = copy.deepcopy(hits)
    ea.run_query(ea.rules[0], START, END)
    assert ea.rules[0]['type'].add_data.call_count == 1
    ea.rules[0]['type'].add_data.assert_called_with([x['_source'] for x in hits_dt['hits']['hits']])
def _duplicate_hits_generator(timestamps, **kwargs):
    """Yield an endless stream of identical search responses built from *timestamps*.

    Used as a Mock side_effect so each query run receives the same hits.
    """
    while True:
        yield generate_hits(timestamps, **kwargs)
def test_duplicate_timestamps(ea):
    """Hits already processed in a previous run must be deduplicated and not re-added."""
    ea.thread_data.current_es.search.side_effect = _duplicate_hits_generator([START_TIMESTAMP] * 3, blah='duplicate')
    ea.run_query(ea.rules[0], START, ts_to_dt('2014-01-01T00:00:00Z'))

    # all 3 identical hits are delivered in one add_data call
    assert len(ea.rules[0]['type'].add_data.call_args_list[0][0][0]) == 3
    assert ea.rules[0]['type'].add_data.call_count == 1

    # Run the query again, duplicates will be removed and not added
    ea.run_query(ea.rules[0], ts_to_dt('2014-01-01T00:00:00Z'), END)
    assert ea.rules[0]['type'].add_data.call_count == 1
def test_match(ea):
    """A match produced by the rule type triggers exactly one alert with the enriched match body."""
    hits = generate_hits([START_TIMESTAMP, END_TIMESTAMP])
    ea.thread_data.current_es.search.return_value = hits
    ea.rules[0]['type'].matches = [{'@timestamp': END}]
    with mock.patch('elastalert.elastalert.elasticsearch_client'):
        ea.run_rule(ea.rules[0], END, START)
    # bug fix: the original used Mock.called_with(), which is NOT an assertion --
    # it silently creates a child mock and always "passes". Assert on the real
    # alert payload: a list of matches enriched with num_hits/num_matches
    # (same enriched shape asserted in test_match_with_module).
    ea.rules[0]['alert'][0].alert.assert_called_with([{'@timestamp': END, 'num_hits': 0, 'num_matches': 1}])
    assert ea.rules[0]['alert'][0].alert.call_count == 1
def test_run_rule_calls_garbage_collect(ea):
    """run_rule must invoke garbage_collect once per run_every interval within the buffer window."""
    start_time = '2014-09-26T00:00:00Z'
    end_time = '2014-09-26T12:00:00Z'
    ea.buffer_time = datetime.timedelta(hours=1)
    ea.run_every = datetime.timedelta(hours=1)
    with mock.patch.object(ea.rules[0]['type'], 'garbage_collect') as mock_gc, \
            mock.patch.object(ea, 'run_query'):
        ea.run_rule(ea.rules[0], ts_to_dt(end_time), ts_to_dt(start_time))

    # Running ElastAlert every hour for 12 hours, we should see self.garbage_collect called 12 times.
    assert mock_gc.call_count == 12

    # The calls should be spaced 1 hour apart
    expected_calls = [ts_to_dt(start_time) + datetime.timedelta(hours=i) for i in range(1, 13)]
    for e in expected_calls:
        mock_gc.assert_any_call(e)
def run_rule_query_exception(ea, mock_es):
    """Helper: run a rule against an ES client whose queries raise, and verify no data was processed."""
    with mock.patch('elastalert.elastalert.elasticsearch_client') as mock_es_init:
        mock_es_init.return_value = mock_es
        ea.run_rule(ea.rules[0], END, START)

    # Assert neither add_data nor garbage_collect were called
    # and that starttime did not change
    assert ea.rules[0].get('starttime') == START
    assert ea.rules[0]['type'].add_data.call_count == 0
    assert ea.rules[0]['type'].garbage_collect.call_count == 0
    assert ea.rules[0]['type'].add_count_data.call_count == 0
def test_query_exception(ea):
    """A search exception during a regular query must leave the rule state untouched."""
    mock_es = mock.Mock()
    mock_es.search.side_effect = ElasticsearchException
    run_rule_query_exception(ea, mock_es)
def test_query_exception_count_query(ea):
    """A count-query exception must likewise leave the rule state untouched."""
    ea.rules[0]['use_count_query'] = True
    ea.rules[0]['doc_type'] = 'blahblahblahblah'
    mock_es = mock.Mock()
    mock_es.count.side_effect = ElasticsearchException
    run_rule_query_exception(ea, mock_es)
def test_match_with_module(ea):
    """Match enhancements attached to a rule must be invoked with the enriched match."""
    mod = BaseEnhancement(ea.rules[0])
    mod.process = mock.Mock()
    ea.rules[0]['match_enhancements'] = [mod]
    # reuse the basic match scenario, now with the enhancement installed
    test_match(ea)
    mod.process.assert_called_with({'@timestamp': END, 'num_hits': 0, 'num_matches': 1})
def test_match_with_module_from_pending(ea):
    """Enhancements run on pending (writeback) alerts only if the rule still has aggregation configured."""
    mod = BaseEnhancement(ea.rules[0])
    mod.process = mock.Mock()
    ea.rules[0]['match_enhancements'] = [mod]
    ea.rules[0].pop('aggregation')
    pending_alert = {'match_body': {'foo': 'bar'}, 'rule_name': ea.rules[0]['name'],
                     'alert_time': START_TIMESTAMP, '@timestamp': START_TIMESTAMP}

    # First call, return the pending alert, second, no associated aggregated alerts
    ea.writeback_es.deprecated_search.side_effect = [{'hits': {'hits': [{'_id': 'ABCD', '_index': 'wb', '_source': pending_alert}]}},
                                                     {'hits': {'hits': []}}]
    ea.send_pending_alerts()
    assert mod.process.call_count == 0

    # If aggregation is set, enhancement IS called
    pending_alert = {'match_body': {'foo': 'bar'}, 'rule_name': ea.rules[0]['name'],
                     'alert_time': START_TIMESTAMP, '@timestamp': START_TIMESTAMP}
    ea.writeback_es.deprecated_search.side_effect = [{'hits': {'hits': [{'_id': 'ABCD', '_index': 'wb', '_source': pending_alert}]}},
                                                     {'hits': {'hits': []}}]
    ea.rules[0]['aggregation'] = datetime.timedelta(minutes=10)
    ea.send_pending_alerts()
    assert mod.process.call_count == 1
def test_match_with_module_with_agg(ea):
    """Enhancements must NOT run at match time when the alert is being aggregated."""
    mod = BaseEnhancement(ea.rules[0])
    mod.process = mock.Mock()
    ea.rules[0]['match_enhancements'] = [mod]
    ea.rules[0]['aggregation'] = datetime.timedelta(minutes=15)
    hits = generate_hits([START_TIMESTAMP, END_TIMESTAMP])
    ea.thread_data.current_es.search.return_value = hits
    ea.rules[0]['type'].matches = [{'@timestamp': END}]
    with mock.patch('elastalert.elastalert.elasticsearch_client'):
        ea.run_rule(ea.rules[0], END, START)
    # the enhancement is deferred until the aggregated alert is actually sent
    assert mod.process.call_count == 0
def test_match_with_enhancements_first(ea):
    """run_enhancements_first forces enhancements to run at match time even for aggregated alerts;
    raising DropMatchException from an enhancement discards the match before aggregation."""
    mod = BaseEnhancement(ea.rules[0])
    mod.process = mock.Mock()
    ea.rules[0]['match_enhancements'] = [mod]
    ea.rules[0]['aggregation'] = datetime.timedelta(minutes=15)
    ea.rules[0]['run_enhancements_first'] = True
    hits = generate_hits([START_TIMESTAMP, END_TIMESTAMP])
    ea.thread_data.current_es.search.return_value = hits
    ea.rules[0]['type'].matches = [{'@timestamp': END}]
    with mock.patch('elastalert.elastalert.elasticsearch_client'):
        with mock.patch.object(ea, 'add_aggregated_alert') as add_alert:
            ea.run_rule(ea.rules[0], END, START)

    mod.process.assert_called_with({'@timestamp': END, 'num_hits': 0, 'num_matches': 1})
    assert add_alert.call_count == 1

    # Assert that dropmatchexception behaves properly
    mod.process = mock.MagicMock(side_effect=DropMatchException)
    ea.rules[0]['type'].matches = [{'@timestamp': END}]
    with mock.patch('elastalert.elastalert.elasticsearch_client'):
        with mock.patch.object(ea, 'add_aggregated_alert') as add_alert:
            ea.run_rule(ea.rules[0], END, START)

    # the enhancement ran, but the dropped match never reached aggregation
    mod.process.assert_called_with({'@timestamp': END, 'num_hits': 0, 'num_matches': 1})
    assert add_alert.call_count == 0
def test_agg_matchtime(ea):
    """Aggregation by match time: matches within the 10-minute window share one aggregate and alert together."""
    ea.max_aggregation = 1337
    hits_timestamps = ['2014-09-26T12:34:45', '2014-09-26T12:40:45', '2014-09-26T12:47:45']
    alerttime1 = dt_to_ts(ts_to_dt(hits_timestamps[0]) + datetime.timedelta(minutes=10))
    hits = generate_hits(hits_timestamps)
    ea.thread_data.current_es.search.return_value = hits
    with mock.patch('elastalert.elastalert.elasticsearch_client') as mock_es:
        # Aggregate first two, query over full range
        mock_es.return_value = ea.thread_data.current_es
        ea.rules[0]['aggregate_by_match_time'] = True
        ea.rules[0]['aggregation'] = datetime.timedelta(minutes=10)
        ea.rules[0]['type'].matches = [{'@timestamp': h} for h in hits_timestamps]
        ea.run_rule(ea.rules[0], END, START)

    # Assert that the three matches were added to Elasticsearch
    call1 = ea.writeback_es.index.call_args_list[0][1]['body']
    call2 = ea.writeback_es.index.call_args_list[1][1]['body']
    call3 = ea.writeback_es.index.call_args_list[2][1]['body']
    assert call1['match_body']['@timestamp'] == '2014-09-26T12:34:45'
    assert not call1['alert_sent']
    assert 'aggregate_id' not in call1
    assert call1['alert_time'] == alerttime1

    assert call2['match_body']['@timestamp'] == '2014-09-26T12:40:45'
    assert not call2['alert_sent']
    assert call2['aggregate_id'] == 'ABCD'

    # third match is outside the first window (12:34:45 + 10min), so it starts a new aggregate
    assert call3['match_body']['@timestamp'] == '2014-09-26T12:47:45'
    assert not call3['alert_sent']
    assert 'aggregate_id' not in call3

    # First call - Find all pending alerts (only entries without agg_id)
    # Second call - Find matches with agg_id == 'ABCD'
    # Third call - Find matches with agg_id == 'CDEF'
    ea.writeback_es.deprecated_search.side_effect = [{'hits': {'hits': [{'_id': 'ABCD', '_index': 'wb', '_source': call1},
                                                                        {'_id': 'CDEF', '_index': 'wb', '_source': call3}]}},
                                                     {'hits': {'hits': [{'_id': 'BCDE', '_index': 'wb', '_source': call2}]}},
                                                     {'hits': {'total': 0, 'hits': []}}]

    with mock.patch('elastalert.elastalert.elasticsearch_client') as mock_es:
        ea.send_pending_alerts()
        # Assert that current_es was refreshed from the aggregate rules
        # NOTE(review): Mock.called_with() is not an assertion -- it auto-creates a
        # child mock and is always truthy, so this line can never fail. It was most
        # likely meant to be assert_called_with(); verify the intended call args.
        assert mock_es.called_with(host='', port='')
        assert mock_es.call_count == 2
    assert_alerts(ea, [hits_timestamps[:2], hits_timestamps[2:]])

    call1 = ea.writeback_es.deprecated_search.call_args_list[7][1]['body']
    call2 = ea.writeback_es.deprecated_search.call_args_list[8][1]['body']
    call3 = ea.writeback_es.deprecated_search.call_args_list[9][1]['body']
    call4 = ea.writeback_es.deprecated_search.call_args_list[10][1]['body']

    assert 'alert_time' in call2['filter']['range']
    assert call3['query']['query_string']['query'] == 'aggregate_id:ABCD'
    assert call4['query']['query_string']['query'] == 'aggregate_id:CDEF'
    # aggregate lookups are capped by max_aggregation
    assert ea.writeback_es.deprecated_search.call_args_list[9][1]['size'] == 1337
def test_agg_not_matchtime(ea):
    """Without aggregate_by_match_time the window is anchored at the (mocked) current time,
    so all three matches land in one aggregate."""
    ea.max_aggregation = 1337
    hits_timestamps = ['2014-09-26T12:34:45', '2014-09-26T12:40:45', '2014-09-26T12:47:45']
    match_time = ts_to_dt('2014-09-26T12:55:00Z')
    hits = generate_hits(hits_timestamps)
    ea.thread_data.current_es.search.return_value = hits
    with mock.patch('elastalert.elastalert.elasticsearch_client'):
        with mock.patch('elastalert.elastalert.ts_now', return_value=match_time):
            ea.rules[0]['aggregation'] = datetime.timedelta(minutes=10)
            ea.rules[0]['type'].matches = [{'@timestamp': h} for h in hits_timestamps]
            ea.run_rule(ea.rules[0], END, START)

    # Assert that the three matches were added to Elasticsearch
    call1 = ea.writeback_es.index.call_args_list[0][1]['body']
    call2 = ea.writeback_es.index.call_args_list[1][1]['body']
    call3 = ea.writeback_es.index.call_args_list[2][1]['body']
    assert call1['match_body']['@timestamp'] == '2014-09-26T12:34:45'
    assert not call1['alert_sent']
    assert 'aggregate_id' not in call1
    # alert time is relative to the mocked "now", not the match timestamp
    assert call1['alert_time'] == dt_to_ts(match_time + datetime.timedelta(minutes=10))

    assert call2['match_body']['@timestamp'] == '2014-09-26T12:40:45'
    assert not call2['alert_sent']
    assert call2['aggregate_id'] == 'ABCD'

    assert call3['match_body']['@timestamp'] == '2014-09-26T12:47:45'
    assert not call3['alert_sent']
    assert call3['aggregate_id'] == 'ABCD'
def test_agg_cron(ea):
    """Cron-style aggregation schedules alert_time from croniter rather than a fixed timedelta."""
    ea.max_aggregation = 1337
    hits_timestamps = ['2014-09-26T12:34:45', '2014-09-26T12:40:45', '2014-09-26T12:47:45']
    hits = generate_hits(hits_timestamps)
    ea.thread_data.current_es.search.return_value = hits
    alerttime1 = dt_to_ts(ts_to_dt('2014-09-26T12:46:00'))
    alerttime2 = dt_to_ts(ts_to_dt('2014-09-26T13:04:00'))

    with mock.patch('elastalert.elastalert.elasticsearch_client'):
        with mock.patch('elastalert.elastalert.croniter.get_next') as mock_ts:
            # Aggregate first two, query over full range
            mock_ts.side_effect = [dt_to_unix(ts_to_dt('2014-09-26T12:46:00')),
                                   dt_to_unix(ts_to_dt('2014-09-26T13:04:00'))]
            ea.rules[0]['aggregation'] = {'schedule': '*/5 * * * *'}
            ea.rules[0]['type'].matches = [{'@timestamp': h} for h in hits_timestamps]
            ea.run_rule(ea.rules[0], END, START)

    # Assert that the three matches were added to Elasticsearch
    call1 = ea.writeback_es.index.call_args_list[0][1]['body']
    call2 = ea.writeback_es.index.call_args_list[1][1]['body']
    call3 = ea.writeback_es.index.call_args_list[2][1]['body']
    assert call1['match_body']['@timestamp'] == '2014-09-26T12:34:45'
    assert not call1['alert_sent']
    assert 'aggregate_id' not in call1
    assert call1['alert_time'] == alerttime1

    assert call2['match_body']['@timestamp'] == '2014-09-26T12:40:45'
    assert not call2['alert_sent']
    assert call2['aggregate_id'] == 'ABCD'

    # third match falls into the next cron window, so it starts a fresh aggregate
    assert call3['match_body']['@timestamp'] == '2014-09-26T12:47:45'
    assert call3['alert_time'] == alerttime2
    assert not call3['alert_sent']
    assert 'aggregate_id' not in call3
def test_agg_no_writeback_connectivity(ea):
    """ Tests that if writeback_es throws an exception, the matches will be added to 'agg_matches' and when
    run again, that they will be passed again to add_aggregated_alert """
    hit1, hit2, hit3 = '2014-09-26T12:34:45', '2014-09-26T12:40:45', '2014-09-26T12:47:45'
    hits = generate_hits([hit1, hit2, hit3])
    ea.thread_data.current_es.search.return_value = hits
    ea.rules[0]['aggregation'] = datetime.timedelta(minutes=10)
    ea.rules[0]['type'].matches = [{'@timestamp': hit1},
                                   {'@timestamp': hit2},
                                   {'@timestamp': hit3}]
    # Simulate the writeback cluster being unreachable for every index call
    ea.writeback_es.index.side_effect = elasticsearch.exceptions.ElasticsearchException('Nope')
    with mock.patch('elastalert.elastalert.elasticsearch_client'):
        with mock.patch.object(ea, 'find_pending_aggregate_alert', return_value=None):
            ea.run_rule(ea.rules[0], END, START)
    # The matches could not be written back, so they must be retained in memory
    assert ea.rules[0]['agg_matches'] == [{'@timestamp': hit1, 'num_hits': 0, 'num_matches': 3},
                                          {'@timestamp': hit2, 'num_hits': 0, 'num_matches': 3},
                                          {'@timestamp': hit3, 'num_hits': 0, 'num_matches': 3}]
    # On the next run (no new hits), the retained matches are re-aggregated
    ea.thread_data.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}}
    ea.add_aggregated_alert = mock.Mock()
    with mock.patch.object(ea, 'run_query'):
        ea.run_rule(ea.rules[0], END, START)
    ea.add_aggregated_alert.assert_any_call({'@timestamp': hit1, 'num_hits': 0, 'num_matches': 3}, ea.rules[0])
    ea.add_aggregated_alert.assert_any_call({'@timestamp': hit2, 'num_hits': 0, 'num_matches': 3}, ea.rules[0])
    ea.add_aggregated_alert.assert_any_call({'@timestamp': hit3, 'num_hits': 0, 'num_matches': 3}, ea.rules[0])
def test_agg_with_aggregation_key(ea):
    """Matches sharing an aggregation_key value are grouped into one aggregate.

    Three matches are generated; the first and third share 'Key Value 1' and are
    aggregated together, while the second ('Key Value 2') forms its own
    aggregate. Afterwards send_pending_alerts is driven with canned search
    results to verify that the two aggregates are alerted on separately.
    """
    ea.max_aggregation = 1337
    hits_timestamps = ['2014-09-26T12:34:45', '2014-09-26T12:40:45', '2014-09-26T12:43:45']
    match_time = ts_to_dt('2014-09-26T12:45:00Z')
    hits = generate_hits(hits_timestamps)
    ea.thread_data.current_es.search.return_value = hits
    with mock.patch('elastalert.elastalert.elasticsearch_client') as mock_es:
        mock_es.return_value = ea.thread_data.current_es
        with mock.patch('elastalert.elastalert.ts_now', return_value=match_time):
            ea.rules[0]['aggregation'] = datetime.timedelta(minutes=10)
            ea.rules[0]['type'].matches = [{'@timestamp': h} for h in hits_timestamps]
            # Hit1 and Hit3 should be aggregated together, since they have same query_key value
            ea.rules[0]['type'].matches[0]['key'] = 'Key Value 1'
            ea.rules[0]['type'].matches[1]['key'] = 'Key Value 2'
            ea.rules[0]['type'].matches[2]['key'] = 'Key Value 1'
            ea.rules[0]['aggregation_key'] = 'key'
            ea.run_rule(ea.rules[0], END, START)
    # Assert that the three matches were added to elasticsearch
    call1 = ea.writeback_es.index.call_args_list[0][1]['body']
    call2 = ea.writeback_es.index.call_args_list[1][1]['body']
    call3 = ea.writeback_es.index.call_args_list[2][1]['body']
    # First 'Key Value 1' match opens its own aggregate (no aggregate_id yet)
    assert call1['match_body']['key'] == 'Key Value 1'
    assert not call1['alert_sent']
    assert 'aggregate_id' not in call1
    assert 'aggregation_key' in call1
    assert call1['aggregation_key'] == 'Key Value 1'
    assert call1['alert_time'] == dt_to_ts(match_time + datetime.timedelta(minutes=10))
    # 'Key Value 2' has a different aggregation key, so it opens a second aggregate
    assert call2['match_body']['key'] == 'Key Value 2'
    assert not call2['alert_sent']
    assert 'aggregate_id' not in call2
    assert 'aggregation_key' in call2
    assert call2['aggregation_key'] == 'Key Value 2'
    assert call2['alert_time'] == dt_to_ts(match_time + datetime.timedelta(minutes=10))
    assert call3['match_body']['key'] == 'Key Value 1'
    assert not call3['alert_sent']
    # Call3 should have it's aggregate_id set to call1's _id
    # It should also have the same alert_time as call1
    assert call3['aggregate_id'] == 'ABCD'
    assert 'aggregation_key' in call3
    assert call3['aggregation_key'] == 'Key Value 1'
    assert call3['alert_time'] == dt_to_ts(match_time + datetime.timedelta(minutes=10))
    # First call - Find all pending alerts (only entries without agg_id)
    # Second call - Find matches with agg_id == 'ABCD'
    # Third call - Find matches with agg_id == 'CDEF'
    ea.writeback_es.deprecated_search.side_effect = [{'hits': {'hits': [{'_id': 'ABCD', '_index': 'wb', '_source': call1},
                                                                       {'_id': 'CDEF', '_index': 'wb', '_source': call2}]}},
                                                     {'hits': {'hits': [{'_id': 'BCDE', '_index': 'wb', '_source': call3}]}},
                                                     {'hits': {'total': 0, 'hits': []}}]
    with mock.patch('elastalert.elastalert.elasticsearch_client') as mock_es:
        mock_es.return_value = ea.thread_data.current_es
        ea.send_pending_alerts()
        # Assert that current_es was refreshed from the aggregate rules
        # NOTE(review): Mock has no 'called_with' method; attribute access
        # auto-creates a child mock and calling it returns a truthy Mock, so
        # this assert can never fail. 'assert_called_with' was likely intended.
        assert mock_es.called_with(host='', port='')
        assert mock_es.call_count == 2
    # Each aggregate fires as one alert containing all of its matches
    assert_alerts(ea, [[hits_timestamps[0], hits_timestamps[2]], [hits_timestamps[1]]])
    call1 = ea.writeback_es.deprecated_search.call_args_list[7][1]['body']
    call2 = ea.writeback_es.deprecated_search.call_args_list[8][1]['body']
    call3 = ea.writeback_es.deprecated_search.call_args_list[9][1]['body']
    assert 'alert_time' in call2['filter']['range']
    assert call3['query']['query_string']['query'] == 'aggregate_id:ABCD'
    assert call4['query']['query_string']['query'] == 'aggregate_id:CDEF'
    assert ea.writeback_es.deprecated_search.call_args_list[9][1]['size'] == 1337
def test_silence(ea):
    """Silencing a rule suppresses alerts until the silence period elapses."""
    # Silence the test rule for four hours
    ea.args.rule = 'test_rule.yaml'  # Not a real name, just has to be set
    ea.args.silence = 'hours=4'
    ea.silence()

    # A match during the silence window must not trigger an alert
    ea.rules[0]['type'].matches = [{'@timestamp': '2014-11-17T00:00:00'}]
    with mock.patch('elastalert.elastalert.elasticsearch_client'):
        ea.run_rule(ea.rules[0], END, START)
    assert ea.rules[0]['alert'][0].alert.call_count == 0

    # Advance ts_now() five hours, past the silence; the same match now alerts
    ea.rules[0]['type'].matches = [{'@timestamp': '2014-11-17T00:00:00'}]
    with mock.patch('elastalert.elastalert.ts_now') as mock_ts:
        with mock.patch('elastalert.elastalert.elasticsearch_client'):
            # Converted twice so the mocked timestamp carries tzinfo
            mock_ts.return_value = ts_to_dt(dt_to_ts(datetime.datetime.utcnow() + datetime.timedelta(hours=5)))
            ea.run_rule(ea.rules[0], END, START)
    assert ea.rules[0]['alert'][0].alert.call_count == 1
def test_compound_query_key(ea):
    """A compound query_key is flattened into one comma-joined field on each event."""
    ea.rules[0]['query_key'] = 'this,that,those'
    ea.rules[0]['compound_query_key'] = ['this', 'that', 'those']
    ea.thread_data.current_es.search.return_value = generate_hits(
        [START_TIMESTAMP, END_TIMESTAMP], this='abc', that='☃', those=4)
    ea.run_query(ea.rules[0], START, END)
    # Inspect the first event handed to the rule type's add_data
    first_call = ea.rules[0]['type'].add_data.call_args_list[0]
    event = first_call[0][0][0]
    assert 'this,that,those' in event
    assert event['this,that,those'] == 'abc, ☃, 4'
def test_silence_query_key(ea):
    """Silencing with a query_key value only suppresses alerts for that value."""
    # Silence the 'qlo' key on the test rule for four hours
    ea.args.rule = 'test_rule.yaml'  # Not a real name, just has to be set
    ea.args.silence = 'hours=4'
    ea.silence('anytest.qlo')

    # A match carrying the silenced key value must not alert
    ea.rules[0]['query_key'] = 'username'
    ea.rules[0]['type'].matches = [{'@timestamp': '2014-11-17T00:00:00', 'username': 'qlo'}]
    with mock.patch('elastalert.elastalert.elasticsearch_client'):
        ea.run_rule(ea.rules[0], END, START)
    assert ea.rules[0]['alert'][0].alert.call_count == 0

    # A different query_key value is not covered by the silence
    ea.rules[0]['type'].matches = [{'@timestamp': '2014-11-17T00:00:01', 'username': 'dpopes'}]
    with mock.patch('elastalert.elastalert.elasticsearch_client'):
        ea.run_rule(ea.rules[0], END, START)
    assert ea.rules[0]['alert'][0].alert.call_count == 1

    # Once ts_now() is five hours later (past the silence), 'qlo' alerts again
    ea.rules[0]['type'].matches = [{'@timestamp': '2014-11-17T00:00:00', 'username': 'qlo'}]
    with mock.patch('elastalert.elastalert.ts_now') as mock_ts:
        with mock.patch('elastalert.elastalert.elasticsearch_client'):
            # Converted twice so the mocked timestamp carries tzinfo
            mock_ts.return_value = ts_to_dt(dt_to_ts(datetime.datetime.utcnow() + datetime.timedelta(hours=5)))
            ea.run_rule(ea.rules[0], END, START)
    assert ea.rules[0]['alert'][0].alert.call_count == 2
def test_realert(ea):
    """realert suppresses repeated alerts until the realert window has passed.

    First run: many matches, only the first alerts. Second run: matches inside
    the window stay silenced. Third run: with ts_now() mocked past the window,
    the same match alerts again.
    """
    hits = ['2014-09-26T12:35:%sZ' % (x) for x in range(60)]
    matches = [{'@timestamp': x} for x in hits]
    ea.thread_data.current_es.search.return_value = hits
    with mock.patch('elastalert.elastalert.elasticsearch_client'):
        ea.rules[0]['realert'] = datetime.timedelta(seconds=50)
        ea.rules[0]['type'].matches = matches
        ea.run_rule(ea.rules[0], END, START)
        assert ea.rules[0]['alert'][0].alert.call_count == 1

    # Doesn't alert again.
    # BUGFIX: the matches must be assigned *before* run_rule; the original
    # assigned them after the run, so the suppression path ran without them
    # and the assertion passed vacuously.
    matches = [{'@timestamp': x} for x in hits]
    with mock.patch('elastalert.elastalert.elasticsearch_client'):
        ea.rules[0]['type'].matches = matches
        ea.run_rule(ea.rules[0], END, START)
    assert ea.rules[0]['alert'][0].alert.call_count == 1

    # mock ts_now() to past the realert time
    matches = [{'@timestamp': hits[0]}]
    with mock.patch('elastalert.elastalert.ts_now') as mock_ts:
        with mock.patch('elastalert.elastalert.elasticsearch_client'):
            # mock_ts is converted twice to add tzinfo
            mock_ts.return_value = ts_to_dt(dt_to_ts(datetime.datetime.utcnow() + datetime.timedelta(minutes=10)))
            ea.rules[0]['type'].matches = matches
            ea.run_rule(ea.rules[0], END, START)
            assert ea.rules[0]['alert'][0].alert.call_count == 2
def test_realert_with_query_key(ea):
    """realert is tracked per query_key value; new or missing values alert immediately."""
    ea.rules[0]['query_key'] = 'username'
    ea.rules[0]['realert'] = datetime.timedelta(minutes=10)

    # (match document, expected cumulative alert count after running the rule)
    scenarios = [
        # Alert and silence username: qlo
        ({'@timestamp': '2014-11-17T00:00:00', 'username': 'qlo'}, 1),
        # Dont alert again for same username
        ({'@timestamp': '2014-11-17T00:05:00', 'username': 'qlo'}, 1),
        # Do alert with a different value
        ({'@timestamp': '2014-11-17T00:05:00', 'username': ''}, 2),
        # Alert with query_key missing
        ({'@timestamp': '2014-11-17T00:05:00'}, 3),
        # Still alert with a different value
        ({'@timestamp': '2014-11-17T00:05:00', 'username': 'ghengis_khan'}, 4),
    ]
    for doc, expected_count in scenarios:
        ea.rules[0]['type'].matches = [doc]
        with mock.patch('elastalert.elastalert.elasticsearch_client'):
            ea.run_rule(ea.rules[0], END, START)
        assert ea.rules[0]['alert'][0].alert.call_count == expected_count
def test_realert_with_nested_query_key(ea):
    """realert silencing resolves dotted query_keys through nested documents."""
    ea.rules[0]['query_key'] = 'user.name'
    ea.rules[0]['realert'] = datetime.timedelta(minutes=10)

    # First match for user.name == 'qlo' alerts and starts the silence
    ea.rules[0]['type'].matches = [{'@timestamp': '2014-11-17T00:00:00', 'user': {'name': 'qlo'}}]
    with mock.patch('elastalert.elastalert.elasticsearch_client'):
        ea.run_rule(ea.rules[0], END, START)
    assert ea.rules[0]['alert'][0].alert.call_count == 1

    # A later match with the same nested value is still silenced
    ea.rules[0]['type'].matches = [{'@timestamp': '2014-11-17T00:05:00', 'user': {'name': 'qlo'}}]
    with mock.patch('elastalert.elastalert.elasticsearch_client'):
        ea.run_rule(ea.rules[0], END, START)
    assert ea.rules[0]['alert'][0].alert.call_count == 1
def test_count(ea):
    """A use_count_query rule issues one count query per run_every segment."""
    ea.rules[0]['use_count_query'] = True
    ea.rules[0]['doc_type'] = 'doctype'
    with mock.patch('elastalert.elastalert.elasticsearch_client'), \
            mock.patch.object(ea, 'get_hits_count') as mock_hits:
        ea.run_rule(ea.rules[0], END, START)
    # Assert that es.count is run against every run_every timeframe between START and END
    start = START
    # NOTE(review): this query dict is rebuilt each iteration but never used by
    # the assertion below (the query argument is matched with mock.ANY), so the
    # two mutation lines in the loop are effectively dead code.
    query = {
        'query': {'filtered': {
            'filter': {'bool': {'must': [{'range': {'@timestamp': {'lte': END_TIMESTAMP, 'gt': START_TIMESTAMP}}}]}}}}}
    while END - start > ea.run_every:
        end = start + ea.run_every
        query['query']['filtered']['filter']['bool']['must'][0]['range']['@timestamp']['lte'] = dt_to_ts(end)
        query['query']['filtered']['filter']['bool']['must'][0]['range']['@timestamp']['gt'] = dt_to_ts(start)
        mock_hits.assert_any_call(mock.ANY, start, end, mock.ANY)
        start = start + ea.run_every
def run_and_assert_segmented_queries(ea, start, end, segment_size):
    """Run a rule over [start, end] and assert run_query covered it in segments.

    Each run_query call should span the next segment_size slice of the range,
    with the final slice clipped to `end`. Also verifies the elastalert_status
    document written afterwards spans the full range (or, for aggregation
    queries, stops at the last completed segment boundary).
    """
    with mock.patch.object(ea, 'run_query') as mock_run_query:
        ea.run_rule(ea.rules[0], end, start)
        original_end, original_start = end, start
        for call_args in mock_run_query.call_args_list:
            # Each call's (start, end) positional args must match the next segment
            end = min(start + segment_size, original_end)
            assert call_args[0][1:3] == (start, end)
            start += segment_size
    # Assert elastalert_status was created for the entire time range
    assert ea.writeback_es.index.call_args_list[-1][1]['body']['starttime'] == dt_to_ts(original_start)
    if ea.rules[0].get('aggregation_query_element'):
        # Aggregation queries only record up to the end of the last segment run
        assert ea.writeback_es.index.call_args_list[-1][1]['body']['endtime'] == dt_to_ts(
            original_end - (original_end - end))
        assert original_end - end < segment_size
    else:
        assert ea.writeback_es.index.call_args_list[-1][1]['body']['endtime'] == dt_to_ts(original_end)
def test_query_segmenting_reset_num_hits(ea):
    """num_hits must be zero at the start of every run_query call.

    The mocked run_query bumps num_hits by 10 each time; if the counter were
    not reset between segments, the assert inside the side effect would fail
    on the second call.
    """
    def assert_num_hits_reset(*_args, **_kwargs):
        assert ea.thread_data.num_hits == 0
        ea.thread_data.num_hits += 10
        # DEFAULT makes the mock fall back to its normal return_value,
        # preserving whatever run_rule expects run_query to return.
        return mock.DEFAULT
    with mock.patch.object(ea, 'run_query') as mock_run_query:
        # BUGFIX: pass the function itself as side_effect. The original wrote
        # `side_effect = assert_num_hits_reset()`, which executed the check
        # exactly once up-front and assigned its return value (None) as the
        # side effect -- so the reset was never verified per call.
        mock_run_query.side_effect = assert_num_hits_reset
        ea.run_rule(ea.rules[0], END, START)
        assert mock_run_query.call_count > 1
def test_query_segmenting(ea):
    """Segment size depends on query type: buffer_time for normal and
    aggregation queries, run_every for count and terms queries.
    """
    # buffer_time segments with normal queries
    ea.rules[0]['buffer_time'] = datetime.timedelta(minutes=53)
    with mock.patch('elastalert.elastalert.elasticsearch_client'):
        run_and_assert_segmented_queries(ea, START, END, ea.rules[0]['buffer_time'])
    # run_every segments with count queries
    ea.rules[0]['use_count_query'] = True
    with mock.patch('elastalert.elastalert.elasticsearch_client'):
        run_and_assert_segmented_queries(ea, START, END, ea.run_every)
    # run_every segments with terms queries
    ea.rules[0].pop('use_count_query')
    ea.rules[0]['use_terms_query'] = True
    with mock.patch('elastalert.elastalert.elasticsearch_client'):
        run_and_assert_segmented_queries(ea, START, END, ea.run_every)
    # buffer_time segments with terms queries
    ea.rules[0].pop('use_terms_query')
    ea.rules[0]['aggregation_query_element'] = {'term': 'term_val'}
    with mock.patch('elastalert.elastalert.elasticsearch_client'):
        ea.rules[0]['buffer_time'] = datetime.timedelta(minutes=30)
        run_and_assert_segmented_queries(ea, START, END, ea.rules[0]['buffer_time'])
    # partial segment size scenario
    with mock.patch('elastalert.elastalert.elasticsearch_client'):
        ea.rules[0]['buffer_time'] = datetime.timedelta(minutes=53)
        run_and_assert_segmented_queries(ea, START, END, ea.rules[0]['buffer_time'])
    # run every segmenting
    ea.rules[0]['use_run_every_query_size'] = True
    with mock.patch('elastalert.elastalert.elasticsearch_client'):
        run_and_assert_segmented_queries(ea, START, END, ea.run_every)
def test_get_starttime(ea):
    """get_starttime returns the stored endtime only while it is recent enough."""
    endtime = '2015-01-01T00:00:00Z'
    fake_es = mock.Mock()
    fake_es.search.return_value = {'hits': {'hits': [{'_source': {'endtime': endtime}}]}}
    fake_es.info.return_value = {'version': {'number': '2.0'}}
    ea.writeback_es = fake_es

    # An endtime four days old is still considered usable
    with mock.patch('elastalert.elastalert.ts_now') as mock_ts:
        mock_ts.return_value = ts_to_dt('2015-01-05T00:00:00Z')  # 4 days ahead of the endtime
        assert ea.get_starttime(ea.rules[0]) == ts_to_dt(endtime)

    # Ten days old is stale, so nothing is returned
    with mock.patch('elastalert.elastalert.ts_now') as mock_ts:
        mock_ts.return_value = ts_to_dt('2015-01-11T00:00:00Z')  # 10 days ahead of the endtime
        assert ea.get_starttime(ea.rules[0]) is None
def test_set_starttime(ea):
    """Exercises every branch of set_starttime: buffer_time defaults,
    persisted last-run times, minimum_starttime clamping, count-query
    run_every sizing, previous_endtime handling and scan_entire_timeframe.
    The scenarios are order-dependent: each one pops the state the previous
    one left on the rule.
    """
    # standard query, no starttime, no last run
    end = ts_to_dt('2014-10-10T10:10:10')
    with mock.patch.object(ea, 'get_starttime') as mock_gs:
        mock_gs.return_value = None
        ea.set_starttime(ea.rules[0], end)
        assert mock_gs.call_count == 1
    assert ea.rules[0]['starttime'] == end - ea.buffer_time
    # Standard query, no starttime, rule specific buffer_time
    ea.rules[0].pop('starttime')
    ea.rules[0]['buffer_time'] = datetime.timedelta(minutes=37)
    with mock.patch.object(ea, 'get_starttime') as mock_gs:
        mock_gs.return_value = None
        ea.set_starttime(ea.rules[0], end)
        assert mock_gs.call_count == 1
    assert ea.rules[0]['starttime'] == end - datetime.timedelta(minutes=37)
    ea.rules[0].pop('buffer_time')
    # Standard query, no starttime, last run
    ea.rules[0].pop('starttime')
    with mock.patch.object(ea, 'get_starttime') as mock_gs:
        mock_gs.return_value = ts_to_dt('2014-10-10T00:00:00')
        ea.set_starttime(ea.rules[0], end)
        assert mock_gs.call_count == 1
    assert ea.rules[0]['starttime'] == ts_to_dt('2014-10-10T00:00:00')
    # Standard query, no starttime, last run, assure buffer_time doesn't go past
    ea.rules[0].pop('starttime')
    ea.rules[0]['buffer_time'] = datetime.timedelta(weeks=1000)
    with mock.patch.object(ea, 'get_starttime') as mock_gs:
        mock_gs.return_value = ts_to_dt('2014-10-09T00:00:00')
        # First call sets minumum_time
        ea.set_starttime(ea.rules[0], end)
        # Second call uses buffer_time, but it goes past minimum
        ea.set_starttime(ea.rules[0], end)
        assert ea.rules[0]['starttime'] == ts_to_dt('2014-10-09T00:00:00')
    # Standard query, starttime
    ea.rules[0].pop('buffer_time')
    ea.rules[0].pop('minimum_starttime')
    with mock.patch.object(ea, 'get_starttime') as mock_gs:
        mock_gs.return_value = None
        ea.set_starttime(ea.rules[0], end)
        assert mock_gs.call_count == 0
    assert ea.rules[0]['starttime'] == end - ea.buffer_time
    # Count query, starttime, no previous endtime
    ea.rules[0]['use_count_query'] = True
    ea.rules[0]['doc_type'] = 'blah'
    with mock.patch.object(ea, 'get_starttime') as mock_gs:
        mock_gs.return_value = None
        ea.set_starttime(ea.rules[0], end)
        assert mock_gs.call_count == 0
    # Count queries are sized by run_every rather than buffer_time
    assert ea.rules[0]['starttime'] == end - ea.run_every
    # Count query, with previous endtime
    with mock.patch('elastalert.elastalert.elasticsearch_client'), \
            mock.patch.object(ea, 'get_hits_count'):
        ea.run_rule(ea.rules[0], END, START)
    ea.set_starttime(ea.rules[0], end)
    assert ea.rules[0]['starttime'] == END
    # buffer_time doesn't go past previous endtime
    ea.rules[0].pop('use_count_query')
    ea.rules[0]['previous_endtime'] = end - ea.buffer_time * 2
    ea.set_starttime(ea.rules[0], end)
    assert ea.rules[0]['starttime'] == ea.rules[0]['previous_endtime']
    # Make sure starttime is updated if previous_endtime isn't used
    ea.rules[0]['previous_endtime'] = end - ea.buffer_time / 2
    ea.rules[0]['starttime'] = ts_to_dt('2014-10-09T00:00:01')
    ea.set_starttime(ea.rules[0], end)
    assert ea.rules[0]['starttime'] == end - ea.buffer_time
    # scan_entire_timeframe
    ea.rules[0].pop('previous_endtime')
    ea.rules[0].pop('starttime')
    ea.rules[0]['timeframe'] = datetime.timedelta(days=3)
    ea.rules[0]['scan_entire_timeframe'] = True
    with mock.patch.object(ea, 'get_starttime') as mock_gs:
        mock_gs.return_value = None
        ea.set_starttime(ea.rules[0], end)
    assert ea.rules[0]['starttime'] == end - datetime.timedelta(days=3)
def test_kibana_dashboard(ea):
    """use_kibana_link uploads a copy of the named dashboard and returns its URL,
    adding filters for the rule's query_key (including compound keys).
    """
    match = {'@timestamp': '2014-10-11T00:00:00'}
    mock_es = mock.Mock()
    ea.rules[0]['use_kibana_dashboard'] = 'my dashboard'
    with mock.patch('elastalert.elastalert.elasticsearch_client') as mock_es_init:
        mock_es_init.return_value = mock_es
        # No dashboard found
        mock_es.deprecated_search.return_value = {'hits': {'total': 0, 'hits': []}}
        with pytest.raises(EAException):
            ea.use_kibana_link(ea.rules[0], match)
        mock_call = mock_es.deprecated_search.call_args_list[0][1]
        assert mock_call['body'] == {'query': {'term': {'_id': 'my dashboard'}}}
        # Dashboard found
        mock_es.index.return_value = {'_id': 'ABCDEFG'}
        mock_es.deprecated_search.return_value = {'hits': {'hits': [{'_source': {'dashboard': json.dumps(dashboard_temp)}}]}}
        url = ea.use_kibana_link(ea.rules[0], match)
        assert 'ABCDEFG' in url
        db = json.loads(mock_es.index.call_args_list[0][1]['body']['dashboard'])
        assert 'anytest' in db['title']
        # Query key filtering added
        ea.rules[0]['query_key'] = 'foobar'
        match['foobar'] = 'baz'
        # NOTE(review): this and the following 'url' assignment are unused; the
        # assertions below only inspect the indexed dashboard body.
        url = ea.use_kibana_link(ea.rules[0], match)
        db = json.loads(mock_es.index.call_args_list[-1][1]['body']['dashboard'])
        assert db['services']['filter']['list']['1']['field'] == 'foobar'
        assert db['services']['filter']['list']['1']['query'] == '"baz"'
        # Compound query key
        ea.rules[0]['query_key'] = 'foo,bar'
        ea.rules[0]['compound_query_key'] = ['foo', 'bar']
        match['foo'] = 'cat'
        match['bar'] = 'dog'
        match['foo,bar'] = 'cat, dog'
        url = ea.use_kibana_link(ea.rules[0], match)
        db = json.loads(mock_es.index.call_args_list[-1][1]['body']['dashboard'])
        # Each component of the compound key must appear as its own filter
        found_filters = 0
        for filter_id, filter_dict in list(db['services']['filter']['list'].items()):
            if (filter_dict['field'] == 'foo' and filter_dict['query'] == '"cat"') or \
                    (filter_dict['field'] == 'bar' and filter_dict['query'] == '"dog"'):
                found_filters += 1
                continue
        assert found_filters == 2
def test_rule_changes(ea):
    """load_rule_changes reloads rules whose file hash changed, loads new rule
    files, preserves in-memory state (processed_hits) across reloads, rejects
    rules with duplicate names or is_enabled=False, retries previously failed
    files when their hash changes, and unschedules rules whose file vanished.
    """
    ea.rule_hashes = {'rules/rule1.yaml': 'ABC',
                      'rules/rule2.yaml': 'DEF'}
    ea.rules = [ea.init_rule(rule, True) for rule in [{'rule_file': 'rules/rule1.yaml', 'name': 'rule1', 'filter': []},
                                                      {'rule_file': 'rules/rule2.yaml', 'name': 'rule2', 'filter': []}]]
    ea.rules[1]['processed_hits'] = ['save me']
    new_hashes = {'rules/rule1.yaml': 'ABC',
                  'rules/rule3.yaml': 'XXX',
                  'rules/rule2.yaml': '!@#$'}
    with mock.patch.object(ea.conf['rules_loader'], 'get_hashes') as mock_hashes:
        with mock.patch.object(ea.conf['rules_loader'], 'load_configuration') as mock_load:
            mock_load.side_effect = [{'filter': [], 'name': 'rule2', 'rule_file': 'rules/rule2.yaml'},
                                     {'filter': [], 'name': 'rule3', 'rule_file': 'rules/rule3.yaml'}]
            mock_hashes.return_value = new_hashes
            ea.load_rule_changes()
    # All 3 rules still exist
    assert ea.rules[0]['name'] == 'rule1'
    assert ea.rules[1]['name'] == 'rule2'
    # In-memory state must survive the reload of rule2
    assert ea.rules[1]['processed_hits'] == ['save me']
    assert ea.rules[2]['name'] == 'rule3'
    # Assert 2 and 3 were reloaded
    assert mock_load.call_count == 2
    mock_load.assert_any_call('rules/rule2.yaml', ea.conf)
    mock_load.assert_any_call('rules/rule3.yaml', ea.conf)
    # A new rule with a conflicting name wont load
    new_hashes = copy.copy(new_hashes)
    new_hashes.update({'rules/rule4.yaml': 'asdf'})
    with mock.patch.object(ea.conf['rules_loader'], 'get_hashes') as mock_hashes:
        with mock.patch.object(ea.conf['rules_loader'], 'load_configuration') as mock_load:
            with mock.patch.object(ea, 'send_notification_email') as mock_send:
                # 'rule3' clashes with the already-loaded rule3
                mock_load.return_value = {'filter': [], 'name': 'rule3', 'new': 'stuff',
                                          'rule_file': 'rules/rule4.yaml'}
                mock_hashes.return_value = new_hashes
                ea.load_rule_changes()
                # The failure must be reported by email, not silently dropped
                mock_send.assert_called_once_with(exception=mock.ANY, rule_file='rules/rule4.yaml')
    assert len(ea.rules) == 3
    assert not any(['new' in rule for rule in ea.rules])
    # A new rule with is_enabled=False wont load
    new_hashes = copy.copy(new_hashes)
    new_hashes.update({'rules/rule4.yaml': 'asdf'})
    with mock.patch.object(ea.conf['rules_loader'], 'get_hashes') as mock_hashes:
        with mock.patch.object(ea.conf['rules_loader'], 'load_configuration') as mock_load:
            mock_load.return_value = {'filter': [], 'name': 'rule4', 'new': 'stuff', 'is_enabled': False, 'rule_file': 'rules/rule4.yaml'}
            mock_hashes.return_value = new_hashes
            ea.load_rule_changes()
    assert len(ea.rules) == 3
    assert not any(['new' in rule for rule in ea.rules])
    # An old rule which didn't load gets reloaded
    new_hashes = copy.copy(new_hashes)
    new_hashes['rules/rule4.yaml'] = 'qwerty'
    with mock.patch.object(ea.conf['rules_loader'], 'get_hashes') as mock_hashes:
        with mock.patch.object(ea.conf['rules_loader'], 'load_configuration') as mock_load:
            mock_load.return_value = {'filter': [], 'name': 'rule4', 'new': 'stuff', 'rule_file': 'rules/rule4.yaml'}
            mock_hashes.return_value = new_hashes
            ea.load_rule_changes()
    assert len(ea.rules) == 4
    # Disable a rule by removing the file
    new_hashes.pop('rules/rule4.yaml')
    with mock.patch.object(ea.conf['rules_loader'], 'get_hashes') as mock_hashes:
        with mock.patch.object(ea.conf['rules_loader'], 'load_configuration') as mock_load:
            mock_load.return_value = {'filter': [], 'name': 'rule4', 'new': 'stuff', 'rule_file': 'rules/rule4.yaml'}
            mock_hashes.return_value = new_hashes
            ea.load_rule_changes()
    # The removed rule must also be dropped from the scheduler
    ea.scheduler.remove_job.assert_called_with(job_id='rule4')
def test_strf_index(ea):
    """get_index expands strftime patterns across the queried date range."""
    ea.rules[0]['index'] = 'logstash-%Y.%m.%d'
    ea.rules[0]['use_strftime_index'] = True

    # A range within one day yields a single concrete index
    begin = ts_to_dt('2015-01-02T12:34:45Z')
    finish = ts_to_dt('2015-01-02T16:15:14Z')
    assert ea.get_index(ea.rules[0], begin, finish) == 'logstash-2015.01.02'

    # A range crossing midnight yields both days, comma separated
    finish = ts_to_dt('2015-01-03T01:02:03Z')
    spanned = ea.get_index(ea.rules[0], begin, finish).split(',')
    assert set(spanned) == {'logstash-2015.01.02', 'logstash-2015.01.03'}

    # Without explicit times the date pattern collapses to a wildcard
    assert ea.get_index(ea.rules[0]) == 'logstash-*'
    ea.rules[0]['index'] = 'logstash-%Y.%m'
    assert ea.get_index(ea.rules[0]) == 'logstash-*'
    ea.rules[0]['index'] = 'logstash-%Y.%m-stuff'
    assert ea.get_index(ea.rules[0]) == 'logstash-*-stuff'
def test_count_keys(ea):
    """get_top_counts issues one count-aggregation search per key and returns
    the bucket counts as 'top_events_<key>' dicts.
    """
    ea.rules[0]['timeframe'] = datetime.timedelta(minutes=60)
    ea.rules[0]['top_count_keys'] = ['this', 'that']
    ea.rules[0]['type'].matches = {'@timestamp': END}
    ea.rules[0]['doc_type'] = 'blah'
    # Canned aggregation responses: one per top_count_key, in order
    buckets = [{'aggregations': {
        'filtered': {'counts': {'buckets': [{'key': 'a', 'doc_count': 10}, {'key': 'b', 'doc_count': 5}]}}}},
        {'aggregations': {'filtered': {
            'counts': {'buckets': [{'key': 'd', 'doc_count': 10}, {'key': 'c', 'doc_count': 12}]}}}}]
    ea.thread_data.current_es.deprecated_search.side_effect = buckets
    counts = ea.get_top_counts(ea.rules[0], START, END, ['this', 'that'])
    calls = ea.thread_data.current_es.deprecated_search.call_args_list
    # The query must be a count-only terms aggregation over the first key
    assert calls[0][1]['search_type'] == 'count'
    assert calls[0][1]['body']['aggs']['filtered']['aggs']['counts']['terms'] == {'field': 'this', 'size': 5,
                                                                                 'min_doc_count': 1}
    assert counts['top_events_this'] == {'a': 10, 'b': 5}
    assert counts['top_events_that'] == {'d': 10, 'c': 12}
def test_exponential_realert(ea):
    """next_alert_time grows or shrinks the realert exponent based on how soon
    a new alert arrives relative to the current silence window."""
    ea.rules[0]['exponential_realert'] = datetime.timedelta(days=1)  # 1 day ~ 10 * 2**13 seconds
    ea.rules[0]['realert'] = datetime.timedelta(seconds=10)
    until = ts_to_dt('2015-03-24T00:00:00')

    def after(**kwargs):
        return until + datetime.timedelta(**kwargs)

    # (new alert timestamp, starting exponent) -> expected resulting exponent
    cases = [
        (after(seconds=5), 0, 1),     # Exp will increase to 1, 10*2**0 = 10s
        (after(seconds=15), 0, 0),    # Exp will stay at 0, 10*2**0 = 10s
        (after(seconds=15), 1, 2),    # Exp will increase to 2, 10*2**1 = 20s
        (after(minutes=1), 2, 1),     # Exp will decrease to 1, 10*2**2 = 40s
        (after(minutes=1), 3, 4),     # Exp will increase to 4, 10*2**3 = 1m20s
        (after(minutes=5), 1, 0),     # Exp will lower back to 0, 10*2**1 = 20s
        (after(hours=4), 9, 0),       # Exp will lower back to 0, 10*2**9 = 1h25m
        (after(hours=4), 10, 9),      # Exp will lower back to 9, 10*2**10 = 2h50m
        (after(hours=4), 11, 12),     # Exp will increase to 12, 10*2**11 = 5h
    ]
    for timestamp, exponent_in, expected in cases:
        ea.silence_cache[ea.rules[0]['name']] = (until, exponent_in)
        next_alert, exponent_out = ea.next_alert_time(ea.rules[0], ea.rules[0]['name'], timestamp)
        assert exponent_out == expected
def test_wait_until_responsive(ea):
    """Unblock as soon as ElasticSearch becomes responsive."""
    # Takes a while before becoming responsive.
    ea.writeback_es.indices.exists.side_effect = [
        ConnectionError(),  # ES is not yet responsive.
        False,              # index does not yet exist.
        True,
    ]
    clock = mock.MagicMock()
    clock.side_effect = [0.0, 1.0, 2.0, 3.0, 4.0]
    timeout = datetime.timedelta(seconds=3.5)
    with mock.patch('time.sleep') as sleep:
        ea.wait_until_responsive(timeout=timeout, clock=clock)

    # Sleep as little as we can.
    # NOTE(review): this bare comparison is never asserted -- its result is
    # discarded, so the check cannot fail. An `assert` was likely intended;
    # verify the expected call list before adding it.
    sleep.mock_calls == [
        mock.call(1.0),
    ]
def test_wait_until_responsive_timeout_es_not_available(ea, capsys):
    """Bail out if ElasticSearch doesn't (quickly) become responsive."""
    # Never becomes responsive :-)
    ea.writeback_es.ping.return_value = False
    ea.writeback_es.indices.exists.return_value = False
    clock = mock.MagicMock()
    clock.side_effect = [0.0, 1.0, 2.0, 3.0]
    timeout = datetime.timedelta(seconds=2.5)
    with mock.patch('time.sleep') as sleep:
        with pytest.raises(SystemExit) as exc:
            ea.wait_until_responsive(timeout=timeout, clock=clock)
        assert exc.value.code == 1

    # Ensure we get useful diagnostics.
    output, errors = capsys.readouterr()
    assert 'Could not reach ElasticSearch at "es:14900".' in errors

    # Slept until we passed the deadline.
    # NOTE(review): this bare comparison is never asserted -- its result is
    # discarded, so the check cannot fail. An `assert` was likely intended;
    # verify the expected call list before adding it.
    sleep.mock_calls == [
        mock.call(1.0),
        mock.call(1.0),
        mock.call(1.0),
    ]
def test_wait_until_responsive_timeout_index_does_not_exist(ea, capsys):
    """Bail out if ElasticSearch doesn't (quickly) become responsive."""
    # ES answers pings, but the writeback index never appears.
    ea.writeback_es.ping.return_value = True
    ea.writeback_es.indices.exists.return_value = False
    clock = mock.MagicMock()
    clock.side_effect = [0.0, 1.0, 2.0, 3.0]
    timeout = datetime.timedelta(seconds=2.5)
    with mock.patch('time.sleep') as sleep:
        with pytest.raises(SystemExit) as exc:
            ea.wait_until_responsive(timeout=timeout, clock=clock)
        assert exc.value.code == 1

    # Ensure we get useful diagnostics.
    output, errors = capsys.readouterr()
    assert 'Writeback alias "wb_a" does not exist, did you run `elastalert-create-index`?' in errors

    # Slept until we passed the deadline.
    # NOTE(review): this bare comparison is never asserted -- its result is
    # discarded, so the check cannot fail. An `assert` was likely intended;
    # verify the expected call list before adding it.
    sleep.mock_calls == [
        mock.call(1.0),
        mock.call(1.0),
        mock.call(1.0),
    ]
def test_stop(ea):
    """ The purpose of this test is to make sure that calling ElastAlerter.stop() will break it
    out of a ElastAlerter.start() loop. This method exists to provide a mechanism for running
    ElastAlert with threads and thus must be tested with threads. mock_loop verifies the loop
    is running and will call stop after several iterations. """

    # Exit the thread on the fourth iteration
    def mock_loop():
        for i in range(3):
            assert ea.running
            yield
        ea.stop()

    # NOTE(review): the outer patch of 'sleep_for' is immediately shadowed by
    # the inner one; only the inner mock (mock_run) is ever exercised.
    with mock.patch.object(ea, 'sleep_for', return_value=None):
        with mock.patch.object(ea, 'sleep_for') as mock_run:
            mock_run.side_effect = mock_loop()
            start_thread = threading.Thread(target=ea.start)
            # Set as daemon to prevent a failed test from blocking exit
            start_thread.daemon = True
            start_thread.start()

            # Give it a few seconds to run the loop
            start_thread.join(5)

            assert not ea.running
            assert not start_thread.is_alive()
            assert mock_run.call_count == 4
def test_notify_email(ea):
    """Notification emails are sent to rule-level and global notify_email addresses."""
    smtp_instance = mock.Mock()
    ea.rules[0]['notify_email'] = ['foo@foo.foo', 'bar@bar.bar']
    with mock.patch('elastalert.elastalert.SMTP') as smtp_cls:
        smtp_cls.return_value = smtp_instance

        def recipients(call_index):
            # Second positional argument of SMTP.sendmail is the recipient list
            return set(smtp_instance.sendmail.call_args_list[call_index][0][1])

        # Addresses taken from the rule, given as a list
        ea.send_notification_email('omg', rule=ea.rules[0])
        assert recipients(0) == set(ea.rules[0]['notify_email'])

        # Global notify_email is merged with the rule's addresses
        ea.notify_email = ['baz@baz.baz']
        ea.send_notification_email('omg', rule=ea.rules[0])
        assert recipients(1) == set(['baz@baz.baz'] + ea.rules[0]['notify_email'])

        # Rule address supplied as a single string instead of a list
        ea.rules[0]['notify_email'] = 'foo@foo.foo'
        ea.send_notification_email('omg', rule=ea.rules[0])
        assert recipients(2) == {'baz@baz.baz', 'foo@foo.foo'}

        # No rule-level address at all: only the global one remains
        ea.rules[0].pop('notify_email')
        ea.send_notification_email('omg', rule=ea.rules[0])
        assert recipients(3) == {'baz@baz.baz'}
def test_uncaught_exceptions(ea):
    """handle_uncaught_exception disables the rule only when
    disable_rules_on_error is set, re-enables it when its file changes,
    and sends a notification email when one is configured.
    """
    e = Exception("Errors yo!")

    # With disabling set to false
    ea.disable_rules_on_error = False
    ea.handle_uncaught_exception(e, ea.rules[0])
    assert len(ea.rules) == 1
    assert len(ea.disabled_rules) == 0

    # With disabling set to true
    ea.disable_rules_on_error = True
    ea.handle_uncaught_exception(e, ea.rules[0])
    assert len(ea.rules) == 0
    assert len(ea.disabled_rules) == 1

    # Changing the file should re-enable it
    ea.rule_hashes = {'blah.yaml': 'abc'}
    new_hashes = {'blah.yaml': 'def'}
    with mock.patch.object(ea.conf['rules_loader'], 'get_hashes') as mock_hashes:
        with mock.patch.object(ea.conf['rules_loader'], 'load_configuration') as mock_load:
            mock_load.side_effect = [ea.disabled_rules[0]]
            mock_hashes.return_value = new_hashes
            ea.load_rule_changes()
    assert len(ea.rules) == 1
    assert len(ea.disabled_rules) == 0

    # Notify email is sent
    ea.notify_email = 'qlo@example.com'
    with mock.patch.object(ea, 'send_notification_email') as mock_email:
        ea.handle_uncaught_exception(e, ea.rules[0])
    assert mock_email.call_args_list[0][1] == {'exception': e, 'rule': ea.disabled_rules[0]}
def test_get_top_counts_handles_no_hits_returned(ea):
    """get_top_counts yields an empty bucket per key when the terms query returns nothing."""
    with mock.patch.object(ea, 'get_hits_terms') as mock_terms:
        mock_terms.return_value = None

        rule = ea.rules[0]
        endtime = datetime.datetime.now()
        starttime = endtime - datetime.timedelta(minutes=10)

        all_counts = ea.get_top_counts(rule, starttime, endtime, ['foo'])
        assert all_counts == {'top_events_foo': {}}
def test_remove_old_events(ea):
    """remove_old_events drops processed hits older than buffer_time (+ query_delay)."""
    rule = ea.rules[0]
    now = ts_now()
    minute = datetime.timedelta(minutes=1)
    rule['processed_hits'] = {
        'foo': now - minute,
        'bar': now - 5 * minute,
        'baz': now - 15 * minute,
    }
    rule['buffer_time'] = datetime.timedelta(minutes=10)

    # With a query delay the cutoff is 20 minutes back, so nothing is removed
    rule['query_delay'] = datetime.timedelta(minutes=10)
    ea.remove_old_events(rule)
    assert len(rule['processed_hits']) == 3

    # Without the delay the 15-minute-old event falls outside the buffer
    del rule['query_delay']
    ea.remove_old_events(rule)
    assert len(rule['processed_hits']) == 2
    assert 'baz' not in rule['processed_hits']
def test_query_with_whitelist_filter_es(ea):
    """init_rule appends a NOT-clause query_string filter excluding whitelisted usernames."""
    rule = ea.rules[0]
    rule['_source_enabled'] = False
    rule['five'] = False
    rule['filter'] = [{'query_string': {'query': 'baz'}}]
    rule['compare_key'] = "username"
    rule['whitelist'] = ['xudan1', 'xudan12', 'aa1', 'bb1']

    new_rule = copy.copy(rule)
    ea.init_rule(new_rule, True)

    expected = 'NOT username:"xudan1" AND NOT username:"xudan12" AND NOT username:"aa1"'
    assert expected in new_rule['filter'][-1]['query']['query_string']['query']
def test_query_with_whitelist_filter_es_five(ea_sixsix):
    """init_rule appends a whitelist NOT-clause filter (ES>=5 filter layout)."""
    rule = ea_sixsix.rules[0]
    rule['_source_enabled'] = False
    rule['filter'] = [{'query_string': {'query': 'baz'}}]
    rule['compare_key'] = "username"
    rule['whitelist'] = ['xudan1', 'xudan12', 'aa1', 'bb1']

    new_rule = copy.copy(rule)
    ea_sixsix.init_rule(new_rule, True)

    expected = 'NOT username:"xudan1" AND NOT username:"xudan12" AND NOT username:"aa1"'
    assert expected in new_rule['filter'][-1]['query_string']['query']
def test_query_with_blacklist_filter_es(ea):
    """init_rule appends an OR-clause query_string filter matching blacklisted usernames."""
    rule = ea.rules[0]
    rule['_source_enabled'] = False
    rule['filter'] = [{'query_string': {'query': 'baz'}}]
    rule['compare_key'] = "username"
    rule['blacklist'] = ['xudan1', 'xudan12', 'aa1', 'bb1']

    new_rule = copy.copy(rule)
    ea.init_rule(new_rule, True)

    expected = 'username:"xudan1" OR username:"xudan12" OR username:"aa1"'
    assert expected in new_rule['filter'][-1]['query']['query_string']['query']
def test_query_with_blacklist_filter_es_five(ea_sixsix):
    """init_rule appends a blacklist OR-clause filter (ES>=5 filter layout)."""
    ea_sixsix.rules[0]['_source_enabled'] = False
    ea_sixsix.rules[0]['filter'] = [{'query_string': {'query': 'baz'}}]
    ea_sixsix.rules[0]['compare_key'] = "username"
    # FIX: this assignment was accidentally duplicated on two consecutive lines.
    ea_sixsix.rules[0]['blacklist'] = ['xudan1', 'xudan12', 'aa1', 'bb1']
    new_rule = copy.copy(ea_sixsix.rules[0])
    ea_sixsix.init_rule(new_rule, True)
    assert 'username:"xudan1" OR username:"xudan12" OR username:"aa1"' in \
        new_rule['filter'][-1]['query_string']['query']
|
common_xmr.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020-2022 tecnovert
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
import os
import sys
import json
import shutil
import signal
import logging
import unittest
import threading
import multiprocessing
from urllib.request import urlopen
from unittest.mock import patch
from basicswap.rpc_xmr import (
callrpc_xmr_na,
)
from tests.basicswap.mnemonics import mnemonics
from tests.basicswap.common import (
waitForServer,
)
import basicswap.config as cfg
import bin.basicswap_prepare as prepareSystem
import bin.basicswap_run as runSystem
# Root directory for per-client test datadirs; override with TEST_RELOAD_PATH.
test_path = os.path.expanduser(os.getenv('TEST_RELOAD_PATH', '~/test_basicswap1'))
# First Particl p2p port; client i listens on PARTICL_PORT_BASE + i (see setUpClass).
PARTICL_PORT_BASE = int(os.getenv('PARTICL_PORT_BASE', '11938'))
# Base ports for the regtest Monero daemons; client i uses base + i.
XMR_BASE_P2P_PORT = 17792
XMR_BASE_RPC_PORT = 29798
XMR_BASE_WALLET_RPC_PORT = 29998
def waitForBidState(delay_event, port, bid_id, state_str, wait_for=60):
    """Poll the client UI on `port` until bid `bid_id` reaches state `state_str`.

    Polls once per second for up to `wait_for` seconds.
    Raises ValueError if the test is stopped or the state is never reached.
    """
    for _ in range(wait_for):
        if delay_event.is_set():
            raise ValueError('Test stopped.')
        # BUG FIX: the `port` argument was ignored and 12700 was hard-coded,
        # so the helper could only ever query the first client.
        bid = json.loads(urlopen('http://127.0.0.1:{}/json/bids/{}'.format(port, bid_id)).read())
        if bid['bid_state'] == state_str:
            return
        delay_event.wait(1)
    raise ValueError('waitForBidState failed')
def updateThread(xmr_addr, delay_event):
    """Mine one XMR block to `xmr_addr` every ~2 seconds until `delay_event` is set."""
    params = {'wallet_address': xmr_addr, 'amount_of_blocks': 1}
    while not delay_event.is_set():
        try:
            callrpc_xmr_na(XMR_BASE_RPC_PORT + 1, 'generateblocks', params)
        except Exception as exc:
            # Best effort: the daemon may not be up yet; log and retry.
            print('updateThread error', str(exc))
        delay_event.wait(2)
class XmrTestBase(unittest.TestCase):
    """Base test case running three local basicswap clients (Particl + regtest Monero).

    setUpClass prepares fresh datadirs and configs for three clients;
    start_processes launches them and seeds both chains; tearDownClass
    stops the miner thread and terminates the client processes.
    """

    @classmethod
    def setUpClass(cls):
        super(XmrTestBase, cls).setUpClass()
        # Shared stop flag, checked by helper threads and polling loops.
        cls.delay_event = threading.Event()
        cls.update_thread = None
        cls.processes = []
        for i in range(3):
            # Fresh datadir per client; wipe anything left from a prior run.
            client_path = os.path.join(test_path, 'client{}'.format(i))
            config_path = os.path.join(client_path, cfg.CONFIG_FILENAME)
            try:
                shutil.rmtree(client_path)
            except Exception as ex:
                logging.warning('setUpClass %s', str(ex))
            # Invoke basicswap-prepare as if from the command line.
            testargs = [
                'basicswap-prepare',
                '-datadir="{}"'.format(client_path),
                '-bindir="{}"'.format(os.path.join(test_path, 'bin')),
                '-portoffset={}'.format(i),
                '-particl_mnemonic="{}"'.format(mnemonics[i]),
                '-regtest',
                '-withcoin=monero',
                '-noextractover',
                '-xmrrestoreheight=0']
            with patch.object(sys, 'argv', testargs):
                prepareSystem.main()
            # Rewrite particl.conf: strip staking options, pin ports, and
            # connect the three clients only to each other.
            with open(os.path.join(client_path, 'particl', 'particl.conf'), 'r') as fp:
                lines = fp.readlines()
            with open(os.path.join(client_path, 'particl', 'particl.conf'), 'w') as fp:
                for line in lines:
                    if not line.startswith('staking'):
                        fp.write(line)
                fp.write('port={}\n'.format(PARTICL_PORT_BASE + i))
                fp.write('bind=127.0.0.1\n')
                fp.write('dnsseed=0\n')
                fp.write('discover=0\n')
                fp.write('listenonion=0\n')
                fp.write('upnp=0\n')
                fp.write('minstakeinterval=5\n')
                fp.write('smsgsregtestadjust=0\n')
                for ip in range(3):
                    if ip != i:
                        fp.write('connect=127.0.0.1:{}\n'.format(PARTICL_PORT_BASE + ip))
            # Monero daemons form an exclusive local p2p mesh.
            with open(os.path.join(client_path, 'monero', 'monerod.conf'), 'a') as fp:
                fp.write('p2p-bind-ip=127.0.0.1\n')
                fp.write('p2p-bind-port={}\n'.format(XMR_BASE_P2P_PORT + i))
                for ip in range(3):
                    if ip != i:
                        fp.write('add-exclusive-node=127.0.0.1:{}\n'.format(XMR_BASE_P2P_PORT + ip))
            # Tighten the swap-engine timers so tests run quickly.
            with open(config_path) as fs:
                settings = json.load(fs)
            settings['min_delay_event'] = 1
            settings['max_delay_event'] = 4
            settings['min_delay_retry'] = 10
            settings['max_delay_retry'] = 20
            settings['check_progress_seconds'] = 5
            settings['check_watched_seconds'] = 5
            settings['check_expired_seconds'] = 60
            settings['check_events_seconds'] = 5
            settings['check_xmr_swaps_seconds'] = 5
            with open(config_path, 'w') as fp:
                json.dump(settings, fp, indent=4)
        # Let Ctrl-C request an orderly stop through delay_event.
        signal.signal(signal.SIGINT, lambda signal, frame: cls.signal_handler(cls, signal, frame))

    def signal_handler(self, sig, frame):
        # Called from the SIGINT handler registered in setUpClass.
        logging.info('signal {} detected.'.format(sig))
        self.delay_event.set()

    def run_thread(self, client_id):
        """Process target: run one basicswap client against its datadir."""
        client_path = os.path.join(test_path, 'client{}'.format(client_id))
        testargs = ['basicswap-run', '-datadir=' + client_path, '-regtest']
        with patch.object(sys, 'argv', testargs):
            runSystem.main()

    def start_processes(self):
        """Launch the clients, mine initial XMR blocks and wait for Particl height."""
        self.delay_event.clear()
        for i in range(3):
            self.processes.append(multiprocessing.Process(target=self.run_thread, args=(i,)))
            self.processes[-1].start()
        waitForServer(self.delay_event, 12701)

        def waitForMainAddress():
            # Poll the second client's wallet listing until the XMR main
            # address appears (the wallet RPC can lag daemon start-up).
            for i in range(20):
                if self.delay_event.is_set():
                    raise ValueError('Test stopped.')
                try:
                    wallets = json.loads(urlopen('http://127.0.0.1:12701/json/wallets').read())
                    # NOTE(review): '6' is presumably the Monero coin id in the
                    # wallets listing — confirm against basicswap's Coins enum.
                    return wallets['6']['main_address']
                except Exception as e:
                    print('Waiting for main address {}'.format(str(e)))
                self.delay_event.wait(1)
            raise ValueError('waitForMainAddress timedout')
        xmr_addr1 = waitForMainAddress()

        # Seed the Monero chain so outputs are spendable.
        num_blocks = 100
        if callrpc_xmr_na(XMR_BASE_RPC_PORT + 1, 'get_block_count')['count'] < num_blocks:
            logging.info('Mining {} Monero blocks to {}.'.format(num_blocks, xmr_addr1))
            callrpc_xmr_na(XMR_BASE_RPC_PORT + 1, 'generateblocks', {'wallet_address': xmr_addr1, 'amount_of_blocks': num_blocks})
        logging.info('XMR blocks: %d', callrpc_xmr_na(XMR_BASE_RPC_PORT + 1, 'get_block_count')['count'])

        # Keep mining in the background for the whole test run.
        self.update_thread = threading.Thread(target=updateThread, args=(xmr_addr1, self.delay_event))
        self.update_thread.start()

        # Wait for height, or sequencelock is thrown off by genesis blocktime
        num_blocks = 3
        logging.info('Waiting for Particl chain height %d', num_blocks)
        for i in range(60):
            if self.delay_event.is_set():
                raise ValueError('Test stopped.')
            try:
                wallets = json.loads(urlopen('http://127.0.0.1:12701/json/wallets').read())
                # NOTE(review): '1' is presumably the Particl coin id — confirm.
                particl_blocks = wallets['1']['blocks']
                print('particl_blocks', particl_blocks)
                if particl_blocks >= num_blocks:
                    break
            except Exception as e:
                print('Error reading wallets', str(e))
            self.delay_event.wait(1)
        # NOTE(review): particl_blocks is unbound if every poll above failed.
        assert(particl_blocks >= num_blocks)

    @classmethod
    def tearDownClass(cls):
        """Stop the miner thread and terminate all client processes."""
        logging.info('Stopping test')
        cls.delay_event.set()
        if cls.update_thread:
            cls.update_thread.join()
        for p in cls.processes:
            p.terminate()
        for p in cls.processes:
            p.join()
        cls.update_thread = None
        cls.processes = []
|
run_ensemble.py | from elegantrl.train.run_parallel import *
from elegantrl.train.run_tutorial import *
import random as rd
'''run.py'''
def train_and_evaluate_em(args):  # keep
    """Train an ensemble of agents, one member per entry of args.ensemble_gpus.

    For each member this spawns worker (explorer) processes, one learner
    process per GPU, and a single evaluator attached to the last learner.
    Blocks until the last spawned process exits, then terminates the rest.
    """
    args.init_before_training()  # necessary!
    args_cwd = args.cwd
    process = list()
    mp.set_start_method(method='spawn', force=True)  # force all the multiprocessing to 'spawn' methods

    # todo ensemble
    ensemble_num = len(args.ensemble_gpus)
    for agent_id in range(ensemble_num):
        # todo ensemble
        # Each member gets its own sub-directory, GPU set and random seed.
        args.cwd = f'{args_cwd}/ensemble_{agent_id:02}'
        args.learner_gpus = args.ensemble_gpus[agent_id]
        args.eval_gpu_id = args.learner_gpus[0]
        args.random_seed += agent_id * len(args.learner_gpus) * args.worker_num
        os.makedirs(args.cwd, exist_ok=True)

        '''learner'''
        learner_num = len(args.learner_gpus)
        learner_pipe = PipeLearner(learner_num)
        for learner_id in range(learner_num):
            '''explorer'''
            worker_pipe = PipeWorker(args.env_num, args.worker_num)
            for worker_id in range(args.worker_num):
                proc = mp.Process(target=worker_pipe.run, args=(args, worker_id, learner_id))
                proc.start()
                process.append(proc)

            '''evaluator'''
            # Only the last learner of each member gets an evaluator.
            if learner_id == learner_num - 1:
                evaluator_pipe = PipeEvaluator()
                proc = mp.Process(target=evaluator_pipe.run, args=(args, agent_id))
                proc.start()
                process.append(proc)
            else:
                evaluator_pipe = None

            proc = mp.Process(target=learner_pipe.run, args=(args, evaluator_pipe, worker_pipe, learner_id, agent_id))
            proc.start()
            process.append(proc)

    # [(p.start(), time.sleep(0.1)) for p in process]
    # Wait for the last learner, then tear everything down.
    process[-1].join()
    process_safely_terminate(process)
class Ensemble:
    """Periodically shares agent weights among an ensemble of learners.

    Every `ensemble_gap` seconds each member saves its agent, publishes its
    recent average episode return via an `episode_return_*` marker directory,
    and then — unless it is currently the best — reloads weights from a
    (softmax-weighted) better-performing peer.
    """

    def __init__(self, ensemble_gap, ensemble_num, agent_id):
        self.ensemble_gap = ensemble_gap
        self.ensemble_num = ensemble_num
        # Stagger the first exchange so members don't all save/load at once.
        self.ensemble_timer = time.time() + ensemble_gap * agent_id / ensemble_num
        self.agent_id = agent_id

    def run(self, cwd, agent):
        """Run one ensemble-exchange step for `agent`; no-op until the gap elapses."""
        if time.time() < self.ensemble_timer + self.ensemble_gap:
            return
        self.ensemble_timer = time.time()

        # A `lock` directory marks this cwd as mid-save for the other members.
        lock_signal = f'{cwd}/lock'
        os.makedirs(lock_signal, exist_ok=True)
        agent.save_or_load_agent(cwd, if_save=True)
        os.rmdir(lock_signal)

        '''update episode_return_dir_name'''
        recorder = np.load(f"{cwd}/recorder.npy")  # `evaluator.py save_learning_curve()`
        r_avg = recorder[-8:, 1].mean()  # mean return over the last 8 evaluations
        new_r_dir_name = f'episode_return_{r_avg:08.3f}'
        old_r_dir_name = self.get_episode_return_dir_name(cwd)
        os.makedirs(f"{cwd}/{new_r_dir_name}", exist_ok=True)
        os.rmdir(f"{cwd}/{old_r_dir_name}") if old_r_dir_name else None

        '''build ensemble_rs'''
        ensemble_rs = list()
        for i in range(self.ensemble_num):
            r_file = self.get_episode_return_dir_name(f"{cwd[:-2]}{i:02}")
            ensemble_rs.append(float(r_file.split('_')[-1]) if r_file else -2 ** 16)
        ensemble_rs = np.array(ensemble_rs)

        '''move training files'''
        if r_avg == np.max(ensemble_rs):
            move_id = self.agent_id  # best member keeps its own weights
        elif r_avg == np.min(ensemble_rs):
            move_id = np.argmax(ensemble_rs)  # worst member copies the best
        else:
            soft_max_rs = self.np_soft_max(ensemble_rs)
            # BUG FIX: `rd` is the stdlib `random` module, whose `choice`
            # neither samples from an int range nor accepts `p=`; weighted
            # sampling needs numpy's choice.
            move_id = np.random.choice(self.ensemble_num, p=soft_max_rs)

        if move_id != self.agent_id:
            # BUG FIX: the format spec was outside the braces
            # (f'{move_id}:02' yielded e.g. ".../ensemble_3:02"), so the peer
            # path never matched the `ensemble_XX` directory layout.
            other_cwd = cwd[:-2] + f'{move_id:02}'
            while os.path.exists(f"{other_cwd}/lock"):  # wait out a peer's save
                time.sleep(1)
            agent.save_or_load_agent(other_cwd, if_save=False)
            print(f"{' '*20}{self.agent_id:2}<-{move_id} {repr(ensemble_rs.round(2))}")

    @staticmethod
    def get_episode_return_dir_name(cwd):
        """Return the `episode_return_*` marker directory name in `cwd`, or None."""
        r_files = [name
                   for name in os.listdir(cwd)
                   if name.find('episode_return_') == 0]
        return r_files[0] if len(r_files) else None

    @staticmethod
    def np_soft_max(raw_x):
        """Softmax over standardized `raw_x`; returns probabilities summing to 1."""
        norm_x = (raw_x - raw_x.mean()) / (raw_x.std() + 1e-6)
        exp_x = np.exp(norm_x) + 1e-6
        return exp_x / exp_x.sum()
class PipeLearner:
    """Pipes plus a fixed ring/butterfly topology for averaging networks
    and optimizers across 1/2/4/8 learner GPUs, and the learner main loop.
    """

    def __init__(self, learner_num):
        self.learner_num = learner_num
        # Number of pairwise exchange rounds needed to mix all learners.
        self.round_num = int(np.log2(learner_num))
        # self.pipes: used for the round_id == -1 trajectory exchange;
        # pipe0s/pipe1s: used for the per-round network/optimizer exchange.
        self.pipes = [mp.Pipe() for _ in range(learner_num)]
        pipes = [mp.Pipe() for _ in range(learner_num)]
        self.pipe0s = [pipe[0] for pipe in pipes]
        self.pipe1s = [pipe[1] for pipe in pipes]
        self.device_list = [torch.device(f'cuda:{i}') for i in range(learner_num)]

        # Partner table: idx_l[i][round] is learner i's partner in that round.
        if learner_num == 1:
            self.idx_l = None
        elif learner_num == 2:
            self.idx_l = [(1,), (0,), ]
        elif learner_num == 4:
            self.idx_l = [(1, 2), (0, 3),
                          (3, 0), (2, 1), ]
        elif learner_num == 8:
            self.idx_l = [(1, 2, 4), (0, 3, 5),
                          (3, 0, 6), (2, 1, 7),
                          (5, 6, 0), (4, 7, 1),
                          (7, 4, 2), (6, 5, 3), ]
        else:
            print(f"| LearnerPipe, ERROR: learner_num {learner_num} should in (1, 2, 4, 8)")
            exit()

    def comm_data(self, data, learner_id, round_id):
        """Send `data` to this round's partner and receive the partner's data.

        round_id == -1 selects the trajectory-exchange pipes (last round's
        partner); any other round uses the network-exchange pipes.
        """
        if round_id == -1:
            learner_jd = self.idx_l[learner_id][round_id]
            self.pipes[learner_jd][0].send(data)
            return self.pipes[learner_id][1].recv()
        else:
            learner_jd = self.idx_l[learner_id][round_id]
            self.pipe0s[learner_jd].send(data)
            return self.pipe1s[learner_id].recv()

    def comm_network_optim(self, agent, learner_id):
        """Average actor/critic nets and optimizers with each round's partner."""
        device = self.device_list[learner_id]
        for round_id in range(self.round_num):
            data = get_comm_data(agent)
            data = self.comm_data(data, learner_id, round_id)
            if data:
                # data layout: (act, act_optim, cri, cri_optim, act_target, cri_target)
                avg_update_net(agent.act, data[0], device)
                avg_update_optim(agent.act_optim, data[1], device) if data[1] else None
                avg_update_net(agent.cri, data[2], device) if data[2] else None
                avg_update_optim(agent.cri_optim, data[3], device)
                avg_update_net(agent.act_target, data[4], device) if agent.if_use_act_target else None
                avg_update_net(agent.cri_target, data[5], device) if agent.if_use_cri_target else None

    def run(self, args, comm_eva, comm_exp, learner_id=0, agent_id=0):
        """Learner main loop: explore via comm_exp, update networks, sync
        with peer learners, and report to the evaluator (comm_eva)."""
        # print(f'| os.getpid()={os.getpid()} PipeLearn.run, {learner_id}')
        pass

        '''init Agent'''
        agent = args.agent
        agent.init(net_dim=args.net_dim, state_dim=args.state_dim, action_dim=args.action_dim,
                   gamma=args.gamma, reward_scale=args.reward_scale,
                   learning_rate=args.learning_rate, if_per_or_gae=args.if_per_or_gae,
                   env_num=args.env_num, gpu_id=args.learner_gpus[learner_id], )
        agent.save_or_load_agent(args.cwd, if_save=False)

        '''init ReplayBuffer'''
        if agent.if_off_policy:
            buffer_num = args.worker_num * args.env_num
            # With >1 learners the trajectory exchange doubles the inflow.
            if self.learner_num > 1:
                buffer_num *= 2
            buffer = ReplayBufferMP(max_len=args.max_memo, state_dim=args.state_dim,
                                    action_dim=1 if args.if_discrete else args.action_dim,
                                    if_use_per=args.if_per_or_gae,
                                    buffer_num=buffer_num, gpu_id=args.learner_gpus[learner_id])
            buffer.save_or_load_history(args.cwd, if_save=False)

            def update_buffer(_traj_list):
                # Extend each sub-buffer; accumulate (steps, mean exploration reward).
                step_sum = 0
                r_exp_sum = 0
                for buffer_i, (ten_state, ten_other) in enumerate(_traj_list):
                    buffer.buffers[buffer_i].extend_buffer(ten_state, ten_other)
                    step_r_exp = get_step_r_exp(ten_reward=ten_other[:, 0])  # other = (reward, mask, action)
                    step_sum += step_r_exp[0]
                    r_exp_sum += step_r_exp[1]
                return step_sum, r_exp_sum / len(_traj_list)
        else:
            buffer = list()

            def update_buffer(_traj_list):
                # On-policy: concatenate trajectories and replace the buffer in place.
                _traj_list = list(map(list, zip(*_traj_list)))
                _traj_list = [torch.cat(t, dim=0) for t in _traj_list]
                (ten_state, ten_reward, ten_mask, ten_action, ten_noise) = _traj_list
                buffer[:] = (ten_state.squeeze(1),
                             ten_reward,
                             ten_mask,
                             ten_action.squeeze(1),
                             ten_noise.squeeze(1))
                _step, _r_exp = get_step_r_exp(ten_reward=buffer[1])
                return _step, _r_exp

        '''start training'''
        ensemble = Ensemble(ensemble_gap=args.ensemble_gap,
                            ensemble_num=len(args.ensemble_gpus),
                            agent_id=agent_id)  # todo ensemble
        cwd = args.cwd
        batch_size = args.batch_size
        repeat_times = args.repeat_times
        soft_update_tau = args.soft_update_tau
        del args

        if_train = True
        while if_train:
            ensemble.run(cwd, agent)  # todo ensemble
            traj_lists = comm_exp.explore(agent)
            # Trade trajectories with the peer learner before the update.
            if self.learner_num > 1:
                data = self.comm_data(traj_lists, learner_id, round_id=-1)
                traj_lists.extend(data)
            traj_list = sum(traj_lists, list())

            if sys.platform == 'win32':  # Avoid CUDA runtime error (801)
                # Python3.9< multiprocessing can't send torch.tensor_gpu in WinOS. So I send torch.tensor_cpu
                traj_list = [[item.to(torch.device('cpu'))
                              for item in item_list]
                             for item_list in traj_list]

            steps, r_exp = update_buffer(traj_list)
            del traj_lists

            logging_tuple = agent.update_net(buffer, batch_size, repeat_times, soft_update_tau)
            if self.learner_num > 1:
                self.comm_network_optim(agent, learner_id)

            # Only the learner with an evaluator can end training (if_train).
            if comm_eva:
                if_train, if_save = comm_eva.evaluate_and_save_mp(agent.act, steps, r_exp, logging_tuple)

        agent.save_or_load_agent(cwd, if_save=True)
        if agent.if_off_policy:
            print(f"| LearnerPipe.run: ReplayBuffer saving in {cwd}")
            buffer.save_or_load_history(cwd, if_save=True)
'''run'''
from elegantrl.agents.AgentPPO import AgentPPO
from elegantrl.agents.AgentA2C import AgentA2C
from elegantrl.train.config import Arguments
def demo_continuous_action_on_policy():  # [ElegantRL.2021.11.11]
    """Configure and launch ensemble PPO/A2C training on a continuous-action env.

    Env and agent are selected by the module-level ENV_ID / DRL_ID globals
    (parsed from sys.argv in __main__). The triple-quoted strings inside the
    branches record past training results for reference.
    """
    env_name = ['Pendulum-v1', 'LunarLanderContinuous-v2',
                'BipedalWalker-v3', 'BipedalWalkerHardcore-v3'][ENV_ID]
    agent_class = [AgentPPO, AgentA2C][DRL_ID]
    args = Arguments(env=build_env(env_name), agent=agent_class())
    # args.if_per_or_gae = True

    # Per-environment hyper-parameters.
    if env_name in {'Pendulum-v1', 'Pendulum-v0'}:
        """
        Step 45e4, Reward -138, UsedTime 373s PPO
        Step 40e4, Reward -200, UsedTime 400s PPO
        Step 46e4, Reward -213, UsedTime 300s PPO
        """
        # args = Arguments(env=build_env(env_name), agent=agent_class())  # One way to build env
        # args = Arguments(env=env_name, agent=agent_class())  # Another way to build env
        # args.env_num = 1
        # args.max_step = 200
        # args.state_dim = 3
        # args.action_dim = 1
        # args.if_discrete = False

        args.gamma = 0.97
        args.net_dim = 2 ** 8
        args.worker_num = 2
        args.reward_scale = 2 ** -2
        args.target_step = 200 * 16  # max_step = 200
        args.eval_gap = 2 ** 5
    if env_name in {'LunarLanderContinuous-v2', 'LunarLanderContinuous-v1'}:
        """
        ################################################################################
        ID Step maxR | avgR stdR avgS stdS | expR objC etc.
        0 1.58e+04 -125.13 | -125.13 45.9 68 12 | -1.64 1.43 -0.01 -0.50
        0 2.79e+05 13.42 | 13.42 162.1 295 112 | 0.05 0.36 0.04 -0.51
        0 7.27e+05 203.74 | 203.74 100.6 342 113 | 0.16 0.13 -0.01 -0.53
        | UsedTime: 823 |
        0 3.35e+05 -62.39 | -62.39 144.1 411 151 | 0.00 0.32 -0.02 -0.51
        0 5.43e+05 164.83 | 164.83 145.1 371 97 | 0.13 0.16 -0.06 -0.52
        0 7.82e+05 204.31 | 204.31 126.2 347 108 | 0.18 0.17 -0.00 -0.52
        | UsedTime: 862 |
        """
        args.eval_times1 = 2 ** 4
        args.eval_times2 = 2 ** 6
        args.target_step = args.env.max_step * 4
    if env_name in {'BipedalWalker-v3', 'BipedalWalker-v2'}:
        """
        Step 51e5, Reward 300, UsedTime 2827s PPO
        Step 78e5, Reward 304, UsedTime 4747s PPO
        Step 61e5, Reward 300, UsedTime 3977s PPO GAE
        Step 95e5, Reward 291, UsedTime 6193s PPO GAE
        """
        args.eval_times1 = 2 ** 3
        args.eval_times2 = 2 ** 5
        args.gamma = 0.98
        args.target_step = args.env.max_step * 16
    if env_name in {'BipedalWalkerHardcore-v3', 'BipedalWalkerHardcore-v2'}:
        """
        Step 57e5, Reward 295, UsedTime 17ks PPO
        Step 70e5, Reward 300, UsedTime 21ks PPO
        """
        args.gamma = 0.98
        args.net_dim = 2 ** 8
        args.max_memo = 2 ** 22
        args.batch_size = args.net_dim * 4
        args.repeat_times = 2 ** 4
        args.learning_rate = 2 ** -16
        args.eval_gap = 2 ** 8
        args.eval_times1 = 2 ** 2
        args.eval_times2 = 2 ** 5
        # args.break_step = int(80e5)
        args.worker_num = 4
        args.target_step = args.env.max_step * 16

    # todo ensemble
    args.if_overwrite = False
    args.ensemble_gpus = ((0,), (1,), (2,), (3,))  # one GPU tuple per ensemble member
    args.ensemble_gap = 2 ** 8  # seconds between weight exchanges
    args.cwd = './temp'

    args.target_return = 320
    args.if_allow_break = True
    args.learner_gpus = (GPU_ID,)  # single GPU
    # args.learner_gpus = (0, 1)  # multiple GPUs
    if_use_single_process = 0
    if if_use_single_process:
        train_and_evaluate(args)  # single process
    else:
        train_and_evaluate_em(args)  # multiple process
if __name__ == '__main__':
    # Default CLI args (GPU_ID=3, ENV_ID=1, DRL_ID=0) so the demo also runs
    # without arguments.
    sys.argv.extend('3 1 0'.split(' '))
    # SECURITY/idiom fix: the three IDs are plain integers, so parse them with
    # int() instead of eval() on command-line input.
    GPU_ID = int(sys.argv[1])
    ENV_ID = int(sys.argv[2])
    DRL_ID = int(sys.argv[3])
    demo_continuous_action_on_policy()
|
tpgarchivingwidget.py | #!/usr/bin/env python
#############################################################################
##
# This file is part of Taurus
##
# http://taurus-scada.org
##
# Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain
##
# Taurus is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
##
# Taurus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
##
# You should have received a copy of the GNU Lesser General Public License
# along with Taurus. If not, see <http://www.gnu.org/licenses/>.
##
#############################################################################
import time, sys, traceback
import threading
from taurus.external.qt import Qt
import pyqtgraph as pg
from taurus.qt.qtgui.application import TaurusApplication
from taurus.qt.qtgui.taurusgui import TaurusGui
from taurus.qt.qtgui.container import TaurusMainWindow, TaurusWidget
from taurus_tangoarchiving.tangoarchivingvalidator import TangoArchivingAttributeNameValidator
from taurus_tangoarchiving.tangoarchivingvalidator import str2localtime
from taurus_tangoarchiving.widget.tangoarchivingmodelchooser import TangoArchivingModelSelectorItem
from taurus_tangoarchiving.widget.tangoarchivingtools import TangoArchivingTimeSelector
from taurus.core.taurushelper import getValidatorFromName
import fandango
import fandango as fn
import fandango.qt
from operator import isSequenceType
try:
from taurus.qt.qtgui.tpg import (TaurusPlot,
DateAxisItem,
TaurusPlotDataItem)
from taurus.qt.qtgui.tpg.curvesmodel import TaurusItemConf
except ImportError:
raise Exception('Missing dependency: taurus_pyqtgraph')
class ArchivingWidget(TaurusWidget): #Qt.QWidget
    """Composite widget: archived-data plot + external legend + time selector.

    Models are tgarch:// attribute URIs; addXYModelsSig lets models be added
    from outside the Qt event loop (emitted in run()).
    """
    # (attribute list, t0 string, t1 string) -> addXYModels
    addXYModelsSig = Qt.pyqtSignal(list,str,str)

    def __init__(self,parent=None):
        TaurusWidget.__init__(self,parent=parent)
        self.plot = ArchivingPlot(self)
        self.legend = ArchivingLegend(self.plot)
        self.modelchooser = None
        self.plot.updateSig[bool,bool].connect(self.updateAll)
        # Current time window as ISO strings ('0' placeholders until first set).
        self.t0,self.t1 = 0,0
        msi = self.modelchooser
        if msi:
            # Connect button
            # NOTE(review): dead branch (modelchooser is always None above) and
            # `onAddXYModel` is unresolved in this scope — fix before enabling.
            msi.modelsAdded.connect(onAddXYModel)
        # Indeterminate progress bar used as a "loading" indicator.
        self.pb = Qt.QProgressBar()
        self.pb.setGeometry(0, 0, 300, 25)
        self.info('building layout')
        self.setLayout(Qt.QVBoxLayout())
        self.tc = TangoArchivingTimeSelector()
        # Splitter: plot on the left, external legend on the right.
        l1 = Qt.QSplitter()
        l1.addWidget(self.plot)
        self.plot.setTimeChooser(self.tc)
        l1.addWidget(self.legend)
        # Top row: time selector + refresh button.
        l0 = Qt.QWidget()
        l0.setLayout(Qt.QHBoxLayout())
        l0.layout().addWidget(self.tc)
        self.refresh = Qt.QPushButton(Qt.QIcon.fromTheme("view-refresh"),
                                      "refresh tgarch curves")
        self.refresh.clicked.connect(self.onRefresh)
        self.layout().addWidget(l0)
        l0.layout().addWidget(self.refresh)
        self.layout().addWidget(l1,10)
        self.layout().addWidget(self.pb)
        self.updateProgressBar()
        self.addXYModelsSig.connect(self.addXYModels)

    @staticmethod
    def run(models, t0, t1):
        """Create a TaurusApplication, show the widget and load `models` over [t0, t1]."""
        app = TaurusApplication(app_name='tpgArchiving')
        gui = ArchivingWidget()
        #gui.setTimes(args[1],args[2])
        #gui.setModel(args[0])
        gui.show()
        gui.addXYModelsSig.emit(list(models),t0,t1)
        app.exec_()

    def setTimes(self,t0,t1):
        # Placeholder; the window is currently set through addXYModels / tc.
        pass

    def addXYModels(self,attrs,t0=None,t1=None):
        """
        Convert model, dates to
        'tgarch://alba03.cells.es:10000/sr/id/scw01/pressure?db=*;t0=2019-11-11T11:41:59;t1=2020-01-03T09:03:03;ts',
        """
        c = self.cursor()
        self.setCursor(Qt.Qt.WaitCursor)
        attrs = fn.toList(attrs)
        # Fall back to the time-selector widget when no window is known yet.
        if not t0 and not t1 and not self.t0 and not self.t1:
            t0,t1 = self.tc.getTimes()
        if t0 and t1:
            # Accept epoch numbers or (possibly relative) date strings.
            t0 = t0 if fn.isNumber(t0) else fn.str2time(t0,relative=True)
            t1 = t1 if fn.isNumber(t1) else fn.str2time(t1,relative=True)
            self.t0,self.t1 = fn.time2str(t0,iso=1),fn.time2str(t1,iso=1)
            self.t0 = self.t0.replace(' ','T')
            self.t1 = self.t1.replace(' ','T')
        ms = []
        for attr in attrs:
            attr = fn.tango.get_full_name(attr,fqdn=True)
            attr = attr.replace('tango://','')
            q = 'tgarch://%s?db=*;t0=%s;t1=%s' % (attr,self.t0,self.t1)
            # Pair of (x model with ';ts' timestamps, y model).
            m = (q+';ts',q)
            ms.append(m)
        self.plot.onAddXYModel(ms)
        self.setCursor(c)

    addModels = addXYModels #For backwards compatibility

    def getTimes(self):
        """Return the current (t0, t1) window as stored ISO strings."""
        return self.t0,self.t1

    ###########################################################################
    # Create tgarch tool bar
    ###########################################################################

    def onRefresh(self):
        """Re-query all tgarch curves for the selected time window (async)."""
        # Update progress bar
        self.updateProgressBar(False)
        t1 = threading.Thread(target=self._onRefresh)
        t1.start()

    def _onRefresh(self):
        """Worker for onRefresh: rebuild each tgarch curve's model with the new t0/t1."""
        t0, t1 = self.tc.getTimes()
        # Validate models
        v = TangoArchivingAttributeNameValidator()
        query = "{0};t0={1};t1={2}"
        for curve in self.plot.getPlotItem().listDataItems():
            if isinstance(curve, TaurusPlotDataItem):
                ymodel = curve.getModel()
                # tgarch attr
                if v.getUriGroups(ymodel).get('scheme') != 'tgarch':
                    continue
                fullname, _, _ = v.getNames(ymodel)
                bmodel, current_query = fullname.split('?')
                db = current_query.split(';')[0]
                q = query.format(db, t0, t1)
                model = "{0}?{1}".format(bmodel, q)
                xmodel = "{};ts".format(model)
                # Reset then reassign to force a re-read from the archive.
                curve.setModel(None)
                curve.setXModel(None)
                curve.setModel(model)
                curve.setXModel(xmodel)
        self.updateAll(legend=False)

    def updateProgressBar(self, stop=True):
        """Busy indicator: range (0, 0) animates; range (0, 1) stops it."""
        if stop is True:
            final = 1
        else:
            final = 0
        self.pb.setRange(0, final)

    ###########################################################################
    # Helper
    ###########################################################################

    def updateAll(self,legend=True,stop=True):
        """Refresh the legend, autorange the plot and stop/keep the progress bar."""
        print('updateAll(%s,%s)' % (legend,stop))
        # Update legend
        if legend is True:
            try:
                self.legend.updateExternalLegend()
            except:
                traceback.print_exc()
        # run plot auto range
        time.sleep(0.2) # Wait till models are loading
        self.plot.plot_items.getViewBox().menu.autoRange()
        # Stop progress bar
        self.updateProgressBar(stop=stop)
class ArchivingPlot(TaurusPlot):
    """TaurusPlot with a date X axis that keeps a TangoArchivingTimeSelector
    in sync with the visible X range and accepts (x, y) tgarch model pairs.
    """
    # (legend_update, progressbar_stop) -> ArchivingWidget.updateAll
    updateSig = Qt.pyqtSignal(bool,bool)

    def __init__(self,parent=None):
        TaurusPlot.__init__(self)
        plot = self
        #plot.setBackgroundBrush(Qt.QColor('white'))
        self.time_selector = None
        axis = self.axis = DateAxisItem(orientation='bottom')
        plot_items = self.plot_items = plot.getPlotItem()
        axis.attachToPlotItem(plot_items)
        # TODO (cleanup menu actions)
        # The built-in legend is hidden; ArchivingLegend renders it externally.
        if plot_items.legend is not None:
            plot_items.legend.hide()
        vb = plot.getPlotItem().getViewBox()
        vb.sigXRangeChanged.connect(self.onUpdateXViewRange)

    ###########################################################################
    # onAddXYModel
    ###########################################################################

    def onAddXYModel(self, models=None):
        """
        models being a list like:
        [('tgarch://alba03.cells.es:10000/sr/id/scw01/pressure?db=*;t0=2019-11-11T11:41:59;t1=2020-01-03T09:03:03;ts',
        'tgarch://alba03.cells.es:10000/sr/id/scw01/pressure?db=*;t0=2019-11-11T11:41:59;t1=2020-01-03T09:03:03')]
        """
        try:
            # Signal "busy": refresh the legend, keep the progress bar running.
            self.updateSig.emit(True,False)
            print('onAddXYModel(%s)'%models)
            # Skip models already present in the plot.
            current = self._model_chooser_tool.getModelNames()
            print('current: %s' % str(current))
            models = [m for m in models if m not in current]
            print('new models: %s' % str(models))
            self.addModels(models)
            # BUG FIX: a stray traceback.print_exc() ran here on every
            # successful call, printing "NoneType: None" to stderr.
            self.updateSig.emit(True,True)
        except Exception:
            # Narrowed from bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate.
            traceback.print_exc()

    ###########################################################################
    # Update t0 and t1 based on sigXRangeChanged
    ###########################################################################

    def onUpdateXViewRange(self):
        """Mirror the plot's visible X range into the time-selector widget."""
        x, _ = self.viewRange()
        t0, t1 = x
        t0s = str2localtime(t0)
        t1s = str2localtime(t1)
        print('times: %s(%s) - %s(%s)' % (t0,t0s,t1,t1s))
        if t0s and t1s:
            self.updateTimeChooser(t0s,t1s)

    def setTimeChooser(self,time_selector):
        """Attach the TangoArchivingTimeSelector kept in sync with the X range."""
        self.time_selector = time_selector

    def updateTimeChooser(self, t0s, t1s):
        """Push t0s/t1s into the selector's combo boxes (items 5 and 7)."""
        if self.time_selector:
            # BUG FIX: each setItemText call was accidentally duplicated.
            self.time_selector.ui.comboBox_begin.setItemText(5, t0s)
            self.time_selector.ui.comboBox_end.setItemText(7, t1s)
            self.time_selector.ui.comboBox_begin.setCurrentIndex(5)
            self.time_selector.ui.comboBox_end.setCurrentIndex(7)
        else:
            print('No time chooser widget defined')
class ArchivingLegend(Qt.QGraphicsView):
    """External legend for an ArchivingPlot, rendered in its own QGraphicsView."""

    ###########################################################################
    # Legend
    ###########################################################################

    def __init__(self, plot):
        # BUG FIX: the original called Qt.QGraphicsScene.__init__(self, ...)
        # on a QGraphicsView subclass, which raises a TypeError in PyQt.
        # QGraphicsView accepts the scene directly in its constructor.
        Qt.QGraphicsView.__init__(self, Qt.QGraphicsScene())
        gv = self
        self.plot = plot
        gv.setBackgroundBrush(Qt.QBrush(Qt.QColor('white')))
        self.legend = pg.LegendItem(None, offset=(0, 0))
        gv.scene().addItem(self.legend)

    def updateExternalLegend(self):
        """Rebuild the legend from the plot's current (named) data items."""
        plot_items = self.plot.plot_items
        # Clear first, then re-add, so renamed/removed curves don't linger.
        for dataitem in plot_items.listDataItems():
            self.legend.removeItem(dataitem.name())
        for dataitem in plot_items.listDataItems():
            if dataitem.name():
                self.legend.addItem(dataitem, dataitem.name())

    ###########################################################################
    # Connect CurvesAppearanceChooser to external legend
    ###########################################################################
    from taurus_pyqtgraph.curveproperties import (CurvesAppearanceChooser,
                                                  CurveAppearanceProperties)

    def onApply(self):
        """Replacement for CurvesAppearanceChooser.onApply: apply the selected
        curve properties and refresh the external legend."""
        names = self.getSelectedCurveNames()
        prop = self.getShownProperties()
        # Update self.curvePropDict for selected properties
        for n in names:
            self.curvePropDict[n] = CurveAppearanceProperties.merge(
                [self.curvePropDict[n], prop],
                conflict=CurveAppearanceProperties.inConflict_update_a)
        # emit a (PyQt) signal telling what properties (first argument) need to
        # be applied to which curves (second argument)
        # self.curveAppearanceChanged.emit(prop, names)
        # return both values
        self.curvePropAdapter.setCurveProperties(self.curvePropDict, names)
        # Update legend
        # NOTE(review): `updateExternalLegend` is unresolved here at call time
        # (`self` is a CurvesAppearanceChooser, not this legend), so clicking
        # Apply raises NameError — confirm the intended wiring before relying
        # on this hook. Kept as-is to preserve the original (broken) behavior.
        updateExternalLegend()
        return prop, names

    # Override CurvesAppearanceChooser.onApply
    CurvesAppearanceChooser.onApply = onApply
###########################################################################
__usage__ = """
Usage:
tpgarchivingwidget.py [attr0] [attr1] ... [date0] [date1]
e.g.
./ctarchiving02 sr/di/dcct/averagecurrent 2020-05-24 2020-05-27
"""
def main(*args):
    """Launch the archiving widget: main(attr0, ..., attrN, t0, t1)."""
    try:
        # At least one attribute plus the two dates are required.
        assert len(args) > 2
        models, (t0, t1) = args[:-2], args[-2:]
        ArchivingWidget.run(models, t0, t1)
    except:
        # Any failure (bad args included) falls back to printing the usage.
        print(__usage__)

if __name__ == '__main__':
    main(*sys.argv[1:])
|
Binance_Detect_Moonings.py | """
Disclaimer
All investment strategies and investments involve risk of loss.
Nothing contained in this program, scripts, code or repositoy should be
construed as investment advice.Any reference to an investment's past or
potential performance is not, and should not be construed as, a recommendation
or as a guarantee of any specific outcome or profit.
By using this program you accept all liabilities,
and that no claims can be made against the developers,
or others connected with the program.
"""
# use for environment variables
import os
# use if needed to pass args to external modules
import sys
# used to create threads & dynamic loading of modules
import threading
import importlib
# used for directory handling
import glob
# Needed for colorful console output Install with: python3 -m pip install colorama (Mac/Linux) or pip install colorama (PC)
from colorama import init
init()
# needed for the binance API / websockets / Exception handling
from binance.client import Client
from binance.exceptions import BinanceAPIException
from requests.exceptions import ReadTimeout, ConnectionError
# used for dates
from datetime import date, datetime, timedelta
import time
# used to repeatedly execute the code
from itertools import count
# used to store trades and sell assets
import json
# Load helper modules
from helpers.parameters import (
parse_args, load_config
)
# Load creds modules
from helpers.handle_creds import (
load_correct_creds, test_api_key
)
# for colourful logging to the console
class txcolors:
    # ANSI escape sequences used to colour console output.
    BUY = '\033[92m'          # bright green: buy actions
    WARNING = '\033[93m'      # yellow: warnings
    SELL_LOSS = '\033[91m'    # bright red: sells at a loss
    SELL_PROFIT = '\033[32m'  # green: sells at a profit
    DIM = '\033[2m\033[35m'   # dim + magenta: timestamps
    DEFAULT = '\033[39m'      # reset foreground colour
# tracks profit/loss each session
# NOTE(review): a `global` statement at module level is a no-op; kept as-is.
global session_profit
session_profit = 0

# print with timestamps
# keep a handle on the real stdout so the stamping wrapper below can delegate to it
old_out = sys.stdout
class St_ampe_dOut:
    """Stamped stdout: prefixes the start of every output line with a dim timestamp."""
    nl = True  # True when the next write begins a fresh line

    def write(self, x):
        """Write *x* to the real stdout, stamping the first chunk of each line."""
        if x == '\n':
            # bare newline: pass through and remember we are at a line start
            self.nl = True
            old_out.write(x)
            return
        if self.nl:
            # first chunk of a new line: prepend the timestamp (second precision)
            stamp = str(datetime.now().replace(microsecond=0))
            old_out.write(f'{txcolors.DIM}[{stamp}]{txcolors.DEFAULT} {x}')
            self.nl = False
        else:
            old_out.write(x)

    def flush(self):
        # nothing buffered locally; the real stdout handles flushing
        pass
# route all print() output through the timestamp-stamping wrapper above
sys.stdout = St_ampe_dOut()
def get_price(add_to_historical=True):
    '''Return the current price for all coins on binance'''
    global historical_prices, hsp_head

    snapshot = {}
    for ticker in client.get_all_tickers():
        symbol = ticker['symbol']
        # never track anything quoted in (or containing) an excluded fiat
        if not all(fiat not in symbol for fiat in FIATS):
            continue
        if CUSTOM_LIST:
            # only symbols built from the user-supplied ticker list
            wanted = any(base + PAIR_WITH == symbol for base in tickers)
        else:
            wanted = PAIR_WITH in symbol
        if wanted:
            snapshot[symbol] = {'price': ticker['price'], 'time': datetime.now()}

    if add_to_historical:
        # advance the cyclical head and store this snapshot in the window
        hsp_head += 1
        if hsp_head == RECHECK_INTERVAL:
            hsp_head = 0
        historical_prices[hsp_head] = snapshot

    return snapshot
def wait_for_price():
    '''calls the initial price and ensures the correct amount of time has passed
    before reading the current price again'''
    global historical_prices, hsp_head, volatility_cooloff
    volatile_coins = {}
    # NOTE(review): dead assignment — reassigned from external_signals() below
    externals = {}
    coins_up = 0
    coins_down = 0
    coins_unchanged = 0
    # block here while an external "signals/paused.exc" file exists
    pause_bot()
    # throttle: wait until TIME_DIFFERENCE/RECHECK_INTERVAL minutes have passed
    # since the newest snapshot; the BNB<PAIR_WITH> ticker is used as a
    # reference clock (assumes that pair always exists in the snapshot)
    if historical_prices[hsp_head]['BNB' + PAIR_WITH]['time'] > datetime.now() - timedelta(minutes=float(TIME_DIFFERENCE / RECHECK_INTERVAL)):
        # sleep for exactly the amount of time required
        time.sleep((timedelta(minutes=float(TIME_DIFFERENCE / RECHECK_INTERVAL)) - (datetime.now() - historical_prices[hsp_head]['BNB' + PAIR_WITH]['time'])).total_seconds())
    print(f'Working...Session profit:{session_profit:.2f}% Est:${(QUANTITY * session_profit)/100:.2f}')
    # retrieve latest prices
    get_price()
    # calculate the difference in prices
    for coin in historical_prices[hsp_head]:
        # minimum and maximum prices over time period
        # NOTE(review): assumes every non-None snapshot contains `coin`; a coin
        # newly listed mid-window would raise KeyError inside these lambdas
        min_price = min(historical_prices, key = lambda x: float("inf") if x is None else float(x[coin]['price']))
        max_price = max(historical_prices, key = lambda x: -1 if x is None else float(x[coin]['price']))
        # signed % move over the window; negative when the high came before the low
        threshold_check = (-1.0 if min_price[coin]['time'] > max_price[coin]['time'] else 1.0) * (float(max_price[coin]['price']) - float(min_price[coin]['price'])) / float(min_price[coin]['price']) * 100
        # each coin with higher gains than our CHANGE_IN_PRICE is added to the volatile_coins dict if less than MAX_COINS is not reached.
        if threshold_check > CHANGE_IN_PRICE:
            coins_up +=1
            if coin not in volatility_cooloff:
                # first sighting: backdate so the cooldown check below passes
                volatility_cooloff[coin] = datetime.now() - timedelta(minutes=COOLDOWN_INTERVAL)
            # only include coin as volatile if it hasn't been picked up in the last COOLDOWN_INTERVAL minutes already
            if datetime.now() >= volatility_cooloff[coin] + timedelta(minutes=COOLDOWN_INTERVAL):
                volatility_cooloff[coin] = datetime.now()
                # MAX_COINS == 0 means "no limit"
                if len(coins_bought) + len(volatile_coins) < MAX_COINS or MAX_COINS == 0:
                    volatile_coins[coin] = round(threshold_check, 3)
                    print(f'{coin} has gained {volatile_coins[coin]}% within the last {TIME_DIFFERENCE} minutes, calculating volume in {PAIR_WITH}')
                else:
                    print(f'{txcolors.WARNING}{coin} has gained {round(threshold_check, 3)}% within the last {TIME_DIFFERENCE} minutes, but you are holding max number of coins{txcolors.DEFAULT}')
        elif threshold_check < CHANGE_IN_PRICE:
            coins_down +=1
        else:
            coins_unchanged +=1
    # Disabled until fix
    #print(f'Up: {coins_up} Down: {coins_down} Unchanged: {coins_unchanged}')
    # Here goes new code for external signalling
    externals = external_signals()
    exnumber = 0
    for excoin in externals:
        # NOTE(review): unlike the branch above, this check does NOT honour
        # MAX_COINS == 0 as "unlimited" — externally signalled coins are
        # rejected outright when MAX_COINS is 0
        if excoin not in volatile_coins and excoin not in coins_bought and \
                (len(coins_bought) + exnumber + len(volatile_coins)) < MAX_COINS:
            volatile_coins[excoin] = 1
            exnumber +=1
            print(f'External signal received on {excoin}, calculating volume in {PAIR_WITH}')
    return volatile_coins, len(volatile_coins), historical_prices[hsp_head]
def external_signals():
    """Collect buy signals dropped by external signalling modules.

    Every ``signals/*.exs`` file is read (one symbol per line), deleted,
    and the symbols returned as a ``{symbol: symbol}`` dict.  Blank lines
    are ignored so a trailing newline cannot produce an empty symbol.
    """
    external_list = {}
    # check directory and load pairs from files into external_list
    for filename in glob.glob("signals/*.exs"):
        # close the file before removing it: the original iterated an open
        # handle, which leaks it and keeps the file locked on Windows
        with open(filename) as signal_file:
            for line in signal_file:
                symbol = line.strip()
                if symbol:
                    external_list[symbol] = symbol
        try:
            os.remove(filename)
        except OSError:
            # best effort: leave the file in place but warn when debugging
            if DEBUG: print(f'{txcolors.WARNING}Could not remove external signalling file{txcolors.DEFAULT}')
    return external_list
def pause_bot():
    '''Pause the script when external indicators detect a bearish trend in the market'''
    global bot_paused, session_profit, hsp_head
    # start counting for how long the bot's been paused
    start_time = time.perf_counter()
    # the "signals/paused.exc" file acts as an external pause switch
    while os.path.isfile("signals/paused.exc"):
        if bot_paused == False:
            print(f'{txcolors.WARNING}Pausing buying due to change in market conditions, stop loss and take profit will continue to work...{txcolors.DEFAULT}')
            bot_paused = True
        # Sell function needs to work even while paused
        coins_sold = sell_coins()
        remove_from_portfolio(coins_sold)
        get_price(True)
        # pausing here
        if hsp_head == 1: print(f'Paused...Session profit:{session_profit:.2f}% Est:${(QUANTITY * session_profit)/100:.2f}')
        time.sleep((TIME_DIFFERENCE * 60) / RECHECK_INTERVAL)
    # while/else: there is no `break` above, so this always runs once the
    # pause file disappears (or immediately if it never existed)
    else:
        # stop counting the pause time
        stop_time = time.perf_counter()
        time_elapsed = timedelta(seconds=int(stop_time-start_time))
        # resume the bot and set bot_paused back to False
        if bot_paused == True:
            print(f'{txcolors.WARNING}Resuming buying due to change in market conditions, total sleep time: {time_elapsed}{txcolors.DEFAULT}')
            bot_paused = False
    return
def convert_volume():
    '''Converts the volume given in QUANTITY from USDT to the each coin's volume'''
    volatile_coins, number_of_coins, last_price = wait_for_price()
    lot_size = {}
    volume = {}
    for coin in volatile_coins:
        # Find the correct step size for each coin
        # max accuracy for BTC for example is 6 decimal points
        # while XRP is only 1
        try:
            info = client.get_symbol_info(coin)
            # NOTE(review): assumes filters[2] is the LOT_SIZE filter; safer
            # would be to search the filters list by filterType — confirm
            step_size = info['filters'][2]['stepSize']
            # decimals implied by the step-size string, e.g.
            # '0.00100000'.index('1') - 1 == 3 decimal places
            lot_size[coin] = step_size.index('1') - 1
            # '1.00000000' yields -1; clamp to whole units
            if lot_size[coin] < 0:
                lot_size[coin] = 0
        except:
            # best effort: on any API/parsing failure the coin is left out of
            # lot_size and falls back to the 1-decimal default below
            pass
        # calculate the volume in coin from QUANTITY in USDT (default)
        volume[coin] = float(QUANTITY / float(last_price[coin]['price']))
        # define the volume with the correct step size
        if coin not in lot_size:
            volume[coin] = float('{:.1f}'.format(volume[coin]))
        else:
            # if lot size has 0 decimal points, make the volume an integer
            if lot_size[coin] == 0:
                volume[coin] = int(volume[coin])
            else:
                volume[coin] = float('{:.{}f}'.format(volume[coin], lot_size[coin]))
    return volume, last_price
def buy():
    '''Place Buy market orders for each volatile coin found'''
    volume, last_price = convert_volume()
    orders = {}
    for coin in volume:
        # only buy if the there are no active trades on the coin
        if coin not in coins_bought:
            print(f"{txcolors.BUY}Preparing to buy {volume[coin]} {coin}{txcolors.DEFAULT}")
            if TEST_MODE:
                # fabricate an order record instead of hitting the exchange;
                # 'time' here is epoch *seconds* (datetime.timestamp())
                orders[coin] = [{
                    'symbol': coin,
                    'orderId': 0,
                    'time': datetime.now().timestamp()
                }]
                # Log trade
                if LOG_TRADES:
                    write_log(f"Buy : {volume[coin]} {coin} - {last_price[coin]['price']}")
                continue
            # try to create a real order if the test orders did not raise an exception
            try:
                buy_limit = client.create_order(
                    symbol = coin,
                    side = 'BUY',
                    type = 'MARKET',
                    quantity = volume[coin]
                )
            # error handling here in case position cannot be placed
            except Exception as e:
                print(e)
            # run the else block if the position has been placed and return order info
            else:
                orders[coin] = client.get_all_orders(symbol=coin, limit=1)
                # binance sometimes returns an empty list, the code will wait here until binance returns the order
                while orders[coin] == []:
                    print('Binance is being slow in returning the order, calling the API again...')
                    orders[coin] = client.get_all_orders(symbol=coin, limit=1)
                    time.sleep(1)
                # while/else: runs once the loop exits with a non-empty order list
                else:
                    print('Order returned, saving order to file')
                    # Log trade
                    if LOG_TRADES:
                        write_log(f"Buy : {volume[coin]} {coin} - {last_price[coin]['price']}")
        else:
            print(f'Signal detected, but there is already an active trade on {coin}')
    return orders, last_price, volume
def sell_coins():
    '''sell coins that have reached the STOP LOSS or TAKE PROFIT threshold'''
    global hsp_head, session_profit
    last_price = get_price(False) # don't populate rolling window
    #last_price = get_price(add_to_historical=True) # don't populate rolling window
    coins_sold = {}
    for coin in list(coins_bought):
        # define stop loss and take profit
        TP = float(coins_bought[coin]['bought_at']) + (float(coins_bought[coin]['bought_at']) * coins_bought[coin]['take_profit']) / 100
        SL = float(coins_bought[coin]['bought_at']) + (float(coins_bought[coin]['bought_at']) * coins_bought[coin]['stop_loss']) / 100
        LastPrice = float(last_price[coin]['price'])
        BuyPrice = float(coins_bought[coin]['bought_at'])
        PriceChange = float((LastPrice - BuyPrice) / BuyPrice * 100)
        # check that the price is above the take profit and readjust SL and TP accordingly if trailing stop loss used
        if LastPrice > TP and USE_TRAILING_STOP_LOSS:
            # increasing TP by TRAILING_TAKE_PROFIT (essentially next time to readjust SL)
            coins_bought[coin]['take_profit'] = PriceChange + TRAILING_TAKE_PROFIT
            coins_bought[coin]['stop_loss'] = coins_bought[coin]['take_profit'] - TRAILING_STOP_LOSS
            if DEBUG: print(f"{coin} TP reached, adjusting TP {coins_bought[coin]['take_profit']:.2f} and SL {coins_bought[coin]['stop_loss']:.2f} accordingly to lock-in profit")
            continue
        # check that the price is below the stop loss or above take profit (if trailing stop loss not used) and sell if this is the case
        # NOTE(review): precedence parses as `LastPrice < SL or (LastPrice > TP and
        # not USE_TRAILING_STOP_LOSS)`, which matches the intent: with a trailing
        # stop the LastPrice > TP case was already consumed by the branch above
        if LastPrice < SL or LastPrice > TP and not USE_TRAILING_STOP_LOSS:
            print(f"{txcolors.SELL_PROFIT if PriceChange >= 0. else txcolors.SELL_LOSS}TP or SL reached, selling {coins_bought[coin]['volume']} {coin} - {BuyPrice} - {LastPrice} : {PriceChange-(TRADING_FEE*2):.2f}% Est:${(QUANTITY*(PriceChange-(TRADING_FEE*2)))/100:.2f}{txcolors.DEFAULT}")
            # try to create a real order
            try:
                if not TEST_MODE:
                    sell_coins_limit = client.create_order(
                        symbol = coin,
                        side = 'SELL',
                        type = 'MARKET',
                        quantity = coins_bought[coin]['volume']
                    )
            # error handling here in case position cannot be placed
            except Exception as e:
                print(e)
            # run the else block if coin has been sold and create a dict for each coin sold
            else:
                coins_sold[coin] = coins_bought[coin]
                # prevent system from buying this coin for the next TIME_DIFFERENCE minutes
                volatility_cooloff[coin] = datetime.now()
                # Log trade
                if LOG_TRADES:
                    profit = ((LastPrice - BuyPrice) * coins_sold[coin]['volume'])* (1-(TRADING_FEE*2)) # adjust for trading fee here
                    write_log(f"Sell: {coins_sold[coin]['volume']} {coin} - {BuyPrice} - {LastPrice} Profit: {profit:.2f} {PriceChange-(TRADING_FEE*2):.2f}%")
                session_profit=session_profit + (PriceChange-(TRADING_FEE*2))
            continue
        # no action; print once every TIME_DIFFERENCE
        if hsp_head == 1:
            if len(coins_bought) > 0:
                print(f'TP or SL not yet reached, not selling {coin} for now {BuyPrice} - {LastPrice} : {txcolors.SELL_PROFIT if PriceChange >= 0. else txcolors.SELL_LOSS}{PriceChange-(TRADING_FEE*2):.2f}% Est:${(QUANTITY*(PriceChange-(TRADING_FEE*2)))/100:.2f}{txcolors.DEFAULT}')
    if hsp_head == 1 and len(coins_bought) == 0: print(f'Not holding any coins')
    return coins_sold
def update_portfolio(orders, last_price, volume):
    '''add every coin bought to our portfolio for tracking/selling later'''
    if DEBUG: print(orders)
    for coin in orders:
        coins_bought[coin] = {
            'symbol': orders[coin][0]['symbol'],
            'orderid': orders[coin][0]['orderId'],
            # NOTE(review): format is year/DAY/month (matches write_log);
            # TEST_MODE orders store 'time' in epoch *seconds*, while real
            # Binance orders report 'time' in milliseconds — confirm that
            # fromtimestamp() gets the unit it expects for live orders
            'timestamp': datetime.fromtimestamp(orders[coin][0]['time']).strftime("%Y/%d/%m %H:%M:%S"),
            'bought_at': last_price[coin]['price'],
            'volume': volume[coin],
            # stop_loss is stored negative so TP/SL can share one formula
            'stop_loss': -STOP_LOSS,
            'take_profit': TAKE_PROFIT,
        }
        # save the coins in a json file in the same directory
        # (rewritten after every order so a crash loses at most one entry)
        with open(coins_bought_file_path, 'w') as file:
            json.dump(coins_bought, file, indent=4)
        print(f'Order with id {orders[coin][0]["orderId"]} placed and saved to file')
def remove_from_portfolio(coins_sold):
    '''Remove coins sold due to SL or TP from portfolio'''
    for symbol in coins_sold:
        del coins_bought[symbol]
    # persist the trimmed portfolio so a restart does not resurrect sold coins
    with open(coins_bought_file_path, 'w') as file:
        json.dump(coins_bought, file, indent=4)
def write_log(logline):
    """Append *logline* to LOG_FILE, prefixed with a year/day/month timestamp."""
    stamp = datetime.now().strftime("%Y/%d/%m %H:%M:%S")
    with open(LOG_FILE, 'a+') as logfile:
        logfile.write(f"{stamp} {logline}\n")
if __name__ == '__main__':
    # Load arguments then parse settings
    args = parse_args()
    mymodule = {}

    # set to false at Start
    # NOTE(review): `global` at module level is a no-op; kept as-is
    global bot_paused
    bot_paused = False

    DEFAULT_CONFIG_FILE = 'config.yml'
    DEFAULT_CREDS_FILE = 'creds.yml'

    config_file = args.config if args.config else DEFAULT_CONFIG_FILE
    creds_file = args.creds if args.creds else DEFAULT_CREDS_FILE
    parsed_config = load_config(config_file)
    parsed_creds = load_config(creds_file)

    # Default no debugging
    DEBUG = False

    # Load system vars
    TEST_MODE = parsed_config['script_options']['TEST_MODE']
    LOG_TRADES = parsed_config['script_options'].get('LOG_TRADES')
    LOG_FILE = parsed_config['script_options'].get('LOG_FILE')
    DEBUG_SETTING = parsed_config['script_options'].get('DEBUG')
    AMERICAN_USER = parsed_config['script_options'].get('AMERICAN_USER')

    # Load trading vars
    COOLDOWN_INTERVAL = parsed_config['trading_options']['COOLDOWN_INTERVAL']
    PAIR_WITH = parsed_config['trading_options']['PAIR_WITH']
    QUANTITY = parsed_config['trading_options']['QUANTITY']
    MAX_COINS = parsed_config['trading_options']['MAX_COINS']
    FIATS = parsed_config['trading_options']['FIATS']
    TIME_DIFFERENCE = parsed_config['trading_options']['TIME_DIFFERENCE']
    RECHECK_INTERVAL = parsed_config['trading_options']['RECHECK_INTERVAL']
    CHANGE_IN_PRICE = parsed_config['trading_options']['CHANGE_IN_PRICE']
    STOP_LOSS = parsed_config['trading_options']['STOP_LOSS']
    TAKE_PROFIT = parsed_config['trading_options']['TAKE_PROFIT']
    CUSTOM_LIST = parsed_config['trading_options']['CUSTOM_LIST']
    TICKERS_LIST = parsed_config['trading_options']['TICKERS_LIST']
    USE_TRAILING_STOP_LOSS = parsed_config['trading_options']['USE_TRAILING_STOP_LOSS']
    TRAILING_STOP_LOSS = parsed_config['trading_options']['TRAILING_STOP_LOSS']
    TRAILING_TAKE_PROFIT = parsed_config['trading_options']['TRAILING_TAKE_PROFIT']
    TRADING_FEE = parsed_config['trading_options']['TRADING_FEE']
    SIGNALLING_MODULES = parsed_config['trading_options']['SIGNALLING_MODULES']

    # either the config or the --debug CLI flag turns on debug output
    if DEBUG_SETTING or args.debug:
        DEBUG = True

    # Load creds for correct environment
    access_key, secret_key = load_correct_creds(parsed_creds)

    if DEBUG:
        print(f'loaded config below\n{json.dumps(parsed_config, indent=4)}')
        print(f'Your credentials have been loaded from {creds_file}')

    # Authenticate with the client, Ensure API key is good before continuing
    if AMERICAN_USER:
        client = Client(access_key, secret_key, tld='us')
    else:
        client = Client(access_key, secret_key)

    # If the user has a bad / incorrect API key,
    # this will stop the script from starting, and display a helpful error.
    api_ready, msg = test_api_key(client, BinanceAPIException)
    if api_ready is not True:
        exit(f'{txcolors.SELL_LOSS}{msg}{txcolors.DEFAULT}')

    # Use CUSTOM_LIST symbols if CUSTOM_LIST is set to True
    if CUSTOM_LIST: tickers=[line.strip() for line in open(TICKERS_LIST)]

    # try to load all the coins bought by the bot if the file exists and is not empty
    coins_bought = {}

    # path to the saved coins_bought file
    coins_bought_file_path = 'coins_bought.json'

    # rolling window of prices; cyclical queue
    # NOTE(review): get_price() wraps hsp_head at RECHECK_INTERVAL, so only
    # the first RECHECK_INTERVAL slots of this list are ever filled — the
    # remaining TIME_DIFFERENCE*RECHECK_INTERVAL - RECHECK_INTERVAL stay None
    historical_prices = [None] * (TIME_DIFFERENCE * RECHECK_INTERVAL)
    hsp_head = -1

    # prevent including a coin in volatile_coins if it has already appeared there less than TIME_DIFFERENCE minutes ago
    volatility_cooloff = {}

    # use separate files for testing and live trading
    if TEST_MODE:
        coins_bought_file_path = 'test_' + coins_bought_file_path

    # if saved coins_bought json file exists and it's not empty then load it
    if os.path.isfile(coins_bought_file_path) and os.stat(coins_bought_file_path).st_size != 0:
        with open(coins_bought_file_path) as file:
            coins_bought = json.load(file)

    print('Press Ctrl-Q to stop the script')

    if not TEST_MODE:
        if not args.notimeout: # if notimeout skip this (fast for dev tests)
            print('WARNING: You are using the Mainnet and live funds. Waiting 30 seconds as a security measure')
            time.sleep(30)

    # clear out any stale signal files left over from a previous run
    # NOTE(review): os.remove is attempted once per *line* while the file is
    # still open — one remove per file (after closing it) would suffice and
    # would also work on Windows
    signals = glob.glob("signals/*.exs")
    for filename in signals:
        for line in open(filename):
            try:
                os.remove(filename)
            except:
                if DEBUG: print(f'{txcolors.WARNING}Could not remove external signalling file (unknown){txcolors.DEFAULT}')
    if os.path.isfile("signals/paused.exc"):
        try:
            os.remove("signals/paused.exc")
        except:
            if DEBUG: print(f'{txcolors.WARNING}Could not remove external signalling file (unknown){txcolors.DEFAULT}')

    # load signalling modules, each on its own daemon thread
    try:
        if len(SIGNALLING_MODULES) > 0:
            for module in SIGNALLING_MODULES:
                print(f'Starting {module}')
                mymodule[module] = importlib.import_module(module)
                t = threading.Thread(target=mymodule[module].do_work, args=())
                t.daemon = True
                t.start()
                time.sleep(2)
        else:
            print(f'No modules to load {SIGNALLING_MODULES}')
    except Exception as e:
        print(e)

    # seed the rolling window with an initial snapshot
    get_price()
    READ_TIMEOUT_COUNT=0
    CONNECTION_ERROR_COUNT = 0
    # main trade loop: buy volatile coins, then check holdings for TP/SL;
    # transient network errors are counted and the loop continues
    while True:
        try:
            orders, last_price, volume = buy()
            update_portfolio(orders, last_price, volume)
            coins_sold = sell_coins()
            remove_from_portfolio(coins_sold)
        except ReadTimeout as rt:
            READ_TIMEOUT_COUNT += 1
            print(f'{txcolors.WARNING}We got a timeout error from from binance. Going to re-loop. Current Count: {READ_TIMEOUT_COUNT}\n{rt}{txcolors.DEFAULT}')
        except ConnectionError as ce:
            CONNECTION_ERROR_COUNT +=1
            print(f'{txcolors.WARNING}We got a timeout error from from binance. Going to re-loop. Current Count: {CONNECTION_ERROR_COUNT}\n{ce}{txcolors.DEFAULT}')
|
main.py | import random
import threading
import time
import kbInput
import puzzle
import Gesture_Detector
import GD2
from Sound_Manager import Sound_Manager
# module-level handle intended to hold the GameGrid instance
# NOTE(review): Game2048.pizzle_worker assigns a *local* Game (no `global`),
# so this stays None for the lifetime of the program
Game = None
class Game2048():
    """Glue object: runs the 2048 puzzle UI and a gesture-recognition
    worker on two daemon threads started from the constructor."""
    def __init__(self):
        self.gesture = None            # last gesture code produced by the detector
        self.new_gesture_flag = False  # currently unused
        self.flag = False              # True once the detector has been created
        self.GD = None                 # gesture detector instance (GD2.GD2)
        # self.game = None
        # self._lock = threading.RLock()
        self._t_effector1 = threading.Thread(target=self.gesture_worker)
        self._t_effector1.daemon = True
        #"before the start`"
        self._t_effector1.start()
        self._t_effector2 = threading.Thread(target=self.pizzle_worker)
        self._t_effector2.daemon = True
        # "before the start`"
        self._t_effector2.start()

    def pizzle_worker(self):
        # Runs the puzzle UI on its own thread.
        # NOTE(review): "pizzle" is presumably a typo for "puzzle"; renaming it
        # would also require updating the Thread target above.
        # NOTE(review): this binds a *local* Game, not the module-level one
        # (no `global Game` statement), so module-level Game stays None.
        Game = puzzle.GameGrid()

    def gesture_worker(self):
        """Poll the gesture detector forever and forward gestures as key input."""
        lastTime = time.time()       # currently only used by the commented-out freeze logic
        frozen_flag = False
        while True:
            try:
                if self.flag == False:
                    # lazily create the detector on the first iteration
                    # print("I am here")
                    # self.GD = Gesture_Detector.Gesture_Detector()
                    self.GD = GD2.GD2()
                    self.flag = True
                # TODO: finish getGesture
                self.gesture = self.GD.getGesture()
                # if (self.gesture != 0):
                #     print(111111111)
                #     print(self.gesture)
                # self.gesture = random.randint(1,8)
                # print(self.gesture)
                # if self.gesture != 0:
                #     lastTime = time.time()
                #     frozen_flag = False
                # elif frozen_flag == False and self.gesture == 0 and time.time() - lastTime > 3:
                #     print("Starting Frozen")
                #     frozen_flag = True
                #     Game.frozen_flag = True
                #     kbInput.createKeyInput(10)
                # TODO: need to uncomment: turn off the input in the testing
                kbInput.createKeyInput(self.gesture)
                # kbInput.createKeyInput(0)
            except Exception as e:
                print("EXCEPTION In gesture_worker: ", e)
            # # self.new_gesture_flag = True
            # ~12 polls per second
            time.sleep(0.08)
if __name__ == '__main__':
    my_game = Game2048()
    # keep the main thread alive; the daemon worker threads do all the work
    while True:
        # print(my_game.gesture)
        time.sleep(0.01)
|
test_ssl.py | # Test the support for SSL and sockets
import sys
import unittest
import unittest.mock
from test import support
from test.support import import_helper
from test.support import os_helper
from test.support import socket_helper
from test.support import threading_helper
from test.support import warnings_helper
import socket
import select
import time
import datetime
import enum
import gc
import os
import errno
import pprint
import urllib.request
import threading
import traceback
import asyncore
import weakref
import platform
import sysconfig
import functools
try:
import ctypes
except ImportError:
ctypes = None
ssl = import_helper.import_module("ssl")
from ssl import TLSVersion, _TLSContentType, _TLSMessageType, _TLSAlertType
# True on debug builds of CPython (only they expose sys.gettotalrefcount)
Py_DEBUG = hasattr(sys, 'gettotalrefcount')
Py_DEBUG_WIN32 = Py_DEBUG and sys.platform == 'win32'

PROTOCOLS = sorted(ssl._PROTOCOL_NAMES)
HOST = socket_helper.HOST
IS_OPENSSL_3_0_0 = ssl.OPENSSL_VERSION_INFO >= (3, 0, 0)
PY_SSL_DEFAULT_CIPHERS = sysconfig.get_config_var('PY_SSL_DEFAULT_CIPHERS')

# Map legacy PROTOCOL_* constants to the corresponding TLSVersion member,
# skipping any that this OpenSSL build does not expose.
PROTOCOL_TO_TLS_VERSION = {}
for proto, ver in (
    ("PROTOCOL_SSLv23", "SSLv3"),
    ("PROTOCOL_TLSv1", "TLSv1"),
    ("PROTOCOL_TLSv1_1", "TLSv1_1"),
):
    try:
        proto = getattr(ssl, proto)
        ver = getattr(ssl.TLSVersion, ver)
    except AttributeError:
        continue
    PROTOCOL_TO_TLS_VERSION[proto] = ver
def data_file(*name):
    """Return the path of a test-data file living next to this module."""
    here = os.path.dirname(__file__)
    return os.path.join(here, *name)
# The custom key and certificate files used in test_ssl are generated
# using Lib/test/make_ssl_certs.py.
# Other certificates are simply fetched from the Internet servers they
# are meant to authenticate.

CERTFILE = data_file("keycert.pem")
BYTES_CERTFILE = os.fsencode(CERTFILE)
ONLYCERT = data_file("ssl_cert.pem")
ONLYKEY = data_file("ssl_key.pem")
BYTES_ONLYCERT = os.fsencode(ONLYCERT)
BYTES_ONLYKEY = os.fsencode(ONLYKEY)
CERTFILE_PROTECTED = data_file("keycert.passwd.pem")
ONLYKEY_PROTECTED = data_file("ssl_key.passwd.pem")
KEY_PASSWORD = "somepass"
CAPATH = data_file("capath")
BYTES_CAPATH = os.fsencode(CAPATH)
CAFILE_NEURONIO = data_file("capath", "4e1295a3.0")
CAFILE_CACERT = data_file("capath", "5ed36f99.0")

# expected result of parsing CERTFILE with ssl._ssl._test_decode_cert()
CERTFILE_INFO = {
    'issuer': ((('countryName', 'XY'),),
               (('localityName', 'Castle Anthrax'),),
               (('organizationName', 'Python Software Foundation'),),
               (('commonName', 'localhost'),)),
    'notAfter': 'Aug 26 14:23:15 2028 GMT',
    'notBefore': 'Aug 29 14:23:15 2018 GMT',
    'serialNumber': '98A7CF88C74A32ED',
    'subject': ((('countryName', 'XY'),),
                (('localityName', 'Castle Anthrax'),),
                (('organizationName', 'Python Software Foundation'),),
                (('commonName', 'localhost'),)),
    'subjectAltName': (('DNS', 'localhost'),),
    'version': 3
}

# empty CRL
CRLFILE = data_file("revocation.crl")

# Two keys and certs signed by the same CA (for SNI tests)
SIGNED_CERTFILE = data_file("keycert3.pem")
SIGNED_CERTFILE_HOSTNAME = 'localhost'

# expected decode of SIGNED_CERTFILE (includes OCSP/AIA/CRL extensions)
SIGNED_CERTFILE_INFO = {
    'OCSP': ('http://testca.pythontest.net/testca/ocsp/',),
    'caIssuers': ('http://testca.pythontest.net/testca/pycacert.cer',),
    'crlDistributionPoints': ('http://testca.pythontest.net/testca/revocation.crl',),
    'issuer': ((('countryName', 'XY'),),
               (('organizationName', 'Python Software Foundation CA'),),
               (('commonName', 'our-ca-server'),)),
    'notAfter': 'Oct 28 14:23:16 2037 GMT',
    'notBefore': 'Aug 29 14:23:16 2018 GMT',
    'serialNumber': 'CB2D80995A69525C',
    'subject': ((('countryName', 'XY'),),
                (('localityName', 'Castle Anthrax'),),
                (('organizationName', 'Python Software Foundation'),),
                (('commonName', 'localhost'),)),
    'subjectAltName': (('DNS', 'localhost'),),
    'version': 3
}

SIGNED_CERTFILE2 = data_file("keycert4.pem")
SIGNED_CERTFILE2_HOSTNAME = 'fakehostname'
SIGNED_CERTFILE_ECC = data_file("keycertecc.pem")
SIGNED_CERTFILE_ECC_HOSTNAME = 'localhost-ecc'

# Same certificate as pycacert.pem, but without extra text in file
SIGNING_CA = data_file("capath", "ceff1710.0")

# cert with all kinds of subject alt names
ALLSANFILE = data_file("allsans.pem")
IDNSANSFILE = data_file("idnsans.pem")
NOSANFILE = data_file("nosan.pem")
NOSAN_HOSTNAME = 'localhost'

REMOTE_HOST = "self-signed.pythontest.net"

# deliberately broken / unusual certificates for negative tests
EMPTYCERT = data_file("nullcert.pem")
BADCERT = data_file("badcert.pem")
NONEXISTINGCERT = data_file("XXXnonexisting.pem")
BADKEY = data_file("badkey.pem")
NOKIACERT = data_file("nokia.pem")
NULLBYTECERT = data_file("nullbytecert.pem")
TALOS_INVALID_CRLDP = data_file("talos-2019-0758.pem")

DHFILE = data_file("ffdh3072.pem")
BYTES_DHFILE = os.fsencode(DHFILE)

# Not defined in all versions of OpenSSL
OP_NO_COMPRESSION = getattr(ssl, "OP_NO_COMPRESSION", 0)
OP_SINGLE_DH_USE = getattr(ssl, "OP_SINGLE_DH_USE", 0)
OP_SINGLE_ECDH_USE = getattr(ssl, "OP_SINGLE_ECDH_USE", 0)
OP_CIPHER_SERVER_PREFERENCE = getattr(ssl, "OP_CIPHER_SERVER_PREFERENCE", 0)
OP_ENABLE_MIDDLEBOX_COMPAT = getattr(ssl, "OP_ENABLE_MIDDLEBOX_COMPAT", 0)
OP_IGNORE_UNEXPECTED_EOF = getattr(ssl, "OP_IGNORE_UNEXPECTED_EOF", 0)
# Ubuntu has patched OpenSSL and changed behavior of security level 2
# see https://bugs.python.org/issue41561#msg389003
def is_ubuntu():
    """Best-effort check for an Ubuntu-like distro via /etc/os-release."""
    # Assume that any references of "ubuntu" implies Ubuntu-like distro
    # The workaround is not required for 18.04, but doesn't hurt either.
    try:
        with open("/etc/os-release", encoding="utf-8") as f:
            release_info = f.read()
    except FileNotFoundError:
        # not a Linux box (or no os-release): assume not Ubuntu
        return False
    return "ubuntu" in release_info
# Ubuntu's patched OpenSSL rejects TLS 1.0/1.1 handshakes at the default
# security level; install a workaround only there.
if is_ubuntu():
    def seclevel_workaround(*ctxs):
        """Lower security level to '1' and allow all ciphers for TLS 1.0/1"""
        for ctx in ctxs:
            if (
                hasattr(ctx, "minimum_version") and
                ctx.minimum_version <= ssl.TLSVersion.TLSv1_1
            ):
                ctx.set_ciphers("@SECLEVEL=1:ALL")
else:
    # non-Ubuntu platforms: keep the same API, do nothing
    def seclevel_workaround(*ctxs):
        pass
def has_tls_protocol(protocol):
    """Check if a TLS protocol is available and enabled

    :param protocol: enum ssl._SSLMethod member or name
    :return: bool
    """
    if isinstance(protocol, str):
        assert protocol.startswith('PROTOCOL_')
        protocol = getattr(ssl, protocol, None)
        if protocol is None:
            # the named constant does not exist in this build
            return False

    auto_negotiating = {
        ssl.PROTOCOL_TLS,
        ssl.PROTOCOL_TLS_SERVER,
        ssl.PROTOCOL_TLS_CLIENT,
    }
    if protocol in auto_negotiating:
        # auto-negotiate protocols are always available
        return True

    # strip the 'PROTOCOL_' prefix and defer to the version-level check
    return has_tls_version(protocol.name[len('PROTOCOL_'):])
@functools.lru_cache
def has_tls_version(version):
    """Check if a TLS/SSL version is enabled

    :param version: TLS version name or ssl.TLSVersion member
    :return: bool
    """
    if version == "SSLv2":
        # never supported and not even in TLSVersion enum
        return False
    if isinstance(version, str):
        version = ssl.TLSVersion.__members__[version]

    # compile-time support flags (ssl.HAS_TLSv1_2 and friends)
    if not getattr(ssl, f'HAS_{version.name}'):
        return False
    if IS_OPENSSL_3_0_0 and version < ssl.TLSVersion.TLSv1_2:
        # bpo43791: 3.0.0-alpha14 fails with TLSV1_ALERT_INTERNAL_ERROR
        return False

    # runtime / crypto-policy limits: a version can be compiled in yet
    # disabled by a system policy or config option
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    min_ver = getattr(ctx, 'minimum_version', ssl.TLSVersion.MINIMUM_SUPPORTED)
    if min_ver != ssl.TLSVersion.MINIMUM_SUPPORTED and version < min_ver:
        return False
    max_ver = getattr(ctx, 'maximum_version', ssl.TLSVersion.MAXIMUM_SUPPORTED)
    if max_ver != ssl.TLSVersion.MAXIMUM_SUPPORTED and version > max_ver:
        return False

    return True
def requires_tls_version(version):
    """Decorator to skip tests when a required TLS version is not available

    :param version: TLS version name or ssl.TLSVersion member
    :return:
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kw):
            # run the test only when the version is usable; otherwise skip
            if has_tls_version(version):
                return func(*args, **kw)
            raise unittest.SkipTest(f"{version} is not available.")
        return wrapper
    return decorator
def handle_error(prefix):
    """Write the current exception's traceback to stdout (verbose runs only)."""
    exc_lines = traceback.format_exception(*sys.exc_info())
    if support.verbose:
        sys.stdout.write(prefix + ' '.join(exc_lines))
def utc_offset():  # NOTE: ignore issues like #1647654
    """Return the local UTC offset in seconds (local time = UTC + offset)."""
    dst_active = time.daylight and time.localtime().tm_isdst > 0
    return -time.altzone if dst_active else -time.timezone
# reusable decorator / context manager that silences DeprecationWarning
ignore_deprecation = warnings_helper.ignore_warnings(
    category=DeprecationWarning
)
def test_wrap_socket(sock, *,
                     cert_reqs=ssl.CERT_NONE, ca_certs=None,
                     ciphers=None, certfile=None, keyfile=None,
                     **kwargs):
    """Build an SSLContext from legacy wrap_socket-style keyword arguments
    and wrap *sock* with it."""
    server_side = kwargs.get("server_side")
    if server_side:
        context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    else:
        # client sockets verify against the test server certificate's hostname
        kwargs["server_hostname"] = SIGNED_CERTFILE_HOSTNAME
        context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)

    if cert_reqs is not None:
        # CERT_NONE is incompatible with hostname checking
        if cert_reqs == ssl.CERT_NONE:
            context.check_hostname = False
        context.verify_mode = cert_reqs
    if ca_certs is not None:
        context.load_verify_locations(ca_certs)
    if certfile is not None or keyfile is not None:
        context.load_cert_chain(certfile, keyfile)
    if ciphers is not None:
        context.set_ciphers(ciphers)

    return context.wrap_socket(sock, **kwargs)
def testing_context(server_cert=SIGNED_CERTFILE):
    """Create context

    client_context, server_context, hostname = testing_context()
    """
    # hostname baked into each supported server certificate
    cert_to_hostname = {
        SIGNED_CERTFILE: SIGNED_CERTFILE_HOSTNAME,
        SIGNED_CERTFILE2: SIGNED_CERTFILE2_HOSTNAME,
        NOSANFILE: NOSAN_HOSTNAME,
    }
    if server_cert not in cert_to_hostname:
        raise ValueError(server_cert)
    hostname = cert_to_hostname[server_cert]

    client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    client_context.load_verify_locations(SIGNING_CA)

    server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    server_context.load_cert_chain(server_cert)
    server_context.load_verify_locations(SIGNING_CA)

    return client_context, server_context, hostname
class BasicSocketTests(unittest.TestCase):
    def test_constants(self):
        """Touch constants that must always exist and pin a few known values."""
        # each bare reference raises AttributeError if the build lacks it
        ssl.CERT_NONE
        ssl.CERT_OPTIONAL
        ssl.CERT_REQUIRED
        ssl.OP_CIPHER_SERVER_PREFERENCE
        ssl.OP_SINGLE_DH_USE
        ssl.OP_SINGLE_ECDH_USE
        ssl.OP_NO_COMPRESSION
        self.assertEqual(ssl.HAS_SNI, True)
        self.assertEqual(ssl.HAS_ECDH, True)
        self.assertEqual(ssl.HAS_TLSv1_2, True)
        self.assertEqual(ssl.HAS_TLSv1_3, True)
        ssl.OP_NO_SSLv2
        ssl.OP_NO_SSLv3
        ssl.OP_NO_TLSv1
        ssl.OP_NO_TLSv1_3
        ssl.OP_NO_TLSv1_1
        ssl.OP_NO_TLSv1_2
        self.assertEqual(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv23)
    def test_private_init(self):
        """SSLSocket may only be created via SSLContext.wrap_socket()."""
        with self.assertRaisesRegex(TypeError, "public constructor"):
            with socket.socket() as s:
                ssl.SSLSocket(s)
    def test_str_for_enums(self):
        """PROTOCOL_* constants behave like enum members."""
        # Make sure that the PROTOCOL_* constants have enum-like string
        # reprs.
        proto = ssl.PROTOCOL_TLS_CLIENT
        self.assertEqual(str(proto), 'PROTOCOL_TLS_CLIENT')
        ctx = ssl.SSLContext(proto)
        self.assertIs(ctx.protocol, proto)
    def test_random(self):
        """Exercise the RAND_* wrappers (status, bytes, pseudo_bytes, add)."""
        v = ssl.RAND_status()
        if support.verbose:
            sys.stdout.write("\n RAND_status is %d (%s)\n"
                             % (v, (v and "sufficient randomness") or
                                "insufficient randomness"))
        # RAND_pseudo_bytes is deprecated; the helper swallows the warning
        with warnings_helper.check_warnings():
            data, is_cryptographic = ssl.RAND_pseudo_bytes(16)
        self.assertEqual(len(data), 16)
        self.assertEqual(is_cryptographic, v == 1)
        if v:
            data = ssl.RAND_bytes(16)
            self.assertEqual(len(data), 16)
        else:
            # without sufficient entropy RAND_bytes must fail loudly
            self.assertRaises(ssl.SSLError, ssl.RAND_bytes, 16)
        # negative num is invalid
        self.assertRaises(ValueError, ssl.RAND_bytes, -5)
        with warnings_helper.check_warnings():
            self.assertRaises(ValueError, ssl.RAND_pseudo_bytes, -5)
        # RAND_add accepts str, bytes and bytearray seeds
        ssl.RAND_add("this is a random string", 75.0)
        ssl.RAND_add(b"this is a random bytes object", 75.0)
        ssl.RAND_add(bytearray(b"this is a random bytearray object"), 75.0)
    def test_parse_cert(self):
        """Decode known certificates and compare against expected dicts."""
        # note that this uses an 'unofficial' function in _ssl.c,
        # provided solely for this test, to exercise the certificate
        # parsing code
        self.assertEqual(
            ssl._ssl._test_decode_cert(CERTFILE),
            CERTFILE_INFO
        )
        self.assertEqual(
            ssl._ssl._test_decode_cert(SIGNED_CERTFILE),
            SIGNED_CERTFILE_INFO
        )

        # Issue #13034: the subjectAltName in some certificates
        # (notably projects.developer.nokia.com:443) wasn't parsed
        p = ssl._ssl._test_decode_cert(NOKIACERT)
        if support.verbose:
            sys.stdout.write("\n" + pprint.pformat(p) + "\n")
        self.assertEqual(p['subjectAltName'],
                         (('DNS', 'projects.developer.nokia.com'),
                          ('DNS', 'projects.forum.nokia.com'))
                         )
        # extra OCSP and AIA fields
        self.assertEqual(p['OCSP'], ('http://ocsp.verisign.com',))
        self.assertEqual(p['caIssuers'],
                         ('http://SVRIntl-G3-aia.verisign.com/SVRIntlG3.cer',))
        self.assertEqual(p['crlDistributionPoints'],
                         ('http://SVRIntl-G3-crl.verisign.com/SVRIntlG3.crl',))
    def test_parse_cert_CVE_2019_5010(self):
        """A certificate with a malformed CRL distribution point must be
        decoded without crashing (CVE-2019-5010); note the expected dict
        below simply has no 'crlDistributionPoints' key."""
        p = ssl._ssl._test_decode_cert(TALOS_INVALID_CRLDP)
        if support.verbose:
            sys.stdout.write("\n" + pprint.pformat(p) + "\n")
        self.assertEqual(
            p,
            {
                'issuer': (
                    (('countryName', 'UK'),), (('commonName', 'cody-ca'),)),
                'notAfter': 'Jun 14 18:00:58 2028 GMT',
                'notBefore': 'Jun 18 18:00:58 2018 GMT',
                'serialNumber': '02',
                'subject': ((('countryName', 'UK'),),
                            (('commonName',
                              'codenomicon-vm-2.test.lal.cisco.com'),)),
                'subjectAltName': (
                    ('DNS', 'codenomicon-vm-2.test.lal.cisco.com'),),
                'version': 3
            }
        )
    def test_parse_cert_CVE_2013_4238(self):
        """Fields containing embedded NUL bytes must be decoded verbatim
        rather than silently truncated at the NUL (CVE-2013-4238)."""
        p = ssl._ssl._test_decode_cert(NULLBYTECERT)
        if support.verbose:
            sys.stdout.write("\n" + pprint.pformat(p) + "\n")
        subject = ((('countryName', 'US'),),
                   (('stateOrProvinceName', 'Oregon'),),
                   (('localityName', 'Beaverton'),),
                   (('organizationName', 'Python Software Foundation'),),
                   (('organizationalUnitName', 'Python Core Development'),),
                   (('commonName', 'null.python.org\x00example.org'),),
                   (('emailAddress', 'python-dev@python.org'),))
        # The fixture is self-signed, so subject and issuer are identical.
        self.assertEqual(p['subject'], subject)
        self.assertEqual(p['issuer'], subject)
        if ssl._OPENSSL_API_VERSION >= (0, 9, 8):
            san = (('DNS', 'altnull.python.org\x00example.com'),
                   ('email', 'null@python.org\x00user@example.org'),
                   ('URI', 'http://null.python.org\x00http://example.org'),
                   ('IP Address', '192.0.2.1'),
                   ('IP Address', '2001:DB8:0:0:0:0:0:1'))
        else:
            # OpenSSL 0.9.7 doesn't support IPv6 addresses in subjectAltName
            san = (('DNS', 'altnull.python.org\x00example.com'),
                   ('email', 'null@python.org\x00user@example.org'),
                   ('URI', 'http://null.python.org\x00http://example.org'),
                   ('IP Address', '192.0.2.1'),
                   ('IP Address', '<invalid>'))
        self.assertEqual(p['subjectAltName'], san)
    def test_parse_all_sans(self):
        """Every supported subjectAltName entry type must be decoded
        (DNS, othername, email, DirName, URI, IP, Registered ID)."""
        p = ssl._ssl._test_decode_cert(ALLSANFILE)
        self.assertEqual(p['subjectAltName'],
            (
                ('DNS', 'allsans'),
                # otherName entries are reported but not interpreted.
                ('othername', '<unsupported>'),
                ('othername', '<unsupported>'),
                ('email', 'user@example.org'),
                ('DNS', 'www.example.org'),
                ('DirName',
                    ((('countryName', 'XY'),),
                    (('localityName', 'Castle Anthrax'),),
                    (('organizationName', 'Python Software Foundation'),),
                    (('commonName', 'dirname example'),))),
                ('URI', 'https://www.python.org/'),
                ('IP Address', '127.0.0.1'),
                ('IP Address', '0:0:0:0:0:0:0:1'),
                ('Registered ID', '1.2.3.4.5')
            )
        )
def test_DER_to_PEM(self):
with open(CAFILE_CACERT, 'r') as f:
pem = f.read()
d1 = ssl.PEM_cert_to_DER_cert(pem)
p2 = ssl.DER_cert_to_PEM_cert(d1)
d2 = ssl.PEM_cert_to_DER_cert(p2)
self.assertEqual(d1, d2)
if not p2.startswith(ssl.PEM_HEADER + '\n'):
self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)
    def test_openssl_version(self):
        """Cross-check the three OPENSSL_VERSION* constants against each
        other and against the range of OpenSSL versions Python supports."""
        n = ssl.OPENSSL_VERSION_NUMBER
        t = ssl.OPENSSL_VERSION_INFO
        s = ssl.OPENSSL_VERSION
        self.assertIsInstance(n, int)
        self.assertIsInstance(t, tuple)
        self.assertIsInstance(s, str)
        # Some sanity checks follow
        # >= 1.1.1
        self.assertGreaterEqual(n, 0x10101000)
        # < 4.0
        self.assertLess(n, 0x40000000)
        major, minor, fix, patch, status = t
        self.assertGreaterEqual(major, 1)
        self.assertLess(major, 4)
        self.assertGreaterEqual(minor, 0)
        self.assertLess(minor, 256)
        self.assertGreaterEqual(fix, 0)
        self.assertLess(fix, 256)
        # patch fits in 6 bits and status in 4, matching the classic
        # OpenSSL version-number encoding.
        self.assertGreaterEqual(patch, 0)
        self.assertLessEqual(patch, 63)
        self.assertGreaterEqual(status, 0)
        self.assertLessEqual(status, 15)
        # The version string must correspond to the version tuple, for
        # both the OpenSSL and the LibreSSL spellings.
        libressl_ver = f"LibreSSL {major:d}"
        openssl_ver = f"OpenSSL {major:d}.{minor:d}.{fix:d}"
        self.assertTrue(
            s.startswith((openssl_ver, libressl_ver)),
            (s, t, hex(n))
        )
@support.cpython_only
def test_refcycle(self):
# Issue #7943: an SSL object doesn't create reference cycles with
# itself.
s = socket.socket(socket.AF_INET)
ss = test_wrap_socket(s)
wr = weakref.ref(ss)
with warnings_helper.check_warnings(("", ResourceWarning)):
del ss
self.assertEqual(wr(), None)
def test_wrapped_unconnected(self):
# Methods on an unconnected SSLSocket propagate the original
# OSError raise by the underlying socket object.
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s) as ss:
self.assertRaises(OSError, ss.recv, 1)
self.assertRaises(OSError, ss.recv_into, bytearray(b'x'))
self.assertRaises(OSError, ss.recvfrom, 1)
self.assertRaises(OSError, ss.recvfrom_into, bytearray(b'x'), 1)
self.assertRaises(OSError, ss.send, b'x')
self.assertRaises(OSError, ss.sendto, b'x', ('0.0.0.0', 0))
self.assertRaises(NotImplementedError, ss.dup)
self.assertRaises(NotImplementedError, ss.sendmsg,
[b'x'], (), 0, ('0.0.0.0', 0))
self.assertRaises(NotImplementedError, ss.recvmsg, 100)
self.assertRaises(NotImplementedError, ss.recvmsg_into,
[bytearray(100)])
def test_timeout(self):
# Issue #8524: when creating an SSL socket, the timeout of the
# original socket should be retained.
for timeout in (None, 0.0, 5.0):
s = socket.socket(socket.AF_INET)
s.settimeout(timeout)
with test_wrap_socket(s) as ss:
self.assertEqual(timeout, ss.gettimeout())
    @ignore_deprecation
    def test_errors_sslwrap(self):
        """Error paths of the (deprecated) top-level ssl.wrap_socket()."""
        sock = socket.socket()
        # A keyfile is useless without a matching certfile.
        self.assertRaisesRegex(ValueError,
                               "certfile must be specified",
                               ssl.wrap_socket, sock, keyfile=CERTFILE)
        # Server-side wrapping demands a non-empty certfile.
        self.assertRaisesRegex(ValueError,
                               "certfile must be specified for server-side operations",
                               ssl.wrap_socket, sock, server_side=True)
        self.assertRaisesRegex(ValueError,
                               "certfile must be specified for server-side operations",
                               ssl.wrap_socket, sock, server_side=True, certfile="")
        with ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE) as s:
            self.assertRaisesRegex(ValueError, "can't connect in server-side mode",
                                   s.connect, (HOST, 8080))
        # Nonexistent cert/key paths surface the OS error (ENOENT),
        # whichever of the two files is missing.
        with self.assertRaises(OSError) as cm:
            with socket.socket() as sock:
                ssl.wrap_socket(sock, certfile=NONEXISTINGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
        with self.assertRaises(OSError) as cm:
            with socket.socket() as sock:
                ssl.wrap_socket(sock,
                                certfile=CERTFILE, keyfile=NONEXISTINGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
        with self.assertRaises(OSError) as cm:
            with socket.socket() as sock:
                ssl.wrap_socket(sock,
                                certfile=NONEXISTINGCERT, keyfile=NONEXISTINGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
def bad_cert_test(self, certfile):
"""Check that trying to use the given client certificate fails"""
certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
certfile)
sock = socket.socket()
self.addCleanup(sock.close)
with self.assertRaises(ssl.SSLError):
test_wrap_socket(sock,
certfile=certfile)
def test_empty_cert(self):
"""Wrapping with an empty cert file"""
self.bad_cert_test("nullcert.pem")
def test_malformed_cert(self):
"""Wrapping with a badly formatted certificate (syntax error)"""
self.bad_cert_test("badcert.pem")
def test_malformed_key(self):
"""Wrapping with a badly formatted key (syntax error)"""
self.bad_cert_test("badkey.pem")
    @ignore_deprecation
    def test_match_hostname(self):
        """Exhaustively exercise the deprecated ssl.match_hostname():
        plain and wildcard CN matching, NUL-byte hardening, IDNA labels,
        subjectAltName precedence, IPv4/IPv6 entries, and the anti-DoS
        limits on wildcard counts."""
        # ok() must not raise; fail() must raise CertificateError.
        def ok(cert, hostname):
            ssl.match_hostname(cert, hostname)
        def fail(cert, hostname):
            self.assertRaises(ssl.CertificateError,
                              ssl.match_hostname, cert, hostname)
        # -- Hostname matching --
        cert = {'subject': ((('commonName', 'example.com'),),)}
        ok(cert, 'example.com')
        ok(cert, 'ExAmple.cOm')
        fail(cert, 'www.example.com')
        fail(cert, '.example.com')
        fail(cert, 'example.org')
        fail(cert, 'exampleXcom')
        cert = {'subject': ((('commonName', '*.a.com'),),)}
        ok(cert, 'foo.a.com')
        fail(cert, 'bar.foo.a.com')
        fail(cert, 'a.com')
        fail(cert, 'Xa.com')
        fail(cert, '.a.com')
        # only match wildcards when they are the only thing
        # in left-most segment
        cert = {'subject': ((('commonName', 'f*.com'),),)}
        fail(cert, 'foo.com')
        fail(cert, 'f.com')
        fail(cert, 'bar.com')
        fail(cert, 'foo.a.com')
        fail(cert, 'bar.foo.com')
        # NULL bytes are bad, CVE-2013-4073
        cert = {'subject': ((('commonName',
                              'null.python.org\x00example.org'),),)}
        ok(cert, 'null.python.org\x00example.org') # or raise an error?
        fail(cert, 'example.org')
        fail(cert, 'null.python.org')
        # error cases with wildcards
        cert = {'subject': ((('commonName', '*.*.a.com'),),)}
        fail(cert, 'bar.foo.a.com')
        fail(cert, 'a.com')
        fail(cert, 'Xa.com')
        fail(cert, '.a.com')
        cert = {'subject': ((('commonName', 'a.*.com'),),)}
        fail(cert, 'a.foo.com')
        fail(cert, 'a..com')
        fail(cert, 'a.com')
        # wildcard doesn't match IDNA prefix 'xn--'
        idna = 'püthon.python.org'.encode("idna").decode("ascii")
        cert = {'subject': ((('commonName', idna),),)}
        ok(cert, idna)
        cert = {'subject': ((('commonName', 'x*.python.org'),),)}
        fail(cert, idna)
        cert = {'subject': ((('commonName', 'xn--p*.python.org'),),)}
        fail(cert, idna)
        # wildcard in first fragment and IDNA A-labels in sequent fragments
        # are supported.
        idna = 'www*.pythön.org'.encode("idna").decode("ascii")
        cert = {'subject': ((('commonName', idna),),)}
        fail(cert, 'www.pythön.org'.encode("idna").decode("ascii"))
        fail(cert, 'www1.pythön.org'.encode("idna").decode("ascii"))
        fail(cert, 'ftp.pythön.org'.encode("idna").decode("ascii"))
        fail(cert, 'pythön.org'.encode("idna").decode("ascii"))
        # Slightly fake real-world example
        cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT',
                'subject': ((('commonName', 'linuxfrz.org'),),),
                'subjectAltName': (('DNS', 'linuxfr.org'),
                                   ('DNS', 'linuxfr.com'),
                                   ('othername', '<unsupported>'))}
        ok(cert, 'linuxfr.org')
        ok(cert, 'linuxfr.com')
        # Not a "DNS" entry
        fail(cert, '<unsupported>')
        # When there is a subjectAltName, commonName isn't used
        fail(cert, 'linuxfrz.org')
        # A pristine real-world example
        cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
                'subject': ((('countryName', 'US'),),
                            (('stateOrProvinceName', 'California'),),
                            (('localityName', 'Mountain View'),),
                            (('organizationName', 'Google Inc'),),
                            (('commonName', 'mail.google.com'),))}
        ok(cert, 'mail.google.com')
        fail(cert, 'gmail.com')
        # Only commonName is considered
        fail(cert, 'California')
        # -- IPv4 matching --
        cert = {'subject': ((('commonName', 'example.com'),),),
                'subjectAltName': (('DNS', 'example.com'),
                                   ('IP Address', '10.11.12.13'),
                                   ('IP Address', '14.15.16.17'),
                                   ('IP Address', '127.0.0.1'))}
        ok(cert, '10.11.12.13')
        ok(cert, '14.15.16.17')
        # socket.inet_ntoa(socket.inet_aton('127.1')) == '127.0.0.1'
        fail(cert, '127.1')
        fail(cert, '14.15.16.17 ')
        fail(cert, '14.15.16.17 extra data')
        fail(cert, '14.15.16.18')
        fail(cert, 'example.net')
        # -- IPv6 matching --
        if socket_helper.IPV6_ENABLED:
            cert = {'subject': ((('commonName', 'example.com'),),),
                    'subjectAltName': (
                        ('DNS', 'example.com'),
                        ('IP Address', '2001:0:0:0:0:0:0:CAFE\n'),
                        ('IP Address', '2003:0:0:0:0:0:0:BABA\n'))}
            ok(cert, '2001::cafe')
            ok(cert, '2003::baba')
            fail(cert, '2003::baba ')
            fail(cert, '2003::baba extra data')
            fail(cert, '2003::bebe')
            fail(cert, 'example.net')
        # -- Miscellaneous --
        # Neither commonName nor subjectAltName
        cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
                'subject': ((('countryName', 'US'),),
                            (('stateOrProvinceName', 'California'),),
                            (('localityName', 'Mountain View'),),
                            (('organizationName', 'Google Inc'),))}
        fail(cert, 'mail.google.com')
        # No DNS entry in subjectAltName but a commonName
        cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
                'subject': ((('countryName', 'US'),),
                            (('stateOrProvinceName', 'California'),),
                            (('localityName', 'Mountain View'),),
                            (('commonName', 'mail.google.com'),)),
                'subjectAltName': (('othername', 'blabla'), )}
        ok(cert, 'mail.google.com')
        # No DNS entry subjectAltName and no commonName
        cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
                'subject': ((('countryName', 'US'),),
                            (('stateOrProvinceName', 'California'),),
                            (('localityName', 'Mountain View'),),
                            (('organizationName', 'Google Inc'),)),
                'subjectAltName': (('othername', 'blabla'),)}
        fail(cert, 'google.com')
        # Empty cert / no cert
        self.assertRaises(ValueError, ssl.match_hostname, None, 'example.com')
        self.assertRaises(ValueError, ssl.match_hostname, {}, 'example.com')
        # Issue #17980: avoid denials of service by refusing more than one
        # wildcard per fragment.
        cert = {'subject': ((('commonName', 'a*b.example.com'),),)}
        with self.assertRaisesRegex(
                ssl.CertificateError,
                "partial wildcards in leftmost label are not supported"):
            ssl.match_hostname(cert, 'axxb.example.com')
        cert = {'subject': ((('commonName', 'www.*.example.com'),),)}
        with self.assertRaisesRegex(
                ssl.CertificateError,
                "wildcard can only be present in the leftmost label"):
            ssl.match_hostname(cert, 'www.sub.example.com')
        cert = {'subject': ((('commonName', 'a*b*.example.com'),),)}
        with self.assertRaisesRegex(
                ssl.CertificateError,
                "too many wildcards"):
            ssl.match_hostname(cert, 'axxbxxc.example.com')
        cert = {'subject': ((('commonName', '*'),),)}
        with self.assertRaisesRegex(
                ssl.CertificateError,
                "sole wildcard without additional labels are not support"):
            ssl.match_hostname(cert, 'host')
        cert = {'subject': ((('commonName', '*.com'),),)}
        with self.assertRaisesRegex(
                ssl.CertificateError,
                r"hostname 'com' doesn't match '\*.com'"):
            ssl.match_hostname(cert, 'com')
        # extra checks for _inet_paton()
        for invalid in ['1', '', '1.2.3', '256.0.0.1', '127.0.0.1/24']:
            with self.assertRaises(ValueError):
                ssl._inet_paton(invalid)
        for ipaddr in ['127.0.0.1', '192.168.0.1']:
            self.assertTrue(ssl._inet_paton(ipaddr))
        if socket_helper.IPV6_ENABLED:
            for ipaddr in ['::1', '2001:db8:85a3::8a2e:370:7334']:
                self.assertTrue(ssl._inet_paton(ipaddr))
def test_server_side(self):
# server_hostname doesn't work for server sockets
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
with socket.socket() as sock:
self.assertRaises(ValueError, ctx.wrap_socket, sock, True,
server_hostname="some.hostname")
def test_unknown_channel_binding(self):
# should raise ValueError for unknown type
s = socket.create_server(('127.0.0.1', 0))
c = socket.socket(socket.AF_INET)
c.connect(s.getsockname())
with test_wrap_socket(c, do_handshake_on_connect=False) as ss:
with self.assertRaises(ValueError):
ss.get_channel_binding("unknown-type")
s.close()
    @unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
                         "'tls-unique' channel binding not available")
    def test_tls_unique_channel_binding(self):
        """A known binding type yields None before any handshake, on both
        client- and server-side sockets."""
        # unconnected should return None for known type
        s = socket.socket(socket.AF_INET)
        with test_wrap_socket(s) as ss:
            self.assertIsNone(ss.get_channel_binding("tls-unique"))
        # the same for server-side
        s = socket.socket(socket.AF_INET)
        with test_wrap_socket(s, server_side=True, certfile=CERTFILE) as ss:
            self.assertIsNone(ss.get_channel_binding("tls-unique"))
    def test_dealloc_warn(self):
        """Deallocating an open SSLSocket must emit a ResourceWarning that
        names the socket (its repr appears in the warning message)."""
        ss = test_wrap_socket(socket.socket(socket.AF_INET))
        r = repr(ss)
        with self.assertWarns(ResourceWarning) as cm:
            # Drop the last reference; gc_collect() forces collection on
            # implementations without immediate refcounting.
            ss = None
            support.gc_collect()
        self.assertIn(r, str(cm.warning.args[0]))
def test_get_default_verify_paths(self):
paths = ssl.get_default_verify_paths()
self.assertEqual(len(paths), 6)
self.assertIsInstance(paths, ssl.DefaultVerifyPaths)
with os_helper.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
paths = ssl.get_default_verify_paths()
self.assertEqual(paths.cafile, CERTFILE)
self.assertEqual(paths.capath, CAPATH)
    @unittest.skipUnless(sys.platform == "win32", "Windows specific")
    def test_enum_certificates(self):
        """ssl.enum_certificates() yields (cert, encoding, trust) triples
        from the Windows system stores."""
        self.assertTrue(ssl.enum_certificates("CA"))
        self.assertTrue(ssl.enum_certificates("ROOT"))
        self.assertRaises(TypeError, ssl.enum_certificates)
        self.assertRaises(WindowsError, ssl.enum_certificates, "")
        trust_oids = set()
        for storename in ("CA", "ROOT"):
            store = ssl.enum_certificates(storename)
            self.assertIsInstance(store, list)
            for element in store:
                self.assertIsInstance(element, tuple)
                self.assertEqual(len(element), 3)
                cert, enc, trust = element
                self.assertIsInstance(cert, bytes)
                self.assertIn(enc, {"x509_asn", "pkcs_7_asn"})
                # trust is either an explicit set of purpose OIDs or a bool.
                self.assertIsInstance(trust, (frozenset, set, bool))
                if isinstance(trust, (frozenset, set)):
                    trust_oids.update(trust)
        # The serverAuth purpose OID must appear in at least one store.
        serverAuth = "1.3.6.1.5.5.7.3.1"
        self.assertIn(serverAuth, trust_oids)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_crls(self):
self.assertTrue(ssl.enum_crls("CA"))
self.assertRaises(TypeError, ssl.enum_crls)
self.assertRaises(WindowsError, ssl.enum_crls, "")
crls = ssl.enum_crls("CA")
self.assertIsInstance(crls, list)
for element in crls:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 2)
self.assertIsInstance(element[0], bytes)
self.assertIn(element[1], {"x509_asn", "pkcs_7_asn"})
    def test_asn1object(self):
        """_ASN1Object construction from OID, NID and name must all agree."""
        expected = (129, 'serverAuth', 'TLS Web Server Authentication',
                    '1.3.6.1.5.5.7.3.1')
        val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
        self.assertEqual(val, expected)
        self.assertEqual(val.nid, 129)
        self.assertEqual(val.shortname, 'serverAuth')
        self.assertEqual(val.longname, 'TLS Web Server Authentication')
        self.assertEqual(val.oid, '1.3.6.1.5.5.7.3.1')
        self.assertIsInstance(val, ssl._ASN1Object)
        # The constructor takes a dotted OID only, not a name.
        self.assertRaises(ValueError, ssl._ASN1Object, 'serverAuth')
        val = ssl._ASN1Object.fromnid(129)
        self.assertEqual(val, expected)
        self.assertIsInstance(val, ssl._ASN1Object)
        self.assertRaises(ValueError, ssl._ASN1Object.fromnid, -1)
        with self.assertRaisesRegex(ValueError, "unknown NID 100000"):
            ssl._ASN1Object.fromnid(100000)
        # Every resolvable low NID must yield well-typed attributes
        # (oid may be None).
        for i in range(1000):
            try:
                obj = ssl._ASN1Object.fromnid(i)
            except ValueError:
                pass
            else:
                self.assertIsInstance(obj.nid, int)
                self.assertIsInstance(obj.shortname, str)
                self.assertIsInstance(obj.longname, str)
                self.assertIsInstance(obj.oid, (str, type(None)))
        # fromname() accepts long name, short name and dotted OID alike,
        # but is case-sensitive.
        val = ssl._ASN1Object.fromname('TLS Web Server Authentication')
        self.assertEqual(val, expected)
        self.assertIsInstance(val, ssl._ASN1Object)
        self.assertEqual(ssl._ASN1Object.fromname('serverAuth'), expected)
        self.assertEqual(ssl._ASN1Object.fromname('1.3.6.1.5.5.7.3.1'),
                         expected)
        with self.assertRaisesRegex(ValueError, "unknown object 'serverauth'"):
            ssl._ASN1Object.fromname('serverauth')
def test_purpose_enum(self):
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertIsInstance(ssl.Purpose.SERVER_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.SERVER_AUTH, val)
self.assertEqual(ssl.Purpose.SERVER_AUTH.nid, 129)
self.assertEqual(ssl.Purpose.SERVER_AUTH.shortname, 'serverAuth')
self.assertEqual(ssl.Purpose.SERVER_AUTH.oid,
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.2')
self.assertIsInstance(ssl.Purpose.CLIENT_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.CLIENT_AUTH, val)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.nid, 130)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.shortname, 'clientAuth')
self.assertEqual(ssl.Purpose.CLIENT_AUTH.oid,
'1.3.6.1.5.5.7.3.2')
def test_unsupported_dtls(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
with self.assertRaises(NotImplementedError) as cx:
test_wrap_socket(s, cert_reqs=ssl.CERT_NONE)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with self.assertRaises(NotImplementedError) as cx:
ctx.wrap_socket(s)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
def cert_time_ok(self, timestring, timestamp):
self.assertEqual(ssl.cert_time_to_seconds(timestring), timestamp)
def cert_time_fail(self, timestring):
with self.assertRaises(ValueError):
ssl.cert_time_to_seconds(timestring)
    @unittest.skipUnless(utc_offset(),
                         'local time needs to be different from UTC')
    def test_cert_time_to_seconds_timezone(self):
        """Issue #19940: cert_time_to_seconds() must interpret its input as
        GMT regardless of the local timezone."""
        self.cert_time_ok("May 9 00:00:00 2007 GMT", 1178668800.0)
        self.cert_time_ok("Jan 5 09:34:43 2018 GMT", 1515144883.0)
    def test_cert_time_to_seconds(self):
        """Accepted and rejected forms of the OpenSSL cert-time format."""
        timestring = "Jan 5 09:34:43 2018 GMT"
        ts = 1515144883.0
        self.cert_time_ok(timestring, ts)
        # accept keyword parameter, assert its name
        self.assertEqual(ssl.cert_time_to_seconds(cert_time=timestring), ts)
        # accept both %e and %d (space or zero generated by strftime)
        self.cert_time_ok("Jan 05 09:34:43 2018 GMT", ts)
        # case-insensitive
        self.cert_time_ok("JaN 5 09:34:43 2018 GmT", ts)
        self.cert_time_fail("Jan 5 09:34 2018 GMT") # no seconds
        self.cert_time_fail("Jan 5 09:34:43 2018") # no GMT
        self.cert_time_fail("Jan 5 09:34:43 2018 UTC") # not GMT timezone
        self.cert_time_fail("Jan 35 09:34:43 2018 GMT") # invalid day
        self.cert_time_fail("Jon 5 09:34:43 2018 GMT") # invalid month
        self.cert_time_fail("Jan 5 24:00:00 2018 GMT") # invalid hour
        self.cert_time_fail("Jan 5 09:60:43 2018 GMT") # invalid minute
        newyear_ts = 1230768000.0
        # leap seconds
        self.cert_time_ok("Dec 31 23:59:60 2008 GMT", newyear_ts)
        # same timestamp
        self.cert_time_ok("Jan 1 00:00:00 2009 GMT", newyear_ts)
        self.cert_time_ok("Jan 5 09:34:59 2018 GMT", 1515144899)
        # allow 60th second (even if it is not a leap second)
        self.cert_time_ok("Jan 5 09:34:60 2018 GMT", 1515144900)
        # allow 2nd leap second for compatibility with time.strptime()
        self.cert_time_ok("Jan 5 09:34:61 2018 GMT", 1515144901)
        self.cert_time_fail("Jan 5 09:34:62 2018 GMT") # invalid seconds
        # no special treatment for the special value:
        # 99991231235959Z (rfc 5280)
        self.cert_time_ok("Dec 31 23:59:59 9999 GMT", 253402300799.0)
    @support.run_with_locale('LC_ALL', '')
    def test_cert_time_to_seconds_locale(self):
        """cert_time_to_seconds() must ignore the active locale's month
        names: English abbreviations work, localized ones are rejected."""
        def local_february_name():
            # Format a struct_time for February with the current locale;
            # only the %b (abbreviated month) field matters here.
            return time.strftime('%b', (1, 2, 3, 4, 5, 6, 0, 0, 0))
        if local_february_name().lower() == 'feb':
            self.skipTest("locale-specific month name needs to be "
                          "different from C locale")
        # locale-independent
        self.cert_time_ok("Feb 9 00:00:00 2007 GMT", 1170979200.0)
        self.cert_time_fail(local_february_name() + " 9 00:00:00 2007 GMT")
    def test_connect_ex_error(self):
        """connect_ex() to a bound-but-not-listening port must return an
        errno rather than raise."""
        server = socket.socket(socket.AF_INET)
        self.addCleanup(server.close)
        port = socket_helper.bind_port(server) # Reserve port but don't listen
        s = test_wrap_socket(socket.socket(socket.AF_INET),
                             cert_reqs=ssl.CERT_REQUIRED)
        self.addCleanup(s.close)
        rc = s.connect_ex((HOST, port))
        # Issue #19919: Windows machines or VMs hosted on Windows
        # machines sometimes return EWOULDBLOCK.
        errors = (
            errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.ETIMEDOUT,
            errno.EWOULDBLOCK,
        )
        self.assertIn(rc, errors)
def test_read_write_zero(self):
# empty reads and writes now work, bpo-42854, bpo-31711
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.send(b""), 0)
class ContextTests(unittest.TestCase):
    def test_constructor(self):
        """SSLContext accepts each known PROTOCOL_* constant, defaults to
        PROTOCOL_TLS with no argument, and rejects out-of-range values."""
        for protocol in PROTOCOLS:
            # check_warnings() captures warnings the constructor may emit
            # for legacy protocols — presumably deprecation warnings.
            with warnings_helper.check_warnings():
                ctx = ssl.SSLContext(protocol)
            self.assertEqual(ctx.protocol, protocol)
        with warnings_helper.check_warnings():
            ctx = ssl.SSLContext()
        self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
        self.assertRaises(ValueError, ssl.SSLContext, -1)
        self.assertRaises(ValueError, ssl.SSLContext, 42)
def test_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_ciphers("ALL")
ctx.set_ciphers("DEFAULT")
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
ctx.set_ciphers("^$:,;?*'dorothyx")
@unittest.skipUnless(PY_SSL_DEFAULT_CIPHERS == 1,
"Test applies only to Python default ciphers")
def test_python_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ciphers = ctx.get_ciphers()
for suite in ciphers:
name = suite['name']
self.assertNotIn("PSK", name)
self.assertNotIn("SRP", name)
self.assertNotIn("MD5", name)
self.assertNotIn("RC4", name)
self.assertNotIn("3DES", name)
def test_get_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_ciphers('AESGCM')
names = set(d['name'] for d in ctx.get_ciphers())
self.assertIn('AES256-GCM-SHA384', names)
self.assertIn('AES128-GCM-SHA256', names)
    def test_options(self):
        """Default option mask and get/set behaviour of SSLContext.options."""
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        # OP_ALL | OP_NO_SSLv2 | OP_NO_SSLv3 is the default value
        default = (ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
        # SSLContext also enables these by default
        default |= (OP_NO_COMPRESSION | OP_CIPHER_SERVER_PREFERENCE |
                    OP_SINGLE_DH_USE | OP_SINGLE_ECDH_USE |
                    OP_ENABLE_MIDDLEBOX_COMPAT |
                    OP_IGNORE_UNEXPECTED_EOF)
        self.assertEqual(default, ctx.options)
        # check_warnings() captures warnings emitted by OP_NO_* updates —
        # presumably deprecation warnings for version pinning via options.
        with warnings_helper.check_warnings():
            ctx.options |= ssl.OP_NO_TLSv1
        self.assertEqual(default | ssl.OP_NO_TLSv1, ctx.options)
        with warnings_helper.check_warnings():
            ctx.options = (ctx.options & ~ssl.OP_NO_TLSv1)
        self.assertEqual(default, ctx.options)
        ctx.options = 0
        # Ubuntu has OP_NO_SSLv3 forced on by default
        self.assertEqual(0, ctx.options & ~ssl.OP_NO_SSLv3)
def test_verify_mode_protocol(self):
with warnings_helper.check_warnings():
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
# Default value
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
ctx.verify_mode = ssl.CERT_OPTIONAL
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
with self.assertRaises(TypeError):
ctx.verify_mode = None
with self.assertRaises(ValueError):
ctx.verify_mode = 42
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
def test_hostname_checks_common_name(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.hostname_checks_common_name)
if ssl.HAS_NEVER_CHECK_COMMON_NAME:
ctx.hostname_checks_common_name = True
self.assertTrue(ctx.hostname_checks_common_name)
ctx.hostname_checks_common_name = False
self.assertFalse(ctx.hostname_checks_common_name)
ctx.hostname_checks_common_name = True
self.assertTrue(ctx.hostname_checks_common_name)
else:
with self.assertRaises(AttributeError):
ctx.hostname_checks_common_name = True
    @ignore_deprecation
    def test_min_max_version(self):
        """minimum_version/maximum_version: defaults, assignment, sentinel
        resolution, and interaction with version-pinned protocols."""
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
        # OpenSSL default is MINIMUM_SUPPORTED, however some vendors like
        # Fedora override the setting to TLS 1.0.
        minimum_range = {
            # stock OpenSSL
            ssl.TLSVersion.MINIMUM_SUPPORTED,
            # Fedora 29 uses TLS 1.0 by default
            ssl.TLSVersion.TLSv1,
            # RHEL 8 uses TLS 1.2 by default
            ssl.TLSVersion.TLSv1_2
        }
        maximum_range = {
            # stock OpenSSL
            ssl.TLSVersion.MAXIMUM_SUPPORTED,
            # Fedora 32 uses TLS 1.3 by default
            ssl.TLSVersion.TLSv1_3
        }
        self.assertIn(
            ctx.minimum_version, minimum_range
        )
        self.assertIn(
            ctx.maximum_version, maximum_range
        )
        # Both bounds are freely assignable on a flexible protocol.
        ctx.minimum_version = ssl.TLSVersion.TLSv1_1
        ctx.maximum_version = ssl.TLSVersion.TLSv1_2
        self.assertEqual(
            ctx.minimum_version, ssl.TLSVersion.TLSv1_1
        )
        self.assertEqual(
            ctx.maximum_version, ssl.TLSVersion.TLSv1_2
        )
        ctx.minimum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
        ctx.maximum_version = ssl.TLSVersion.TLSv1
        self.assertEqual(
            ctx.minimum_version, ssl.TLSVersion.MINIMUM_SUPPORTED
        )
        self.assertEqual(
            ctx.maximum_version, ssl.TLSVersion.TLSv1
        )
        ctx.maximum_version = ssl.TLSVersion.MAXIMUM_SUPPORTED
        self.assertEqual(
            ctx.maximum_version, ssl.TLSVersion.MAXIMUM_SUPPORTED
        )
        # Assigning a sentinel for the *opposite* bound resolves to a
        # concrete version; which one depends on the OpenSSL build.
        ctx.maximum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
        self.assertIn(
            ctx.maximum_version,
            {ssl.TLSVersion.TLSv1, ssl.TLSVersion.SSLv3}
        )
        ctx.minimum_version = ssl.TLSVersion.MAXIMUM_SUPPORTED
        self.assertIn(
            ctx.minimum_version,
            {ssl.TLSVersion.TLSv1_2, ssl.TLSVersion.TLSv1_3}
        )
        # Arbitrary integers are rejected.
        with self.assertRaises(ValueError):
            ctx.minimum_version = 42
        # Version-pinned protocols don't allow changing the bounds.
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1_1)
        self.assertIn(
            ctx.minimum_version, minimum_range
        )
        self.assertEqual(
            ctx.maximum_version, ssl.TLSVersion.MAXIMUM_SUPPORTED
        )
        with self.assertRaises(ValueError):
            ctx.minimum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
        with self.assertRaises(ValueError):
            ctx.maximum_version = ssl.TLSVersion.TLSv1
@unittest.skipUnless(
hasattr(ssl.SSLContext, 'security_level'),
"requires OpenSSL >= 1.1.0"
)
def test_security_level(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
# The default security callback allows for levels between 0-5
# with OpenSSL defaulting to 1, however some vendors override the
# default value (e.g. Debian defaults to 2)
security_level_range = {
0,
1, # OpenSSL default
2, # Debian
3,
4,
5,
}
self.assertIn(ctx.security_level, security_level_range)
def test_verify_flags(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# default value
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT | tf)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_CHAIN
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_CHAIN)
ctx.verify_flags = ssl.VERIFY_DEFAULT
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
ctx.verify_flags = ssl.VERIFY_ALLOW_PROXY_CERTS
self.assertEqual(ctx.verify_flags, ssl.VERIFY_ALLOW_PROXY_CERTS)
# supports any value
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT
self.assertEqual(ctx.verify_flags,
ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT)
with self.assertRaises(TypeError):
ctx.verify_flags = None
    def test_load_cert_chain(self):
        """load_cert_chain(): combined and split cert/key files, mismatch
        detection, and every supported form of the password argument."""
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
        # Combined key and cert in a single file
        ctx.load_cert_chain(CERTFILE, keyfile=None)
        ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE)
        # certfile is mandatory; keyfile alone is a TypeError.
        self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE)
        with self.assertRaises(OSError) as cm:
            ctx.load_cert_chain(NONEXISTINGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(BADCERT)
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(EMPTYCERT)
        # Separate key and cert
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
        ctx.load_cert_chain(ONLYCERT, ONLYKEY)
        ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
        ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY)
        # Passing only one half of the pair fails either way around.
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(ONLYCERT)
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(ONLYKEY)
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT)
        # Mismatching key and cert
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
        with self.assertRaisesRegex(ssl.SSLError, "key values mismatch"):
            ctx.load_cert_chain(CAFILE_CACERT, ONLYKEY)
        # Password protected key and cert: str, bytes and bytearray
        # passwords are all accepted.
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD)
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode())
        ctx.load_cert_chain(CERTFILE_PROTECTED,
                            password=bytearray(KEY_PASSWORD.encode()))
        ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD)
        ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD.encode())
        ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED,
                            bytearray(KEY_PASSWORD.encode()))
        with self.assertRaisesRegex(TypeError, "should be a string"):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password=True)
        with self.assertRaises(ssl.SSLError):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password="badpass")
        with self.assertRaisesRegex(ValueError, "cannot be longer"):
            # openssl has a fixed limit on the password buffer.
            # PEM_BUFSIZE is generally set to 1kb.
            # Return a string larger than this.
            ctx.load_cert_chain(CERTFILE_PROTECTED, password=b'a' * 102400)
        # Password callback: each helper below exercises one return type
        # or failure mode of a callable password provider.
        def getpass_unicode():
            return KEY_PASSWORD
        def getpass_bytes():
            return KEY_PASSWORD.encode()
        def getpass_bytearray():
            return bytearray(KEY_PASSWORD.encode())
        def getpass_badpass():
            return "badpass"
        def getpass_huge():
            return b'a' * (1024 * 1024)
        def getpass_bad_type():
            return 9
        def getpass_exception():
            raise Exception('getpass error')
        class GetPassCallable:
            def __call__(self):
                return KEY_PASSWORD
            def getpass(self):
                return KEY_PASSWORD
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_unicode)
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytes)
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytearray)
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable())
        ctx.load_cert_chain(CERTFILE_PROTECTED,
                            password=GetPassCallable().getpass)
        with self.assertRaises(ssl.SSLError):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_badpass)
        with self.assertRaisesRegex(ValueError, "cannot be longer"):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_huge)
        with self.assertRaisesRegex(TypeError, "must return a string"):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bad_type)
        with self.assertRaisesRegex(Exception, "getpass error"):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_exception)
        # Make sure the password function isn't called if it isn't needed
        ctx.load_cert_chain(CERTFILE, password=getpass_exception)
def test_load_verify_locations(self):
    """load_verify_locations() accepts str/bytes paths and rejects bad input."""
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    # Both str and bytes paths are valid, positionally or by keyword.
    for cafile in (CERTFILE, BYTES_CERTFILE):
        ctx.load_verify_locations(cafile)
        ctx.load_verify_locations(cafile=cafile, capath=None)
    # At least one location argument is required, and None is not a path.
    self.assertRaises(TypeError, ctx.load_verify_locations)
    self.assertRaises(TypeError, ctx.load_verify_locations, None, None, None)
    with self.assertRaises(OSError) as err:
        ctx.load_verify_locations(NONEXISTINGCERT)
    self.assertEqual(err.exception.errno, errno.ENOENT)
    with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
        ctx.load_verify_locations(BADCERT)
    # capath may be given as str or bytes as well.
    ctx.load_verify_locations(CERTFILE, CAPATH)
    ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH)
    # Issue #10989: crash if the second argument type is invalid
    self.assertRaises(TypeError, ctx.load_verify_locations, None, True)
def test_load_verify_cadata(self):
    """load_verify_locations(cadata=...) accepts PEM text and DER bytes."""
    # Two distinct CA certificates, each converted to DER as well.
    with open(CAFILE_CACERT) as f:
        cacert_pem = f.read()
    cacert_der = ssl.PEM_cert_to_DER_cert(cacert_pem)
    with open(CAFILE_NEURONIO) as f:
        neuronio_pem = f.read()
    neuronio_der = ssl.PEM_cert_to_DER_cert(neuronio_pem)
    # test PEM
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    self.assertEqual(ctx.cert_store_stats()["x509_ca"], 0)
    ctx.load_verify_locations(cadata=cacert_pem)
    self.assertEqual(ctx.cert_store_stats()["x509_ca"], 1)
    ctx.load_verify_locations(cadata=neuronio_pem)
    self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
    # cert already in hash table: reloading is a no-op, count stays at 2.
    ctx.load_verify_locations(cadata=neuronio_pem)
    self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
    # combined: several PEM certs may be passed in one string.
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    combined = "\n".join((cacert_pem, neuronio_pem))
    ctx.load_verify_locations(cadata=combined)
    self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
    # with junk around the certs: non-PEM text between certs is tolerated.
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    combined = ["head", cacert_pem, "other", neuronio_pem, "again",
                neuronio_pem, "tail"]
    ctx.load_verify_locations(cadata="\n".join(combined))
    self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
    # test DER
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    ctx.load_verify_locations(cadata=cacert_der)
    ctx.load_verify_locations(cadata=neuronio_der)
    self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
    # cert already in hash table
    ctx.load_verify_locations(cadata=cacert_der)
    self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
    # combined: DER certs may be concatenated into one bytes object.
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    combined = b"".join((cacert_der, neuronio_der))
    ctx.load_verify_locations(cadata=combined)
    self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
    # error cases
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    self.assertRaises(TypeError, ctx.load_verify_locations, cadata=object)
    # Text cadata without a PEM header is rejected...
    with self.assertRaisesRegex(
        ssl.SSLError,
        "no start line: cadata does not contain a certificate"
    ):
        ctx.load_verify_locations(cadata="broken")
    # ...and bytes cadata must be a complete DER structure.
    with self.assertRaisesRegex(
        ssl.SSLError,
        "not enough data: cadata does not contain a certificate"
    ):
        ctx.load_verify_locations(cadata=b"broken")
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_load_dh_params(self):
    """load_dh_params() loads PEM DH parameter files and rejects junk."""
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    ctx.load_dh_params(DHFILE)
    # Bytes paths are exercised everywhere except on Windows.
    if os.name != 'nt':
        ctx.load_dh_params(BYTES_DHFILE)
    # The path argument is mandatory and must not be None.
    self.assertRaises(TypeError, ctx.load_dh_params)
    self.assertRaises(TypeError, ctx.load_dh_params, None)
    # A missing file surfaces as FileNotFoundError with ENOENT...
    with self.assertRaises(FileNotFoundError) as caught:
        ctx.load_dh_params(NONEXISTINGCERT)
    self.assertEqual(caught.exception.errno, errno.ENOENT)
    # ...while a file without DH params (a certificate) is an SSLError.
    with self.assertRaises(ssl.SSLError) as caught:
        ctx.load_dh_params(CERTFILE)
def test_session_stats(self):
for proto in {ssl.PROTOCOL_TLS_CLIENT, ssl.PROTOCOL_TLS_SERVER}:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.session_stats(), {
'number': 0,
'connect': 0,
'connect_good': 0,
'connect_renegotiate': 0,
'accept': 0,
'accept_good': 0,
'accept_renegotiate': 0,
'hits': 0,
'misses': 0,
'timeouts': 0,
'cache_full': 0,
})
def test_set_default_verify_paths(self):
# There's not much we can do to test that it acts as expected,
# so just check it doesn't crash or raise an exception.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_default_verify_paths()
@unittest.skipUnless(ssl.HAS_ECDH, "ECDH disabled on this OpenSSL build")
def test_set_ecdh_curve(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.set_ecdh_curve("prime256v1")
ctx.set_ecdh_curve(b"prime256v1")
self.assertRaises(TypeError, ctx.set_ecdh_curve)
self.assertRaises(TypeError, ctx.set_ecdh_curve, None)
self.assertRaises(ValueError, ctx.set_ecdh_curve, "foo")
self.assertRaises(ValueError, ctx.set_ecdh_curve, b"foo")
def test_sni_callback(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# set_servername_callback expects a callable, or None
self.assertRaises(TypeError, ctx.set_servername_callback)
self.assertRaises(TypeError, ctx.set_servername_callback, 4)
self.assertRaises(TypeError, ctx.set_servername_callback, "")
self.assertRaises(TypeError, ctx.set_servername_callback, ctx)
def dummycallback(sock, servername, ctx):
pass
ctx.set_servername_callback(None)
ctx.set_servername_callback(dummycallback)
def test_sni_callback_refcycle(self):
# Reference cycles through the servername callback are detected
# and cleared.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
def dummycallback(sock, servername, ctx, cycle=ctx):
pass
ctx.set_servername_callback(dummycallback)
wr = weakref.ref(ctx)
del ctx, dummycallback
gc.collect()
self.assertIs(wr(), None)
def test_cert_store_stats(self):
    """cert_store_stats() counts only certs added to the verify store."""
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    empty = {'x509_ca': 0, 'crl': 0, 'x509': 0}
    self.assertEqual(ctx.cert_store_stats(), empty)
    # Loading our own cert chain does not touch the verification store.
    ctx.load_cert_chain(CERTFILE)
    self.assertEqual(ctx.cert_store_stats(), empty)
    # CERTFILE is not a CA cert, so it counts as a plain x509 entry.
    ctx.load_verify_locations(CERTFILE)
    self.assertEqual(ctx.cert_store_stats(),
                     {'x509_ca': 0, 'crl': 0, 'x509': 1})
    # CAFILE_CACERT is a CA cert and bumps both counters.
    ctx.load_verify_locations(CAFILE_CACERT)
    self.assertEqual(ctx.cert_store_stats(),
                     {'x509_ca': 1, 'crl': 0, 'x509': 2})
def test_get_ca_certs(self):
    """get_ca_certs() lists only loaded certs flagged as CAs."""
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    self.assertEqual(ctx.get_ca_certs(), [])
    # CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE
    ctx.load_verify_locations(CERTFILE)
    self.assertEqual(ctx.get_ca_certs(), [])
    # but CAFILE_CACERT is a CA cert
    ctx.load_verify_locations(CAFILE_CACERT)
    self.assertEqual(ctx.get_ca_certs(),
        [{'issuer': ((('organizationName', 'Root CA'),),
                     (('organizationalUnitName', 'http://www.cacert.org'),),
                     (('commonName', 'CA Cert Signing Authority'),),
                     (('emailAddress', 'support@cacert.org'),)),
          'notAfter': 'Mar 29 12:29:49 2033 GMT',
          'notBefore': 'Mar 30 12:29:49 2003 GMT',
          'serialNumber': '00',
          'crlDistributionPoints': ('https://www.cacert.org/revoke.crl',),
          'subject': ((('organizationName', 'Root CA'),),
                      (('organizationalUnitName', 'http://www.cacert.org'),),
                      (('commonName', 'CA Cert Signing Authority'),),
                      (('emailAddress', 'support@cacert.org'),)),
          'version': 3}])
    # With binary_form=True the same cert comes back as DER bytes.
    with open(CAFILE_CACERT) as f:
        pem = f.read()
    der = ssl.PEM_cert_to_DER_cert(pem)
    self.assertEqual(ctx.get_ca_certs(True), [der])
def test_load_default_certs(self):
    """load_default_certs() accepts no argument or a Purpose value only."""
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    ctx.load_default_certs()
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    ctx.load_default_certs(ssl.Purpose.SERVER_AUTH)
    # Loading twice on the same context is allowed.
    ctx.load_default_certs()
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    ctx.load_default_certs(ssl.Purpose.CLIENT_AUTH)
    # Non-Purpose arguments (including a purpose given by name) are rejected.
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    self.assertRaises(TypeError, ctx.load_default_certs, None)
    self.assertRaises(TypeError, ctx.load_default_certs, 'SERVER_AUTH')
@unittest.skipIf(sys.platform == "win32", "not-Windows specific")
def test_load_default_certs_env(self):
    """SSL_CERT_DIR / SSL_CERT_FILE env vars steer load_default_certs()."""
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    with os_helper.EnvironmentVarGuard() as env:
        env["SSL_CERT_DIR"] = CAPATH
        env["SSL_CERT_FILE"] = CERTFILE
        ctx.load_default_certs()
        # Only SSL_CERT_FILE's single (non-CA) cert shows up in the stats.
        self.assertEqual(ctx.cert_store_stats(), {"crl": 0, "x509": 1, "x509_ca": 0})
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
@unittest.skipIf(hasattr(sys, "gettotalrefcount"), "Debug build does not share environment between CRTs")
def test_load_default_certs_env_windows(self):
    """On Windows the env vars add on top of the system cert stores."""
    # Baseline: whatever the Windows system stores provide.
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    ctx.load_default_certs()
    stats = ctx.cert_store_stats()
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    with os_helper.EnvironmentVarGuard() as env:
        env["SSL_CERT_DIR"] = CAPATH
        env["SSL_CERT_FILE"] = CERTFILE
        ctx.load_default_certs()
        # Expect exactly one extra (non-CA) cert from SSL_CERT_FILE.
        stats["x509"] += 1
        self.assertEqual(ctx.cert_store_stats(), stats)
def _assert_context_options(self, ctx):
    """Check ctx carries the options create_default_context() is meant to set.

    The module-level OP_* constants are 0 on OpenSSL builds that lack the
    corresponding option, in which case there is nothing to check.
    """
    self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
    for option in (OP_NO_COMPRESSION, OP_SINGLE_DH_USE,
                   OP_SINGLE_ECDH_USE, OP_CIPHER_SERVER_PREFERENCE):
        if option != 0:
            self.assertEqual(ctx.options & option, option)
def test_create_default_context(self):
    """create_default_context() picks secure defaults for each purpose."""
    # Default purpose is SERVER_AUTH: a verifying client context.
    ctx = ssl.create_default_context()
    self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_CLIENT)
    self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
    self.assertTrue(ctx.check_hostname)
    self._assert_context_options(ctx)
    # cafile, capath and cadata may all be supplied together.
    with open(SIGNING_CA) as f:
        cadata = f.read()
    ctx = ssl.create_default_context(cafile=SIGNING_CA, capath=CAPATH,
                                     cadata=cadata)
    self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_CLIENT)
    self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
    self._assert_context_options(ctx)
    # CLIENT_AUTH yields a server context that does not verify clients.
    ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
    self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_SERVER)
    self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
    self._assert_context_options(ctx)
def test__create_stdlib_context(self):
    """_create_stdlib_context() defaults to an unverified client context."""
    ctx = ssl._create_stdlib_context()
    self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_CLIENT)
    self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
    self.assertFalse(ctx.check_hostname)
    self._assert_context_options(ctx)
    # Explicit (deprecated) protocol selection; guard the warnings.
    with warnings_helper.check_warnings():
        ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1)
    self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
    self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
    self._assert_context_options(ctx)
    # cert_reqs and check_hostname are honoured when requested.
    with warnings_helper.check_warnings():
        ctx = ssl._create_stdlib_context(
            ssl.PROTOCOL_TLSv1_2,
            cert_reqs=ssl.CERT_REQUIRED,
            check_hostname=True
        )
    self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1_2)
    self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
    self.assertTrue(ctx.check_hostname)
    self._assert_context_options(ctx)
    # CLIENT_AUTH purpose flips it to an (unverifying) server context.
    ctx = ssl._create_stdlib_context(purpose=ssl.Purpose.CLIENT_AUTH)
    self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_SERVER)
    self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
    self._assert_context_options(ctx)
def test_check_hostname(self):
    """check_hostname and verify_mode interact but stay distinct knobs."""
    with warnings_helper.check_warnings():
        # PROTOCOL_TLS is deprecated, hence the warnings guard.
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
    self.assertFalse(ctx.check_hostname)
    self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
    # Auto set CERT_REQUIRED
    ctx.check_hostname = True
    self.assertTrue(ctx.check_hostname)
    self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
    ctx.check_hostname = False
    ctx.verify_mode = ssl.CERT_REQUIRED
    self.assertFalse(ctx.check_hostname)
    self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
    # Changing verify_mode does not affect check_hostname
    ctx.check_hostname = False
    ctx.verify_mode = ssl.CERT_NONE
    ctx.check_hostname = False
    self.assertFalse(ctx.check_hostname)
    self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
    # Auto set
    ctx.check_hostname = True
    self.assertTrue(ctx.check_hostname)
    self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
    ctx.check_hostname = False
    ctx.verify_mode = ssl.CERT_OPTIONAL
    ctx.check_hostname = False
    self.assertFalse(ctx.check_hostname)
    self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
    # keep CERT_OPTIONAL: enabling check_hostname must not downgrade it
    ctx.check_hostname = True
    self.assertTrue(ctx.check_hostname)
    self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
    # Cannot set CERT_NONE with check_hostname enabled
    with self.assertRaises(ValueError):
        ctx.verify_mode = ssl.CERT_NONE
    # ...but disabling check_hostname first makes it legal again.
    ctx.check_hostname = False
    self.assertFalse(ctx.check_hostname)
    ctx.verify_mode = ssl.CERT_NONE
    self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
def test_context_client_server(self):
# PROTOCOL_TLS_CLIENT has sane defaults
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
# PROTOCOL_TLS_SERVER has different but also sane defaults
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
def test_context_custom_class(self):
class MySSLSocket(ssl.SSLSocket):
pass
class MySSLObject(ssl.SSLObject):
pass
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.sslsocket_class = MySSLSocket
ctx.sslobject_class = MySSLObject
with ctx.wrap_socket(socket.socket(), server_side=True) as sock:
self.assertIsInstance(sock, MySSLSocket)
obj = ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO())
self.assertIsInstance(obj, MySSLObject)
def test_num_tickest(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(ctx.num_tickets, 2)
ctx.num_tickets = 1
self.assertEqual(ctx.num_tickets, 1)
ctx.num_tickets = 0
self.assertEqual(ctx.num_tickets, 0)
with self.assertRaises(ValueError):
ctx.num_tickets = -1
with self.assertRaises(TypeError):
ctx.num_tickets = None
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.num_tickets, 2)
with self.assertRaises(ValueError):
ctx.num_tickets = 1
class SSLErrorTests(unittest.TestCase):
    """Tests for SSLError and its subclasses' attributes and formatting."""

    def test_str(self):
        # The str() of a SSLError doesn't include the errno
        e = ssl.SSLError(1, "foo")
        self.assertEqual(str(e), "foo")
        self.assertEqual(e.errno, 1)
        # Same for a subclass
        e = ssl.SSLZeroReturnError(1, "foo")
        self.assertEqual(str(e), "foo")
        self.assertEqual(e.errno, 1)

    @unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
    def test_lib_reason(self):
        # Test the library and reason attributes
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        # Feeding a certificate where DH params are expected fails inside
        # the PEM library with a NO_START_LINE reason code.
        with self.assertRaises(ssl.SSLError) as cm:
            ctx.load_dh_params(CERTFILE)
        self.assertEqual(cm.exception.library, 'PEM')
        self.assertEqual(cm.exception.reason, 'NO_START_LINE')
        s = str(cm.exception)
        self.assertTrue(s.startswith("[PEM: NO_START_LINE] no start line"), s)

    def test_subclass(self):
        # Check that the appropriate SSLError subclass is raised
        # (this only tests one of them)
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE
        with socket.create_server(("127.0.0.1", 0)) as s:
            c = socket.create_connection(s.getsockname())
            c.setblocking(False)
            with ctx.wrap_socket(c, False, do_handshake_on_connect=False) as c:
                # Non-blocking handshake with no server reply available
                # must raise the WantRead subclass.
                with self.assertRaises(ssl.SSLWantReadError) as cm:
                    c.do_handshake()
                s = str(cm.exception)
                self.assertTrue(s.startswith("The operation did not complete (read)"), s)
                # For compatibility
                self.assertEqual(cm.exception.errno, ssl.SSL_ERROR_WANT_READ)

    def test_bad_server_hostname(self):
        # wrap_bio() validates server_hostname: empty, leading-dot and
        # NUL-containing names are all rejected up front.
        ctx = ssl.create_default_context()
        with self.assertRaises(ValueError):
            ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
                         server_hostname="")
        with self.assertRaises(ValueError):
            ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
                         server_hostname=".example.org")
        with self.assertRaises(TypeError):
            ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
                         server_hostname="example.org\x00evil.com")
class MemoryBIOTests(unittest.TestCase):
    """Unit tests for the ssl.MemoryBIO in-memory I/O buffer."""

    def test_read_write(self):
        """Data is read back exactly as written, in FIFO order."""
        buf = ssl.MemoryBIO()
        buf.write(b'foo')
        self.assertEqual(buf.read(), b'foo')
        # A drained BIO yields empty bytes, not an error.
        self.assertEqual(buf.read(), b'')
        # Successive writes coalesce into a single readable stream.
        buf.write(b'foo')
        buf.write(b'bar')
        self.assertEqual(buf.read(), b'foobar')
        self.assertEqual(buf.read(), b'')
        # A sized read returns at most that many bytes.
        buf.write(b'baz')
        self.assertEqual(buf.read(2), b'ba')
        self.assertEqual(buf.read(1), b'z')
        self.assertEqual(buf.read(1), b'')

    def test_eof(self):
        """eof turns true only once EOF is signalled AND all data is drained."""
        buf = ssl.MemoryBIO()
        self.assertFalse(buf.eof)
        self.assertEqual(buf.read(), b'')
        self.assertFalse(buf.eof)
        buf.write(b'foo')
        self.assertFalse(buf.eof)
        buf.write_eof()
        # Data is still pending, so not EOF yet.
        self.assertFalse(buf.eof)
        self.assertEqual(buf.read(2), b'fo')
        self.assertFalse(buf.eof)
        self.assertEqual(buf.read(1), b'o')
        # Everything consumed and EOF signalled: now (and forever) true.
        self.assertTrue(buf.eof)
        self.assertEqual(buf.read(), b'')
        self.assertTrue(buf.eof)

    def test_pending(self):
        """pending tracks the exact number of unread bytes."""
        buf = ssl.MemoryBIO()
        self.assertEqual(buf.pending, 0)
        buf.write(b'foo')
        self.assertEqual(buf.pending, 3)
        # Draining one byte at a time decrements the count...
        for remaining in (2, 1, 0):
            buf.read(1)
            self.assertEqual(buf.pending, remaining)
        # ...and each write increments it again.
        for expected in (1, 2, 3):
            buf.write(b'x')
            self.assertEqual(buf.pending, expected)
        buf.read()
        self.assertEqual(buf.pending, 0)

    def test_buffer_types(self):
        """write() accepts any bytes-like object."""
        buf = ssl.MemoryBIO()
        for chunk in (b'foo', bytearray(b'bar'), memoryview(b'baz')):
            buf.write(chunk)
            self.assertEqual(buf.read(), bytes(chunk))

    def test_error_types(self):
        """write() rejects anything that is not bytes-like."""
        buf = ssl.MemoryBIO()
        for bad in ('foo', None, True, 1):
            self.assertRaises(TypeError, buf.write, bad)
class SSLObjectTests(unittest.TestCase):
    """Tests for ssl.SSLObject construction and BIO-level shutdown."""

    def test_private_init(self):
        # SSLObject cannot be instantiated directly; only via wrap_bio().
        bio = ssl.MemoryBIO()
        with self.assertRaisesRegex(TypeError, "public constructor"):
            ssl.SSLObject(bio, bio)

    def test_unwrap(self):
        # Full client/server handshake over memory BIOs, then verify the
        # close-notify exchange that unwrap() performs.
        client_ctx, server_ctx, hostname = testing_context()
        c_in = ssl.MemoryBIO()
        c_out = ssl.MemoryBIO()
        s_in = ssl.MemoryBIO()
        s_out = ssl.MemoryBIO()
        client = client_ctx.wrap_bio(c_in, c_out, server_hostname=hostname)
        server = server_ctx.wrap_bio(s_in, s_out, server_side=True)
        # Loop on the handshake for a bit to get it settled
        for _ in range(5):
            try:
                client.do_handshake()
            except ssl.SSLWantReadError:
                pass
            if c_out.pending:
                s_in.write(c_out.read())
            try:
                server.do_handshake()
            except ssl.SSLWantReadError:
                pass
            if s_out.pending:
                c_in.write(s_out.read())
        # Now the handshakes should be complete (don't raise WantReadError)
        client.do_handshake()
        server.do_handshake()
        # Now if we unwrap one side unilaterally, it should send close-notify
        # and raise WantReadError:
        with self.assertRaises(ssl.SSLWantReadError):
            client.unwrap()
        # But server.unwrap() does not raise, because it reads the client's
        # close-notify:
        s_in.write(c_out.read())
        server.unwrap()
        # And now that the client gets the server's close-notify, it doesn't
        # raise either.
        c_in.write(s_out.read())
        client.unwrap()
class SimpleBackgroundTests(unittest.TestCase):
    """Tests that connect to a simple server running in the background"""

    def setUp(self):
        # One TLS echo server per test; torn down via addCleanup so it is
        # stopped even when the test body fails.
        self.server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
        self.server_context.load_cert_chain(SIGNED_CERTFILE)
        server = ThreadedEchoServer(context=self.server_context)
        self.server_addr = (HOST, server.port)
        server.__enter__()
        self.addCleanup(server.__exit__, None, None, None)

    def test_connect(self):
        # Without verification, getpeercert() returns an empty dict.
        with test_wrap_socket(socket.socket(socket.AF_INET),
                              cert_reqs=ssl.CERT_NONE) as s:
            s.connect(self.server_addr)
            self.assertEqual({}, s.getpeercert())
            self.assertFalse(s.server_side)
        # this should succeed because we specify the root cert
        with test_wrap_socket(socket.socket(socket.AF_INET),
                              cert_reqs=ssl.CERT_REQUIRED,
                              ca_certs=SIGNING_CA) as s:
            s.connect(self.server_addr)
            self.assertTrue(s.getpeercert())
            self.assertFalse(s.server_side)

    def test_connect_fail(self):
        # This should fail because we have no verification certs. Connection
        # failure crashes ThreadedEchoServer, so run this in an independent
        # test method.
        s = test_wrap_socket(socket.socket(socket.AF_INET),
                             cert_reqs=ssl.CERT_REQUIRED)
        self.addCleanup(s.close)
        self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
                               s.connect, self.server_addr)

    def test_connect_ex(self):
        # Issue #11326: check connect_ex() implementation
        s = test_wrap_socket(socket.socket(socket.AF_INET),
                             cert_reqs=ssl.CERT_REQUIRED,
                             ca_certs=SIGNING_CA)
        self.addCleanup(s.close)
        self.assertEqual(0, s.connect_ex(self.server_addr))
        self.assertTrue(s.getpeercert())

    def test_non_blocking_connect_ex(self):
        # Issue #11326: non-blocking connect_ex() should allow handshake
        # to proceed after the socket gets ready.
        s = test_wrap_socket(socket.socket(socket.AF_INET),
                             cert_reqs=ssl.CERT_REQUIRED,
                             ca_certs=SIGNING_CA,
                             do_handshake_on_connect=False)
        self.addCleanup(s.close)
        s.setblocking(False)
        rc = s.connect_ex(self.server_addr)
        # EWOULDBLOCK under Windows, EINPROGRESS elsewhere
        self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
        # Wait for connect to finish
        select.select([], [s], [], 5.0)
        # Non-blocking handshake
        while True:
            try:
                s.do_handshake()
                break
            except ssl.SSLWantReadError:
                select.select([s], [], [], 5.0)
            except ssl.SSLWantWriteError:
                select.select([], [s], [], 5.0)
        # SSL established
        self.assertTrue(s.getpeercert())

    def test_connect_with_context(self):
        # Same as test_connect, but with a separately created context
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE
        with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
            s.connect(self.server_addr)
            self.assertEqual({}, s.getpeercert())
        # Same with a server hostname
        with ctx.wrap_socket(socket.socket(socket.AF_INET),
                             server_hostname="dummy") as s:
            s.connect(self.server_addr)
        ctx.verify_mode = ssl.CERT_REQUIRED
        # This should succeed because we specify the root cert
        ctx.load_verify_locations(SIGNING_CA)
        with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
            s.connect(self.server_addr)
            cert = s.getpeercert()
            self.assertTrue(cert)

    def test_connect_with_context_fail(self):
        # This should fail because we have no verification certs. Connection
        # failure crashes ThreadedEchoServer, so run this in an independent
        # test method.
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        s = ctx.wrap_socket(
            socket.socket(socket.AF_INET),
            server_hostname=SIGNED_CERTFILE_HOSTNAME
        )
        self.addCleanup(s.close)
        self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
                               s.connect, self.server_addr)

    def test_connect_capath(self):
        # Verify server certificates using the `capath` argument
        # NOTE: the subject hashing algorithm has been changed between
        # OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must
        # contain both versions of each certificate (same content, different
        # filename) for this test to be portable across OpenSSL releases.
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        ctx.load_verify_locations(capath=CAPATH)
        with ctx.wrap_socket(socket.socket(socket.AF_INET),
                             server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
            s.connect(self.server_addr)
            cert = s.getpeercert()
            self.assertTrue(cert)
        # Same with a bytes `capath` argument
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        ctx.load_verify_locations(capath=BYTES_CAPATH)
        with ctx.wrap_socket(socket.socket(socket.AF_INET),
                             server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
            s.connect(self.server_addr)
            cert = s.getpeercert()
            self.assertTrue(cert)

    def test_connect_cadata(self):
        # Verify server certificates from in-memory cadata, PEM then DER.
        with open(SIGNING_CA) as f:
            pem = f.read()
        der = ssl.PEM_cert_to_DER_cert(pem)
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        ctx.load_verify_locations(cadata=pem)
        with ctx.wrap_socket(socket.socket(socket.AF_INET),
                             server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
            s.connect(self.server_addr)
            cert = s.getpeercert()
            self.assertTrue(cert)
        # same with DER
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        ctx.load_verify_locations(cadata=der)
        with ctx.wrap_socket(socket.socket(socket.AF_INET),
                             server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
            s.connect(self.server_addr)
            cert = s.getpeercert()
            self.assertTrue(cert)

    @unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
    def test_makefile_close(self):
        # Issue #5238: creating a file-like object with makefile() shouldn't
        # delay closing the underlying "real socket" (here tested with its
        # file descriptor, hence skipping the test under Windows).
        ss = test_wrap_socket(socket.socket(socket.AF_INET))
        ss.connect(self.server_addr)
        fd = ss.fileno()
        f = ss.makefile()
        f.close()
        # The fd is still open
        os.read(fd, 0)
        # Closing the SSL socket should close the fd too
        ss.close()
        gc.collect()
        with self.assertRaises(OSError) as e:
            os.read(fd, 0)
        self.assertEqual(e.exception.errno, errno.EBADF)

    def test_non_blocking_handshake(self):
        # Drive the handshake by hand, selecting on the direction OpenSSL
        # reports it is blocked on.
        s = socket.socket(socket.AF_INET)
        s.connect(self.server_addr)
        s.setblocking(False)
        s = test_wrap_socket(s,
                             cert_reqs=ssl.CERT_NONE,
                             do_handshake_on_connect=False)
        self.addCleanup(s.close)
        count = 0
        while True:
            try:
                count += 1
                s.do_handshake()
                break
            except ssl.SSLWantReadError:
                select.select([s], [], [])
            except ssl.SSLWantWriteError:
                select.select([], [s], [])
        if support.verbose:
            sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)

    def test_get_server_certificate(self):
        # Delegate to the module-level helper against our local server.
        _test_get_server_certificate(self, *self.server_addr, cert=SIGNING_CA)

    def test_get_server_certificate_sni(self):
        host, port = self.server_addr
        server_names = []
        # We store servername_cb arguments to make sure they match the host
        def servername_cb(ssl_sock, server_name, initial_context):
            server_names.append(server_name)
        self.server_context.set_servername_callback(servername_cb)
        # Fetch the certificate twice: unvalidated, then validated.
        pem = ssl.get_server_certificate((host, port))
        if not pem:
            self.fail("No server certificate on %s:%s!" % (host, port))
        pem = ssl.get_server_certificate((host, port), ca_certs=SIGNING_CA)
        if not pem:
            self.fail("No server certificate on %s:%s!" % (host, port))
        if support.verbose:
            sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port, pem))
        # Both fetches must have presented the host via SNI.
        self.assertEqual(server_names, [host, host])

    def test_get_server_certificate_fail(self):
        # Connection failure crashes ThreadedEchoServer, so run this in an
        # independent test method
        _test_get_server_certificate_fail(self, *self.server_addr)

    def test_ciphers(self):
        with test_wrap_socket(socket.socket(socket.AF_INET),
                              cert_reqs=ssl.CERT_NONE, ciphers="ALL") as s:
            s.connect(self.server_addr)
        with test_wrap_socket(socket.socket(socket.AF_INET),
                              cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT") as s:
            s.connect(self.server_addr)
        # Error checking can happen at instantiation or when connecting
        with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
            with socket.socket(socket.AF_INET) as sock:
                s = test_wrap_socket(sock,
                                     cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
                s.connect(self.server_addr)

    def test_get_ca_certs_capath(self):
        # capath certs are loaded on request
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        ctx.load_verify_locations(capath=CAPATH)
        self.assertEqual(ctx.get_ca_certs(), [])
        with ctx.wrap_socket(socket.socket(socket.AF_INET),
                             server_hostname='localhost') as s:
            s.connect(self.server_addr)
            cert = s.getpeercert()
            self.assertTrue(cert)
        # After the handshake, the CA used for verification is visible.
        self.assertEqual(len(ctx.get_ca_certs()), 1)

    def test_context_setget(self):
        # Check that the context of a connected socket can be replaced.
        ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        ctx1.load_verify_locations(capath=CAPATH)
        ctx2 = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        ctx2.load_verify_locations(capath=CAPATH)
        s = socket.socket(socket.AF_INET)
        with ctx1.wrap_socket(s, server_hostname='localhost') as ss:
            ss.connect(self.server_addr)
            self.assertIs(ss.context, ctx1)
            self.assertIs(ss._sslobj.context, ctx1)
            ss.context = ctx2
            self.assertIs(ss.context, ctx2)
            self.assertIs(ss._sslobj.context, ctx2)

    def ssl_io_loop(self, sock, incoming, outgoing, func, *args, **kwargs):
        # A simple IO loop. Call func(*args) depending on the error we get
        # (WANT_READ or WANT_WRITE) move data between the socket and the BIOs.
        timeout = kwargs.get('timeout', support.SHORT_TIMEOUT)
        deadline = time.monotonic() + timeout
        count = 0
        while True:
            if time.monotonic() > deadline:
                self.fail("timeout")
            errno = None
            count += 1
            try:
                ret = func(*args)
            except ssl.SSLError as e:
                if e.errno not in (ssl.SSL_ERROR_WANT_READ,
                                   ssl.SSL_ERROR_WANT_WRITE):
                    raise
                errno = e.errno
            # Get any data from the outgoing BIO irrespective of any error, and
            # send it to the socket.
            buf = outgoing.read()
            sock.sendall(buf)
            # If there's no error, we're done. For WANT_READ, we need to get
            # data from the socket and put it in the incoming BIO.
            if errno is None:
                break
            elif errno == ssl.SSL_ERROR_WANT_READ:
                buf = sock.recv(32768)
                if buf:
                    incoming.write(buf)
                else:
                    incoming.write_eof()
        if support.verbose:
            sys.stdout.write("Needed %d calls to complete %s().\n"
                             % (count, func.__name__))
        return ret

    def test_bio_handshake(self):
        sock = socket.socket(socket.AF_INET)
        self.addCleanup(sock.close)
        sock.connect(self.server_addr)
        incoming = ssl.MemoryBIO()
        outgoing = ssl.MemoryBIO()
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        self.assertTrue(ctx.check_hostname)
        self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
        ctx.load_verify_locations(SIGNING_CA)
        sslobj = ctx.wrap_bio(incoming, outgoing, False,
                              SIGNED_CERTFILE_HOSTNAME)
        self.assertIs(sslobj._sslobj.owner, sslobj)
        # Before the handshake, connection metadata is unavailable.
        self.assertIsNone(sslobj.cipher())
        self.assertIsNone(sslobj.version())
        self.assertIsNotNone(sslobj.shared_ciphers())
        self.assertRaises(ValueError, sslobj.getpeercert)
        if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
            self.assertIsNone(sslobj.get_channel_binding('tls-unique'))
        self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
        # After the handshake, the same accessors report real values.
        self.assertTrue(sslobj.cipher())
        self.assertIsNotNone(sslobj.shared_ciphers())
        self.assertIsNotNone(sslobj.version())
        self.assertTrue(sslobj.getpeercert())
        if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
            self.assertTrue(sslobj.get_channel_binding('tls-unique'))
        try:
            self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
        except ssl.SSLSyscallError:
            # If the server shuts down the TCP connection without sending a
            # secure shutdown message, this is reported as SSL_ERROR_SYSCALL
            pass
        self.assertRaises(ssl.SSLError, sslobj.write, b'foo')

    def test_bio_read_write_data(self):
        # Echo one request through an SSLObject using memory BIOs.
        sock = socket.socket(socket.AF_INET)
        self.addCleanup(sock.close)
        sock.connect(self.server_addr)
        incoming = ssl.MemoryBIO()
        outgoing = ssl.MemoryBIO()
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE
        sslobj = ctx.wrap_bio(incoming, outgoing, False)
        self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
        req = b'FOO\n'
        self.ssl_io_loop(sock, incoming, outgoing, sslobj.write, req)
        buf = self.ssl_io_loop(sock, incoming, outgoing, sslobj.read, 1024)
        # The echo server sends b'FOO\n' back as b'foo\n' (see
        # ThreadedEchoServer for the exact echo behaviour).
        self.assertEqual(buf, b'foo\n')
        self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
class NetworkedTests(unittest.TestCase):
    """Tests that require real network access to a remote TLS host."""

    def test_timeout_connect_ex(self):
        # Issue #12065: on a timeout, connect_ex() should return the original
        # errno (mimicking the behaviour of non-SSL sockets).
        with socket_helper.transient_internet(REMOTE_HOST):
            s = test_wrap_socket(socket.socket(socket.AF_INET),
                                 cert_reqs=ssl.CERT_REQUIRED,
                                 do_handshake_on_connect=False)
            self.addCleanup(s.close)
            # An absurdly small timeout forces the connect to time out.
            s.settimeout(0.0000001)
            rc = s.connect_ex((REMOTE_HOST, 443))
            if rc == 0:
                self.skipTest("REMOTE_HOST responded too quickly")
            elif rc == errno.ENETUNREACH:
                self.skipTest("Network unreachable.")
            self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))

    @unittest.skipUnless(socket_helper.IPV6_ENABLED, 'Needs IPv6')
    def test_get_server_certificate_ipv6(self):
        # Certificate fetching must also work against an IPv6-reachable host.
        with socket_helper.transient_internet('ipv6.google.com'):
            _test_get_server_certificate(self, 'ipv6.google.com', 443)
            _test_get_server_certificate_fail(self, 'ipv6.google.com', 443)
def _test_get_server_certificate(test, host, port, cert=None):
    """Fetch (host, port)'s certificate, unvalidated and validated vs *cert*.

    *test* is the TestCase driving the check; failures are reported on it.
    """
    # Unvalidated fetch must return a non-empty PEM blob.
    pem = ssl.get_server_certificate((host, port))
    if not pem:
        test.fail("No server certificate on %s:%s!" % (host, port))
    # The same fetch validated against the given CA must also succeed.
    pem = ssl.get_server_certificate((host, port), ca_certs=cert)
    if not pem:
        test.fail("No server certificate on %s:%s!" % (host, port))
    if support.verbose:
        sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port, pem))
def _test_get_server_certificate_fail(test, host, port):
    """Validating (host, port) against the wrong CA must raise SSLError.

    *test* is the TestCase driving the check; if validation unexpectedly
    succeeds the failure is reported on it.
    """
    try:
        pem = ssl.get_server_certificate((host, port), ca_certs=CERTFILE)
    except ssl.SSLError as exc:
        # Expected: CERTFILE did not sign the server's certificate.
        if support.verbose:
            sys.stdout.write("%s\n" % exc)
    else:
        test.fail("Got server certificate %s for %s:%s!" % (pem, host, port))
from test.ssl_servers import make_https_server
class ThreadedEchoServer(threading.Thread):
    # Threaded echo server used by most networked tests in this file.
    # Each accepted connection is serviced by a ConnectionHandler thread;
    # per-connection results (selected ALPN protocol, shared ciphers,
    # handshake errors) are accumulated on the server instance so the
    # test can inspect them after shutdown.

    class ConnectionHandler(threading.Thread):

        """A mildly complicated class, because we want it to work both
        with and without the SSL wrapper around the socket connection, so
        that we can test the STARTTLS functionality."""

        def __init__(self, server, connsock, addr):
            self.server = server
            self.running = False
            self.sock = connsock
            self.addr = addr
            self.sock.setblocking(True)
            # None while the connection is plaintext; set by wrap_conn().
            self.sslconn = None
            threading.Thread.__init__(self)
            self.daemon = True

        def wrap_conn(self):
            # Wrap the plaintext socket in TLS (server side).  Returns True
            # on success; on failure records the error string on the server
            # and returns False.
            try:
                self.sslconn = self.server.context.wrap_socket(
                    self.sock, server_side=True)
                self.server.selected_alpn_protocols.append(self.sslconn.selected_alpn_protocol())
            except (ConnectionResetError, BrokenPipeError, ConnectionAbortedError) as e:
                # We treat ConnectionResetError as though it were an
                # SSLError - OpenSSL on Ubuntu abruptly closes the
                # connection when asked to use an unsupported protocol.
                #
                # BrokenPipeError is raised in TLS 1.3 mode, when OpenSSL
                # tries to send session tickets after handshake.
                # https://github.com/openssl/openssl/issues/6342
                #
                # ConnectionAbortedError is raised in TLS 1.3 mode, when OpenSSL
                # tries to send session tickets after handshake when using WinSock.
                self.server.conn_errors.append(str(e))
                if self.server.chatty:
                    handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
                self.running = False
                self.close()
                return False
            except (ssl.SSLError, OSError) as e:
                # OSError may occur with wrong protocols, e.g. both
                # sides use PROTOCOL_TLS_SERVER.
                #
                # XXX Various errors can have happened here, for example
                # a mismatching protocol version, an invalid certificate,
                # or a low-level bug. This should be made more discriminating.
                #
                # bpo-31323: Store the exception as string to prevent
                # a reference leak: server -> conn_errors -> exception
                # -> traceback -> self (ConnectionHandler) -> server
                self.server.conn_errors.append(str(e))
                if self.server.chatty:
                    handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
                self.running = False
                # Unlike the branch above, a hard handshake failure also
                # stops the whole server.
                self.server.stop()
                self.close()
                return False
            else:
                self.server.shared_ciphers.append(self.sslconn.shared_ciphers())
                if self.server.context.verify_mode == ssl.CERT_REQUIRED:
                    cert = self.sslconn.getpeercert()
                    if support.verbose and self.server.chatty:
                        sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
                    cert_binary = self.sslconn.getpeercert(True)
                    if support.verbose and self.server.chatty:
                        sys.stdout.write(" cert binary is " + str(len(cert_binary)) + " bytes\n")
                cipher = self.sslconn.cipher()
                if support.verbose and self.server.chatty:
                    sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
                return True

        def read(self):
            # Read from the TLS connection when wrapped, else the raw socket.
            if self.sslconn:
                return self.sslconn.read()
            else:
                return self.sock.recv(1024)

        def write(self, bytes):
            # Mirror of read(): write via TLS when wrapped.
            if self.sslconn:
                return self.sslconn.write(bytes)
            else:
                return self.sock.send(bytes)

        def close(self):
            if self.sslconn:
                self.sslconn.close()
            else:
                self.sock.close()

        def run(self):
            # Per-connection command loop.  Besides echoing messages back
            # lower-cased, it understands the control messages used by the
            # tests: 'over', 'STARTTLS', 'ENDTLS', 'CB tls-unique', 'PHA',
            # 'HASCERT' and 'GETCERT'.
            self.running = True
            if not self.server.starttls_server:
                # Non-STARTTLS servers negotiate TLS immediately.
                if not self.wrap_conn():
                    return
            while self.running:
                try:
                    msg = self.read()
                    stripped = msg.strip()
                    if not stripped:
                        # eof, so quit this handler
                        self.running = False
                        try:
                            self.sock = self.sslconn.unwrap()
                        except OSError:
                            # Many tests shut the TCP connection down
                            # without an SSL shutdown. This causes
                            # unwrap() to raise OSError with errno=0!
                            pass
                        else:
                            self.sslconn = None
                        self.close()
                    elif stripped == b'over':
                        if support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: client closed connection\n")
                        self.close()
                        return
                    elif (self.server.starttls_server and
                          stripped == b'STARTTLS'):
                        if support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
                        self.write(b"OK\n")
                        if not self.wrap_conn():
                            return
                    elif (self.server.starttls_server and self.sslconn
                          and stripped == b'ENDTLS'):
                        if support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
                        self.write(b"OK\n")
                        self.sock = self.sslconn.unwrap()
                        self.sslconn = None
                        if support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: connection is now unencrypted...\n")
                    elif stripped == b'CB tls-unique':
                        if support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: read CB tls-unique from client, sending our CB data...\n")
                        data = self.sslconn.get_channel_binding("tls-unique")
                        self.write(repr(data).encode("us-ascii") + b"\n")
                    elif stripped == b'PHA':
                        if support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: initiating post handshake auth\n")
                        try:
                            self.sslconn.verify_client_post_handshake()
                        except ssl.SSLError as e:
                            self.write(repr(e).encode("us-ascii") + b"\n")
                        else:
                            self.write(b"OK\n")
                    elif stripped == b'HASCERT':
                        if self.sslconn.getpeercert() is not None:
                            self.write(b'TRUE\n')
                        else:
                            self.write(b'FALSE\n')
                    elif stripped == b'GETCERT':
                        cert = self.sslconn.getpeercert()
                        self.write(repr(cert).encode("us-ascii") + b"\n")
                    else:
                        # Default behaviour: echo the message back lower-cased.
                        if (support.verbose and
                            self.server.connectionchatty):
                            ctype = (self.sslconn and "encrypted") or "unencrypted"
                            sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n"
                                             % (msg, ctype, msg.lower(), ctype))
                        self.write(msg.lower())
                except (ConnectionResetError, ConnectionAbortedError):
                    # XXX: OpenSSL 1.1.1 sometimes raises ConnectionResetError
                    # when connection is not shut down gracefully.
                    if self.server.chatty and support.verbose:
                        sys.stdout.write(
                            " Connection reset by peer: {}\n".format(
                                self.addr)
                        )
                    self.close()
                    self.running = False
                except ssl.SSLError as err:
                    # On Windows sometimes test_pha_required_nocert receives the
                    # PEER_DID_NOT_RETURN_A_CERTIFICATE exception
                    # before the 'tlsv13 alert certificate required' exception.
                    # If the server is stopped when PEER_DID_NOT_RETURN_A_CERTIFICATE
                    # is received test_pha_required_nocert fails with ConnectionResetError
                    # because the underlying socket is closed
                    if 'PEER_DID_NOT_RETURN_A_CERTIFICATE' == err.reason:
                        if self.server.chatty and support.verbose:
                            sys.stdout.write(err.args[1])
                        # test_pha_required_nocert is expecting this exception
                        raise ssl.SSLError('tlsv13 alert certificate required')
                    # NOTE(review): any other SSLError falls through here and is
                    # silently swallowed, with the loop continuing — confirm that
                    # is intentional.
                except OSError:
                    if self.server.chatty:
                        handle_error("Test server failure:\n")
                    self.close()
                    self.running = False
                    # normally, we'd just stop here, but for the test
                    # harness, we want to stop the server
                    self.server.stop()

    def __init__(self, certificate=None, ssl_version=None,
                 certreqs=None, cacerts=None,
                 chatty=True, connectionchatty=False, starttls_server=False,
                 alpn_protocols=None,
                 ciphers=None, context=None):
        # Either adopt the provided *context* as-is, or build one from the
        # individual keyword arguments.
        if context:
            self.context = context
        else:
            self.context = ssl.SSLContext(ssl_version
                                          if ssl_version is not None
                                          else ssl.PROTOCOL_TLS_SERVER)
            self.context.verify_mode = (certreqs if certreqs is not None
                                        else ssl.CERT_NONE)
            if cacerts:
                self.context.load_verify_locations(cacerts)
            if certificate:
                self.context.load_cert_chain(certificate)
            if alpn_protocols:
                self.context.set_alpn_protocols(alpn_protocols)
            if ciphers:
                self.context.set_ciphers(ciphers)
        self.chatty = chatty
        self.connectionchatty = connectionchatty
        self.starttls_server = starttls_server
        self.sock = socket.socket()
        self.port = socket_helper.bind_port(self.sock)
        self.flag = None
        self.active = False
        # Per-connection results, appended by ConnectionHandler threads.
        self.selected_alpn_protocols = []
        self.shared_ciphers = []
        self.conn_errors = []
        threading.Thread.__init__(self)
        self.daemon = True

    def __enter__(self):
        # Start the server thread and block until it is accepting.
        self.start(threading.Event())
        self.flag.wait()
        return self

    def __exit__(self, *args):
        self.stop()
        self.join()

    def start(self, flag=None):
        # *flag* is set by run() once the server is listening.
        self.flag = flag
        threading.Thread.start(self)

    def run(self):
        # Accept loop.  The short timeout makes stop() take effect promptly.
        self.sock.settimeout(0.05)
        self.sock.listen()
        self.active = True
        if self.flag:
            # signal an event
            self.flag.set()
        while self.active:
            try:
                newconn, connaddr = self.sock.accept()
                if support.verbose and self.chatty:
                    sys.stdout.write(' server: new connection from '
                                     + repr(connaddr) + '\n')
                handler = self.ConnectionHandler(self, newconn, connaddr)
                handler.start()
                handler.join()
            except TimeoutError:
                pass
            except KeyboardInterrupt:
                self.stop()
            except BaseException as e:
                if support.verbose and self.chatty:
                    sys.stdout.write(
                        ' connection handling failed: ' + repr(e) + '\n')
        self.sock.close()

    def stop(self):
        # Ends the accept loop; run() then closes the listening socket.
        self.active = False
class AsyncoreEchoServer(threading.Thread):
    # Thread wrapper around an asyncore-driven echo server; used to
    # exercise non-blocking handshakes through the asyncore event loop.

    # this one's based on asyncore.dispatcher

    class EchoServer (asyncore.dispatcher):

        class ConnectionHandler(asyncore.dispatcher_with_send):

            def __init__(self, conn, certfile):
                # Wrap the accepted socket immediately; the handshake is
                # then advanced incrementally from the event loop.
                self.socket = test_wrap_socket(conn, server_side=True,
                                               certfile=certfile,
                                               do_handshake_on_connect=False)
                asyncore.dispatcher_with_send.__init__(self, self.socket)
                self._ssl_accepting = True
                self._do_ssl_handshake()

            def readable(self):
                if isinstance(self.socket, ssl.SSLSocket):
                    # Drain TLS records already buffered inside OpenSSL;
                    # select() cannot see those.
                    while self.socket.pending() > 0:
                        self.handle_read_event()
                return True

            def _do_ssl_handshake(self):
                # Advance the non-blocking handshake by one step.
                try:
                    self.socket.do_handshake()
                except (ssl.SSLWantReadError, ssl.SSLWantWriteError):
                    # Not enough data yet; retry on the next event.
                    return
                except ssl.SSLEOFError:
                    return self.handle_close()
                except ssl.SSLError:
                    raise
                except OSError as err:
                    if err.args[0] == errno.ECONNABORTED:
                        return self.handle_close()
                else:
                    # Handshake complete; switch to echo mode.
                    self._ssl_accepting = False

            def handle_read(self):
                if self._ssl_accepting:
                    self._do_ssl_handshake()
                else:
                    data = self.recv(1024)
                    if support.verbose:
                        sys.stdout.write(" server: read %s from client\n" % repr(data))
                    if not data:
                        self.close()
                    else:
                        # Echo back lower-cased, like ThreadedEchoServer.
                        self.send(data.lower())

            def handle_close(self):
                self.close()
                if support.verbose:
                    sys.stdout.write(" server: closed connection %s\n" % self.socket)

            def handle_error(self):
                raise

        def __init__(self, certfile):
            self.certfile = certfile
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.port = socket_helper.bind_port(sock, '')
            asyncore.dispatcher.__init__(self, sock)
            self.listen(5)

        def handle_accepted(self, sock_obj, addr):
            if support.verbose:
                sys.stdout.write(" server: new connection from %s:%s\n" %addr)
            self.ConnectionHandler(sock_obj, self.certfile)

        def handle_error(self):
            raise

    def __init__(self, certfile):
        self.flag = None
        self.active = False
        self.server = self.EchoServer(certfile)
        self.port = self.server.port
        threading.Thread.__init__(self)
        self.daemon = True

    def __str__(self):
        return "<%s %s>" % (self.__class__.__name__, self.server)

    def __enter__(self):
        # Start the event-loop thread and wait until it is running.
        self.start(threading.Event())
        self.flag.wait()
        return self

    def __exit__(self, *args):
        if support.verbose:
            sys.stdout.write(" cleanup: stopping server.\n")
        self.stop()
        if support.verbose:
            sys.stdout.write(" cleanup: joining server thread.\n")
        self.join()
        if support.verbose:
            sys.stdout.write(" cleanup: successfully joined.\n")
        # make sure that ConnectionHandler is removed from socket_map
        asyncore.close_all(ignore_all=True)

    def start (self, flag=None):
        self.flag = flag
        threading.Thread.start(self)

    def run(self):
        self.active = True
        if self.flag:
            self.flag.set()
        while self.active:
            try:
                asyncore.loop(1)
            except:
                # NOTE(review): bare except swallows *everything* raised by
                # asyncore.loop (even SystemExit) — presumably a deliberate
                # best-effort loop for the test harness; confirm.
                pass

    def stop(self):
        self.active = False
        self.server.close()
def server_params_test(client_context, server_context, indata=b"FOO\n",
                       chatty=True, connectionchatty=False, sni_name=None,
                       session=None):
    """
    Launch a server, connect a client to it and try various reads
    and writes.

    Returns a dict of facts about the established connection
    (cipher, peer cert, ALPN protocol, TLS version, session info).
    """
    stats = {}
    # Server-side connection chatter is always off; the *connectionchatty*
    # parameter only controls client-side logging below.
    server = ThreadedEchoServer(context=server_context,
                                chatty=chatty,
                                connectionchatty=False)
    with server:
        with client_context.wrap_socket(socket.socket(),
                server_hostname=sni_name, session=session) as s:
            s.connect((HOST, server.port))
            # Exercise the write path with each bytes-like flavour.
            for arg in [indata, bytearray(indata), memoryview(indata)]:
                if connectionchatty:
                    if support.verbose:
                        sys.stdout.write(
                            " client: sending %r...\n" % indata)
                s.write(arg)
                outdata = s.read()
                if connectionchatty:
                    if support.verbose:
                        sys.stdout.write(" client: read %r\n" % outdata)
                if outdata != indata.lower():
                    # The echo server lower-cases what it receives.
                    raise AssertionError(
                        "bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
                        % (outdata[:20], len(outdata),
                           indata[:20].lower(), len(indata)))
            s.write(b"over\n")
            if connectionchatty:
                if support.verbose:
                    sys.stdout.write(" client: closing connection.\n")
            # Capture connection facts before the socket is closed.
            stats.update({
                'compression': s.compression(),
                'cipher': s.cipher(),
                'peercert': s.getpeercert(),
                'client_alpn_protocol': s.selected_alpn_protocol(),
                'version': s.version(),
                'session_reused': s.session_reused,
                'session': s.session,
            })
            s.close()
        # Server-side observations, recorded by the ConnectionHandler.
        stats['server_alpn_protocols'] = server.selected_alpn_protocols
        stats['server_shared_ciphers'] = server.shared_ciphers
    return stats
def try_protocol_combo(server_protocol, client_protocol, expect_success,
                       certsreqs=None, server_options=0, client_options=0):
    """
    Try to SSL-connect using *client_protocol* to *server_protocol*.
    If *expect_success* is true, assert that the connection succeeds,
    if it's false, assert that the connection fails.
    Also, if *expect_success* is a string, assert that it is the protocol
    version actually used by the connection.
    """
    if certsreqs is None:
        certsreqs = ssl.CERT_NONE
    certtype = {
        ssl.CERT_NONE: "CERT_NONE",
        ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
        ssl.CERT_REQUIRED: "CERT_REQUIRED",
    }[certsreqs]
    if support.verbose:
        # Curly braces in the verbose output mark expected failures.
        formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n"
        sys.stdout.write(formatstr %
                         (ssl.get_protocol_name(client_protocol),
                          ssl.get_protocol_name(server_protocol),
                          certtype))
    with warnings_helper.check_warnings():
        # ignore Deprecation warnings
        client_context = ssl.SSLContext(client_protocol)
        client_context.options |= client_options
        server_context = ssl.SSLContext(server_protocol)
        server_context.options |= server_options

    min_version = PROTOCOL_TO_TLS_VERSION.get(client_protocol, None)
    if (min_version is not None
        # SSLContext.minimum_version is only available on recent OpenSSL
        # (setter added in OpenSSL 1.1.0, getter added in OpenSSL 1.1.1)
        and hasattr(server_context, 'minimum_version')
        and server_protocol == ssl.PROTOCOL_TLS
        and server_context.minimum_version > min_version
    ):
        # If OpenSSL configuration is strict and requires more recent TLS
        # version, we have to change the minimum to test old TLS versions.
        with warnings_helper.check_warnings():
            server_context.minimum_version = min_version

    # NOTE: we must enable "ALL" ciphers on the client, otherwise an
    # SSLv23 client will send an SSLv3 hello (rather than SSLv2)
    # starting from OpenSSL 1.0.0 (see issue #8322).
    if client_context.protocol == ssl.PROTOCOL_TLS:
        client_context.set_ciphers("ALL")

    seclevel_workaround(server_context, client_context)

    # Both sides get the same cert chain and verification settings.
    for ctx in (client_context, server_context):
        ctx.verify_mode = certsreqs
        ctx.load_cert_chain(SIGNED_CERTFILE)
        ctx.load_verify_locations(SIGNING_CA)
    try:
        stats = server_params_test(client_context, server_context,
                                   chatty=False, connectionchatty=False)
    # Protocol mismatch can result in either an SSLError, or a
    # "Connection reset by peer" error.
    except ssl.SSLError:
        if expect_success:
            raise
    except OSError as e:
        if expect_success or e.errno != errno.ECONNRESET:
            raise
    else:
        if not expect_success:
            raise AssertionError(
                "Client protocol %s succeeded with server protocol %s!"
                % (ssl.get_protocol_name(client_protocol),
                   ssl.get_protocol_name(server_protocol)))
        elif (expect_success is not True
              and expect_success != stats['version']):
            # A string *expect_success* pins the exact negotiated version.
            raise AssertionError("version mismatch: expected %r, got %r"
                                 % (expect_success, stats['version']))
class ThreadedTests(unittest.TestCase):
def test_echo(self):
    """Basic test of an SSL client connecting to a server"""
    if support.verbose:
        sys.stdout.write("\n")

    client_context, server_context, hostname = testing_context()

    # Happy path: TLS_CLIENT context on the client, TLS_SERVER on the server.
    with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_SERVER):
        server_params_test(client_context=client_context,
                           server_context=server_context,
                           chatty=True, connectionchatty=True,
                           sni_name=hostname)

    client_context.check_hostname = False

    # The remaining sub-tests use mismatched/duplicated context roles and
    # must all fail inside OpenSSL with the same error.
    with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_CLIENT):
        with self.assertRaises(ssl.SSLError) as e:
            server_params_test(client_context=server_context,
                               server_context=client_context,
                               chatty=True, connectionchatty=True,
                               sni_name=hostname)
        self.assertIn('called a function you should not call',
                      str(e.exception))

    with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_SERVER):
        with self.assertRaises(ssl.SSLError) as e:
            server_params_test(client_context=server_context,
                               server_context=server_context,
                               chatty=True, connectionchatty=True)
        self.assertIn('called a function you should not call',
                      str(e.exception))

    with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_CLIENT):
        with self.assertRaises(ssl.SSLError) as e:
            # Bug fix: this sub-test previously passed
            # client_context=server_context, server_context=client_context,
            # which re-ran the SERVER->CLIENT case instead of the labeled
            # CLIENT/CLIENT combination.
            server_params_test(client_context=client_context,
                               server_context=client_context,
                               chatty=True, connectionchatty=True)
        self.assertIn('called a function you should not call',
                      str(e.exception))
def test_getpeercert(self):
    # getpeercert() must raise ValueError before the handshake and
    # return a populated dict with validity dates afterwards.
    if support.verbose:
        sys.stdout.write("\n")

    client_context, server_context, hostname = testing_context()
    server = ThreadedEchoServer(context=server_context, chatty=False)
    with server:
        with client_context.wrap_socket(socket.socket(),
                                        do_handshake_on_connect=False,
                                        server_hostname=hostname) as s:
            s.connect((HOST, server.port))
            # getpeercert() raise ValueError while the handshake isn't
            # done.
            with self.assertRaises(ValueError):
                s.getpeercert()
            s.do_handshake()
            cert = s.getpeercert()
            self.assertTrue(cert, "Can't get peer certificate.")
            cipher = s.cipher()
            if support.verbose:
                sys.stdout.write(pprint.pformat(cert) + '\n')
                sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
            if 'subject' not in cert:
                self.fail("No subject field in certificate: %s." %
                          pprint.pformat(cert))
            # The subject is a tuple of RDN tuples; the test cert's O field
            # must be present verbatim.
            if ((('organizationName', 'Python Software Foundation'),)
                not in cert['subject']):
                self.fail(
                    "Missing or invalid 'organizationName' field in certificate subject; "
                    "should be 'Python Software Foundation'.")
            self.assertIn('notBefore', cert)
            self.assertIn('notAfter', cert)
            # Sanity-check the validity interval ordering.
            before = ssl.cert_time_to_seconds(cert['notBefore'])
            after = ssl.cert_time_to_seconds(cert['notAfter'])
            self.assertLess(before, after)
def test_crl_check(self):
    # Revocation checking: VERIFY_DEFAULT passes, VERIFY_CRL_CHECK_LEAF
    # fails without a CRL loaded, and passes again once the CA-signed
    # CRL file is loaded.
    if support.verbose:
        sys.stdout.write("\n")

    client_context, server_context, hostname = testing_context()

    tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
    self.assertEqual(client_context.verify_flags, ssl.VERIFY_DEFAULT | tf)

    # VERIFY_DEFAULT should pass
    server = ThreadedEchoServer(context=server_context, chatty=True)
    with server:
        with client_context.wrap_socket(socket.socket(),
                                        server_hostname=hostname) as s:
            s.connect((HOST, server.port))
            cert = s.getpeercert()
            self.assertTrue(cert, "Can't get peer certificate.")

    # VERIFY_CRL_CHECK_LEAF without a loaded CRL file fails
    client_context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF

    server = ThreadedEchoServer(context=server_context, chatty=True)
    with server:
        with client_context.wrap_socket(socket.socket(),
                                        server_hostname=hostname) as s:
            with self.assertRaisesRegex(ssl.SSLError,
                                        "certificate verify failed"):
                s.connect((HOST, server.port))

    # now load a CRL file. The CRL file is signed by the CA.
    client_context.load_verify_locations(CRLFILE)

    server = ThreadedEchoServer(context=server_context, chatty=True)
    with server:
        with client_context.wrap_socket(socket.socket(),
                                        server_hostname=hostname) as s:
            s.connect((HOST, server.port))
            cert = s.getpeercert()
            self.assertTrue(cert, "Can't get peer certificate.")
def test_check_hostname(self):
    # With check_hostname enabled: the matching hostname verifies, a
    # wrong hostname raises CertificateError during the handshake, and
    # omitting server_hostname is rejected up front by wrap_socket().
    if support.verbose:
        sys.stdout.write("\n")

    client_context, server_context, hostname = testing_context()

    # correct hostname should verify
    server = ThreadedEchoServer(context=server_context, chatty=True)
    with server:
        with client_context.wrap_socket(socket.socket(),
                                        server_hostname=hostname) as s:
            s.connect((HOST, server.port))
            cert = s.getpeercert()
            self.assertTrue(cert, "Can't get peer certificate.")

    # incorrect hostname should raise an exception
    server = ThreadedEchoServer(context=server_context, chatty=True)
    with server:
        with client_context.wrap_socket(socket.socket(),
                                        server_hostname="invalid") as s:
            with self.assertRaisesRegex(
                    ssl.CertificateError,
                    "Hostname mismatch, certificate is not valid for 'invalid'."):
                s.connect((HOST, server.port))

    # missing server_hostname arg should cause an exception, too
    server = ThreadedEchoServer(context=server_context, chatty=True)
    with server:
        with socket.socket() as s:
            with self.assertRaisesRegex(ValueError,
                                        "check_hostname requires server_hostname"):
                client_context.wrap_socket(s)
@unittest.skipUnless(
    ssl.HAS_NEVER_CHECK_COMMON_NAME, "test requires hostname_checks_common_name"
)
def test_hostname_checks_common_name(self):
    """Disabling hostname_checks_common_name skips the CN fallback.

    A cert with a SAN still verifies; a SAN-less cert must then fail
    hostname verification.
    """
    client_context, server_context, hostname = testing_context()
    # Bug fix: this was a bare `assert`, which is stripped when running
    # under `python -O`; use a real unittest assertion instead.
    self.assertTrue(client_context.hostname_checks_common_name)
    client_context.hostname_checks_common_name = False

    # default cert has a SAN
    server = ThreadedEchoServer(context=server_context, chatty=True)
    with server:
        with client_context.wrap_socket(socket.socket(),
                                        server_hostname=hostname) as s:
            s.connect((HOST, server.port))

    # With a SAN-less cert and common-name checks disabled, hostname
    # verification must fail.
    client_context, server_context, hostname = testing_context(NOSANFILE)
    client_context.hostname_checks_common_name = False
    server = ThreadedEchoServer(context=server_context, chatty=True)
    with server:
        with client_context.wrap_socket(socket.socket(),
                                        server_hostname=hostname) as s:
            with self.assertRaises(ssl.SSLCertVerificationError):
                s.connect((HOST, server.port))
def test_ecc_cert(self):
    """An ECC-only client negotiates and verifies an ECC server cert."""
    client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    client_context.load_verify_locations(SIGNING_CA)
    # Restrict TLS <= 1.2 cipher selection to ECDSA-authenticated suites.
    client_context.set_ciphers('ECDHE:ECDSA:!NULL:!aRSA')
    hostname = SIGNED_CERTFILE_ECC_HOSTNAME

    server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    # load ECC cert
    server_context.load_cert_chain(SIGNED_CERTFILE_ECC)

    # correct hostname should verify
    server = ThreadedEchoServer(context=server_context, chatty=True)
    with server:
        with client_context.wrap_socket(socket.socket(),
                                        server_hostname=hostname) as s:
            s.connect((HOST, server.port))
            cert = s.getpeercert()
            self.assertTrue(cert, "Can't get peer certificate.")
            # Bug fix: this previously used the two-argument form of
            # assertTrue (the second argument is only a failure message),
            # so the cipher check could never fail.
            if s.version() == 'TLSv1.3':
                # TLS 1.3 cipher names do not encode kx/auth algorithms.
                self.assertTrue(s.cipher()[0].startswith('TLS_'))
            else:
                self.assertEqual(s.cipher()[0].split('-')[:2],
                                 ['ECDHE', 'ECDSA'])
def test_dual_rsa_ecc(self):
    """With both RSA and ECC certs loaded, an ECDSA-only client is
    served the ECC certificate."""
    client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    client_context.load_verify_locations(SIGNING_CA)
    # TODO: fix TLSv1.3 once SSLContext can restrict signature
    # algorithms.
    client_context.options |= ssl.OP_NO_TLSv1_3
    # only ECDSA certs
    client_context.set_ciphers('ECDHE:ECDSA:!NULL:!aRSA')
    hostname = SIGNED_CERTFILE_ECC_HOSTNAME

    server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    # load ECC and RSA key/cert pairs
    server_context.load_cert_chain(SIGNED_CERTFILE_ECC)
    server_context.load_cert_chain(SIGNED_CERTFILE)

    # correct hostname should verify
    server = ThreadedEchoServer(context=server_context, chatty=True)
    with server:
        with client_context.wrap_socket(socket.socket(),
                                        server_hostname=hostname) as s:
            s.connect((HOST, server.port))
            cert = s.getpeercert()
            self.assertTrue(cert, "Can't get peer certificate.")
            # Bug fix: this previously used assertTrue(x, msg), which is
            # vacuously true.  TLS 1.3 is disabled above, so the negotiated
            # cipher always has the 'ECDHE-ECDSA-...' form.
            self.assertEqual(s.cipher()[0].split('-')[:2],
                             ['ECDHE', 'ECDSA'])
def test_check_hostname_idn(self):
    # Hostname matching with internationalized (IDNA) names: every
    # spelling of the hostname must normalize to the same A-label and
    # verify; an unrelated hostname must fail.
    if support.verbose:
        sys.stdout.write("\n")

    server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    server_context.load_cert_chain(IDNSANSFILE)

    context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    context.verify_mode = ssl.CERT_REQUIRED
    context.check_hostname = True
    context.load_verify_locations(SIGNING_CA)

    # correct hostname should verify, when specified in several
    # different ways
    idn_hostnames = [
        # (server_hostname as passed in, expected normalized A-label)
        ('könig.idn.pythontest.net',
         'xn--knig-5qa.idn.pythontest.net'),
        ('xn--knig-5qa.idn.pythontest.net',
         'xn--knig-5qa.idn.pythontest.net'),
        (b'xn--knig-5qa.idn.pythontest.net',
         'xn--knig-5qa.idn.pythontest.net'),

        ('königsgäßchen.idna2003.pythontest.net',
         'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
        ('xn--knigsgsschen-lcb0w.idna2003.pythontest.net',
         'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
        (b'xn--knigsgsschen-lcb0w.idna2003.pythontest.net',
         'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),

        # ('königsgäßchen.idna2008.pythontest.net',
        #  'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
        ('xn--knigsgchen-b4a3dun.idna2008.pythontest.net',
         'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
        (b'xn--knigsgchen-b4a3dun.idna2008.pythontest.net',
         'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
    ]
    for server_hostname, expected_hostname in idn_hostnames:
        server = ThreadedEchoServer(context=server_context, chatty=True)
        with server:
            with context.wrap_socket(socket.socket(),
                                     server_hostname=server_hostname) as s:
                # The normalized name must be visible both before and
                # after the handshake.
                self.assertEqual(s.server_hostname, expected_hostname)
                s.connect((HOST, server.port))
                cert = s.getpeercert()
                self.assertEqual(s.server_hostname, expected_hostname)
                self.assertTrue(cert, "Can't get peer certificate.")

    # incorrect hostname should raise an exception
    server = ThreadedEchoServer(context=server_context, chatty=True)
    with server:
        with context.wrap_socket(socket.socket(),
                                 server_hostname="python.example.org") as s:
            with self.assertRaises(ssl.CertificateError):
                s.connect((HOST, server.port))
def test_wrong_cert_tls12(self):
    """Connecting when the server rejects the client's certificate

    Launch a server with CERT_REQUIRED, and check that trying to
    connect to it with a wrong client certificate fails.
    """
    client_context, server_context, hostname = testing_context()
    # load client cert that is not signed by trusted CA
    client_context.load_cert_chain(CERTFILE)
    # require TLS client authentication
    server_context.verify_mode = ssl.CERT_REQUIRED
    # TLS 1.3 has different handshake
    client_context.maximum_version = ssl.TLSVersion.TLSv1_2

    server = ThreadedEchoServer(
        context=server_context, chatty=True, connectionchatty=True,
    )
    with server, \
            client_context.wrap_socket(socket.socket(),
                                       server_hostname=hostname) as s:
        try:
            # Expect either an SSL error about the server rejecting
            # the connection, or a low-level connection reset (which
            # sometimes happens on Windows)
            s.connect((HOST, server.port))
        except ssl.SSLError as e:
            if support.verbose:
                sys.stdout.write("\nSSLError is %r\n" % e)
        except OSError as e:
            if e.errno != errno.ECONNRESET:
                raise
            if support.verbose:
                sys.stdout.write("\nsocket.error is %r\n" % e)
        else:
            self.fail("Use of invalid cert should have failed!")
@requires_tls_version('TLSv1_3')
def test_wrong_cert_tls13(self):
    # TLS 1.3 counterpart of test_wrong_cert_tls12: client cert rejection
    # only surfaces after the handshake, on the first read/write.
    client_context, server_context, hostname = testing_context()
    # load client cert that is not signed by trusted CA
    client_context.load_cert_chain(CERTFILE)
    server_context.verify_mode = ssl.CERT_REQUIRED
    server_context.minimum_version = ssl.TLSVersion.TLSv1_3
    client_context.minimum_version = ssl.TLSVersion.TLSv1_3

    server = ThreadedEchoServer(
        context=server_context, chatty=True, connectionchatty=True,
    )
    with server, \
            client_context.wrap_socket(socket.socket(),
                                       server_hostname=hostname) as s:
        # TLS 1.3 perform client cert exchange after handshake
        s.connect((HOST, server.port))
        try:
            s.write(b'data')
            s.read(4)
        except ssl.SSLError as e:
            if support.verbose:
                sys.stdout.write("\nSSLError is %r\n" % e)
        except OSError as e:
            if e.errno != errno.ECONNRESET:
                raise
            if support.verbose:
                sys.stdout.write("\nsocket.error is %r\n" % e)
        else:
            self.fail("Use of invalid cert should have failed!")
def test_rude_shutdown(self):
    """A brutal shutdown of an SSL server should raise an OSError
    in the client when attempting handshake.
    """
    ready = threading.Event()
    gone = threading.Event()
    listen_sock = socket.socket()
    port = socket_helper.bind_port(listen_sock, HOST)

    def serve_and_slam():
        # Runs in a thread: accept one connection, then close both the
        # connection and the listening socket without any TLS shutdown,
        # and signal `gone` so the client knows the peer has vanished.
        listen_sock.listen()
        ready.set()
        conn, _ = listen_sock.accept()
        conn.close()
        listen_sock.close()
        gone.set()

    thread = threading.Thread(target=serve_and_slam)
    thread.start()
    try:
        # Client side: connect, wait until the server socket is gone,
        # then attempt a TLS handshake on the dead connection.
        ready.wait()
        with socket.socket() as c:
            c.connect((HOST, port))
            gone.wait()
            try:
                ssl_sock = test_wrap_socket(c)
            except OSError:
                pass
            else:
                self.fail('connecting to closed SSL socket should have failed')
    finally:
        thread.join()
def test_ssl_cert_verify_error(self):
    """Handshaking against a cert with no loaded CA must raise
    SSLCertVerificationError with the expected verify code and message."""
    if support.verbose:
        sys.stdout.write("\n")

    server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    server_context.load_cert_chain(SIGNED_CERTFILE)

    # Client context with default verification but no CA certificates
    # loaded, so the server certificate chain cannot be validated.
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)

    server = ThreadedEchoServer(context=server_context, chatty=True)
    with server:
        with context.wrap_socket(socket.socket(),
                                 server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
            try:
                s.connect((HOST, server.port))
            except ssl.SSLError as e:
                msg = 'unable to get local issuer certificate'
                self.assertIsInstance(e, ssl.SSLCertVerificationError)
                # 20 is X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY.
                self.assertEqual(e.verify_code, 20)
                self.assertEqual(e.verify_message, msg)
                self.assertIn(msg, repr(e))
                self.assertIn('certificate verify failed', repr(e))
            else:
                # Bug fix: previously an (unexpected) successful handshake
                # made the test pass silently.
                self.fail('certificate verification should have failed')
@requires_tls_version('SSLv2')
def test_protocol_sslv2(self):
    """Connecting to an SSLv2 server with various client options"""
    if support.verbose:
        sys.stdout.write("\n")
    # SSLv2 server only interoperates with an SSLv2 client.
    try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
    try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
    try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
    try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False)
    if has_tls_version('SSLv3'):
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
    try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
    # SSLv23 client with specific SSL options
    try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
                       client_options=ssl.OP_NO_SSLv3)
    try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
                       client_options=ssl.OP_NO_TLSv1)
def test_PROTOCOL_TLS(self):
    """Connecting to an SSLv23 server with various client options"""
    if support.verbose:
        sys.stdout.write("\n")
    if has_tls_version('SSLv2'):
        try:
            try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv2, True)
        except OSError as x:
            # this fails on some older versions of OpenSSL (0.9.7l, for instance)
            if support.verbose:
                sys.stdout.write(
                    " SSL2 client to SSL23 server test unexpectedly failed:\n %s\n"
                    % str(x))
    # Baseline matrix with CERT_NONE.
    if has_tls_version('SSLv3'):
        try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False)
    try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True)
    if has_tls_version('TLSv1'):
        try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1')

    # Same matrix with CERT_OPTIONAL.
    if has_tls_version('SSLv3'):
        try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False, ssl.CERT_OPTIONAL)
    try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True, ssl.CERT_OPTIONAL)
    if has_tls_version('TLSv1'):
        try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)

    # Same matrix with CERT_REQUIRED.
    if has_tls_version('SSLv3'):
        try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False, ssl.CERT_REQUIRED)
    try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True, ssl.CERT_REQUIRED)
    if has_tls_version('TLSv1'):
        try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)

    # Server with specific SSL options
    if has_tls_version('SSLv3'):
        try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False,
                           server_options=ssl.OP_NO_SSLv3)
    # Will choose TLSv1
    try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True,
                       server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
    if has_tls_version('TLSv1'):
        try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, False,
                           server_options=ssl.OP_NO_TLSv1)
@requires_tls_version('SSLv3')
def test_protocol_sslv3(self):
    """Connecting to an SSLv3 server with various client options"""
    if support.verbose:
        sys.stdout.write("\n")
    # SSLv3 server only interoperates with an SSLv3 client.
    try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3')
    try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL)
    try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED)
    if has_tls_version('SSLv2'):
        try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
    try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLS, False,
                       client_options=ssl.OP_NO_SSLv3)
    try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
@requires_tls_version('TLSv1')
def test_protocol_tlsv1(self):
    """Connecting to a TLSv1 server with various client options"""
    if support.verbose:
        sys.stdout.write("\n")
    # TLSv1 server only interoperates with a TLSv1 client.
    try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1')
    try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
    try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
    if has_tls_version('SSLv2'):
        try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
    if has_tls_version('SSLv3'):
        try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
    try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLS, False,
                       client_options=ssl.OP_NO_TLSv1)
@requires_tls_version('TLSv1_1')
def test_protocol_tlsv1_1(self):
    """Connecting to a TLSv1.1 server with various client options.
       Testing against older TLS versions."""
    if support.verbose:
        sys.stdout.write("\n")
    try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
    if has_tls_version('SSLv2'):
        try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False)
    if has_tls_version('SSLv3'):
        try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False)
    try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLS, False,
                       client_options=ssl.OP_NO_TLSv1_1)
    # An auto-negotiating (PROTOCOL_TLS) peer settles on TLSv1.1; fixed
    # but mismatched versions fail in both directions.
    try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
    try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
    try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
    @requires_tls_version('TLSv1_2')
    def test_protocol_tlsv1_2(self):
        """Connecting to a TLSv1.2 server with various client options.
           Testing against older TLS versions."""
        if support.verbose:
            sys.stdout.write("\n")
        # Matching TLSv1.2 endpoints succeed (legacy SSL disabled on both ends).
        try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2',
                           server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,
                           client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,)
        # Clients pinned to legacy SSL, or with TLSv1.2 disabled, must fail.
        if has_tls_version('SSLv2'):
            try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv2, False)
        if has_tls_version('SSLv3'):
            try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLS, False,
                           client_options=ssl.OP_NO_TLSv1_2)
        # An auto-negotiating client settles on TLSv1.2; older fixed TLS
        # versions on either side must fail to interoperate.
        try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2')
        try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
    def test_starttls(self):
        """Switching from clear text to encrypted and back again."""
        msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6")
        server = ThreadedEchoServer(CERTFILE,
                                    starttls_server=True,
                                    chatty=True,
                                    connectionchatty=True)
        wrapped = False  # True while the connection is currently TLS-wrapped
        with server:
            s = socket.socket()
            s.setblocking(True)
            s.connect((HOST, server.port))
            if support.verbose:
                sys.stdout.write("\n")
            for indata in msgs:
                if support.verbose:
                    sys.stdout.write(
                        " client: sending %r...\n" % indata)
                # Send over the TLS socket once STARTTLS has completed,
                # over the plain socket otherwise.
                if wrapped:
                    conn.write(indata)
                    outdata = conn.read()
                else:
                    s.send(indata)
                    outdata = s.recv(1024)
                msg = outdata.strip().lower()
                if indata == b"STARTTLS" and msg.startswith(b"ok"):
                    # STARTTLS ok, switch to secure mode
                    if support.verbose:
                        sys.stdout.write(
                            " client: read %r from server, starting TLS...\n"
                            % msg)
                    conn = test_wrap_socket(s)
                    wrapped = True
                elif indata == b"ENDTLS" and msg.startswith(b"ok"):
                    # ENDTLS ok, switch back to clear text
                    if support.verbose:
                        sys.stdout.write(
                            " client: read %r from server, ending TLS...\n"
                            % msg)
                    s = conn.unwrap()
                    wrapped = False
                else:
                    if support.verbose:
                        sys.stdout.write(
                            " client: read %r from server\n" % msg)
            if support.verbose:
                sys.stdout.write(" client: closing connection.\n")
            # Say goodbye and close over whichever channel is currently active.
            if wrapped:
                conn.write(b"over\n")
            else:
                s.send(b"over\n")
            if wrapped:
                conn.close()
            else:
                s.close()
def test_socketserver(self):
"""Using socketserver to create and manage SSL connections."""
server = make_https_server(self, certfile=SIGNED_CERTFILE)
# try to connect
if support.verbose:
sys.stdout.write('\n')
with open(CERTFILE, 'rb') as f:
d1 = f.read()
d2 = ''
# now fetch the same data from the HTTPS server
url = 'https://localhost:%d/%s' % (
server.port, os.path.split(CERTFILE)[1])
context = ssl.create_default_context(cafile=SIGNING_CA)
f = urllib.request.urlopen(url, context=context)
try:
dlen = f.info().get("content-length")
if dlen and (int(dlen) > 0):
d2 = f.read(int(dlen))
if support.verbose:
sys.stdout.write(
" client: read %d bytes from remote server '%s'\n"
% (len(d2), server))
finally:
f.close()
self.assertEqual(d1, d2)
    def test_asyncore_server(self):
        """Check the example asyncore integration."""
        if support.verbose:
            sys.stdout.write("\n")
        indata = b"FOO\n"
        server = AsyncoreEchoServer(CERTFILE)
        with server:
            s = test_wrap_socket(socket.socket())
            s.connect(('127.0.0.1', server.port))
            if support.verbose:
                sys.stdout.write(
                    " client: sending %r...\n" % indata)
            s.write(indata)
            outdata = s.read()
            if support.verbose:
                sys.stdout.write(" client: read %r\n" % outdata)
            # The echo server returns the data lower-cased.
            if outdata != indata.lower():
                self.fail(
                    "bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
                    % (outdata[:20], len(outdata),
                       indata[:20].lower(), len(indata)))
            s.write(b"over\n")
            if support.verbose:
                sys.stdout.write(" client: closing connection.\n")
            s.close()
            if support.verbose:
                sys.stdout.write(" client: connection closed.\n")
def test_recv_send(self):
"""Test recv(), send() and friends."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE)
s.connect((HOST, server.port))
# helper methods for standardising recv* method signatures
def _recv_into():
b = bytearray(b"\0"*100)
count = s.recv_into(b)
return b[:count]
def _recvfrom_into():
b = bytearray(b"\0"*100)
count, addr = s.recvfrom_into(b)
return b[:count]
# (name, method, expect success?, *args, return value func)
send_methods = [
('send', s.send, True, [], len),
('sendto', s.sendto, False, ["some.address"], len),
('sendall', s.sendall, True, [], lambda x: None),
]
# (name, method, whether to expect success, *args)
recv_methods = [
('recv', s.recv, True, []),
('recvfrom', s.recvfrom, False, ["some.address"]),
('recv_into', _recv_into, True, []),
('recvfrom_into', _recvfrom_into, False, []),
]
data_prefix = "PREFIX_"
for (meth_name, send_meth, expect_success, args,
ret_val_meth) in send_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
ret = send_meth(indata, *args)
msg = "sending with {}".format(meth_name)
self.assertEqual(ret, ret_val_meth(indata), msg=msg)
outdata = s.read()
if outdata != indata.lower():
self.fail(
"While sending with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to send with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
for meth_name, recv_meth, expect_success, args in recv_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
s.send(indata)
outdata = recv_meth(*args)
if outdata != indata.lower():
self.fail(
"While receiving with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to receive with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
# consume data
s.read()
# read(-1, buffer) is supported, even though read(-1) is not
data = b"data"
s.send(data)
buffer = bytearray(len(data))
self.assertEqual(s.read(-1, buffer), len(data))
self.assertEqual(buffer, data)
# sendall accepts bytes-like objects
if ctypes is not None:
ubyte = ctypes.c_ubyte * len(data)
byteslike = ubyte.from_buffer_copy(data)
s.sendall(byteslike)
self.assertEqual(s.read(), data)
# Make sure sendmsg et al are disallowed to avoid
# inadvertent disclosure of data and/or corruption
# of the encrypted data stream
self.assertRaises(NotImplementedError, s.dup)
self.assertRaises(NotImplementedError, s.sendmsg, [b"data"])
self.assertRaises(NotImplementedError, s.recvmsg, 100)
self.assertRaises(NotImplementedError,
s.recvmsg_into, [bytearray(100)])
s.write(b"over\n")
self.assertRaises(ValueError, s.recv, -1)
self.assertRaises(ValueError, s.read, -1)
s.close()
def test_recv_zero(self):
server = ThreadedEchoServer(CERTFILE)
server.__enter__()
self.addCleanup(server.__exit__, None, None)
s = socket.create_connection((HOST, server.port))
self.addCleanup(s.close)
s = test_wrap_socket(s, suppress_ragged_eofs=False)
self.addCleanup(s.close)
# recv/read(0) should return no data
s.send(b"data")
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.read(0), b"")
self.assertEqual(s.read(), b"data")
# Should not block if the other end sends no data
s.setblocking(False)
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.recv_into(bytearray()), 0)
    def test_nonblocking_send(self):
        """send() on a non-blocking SSL socket eventually raises SSLWant*Error."""
        server = ThreadedEchoServer(CERTFILE,
                                    certreqs=ssl.CERT_NONE,
                                    ssl_version=ssl.PROTOCOL_TLS_SERVER,
                                    cacerts=CERTFILE,
                                    chatty=True,
                                    connectionchatty=False)
        with server:
            s = test_wrap_socket(socket.socket(),
                                 server_side=False,
                                 certfile=CERTFILE,
                                 ca_certs=CERTFILE,
                                 cert_reqs=ssl.CERT_NONE)
            s.connect((HOST, server.port))
            s.setblocking(False)
            # If we keep sending data, at some point the buffers
            # will be full and the call will block
            buf = bytearray(8192)
            def fill_buffer():
                while True:
                    s.send(buf)
            self.assertRaises((ssl.SSLWantWriteError,
                               ssl.SSLWantReadError), fill_buffer)
            # Restore blocking mode before closing the connection.
            s.setblocking(True)
            s.close()
    def test_handshake_timeout(self):
        # Issue #5103: SSL handshake must respect the socket timeout
        server = socket.socket(socket.AF_INET)
        host = "127.0.0.1"
        port = socket_helper.bind_port(server)
        started = threading.Event()
        finish = False
        def serve():
            # Accept connections but never speak TLS, so the client's
            # handshake can only time out.
            server.listen()
            started.set()
            conns = []
            while not finish:
                r, w, e = select.select([server], [], [], 0.1)
                if server in r:
                    # Let the socket hang around rather than having
                    # it closed by garbage collection.
                    conns.append(server.accept()[0])
            for sock in conns:
                sock.close()
        t = threading.Thread(target=serve)
        t.start()
        started.wait()
        try:
            try:
                # Wrap after connect: the handshake runs (and times out)
                # inside test_wrap_socket().
                c = socket.socket(socket.AF_INET)
                c.settimeout(0.2)
                c.connect((host, port))
                # Will attempt handshake and time out
                self.assertRaisesRegex(TimeoutError, "timed out",
                                       test_wrap_socket, c)
            finally:
                c.close()
            try:
                # Wrap before connect: the handshake runs (and times out)
                # inside connect() on the SSL socket.
                c = socket.socket(socket.AF_INET)
                c = test_wrap_socket(c)
                c.settimeout(0.2)
                # Will attempt handshake and time out
                self.assertRaisesRegex(TimeoutError, "timed out",
                                       c.connect, (host, port))
            finally:
                c.close()
        finally:
            finish = True
            t.join()
            server.close()
    def test_server_accept(self):
        # Issue #16357: accept() on a SSLSocket created through
        # SSLContext.wrap_socket().
        client_ctx, server_ctx, hostname = testing_context()
        server = socket.socket(socket.AF_INET)
        host = "127.0.0.1"
        port = socket_helper.bind_port(server)
        server = server_ctx.wrap_socket(server, server_side=True)
        self.assertTrue(server.server_side)
        evt = threading.Event()
        remote = None
        peer = None
        def serve():
            # Runs in a thread: accept one connection and echo four bytes.
            nonlocal remote, peer
            server.listen()
            # Block on the accept and wait on the connection to close.
            evt.set()
            remote, peer = server.accept()
            remote.send(remote.recv(4))
        t = threading.Thread(target=serve)
        t.start()
        # Client wait until server setup and perform a connect.
        evt.wait()
        client = client_ctx.wrap_socket(
            socket.socket(), server_hostname=hostname
        )
        client.connect((hostname, port))
        client.send(b'data')
        client.recv()
        client_addr = client.getsockname()
        client.close()
        t.join()
        remote.close()
        server.close()
        # Sanity checks: accept() produced an SSLSocket connected to our client.
        self.assertIsInstance(remote, ssl.SSLSocket)
        self.assertEqual(peer, client_addr)
def test_getpeercert_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.check_hostname = False
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.getpeercert()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_do_handshake_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.check_hostname = False
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.do_handshake()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
    def test_no_shared_ciphers(self):
        """Handshake must fail when client and server share no cipher suite."""
        client_context, server_context, hostname = testing_context()
        # OpenSSL enables all TLS 1.3 ciphers, enforce TLS 1.2 for test
        client_context.options |= ssl.OP_NO_TLSv1_3
        # Force different suites on client and server
        client_context.set_ciphers("AES128")
        server_context.set_ciphers("AES256")
        with ThreadedEchoServer(context=server_context) as server:
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                with self.assertRaises(OSError):
                    s.connect((HOST, server.port))
        # The server side must have recorded the handshake failure reason.
        self.assertIn("no shared cipher", server.conn_errors[0])
    def test_version_basic(self):
        """
        Basic tests for SSLSocket.version().
        More tests are done in the test_protocol_*() methods.
        """
        context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE
        with ThreadedEchoServer(CERTFILE,
                                ssl_version=ssl.PROTOCOL_TLS_SERVER,
                                chatty=False) as server:
            with context.wrap_socket(socket.socket()) as s:
                # Before the handshake there is no SSL object and no version.
                self.assertIs(s.version(), None)
                self.assertIs(s._sslobj, None)
                s.connect((HOST, server.port))
                # Auto-negotiation picks the highest available version.
                self.assertEqual(s.version(), 'TLSv1.3')
            # After close, the SSL object is gone and version() is None again.
            self.assertIs(s._sslobj, None)
            self.assertIs(s.version(), None)
    @requires_tls_version('TLSv1_3')
    def test_tls1_3(self):
        """A TLS 1.3-only client negotiates TLS 1.3 and a TLS 1.3 cipher."""
        client_context, server_context, hostname = testing_context()
        client_context.minimum_version = ssl.TLSVersion.TLSv1_3
        with ThreadedEchoServer(context=server_context) as server:
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                s.connect((HOST, server.port))
                # Only the standard TLS 1.3 suites are acceptable here.
                self.assertIn(s.cipher()[0], {
                    'TLS_AES_256_GCM_SHA384',
                    'TLS_CHACHA20_POLY1305_SHA256',
                    'TLS_AES_128_GCM_SHA256',
                })
                self.assertEqual(s.version(), 'TLSv1.3')
    @requires_tls_version('TLSv1_2')
    @requires_tls_version('TLSv1')
    @ignore_deprecation
    def test_min_max_version_tlsv1_2(self):
        """Client range 1.0-1.2 against a 1.2-only server negotiates TLS 1.2."""
        client_context, server_context, hostname = testing_context()
        # client TLSv1.0 to 1.2
        client_context.minimum_version = ssl.TLSVersion.TLSv1
        client_context.maximum_version = ssl.TLSVersion.TLSv1_2
        # server only TLSv1.2
        server_context.minimum_version = ssl.TLSVersion.TLSv1_2
        server_context.maximum_version = ssl.TLSVersion.TLSv1_2
        with ThreadedEchoServer(context=server_context) as server:
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                s.connect((HOST, server.port))
                self.assertEqual(s.version(), 'TLSv1.2')
    @requires_tls_version('TLSv1_1')
    @ignore_deprecation
    def test_min_max_version_tlsv1_1(self):
        """Overlapping ranges negotiate the highest common version: TLS 1.1."""
        client_context, server_context, hostname = testing_context()
        # client 1.0 to 1.2, server 1.0 to 1.1
        client_context.minimum_version = ssl.TLSVersion.TLSv1
        client_context.maximum_version = ssl.TLSVersion.TLSv1_2
        server_context.minimum_version = ssl.TLSVersion.TLSv1
        server_context.maximum_version = ssl.TLSVersion.TLSv1_1
        # presumably relaxes OpenSSL's security level so legacy protocols
        # are permitted — see the seclevel_workaround helper
        seclevel_workaround(client_context, server_context)
        with ThreadedEchoServer(context=server_context) as server:
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                s.connect((HOST, server.port))
                self.assertEqual(s.version(), 'TLSv1.1')
    @requires_tls_version('TLSv1_2')
    @requires_tls_version('TLSv1')
    @ignore_deprecation
    def test_min_max_version_mismatch(self):
        """Disjoint client/server version ranges must fail with a TLS alert."""
        client_context, server_context, hostname = testing_context()
        # client 1.0, server 1.2 (mismatch)
        server_context.maximum_version = ssl.TLSVersion.TLSv1_2
        server_context.minimum_version = ssl.TLSVersion.TLSv1_2
        client_context.maximum_version = ssl.TLSVersion.TLSv1
        client_context.minimum_version = ssl.TLSVersion.TLSv1
        seclevel_workaround(client_context, server_context)
        with ThreadedEchoServer(context=server_context) as server:
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                with self.assertRaises(ssl.SSLError) as e:
                    s.connect((HOST, server.port))
                # The handshake failure surfaces as an alert from the server.
                self.assertIn("alert", str(e.exception))
    @requires_tls_version('SSLv3')
    def test_min_max_version_sslv3(self):
        """Both sides constrained to SSLv3 still interoperate when enabled."""
        client_context, server_context, hostname = testing_context()
        server_context.minimum_version = ssl.TLSVersion.SSLv3
        client_context.minimum_version = ssl.TLSVersion.SSLv3
        client_context.maximum_version = ssl.TLSVersion.SSLv3
        seclevel_workaround(client_context, server_context)
        with ThreadedEchoServer(context=server_context) as server:
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                s.connect((HOST, server.port))
                self.assertEqual(s.version(), 'SSLv3')
    def test_default_ecdh_curve(self):
        # Issue #21015: elliptic curve-based Diffie Hellman key exchange
        # should be enabled by default on SSL contexts.
        client_context, server_context, hostname = testing_context()
        # TLSv1.3 defaults to PFS key agreement and no longer has KEA in
        # cipher name.
        client_context.maximum_version = ssl.TLSVersion.TLSv1_2
        # Prior to OpenSSL 1.0.0, ECDH ciphers have to be enabled
        # explicitly using the 'ECCdraft' cipher alias.  Otherwise,
        # our default cipher list should prefer ECDH-based ciphers
        # automatically.
        with ThreadedEchoServer(context=server_context) as server:
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                s.connect((HOST, server.port))
                # The negotiated suite must use ECDH key exchange.
                self.assertIn("ECDH", s.cipher()[0])
    @unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
                         "'tls-unique' channel binding not available")
    def test_tls_unique_channel_binding(self):
        """Test tls-unique channel binding."""
        if support.verbose:
            sys.stdout.write("\n")
        client_context, server_context, hostname = testing_context()
        server = ThreadedEchoServer(context=server_context,
                                    chatty=True,
                                    connectionchatty=False)
        with server:
            with client_context.wrap_socket(
                    socket.socket(),
                    server_hostname=hostname) as s:
                s.connect((HOST, server.port))
                # get the data
                cb_data = s.get_channel_binding("tls-unique")
                if support.verbose:
                    sys.stdout.write(
                        " got channel binding data: {0!r}\n".format(cb_data))
                # check if it is sane
                self.assertIsNotNone(cb_data)
                # TLS 1.3 produces a 48-byte binding; earlier versions 12 bytes.
                if s.version() == 'TLSv1.3':
                    self.assertEqual(len(cb_data), 48)
                else:
                    self.assertEqual(len(cb_data), 12) # True for TLSv1
                # and compare with the peers version
                s.write(b"CB tls-unique\n")
                peer_data_repr = s.read().strip()
                self.assertEqual(peer_data_repr,
                                 repr(cb_data).encode("us-ascii"))
            # now, again: a fresh connection must yield different binding data
            with client_context.wrap_socket(
                    socket.socket(),
                    server_hostname=hostname) as s:
                s.connect((HOST, server.port))
                new_cb_data = s.get_channel_binding("tls-unique")
                if support.verbose:
                    sys.stdout.write(
                        "got another channel binding data: {0!r}\n".format(
                            new_cb_data)
                    )
                # is it really unique
                self.assertNotEqual(cb_data, new_cb_data)
                self.assertIsNotNone(cb_data)
                if s.version() == 'TLSv1.3':
                    self.assertEqual(len(cb_data), 48)
                else:
                    self.assertEqual(len(cb_data), 12) # True for TLSv1
                s.write(b"CB tls-unique\n")
                peer_data_repr = s.read().strip()
                self.assertEqual(peer_data_repr,
                                 repr(new_cb_data).encode("us-ascii"))
def test_compression(self):
client_context, server_context, hostname = testing_context()
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
if support.verbose:
sys.stdout.write(" got compression: {!r}\n".format(stats['compression']))
self.assertIn(stats['compression'], { None, 'ZLIB', 'RLE' })
    @unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'),
                         "ssl.OP_NO_COMPRESSION needed for this test")
    def test_compression_disabled(self):
        """With OP_NO_COMPRESSION on both sides, no compression is negotiated."""
        client_context, server_context, hostname = testing_context()
        client_context.options |= ssl.OP_NO_COMPRESSION
        server_context.options |= ssl.OP_NO_COMPRESSION
        stats = server_params_test(client_context, server_context,
                                   chatty=True, connectionchatty=True,
                                   sni_name=hostname)
        self.assertIs(stats['compression'], None)
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_dh_params(self):
# Check we can get a connection with ephemeral Diffie-Hellman
client_context, server_context, hostname = testing_context()
# test scenario needs TLS <= 1.2
client_context.options |= ssl.OP_NO_TLSv1_3
server_context.load_dh_params(DHFILE)
server_context.set_ciphers("kEDH")
server_context.options |= ssl.OP_NO_TLSv1_3
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
cipher = stats["cipher"][0]
parts = cipher.split("-")
if "ADH" not in parts and "EDH" not in parts and "DHE" not in parts:
self.fail("Non-DH cipher: " + cipher[0])
    def test_ecdh_curve(self):
        """set_ecdh_curve(): matching/auto curves work; a mismatch fails."""
        # server secp384r1, client auto
        client_context, server_context, hostname = testing_context()
        server_context.set_ecdh_curve("secp384r1")
        server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
        server_context.minimum_version = ssl.TLSVersion.TLSv1_2
        stats = server_params_test(client_context, server_context,
                                   chatty=True, connectionchatty=True,
                                   sni_name=hostname)
        # server auto, client secp384r1
        client_context, server_context, hostname = testing_context()
        client_context.set_ecdh_curve("secp384r1")
        server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
        server_context.minimum_version = ssl.TLSVersion.TLSv1_2
        stats = server_params_test(client_context, server_context,
                                   chatty=True, connectionchatty=True,
                                   sni_name=hostname)
        # server / client curve mismatch
        client_context, server_context, hostname = testing_context()
        client_context.set_ecdh_curve("prime256v1")
        server_context.set_ecdh_curve("secp384r1")
        server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
        server_context.minimum_version = ssl.TLSVersion.TLSv1_2
        with self.assertRaises(ssl.SSLError):
            server_params_test(client_context, server_context,
                               chatty=True, connectionchatty=True,
                               sni_name=hostname)
def test_selected_alpn_protocol(self):
# selected_alpn_protocol() is None unless ALPN is used.
client_context, server_context, hostname = testing_context()
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['client_alpn_protocol'], None)
    def test_selected_alpn_protocol_if_server_uses_alpn(self):
        # selected_alpn_protocol() is None unless ALPN is used by the client.
        client_context, server_context, hostname = testing_context()
        server_context.set_alpn_protocols(['foo', 'bar'])
        stats = server_params_test(client_context, server_context,
                                   chatty=True, connectionchatty=True,
                                   sni_name=hostname)
        # Server-side ALPN alone is not enough; the client sent no protocols.
        self.assertIs(stats['client_alpn_protocol'], None)
    def test_alpn_protocols(self):
        """ALPN selects per the server's preference, or None with no overlap."""
        server_protocols = ['foo', 'bar', 'milkshake']
        protocol_tests = [
            (['foo', 'bar'], 'foo'),
            (['bar', 'foo'], 'foo'),
            (['milkshake'], 'milkshake'),
            (['http/3.0', 'http/4.0'], None)
        ]
        for client_protocols, expected in protocol_tests:
            client_context, server_context, hostname = testing_context()
            server_context.set_alpn_protocols(server_protocols)
            client_context.set_alpn_protocols(client_protocols)
            try:
                stats = server_params_test(client_context,
                                           server_context,
                                           chatty=True,
                                           connectionchatty=True,
                                           sni_name=hostname)
            except ssl.SSLError as e:
                # NOTE(review): if this path is taken, `stats` is an exception
                # and the subscript lookups below would fail — presumably no
                # combination in the table raises in practice; verify.
                stats = e
            msg = "failed trying %s (s) and %s (c).\n" \
                  "was expecting %s, but got %%s from the %%s" \
                  % (str(server_protocols), str(client_protocols),
                     str(expected))
            client_result = stats['client_alpn_protocol']
            self.assertEqual(client_result, expected,
                             msg % (client_result, "client"))
            # The server records one entry per connection; check the latest.
            server_result = stats['server_alpn_protocols'][-1] \
                if len(stats['server_alpn_protocols']) else 'nothing'
            self.assertEqual(server_result, expected,
                             msg % (server_result, "server"))
def test_npn_protocols(self):
assert not ssl.HAS_NPN
def sni_contexts(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(SIGNED_CERTFILE)
other_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
other_context.load_cert_chain(SIGNED_CERTFILE2)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
return server_context, other_context, client_context
def check_common_name(self, stats, name):
cert = stats['peercert']
self.assertIn((('commonName', name),), cert['subject'])
    def test_sni_callback(self):
        """The servername callback receives the SNI name and can swap contexts."""
        calls = []
        server_context, other_context, client_context = self.sni_contexts()
        client_context.check_hostname = False
        def servername_cb(ssl_sock, server_name, initial_context):
            # Record the callback arguments; switch to the alternate
            # certificate whenever the client sent a server name.
            calls.append((server_name, initial_context))
            if server_name is not None:
                ssl_sock.context = other_context
        server_context.set_servername_callback(servername_cb)
        stats = server_params_test(client_context, server_context,
                                   chatty=True,
                                   sni_name='supermessage')
        # The hostname was fetched properly, and the certificate was
        # changed for the connection.
        self.assertEqual(calls, [("supermessage", server_context)])
        # other_context's certificate (commonName 'fakehostname') was selected
        self.check_common_name(stats, 'fakehostname')
        calls = []
        # The callback is called with server_name=None
        stats = server_params_test(client_context, server_context,
                                   chatty=True,
                                   sni_name=None)
        self.assertEqual(calls, [(None, server_context)])
        self.check_common_name(stats, SIGNED_CERTFILE_HOSTNAME)
        # Check disabling the callback
        calls = []
        server_context.set_servername_callback(None)
        stats = server_params_test(client_context, server_context,
                                   chatty=True,
                                   sni_name='notfunny')
        # Certificate didn't change
        self.check_common_name(stats, SIGNED_CERTFILE_HOSTNAME)
        # The disabled callback must not have been invoked at all.
        self.assertEqual(calls, [])
def test_sni_callback_alert(self):
# Returning a TLS alert is reflected to the connecting client
server_context, other_context, client_context = self.sni_contexts()
def cb_returning_alert(ssl_sock, server_name, initial_context):
return ssl.ALERT_DESCRIPTION_ACCESS_DENIED
server_context.set_servername_callback(cb_returning_alert)
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED')
def test_sni_callback_raising(self):
# Raising fails the connection with a TLS handshake failure alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_raising(ssl_sock, server_name, initial_context):
1/0
server_context.set_servername_callback(cb_raising)
with support.catch_unraisable_exception() as catch:
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason,
'SSLV3_ALERT_HANDSHAKE_FAILURE')
self.assertEqual(catch.unraisable.exc_type, ZeroDivisionError)
    def test_sni_callback_wrong_return_type(self):
        # Returning the wrong return type terminates the TLS connection
        # with an internal error alert.
        server_context, other_context, client_context = self.sni_contexts()
        def cb_wrong_return_type(ssl_sock, server_name, initial_context):
            return "foo"
        server_context.set_servername_callback(cb_wrong_return_type)
        with support.catch_unraisable_exception() as catch:
            with self.assertRaises(ssl.SSLError) as cm:
                stats = server_params_test(client_context, server_context,
                                           chatty=False,
                                           sni_name='supermessage')
            # The TypeError goes through the unraisable hook; the client
            # only sees the internal-error alert.
            self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR')
            self.assertEqual(catch.unraisable.exc_type, TypeError)
    def test_shared_ciphers(self):
        """shared_ciphers() reports only suites both endpoints offered."""
        client_context, server_context, hostname = testing_context()
        client_context.set_ciphers("AES128:AES256")
        server_context.set_ciphers("AES256")
        expected_algs = [
            "AES256", "AES-256",
            # TLS 1.3 ciphers are always enabled
            "TLS_CHACHA20", "TLS_AES",
        ]
        stats = server_params_test(client_context, server_context,
                                   sni_name=hostname)
        ciphers = stats['server_shared_ciphers'][0]
        self.assertGreater(len(ciphers), 0)
        # Every shared suite must match one of the expected algorithm names.
        for name, tls_version, bits in ciphers:
            if not any(alg in name for alg in expected_algs):
                self.fail(name)
def test_read_write_after_close_raises_valuerror(self):
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
s = client_context.wrap_socket(socket.socket(),
server_hostname=hostname)
s.connect((HOST, server.port))
s.close()
self.assertRaises(ValueError, s.read, 1024)
self.assertRaises(ValueError, s.write, b'hello')
    def test_sendfile(self):
        """sendfile() over TLS must deliver the file's bytes intact."""
        TEST_DATA = b"x" * 512
        with open(os_helper.TESTFN, 'wb') as f:
            f.write(TEST_DATA)
        self.addCleanup(os_helper.unlink, os_helper.TESTFN)
        client_context, server_context, hostname = testing_context()
        server = ThreadedEchoServer(context=server_context, chatty=False)
        with server:
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                s.connect((HOST, server.port))
                with open(os_helper.TESTFN, 'rb') as file:
                    s.sendfile(file)
                    # The echo server must send the exact contents back.
                    self.assertEqual(s.recv(1024), TEST_DATA)
    def test_session(self):
        """SSLSession objects can be inspected and reused across connections."""
        client_context, server_context, hostname = testing_context()
        # TODO: sessions aren't compatible with TLSv1.3 yet
        client_context.options |= ssl.OP_NO_TLSv1_3
        # first connection without session
        stats = server_params_test(client_context, server_context,
                                   sni_name=hostname)
        session = stats['session']
        self.assertTrue(session.id)
        self.assertGreater(session.time, 0)
        self.assertGreater(session.timeout, 0)
        self.assertTrue(session.has_ticket)
        self.assertGreater(session.ticket_lifetime_hint, 0)
        self.assertFalse(stats['session_reused'])
        sess_stat = server_context.session_stats()
        self.assertEqual(sess_stat['accept'], 1)
        self.assertEqual(sess_stat['hits'], 0)
        # reuse session
        stats = server_params_test(client_context, server_context,
                                   session=session, sni_name=hostname)
        sess_stat = server_context.session_stats()
        self.assertEqual(sess_stat['accept'], 2)
        self.assertEqual(sess_stat['hits'], 1)
        self.assertTrue(stats['session_reused'])
        session2 = stats['session']
        # The reused session compares equal but is a distinct object.
        self.assertEqual(session2.id, session.id)
        self.assertEqual(session2, session)
        self.assertIsNot(session2, session)
        self.assertGreaterEqual(session2.time, session.time)
        self.assertGreaterEqual(session2.timeout, session.timeout)
        # another one without session
        stats = server_params_test(client_context, server_context,
                                   sni_name=hostname)
        self.assertFalse(stats['session_reused'])
        session3 = stats['session']
        self.assertNotEqual(session3.id, session.id)
        self.assertNotEqual(session3, session)
        sess_stat = server_context.session_stats()
        self.assertEqual(sess_stat['accept'], 3)
        self.assertEqual(sess_stat['hits'], 1)
        # reuse session again
        stats = server_params_test(client_context, server_context,
                                   session=session, sni_name=hostname)
        self.assertTrue(stats['session_reused'])
        session4 = stats['session']
        self.assertEqual(session4.id, session.id)
        self.assertEqual(session4, session)
        self.assertGreaterEqual(session4.time, session.time)
        self.assertGreaterEqual(session4.timeout, session.timeout)
        sess_stat = server_context.session_stats()
        self.assertEqual(sess_stat['accept'], 4)
        self.assertEqual(sess_stat['hits'], 2)
    def test_session_handling(self):
        """The `session` attribute enforces type, timing, and context rules."""
        client_context, server_context, hostname = testing_context()
        client_context2, _, _ = testing_context()
        # TODO: session reuse does not work with TLSv1.3
        client_context.options |= ssl.OP_NO_TLSv1_3
        client_context2.options |= ssl.OP_NO_TLSv1_3
        server = ThreadedEchoServer(context=server_context, chatty=False)
        with server:
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                # session is None before handshake
                self.assertEqual(s.session, None)
                self.assertEqual(s.session_reused, None)
                s.connect((HOST, server.port))
                session = s.session
                self.assertTrue(session)
                # Assigning anything but an SSLSession is rejected.
                with self.assertRaises(TypeError) as e:
                    s.session = object
                self.assertEqual(str(e.exception), 'Value is not a SSLSession.')
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                s.connect((HOST, server.port))
                # cannot set session after handshake
                with self.assertRaises(ValueError) as e:
                    s.session = session
                self.assertEqual(str(e.exception),
                                 'Cannot set session after handshake.')
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                # can set session before handshake and before the
                # connection was established
                s.session = session
                s.connect((HOST, server.port))
                self.assertEqual(s.session.id, session.id)
                self.assertEqual(s.session, session)
                self.assertEqual(s.session_reused, True)
            with client_context2.wrap_socket(socket.socket(),
                                             server_hostname=hostname) as s:
                # cannot re-use session with a different SSLContext
                with self.assertRaises(ValueError) as e:
                    s.session = session
                    s.connect((HOST, server.port))
                self.assertEqual(str(e.exception),
                                 'Session refers to a different SSLContext.')
@unittest.skipUnless(has_tls_version('TLSv1_3'), "Test needs TLS 1.3")
class TestPostHandshakeAuth(unittest.TestCase):
    """Tests for TLS 1.3 post-handshake authentication (PHA).

    These drive the ThreadedEchoServer mini-protocol used below:
    b'HASCERT' asks whether the server currently holds a client cert,
    b'PHA' asks the server to call verify_client_post_handshake(),
    and b'GETCERT' returns the peer certificate text.
    """

    def test_pha_setter(self):
        # post_handshake_auth must be settable independently of
        # verify_mode, on both server- and client-side contexts.
        protocols = [
            ssl.PROTOCOL_TLS_SERVER, ssl.PROTOCOL_TLS_CLIENT
        ]
        for protocol in protocols:
            ctx = ssl.SSLContext(protocol)
            self.assertEqual(ctx.post_handshake_auth, False)

            ctx.post_handshake_auth = True
            self.assertEqual(ctx.post_handshake_auth, True)

            ctx.verify_mode = ssl.CERT_REQUIRED
            self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
            self.assertEqual(ctx.post_handshake_auth, True)

            ctx.post_handshake_auth = False
            self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
            self.assertEqual(ctx.post_handshake_auth, False)

            ctx.verify_mode = ssl.CERT_OPTIONAL
            ctx.post_handshake_auth = True
            self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
            self.assertEqual(ctx.post_handshake_auth, True)

    def test_pha_required(self):
        # Client cert is absent before the PHA round-trip and present after.
        client_context, server_context, hostname = testing_context()
        server_context.post_handshake_auth = True
        server_context.verify_mode = ssl.CERT_REQUIRED
        client_context.post_handshake_auth = True
        client_context.load_cert_chain(SIGNED_CERTFILE)

        server = ThreadedEchoServer(context=server_context, chatty=False)
        with server:
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                s.connect((HOST, server.port))
                s.write(b'HASCERT')
                self.assertEqual(s.recv(1024), b'FALSE\n')
                s.write(b'PHA')
                self.assertEqual(s.recv(1024), b'OK\n')
                s.write(b'HASCERT')
                self.assertEqual(s.recv(1024), b'TRUE\n')
                # PHA method just returns true when cert is already available
                s.write(b'PHA')
                self.assertEqual(s.recv(1024), b'OK\n')
                s.write(b'GETCERT')
                cert_text = s.recv(4096).decode('us-ascii')
                self.assertIn('Python Software Foundation CA', cert_text)

    def test_pha_required_nocert(self):
        # Server requires a cert post-handshake while the client has none:
        # the client must see a "certificate required" alert.
        client_context, server_context, hostname = testing_context()
        server_context.post_handshake_auth = True
        server_context.verify_mode = ssl.CERT_REQUIRED
        client_context.post_handshake_auth = True

        # Ignore expected SSLError in ConnectionHandler of ThreadedEchoServer
        # (it is only raised sometimes on Windows)
        with threading_helper.catch_threading_exception() as cm:
            server = ThreadedEchoServer(context=server_context, chatty=False)
            with server:
                with client_context.wrap_socket(socket.socket(),
                                                server_hostname=hostname) as s:
                    s.connect((HOST, server.port))
                    s.write(b'PHA')
                    # receive CertificateRequest
                    self.assertEqual(s.recv(1024), b'OK\n')
                    # send empty Certificate + Finish
                    s.write(b'HASCERT')
                    # receive alert
                    with self.assertRaisesRegex(
                            ssl.SSLError,
                            'tlsv13 alert certificate required'):
                        s.recv(1024)

    def test_pha_optional(self):
        if support.verbose:
            sys.stdout.write("\n")

        client_context, server_context, hostname = testing_context()
        server_context.post_handshake_auth = True
        server_context.verify_mode = ssl.CERT_REQUIRED
        client_context.post_handshake_auth = True
        client_context.load_cert_chain(SIGNED_CERTFILE)

        # check CERT_OPTIONAL
        server_context.verify_mode = ssl.CERT_OPTIONAL
        server = ThreadedEchoServer(context=server_context, chatty=False)
        with server:
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                s.connect((HOST, server.port))
                s.write(b'HASCERT')
                self.assertEqual(s.recv(1024), b'FALSE\n')
                s.write(b'PHA')
                self.assertEqual(s.recv(1024), b'OK\n')
                s.write(b'HASCERT')
                self.assertEqual(s.recv(1024), b'TRUE\n')

    def test_pha_optional_nocert(self):
        if support.verbose:
            sys.stdout.write("\n")

        client_context, server_context, hostname = testing_context()
        server_context.post_handshake_auth = True
        server_context.verify_mode = ssl.CERT_OPTIONAL
        client_context.post_handshake_auth = True

        server = ThreadedEchoServer(context=server_context, chatty=False)
        with server:
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                s.connect((HOST, server.port))
                s.write(b'HASCERT')
                self.assertEqual(s.recv(1024), b'FALSE\n')
                s.write(b'PHA')
                self.assertEqual(s.recv(1024), b'OK\n')
                # optional doesn't fail when client does not have a cert
                s.write(b'HASCERT')
                self.assertEqual(s.recv(1024), b'FALSE\n')

    def test_pha_no_pha_client(self):
        # Client never enabled PHA, so it did not send the extension.
        client_context, server_context, hostname = testing_context()
        server_context.post_handshake_auth = True
        server_context.verify_mode = ssl.CERT_REQUIRED
        client_context.load_cert_chain(SIGNED_CERTFILE)

        server = ThreadedEchoServer(context=server_context, chatty=False)
        with server:
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                s.connect((HOST, server.port))
                with self.assertRaisesRegex(ssl.SSLError, 'not server'):
                    s.verify_client_post_handshake()
                s.write(b'PHA')
                self.assertIn(b'extension not received', s.recv(1024))

    def test_pha_no_pha_server(self):
        # server doesn't have PHA enabled, cert is requested in handshake
        client_context, server_context, hostname = testing_context()
        server_context.verify_mode = ssl.CERT_REQUIRED
        client_context.post_handshake_auth = True
        client_context.load_cert_chain(SIGNED_CERTFILE)

        server = ThreadedEchoServer(context=server_context, chatty=False)
        with server:
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                s.connect((HOST, server.port))
                s.write(b'HASCERT')
                self.assertEqual(s.recv(1024), b'TRUE\n')
                # PHA doesn't fail if there is already a cert
                s.write(b'PHA')
                self.assertEqual(s.recv(1024), b'OK\n')
                s.write(b'HASCERT')
                self.assertEqual(s.recv(1024), b'TRUE\n')

    def test_pha_not_tls13(self):
        # TLS 1.2
        client_context, server_context, hostname = testing_context()
        server_context.verify_mode = ssl.CERT_REQUIRED
        client_context.maximum_version = ssl.TLSVersion.TLSv1_2
        client_context.post_handshake_auth = True
        client_context.load_cert_chain(SIGNED_CERTFILE)

        server = ThreadedEchoServer(context=server_context, chatty=False)
        with server:
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                s.connect((HOST, server.port))
                # PHA fails for TLS != 1.3
                s.write(b'PHA')
                self.assertIn(b'WRONG_SSL_VERSION', s.recv(1024))

    def test_bpo37428_pha_cert_none(self):
        # verify that post_handshake_auth does not implicitly enable cert
        # validation.
        hostname = SIGNED_CERTFILE_HOSTNAME
        client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        client_context.post_handshake_auth = True
        client_context.load_cert_chain(SIGNED_CERTFILE)
        # no cert validation and CA on client side
        client_context.check_hostname = False
        client_context.verify_mode = ssl.CERT_NONE

        server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
        server_context.load_cert_chain(SIGNED_CERTFILE)
        server_context.load_verify_locations(SIGNING_CA)
        server_context.post_handshake_auth = True
        server_context.verify_mode = ssl.CERT_REQUIRED

        server = ThreadedEchoServer(context=server_context, chatty=False)
        with server:
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                s.connect((HOST, server.port))
                s.write(b'HASCERT')
                self.assertEqual(s.recv(1024), b'FALSE\n')
                s.write(b'PHA')
                self.assertEqual(s.recv(1024), b'OK\n')
                s.write(b'HASCERT')
                self.assertEqual(s.recv(1024), b'TRUE\n')
                # server cert has not been validated
                self.assertEqual(s.getpeercert(), {})
# True when the linked OpenSSL exposes the keylog callback
# (SSLContext.keylog_filename is only defined for OpenSSL >= 1.1.1).
HAS_KEYLOG = hasattr(ssl.SSLContext, 'keylog_filename')
# Decorator skipping keylog tests on older OpenSSL builds.
requires_keylog = unittest.skipUnless(
    HAS_KEYLOG, 'test requires OpenSSL 1.1.1 with keylog callback')
class TestSSLDebug(unittest.TestCase):
    """Tests for SSL debugging aids: keylog_filename / the SSLKEYLOGFILE
    environment variable, and the private per-message callback
    (SSLContext._msg_callback)."""

    def keylog_lines(self, fname=os_helper.TESTFN):
        # Helper: number of lines currently written to the keylog file.
        with open(fname) as f:
            return len(list(f))

    @requires_keylog
    @unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
    def test_keylog_defaults(self):
        self.addCleanup(os_helper.unlink, os_helper.TESTFN)
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        self.assertEqual(ctx.keylog_filename, None)

        self.assertFalse(os.path.isfile(os_helper.TESTFN))
        ctx.keylog_filename = os_helper.TESTFN
        self.assertEqual(ctx.keylog_filename, os_helper.TESTFN)
        self.assertTrue(os.path.isfile(os_helper.TESTFN))
        # assigning the filename immediately creates the file with a header
        self.assertEqual(self.keylog_lines(), 1)

        ctx.keylog_filename = None
        self.assertEqual(ctx.keylog_filename, None)

        with self.assertRaises((IsADirectoryError, PermissionError)):
            # Windows raises PermissionError
            ctx.keylog_filename = os.path.dirname(
                os.path.abspath(os_helper.TESTFN))

        with self.assertRaises(TypeError):
            ctx.keylog_filename = 1

    @requires_keylog
    @unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
    def test_keylog_filename(self):
        # Client-only, server-only and both-sides logging; line counts
        # accumulate because the same file is reused.
        self.addCleanup(os_helper.unlink, os_helper.TESTFN)
        client_context, server_context, hostname = testing_context()

        client_context.keylog_filename = os_helper.TESTFN
        server = ThreadedEchoServer(context=server_context, chatty=False)
        with server:
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                s.connect((HOST, server.port))
        # header, 5 lines for TLS 1.3
        self.assertEqual(self.keylog_lines(), 6)

        client_context.keylog_filename = None
        server_context.keylog_filename = os_helper.TESTFN
        server = ThreadedEchoServer(context=server_context, chatty=False)
        with server:
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                s.connect((HOST, server.port))
        self.assertGreaterEqual(self.keylog_lines(), 11)

        client_context.keylog_filename = os_helper.TESTFN
        server_context.keylog_filename = os_helper.TESTFN
        server = ThreadedEchoServer(context=server_context, chatty=False)
        with server:
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                s.connect((HOST, server.port))
        self.assertGreaterEqual(self.keylog_lines(), 21)

        client_context.keylog_filename = None
        server_context.keylog_filename = None

    @requires_keylog
    @unittest.skipIf(sys.flags.ignore_environment,
                     "test is not compatible with ignore_environment")
    @unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
    def test_keylog_env(self):
        self.addCleanup(os_helper.unlink, os_helper.TESTFN)
        with unittest.mock.patch.dict(os.environ):
            os.environ['SSLKEYLOGFILE'] = os_helper.TESTFN
            self.assertEqual(os.environ['SSLKEYLOGFILE'], os_helper.TESTFN)

            # plain SSLContext() ignores the env var ...
            ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
            self.assertEqual(ctx.keylog_filename, None)

            # ... but the convenience constructors honor it
            ctx = ssl.create_default_context()
            self.assertEqual(ctx.keylog_filename, os_helper.TESTFN)

            ctx = ssl._create_stdlib_context()
            self.assertEqual(ctx.keylog_filename, os_helper.TESTFN)

    def test_msg_callback(self):
        # _msg_callback accepts callables (or None) and rejects the rest.
        client_context, server_context, hostname = testing_context()

        def msg_cb(conn, direction, version, content_type, msg_type, data):
            pass

        self.assertIs(client_context._msg_callback, None)
        client_context._msg_callback = msg_cb
        self.assertIs(client_context._msg_callback, msg_cb)

        with self.assertRaises(TypeError):
            client_context._msg_callback = object()

    def test_msg_callback_tls12(self):
        # The callback must observe the TLS 1.2 handshake messages.
        client_context, server_context, hostname = testing_context()
        client_context.options |= ssl.OP_NO_TLSv1_3

        msg = []

        def msg_cb(conn, direction, version, content_type, msg_type, data):
            self.assertIsInstance(conn, ssl.SSLSocket)
            self.assertIsInstance(data, bytes)
            self.assertIn(direction, {'read', 'write'})
            msg.append((direction, version, content_type, msg_type))

        client_context._msg_callback = msg_cb

        server = ThreadedEchoServer(context=server_context, chatty=False)
        with server:
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                s.connect((HOST, server.port))

        self.assertIn(
            ("read", TLSVersion.TLSv1_2, _TLSContentType.HANDSHAKE,
             _TLSMessageType.SERVER_KEY_EXCHANGE),
            msg
        )
        self.assertIn(
            ("write", TLSVersion.TLSv1_2, _TLSContentType.CHANGE_CIPHER_SPEC,
             _TLSMessageType.CHANGE_CIPHER_SPEC),
            msg
        )

    def test_msg_callback_deadlock_bpo43577(self):
        # Regression test: msg callback + SNI callback swapping the
        # context must not deadlock (bpo-43577).
        client_context, server_context, hostname = testing_context()
        server_context2 = testing_context()[1]

        def msg_cb(conn, direction, version, content_type, msg_type, data):
            pass

        def sni_cb(sock, servername, ctx):
            sock.context = server_context2

        server_context._msg_callback = msg_cb
        server_context.sni_callback = sni_cb

        server = ThreadedEchoServer(context=server_context, chatty=False)
        with server:
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                s.connect((HOST, server.port))
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                s.connect((HOST, server.port))
class TestEnumerations(unittest.TestCase):
def test_tlsversion(self):
class CheckedTLSVersion(enum.IntEnum):
MINIMUM_SUPPORTED = _ssl.PROTO_MINIMUM_SUPPORTED
SSLv3 = _ssl.PROTO_SSLv3
TLSv1 = _ssl.PROTO_TLSv1
TLSv1_1 = _ssl.PROTO_TLSv1_1
TLSv1_2 = _ssl.PROTO_TLSv1_2
TLSv1_3 = _ssl.PROTO_TLSv1_3
MAXIMUM_SUPPORTED = _ssl.PROTO_MAXIMUM_SUPPORTED
enum._test_simple_enum(CheckedTLSVersion, TLSVersion)
def test_tlscontenttype(self):
class Checked_TLSContentType(enum.IntEnum):
"""Content types (record layer)
See RFC 8446, section B.1
"""
CHANGE_CIPHER_SPEC = 20
ALERT = 21
HANDSHAKE = 22
APPLICATION_DATA = 23
# pseudo content types
HEADER = 0x100
INNER_CONTENT_TYPE = 0x101
enum._test_simple_enum(Checked_TLSContentType, _TLSContentType)
def test_tlsalerttype(self):
class Checked_TLSAlertType(enum.IntEnum):
"""Alert types for TLSContentType.ALERT messages
See RFC 8466, section B.2
"""
CLOSE_NOTIFY = 0
UNEXPECTED_MESSAGE = 10
BAD_RECORD_MAC = 20
DECRYPTION_FAILED = 21
RECORD_OVERFLOW = 22
DECOMPRESSION_FAILURE = 30
HANDSHAKE_FAILURE = 40
NO_CERTIFICATE = 41
BAD_CERTIFICATE = 42
UNSUPPORTED_CERTIFICATE = 43
CERTIFICATE_REVOKED = 44
CERTIFICATE_EXPIRED = 45
CERTIFICATE_UNKNOWN = 46
ILLEGAL_PARAMETER = 47
UNKNOWN_CA = 48
ACCESS_DENIED = 49
DECODE_ERROR = 50
DECRYPT_ERROR = 51
EXPORT_RESTRICTION = 60
PROTOCOL_VERSION = 70
INSUFFICIENT_SECURITY = 71
INTERNAL_ERROR = 80
INAPPROPRIATE_FALLBACK = 86
USER_CANCELED = 90
NO_RENEGOTIATION = 100
MISSING_EXTENSION = 109
UNSUPPORTED_EXTENSION = 110
CERTIFICATE_UNOBTAINABLE = 111
UNRECOGNIZED_NAME = 112
BAD_CERTIFICATE_STATUS_RESPONSE = 113
BAD_CERTIFICATE_HASH_VALUE = 114
UNKNOWN_PSK_IDENTITY = 115
CERTIFICATE_REQUIRED = 116
NO_APPLICATION_PROTOCOL = 120
enum._test_simple_enum(Checked_TLSAlertType, _TLSAlertType)
def test_tlsmessagetype(self):
class Checked_TLSMessageType(enum.IntEnum):
"""Message types (handshake protocol)
See RFC 8446, section B.3
"""
HELLO_REQUEST = 0
CLIENT_HELLO = 1
SERVER_HELLO = 2
HELLO_VERIFY_REQUEST = 3
NEWSESSION_TICKET = 4
END_OF_EARLY_DATA = 5
HELLO_RETRY_REQUEST = 6
ENCRYPTED_EXTENSIONS = 8
CERTIFICATE = 11
SERVER_KEY_EXCHANGE = 12
CERTIFICATE_REQUEST = 13
SERVER_DONE = 14
CERTIFICATE_VERIFY = 15
CLIENT_KEY_EXCHANGE = 16
FINISHED = 20
CERTIFICATE_URL = 21
CERTIFICATE_STATUS = 22
SUPPLEMENTAL_DATA = 23
KEY_UPDATE = 24
NEXT_PROTO = 67
MESSAGE_HASH = 254
CHANGE_CIPHER_SPEC = 0x0101
enum._test_simple_enum(Checked_TLSMessageType, _TLSMessageType)
def test_sslmethod(self):
Checked_SSLMethod = enum._old_convert_(
enum.IntEnum, '_SSLMethod', 'ssl',
lambda name: name.startswith('PROTOCOL_') and name != 'PROTOCOL_SSLv23',
source=ssl._ssl,
)
enum._test_simple_enum(Checked_SSLMethod, ssl._SSLMethod)
def test_options(self):
CheckedOptions = enum._old_convert_(
enum.FlagEnum, 'Options', 'ssl',
lambda name: name.startswith('OP_'),
source=ssl._ssl,
)
enum._test_simple_enum(CheckedOptions, ssl.Options)
def test_alertdescription(self):
CheckedAlertDescription = enum._old_convert_(
enum.IntEnum, 'AlertDescription', 'ssl',
lambda name: name.startswith('ALERT_DESCRIPTION_'),
source=ssl._ssl,
)
enum._test_simple_enum(CheckedAlertDescription, ssl.AlertDescription)
def test_sslerrornumber(self):
Checked_SSLMethod = enum._old_convert_(
enum.IntEnum, '_SSLMethod', 'ssl',
lambda name: name.startswith('PROTOCOL_') and name != 'PROTOCOL_SSLv23',
source=ssl._ssl,
)
enum._test_simple_enum(Checked_SSLMethod, ssl._SSLMethod)
def test_verifyflags(self):
CheckedVerifyFlags = enum._old_convert_(
enum.FlagEnum, 'VerifyFlags', 'ssl',
lambda name: name.startswith('VERIFY_'),
source=ssl._ssl,
)
enum._test_simple_enum(CheckedVerifyFlags, ssl.VerifyFlags)
def test_verifymode(self):
CheckedVerifyMode = enum._old_convert_(
enum.IntEnum, 'VerifyMode', 'ssl',
lambda name: name.startswith('CERT_'),
source=ssl._ssl,
)
enum._test_simple_enum(CheckedVerifyMode, ssl.VerifyMode)
def test_main(verbose=False):
    """Module entry point.

    When -v is given, print platform and OpenSSL diagnostics; verify
    all certificate fixture files exist; then run the test classes
    (adding NetworkedTests only when the 'network' resource is
    enabled) with thread-leak checks wrapped around the run.
    """
    if support.verbose:
        plats = {
            'Mac': platform.mac_ver,
            'Windows': platform.win32_ver,
        }
        for name, func in plats.items():
            plat = func()
            if plat and plat[0]:
                plat = '%s %r' % (name, plat)
                break
        else:
            plat = repr(platform.platform())
        print("test_ssl: testing with %r %r" %
              (ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO))
        print(" under %s" % plat)
        print(" HAS_SNI = %r" % ssl.HAS_SNI)
        print(" OP_ALL = 0x%8x" % ssl.OP_ALL)
        try:
            print(" OP_NO_TLSv1_1 = 0x%8x" % ssl.OP_NO_TLSv1_1)
        except AttributeError:
            pass

    for filename in [
            CERTFILE, BYTES_CERTFILE,
            ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY,
            SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA,
            BADCERT, BADKEY, EMPTYCERT]:
        if not os.path.exists(filename):
            raise support.TestFailed("Can't read certificate file %r" % filename)

    tests = [
        ContextTests, BasicSocketTests, SSLErrorTests, MemoryBIOTests,
        SSLObjectTests, SimpleBackgroundTests, ThreadedTests,
        TestPostHandshakeAuth, TestSSLDebug
    ]

    if support.is_resource_enabled('network'):
        tests.append(NetworkedTests)

    thread_info = threading_helper.threading_setup()
    try:
        support.run_unittest(*tests)
    finally:
        threading_helper.threading_cleanup(*thread_info)

if __name__ == "__main__":
    test_main()
|
pickletester.py | import collections
import copyreg
import dbm
import io
import functools
import os
import math
import pickle
import pickletools
import shutil
import struct
import sys
import threading
import unittest
import weakref
from textwrap import dedent
from http.cookies import SimpleCookie
try:
import _testbuffer
except ImportError:
_testbuffer = None
from test import support
from test.support import os_helper
from test.support import (
TestFailed, run_with_locale, no_tracing,
_2G, _4G, bigmemtest
)
from test.support.import_helper import forget
from test.support.os_helper import TESTFN
from test.support import threading_helper
from test.support.warnings_helper import save_restore_warnings_filters
from pickle import bytes_types
# bpo-41003: Save/restore warnings filters to leave them unchanged.
# Ignore filters installed by numpy.
try:
with save_restore_warnings_filters():
import numpy as np
except ImportError:
np = None
requires_32b = unittest.skipUnless(sys.maxsize < 2**32,
"test is only meaningful on 32-bit builds")
# Tests that try a number of pickle protocols should have a
# for proto in protocols:
# kind of outer loop.
protocols = range(pickle.HIGHEST_PROTOCOL + 1)
def opcode_in_pickle(code, pickle):
    """Return True if opcode *code* appears in the pickle, else False."""
    target = code.decode("latin-1")
    return any(op.code == target
               for op, _, _ in pickletools.genops(pickle))
def count_opcode(code, pickle):
    """Return the number of times opcode *code* appears in *pickle*."""
    target = code.decode("latin-1")
    return sum(1 for op, _, _ in pickletools.genops(pickle)
               if op.code == target)
def identity(x):
    """Return *x* unchanged (identity function used as a callback)."""
    return x
class UnseekableIO(io.BytesIO):
    """A BytesIO that reports itself as unseekable and rejects
    peek()/seek()/tell(), for testing streaming (un)pickling."""

    def peek(self, *args):
        raise NotImplementedError

    def seekable(self):
        return False

    def seek(self, *args):
        raise io.UnsupportedOperation

    def tell(self):
        raise io.UnsupportedOperation
class MinimalIO(object):
    """
    A file-like object that doesn't support readinto().
    """

    def __init__(self, *args):
        self._bio = io.BytesIO(*args)
        # Re-export only the methods the tests need — deliberately
        # omitting readinto() so buffer-based fast paths are skipped.
        for name in ("getvalue", "read", "readline", "write"):
            setattr(self, name, getattr(self._bio, name))
# We can't very well test the extension registry without putting known stuff
# in it, but we have to be careful to restore its original state. Code
# should do this:
#
# e = ExtensionSaver(extension_code)
# try:
# fiddle w/ the extension registry's stuff for extension_code
# finally:
# e.restore()
class ExtensionSaver:
    """Save and restore the copyreg extension-registry entry for one code.

    See the usage note above: construct before fiddling with the
    registry for *code*, and call restore() in a finally block.
    """

    # Remember current registration for code (if any), and remove it (if
    # there is one).
    def __init__(self, code):
        self.code = code
        if code in copyreg._inverted_registry:
            self.pair = copyreg._inverted_registry[code]
            copyreg.remove_extension(self.pair[0], self.pair[1], code)
        else:
            self.pair = None

    # Restore previous registration for code.
    def restore(self):
        code = self.code
        # Drop whatever the test registered under this code...
        curpair = copyreg._inverted_registry.get(code)
        if curpair is not None:
            copyreg.remove_extension(curpair[0], curpair[1], code)
        # ...and put back the original registration, if there was one.
        pair = self.pair
        if pair is not None:
            copyreg.add_extension(pair[0], pair[1], code)
class C:
    # Equality by attribute dict; base class for D and E below.
    def __eq__(self, other):
        return self.__dict__ == other.__dict__
class D(C):
    # __init__ takes one (ignored) positional argument.
    def __init__(self, arg):
        pass
class E(C):
    # Provides pickle's __getinitargs__ hook, returning no arguments.
    def __getinitargs__(self):
        return ()
# Simple mutable object.
class Object:
    """Empty namespace object used as a mutable pickling payload."""
    pass
# Hashable immutable key object containing unhashable mutable data.
class K:
    def __init__(self, value):
        self.value = value

    def __reduce__(self):
        # Shouldn't support the recursion itself
        return K, (self.value,)
# Expose C, D and E as attributes of __main__ under that module name so
# that pickles referencing "__main__.C" etc. (as the DATA* samples
# below do) resolve regardless of this module's real import name.
import __main__
__main__.C = C
C.__module__ = "__main__"
__main__.D = D
D.__module__ = "__main__"
__main__.E = E
E.__module__ = "__main__"
class myint(int):
    # int subclass carrying extra instance state (its string form).
    def __init__(self, x):
        self.str = str(x)
class initarg(C):
    # Re-creates itself via pickle's __getinitargs__ hook with (a, b).
    def __init__(self, a, b):
        self.a = a
        self.b = b

    def __getinitargs__(self):
        return self.a, self.b
class metaclass(type):
    # Minimal custom metaclass.
    pass

class use_metaclass(object, metaclass=metaclass):
    # Ordinary class whose type is the custom metaclass above.
    pass
class pickling_metaclass(type):
    # Metaclass whose classes pickle through create_dynamic_class().
    # NOTE(review): defining __eq__ without __hash__ makes the produced
    # classes unhashable — presumably intentional here; confirm.
    def __eq__(self, other):
        return (type(self) == type(other) and
                self.reduce_args == other.reduce_args)

    def __reduce__(self):
        return (create_dynamic_class, self.reduce_args)
def create_dynamic_class(name, bases):
    # Reconstructor used by pickling_metaclass.__reduce__: rebuilds the
    # class and re-attaches the args needed to reduce it again.
    result = pickling_metaclass(name, bases, dict())
    result.reduce_args = (name, bases)
    return result
class ZeroCopyBytes(bytes):
    """bytes subclass that pickles itself through a PickleBuffer under
    protocol 5+, enabling out-of-band (zero-copy) serialization."""
    readonly = True
    c_contiguous = True
    f_contiguous = True
    zero_copy_reconstruct = True

    def __reduce_ex__(self, protocol):
        if protocol >= 5:
            # Trailing None: no state for __setstate__.
            return type(self)._reconstruct, (pickle.PickleBuffer(self),), None
        else:
            return type(self)._reconstruct, (bytes(self),)

    def __repr__(self):
        return "{}({!r})".format(self.__class__.__name__, bytes(self))

    __str__ = __repr__

    @classmethod
    def _reconstruct(cls, obj):
        with memoryview(obj) as m:
            obj = m.obj
            if type(obj) is cls:
                # Zero-copy
                return obj
            else:
                return cls(obj)
class ZeroCopyBytearray(bytearray):
    """bytearray counterpart of ZeroCopyBytes: pickles through a
    PickleBuffer under protocol 5+ for out-of-band serialization."""
    readonly = False
    c_contiguous = True
    f_contiguous = True
    zero_copy_reconstruct = True

    def __reduce_ex__(self, protocol):
        if protocol >= 5:
            # Trailing None: no state for __setstate__.
            return type(self)._reconstruct, (pickle.PickleBuffer(self),), None
        else:
            return type(self)._reconstruct, (bytes(self),)

    def __repr__(self):
        return "{}({!r})".format(self.__class__.__name__, bytes(self))

    __str__ = __repr__

    @classmethod
    def _reconstruct(cls, obj):
        with memoryview(obj) as m:
            obj = m.obj
            if type(obj) is cls:
                # Zero-copy
                return obj
            else:
                return cls(obj)
if _testbuffer is not None:

    class PicklableNDArray:
        # A not-really-zero-copy picklable ndarray, as the ndarray()
        # constructor doesn't allow for it
        zero_copy_reconstruct = False

        def __init__(self, *args, **kwargs):
            self.array = _testbuffer.ndarray(*args, **kwargs)

        def __getitem__(self, idx):
            # Wrap sub-views in the same class.
            cls = type(self)
            new = cls.__new__(cls)
            new.array = self.array[idx]
            return new

        @property
        def readonly(self):
            return self.array.readonly

        @property
        def c_contiguous(self):
            return self.array.c_contiguous

        @property
        def f_contiguous(self):
            return self.array.f_contiguous

        def __eq__(self, other):
            # Equal when format, shape, strides, writability and raw
            # contents all match.
            if not isinstance(other, PicklableNDArray):
                return NotImplemented
            return (other.array.format == self.array.format and
                    other.array.shape == self.array.shape and
                    other.array.strides == self.array.strides and
                    other.array.readonly == self.array.readonly and
                    other.array.tobytes() == self.array.tobytes())

        def __ne__(self, other):
            if not isinstance(other, PicklableNDArray):
                return NotImplemented
            return not (self == other)

        def __repr__(self):
            return (f"{type(self)}(shape={self.array.shape},"
                    f"strides={self.array.strides}, "
                    f"bytes={self.array.tobytes()})")

        def __reduce_ex__(self, protocol):
            if not self.array.contiguous:
                raise NotImplementedError("Reconstructing a non-contiguous "
                                          "ndarray does not seem possible")
            ndarray_kwargs = {"shape": self.array.shape,
                              "strides": self.array.strides,
                              "format": self.array.format,
                              "flags": (0 if self.readonly
                                        else _testbuffer.ND_WRITABLE)}
            pb = pickle.PickleBuffer(self.array)
            if protocol >= 5:
                return (type(self)._reconstruct,
                        (pb, ndarray_kwargs))
            else:
                # Need to serialize the bytes in physical order
                with pb.raw() as m:
                    return (type(self)._reconstruct,
                            (m.tobytes(), ndarray_kwargs))

        @classmethod
        def _reconstruct(cls, obj, kwargs):
            with memoryview(obj) as m:
                # For some reason, ndarray() wants a list of integers...
                # XXX This only works if format == 'B'
                items = list(m.tobytes())
            return cls(items, **kwargs)
# DATA0 .. DATA4 are the pickles we expect under the various protocols, for
# the object returned by create_data().
DATA0 = (
b'(lp0\nL0L\naL1L\naF2.0\n'
b'ac__builtin__\ncomple'
b'x\np1\n(F3.0\nF0.0\ntp2\n'
b'Rp3\naL1L\naL-1L\naL255'
b'L\naL-255L\naL-256L\naL'
b'65535L\naL-65535L\naL-'
b'65536L\naL2147483647L'
b'\naL-2147483647L\naL-2'
b'147483648L\na(Vabc\np4'
b'\ng4\nccopy_reg\n_recon'
b'structor\np5\n(c__main'
b'__\nC\np6\nc__builtin__'
b'\nobject\np7\nNtp8\nRp9\n'
b'(dp10\nVfoo\np11\nL1L\ns'
b'Vbar\np12\nL2L\nsbg9\ntp'
b'13\nag13\naL5L\na.'
)
# Disassembly of DATA0
DATA0_DIS = """\
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: L LONG 0
9: a APPEND
10: L LONG 1
14: a APPEND
15: F FLOAT 2.0
20: a APPEND
21: c GLOBAL '__builtin__ complex'
42: p PUT 1
45: ( MARK
46: F FLOAT 3.0
51: F FLOAT 0.0
56: t TUPLE (MARK at 45)
57: p PUT 2
60: R REDUCE
61: p PUT 3
64: a APPEND
65: L LONG 1
69: a APPEND
70: L LONG -1
75: a APPEND
76: L LONG 255
82: a APPEND
83: L LONG -255
90: a APPEND
91: L LONG -256
98: a APPEND
99: L LONG 65535
107: a APPEND
108: L LONG -65535
117: a APPEND
118: L LONG -65536
127: a APPEND
128: L LONG 2147483647
141: a APPEND
142: L LONG -2147483647
156: a APPEND
157: L LONG -2147483648
171: a APPEND
172: ( MARK
173: V UNICODE 'abc'
178: p PUT 4
181: g GET 4
184: c GLOBAL 'copy_reg _reconstructor'
209: p PUT 5
212: ( MARK
213: c GLOBAL '__main__ C'
225: p PUT 6
228: c GLOBAL '__builtin__ object'
248: p PUT 7
251: N NONE
252: t TUPLE (MARK at 212)
253: p PUT 8
256: R REDUCE
257: p PUT 9
260: ( MARK
261: d DICT (MARK at 260)
262: p PUT 10
266: V UNICODE 'foo'
271: p PUT 11
275: L LONG 1
279: s SETITEM
280: V UNICODE 'bar'
285: p PUT 12
289: L LONG 2
293: s SETITEM
294: b BUILD
295: g GET 9
298: t TUPLE (MARK at 172)
299: p PUT 13
303: a APPEND
304: g GET 13
308: a APPEND
309: L LONG 5
313: a APPEND
314: . STOP
highest protocol among opcodes = 0
"""
DATA1 = (
b']q\x00(K\x00K\x01G@\x00\x00\x00\x00\x00\x00\x00c__'
b'builtin__\ncomplex\nq\x01'
b'(G@\x08\x00\x00\x00\x00\x00\x00G\x00\x00\x00\x00\x00\x00\x00\x00t'
b'q\x02Rq\x03K\x01J\xff\xff\xff\xffK\xffJ\x01\xff\xff\xffJ'
b'\x00\xff\xff\xffM\xff\xffJ\x01\x00\xff\xffJ\x00\x00\xff\xffJ\xff\xff'
b'\xff\x7fJ\x01\x00\x00\x80J\x00\x00\x00\x80(X\x03\x00\x00\x00ab'
b'cq\x04h\x04ccopy_reg\n_reco'
b'nstructor\nq\x05(c__main'
b'__\nC\nq\x06c__builtin__\n'
b'object\nq\x07Ntq\x08Rq\t}q\n('
b'X\x03\x00\x00\x00fooq\x0bK\x01X\x03\x00\x00\x00bar'
b'q\x0cK\x02ubh\ttq\rh\rK\x05e.'
)
# Disassembly of DATA1
DATA1_DIS = """\
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: K BININT1 0
6: K BININT1 1
8: G BINFLOAT 2.0
17: c GLOBAL '__builtin__ complex'
38: q BINPUT 1
40: ( MARK
41: G BINFLOAT 3.0
50: G BINFLOAT 0.0
59: t TUPLE (MARK at 40)
60: q BINPUT 2
62: R REDUCE
63: q BINPUT 3
65: K BININT1 1
67: J BININT -1
72: K BININT1 255
74: J BININT -255
79: J BININT -256
84: M BININT2 65535
87: J BININT -65535
92: J BININT -65536
97: J BININT 2147483647
102: J BININT -2147483647
107: J BININT -2147483648
112: ( MARK
113: X BINUNICODE 'abc'
121: q BINPUT 4
123: h BINGET 4
125: c GLOBAL 'copy_reg _reconstructor'
150: q BINPUT 5
152: ( MARK
153: c GLOBAL '__main__ C'
165: q BINPUT 6
167: c GLOBAL '__builtin__ object'
187: q BINPUT 7
189: N NONE
190: t TUPLE (MARK at 152)
191: q BINPUT 8
193: R REDUCE
194: q BINPUT 9
196: } EMPTY_DICT
197: q BINPUT 10
199: ( MARK
200: X BINUNICODE 'foo'
208: q BINPUT 11
210: K BININT1 1
212: X BINUNICODE 'bar'
220: q BINPUT 12
222: K BININT1 2
224: u SETITEMS (MARK at 199)
225: b BUILD
226: h BINGET 9
228: t TUPLE (MARK at 112)
229: q BINPUT 13
231: h BINGET 13
233: K BININT1 5
235: e APPENDS (MARK at 3)
236: . STOP
highest protocol among opcodes = 1
"""
DATA2 = (
b'\x80\x02]q\x00(K\x00K\x01G@\x00\x00\x00\x00\x00\x00\x00c'
b'__builtin__\ncomplex\n'
b'q\x01G@\x08\x00\x00\x00\x00\x00\x00G\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x86q\x02Rq\x03K\x01J\xff\xff\xff\xffK\xffJ\x01\xff\xff\xff'
b'J\x00\xff\xff\xffM\xff\xffJ\x01\x00\xff\xffJ\x00\x00\xff\xffJ\xff'
b'\xff\xff\x7fJ\x01\x00\x00\x80J\x00\x00\x00\x80(X\x03\x00\x00\x00a'
b'bcq\x04h\x04c__main__\nC\nq\x05'
b')\x81q\x06}q\x07(X\x03\x00\x00\x00fooq\x08K\x01'
b'X\x03\x00\x00\x00barq\tK\x02ubh\x06tq\nh'
b'\nK\x05e.'
)
# Disassembly of DATA2
DATA2_DIS = """\
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: ( MARK
6: K BININT1 0
8: K BININT1 1
10: G BINFLOAT 2.0
19: c GLOBAL '__builtin__ complex'
40: q BINPUT 1
42: G BINFLOAT 3.0
51: G BINFLOAT 0.0
60: \x86 TUPLE2
61: q BINPUT 2
63: R REDUCE
64: q BINPUT 3
66: K BININT1 1
68: J BININT -1
73: K BININT1 255
75: J BININT -255
80: J BININT -256
85: M BININT2 65535
88: J BININT -65535
93: J BININT -65536
98: J BININT 2147483647
103: J BININT -2147483647
108: J BININT -2147483648
113: ( MARK
114: X BINUNICODE 'abc'
122: q BINPUT 4
124: h BINGET 4
126: c GLOBAL '__main__ C'
138: q BINPUT 5
140: ) EMPTY_TUPLE
141: \x81 NEWOBJ
142: q BINPUT 6
144: } EMPTY_DICT
145: q BINPUT 7
147: ( MARK
148: X BINUNICODE 'foo'
156: q BINPUT 8
158: K BININT1 1
160: X BINUNICODE 'bar'
168: q BINPUT 9
170: K BININT1 2
172: u SETITEMS (MARK at 147)
173: b BUILD
174: h BINGET 6
176: t TUPLE (MARK at 113)
177: q BINPUT 10
179: h BINGET 10
181: K BININT1 5
183: e APPENDS (MARK at 5)
184: . STOP
highest protocol among opcodes = 2
"""
DATA3 = (
b'\x80\x03]q\x00(K\x00K\x01G@\x00\x00\x00\x00\x00\x00\x00c'
b'builtins\ncomplex\nq\x01G'
b'@\x08\x00\x00\x00\x00\x00\x00G\x00\x00\x00\x00\x00\x00\x00\x00\x86q\x02'
b'Rq\x03K\x01J\xff\xff\xff\xffK\xffJ\x01\xff\xff\xffJ\x00\xff'
b'\xff\xffM\xff\xffJ\x01\x00\xff\xffJ\x00\x00\xff\xffJ\xff\xff\xff\x7f'
b'J\x01\x00\x00\x80J\x00\x00\x00\x80(X\x03\x00\x00\x00abcq'
b'\x04h\x04c__main__\nC\nq\x05)\x81q'
b'\x06}q\x07(X\x03\x00\x00\x00barq\x08K\x02X\x03\x00'
b'\x00\x00fooq\tK\x01ubh\x06tq\nh\nK\x05'
b'e.'
)
# Disassembly of DATA3
DATA3_DIS = """\
0: \x80 PROTO 3
2: ] EMPTY_LIST
3: q BINPUT 0
5: ( MARK
6: K BININT1 0
8: K BININT1 1
10: G BINFLOAT 2.0
19: c GLOBAL 'builtins complex'
37: q BINPUT 1
39: G BINFLOAT 3.0
48: G BINFLOAT 0.0
57: \x86 TUPLE2
58: q BINPUT 2
60: R REDUCE
61: q BINPUT 3
63: K BININT1 1
65: J BININT -1
70: K BININT1 255
72: J BININT -255
77: J BININT -256
82: M BININT2 65535
85: J BININT -65535
90: J BININT -65536
95: J BININT 2147483647
100: J BININT -2147483647
105: J BININT -2147483648
110: ( MARK
111: X BINUNICODE 'abc'
119: q BINPUT 4
121: h BINGET 4
123: c GLOBAL '__main__ C'
135: q BINPUT 5
137: ) EMPTY_TUPLE
138: \x81 NEWOBJ
139: q BINPUT 6
141: } EMPTY_DICT
142: q BINPUT 7
144: ( MARK
145: X BINUNICODE 'bar'
153: q BINPUT 8
155: K BININT1 2
157: X BINUNICODE 'foo'
165: q BINPUT 9
167: K BININT1 1
169: u SETITEMS (MARK at 144)
170: b BUILD
171: h BINGET 6
173: t TUPLE (MARK at 110)
174: q BINPUT 10
176: h BINGET 10
178: K BININT1 5
180: e APPENDS (MARK at 5)
181: . STOP
highest protocol among opcodes = 2
"""
DATA4 = (
b'\x80\x04\x95\xa8\x00\x00\x00\x00\x00\x00\x00]\x94(K\x00K\x01G@'
b'\x00\x00\x00\x00\x00\x00\x00\x8c\x08builtins\x94\x8c\x07'
b'complex\x94\x93\x94G@\x08\x00\x00\x00\x00\x00\x00G'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x86\x94R\x94K\x01J\xff\xff\xff\xffK'
b'\xffJ\x01\xff\xff\xffJ\x00\xff\xff\xffM\xff\xffJ\x01\x00\xff\xffJ'
b'\x00\x00\xff\xffJ\xff\xff\xff\x7fJ\x01\x00\x00\x80J\x00\x00\x00\x80('
b'\x8c\x03abc\x94h\x06\x8c\x08__main__\x94\x8c'
b'\x01C\x94\x93\x94)\x81\x94}\x94(\x8c\x03bar\x94K\x02\x8c'
b'\x03foo\x94K\x01ubh\nt\x94h\x0eK\x05e.'
)
# Disassembly of DATA4
DATA4_DIS = """\
0: \x80 PROTO 4
2: \x95 FRAME 168
11: ] EMPTY_LIST
12: \x94 MEMOIZE
13: ( MARK
14: K BININT1 0
16: K BININT1 1
18: G BINFLOAT 2.0
27: \x8c SHORT_BINUNICODE 'builtins'
37: \x94 MEMOIZE
38: \x8c SHORT_BINUNICODE 'complex'
47: \x94 MEMOIZE
48: \x93 STACK_GLOBAL
49: \x94 MEMOIZE
50: G BINFLOAT 3.0
59: G BINFLOAT 0.0
68: \x86 TUPLE2
69: \x94 MEMOIZE
70: R REDUCE
71: \x94 MEMOIZE
72: K BININT1 1
74: J BININT -1
79: K BININT1 255
81: J BININT -255
86: J BININT -256
91: M BININT2 65535
94: J BININT -65535
99: J BININT -65536
104: J BININT 2147483647
109: J BININT -2147483647
114: J BININT -2147483648
119: ( MARK
120: \x8c SHORT_BINUNICODE 'abc'
125: \x94 MEMOIZE
126: h BINGET 6
128: \x8c SHORT_BINUNICODE '__main__'
138: \x94 MEMOIZE
139: \x8c SHORT_BINUNICODE 'C'
142: \x94 MEMOIZE
143: \x93 STACK_GLOBAL
144: \x94 MEMOIZE
145: ) EMPTY_TUPLE
146: \x81 NEWOBJ
147: \x94 MEMOIZE
148: } EMPTY_DICT
149: \x94 MEMOIZE
150: ( MARK
151: \x8c SHORT_BINUNICODE 'bar'
156: \x94 MEMOIZE
157: K BININT1 2
159: \x8c SHORT_BINUNICODE 'foo'
164: \x94 MEMOIZE
165: K BININT1 1
167: u SETITEMS (MARK at 150)
168: b BUILD
169: h BINGET 10
171: t TUPLE (MARK at 119)
172: \x94 MEMOIZE
173: h BINGET 14
175: K BININT1 5
177: e APPENDS (MARK at 13)
178: . STOP
highest protocol among opcodes = 4
"""
# set([1,2]) pickled from 2.x with protocol 2
DATA_SET = b'\x80\x02c__builtin__\nset\nq\x00]q\x01(K\x01K\x02e\x85q\x02Rq\x03.'
# xrange(5) pickled from 2.x with protocol 2
DATA_XRANGE = b'\x80\x02c__builtin__\nxrange\nq\x00K\x00K\x05K\x01\x87q\x01Rq\x02.'
# a SimpleCookie() object pickled from 2.x with protocol 2
DATA_COOKIE = (b'\x80\x02cCookie\nSimpleCookie\nq\x00)\x81q\x01U\x03key'
b'q\x02cCookie\nMorsel\nq\x03)\x81q\x04(U\x07commentq\x05U'
b'\x00q\x06U\x06domainq\x07h\x06U\x06secureq\x08h\x06U\x07'
b'expiresq\th\x06U\x07max-ageq\nh\x06U\x07versionq\x0bh\x06U'
b'\x04pathq\x0ch\x06U\x08httponlyq\rh\x06u}q\x0e(U\x0b'
b'coded_valueq\x0fU\x05valueq\x10h\x10h\x10h\x02h\x02ubs}q\x11b.')
# set([3]) pickled from 2.x with protocol 2
DATA_SET2 = b'\x80\x02c__builtin__\nset\nq\x00]q\x01K\x03a\x85q\x02Rq\x03.'
python2_exceptions_without_args = (
ArithmeticError,
AssertionError,
AttributeError,
BaseException,
BufferError,
BytesWarning,
DeprecationWarning,
EOFError,
EnvironmentError,
Exception,
FloatingPointError,
FutureWarning,
GeneratorExit,
IOError,
ImportError,
ImportWarning,
IndentationError,
IndexError,
KeyError,
KeyboardInterrupt,
LookupError,
MemoryError,
NameError,
NotImplementedError,
OSError,
OverflowError,
PendingDeprecationWarning,
ReferenceError,
RuntimeError,
RuntimeWarning,
# StandardError is gone in Python 3; we map it to Exception
StopIteration,
SyntaxError,
SyntaxWarning,
SystemError,
SystemExit,
TabError,
TypeError,
UnboundLocalError,
UnicodeError,
UnicodeWarning,
UserWarning,
ValueError,
Warning,
ZeroDivisionError,
)
exception_pickle = b'\x80\x02cexceptions\n?\nq\x00)Rq\x01.'
# UnicodeEncodeError object pickled from 2.x with protocol 2
DATA_UEERR = (b'\x80\x02cexceptions\nUnicodeEncodeError\n'
b'q\x00(U\x05asciiq\x01X\x03\x00\x00\x00fooq\x02K\x00K\x01'
b'U\x03badq\x03tq\x04Rq\x05.')
def create_data():
    """Build the reference object graph that the canned DATA* pickles encode.

    Returns a list mixing numeric types, boundary integers at cPickle.c's
    internal size cutoffs, and one shared tuple appended twice (to exercise
    memoization), terminated by the int 5.
    """
    obj = C()
    obj.foo = 1
    obj.bar = 2
    data = [0, 1, 2.0, 3.0 + 0j]
    # Integer test cases straddling cPickle.c's internal size cutoffs
    # (1-byte, 2-byte and 4-byte encodings).
    data.extend([1, -1])
    for bound in (0xff, 0xffff, 0x7fffffff):
        data.extend([bound, -bound, -bound - 1])
    # The same tuple object appended twice so pickling must memoize it.
    shared = ('abc', 'abc', obj, obj)
    data += [shared, shared, 5]
    return data
class AbstractUnpickleTests(unittest.TestCase):
    # Unpickling-only tests.  Subclass must define self.loads.

    # Reference payload; must stay in sync with the canned DATA0..DATA4
    # pickles defined above.
    _testdata = create_data()
    def assert_is_copy(self, obj, objcopy, msg=None):
        """Utility method to verify if two objects are copies of each other.

        "Copy" here means equal value AND exactly the same type, with
        attribute storage duplicated rather than shared.
        """
        if msg is None:
            msg = "{!r} is not a copy of {!r}".format(obj, objcopy)
        self.assertEqual(obj, objcopy, msg=msg)
        self.assertIs(type(obj), type(objcopy), msg=msg)
        if hasattr(obj, '__dict__'):
            # Same attribute contents, but distinct dict objects — a true
            # copy, not the original instance dict re-used.
            self.assertDictEqual(obj.__dict__, objcopy.__dict__, msg=msg)
            self.assertIsNot(obj.__dict__, objcopy.__dict__, msg=msg)
        if hasattr(obj, '__slots__'):
            self.assertListEqual(obj.__slots__, objcopy.__slots__, msg=msg)
            for slot in obj.__slots__:
                # A slot may legitimately be unset; both sides must agree
                # on whether it is set, and on its value if it is.
                self.assertEqual(
                    hasattr(obj, slot), hasattr(objcopy, slot), msg=msg)
                self.assertEqual(getattr(obj, slot, None),
                                 getattr(objcopy, slot, None), msg=msg)
    def check_unpickling_error(self, errors, data):
        """Assert that unpickling *data* raises one of *errors*.

        In verbose mode (support.verbose > 1) the offending pickle and the
        raised exception are printed before re-raising; assertRaises then
        consumes the exception as usual.
        """
        with self.subTest(data=data), \
             self.assertRaises(errors):
            try:
                self.loads(data)
            except BaseException as exc:
                if support.verbose > 1:
                    print('%-32r - %s: %s' %
                          (data, exc.__class__.__name__, exc))
                raise
    # Round-trip checks against the canned pickles of _testdata, one per
    # pickle protocol 0 through 4.

    def test_load_from_data0(self):
        self.assert_is_copy(self._testdata, self.loads(DATA0))

    def test_load_from_data1(self):
        self.assert_is_copy(self._testdata, self.loads(DATA1))

    def test_load_from_data2(self):
        self.assert_is_copy(self._testdata, self.loads(DATA2))

    def test_load_from_data3(self):
        self.assert_is_copy(self._testdata, self.loads(DATA3))

    def test_load_from_data4(self):
        self.assert_is_copy(self._testdata, self.loads(DATA4))
    def test_load_classic_instance(self):
        # See issue5180.  Test loading 2.x pickles that
        # contain an instance of old style class.
        # The b'X' placeholder in each template is replaced by the actual
        # class name before loading.
        for X, args in [(C, ()), (D, ('x',)), (E, ())]:
            xname = X.__name__.encode('ascii')
            # Protocol 0 (text mode pickle):
            """
            0: ( MARK
            1: i INST '__main__ X' (MARK at 0)
            13: p PUT 0
            16: ( MARK
            17: d DICT (MARK at 16)
            18: p PUT 1
            21: b BUILD
            22: . STOP
            """
            pickle0 = (b"(i__main__\n"
                       b"X\n"
                       b"p0\n"
                       b"(dp1\nb.").replace(b'X', xname)
            self.assert_is_copy(X(*args), self.loads(pickle0))

            # Protocol 1 (binary mode pickle)
            """
            0: ( MARK
            1: c GLOBAL '__main__ X'
            13: q BINPUT 0
            15: o OBJ (MARK at 0)
            16: q BINPUT 1
            18: } EMPTY_DICT
            19: q BINPUT 2
            21: b BUILD
            22: . STOP
            """
            pickle1 = (b'(c__main__\n'
                       b'X\n'
                       b'q\x00oq\x01}q\x02b.').replace(b'X', xname)
            self.assert_is_copy(X(*args), self.loads(pickle1))

            # Protocol 2 (pickle2 = b'\x80\x02' + pickle1)
            """
            0: \x80 PROTO 2
            2: ( MARK
            3: c GLOBAL '__main__ X'
            15: q BINPUT 0
            17: o OBJ (MARK at 2)
            18: q BINPUT 1
            20: } EMPTY_DICT
            21: q BINPUT 2
            23: b BUILD
            24: . STOP
            """
            pickle2 = (b'\x80\x02(c__main__\n'
                       b'X\n'
                       b'q\x00oq\x01}q\x02b.').replace(b'X', xname)
            self.assert_is_copy(X(*args), self.loads(pickle2))
    def test_maxint64(self):
        # A 64-bit INT opcode must round-trip; trailing junk after the
        # decimal literal must be rejected.
        maxint64 = (1 << 63) - 1
        data = b'I' + str(maxint64).encode("ascii") + b'\n.'
        got = self.loads(data)
        self.assert_is_copy(maxint64, got)

        # Try too with a bogus literal.
        data = b'I' + str(maxint64).encode("ascii") + b'JUNK\n.'
        self.check_unpickling_error(ValueError, data)
    def test_unpickle_from_2x(self):
        # Unpickle non-trivial data from Python 2.x.
        loaded = self.loads(DATA_SET)
        self.assertEqual(loaded, set([1, 2]))
        # 2.x xrange maps to 3.x range.
        loaded = self.loads(DATA_XRANGE)
        self.assertEqual(type(loaded), type(range(0)))
        self.assertEqual(list(loaded), list(range(5)))
        loaded = self.loads(DATA_COOKIE)
        self.assertEqual(type(loaded), SimpleCookie)
        self.assertEqual(list(loaded.keys()), ["key"])
        self.assertEqual(loaded["key"].value, "value")

        # Exception objects without arguments pickled from 2.x with protocol 2
        for exc in python2_exceptions_without_args:
            data = exception_pickle.replace(b'?', exc.__name__.encode("ascii"))
            loaded = self.loads(data)
            self.assertIs(type(loaded), exc)

        # StandardError is mapped to Exception, test that separately
        loaded = self.loads(exception_pickle.replace(b'?', b'StandardError'))
        self.assertIs(type(loaded), Exception)

        # A UnicodeEncodeError keeps all five constructor arguments.
        loaded = self.loads(DATA_UEERR)
        self.assertIs(type(loaded), UnicodeEncodeError)
        self.assertEqual(loaded.object, "foo")
        self.assertEqual(loaded.encoding, "ascii")
        self.assertEqual(loaded.start, 0)
        self.assertEqual(loaded.end, 1)
        self.assertEqual(loaded.reason, "bad")
    # With encoding="bytes", 2.x str pickles decode to bytes and 2.x
    # unicode pickles decode to str, at every protocol and opcode width.

    def test_load_python2_str_as_bytes(self):
        # From Python 2: pickle.dumps('a\x00\xa0', protocol=0)
        self.assertEqual(self.loads(b"S'a\\x00\\xa0'\n.",
                                    encoding="bytes"), b'a\x00\xa0')
        # From Python 2: pickle.dumps('a\x00\xa0', protocol=1)
        self.assertEqual(self.loads(b'U\x03a\x00\xa0.',
                                    encoding="bytes"), b'a\x00\xa0')
        # From Python 2: pickle.dumps('a\x00\xa0', protocol=2)
        self.assertEqual(self.loads(b'\x80\x02U\x03a\x00\xa0.',
                                    encoding="bytes"), b'a\x00\xa0')

    def test_load_python2_unicode_as_str(self):
        # From Python 2: pickle.dumps(u'π', protocol=0)
        self.assertEqual(self.loads(b'V\\u03c0\n.',
                                    encoding='bytes'), 'π')
        # From Python 2: pickle.dumps(u'π', protocol=1)
        self.assertEqual(self.loads(b'X\x02\x00\x00\x00\xcf\x80.',
                                    encoding="bytes"), 'π')
        # From Python 2: pickle.dumps(u'π', protocol=2)
        self.assertEqual(self.loads(b'\x80\x02X\x02\x00\x00\x00\xcf\x80.',
                                    encoding="bytes"), 'π')

    def test_load_long_python2_str_as_bytes(self):
        # From Python 2: pickle.dumps('x' * 300, protocol=1)
        # 300 bytes exceeds SHORT_BINSTRING's 255-byte limit, forcing the
        # 4-byte-length BINSTRING opcode.
        self.assertEqual(self.loads(pickle.BINSTRING +
                                    struct.pack("<I", 300) +
                                    b'x' * 300 + pickle.STOP,
                                    encoding='bytes'), b'x' * 300)
def test_constants(self):
self.assertIsNone(self.loads(b'N.'))
self.assertIs(self.loads(b'\x88.'), True)
self.assertIs(self.loads(b'\x89.'), False)
self.assertIs(self.loads(b'I01\n.'), True)
self.assertIs(self.loads(b'I00\n.'), False)
    def test_empty_bytestring(self):
        # issue 11286: a zero-length SHORT_BINSTRING decodes to '' even
        # under an exotic codec.
        empty = self.loads(b'\x80\x03U\x00q\x00.', encoding='koi8-r')
        self.assertEqual(empty, '')

    def test_short_binbytes(self):
        # SHORT_BINBYTES: 1-byte length prefix.
        dumped = b'\x80\x03C\x04\xe2\x82\xac\x00.'
        self.assertEqual(self.loads(dumped), b'\xe2\x82\xac\x00')

    def test_binbytes(self):
        # BINBYTES: 4-byte length prefix.
        dumped = b'\x80\x03B\x04\x00\x00\x00\xe2\x82\xac\x00.'
        self.assertEqual(self.loads(dumped), b'\xe2\x82\xac\x00')

    @requires_32b
    def test_negative_32b_binbytes(self):
        # On 32-bit builds, a BINBYTES of 2**31 or more is refused
        dumped = b'\x80\x03B\xff\xff\xff\xffxyzq\x00.'
        self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
                                    dumped)

    @requires_32b
    def test_negative_32b_binunicode(self):
        # On 32-bit builds, a BINUNICODE of 2**31 or more is refused
        dumped = b'\x80\x03X\xff\xff\xff\xffxyzq\x00.'
        self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
                                    dumped)

    def test_short_binunicode(self):
        # SHORT_BINUNICODE (protocol 4): 1-byte length, UTF-8 payload.
        dumped = b'\x80\x04\x8c\x04\xe2\x82\xac\x00.'
        self.assertEqual(self.loads(dumped), '\u20ac\x00')
    def test_misc_get(self):
        # GET/BINGET/LONG_BINGET with a missing memo entry must fail;
        # a valid GET after PUT restores the identical object.
        self.check_unpickling_error(pickle.UnpicklingError, b'g0\np0')
        self.check_unpickling_error(pickle.UnpicklingError, b'jens:')
        self.check_unpickling_error(pickle.UnpicklingError, b'hens:')
        self.assert_is_copy([(100,), (100,)],
                            self.loads(b'((Kdtp0\nh\x00l.))'))
    # Protocol 4/5 opcodes with 8-byte length prefixes, plus 32-bit-build
    # overflow guards for lengths that exceed the address space.

    def test_binbytes8(self):
        dumped = b'\x80\x04\x8e\4\0\0\0\0\0\0\0\xe2\x82\xac\x00.'
        self.assertEqual(self.loads(dumped), b'\xe2\x82\xac\x00')

    def test_binunicode8(self):
        dumped = b'\x80\x04\x8d\4\0\0\0\0\0\0\0\xe2\x82\xac\x00.'
        self.assertEqual(self.loads(dumped), '\u20ac\x00')

    def test_bytearray8(self):
        dumped = b'\x80\x05\x96\x03\x00\x00\x00\x00\x00\x00\x00xxx.'
        self.assertEqual(self.loads(dumped), bytearray(b'xxx'))

    @requires_32b
    def test_large_32b_binbytes8(self):
        dumped = b'\x80\x04\x8e\4\0\0\0\1\0\0\0\xe2\x82\xac\x00.'
        self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
                                    dumped)

    @requires_32b
    def test_large_32b_bytearray8(self):
        dumped = b'\x80\x05\x96\4\0\0\0\1\0\0\0\xe2\x82\xac\x00.'
        self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
                                    dumped)

    @requires_32b
    def test_large_32b_binunicode8(self):
        dumped = b'\x80\x04\x8d\4\0\0\0\1\0\0\0\xe2\x82\xac\x00.'
        self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
                                    dumped)
    # Each memo-fetch opcode (GET, BINGET, LONG_BINGET, DUP) must return
    # the identical object, not an equal copy.

    def test_get(self):
        pickled = b'((lp100000\ng100000\nt.'
        unpickled = self.loads(pickled)
        self.assertEqual(unpickled, ([],)*2)
        self.assertIs(unpickled[0], unpickled[1])

    def test_binget(self):
        pickled = b'(]q\xffh\xfft.'
        unpickled = self.loads(pickled)
        self.assertEqual(unpickled, ([],)*2)
        self.assertIs(unpickled[0], unpickled[1])

    def test_long_binget(self):
        pickled = b'(]r\x00\x00\x01\x00j\x00\x00\x01\x00t.'
        unpickled = self.loads(pickled)
        self.assertEqual(unpickled, ([],)*2)
        self.assertIs(unpickled[0], unpickled[1])

    def test_dup(self):
        pickled = b'((l2t.'
        unpickled = self.loads(pickled)
        self.assertEqual(unpickled, ([],)*2)
        self.assertIs(unpickled[0], unpickled[1])
    def test_negative_put(self):
        # Issue #12847: a negative PUT index must be rejected.
        dumped = b'Va\np-1\n.'
        self.check_unpickling_error(ValueError, dumped)

    @requires_32b
    def test_negative_32b_binput(self):
        # Issue #12847: on 32-bit builds a LONG_BINPUT index with the sign
        # bit set must be rejected.
        dumped = b'\x80\x03X\x01\x00\x00\x00ar\xff\xff\xff\xff.'
        self.check_unpickling_error(ValueError, dumped)

    def test_badly_escaped_string(self):
        # A lone backslash at the end of a protocol-0 STRING is invalid.
        self.check_unpickling_error(ValueError, b"S'\\'\n.")
    def test_badly_quoted_string(self):
        # Issue #17710: a protocol-0 STRING payload whose quotes are
        # missing, mismatched, or not flush with the data must be refused.
        badpickles = [b"S'\n.",
                      b'S"\n.',
                      b'S\' \n.',
                      b'S" \n.',
                      b'S\'"\n.',
                      b'S"\'\n.',
                      b"S' ' \n.",
                      b'S" " \n.',
                      b"S ''\n.",
                      b'S ""\n.',
                      b'S \n.',
                      b'S\n.',
                      b'S.']
        for p in badpickles:
            self.check_unpickling_error(pickle.UnpicklingError, p)
def test_correctly_quoted_string(self):
goodpickles = [(b"S''\n.", ''),
(b'S""\n.', ''),
(b'S"\\n"\n.', '\n'),
(b"S'\\n'\n.", '\n')]
for p, expected in goodpickles:
self.assertEqual(self.loads(p), expected)
    def test_frame_readline(self):
        # A line-oriented opcode (INT) must be readable across a protocol-4
        # FRAME boundary.
        pickled = b'\x80\x04\x95\x05\x00\x00\x00\x00\x00\x00\x00I42\n.'
        #    0: \x80 PROTO      4
        #    2: \x95 FRAME      5
        #   11: I    INT        42
        #   15: .    STOP
        self.assertEqual(self.loads(pickled), 42)
    def test_compat_unpickle(self):
        # Names from Python 2 modules must be remapped (via the compat
        # mapping) to their Python 3 equivalents when loading.
        # xrange(1, 7)
        pickled = b'\x80\x02c__builtin__\nxrange\nK\x01K\x07K\x01\x87R.'
        unpickled = self.loads(pickled)
        self.assertIs(type(unpickled), range)
        self.assertEqual(unpickled, range(1, 7))
        self.assertEqual(list(unpickled), [1, 2, 3, 4, 5, 6])
        # reduce
        pickled = b'\x80\x02c__builtin__\nreduce\n.'
        self.assertIs(self.loads(pickled), functools.reduce)
        # whichdb.whichdb
        pickled = b'\x80\x02cwhichdb\nwhichdb\n.'
        self.assertIs(self.loads(pickled), dbm.whichdb)
        # Exception(), StandardError()
        for name in (b'Exception', b'StandardError'):
            pickled = (b'\x80\x02cexceptions\n' + name + b'\nU\x03ugh\x85R.')
            unpickled = self.loads(pickled)
            self.assertIs(type(unpickled), Exception)
            self.assertEqual(str(unpickled), 'ugh')
        # UserDict.UserDict({1: 2}), UserDict.IterableUserDict({1: 2})
        for name in (b'UserDict', b'IterableUserDict'):
            pickled = (b'\x80\x02(cUserDict\n' + name +
                       b'\no}U\x04data}K\x01K\x02ssb.')
            unpickled = self.loads(pickled)
            self.assertIs(type(unpickled), collections.UserDict)
            self.assertEqual(unpickled, collections.UserDict({1: 2}))
    # Object-construction opcodes must reject malformed callables/arguments
    # while still accepting the well-formed equivalents.

    def test_bad_reduce(self):
        self.assertEqual(self.loads(b'cbuiltins\nint\n)R.'), 0)
        self.check_unpickling_error(TypeError, b'N)R.')
        self.check_unpickling_error(TypeError, b'cbuiltins\nint\nNR.')

    def test_bad_newobj(self):
        error = (pickle.UnpicklingError, TypeError)
        self.assertEqual(self.loads(b'cbuiltins\nint\n)\x81.'), 0)
        # len is not a class, and None is not an argument tuple.
        self.check_unpickling_error(error, b'cbuiltins\nlen\n)\x81.')
        self.check_unpickling_error(error, b'cbuiltins\nint\nN\x81.')

    def test_bad_newobj_ex(self):
        error = (pickle.UnpicklingError, TypeError)
        self.assertEqual(self.loads(b'cbuiltins\nint\n)}\x92.'), 0)
        self.check_unpickling_error(error, b'cbuiltins\nlen\n)}\x92.')
        self.check_unpickling_error(error, b'cbuiltins\nint\nN}\x92.')
        self.check_unpickling_error(error, b'cbuiltins\nint\n)N\x92.')
    def test_bad_stack(self):
        # Each opcode below is fed fewer stack operands than it consumes;
        # all must fail with the subclass-specific stack errors.
        badpickles = [
            b'.',                       # STOP
            b'0',                       # POP
            b'1',                       # POP_MARK
            b'2',                       # DUP
            b'(2',
            b'R',                       # REDUCE
            b')R',
            b'a',                       # APPEND
            b'Na',
            b'b',                       # BUILD
            b'Nb',
            b'd',                       # DICT
            b'e',                       # APPENDS
            b'(e',
            b'ibuiltins\nlist\n',       # INST
            b'l',                       # LIST
            b'o',                       # OBJ
            b'(o',
            b'p1\n',                    # PUT
            b'q\x00',                   # BINPUT
            b'r\x00\x00\x00\x00',       # LONG_BINPUT
            b's',                       # SETITEM
            b'Ns',
            b'NNs',
            b't',                       # TUPLE
            b'u',                       # SETITEMS
            b'(u',
            b'}(Nu',
            b'\x81',                    # NEWOBJ
            b')\x81',
            b'\x85',                    # TUPLE1
            b'\x86',                    # TUPLE2
            b'N\x86',
            b'\x87',                    # TUPLE3
            b'N\x87',
            b'NN\x87',
            b'\x90',                    # ADDITEMS
            b'(\x90',
            b'\x91',                    # FROZENSET
            b'\x92',                    # NEWOBJ_EX
            b')}\x92',
            b'\x93',                    # STACK_GLOBAL
            b'Vlist\n\x93',
            b'\x94',                    # MEMOIZE
        ]
        for p in badpickles:
            self.check_unpickling_error(self.bad_stack_errors, p)
    def test_bad_mark(self):
        # A MARK object sitting where an opcode expects a real operand
        # must be rejected, not silently consumed.
        badpickles = [
            b'N(.',                     # STOP
            b'N(2',                     # DUP
            b'cbuiltins\nlist\n)(R',    # REDUCE
            b'cbuiltins\nlist\n()R',
            b']N(a',                    # APPEND
                                        # BUILD
            b'cbuiltins\nValueError\n)R}(b',
            b'cbuiltins\nValueError\n)R(}b',
            b'(Nd',                     # DICT
            b'N(p1\n',                  # PUT
            b'N(q\x00',                 # BINPUT
            b'N(r\x00\x00\x00\x00',     # LONG_BINPUT
            b'}NN(s',                   # SETITEM
            b'}N(Ns',
            b'}(NNs',
            b'}((u',                    # SETITEMS
            b'cbuiltins\nlist\n)(\x81', # NEWOBJ
            b'cbuiltins\nlist\n()\x81',
            b'N(\x85',                  # TUPLE1
            b'NN(\x86',                 # TUPLE2
            b'N(N\x86',
            b'NNN(\x87',                # TUPLE3
            b'NN(N\x87',
            b'N(NN\x87',
            b']((\x90',                 # ADDITEMS
                                        # NEWOBJ_EX
            b'cbuiltins\nlist\n)}(\x92',
            b'cbuiltins\nlist\n)(}\x92',
            b'cbuiltins\nlist\n()}\x92',
                                        # STACK_GLOBAL
            b'Vbuiltins\n(Vlist\n\x93',
            b'Vbuiltins\nVlist\n(\x93',
            b'N(\x94',                  # MEMOIZE
        ]
        for p in badpickles:
            self.check_unpickling_error(self.bad_stack_errors, p)
    def test_truncated_data(self):
        # A pickle cut off mid-opcode (missing length bytes, payload bytes,
        # or the terminating newline) must raise the subclass-specific
        # truncation errors rather than returning partial data.
        self.check_unpickling_error(EOFError, b'')
        self.check_unpickling_error(EOFError, b'N')
        badpickles = [
            b'B',                       # BINBYTES
            b'B\x03\x00\x00',
            b'B\x03\x00\x00\x00',
            b'B\x03\x00\x00\x00ab',
            b'C',                       # SHORT_BINBYTES
            b'C\x03',
            b'C\x03ab',
            b'F',                       # FLOAT
            b'F0.0',
            b'F0.00',
            b'G',                       # BINFLOAT
            b'G\x00\x00\x00\x00\x00\x00\x00',
            b'I',                       # INT
            b'I0',
            b'J',                       # BININT
            b'J\x00\x00\x00',
            b'K',                       # BININT1
            b'L',                       # LONG
            b'L0',
            b'L10',
            b'L0L',
            b'L10L',
            b'M',                       # BININT2
            b'M\x00',
            # b'P',                     # PERSID
            # b'Pabc',
            b'S',                       # STRING
            b"S'abc'",
            b'T',                       # BINSTRING
            b'T\x03\x00\x00',
            b'T\x03\x00\x00\x00',
            b'T\x03\x00\x00\x00ab',
            b'U',                       # SHORT_BINSTRING
            b'U\x03',
            b'U\x03ab',
            b'V',                       # UNICODE
            b'Vabc',
            b'X',                       # BINUNICODE
            b'X\x03\x00\x00',
            b'X\x03\x00\x00\x00',
            b'X\x03\x00\x00\x00ab',
            b'(c',                      # GLOBAL
            b'(cbuiltins',
            b'(cbuiltins\n',
            b'(cbuiltins\nlist',
            b'Ng',                      # GET
            b'Ng0',
            b'(i',                      # INST
            b'(ibuiltins',
            b'(ibuiltins\n',
            b'(ibuiltins\nlist',
            b'Nh',                      # BINGET
            b'Nj',                      # LONG_BINGET
            b'Nj\x00\x00\x00',
            b'Np',                      # PUT
            b'Np0',
            b'Nq',                      # BINPUT
            b'Nr',                      # LONG_BINPUT
            b'Nr\x00\x00\x00',
            b'\x80',                    # PROTO
            b'\x82',                    # EXT1
            b'\x83',                    # EXT2
            b'\x84\x01',
            b'\x84',                    # EXT4
            b'\x84\x01\x00\x00',
            b'\x8a',                    # LONG1
            b'\x8b',                    # LONG4
            b'\x8b\x00\x00\x00',
            b'\x8c',                    # SHORT_BINUNICODE
            b'\x8c\x03',
            b'\x8c\x03ab',
            b'\x8d',                    # BINUNICODE8
            b'\x8d\x03\x00\x00\x00\x00\x00\x00',
            b'\x8d\x03\x00\x00\x00\x00\x00\x00\x00',
            b'\x8d\x03\x00\x00\x00\x00\x00\x00\x00ab',
            b'\x8e',                    # BINBYTES8
            b'\x8e\x03\x00\x00\x00\x00\x00\x00',
            b'\x8e\x03\x00\x00\x00\x00\x00\x00\x00',
            b'\x8e\x03\x00\x00\x00\x00\x00\x00\x00ab',
            b'\x96',                    # BYTEARRAY8
            b'\x96\x03\x00\x00\x00\x00\x00\x00',
            b'\x96\x03\x00\x00\x00\x00\x00\x00\x00',
            b'\x96\x03\x00\x00\x00\x00\x00\x00\x00ab',
            b'\x95',                    # FRAME
            b'\x95\x02\x00\x00\x00\x00\x00\x00',
            b'\x95\x02\x00\x00\x00\x00\x00\x00\x00',
            b'\x95\x02\x00\x00\x00\x00\x00\x00\x00N',
        ]
        for p in badpickles:
            self.check_unpickling_error(self.truncated_errors, p)
    @threading_helper.reap_threads
    def test_unpickle_module_race(self):
        # https://bugs.python.org/issue34572
        # Unpickling a class triggers an import of its module; two threads
        # doing so concurrently must both succeed (the loser must block on
        # the import lock instead of seeing a half-initialized module).
        # 'locker' pauses the import of 'locking_import' mid-execution so
        # the race window can be held open deterministically.
        locker_module = dedent("""
        import threading
        barrier = threading.Barrier(2)
        """)
        locking_import_module = dedent("""
        import locker
        locker.barrier.wait()
        class ToBeUnpickled(object):
            pass
        """)

        os.mkdir(TESTFN)
        self.addCleanup(shutil.rmtree, TESTFN)
        sys.path.insert(0, TESTFN)
        self.addCleanup(sys.path.remove, TESTFN)
        with open(os.path.join(TESTFN, "locker.py"), "wb") as f:
            f.write(locker_module.encode('utf-8'))
        with open(os.path.join(TESTFN, "locking_import.py"), "wb") as f:
            f.write(locking_import_module.encode('utf-8'))
        self.addCleanup(forget, "locker")
        self.addCleanup(forget, "locking_import")

        import locker

        pickle_bytes = (
            b'\x80\x03clocking_import\nToBeUnpickled\nq\x00)\x81q\x01.')

        # Then try to unpickle two of these simultaneously
        # One of them will cause the module import, and we want it to block
        # until the other one either:
        #   - fails (before the patch for this issue)
        #   - blocks on the import lock for the module, as it should
        results = []
        # barrier(3) synchronizes the two worker threads plus this main
        # thread before the unpickling race is released.
        barrier = threading.Barrier(3)
        def t():
            # This ensures the threads have all started
            # presumably barrier release is faster than thread startup
            barrier.wait()
            results.append(pickle.loads(pickle_bytes))

        t1 = threading.Thread(target=t)
        t2 = threading.Thread(target=t)
        t1.start()
        t2.start()

        barrier.wait()
        # could have delay here
        locker.barrier.wait()

        t1.join()
        t2.join()

        from locking_import import ToBeUnpickled
        self.assertEqual(
            [type(x) for x in results],
            [ToBeUnpickled] * 2)
class AbstractPickleTests(unittest.TestCase):
# Subclass must define self.dumps, self.loads.
optimized = False
_testdata = AbstractUnpickleTests._testdata
def setUp(self):
pass
assert_is_copy = AbstractUnpickleTests.assert_is_copy
def test_misc(self):
# test various datatypes not tested by testdata
for proto in protocols:
x = myint(4)
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
x = (1, ())
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
x = initarg(1, x)
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
# XXX test __reduce__ protocol?
def test_roundtrip_equality(self):
expected = self._testdata
for proto in protocols:
s = self.dumps(expected, proto)
got = self.loads(s)
self.assert_is_copy(expected, got)
# There are gratuitous differences between pickles produced by
# pickle and cPickle, largely because cPickle starts PUT indices at
# 1 and pickle starts them at 0. See XXX comment in cPickle's put2() --
# there's a comment with an exclamation point there whose meaning
# is a mystery. cPickle also suppresses PUT for objects with a refcount
# of 1.
def dont_test_disassembly(self):
from io import StringIO
from pickletools import dis
for proto, expected in (0, DATA0_DIS), (1, DATA1_DIS):
s = self.dumps(self._testdata, proto)
filelike = StringIO()
dis(s, out=filelike)
got = filelike.getvalue()
self.assertEqual(expected, got)
def _test_recursive_list(self, cls, aslist=identity, minprotocol=0):
# List containing itself.
l = cls()
l.append(l)
for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(l, proto)
x = self.loads(s)
self.assertIsInstance(x, cls)
y = aslist(x)
self.assertEqual(len(y), 1)
self.assertIs(y[0], x)
def test_recursive_list(self):
self._test_recursive_list(list)
def test_recursive_list_subclass(self):
self._test_recursive_list(MyList, minprotocol=2)
def test_recursive_list_like(self):
self._test_recursive_list(REX_six, aslist=lambda x: x.items)
def _test_recursive_tuple_and_list(self, cls, aslist=identity, minprotocol=0):
# Tuple containing a list containing the original tuple.
t = (cls(),)
t[0].append(t)
for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(t, proto)
x = self.loads(s)
self.assertIsInstance(x, tuple)
self.assertEqual(len(x), 1)
self.assertIsInstance(x[0], cls)
y = aslist(x[0])
self.assertEqual(len(y), 1)
self.assertIs(y[0], x)
# List containing a tuple containing the original list.
t, = t
for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(t, proto)
x = self.loads(s)
self.assertIsInstance(x, cls)
y = aslist(x)
self.assertEqual(len(y), 1)
self.assertIsInstance(y[0], tuple)
self.assertEqual(len(y[0]), 1)
self.assertIs(y[0][0], x)
def test_recursive_tuple_and_list(self):
self._test_recursive_tuple_and_list(list)
def test_recursive_tuple_and_list_subclass(self):
self._test_recursive_tuple_and_list(MyList, minprotocol=2)
def test_recursive_tuple_and_list_like(self):
self._test_recursive_tuple_and_list(REX_six, aslist=lambda x: x.items)
def _test_recursive_dict(self, cls, asdict=identity, minprotocol=0):
# Dict containing itself.
d = cls()
d[1] = d
for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(d, proto)
x = self.loads(s)
self.assertIsInstance(x, cls)
y = asdict(x)
self.assertEqual(list(y.keys()), [1])
self.assertIs(y[1], x)
def test_recursive_dict(self):
self._test_recursive_dict(dict)
def test_recursive_dict_subclass(self):
self._test_recursive_dict(MyDict, minprotocol=2)
def test_recursive_dict_like(self):
self._test_recursive_dict(REX_seven, asdict=lambda x: x.table)
def _test_recursive_tuple_and_dict(self, cls, asdict=identity, minprotocol=0):
# Tuple containing a dict containing the original tuple.
t = (cls(),)
t[0][1] = t
for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(t, proto)
x = self.loads(s)
self.assertIsInstance(x, tuple)
self.assertEqual(len(x), 1)
self.assertIsInstance(x[0], cls)
y = asdict(x[0])
self.assertEqual(list(y), [1])
self.assertIs(y[1], x)
# Dict containing a tuple containing the original dict.
t, = t
for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(t, proto)
x = self.loads(s)
self.assertIsInstance(x, cls)
y = asdict(x)
self.assertEqual(list(y), [1])
self.assertIsInstance(y[1], tuple)
self.assertEqual(len(y[1]), 1)
self.assertIs(y[1][0], x)
def test_recursive_tuple_and_dict(self):
self._test_recursive_tuple_and_dict(dict)
def test_recursive_tuple_and_dict_subclass(self):
self._test_recursive_tuple_and_dict(MyDict, minprotocol=2)
def test_recursive_tuple_and_dict_like(self):
self._test_recursive_tuple_and_dict(REX_seven, asdict=lambda x: x.table)
def _test_recursive_dict_key(self, cls, asdict=identity, minprotocol=0):
# Dict containing an immutable object (as key) containing the original
# dict.
d = cls()
d[K(d)] = 1
for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(d, proto)
x = self.loads(s)
self.assertIsInstance(x, cls)
y = asdict(x)
self.assertEqual(len(y.keys()), 1)
self.assertIsInstance(list(y.keys())[0], K)
self.assertIs(list(y.keys())[0].value, x)
def test_recursive_dict_key(self):
self._test_recursive_dict_key(dict)
def test_recursive_dict_subclass_key(self):
self._test_recursive_dict_key(MyDict, minprotocol=2)
def test_recursive_dict_like_key(self):
self._test_recursive_dict_key(REX_seven, asdict=lambda x: x.table)
def _test_recursive_tuple_and_dict_key(self, cls, asdict=identity, minprotocol=0):
# Tuple containing a dict containing an immutable object (as key)
# containing the original tuple.
t = (cls(),)
t[0][K(t)] = 1
for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(t, proto)
x = self.loads(s)
self.assertIsInstance(x, tuple)
self.assertEqual(len(x), 1)
self.assertIsInstance(x[0], cls)
y = asdict(x[0])
self.assertEqual(len(y), 1)
self.assertIsInstance(list(y.keys())[0], K)
self.assertIs(list(y.keys())[0].value, x)
# Dict containing an immutable object (as key) containing a tuple
# containing the original dict.
t, = t
for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(t, proto)
x = self.loads(s)
self.assertIsInstance(x, cls)
y = asdict(x)
self.assertEqual(len(y), 1)
self.assertIsInstance(list(y.keys())[0], K)
self.assertIs(list(y.keys())[0].value[0], x)
def test_recursive_tuple_and_dict_key(self):
self._test_recursive_tuple_and_dict_key(dict)
def test_recursive_tuple_and_dict_subclass_key(self):
self._test_recursive_tuple_and_dict_key(MyDict, minprotocol=2)
def test_recursive_tuple_and_dict_like_key(self):
self._test_recursive_tuple_and_dict_key(REX_seven, asdict=lambda x: x.table)
def test_recursive_set(self):
# Set containing an immutable object containing the original set.
y = set()
y.add(K(y))
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(y, proto)
x = self.loads(s)
self.assertIsInstance(x, set)
self.assertEqual(len(x), 1)
self.assertIsInstance(list(x)[0], K)
self.assertIs(list(x)[0].value, x)
# Immutable object containing a set containing the original object.
y, = y
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(y, proto)
x = self.loads(s)
self.assertIsInstance(x, K)
self.assertIsInstance(x.value, set)
self.assertEqual(len(x.value), 1)
self.assertIs(list(x.value)[0], x)
def test_recursive_inst(self):
# Mutable object containing itself.
i = Object()
i.attr = i
for proto in protocols:
s = self.dumps(i, proto)
x = self.loads(s)
self.assertIsInstance(x, Object)
self.assertEqual(dir(x), dir(i))
self.assertIs(x.attr, x)
def test_recursive_multi(self):
l = []
d = {1:l}
i = Object()
i.attr = d
l.append(i)
for proto in protocols:
s = self.dumps(l, proto)
x = self.loads(s)
self.assertIsInstance(x, list)
self.assertEqual(len(x), 1)
self.assertEqual(dir(x[0]), dir(i))
self.assertEqual(list(x[0].attr.keys()), [1])
self.assertIs(x[0].attr[1], x)
def _test_recursive_collection_and_inst(self, factory):
# Mutable object containing a collection containing the original
# object.
o = Object()
o.attr = factory([o])
t = type(o.attr)
for proto in protocols:
s = self.dumps(o, proto)
x = self.loads(s)
self.assertIsInstance(x.attr, t)
self.assertEqual(len(x.attr), 1)
self.assertIsInstance(list(x.attr)[0], Object)
self.assertIs(list(x.attr)[0], x)
# Collection containing a mutable object containing the original
# collection.
o = o.attr
for proto in protocols:
s = self.dumps(o, proto)
x = self.loads(s)
self.assertIsInstance(x, t)
self.assertEqual(len(x), 1)
self.assertIsInstance(list(x)[0], Object)
self.assertIs(list(x)[0].attr, x)
def test_recursive_list_and_inst(self):
self._test_recursive_collection_and_inst(list)
def test_recursive_tuple_and_inst(self):
self._test_recursive_collection_and_inst(tuple)
def test_recursive_dict_and_inst(self):
self._test_recursive_collection_and_inst(dict.fromkeys)
def test_recursive_set_and_inst(self):
self._test_recursive_collection_and_inst(set)
def test_recursive_frozenset_and_inst(self):
self._test_recursive_collection_and_inst(frozenset)
def test_recursive_list_subclass_and_inst(self):
self._test_recursive_collection_and_inst(MyList)
def test_recursive_tuple_subclass_and_inst(self):
self._test_recursive_collection_and_inst(MyTuple)
def test_recursive_dict_subclass_and_inst(self):
self._test_recursive_collection_and_inst(MyDict.fromkeys)
def test_recursive_set_subclass_and_inst(self):
self._test_recursive_collection_and_inst(MySet)
def test_recursive_frozenset_subclass_and_inst(self):
self._test_recursive_collection_and_inst(MyFrozenSet)
def test_recursive_inst_state(self):
# Mutable object containing itself.
y = REX_state()
y.state = y
for proto in protocols:
s = self.dumps(y, proto)
x = self.loads(s)
self.assertIsInstance(x, REX_state)
self.assertIs(x.state, x)
def test_recursive_tuple_and_inst_state(self):
# Tuple containing a mutable object containing the original tuple.
t = (REX_state(),)
t[0].state = t
for proto in protocols:
s = self.dumps(t, proto)
x = self.loads(s)
self.assertIsInstance(x, tuple)
self.assertEqual(len(x), 1)
self.assertIsInstance(x[0], REX_state)
self.assertIs(x[0].state, x)
# Mutable object containing a tuple containing the object.
t, = t
for proto in protocols:
s = self.dumps(t, proto)
x = self.loads(s)
self.assertIsInstance(x, REX_state)
self.assertIsInstance(x.state, tuple)
self.assertEqual(len(x.state), 1)
self.assertIs(x.state[0], x)
def test_unicode(self):
endcases = ['', '<\\u>', '<\\\u1234>', '<\n>',
'<\\>', '<\\\U00012345>',
# surrogates
'<\udc80>']
for proto in protocols:
for u in endcases:
p = self.dumps(u, proto)
u2 = self.loads(p)
self.assert_is_copy(u, u2)
def test_unicode_high_plane(self):
t = '\U00012345'
for proto in protocols:
p = self.dumps(t, proto)
t2 = self.loads(p)
self.assert_is_copy(t, t2)
def test_bytes(self):
for proto in protocols:
for s in b'', b'xyz', b'xyz'*100:
p = self.dumps(s, proto)
self.assert_is_copy(s, self.loads(p))
for s in [bytes([i]) for i in range(256)]:
p = self.dumps(s, proto)
self.assert_is_copy(s, self.loads(p))
for s in [bytes([i, i]) for i in range(256)]:
p = self.dumps(s, proto)
self.assert_is_copy(s, self.loads(p))
def test_bytearray(self):
for proto in protocols:
for s in b'', b'xyz', b'xyz'*100:
b = bytearray(s)
p = self.dumps(b, proto)
bb = self.loads(p)
self.assertIsNot(bb, b)
self.assert_is_copy(b, bb)
if proto <= 3:
# bytearray is serialized using a global reference
self.assertIn(b'bytearray', p)
self.assertTrue(opcode_in_pickle(pickle.GLOBAL, p))
elif proto == 4:
self.assertIn(b'bytearray', p)
self.assertTrue(opcode_in_pickle(pickle.STACK_GLOBAL, p))
elif proto == 5:
self.assertNotIn(b'bytearray', p)
self.assertTrue(opcode_in_pickle(pickle.BYTEARRAY8, p))
def test_bytearray_memoization_bug(self):
for proto in protocols:
for s in b'', b'xyz', b'xyz'*100:
b = bytearray(s)
p = self.dumps((b, b), proto)
b1, b2 = self.loads(p)
self.assertIs(b1, b2)
def test_ints(self):
for proto in protocols:
n = sys.maxsize
while n:
for expected in (-n, n):
s = self.dumps(expected, proto)
n2 = self.loads(s)
self.assert_is_copy(expected, n2)
n = n >> 1
def test_long(self):
for proto in protocols:
# 256 bytes is where LONG4 begins.
for nbits in 1, 8, 8*254, 8*255, 8*256, 8*257:
nbase = 1 << nbits
for npos in nbase-1, nbase, nbase+1:
for n in npos, -npos:
pickle = self.dumps(n, proto)
got = self.loads(pickle)
self.assert_is_copy(n, got)
# Try a monster. This is quadratic-time in protos 0 & 1, so don't
# bother with those.
nbase = int("deadbeeffeedface", 16)
nbase += nbase << 1000000
for n in nbase, -nbase:
p = self.dumps(n, 2)
got = self.loads(p)
# assert_is_copy is very expensive here as it precomputes
# a failure message by computing the repr() of n and got,
# we just do the check ourselves.
self.assertIs(type(got), int)
self.assertEqual(n, got)
def test_float(self):
test_values = [0.0, 4.94e-324, 1e-310, 7e-308, 6.626e-34, 0.1, 0.5,
3.14, 263.44582062374053, 6.022e23, 1e30]
test_values = test_values + [-x for x in test_values]
for proto in protocols:
for value in test_values:
pickle = self.dumps(value, proto)
got = self.loads(pickle)
self.assert_is_copy(value, got)
@run_with_locale('LC_ALL', 'de_DE', 'fr_FR')
def test_float_format(self):
# make sure that floats are formatted locale independent with proto 0
self.assertEqual(self.dumps(1.2, 0)[0:3], b'F1.')
def test_reduce(self):
for proto in protocols:
inst = AAA()
dumped = self.dumps(inst, proto)
loaded = self.loads(dumped)
self.assertEqual(loaded, REDUCE_A)
def test_getinitargs(self):
for proto in protocols:
inst = initarg(1, 2)
dumped = self.dumps(inst, proto)
loaded = self.loads(dumped)
self.assert_is_copy(inst, loaded)
def test_metaclass(self):
a = use_metaclass()
for proto in protocols:
s = self.dumps(a, proto)
b = self.loads(s)
self.assertEqual(a.__class__, b.__class__)
def test_dynamic_class(self):
a = create_dynamic_class("my_dynamic_class", (object,))
copyreg.pickle(pickling_metaclass, pickling_metaclass.__reduce__)
for proto in protocols:
s = self.dumps(a, proto)
b = self.loads(s)
self.assertEqual(a, b)
self.assertIs(type(a), type(b))
def test_structseq(self):
import time
import os
t = time.localtime()
for proto in protocols:
s = self.dumps(t, proto)
u = self.loads(s)
self.assert_is_copy(t, u)
t = os.stat(os.curdir)
s = self.dumps(t, proto)
u = self.loads(s)
self.assert_is_copy(t, u)
if hasattr(os, "statvfs"):
t = os.statvfs(os.curdir)
s = self.dumps(t, proto)
u = self.loads(s)
self.assert_is_copy(t, u)
def test_ellipsis(self):
for proto in protocols:
s = self.dumps(..., proto)
u = self.loads(s)
self.assertIs(..., u)
def test_notimplemented(self):
for proto in protocols:
s = self.dumps(NotImplemented, proto)
u = self.loads(s)
self.assertIs(NotImplemented, u)
def test_singleton_types(self):
# Issue #6477: Test that types of built-in singletons can be pickled.
singletons = [None, ..., NotImplemented]
for singleton in singletons:
for proto in protocols:
s = self.dumps(type(singleton), proto)
u = self.loads(s)
self.assertIs(type(singleton), u)
# Tests for protocol 2
def test_proto(self):
for proto in protocols:
pickled = self.dumps(None, proto)
if proto >= 2:
proto_header = pickle.PROTO + bytes([proto])
self.assertTrue(pickled.startswith(proto_header))
else:
self.assertEqual(count_opcode(pickle.PROTO, pickled), 0)
oob = protocols[-1] + 1 # a future protocol
build_none = pickle.NONE + pickle.STOP
badpickle = pickle.PROTO + bytes([oob]) + build_none
try:
self.loads(badpickle)
except ValueError as err:
self.assertIn("unsupported pickle protocol", str(err))
else:
self.fail("expected bad protocol number to raise ValueError")
def test_long1(self):
    """An int small enough for LONG1 round-trips; the LONG1 opcode
    appears exactly when the protocol is >= 2."""
    value = 12345678910111213141516178920
    for proto in protocols:
        pickled = self.dumps(value, proto)
        self.assert_is_copy(value, self.loads(pickled))
        self.assertEqual(opcode_in_pickle(pickle.LONG1, pickled),
                         proto >= 2)
def test_long4(self):
    """An int too large for LONG1 round-trips; the LONG4 opcode
    appears exactly when the protocol is >= 2."""
    value = 12345678910111213141516178920 << (256 * 8)
    for proto in protocols:
        pickled = self.dumps(value, proto)
        self.assert_is_copy(value, self.loads(pickled))
        self.assertEqual(opcode_in_pickle(pickle.LONG4, pickled),
                         proto >= 2)
def test_short_tuples(self):
    """Protocol 2 introduced dedicated opcodes (TUPLE1/2/3) for tuples
    of length 1-3; longer tuples and older protocols use TUPLE, and
    empty tuples use EMPTY_TUPLE from protocol 1 on."""
    # Map (proto, len(tuple)) to expected opcode.
    expected_opcode = {(0, 0): pickle.TUPLE,
                       (0, 1): pickle.TUPLE,
                       (0, 2): pickle.TUPLE,
                       (0, 3): pickle.TUPLE,
                       (0, 4): pickle.TUPLE,
                       (1, 0): pickle.EMPTY_TUPLE,
                       (1, 1): pickle.TUPLE,
                       (1, 2): pickle.TUPLE,
                       (1, 3): pickle.TUPLE,
                       (1, 4): pickle.TUPLE,
                       (2, 0): pickle.EMPTY_TUPLE,
                       (2, 1): pickle.TUPLE1,
                       (2, 2): pickle.TUPLE2,
                       (2, 3): pickle.TUPLE3,
                       (2, 4): pickle.TUPLE,
                       (3, 0): pickle.EMPTY_TUPLE,
                       (3, 1): pickle.TUPLE1,
                       (3, 2): pickle.TUPLE2,
                       (3, 3): pickle.TUPLE3,
                       (3, 4): pickle.TUPLE,
                       }
    a = ()
    b = (1,)
    c = (1, 2)
    d = (1, 2, 3)
    e = (1, 2, 3, 4)
    for proto in protocols:
        for x in a, b, c, d, e:
            s = self.dumps(x, proto)
            y = self.loads(s)
            self.assert_is_copy(x, y)
            # Protocols above 3 reuse the protocol-3 tuple opcodes,
            # hence min(proto, 3) when indexing the table.
            expected = expected_opcode[min(proto, 3), len(x)]
            self.assertTrue(opcode_in_pickle(expected, s))
def test_singletons(self):
    """None, True and False unpickle to the identical singletons, and
    each protocol uses the expected opcode for them (NEWTRUE/NEWFALSE
    only exist from protocol 2 on)."""
    # Map (proto, singleton) to expected opcode.
    expected_opcode = {(0, None): pickle.NONE,
                       (1, None): pickle.NONE,
                       (2, None): pickle.NONE,
                       (3, None): pickle.NONE,
                       (0, True): pickle.INT,
                       (1, True): pickle.INT,
                       (2, True): pickle.NEWTRUE,
                       (3, True): pickle.NEWTRUE,
                       (0, False): pickle.INT,
                       (1, False): pickle.INT,
                       (2, False): pickle.NEWFALSE,
                       (3, False): pickle.NEWFALSE,
                       }
    for proto in protocols:
        for x in None, False, True:
            s = self.dumps(x, proto)
            y = self.loads(s)
            # Identity, not mere equality: these are singletons.
            self.assertTrue(x is y, (proto, x, s, y))
            # Protocols above 3 reuse the protocol-3 opcodes.
            expected = expected_opcode[min(proto, 3), x]
            self.assertTrue(opcode_in_pickle(expected, s))
def test_newobj_tuple(self):
    """A tuple subclass carrying instance attributes is copied
    faithfully under every protocol."""
    obj = MyTuple([1, 2, 3])
    obj.foo = 42
    obj.bar = "hello"
    for proto in protocols:
        clone = self.loads(self.dumps(obj, proto))
        self.assert_is_copy(obj, clone)
def test_newobj_list(self):
    """A list subclass carrying instance attributes is copied
    faithfully under every protocol."""
    obj = MyList([1, 2, 3])
    obj.foo = 42
    obj.bar = "hello"
    for proto in protocols:
        clone = self.loads(self.dumps(obj, proto))
        self.assert_is_copy(obj, clone)
def test_newobj_generic(self):
    """Every sample subclass in ``myclasses`` round-trips with both
    its base-type value and its instance ``__dict__`` intact."""
    for proto in protocols:
        for C in myclasses:
            B = C.__base__
            x = C(C.sample)
            x.foo = 42
            s = self.dumps(x, proto)
            y = self.loads(s)
            # Rich failure context: which proto/class combination broke.
            detail = (proto, C, B, x, y, type(y))
            self.assert_is_copy(x, y) # XXX revisit
            # Converting to the base type compares the underlying value.
            self.assertEqual(B(x), B(y), detail)
            self.assertEqual(x.__dict__, y.__dict__, detail)
def test_newobj_proxies(self):
    """Pickling a weakref proxy yields the proxied object's class:
    NEWOBJ should use ``__class__`` rather than the raw type."""
    # NEWOBJ should use the __class__ rather than the raw type
    classes = myclasses[:]
    # Cannot create weakproxies to these classes
    for c in (MyInt, MyTuple):
        classes.remove(c)
    for proto in protocols:
        for C in classes:
            B = C.__base__
            x = C(C.sample)
            x.foo = 42
            p = weakref.proxy(x)
            s = self.dumps(p, proto)
            y = self.loads(s)
            self.assertEqual(type(y), type(x)) # rather than type(p)
            detail = (proto, C, B, x, y, type(y))
            self.assertEqual(B(x), B(y), detail)
            self.assertEqual(x.__dict__, y.__dict__, detail)
def test_newobj_overridden_new(self):
    """A Python class whose __new__ is implemented in C is still
    picklable; type, value and attributes survive the round trip."""
    for proto in protocols:
        original = MyIntWithNew2(1)
        original.foo = 42
        clone = self.loads(self.dumps(original, proto))
        self.assertIs(type(clone), MyIntWithNew2)
        self.assertEqual(int(clone), 1)
        self.assertEqual(clone.foo, 42)
def test_newobj_not_class(self):
    """Issue #24552: unpickling must fail cleanly (not crash) when the
    saved global name no longer refers to a class at load time."""
    global SimpleNewObj
    save = SimpleNewObj
    o = SimpleNewObj.__new__(SimpleNewObj)
    b = self.dumps(o, 4)
    try:
        # Shadow the class with a non-class before loading.
        SimpleNewObj = 42
        self.assertRaises((TypeError, pickle.UnpicklingError), self.loads, b)
    finally:
        # Always restore the module-level name for other tests.
        SimpleNewObj = save
# Register a type with copyreg, with extension code extcode. Pickle
# an object of that type. Check that the resulting pickle uses opcode
# (EXT[124]) under proto 2, and not in proto 1.
def produce_global_ext(self, extcode, opcode):
    """Helper: register MyList under extension code *extcode* and
    verify protocol 2 emits *opcode* while protocol 1 still spells
    out the module and class name."""
    e = ExtensionSaver(extcode)  # saves/restores any prior registration
    try:
        copyreg.add_extension(__name__, "MyList", extcode)
        x = MyList([1, 2, 3])
        x.foo = 42
        x.bar = "hello"

        # Dump using protocol 1 for comparison.
        s1 = self.dumps(x, 1)
        self.assertIn(__name__.encode("utf-8"), s1)
        self.assertIn(b"MyList", s1)
        self.assertFalse(opcode_in_pickle(opcode, s1))
        y = self.loads(s1)
        self.assert_is_copy(x, y)

        # Dump using protocol 2 for test.
        s2 = self.dumps(x, 2)
        # The extension code replaces the textual global reference.
        self.assertNotIn(__name__.encode("utf-8"), s2)
        self.assertNotIn(b"MyList", s2)
        self.assertEqual(opcode_in_pickle(opcode, s2), True, repr(s2))
        y = self.loads(s2)
        self.assert_is_copy(x, y)
    finally:
        e.restore()
def test_global_ext1(self):
    """EXT1 is used for 1-byte extension codes (boundary values)."""
    for extcode in (0x00000001, 0x000000ff):  # smallest / largest
        self.produce_global_ext(extcode, pickle.EXT1)
def test_global_ext2(self):
    """EXT2 is used for 2-byte extension codes; 0xabcd also checks
    the little-endian byte order of the argument."""
    for extcode in (0x00000100, 0x0000ffff, 0x0000abcd):
        self.produce_global_ext(extcode, pickle.EXT2)
def test_global_ext4(self):
    """EXT4 is used for 4-byte extension codes; 0x12abcdef also checks
    the little-endian byte order of the argument."""
    for extcode in (0x00010000, 0x7fffffff, 0x12abcdef):
        self.produce_global_ext(extcode, pickle.EXT4)
def test_list_chunking(self):
    """Binary protocols batch list items into APPENDS opcodes; a long
    enough list must be split into at least two batches."""
    small = list(range(10))  # too small to chunk
    for proto in protocols:
        pickled = self.dumps(small, proto)
        self.assert_is_copy(small, self.loads(pickled))
        batches = count_opcode(pickle.APPENDS, pickled)
        self.assertEqual(batches, 1 if proto > 0 else 0)

    big = list(range(2500))  # expect at least two chunks when proto > 0
    for proto in protocols:
        pickled = self.dumps(big, proto)
        self.assert_is_copy(big, self.loads(pickled))
        batches = count_opcode(pickle.APPENDS, pickled)
        if proto == 0:
            self.assertEqual(batches, 0)
        else:
            self.assertGreaterEqual(batches, 2)
def test_dict_chunking(self):
    """Binary protocols batch dict items into SETITEMS opcodes; a long
    enough dict must be split into at least two batches."""
    small = dict.fromkeys(range(10))  # too small to chunk
    for proto in protocols:
        pickled = self.dumps(small, proto)
        self.assertIsInstance(pickled, bytes_types)
        self.assert_is_copy(small, self.loads(pickled))
        batches = count_opcode(pickle.SETITEMS, pickled)
        self.assertEqual(batches, 1 if proto > 0 else 0)

    big = dict.fromkeys(range(2500))  # expect >= two chunks when proto > 0
    for proto in protocols:
        pickled = self.dumps(big, proto)
        self.assert_is_copy(big, self.loads(pickled))
        batches = count_opcode(pickle.SETITEMS, pickled)
        if proto == 0:
            self.assertEqual(batches, 0)
        else:
            self.assertGreaterEqual(batches, 2)
def test_set_chunking(self):
    """Protocol 4 introduced ADDITEMS for sets; small sets use one
    batch, large sets at least two.  Older protocols never emit it."""
    small = set(range(10))  # too small to chunk
    for proto in protocols:
        pickled = self.dumps(small, proto)
        self.assert_is_copy(small, self.loads(pickled))
        batches = count_opcode(pickle.ADDITEMS, pickled)
        self.assertEqual(batches, 1 if proto >= 4 else 0)

    big = set(range(2500))  # expect at least two chunks when proto >= 4
    for proto in protocols:
        pickled = self.dumps(big, proto)
        self.assert_is_copy(big, self.loads(pickled))
        batches = count_opcode(pickle.ADDITEMS, pickled)
        if proto < 4:
            self.assertEqual(batches, 0)
        else:
            self.assertGreaterEqual(batches, 2)
def test_simple_newobj(self):
    """An object created via __new__ (skipping __init__) pickles with
    NEWOBJ from protocol 2 on; loading must not call __init__."""
    x = SimpleNewObj.__new__(SimpleNewObj, 0xface)  # avoid __init__
    x.abc = 666
    for proto in protocols:
        with self.subTest(proto=proto):
            s = self.dumps(x, proto)
            if proto < 1:
                self.assertIn(b'\nI64206', s)  # INT
            else:
                self.assertIn(b'M\xce\xfa', s)  # BININT2
            # NEWOBJ exists only from protocol 2 on.
            self.assertEqual(opcode_in_pickle(pickle.NEWOBJ, s),
                             2 <= proto)
            self.assertFalse(opcode_in_pickle(pickle.NEWOBJ_EX, s))
            y = self.loads(s)   # will raise TypeError if __init__ called
            self.assert_is_copy(x, y)
def test_complex_newobj(self):
    """Like test_simple_newobj but for a class whose __getnewargs__
    returns a transformed value (a string) — checks the per-protocol
    encoding of that argument too."""
    x = ComplexNewObj.__new__(ComplexNewObj, 0xface)  # avoid __init__
    x.abc = 666
    for proto in protocols:
        with self.subTest(proto=proto):
            s = self.dumps(x, proto)
            if proto < 1:
                self.assertIn(b'\nI64206', s)  # INT
            elif proto < 2:
                self.assertIn(b'M\xce\xfa', s)  # BININT2
            elif proto < 4:
                self.assertIn(b'X\x04\x00\x00\x00FACE', s)  # BINUNICODE
            else:
                self.assertIn(b'\x8c\x04FACE', s)  # SHORT_BINUNICODE
            self.assertEqual(opcode_in_pickle(pickle.NEWOBJ, s),
                             2 <= proto)
            self.assertFalse(opcode_in_pickle(pickle.NEWOBJ_EX, s))
            y = self.loads(s)   # will raise TypeError if __init__ called
            self.assert_is_copy(x, y)
def test_complex_newobj_ex(self):
    """A class using __getnewargs_ex__ pickles with NEWOBJ_EX from
    protocol 4 on (and never plain NEWOBJ)."""
    x = ComplexNewObjEx.__new__(ComplexNewObjEx, 0xface)  # avoid __init__
    x.abc = 666
    for proto in protocols:
        with self.subTest(proto=proto):
            s = self.dumps(x, proto)
            if proto < 1:
                self.assertIn(b'\nI64206', s)  # INT
            elif proto < 2:
                self.assertIn(b'M\xce\xfa', s)  # BININT2
            elif proto < 4:
                self.assertIn(b'X\x04\x00\x00\x00FACE', s)  # BINUNICODE
            else:
                self.assertIn(b'\x8c\x04FACE', s)  # SHORT_BINUNICODE
            self.assertFalse(opcode_in_pickle(pickle.NEWOBJ, s))
            # NEWOBJ_EX exists only from protocol 4 on.
            self.assertEqual(opcode_in_pickle(pickle.NEWOBJ_EX, s),
                             4 <= proto)
            y = self.loads(s)   # will raise TypeError if __init__ called
            self.assert_is_copy(x, y)
def test_newobj_list_slots(self):
    """A list subclass that uses __slots__ survives a protocol-2
    round trip with its slot values intact."""
    original = SlotList([1, 2, 3])
    original.foo = 42
    original.bar = "hello"
    clone = self.loads(self.dumps(original, 2))
    self.assert_is_copy(original, clone)
def test_reduce_overrides_default_reduce_ex(self):
    """A user __reduce__ takes precedence over the inherited
    object.__reduce_ex__; the unpickled copy has fresh state."""
    for proto in protocols:
        obj = REX_one()
        self.assertEqual(obj._reduce_called, 0)
        pickled = self.dumps(obj, proto)
        self.assertEqual(obj._reduce_called, 1)
        clone = self.loads(pickled)
        self.assertEqual(clone._reduce_called, 0)
def test_reduce_ex_called(self):
    """__reduce_ex__ is invoked with the protocol number; the
    unpickled copy does not inherit the recorded protocol."""
    for proto in protocols:
        obj = REX_two()
        self.assertEqual(obj._proto, None)
        pickled = self.dumps(obj, proto)
        self.assertEqual(obj._proto, proto)
        clone = self.loads(pickled)
        self.assertEqual(clone._proto, None)
def test_reduce_ex_overrides_reduce(self):
    """When both are defined, __reduce_ex__ wins and __reduce__ is
    never called (REX_three's __reduce__ would raise)."""
    for proto in protocols:
        obj = REX_three()
        self.assertEqual(obj._proto, None)
        pickled = self.dumps(obj, proto)
        self.assertEqual(obj._proto, proto)
        clone = self.loads(pickled)
        self.assertEqual(clone._proto, None)
def test_reduce_ex_calls_base(self):
    """A __reduce_ex__ that delegates to the base class still records
    the protocol, and the recorded value survives unpickling."""
    for proto in protocols:
        obj = REX_four()
        self.assertEqual(obj._proto, None)
        pickled = self.dumps(obj, proto)
        self.assertEqual(obj._proto, proto)
        clone = self.loads(pickled)
        self.assertEqual(clone._proto, proto)
def test_reduce_calls_base(self):
    """A __reduce__ that delegates to the base class is called during
    pickling, and the recorded flag survives unpickling."""
    for proto in protocols:
        obj = REX_five()
        self.assertEqual(obj._reduce_called, 0)
        pickled = self.dumps(obj, proto)
        self.assertEqual(obj._reduce_called, 1)
        clone = self.loads(pickled)
        self.assertEqual(clone._reduce_called, 1)
@no_tracing
def test_bad_getattr(self):
    """Issue #3514: an infinitely recursing __getattr__ must surface
    as RuntimeError (recursion limit) instead of crashing."""
    # Issue #3514: crash when there is an infinite loop in __getattr__
    x = BadGetattr()
    for proto in protocols:
        self.assertRaises(RuntimeError, self.dumps, x, proto)
def test_reduce_bad_iterator(self):
    """Issue #4176: non-iterator 4th/5th items of __reduce__() must
    not crash the pickler.  A PicklingError is acceptable; the Python
    implementation is more lenient and may accept iterables, so the
    except clauses deliberately swallow it either way."""
    # Issue4176: crash when 4th and 5th items of __reduce__()
    # are not iterators
    class C(object):
        def __reduce__(self):
            # 4th item is not an iterator
            return list, (), None, [], None
    class D(object):
        def __reduce__(self):
            # 5th item is not an iterator
            return dict, (), None, None, []

    # Python implementation is less strict and also accepts iterables.
    for proto in protocols:
        try:
            self.dumps(C(), proto)
        except pickle.PicklingError:
            pass
        try:
            self.dumps(D(), proto)
        except pickle.PicklingError:
            pass
def test_many_puts_and_gets(self):
    """Stress the memo: many shared sub-objects force lots of PUT/GET
    opcodes; the structure must still round-trip exactly."""
    shared = {"aaa" + str(i): [4, 5, 6] for i in range(100)}
    obj = [dict(shared), dict(shared), dict(shared)]
    for proto in protocols:
        with self.subTest(proto=proto):
            dumped = self.dumps(obj, proto)
            self.assert_is_copy(obj, self.loads(dumped))
def test_attribute_name_interning(self):
    """Attribute names are interned on unpickling: each key in the
    clone's __dict__ is the identical string object as the original's."""
    for proto in protocols:
        original = C()
        original.foo = 42
        original.bar = "hello"
        clone = self.loads(self.dumps(original, proto))
        pairs = zip(sorted(original.__dict__), sorted(clone.__dict__))
        for orig_key, clone_key in pairs:
            self.assertIs(orig_key, clone_key)
def test_pickle_to_2x(self):
    """Protocol-2 output for range and set matches the golden bytes
    Python 2.x produced (DATA_XRANGE / DATA_SET2 constants)."""
    # Pickle non-trivial data with protocol 2, expecting that it yields
    # the same result as Python 2.x did.
    # NOTE: this test is a bit too strong since we can produce different
    # bytecode that 2.x will still understand.
    dumped = self.dumps(range(5), 2)
    self.assertEqual(dumped, DATA_XRANGE)
    dumped = self.dumps(set([3]), 2)
    self.assertEqual(dumped, DATA_SET2)
def test_large_pickles(self):
    """Internal buffering handles a payload containing a large bytes
    object correctly under every protocol."""
    for proto in protocols:
        payload = (1, min, b'xy' * (30 * 1024), len)
        restored = self.loads(self.dumps(payload, proto))
        self.assertEqual(len(restored), len(payload))
        self.assertEqual(restored, payload)
def test_int_pickling_efficiency(self):
    """Pickle size of 2**n must grow monotonically with n, and
    protocol 2+ must never fall back to the text LONG opcode."""
    # Test compactness of int representation (see issue #12744)
    for proto in protocols:
        with self.subTest(proto=proto):
            pickles = [self.dumps(2**n, proto) for n in range(70)]
            sizes = list(map(len, pickles))
            # the size function is monotonic
            self.assertEqual(sorted(sizes), sizes)
            if proto >= 2:
                for p in pickles:
                    self.assertFalse(opcode_in_pickle(pickle.LONG, p))
def _check_pickling_with_opcode(self, obj, opcode, proto):
    """Helper: pickle *obj* at *proto*, require that *opcode* appears
    in the stream and that the round trip yields an equal object."""
    data = self.dumps(obj, proto)
    self.assertTrue(opcode_in_pickle(opcode, data))
    restored = self.loads(data)
    self.assertEqual(obj, restored)
def test_appends_on_non_lists(self):
    """Issue #17720: objects exposing list-like append via reduce use
    APPEND on protocol 0 and batched APPENDS otherwise."""
    obj = REX_six([1, 2, 3])
    for proto in protocols:
        expected = pickle.APPEND if proto == 0 else pickle.APPENDS
        self._check_pickling_with_opcode(obj, expected, proto)
def test_setitems_on_non_dicts(self):
    """Objects exposing dict-like setitem via reduce use SETITEM on
    protocol 0 and batched SETITEMS otherwise."""
    obj = REX_seven({1: -1, 2: -2, 3: -3})
    for proto in protocols:
        expected = pickle.SETITEM if proto == 0 else pickle.SETITEMS
        self._check_pickling_with_opcode(obj, expected, proto)
# Exercise framing (proto >= 4) for significant workloads

FRAME_SIZE_MIN = 4            # smallest payload worth a FRAME opcode
FRAME_SIZE_TARGET = 64 * 1024  # pickler aims for frames of about 64 KiB
def check_frame_opcodes(self, pickled):
    """
    Check the arguments of FRAME opcodes in a protocol 4+ pickle.

    Note that binary objects that are larger than FRAME_SIZE_TARGET are not
    framed by default and are therefore considered a frame by themselves in
    the following consistency check.
    """
    # frame_end: stream position where the current frame finishes, or
    # None while outside a frame.  frameless_start: position where a
    # run of unframed opcodes began, or None.
    frame_end = frameless_start = None
    # Opcodes whose (large) payloads may legitimately sit outside a frame.
    frameless_opcodes = {'BINBYTES', 'BINUNICODE', 'BINBYTES8',
                         'BINUNICODE8', 'BYTEARRAY8'}
    for op, arg, pos in pickletools.genops(pickled):
        if frame_end is not None:
            self.assertLessEqual(pos, frame_end)
            if pos == frame_end:
                frame_end = None

        if frame_end is not None:  # framed
            # Frames must not nest.
            self.assertNotEqual(op.name, 'FRAME')
            if op.name in frameless_opcodes:
                # Only short bytes and str objects should be written
                # in a frame
                self.assertLessEqual(len(arg), self.FRAME_SIZE_TARGET)

        else:  # not framed
            if (op.name == 'FRAME' or
                (op.name in frameless_opcodes and
                 len(arg) > self.FRAME_SIZE_TARGET)):
                # Frame or large bytes or str object
                if frameless_start is not None:
                    # Only short data should be written outside of a frame
                    self.assertLess(pos - frameless_start,
                                    self.FRAME_SIZE_MIN)
                    frameless_start = None
            elif frameless_start is None and op.name != 'PROTO':
                frameless_start = pos

        if op.name == 'FRAME':
            # FRAME's argument is the frame length; it must be worth
            # framing, and the frame ends after the 9-byte opcode+arg.
            self.assertGreaterEqual(arg, self.FRAME_SIZE_MIN)
            frame_end = pos + 9 + arg

    # Final consistency at end of stream.
    pos = len(pickled)
    if frame_end is not None:
        self.assertEqual(frame_end, pos)
    elif frameless_start is not None:
        self.assertLess(pos - frameless_start, self.FRAME_SIZE_MIN)
@support.skip_if_pgo_task
def test_framing_many_objects(self):
    """Framing many small objects: the average frame size must land
    between FRAME_SIZE_TARGET/2 and FRAME_SIZE_TARGET."""
    obj = list(range(10**5))
    for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
        with self.subTest(proto=proto):
            pickled = self.dumps(obj, proto)
            unpickled = self.loads(pickled)
            self.assertEqual(obj, unpickled)
            bytes_per_frame = (len(pickled) /
                               count_opcode(pickle.FRAME, pickled))
            self.assertGreater(bytes_per_frame,
                               self.FRAME_SIZE_TARGET / 2)
            self.assertLessEqual(bytes_per_frame,
                                 self.FRAME_SIZE_TARGET * 1)
            self.check_frame_opcodes(pickled)
def test_framing_large_objects(self):
    """Large binary objects are emitted outside frames: only the run
    of small items between them gets its own (single) frame.  Also
    exercised with pickler.fast = True when a Pickler class exists."""
    N = 1024 * 1024
    small_items = [[i] for i in range(10)]
    obj = [b'x' * N, *small_items, b'y' * N, 'z' * N]
    for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
        for fast in [False, True]:
            with self.subTest(proto=proto, fast=fast):
                if not fast:
                    # fast=False by default.
                    # This covers in-memory pickling with pickle.dumps().
                    pickled = self.dumps(obj, proto)
                else:
                    # Pickler is required when fast=True.
                    if not hasattr(self, 'pickler'):
                        continue
                    buf = io.BytesIO()
                    pickler = self.pickler(buf, protocol=proto)
                    pickler.fast = fast
                    pickler.dump(obj)
                    pickled = buf.getvalue()
                unpickled = self.loads(pickled)
                # More informative error message in case of failure.
                self.assertEqual([len(x) for x in obj],
                                 [len(x) for x in unpickled])
                # Perform full equality check if the lengths match.
                self.assertEqual(obj, unpickled)
                n_frames = count_opcode(pickle.FRAME, pickled)
                # A single frame for small objects between
                # first two large objects.
                self.assertEqual(n_frames, 1)
                self.check_frame_opcodes(pickled)
def test_optional_frames(self):
    """FRAME opcodes are optional: stripping some or all of them from
    a protocol 4+ pickle must leave it loadable with the same value."""
    if pickle.HIGHEST_PROTOCOL < 4:
        return

    def remove_frames(pickled, keep_frame=None):
        """Remove frame opcodes from the given pickle.

        keep_frame(i) -> truthy keeps the i-th frame; with no
        predicate, every frame opcode is stripped."""
        frame_starts = []
        # 1 byte for the opcode and 8 for the argument
        frame_opcode_size = 9
        for opcode, _, pos in pickletools.genops(pickled):
            if opcode.name == 'FRAME':
                frame_starts.append(pos)

        newpickle = bytearray()
        last_frame_end = 0
        for i, pos in enumerate(frame_starts):
            if keep_frame and keep_frame(i):
                continue
            newpickle += pickled[last_frame_end:pos]
            last_frame_end = pos + frame_opcode_size
        newpickle += pickled[last_frame_end:]
        return newpickle

    frame_size = self.FRAME_SIZE_TARGET
    num_frames = 20
    # Large byte objects (dict values) intermittent with small objects
    # (dict keys)
    for bytes_type in (bytes, bytearray):
        obj = {i: bytes_type([i]) * frame_size for i in range(num_frames)}

        for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
            pickled = self.dumps(obj, proto)

            frameless_pickle = remove_frames(pickled)
            self.assertEqual(count_opcode(pickle.FRAME, frameless_pickle), 0)
            self.assertEqual(obj, self.loads(frameless_pickle))

            # Keep only the odd-numbered frames.
            some_frames_pickle = remove_frames(pickled, lambda i: i % 2)
            self.assertLess(count_opcode(pickle.FRAME, some_frames_pickle),
                            count_opcode(pickle.FRAME, pickled))
            self.assertEqual(obj, self.loads(some_frames_pickle))
@support.skip_if_pgo_task
def test_framed_write_sizes_with_delayed_writer(self):
    """Framing should produce few, large write() calls, and any
    memoryviews passed to write() must remain valid after dump()."""
    class ChunkAccumulator:
        """Accumulate pickler output in a list of raw chunks."""
        def __init__(self):
            self.chunks = []
        def write(self, chunk):
            self.chunks.append(chunk)
        def concatenate_chunks(self):
            return b"".join(self.chunks)

    for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
        objects = [(str(i).encode('ascii'), i % 42, {'i': str(i)})
                   for i in range(int(1e4))]
        # Add a large unique ASCII string
        objects.append('0123456789abcdef' *
                       (self.FRAME_SIZE_TARGET // 16 + 1))

        # Protocol 4 packs groups of small objects into frames and issues
        # calls to write only once or twice per frame:
        # The C pickler issues one call to write per-frame (header and
        # contents) while Python pickler issues two calls to write: one for
        # the frame header and one for the frame binary contents.
        writer = ChunkAccumulator()
        self.pickler(writer, proto).dump(objects)

        # Actually read the binary content of the chunks after the end
        # of the call to dump: any memoryview passed to write should not
        # be released otherwise this delayed access would not be possible.
        pickled = writer.concatenate_chunks()
        reconstructed = self.loads(pickled)
        self.assertEqual(reconstructed, objects)
        self.assertGreater(len(writer.chunks), 1)

        # memoryviews should own the memory.
        del objects
        support.gc_collect()
        self.assertEqual(writer.concatenate_chunks(), pickled)

        n_frames = (len(pickled) - 1) // self.FRAME_SIZE_TARGET + 1
        # There should be at least one call to write per frame
        self.assertGreaterEqual(len(writer.chunks), n_frames)

        # but not too many either: there can be one for the proto,
        # one per-frame header, one per frame for the actual contents,
        # and two for the header.
        self.assertLessEqual(len(writer.chunks), 2 * n_frames + 3)

        chunk_sizes = [len(c) for c in writer.chunks]
        large_sizes = [s for s in chunk_sizes
                       if s >= self.FRAME_SIZE_TARGET]
        medium_sizes = [s for s in chunk_sizes
                        if 9 < s < self.FRAME_SIZE_TARGET]
        small_sizes = [s for s in chunk_sizes if s <= 9]

        # Large chunks should not be too large:
        for chunk_size in large_sizes:
            self.assertLess(chunk_size, 2 * self.FRAME_SIZE_TARGET,
                            chunk_sizes)
        # There shouldn't be too many small chunks: the protocol header,
        # the frame headers and the large string headers are written
        # in small chunks.
        self.assertLessEqual(len(small_sizes),
                             len(large_sizes) + len(medium_sizes) + 3,
                             chunk_sizes)
def test_nested_names(self):
    """Nested classes unpickle to the identical class objects under
    every protocol.  Nested is made global so pickle can find it."""
    global Nested
    class Nested:
        class A:
            class B:
                class C:
                    pass
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        for obj in [Nested.A, Nested.A.B, Nested.A.B.C]:
            with self.subTest(proto=proto, obj=obj):
                unpickled = self.loads(self.dumps(obj, proto))
                self.assertIs(obj, unpickled)
def test_recursive_nested_names(self):
    """A class whose __qualname__ path loops through its own module
    attribute still unpickles to the identical class object."""
    global Recursive
    class Recursive:
        pass
    # Create the loop: Recursive.mod.Recursive is Recursive itself.
    Recursive.mod = sys.modules[Recursive.__module__]
    Recursive.__qualname__ = 'Recursive.mod.Recursive'
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        with self.subTest(proto=proto):
            unpickled = self.loads(self.dumps(Recursive, proto))
            self.assertIs(unpickled, Recursive)
    del Recursive.mod # break reference loop
def test_py_methods(self):
    """Bound/unbound Python methods — static, class and instance, at
    top level and nested — pickle and unpickle to callables with the
    same behavior."""
    global PyMethodsTest
    class PyMethodsTest:
        @staticmethod
        def cheese():
            return "cheese"
        @classmethod
        def wine(cls):
            assert cls is PyMethodsTest
            return "wine"
        def biscuits(self):
            assert isinstance(self, PyMethodsTest)
            return "biscuits"
        class Nested:
            "Nested class"
            @staticmethod
            def ketchup():
                return "ketchup"
            @classmethod
            def maple(cls):
                assert cls is PyMethodsTest.Nested
                return "maple"
            def pie(self):
                assert isinstance(self, PyMethodsTest.Nested)
                return "pie"

    # Bound methods (and plain functions for the static cases).
    py_methods = (
        PyMethodsTest.cheese,
        PyMethodsTest.wine,
        PyMethodsTest().biscuits,
        PyMethodsTest.Nested.ketchup,
        PyMethodsTest.Nested.maple,
        PyMethodsTest.Nested().pie
    )
    # Unbound methods, paired with the class to instantiate for self.
    py_unbound_methods = (
        (PyMethodsTest.biscuits, PyMethodsTest),
        (PyMethodsTest.Nested.pie, PyMethodsTest.Nested)
    )
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        for method in py_methods:
            with self.subTest(proto=proto, method=method):
                unpickled = self.loads(self.dumps(method, proto))
                self.assertEqual(method(), unpickled())
        for method, cls in py_unbound_methods:
            obj = cls()
            with self.subTest(proto=proto, method=method):
                unpickled = self.loads(self.dumps(method, proto))
                self.assertEqual(method(obj), unpickled(obj))
def test_c_methods(self):
    """Methods implemented in C — bound and unbound, built-in, slot,
    coexist, classmethod, staticmethod, and on subclasses — pickle to
    callables producing the same results."""
    global Subclass
    class Subclass(tuple):
        class Nested(str):
            pass

    # Each entry: (callable, args to call it with for the comparison).
    c_methods = (
        # bound built-in method
        ("abcd".index, ("c",)),
        # unbound built-in method
        (str.index, ("abcd", "c")),
        # bound "slot" method
        ([1, 2, 3].__len__, ()),
        # unbound "slot" method
        (list.__len__, ([1, 2, 3],)),
        # bound "coexist" method
        ({1, 2}.__contains__, (2,)),
        # unbound "coexist" method
        (set.__contains__, ({1, 2}, 2)),
        # built-in class method
        (dict.fromkeys, (("a", 1), ("b", 2))),
        # built-in static method
        (bytearray.maketrans, (b"abc", b"xyz")),
        # subclass methods
        (Subclass([1,2,2]).count, (2,)),
        (Subclass.count, (Subclass([1,2,2]), 2)),
        (Subclass.Nested("sweet").count, ("e",)),
        (Subclass.Nested.count, (Subclass.Nested("sweet"), "e")),
    )
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        for method, args in c_methods:
            with self.subTest(proto=proto, method=method):
                unpickled = self.loads(self.dumps(method, proto))
                self.assertEqual(method(*args), unpickled(*args))
def test_compat_pickle(self):
    """Protocols 0-2 map renamed Python 3 types back to their Python 2
    module/class names (compat_pickle tables) in the GLOBAL opcode."""
    tests = [
        (range(1, 7), '__builtin__', 'xrange'),
        (map(int, '123'), 'itertools', 'imap'),
        (functools.reduce, '__builtin__', 'reduce'),
        (dbm.whichdb, 'whichdb', 'whichdb'),
        (Exception(), 'exceptions', 'Exception'),
        (collections.UserDict(), 'UserDict', 'IterableUserDict'),
        (collections.UserList(), 'UserList', 'UserList'),
        (collections.defaultdict(), 'collections', 'defaultdict'),
    ]
    for val, mod, name in tests:
        # Only protocols 0-2 use the 2.x-compatible names.
        for proto in range(3):
            with self.subTest(type=type(val), proto=proto):
                pickled = self.dumps(val, proto)
                # GLOBAL opcode spelled as "c<module>\n<name>".
                self.assertIn(('c%s\n%s' % (mod, name)).encode(), pickled)
                self.assertIs(type(self.loads(pickled)), type(val))
def test_local_lookup_error(self):
    """whichmodule() must error out cleanly (AttributeError or
    PicklingError) when a supposedly global object can't be found."""
    # Test that whichmodule() errors out cleanly when looking up
    # an assumed globally-reachable object fails.
    def f():
        pass
    # Since the function is local, lookup will fail
    for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
        with self.assertRaises((AttributeError, pickle.PicklingError)):
            pickletools.dis(self.dumps(f, proto))
    # Same without a __module__ attribute (exercises a different path
    # in _pickle.c).
    del f.__module__
    for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
        with self.assertRaises((AttributeError, pickle.PicklingError)):
            pickletools.dis(self.dumps(f, proto))
    # Yet a different path.
    f.__name__ = f.__qualname__
    for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
        with self.assertRaises((AttributeError, pickle.PicklingError)):
            pickletools.dis(self.dumps(f, proto))
#
# PEP 574 tests below
#

def buffer_like_objects(self):
    """Generator of buffer-like fixtures, each containing the
    bytestring b"abcdefgh": zero-copy bytes/bytearray subclasses and,
    when _testbuffer is available, picklable ndarrays in several
    layouts (1-D, C-contiguous, Fortran-contiguous), read-only and
    writable."""
    # Yield buffer-like objects with the bytestring "abcdef" in them
    bytestring = b"abcdefgh"
    yield ZeroCopyBytes(bytestring)
    yield ZeroCopyBytearray(bytestring)
    if _testbuffer is not None:
        items = list(bytestring)
        value = int.from_bytes(bytestring, byteorder='little')
        for flags in (0, _testbuffer.ND_WRITABLE):
            # 1-D, contiguous
            yield PicklableNDArray(items, format='B', shape=(8,),
                                   flags=flags)
            # 2-D, C-contiguous
            yield PicklableNDArray(items, format='B', shape=(4, 2),
                                   strides=(2, 1), flags=flags)
            # 2-D, Fortran-contiguous
            yield PicklableNDArray(items, format='B',
                                   shape=(4, 2), strides=(1, 4),
                                   flags=flags)
def test_in_band_buffers(self):
    """PEP 574 in-band serialization: without an out-of-band callback
    the buffer bytes land inside the pickle, and loading produces an
    equal copy (never the same object)."""
    # Test in-band buffers (PEP 574)
    for obj in self.buffer_like_objects():
        for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
            data = self.dumps(obj, proto)
            if obj.c_contiguous and proto >= 5:
                # The raw memory bytes are serialized in physical order
                self.assertIn(b"abcdefgh", data)
            self.assertEqual(count_opcode(pickle.NEXT_BUFFER, data), 0)
            if proto >= 5:
                # Read-only buffers pickle as bytes, writable ones as
                # bytearray.
                self.assertEqual(count_opcode(pickle.SHORT_BINBYTES, data),
                                 1 if obj.readonly else 0)
                self.assertEqual(count_opcode(pickle.BYTEARRAY8, data),
                                 0 if obj.readonly else 1)
                # Return a true value from buffer_callback should have
                # the same effect
                def buffer_callback(obj):
                    return True
                data2 = self.dumps(obj, proto,
                                   buffer_callback=buffer_callback)
                self.assertEqual(data2, data)

            new = self.loads(data)
            # It's a copy
            self.assertIsNot(new, obj)
            self.assertIs(type(new), type(obj))
            self.assertEqual(new, obj)

    # XXX Unfortunately cannot test non-contiguous array
    # (see comment in PicklableNDArray.__reduce_ex__)
def test_oob_buffers(self):
    """PEP 574 out-of-band serialization: buffer_callback requires
    protocol 5; the payload then travels outside the pickle and must
    be supplied back to loads() via the buffers argument."""
    # Test out-of-band buffers (PEP 574)
    for obj in self.buffer_like_objects():
        for proto in range(0, 5):
            # Need protocol >= 5 for buffer_callback
            with self.assertRaises(ValueError):
                self.dumps(obj, proto,
                           buffer_callback=[].append)
        for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
            buffers = []
            buffer_callback = lambda pb: buffers.append(pb.raw())
            data = self.dumps(obj, proto,
                              buffer_callback=buffer_callback)
            # The payload bytes must NOT be inside the pickle itself.
            self.assertNotIn(b"abcdefgh", data)
            self.assertEqual(count_opcode(pickle.SHORT_BINBYTES, data), 0)
            self.assertEqual(count_opcode(pickle.BYTEARRAY8, data), 0)
            self.assertEqual(count_opcode(pickle.NEXT_BUFFER, data), 1)
            self.assertEqual(count_opcode(pickle.READONLY_BUFFER, data),
                             1 if obj.readonly else 0)

            if obj.c_contiguous:
                self.assertEqual(bytes(buffers[0]), b"abcdefgh")
            # Need buffers argument to unpickle properly
            with self.assertRaises(pickle.UnpicklingError):
                self.loads(data)

            new = self.loads(data, buffers=buffers)
            if obj.zero_copy_reconstruct:
                # Zero-copy achieved
                self.assertIs(new, obj)
            else:
                self.assertIs(type(new), type(obj))
                self.assertEqual(new, obj)
            # Non-sequence buffers accepted too
            new = self.loads(data, buffers=iter(buffers))
            if obj.zero_copy_reconstruct:
                # Zero-copy achieved
                self.assertIs(new, obj)
            else:
                self.assertIs(type(new), type(obj))
                self.assertEqual(new, obj)
def test_oob_buffers_writable_to_readonly(self):
    """A read-only object can be reconstructed from writable
    out-of-band buffers (bytearray copies of the originals)."""
    obj = ZeroCopyBytes(b"foobar")
    for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
        raw_buffers = []
        data = self.dumps(obj, proto, buffer_callback=raw_buffers.append)

        writable_buffers = map(bytearray, raw_buffers)
        clone = self.loads(data, buffers=writable_buffers)
        self.assertIs(type(clone), type(obj))
        self.assertEqual(clone, obj)
def test_picklebuffer_error(self):
    """PickleBuffer objects are rejected below protocol 5."""
    pb = pickle.PickleBuffer(b"foobar")
    for proto in range(5):
        with self.assertRaises(pickle.PickleError):
            self.dumps(pb, proto)
def test_buffer_callback_error(self):
    """An exception raised inside buffer_callback propagates out of
    dumps() unchanged."""
    def failing_callback(buffers):
        1/0
    pb = pickle.PickleBuffer(b"foobar")
    with self.assertRaises(ZeroDivisionError):
        self.dumps(pb, 5, buffer_callback=failing_callback)
def test_buffers_error(self):
    """loads() validates its buffers argument: non-iterables raise
    TypeError, and a prematurely exhausted iterable raises
    UnpicklingError."""
    pb = pickle.PickleBuffer(b"foobar")
    for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
        data = self.dumps(pb, proto, buffer_callback=[].append)
        with self.assertRaises(TypeError):
            self.loads(data, buffers=object())  # not iterable
        with self.assertRaises(pickle.UnpicklingError):
            self.loads(data, buffers=[])        # exhausts too early
def test_inband_accept_default_buffers_argument(self):
    """dumps(buffer_callback=None) and loads(buffers=None) behave like
    the defaults.

    Fix: the original bound the loaded value but never asserted it, so
    a corrupted round trip would have passed; check the value too.
    """
    for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
        data_pickled = self.dumps(1, proto, buffer_callback=None)
        data = self.loads(data_pickled, buffers=None)
        self.assertEqual(data, 1)
@unittest.skipIf(np is None, "Test needs Numpy")
def test_buffers_numpy(self):
    """PEP 574 with NumPy arrays: in-band pickling always copies;
    out-of-band pickling is zero-copy for contiguous arrays (same
    underlying data pointer) and copies otherwise."""
    def check_no_copy(x, y):
        np.testing.assert_equal(x, y)
        # Same data pointer means the memory was shared, not copied.
        self.assertEqual(x.ctypes.data, y.ctypes.data)

    def check_copy(x, y):
        np.testing.assert_equal(x, y)
        self.assertNotEqual(x.ctypes.data, y.ctypes.data)

    def check_array(arr):
        # In-band
        for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
            data = self.dumps(arr, proto)
            new = self.loads(data)
            check_copy(arr, new)
        for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
            # A true-returning callback keeps buffers in-band too.
            buffer_callback = lambda _: True
            data = self.dumps(arr, proto, buffer_callback=buffer_callback)
            new = self.loads(data)
            check_copy(arr, new)
        # Out-of-band
        for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
            buffers = []
            buffer_callback = buffers.append
            data = self.dumps(arr, proto, buffer_callback=buffer_callback)
            new = self.loads(data, buffers=buffers)
            if arr.flags.c_contiguous or arr.flags.f_contiguous:
                check_no_copy(arr, new)
            else:
                check_copy(arr, new)

    # 1-D
    arr = np.arange(6)
    check_array(arr)
    # 1-D, non-contiguous
    check_array(arr[::2])
    # 2-D, C-contiguous
    arr = np.arange(12).reshape((3, 4))
    check_array(arr)
    # 2-D, F-contiguous
    check_array(arr.T)
    # 2-D, non-contiguous
    check_array(arr[::2])
class BigmemPickleTests(unittest.TestCase):
    """Round-trip and error-handling tests for multi-gigabyte payloads.

    Mixed into concrete pickler test classes that supply self.dumps /
    self.loads; each test is gated by @bigmemtest resource limits.
    Every test rebinds its huge locals to None in a finally block so
    the memory is released promptly.
    """

    # Binary protocols can serialize longs of up to 2 GiB-1

    @bigmemtest(size=_2G, memuse=3.6, dry_run=False)
    def test_huge_long_32b(self, size):
        """An int wider than the 32-bit length field must be rejected
        (ValueError/OverflowError), not crash, on protocols >= 2."""
        data = 1 << (8 * size)
        try:
            for proto in protocols:
                if proto < 2:
                    continue
                with self.subTest(proto=proto):
                    with self.assertRaises((ValueError, OverflowError)):
                        self.dumps(data, protocol=proto)
        finally:
            data = None

    # Protocol 3 can serialize up to 4 GiB-1 as a bytes object
    # (older protocols don't have a dedicated opcode for bytes and are
    # too inefficient)

    @bigmemtest(size=_2G, memuse=2.5, dry_run=False)
    def test_huge_bytes_32b(self, size):
        """A ~2 GiB bytes object is written with BINBYTES and a 32-bit
        little-endian length on protocols >= 3."""
        data = b"abcd" * (size // 4)
        try:
            for proto in protocols:
                if proto < 3:
                    continue
                with self.subTest(proto=proto):
                    try:
                        pickled = self.dumps(data, protocol=proto)
                        header = (pickle.BINBYTES +
                                  struct.pack("<I", len(data)))
                        data_start = pickled.index(data)
                        self.assertEqual(
                            header,
                            pickled[data_start-len(header):data_start])
                    finally:
                        pickled = None
        finally:
            data = None

    @bigmemtest(size=_4G, memuse=2.5, dry_run=False)
    def test_huge_bytes_64b(self, size):
        """A >4 GiB bytes object overflows protocol 3 (error expected)
        and uses BINBYTES8 with a 64-bit length on protocols >= 4."""
        data = b"acbd" * (size // 4)
        try:
            for proto in protocols:
                if proto < 3:
                    continue
                with self.subTest(proto=proto):
                    if proto == 3:
                        # Protocol 3 does not support large bytes objects.
                        # Verify that we do not crash when processing one.
                        with self.assertRaises((ValueError, OverflowError)):
                            self.dumps(data, protocol=proto)
                        continue
                    try:
                        pickled = self.dumps(data, protocol=proto)
                        header = (pickle.BINBYTES8 +
                                  struct.pack("<Q", len(data)))
                        data_start = pickled.index(data)
                        self.assertEqual(
                            header,
                            pickled[data_start-len(header):data_start])
                    finally:
                        pickled = None
        finally:
            data = None

    # All protocols use 1-byte per printable ASCII character; we add another
    # byte because the encoded form has to be copied into the internal buffer.

    @bigmemtest(size=_2G, memuse=8, dry_run=False)
    def test_huge_str_32b(self, size):
        """A ~2 GiB ASCII str uses BINUNICODE with a 32-bit length on
        all binary protocols."""
        data = "abcd" * (size // 4)
        try:
            for proto in protocols:
                if proto == 0:
                    continue
                with self.subTest(proto=proto):
                    try:
                        pickled = self.dumps(data, protocol=proto)
                        header = (pickle.BINUNICODE +
                                  struct.pack("<I", len(data)))
                        data_start = pickled.index(b'abcd')
                        self.assertEqual(
                            header,
                            pickled[data_start-len(header):data_start])
                        # The full payload must be present, uninterrupted.
                        self.assertEqual((pickled.rindex(b"abcd") + len(b"abcd") -
                                          pickled.index(b"abcd")), len(data))
                    finally:
                        pickled = None
        finally:
            data = None

    # BINUNICODE (protocols 1, 2 and 3) cannot carry more than 2**32 - 1 bytes
    # of utf-8 encoded unicode. BINUNICODE8 (protocol 4) supports these huge
    # unicode strings however.

    @bigmemtest(size=_4G, memuse=8, dry_run=False)
    def test_huge_str_64b(self, size):
        """A >4 GiB str overflows protocols < 4 (error expected) and
        uses BINUNICODE8 with a 64-bit length on protocols >= 4."""
        data = "abcd" * (size // 4)
        try:
            for proto in protocols:
                if proto == 0:
                    continue
                with self.subTest(proto=proto):
                    if proto < 4:
                        with self.assertRaises((ValueError, OverflowError)):
                            self.dumps(data, protocol=proto)
                        continue
                    try:
                        pickled = self.dumps(data, protocol=proto)
                        header = (pickle.BINUNICODE8 +
                                  struct.pack("<Q", len(data)))
                        data_start = pickled.index(b'abcd')
                        self.assertEqual(
                            header,
                            pickled[data_start-len(header):data_start])
                        # The full payload must be present, uninterrupted.
                        self.assertEqual((pickled.rindex(b"abcd") + len(b"abcd") -
                                          pickled.index(b"abcd")), len(data))
                    finally:
                        pickled = None
        finally:
            data = None
# Test classes for reduce_ex

class REX_one(object):
    """No __reduce_ex__ here, but inheriting it from object"""
    _reduce_called = 0  # flag: set to 1 when __reduce__ runs
    def __reduce__(self):
        self._reduce_called = 1
        return REX_one, ()

class REX_two(object):
    """No __reduce__ here, but inheriting it from object"""
    _proto = None  # records the protocol number passed to __reduce_ex__
    def __reduce_ex__(self, proto):
        self._proto = proto
        return REX_two, ()

class REX_three(object):
    """__reduce_ex__ must take priority: __reduce__ must never be called."""
    _proto = None
    def __reduce_ex__(self, proto):
        self._proto = proto
        return REX_two, ()
    def __reduce__(self):
        raise TestFailed("This __reduce__ shouldn't be called")

class REX_four(object):
    """Calling base class method should succeed"""
    _proto = None
    def __reduce_ex__(self, proto):
        self._proto = proto
        return object.__reduce_ex__(self, proto)

class REX_five(object):
    """This one used to fail with infinite recursion"""
    _reduce_called = 0
    def __reduce__(self):
        self._reduce_called = 1
        return object.__reduce__(self)

class REX_six(object):
    """This class is used to check the 4th argument (list iterator) of
    the reduce protocol.
    """
    def __init__(self, items=None):
        self.items = items if items is not None else []
    def __eq__(self, other):
        return type(self) is type(other) and self.items == other.items
    def append(self, item):
        self.items.append(item)
    def __reduce__(self):
        # 4th element: iterator of items to be append()ed after construction.
        return type(self), (), None, iter(self.items), None

class REX_seven(object):
    """This class is used to check the 5th argument (dict iterator) of
    the reduce protocol.
    """
    def __init__(self, table=None):
        self.table = table if table is not None else {}
    def __eq__(self, other):
        return type(self) is type(other) and self.table == other.table
    def __setitem__(self, key, value):
        self.table[key] = value
    def __reduce__(self):
        # 5th element: iterator of (key, value) pairs set via __setitem__.
        return type(self), (), None, None, iter(self.table.items())

class REX_state(object):
    """This class is used to check the 3rd argument (state) of
    the reduce protocol.
    """
    def __init__(self, state=None):
        self.state = state
    def __eq__(self, other):
        return type(self) is type(other) and self.state == other.state
    def __setstate__(self, state):
        self.state = state
    def __reduce__(self):
        # 3rd element: state object handed to __setstate__ at unpickling.
        return type(self), (), self.state
# Test classes for newobj: trivial subclasses of each builtin, each with a
# `sample` class attribute used as a representative instance value.

class MyInt(int):
    sample = 1

class MyFloat(float):
    sample = 1.0

class MyComplex(complex):
    sample = 1.0 + 0.0j

class MyStr(str):
    sample = "hello"

class MyUnicode(str):
    sample = "hello \u1234"

class MyTuple(tuple):
    sample = (1, 2, 3)

class MyList(list):
    sample = [1, 2, 3]

class MyDict(dict):
    sample = {"a": 1, "b": 2}

class MySet(set):
    sample = {"a", "b"}

class MyFrozenSet(frozenset):
    sample = frozenset({"a", "b"})

# All builtin-subclass fixtures, for tests that iterate over them.
myclasses = [MyInt, MyFloat,
             MyComplex,
             MyStr, MyUnicode,
             MyTuple, MyList, MyDict, MySet, MyFrozenSet]

class MyIntWithNew(int):
    # __new__ must never be invoked directly during unpickling of subclasses.
    def __new__(cls, value):
        raise AssertionError

class MyIntWithNew2(MyIntWithNew):
    __new__ = int.__new__

class SlotList(MyList):
    __slots__ = ["foo"]

class SimpleNewObj(int):
    def __init__(self, *args, **kwargs):
        # raise an error, to make sure this isn't called
        raise TypeError("SimpleNewObj.__init__() didn't expect to get called")
    def __eq__(self, other):
        return int(self) == int(other) and self.__dict__ == other.__dict__

class ComplexNewObj(SimpleNewObj):
    # __getnewargs__ supplies positional args for __new__ at unpickling.
    def __getnewargs__(self):
        return ('%X' % self, 16)

class ComplexNewObjEx(SimpleNewObj):
    # __getnewargs_ex__ supplies (args, kwargs) for __new__ at unpickling.
    def __getnewargs_ex__(self):
        return ('%X' % self,), {'base': 16}

class BadGetattr:
    # Infinitely-recursing __getattr__, used to check pickler robustness.
    def __getattr__(self, key):
        self.foo
class AbstractPickleModuleTests(unittest.TestCase):
    """Tests for the module-level pickle API (dump/load/dumps/loads/Pickler).

    Mixin-style: concrete subclasses bind self.dump/self.load/self.dumps/
    self.loads/self.Pickler/self.Unpickler to a specific implementation.
    """

    def test_dump_closed_file(self):
        # dump() to a closed file object must raise ValueError.
        f = open(TESTFN, "wb")
        try:
            f.close()
            self.assertRaises(ValueError, self.dump, 123, f)
        finally:
            os_helper.unlink(TESTFN)

    def test_load_closed_file(self):
        # load() from a closed file object must raise ValueError.
        # BUGFIX: this previously called self.dump, making it an exact
        # duplicate of test_dump_closed_file and leaving load() untested.
        f = open(TESTFN, "wb")
        try:
            f.close()
            self.assertRaises(ValueError, self.load, f)
        finally:
            os_helper.unlink(TESTFN)

    def test_load_from_and_dump_to_file(self):
        # Round-trip through a binary stream.
        stream = io.BytesIO()
        data = [123, {}, 124]
        self.dump(data, stream)
        stream.seek(0)
        unpickled = self.load(stream)
        self.assertEqual(unpickled, data)

    def test_highest_protocol(self):
        # Of course this needs to be changed when HIGHEST_PROTOCOL changes.
        self.assertEqual(pickle.HIGHEST_PROTOCOL, 5)

    def test_callapi(self):
        f = io.BytesIO()
        # With and without keyword arguments
        self.dump(123, f, -1)
        self.dump(123, file=f, protocol=-1)
        self.dumps(123, -1)
        self.dumps(123, protocol=-1)
        self.Pickler(f, -1)
        self.Pickler(f, protocol=-1)

    def test_dump_text_file(self):
        # Pickling to a text-mode file must fail (bytes output required).
        f = open(TESTFN, "w")
        try:
            for proto in protocols:
                self.assertRaises(TypeError, self.dump, 123, f, proto)
        finally:
            f.close()
            os_helper.unlink(TESTFN)

    def test_incomplete_input(self):
        # A truncated pickle stream must raise, not hang or crash.
        s = io.BytesIO(b"X''.")
        self.assertRaises((EOFError, struct.error, pickle.UnpicklingError), self.load, s)

    def test_bad_init(self):
        # Test issue3664 (pickle can segfault from a badly initialized Pickler).
        # Override initialization without calling __init__() of the superclass.
        class BadPickler(self.Pickler):
            def __init__(self): pass

        class BadUnpickler(self.Unpickler):
            def __init__(self): pass

        self.assertRaises(pickle.PicklingError, BadPickler().dump, 0)
        self.assertRaises(pickle.UnpicklingError, BadUnpickler().load)

    def check_dumps_loads_oob_buffers(self, dumps, loads):
        # No need to do the full gamut of tests here, just enough to
        # check that dumps() and loads() redirect their arguments
        # to the underlying Pickler and Unpickler, respectively.
        obj = ZeroCopyBytes(b"foo")
        for proto in range(0, 5):
            # Need protocol >= 5 for buffer_callback
            with self.assertRaises(ValueError):
                dumps(obj, protocol=proto,
                      buffer_callback=[].append)
        for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
            buffers = []
            buffer_callback = buffers.append
            data = dumps(obj, protocol=proto,
                         buffer_callback=buffer_callback)
            # Payload must travel out-of-band, not inside the pickle stream.
            self.assertNotIn(b"foo", data)
            self.assertEqual(bytes(buffers[0]), b"foo")
            # Need buffers argument to unpickle properly
            with self.assertRaises(pickle.UnpicklingError):
                loads(data)
            new = loads(data, buffers=buffers)
            self.assertIs(new, obj)

    def test_dumps_loads_oob_buffers(self):
        # Test out-of-band buffers (PEP 574) with top-level dumps() and loads()
        self.check_dumps_loads_oob_buffers(self.dumps, self.loads)

    def test_dump_load_oob_buffers(self):
        # Test out-of-band buffers (PEP 574) with top-level dump() and load()
        def dumps(obj, **kwargs):
            f = io.BytesIO()
            self.dump(obj, f, **kwargs)
            return f.getvalue()

        def loads(data, **kwargs):
            f = io.BytesIO(data)
            return self.load(f, **kwargs)

        self.check_dumps_loads_oob_buffers(dumps, loads)
class AbstractPersistentPicklerTests(unittest.TestCase):
    # This class defines persistent_id() and persistent_load()
    # functions that should be used by the pickler. All even integers
    # are pickled using persistent ids.

    def persistent_id(self, object):
        """Return a persistent id for even ints and the marker string, else None."""
        if isinstance(object, int) and object % 2 == 0:
            self.id_count += 1
            return str(object)
        elif object == "test_false_value":
            # Deliberately returns a falsy ("") but valid persistent id.
            self.false_count += 1
            return ""
        else:
            # None means: pickle the object normally.
            return None

    def persistent_load(self, oid):
        """Resolve a persistent id produced by persistent_id() above."""
        if not oid:
            # The empty-string id maps back to the marker string.
            self.load_false_count += 1
            return "test_false_value"
        else:
            self.load_count += 1
            object = int(oid)
            assert object % 2 == 0
            return object

    def test_persistence(self):
        # Five even ints (0,2,4,6,8) plus one falsy-id marker per round trip.
        L = list(range(10)) + ["test_false_value"]
        for proto in protocols:
            self.id_count = 0
            self.false_count = 0
            self.load_false_count = 0
            self.load_count = 0
            self.assertEqual(self.loads(self.dumps(L, proto)), L)
            self.assertEqual(self.id_count, 5)
            self.assertEqual(self.false_count, 1)
            self.assertEqual(self.load_count, 5)
            self.assertEqual(self.load_false_count, 1)
class AbstractIdentityPersistentPicklerTests(unittest.TestCase):
    """Persistent-id tests where every object is its own persistent id."""

    def persistent_id(self, obj):
        return obj

    def persistent_load(self, pid):
        return pid

    def _check_return_correct_type(self, obj, proto):
        # The identity round trip must preserve both type and value.
        unpickled = self.loads(self.dumps(obj, proto))
        self.assertIsInstance(unpickled, type(obj))
        self.assertEqual(unpickled, obj)

    def test_return_correct_type(self):
        for proto in protocols:
            # Protocol 0 supports only ASCII strings.
            if proto == 0:
                self._check_return_correct_type("abc", 0)
            else:
                for obj in [b"abc\n", "abc\n", -1, -1.1 * 0.1, str]:
                    self._check_return_correct_type(obj, proto)

    def test_protocol0_is_ascii_only(self):
        # Pickling a non-ASCII persistent id with protocol 0 must fail,
        # and so must unpickling a hand-built non-ASCII PERSID stream.
        non_ascii_str = "\N{EMPTY SET}"
        self.assertRaises(pickle.PicklingError, self.dumps, non_ascii_str, 0)
        pickled = pickle.PERSID + non_ascii_str.encode('utf-8') + b'\n.'
        self.assertRaises(pickle.UnpicklingError, self.loads, pickled)
class AbstractPicklerUnpicklerObjectTests(unittest.TestCase):
    """Tests of Pickler/Unpickler object behavior: memo handling and reuse.

    Concrete subclasses must set pickler_class and unpickler_class.
    """

    pickler_class = None
    unpickler_class = None

    def setUp(self):
        assert self.pickler_class
        assert self.unpickler_class

    def test_clear_pickler_memo(self):
        # To test whether clear_memo() has any effect, we pickle an object,
        # then pickle it again without clearing the memo; the two serialized
        # forms should be different. If we clear_memo() and then pickle the
        # object again, the third serialized form should be identical to the
        # first one we obtained.
        data = ["abcdefg", "abcdefg", 44]
        for proto in protocols:
            f = io.BytesIO()
            pickler = self.pickler_class(f, proto)
            pickler.dump(data)
            first_pickled = f.getvalue()
            # Reset BytesIO object.
            f.seek(0)
            f.truncate()
            pickler.dump(data)
            second_pickled = f.getvalue()
            # Reset the Pickler and BytesIO objects.
            pickler.clear_memo()
            f.seek(0)
            f.truncate()
            pickler.dump(data)
            third_pickled = f.getvalue()
            self.assertNotEqual(first_pickled, second_pickled)
            self.assertEqual(first_pickled, third_pickled)

    def test_priming_pickler_memo(self):
        # Verify that we can set the Pickler's memo attribute.
        data = ["abcdefg", "abcdefg", 44]
        f = io.BytesIO()
        pickler = self.pickler_class(f)
        pickler.dump(data)
        first_pickled = f.getvalue()
        f = io.BytesIO()
        primed = self.pickler_class(f)
        # A primed memo makes the second dump emit memo references only.
        primed.memo = pickler.memo
        primed.dump(data)
        primed_pickled = f.getvalue()
        self.assertNotEqual(first_pickled, primed_pickled)

    def test_priming_unpickler_memo(self):
        # Verify that we can set the Unpickler's memo attribute.
        data = ["abcdefg", "abcdefg", 44]
        f = io.BytesIO()
        pickler = self.pickler_class(f)
        pickler.dump(data)
        first_pickled = f.getvalue()
        f = io.BytesIO()
        primed = self.pickler_class(f)
        primed.memo = pickler.memo
        primed.dump(data)
        primed_pickled = f.getvalue()
        unpickler = self.unpickler_class(io.BytesIO(first_pickled))
        unpickled_data1 = unpickler.load()
        self.assertEqual(unpickled_data1, data)
        # An Unpickler primed with the first Unpickler's memo can resolve
        # the memo-reference-only stream produced by the primed Pickler.
        primed = self.unpickler_class(io.BytesIO(primed_pickled))
        primed.memo = unpickler.memo
        unpickled_data2 = primed.load()
        primed.memo.clear()
        self.assertEqual(unpickled_data2, data)
        self.assertTrue(unpickled_data2 is unpickled_data1)

    def test_reusing_unpickler_objects(self):
        # One Unpickler instance can load successive streams from the same
        # file object after it is rewound and rewritten.
        data1 = ["abcdefg", "abcdefg", 44]
        f = io.BytesIO()
        pickler = self.pickler_class(f)
        pickler.dump(data1)
        pickled1 = f.getvalue()
        data2 = ["abcdefg", 44, 44]
        f = io.BytesIO()
        pickler = self.pickler_class(f)
        pickler.dump(data2)
        pickled2 = f.getvalue()
        f = io.BytesIO()
        f.write(pickled1)
        f.seek(0)
        unpickler = self.unpickler_class(f)
        self.assertEqual(unpickler.load(), data1)
        f.seek(0)
        f.truncate()
        f.write(pickled2)
        f.seek(0)
        self.assertEqual(unpickler.load(), data2)

    def _check_multiple_unpicklings(self, ioclass, *, seekable=True):
        # N concatenated pickles in one stream must load back-to-back, and
        # (for seekable streams) leave the file position exactly at the end
        # of each pickle.
        for proto in protocols:
            with self.subTest(proto=proto):
                data1 = [(x, str(x)) for x in range(2000)] + [b"abcde", len]
                f = ioclass()
                pickler = self.pickler_class(f, protocol=proto)
                pickler.dump(data1)
                pickled = f.getvalue()
                N = 5
                f = ioclass(pickled * N)
                unpickler = self.unpickler_class(f)
                for i in range(N):
                    if seekable:
                        pos = f.tell()
                    self.assertEqual(unpickler.load(), data1)
                    if seekable:
                        self.assertEqual(f.tell(), pos + len(pickled))
                self.assertRaises(EOFError, unpickler.load)

    def test_multiple_unpicklings_seekable(self):
        self._check_multiple_unpicklings(io.BytesIO)

    def test_multiple_unpicklings_unseekable(self):
        self._check_multiple_unpicklings(UnseekableIO, seekable=False)

    def test_multiple_unpicklings_minimal(self):
        # File-like object that doesn't support peek() and readinto()
        # (bpo-39681)
        self._check_multiple_unpicklings(MinimalIO, seekable=False)

    def test_unpickling_buffering_readline(self):
        # Issue #12687: the unpickler's buffering logic could fail with
        # text mode opcodes.
        data = list(range(10))
        for proto in protocols:
            for buf_size in range(1, 11):
                f = io.BufferedRandom(io.BytesIO(), buffer_size=buf_size)
                pickler = self.pickler_class(f, protocol=proto)
                pickler.dump(data)
                f.seek(0)
                unpickler = self.unpickler_class(f)
                self.assertEqual(unpickler.load(), data)
# Tests for dispatch_table attribute

# Sentinel value AAA reduces to by default.
REDUCE_A = 'reduce_A'

class AAA(object):
    # Always reduces to the REDUCE_A string.
    def __reduce__(self):
        return str, (REDUCE_A,)

class BBB(object):
    def __init__(self):
        # Add an instance attribute to enable state-saving routines at pickling
        # time.
        self.a = "some attribute"

    def __setstate__(self, state):
        # Marker value so tests can tell which state setter actually ran.
        self.a = "BBB.__setstate__"

def setstate_bbb(obj, state):
    """Custom state setter for BBB objects

    Such callable may be created by other persons than the ones who created the
    BBB class. If passed as the state_setter item of a custom reducer, this
    allows for custom state setting behavior of BBB objects. One can think of
    it as the analogous of list_setitems or dict_setitems but for foreign
    classes/functions.
    """
    obj.a = "custom state_setter"
class AbstractCustomPicklerClass:
    """Pickler implementing a reducing hook using reducer_override."""

    def reducer_override(self, obj):
        """Map a handful of well-known names to canned reduction results.

        Objects named 'f' reduce to the int 5, 'MyClass' to a fixed string,
        'g' yields an invalid (non-tuple) result so the pickler must raise,
        and 'h' simulates a reducer that fails outright. Anything else
        returns NotImplemented, deferring to the default pickling machinery.
        """
        target = getattr(obj, "__name__", None)
        if target == 'h':
            # Simulate a case when the reducer fails. The error should
            # be propagated to the original ``dump`` call.
            raise ValueError('The reducer just failed')
        canned = {
            # asking the pickler to save f as 5
            'f': (int, (5, )),
            'MyClass': (str, ('some str',)),
            # an invalid result (not a 2-5 tuple or a string): the pickler
            # should raise a proper error when it sees this.
            'g': False,
        }
        return canned.get(target, NotImplemented)
class AbstractHookTests(unittest.TestCase):
    """Tests of the reducer_override hook on custom Pickler subclasses."""

    def test_pickler_hook(self):
        # test the ability of a custom, user-defined CPickler subclass to
        # override the default reducing routines of any type using the method
        # reducer_override
        def f():
            pass

        def g():
            pass

        def h():
            pass

        class MyClass:
            pass

        for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
            with self.subTest(proto=proto):
                bio = io.BytesIO()
                p = self.pickler_class(bio, proto)
                p.dump([f, MyClass, math.log])
                new_f, some_str, math_log = pickle.loads(bio.getvalue())
                self.assertEqual(new_f, 5)
                self.assertEqual(some_str, 'some str')
                # math.log does not have its usual reducer overridden, so the
                # custom reduction callback should silently direct the pickler
                # to the default pickling by attribute, by returning
                # NotImplemented
                self.assertIs(math_log, math.log)
                # g's reducer returns an invalid result -> PicklingError.
                with self.assertRaises(pickle.PicklingError):
                    p.dump(g)
                # h's reducer raises -> the error propagates out of dump().
                with self.assertRaisesRegex(
                        ValueError, 'The reducer just failed'):
                    p.dump(h)

    @support.cpython_only
    def test_reducer_override_no_reference_cycle(self):
        # bpo-39492: reducer_override used to induce a spurious reference cycle
        # inside the Pickler object, that could prevent all serialized objects
        # from being garbage-collected without explicitly invoking gc.collect.
        for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
            with self.subTest(proto=proto):
                def f():
                    pass
                wr = weakref.ref(f)
                bio = io.BytesIO()
                p = self.pickler_class(bio, proto)
                p.dump(f)
                new_f = pickle.loads(bio.getvalue())
                assert new_f == 5
                # With no cycle, dropping the last strong references must
                # let f be collected immediately (weakref goes dead).
                del p
                del f
                self.assertIsNone(wr())
class AbstractDispatchTableTests(unittest.TestCase):
    """Tests for per-class and per-instance Pickler.dispatch_table support."""

    def test_default_dispatch_table(self):
        # No dispatch_table attribute by default
        f = io.BytesIO()
        p = self.pickler_class(f, 0)
        with self.assertRaises(AttributeError):
            p.dispatch_table
        self.assertFalse(hasattr(p, 'dispatch_table'))

    def test_class_dispatch_table(self):
        # A dispatch_table attribute can be specified class-wide
        dt = self.get_dispatch_table()

        class MyPickler(self.pickler_class):
            dispatch_table = dt

        def dumps(obj, protocol=None):
            f = io.BytesIO()
            p = MyPickler(f, protocol)
            self.assertEqual(p.dispatch_table, dt)
            p.dump(obj)
            return f.getvalue()

        self._test_dispatch_table(dumps, dt)

    def test_instance_dispatch_table(self):
        # A dispatch_table attribute can also be specified instance-wide
        dt = self.get_dispatch_table()

        def dumps(obj, protocol=None):
            f = io.BytesIO()
            p = self.pickler_class(f, protocol)
            p.dispatch_table = dt
            self.assertEqual(p.dispatch_table, dt)
            p.dump(obj)
            return f.getvalue()

        self._test_dispatch_table(dumps, dt)

    def _test_dispatch_table(self, dumps, dispatch_table):
        """Shared body: mutate dispatch_table and verify only the custom
        pickler (via `dumps`) is affected, never the default pickle module."""

        def custom_load_dump(obj):
            return pickle.loads(dumps(obj, 0))

        def default_load_dump(obj):
            return pickle.loads(pickle.dumps(obj, 0))

        # pickling complex numbers using protocol 0 relies on copyreg
        # so check pickling a complex number still works
        z = 1 + 2j
        self.assertEqual(custom_load_dump(z), z)
        self.assertEqual(default_load_dump(z), z)

        # modify pickling of complex
        REDUCE_1 = 'reduce_1'
        def reduce_1(obj):
            return str, (REDUCE_1,)
        dispatch_table[complex] = reduce_1
        self.assertEqual(custom_load_dump(z), REDUCE_1)
        self.assertEqual(default_load_dump(z), z)

        # check picklability of AAA and BBB
        a = AAA()
        b = BBB()
        self.assertEqual(custom_load_dump(a), REDUCE_A)
        self.assertIsInstance(custom_load_dump(b), BBB)
        self.assertEqual(default_load_dump(a), REDUCE_A)
        self.assertIsInstance(default_load_dump(b), BBB)

        # modify pickling of BBB
        dispatch_table[BBB] = reduce_1
        self.assertEqual(custom_load_dump(a), REDUCE_A)
        self.assertEqual(custom_load_dump(b), REDUCE_1)
        self.assertEqual(default_load_dump(a), REDUCE_A)
        self.assertIsInstance(default_load_dump(b), BBB)

        # revert pickling of BBB and modify pickling of AAA
        REDUCE_2 = 'reduce_2'
        def reduce_2(obj):
            return str, (REDUCE_2,)
        dispatch_table[AAA] = reduce_2
        del dispatch_table[BBB]
        self.assertEqual(custom_load_dump(a), REDUCE_2)
        self.assertIsInstance(custom_load_dump(b), BBB)
        self.assertEqual(default_load_dump(a), REDUCE_A)
        self.assertIsInstance(default_load_dump(b), BBB)

        # End-to-end testing of save_reduce with the state_setter keyword
        # argument. This is a dispatch_table test as the primary goal of
        # state_setter is to tweak objects reduction behavior.
        # In particular, state_setter is useful when the default __setstate__
        # behavior is not flexible enough.

        # No custom reducer for b has been registered for now, so
        # BBB.__setstate__ should be used at unpickling time
        self.assertEqual(default_load_dump(b).a, "BBB.__setstate__")

        def reduce_bbb(obj):
            return BBB, (), obj.__dict__, None, None, setstate_bbb

        dispatch_table[BBB] = reduce_bbb

        # The custom reducer reduce_bbb includes a state setter, that should
        # have priority over BBB.__setstate__
        self.assertEqual(custom_load_dump(b).a, "custom state_setter")
if __name__ == "__main__":
    # Print some stuff that can be used to rewrite DATA{0,1,2}:
    # dump the canonical test object under every protocol, print it as a
    # Python bytes-literal block, then append its pickletools disassembly.
    from pickletools import dis
    x = create_data()
    for i in range(pickle.HIGHEST_PROTOCOL+1):
        p = pickle.dumps(x, i)
        print("DATA{0} = (".format(i))
        # Emit the pickle 20 bytes per line so the literal stays readable.
        for j in range(0, len(p), 20):
            b = bytes(p[j:j+20])
            print(" {0!r}".format(b))
        print(")")
        print()
        print("# Disassembly of DATA{0}".format(i))
        print("DATA{0}_DIS = \"\"\"\\".format(i))
        dis(p)
        print("\"\"\"")
        print()
|
close_btn.py | import imghdr
import multiprocessing as mp
import os
import random
import uuid
import numpy as np
import requests
import torch
import yaml
from PIL import Image, ImageOps
from torchvision.transforms import transforms
from datasets.utils import get_transform
from tools.progbar import progbar
class CloseButton:
    """Dataset that synthesizes "close button" detection samples.

    Loads an image list and config from disk (or a cached info file), then at
    __getitem__ time pastes a synthetic button onto each image and returns the
    transformed image plus the button's normalized center/size coordinates.
    """

    def __init__(self, opt, split="train"):
        self.base_dir = None
        self.image_dir = None
        self.split = split
        self.opt = opt
        # set out space params.
        self.data_info_path = os.path.join(self.opt.data_gen, self.opt.dataset + "_{}_info.pth.tar".format(split))
        # NOTE(review): yaml.load without an explicit Loader is deprecated and
        # unsafe on untrusted input — confirm the config source is trusted.
        self.configure = yaml.load(open(os.path.join(self.opt.configure_path, self.opt.dataset + '.yaml'), 'r'))
        # Copy every config key onto opt so later code can read them uniformly.
        for k, v in self.configure.items():
            # self.opt.__setattr__('in_channel', self.configure['channel'])
            self.opt.__setattr__(k, v)
        self.btn_path = self.opt.btn_path
        if self.opt.dataset == "close_btn":
            self.transforms = transforms.Compose([
                transforms.ColorJitter()
            ])
            print("close_btn transform setting..")
        # NOTE(review): self.transforms is only set in the branch above, so
        # this presumably assumes dataset == "close_btn" — TODO confirm.
        self.preprocess = transforms.Compose([
            self.transforms,
            get_transform(opt),
        ])
        if os.path.exists(self.data_info_path) and not self.opt.refresh:
            # Cached dataset info exists and no refresh requested: reuse it.
            print("=> load dataset information from {}".format(self.data_info_path))
            self.data_info = torch.load(self.data_info_path)
        else:
            print("=> create dataset information....")
            self.data_info = {
                "blob": self._get_blob()
            }
            # ==================== training entries ====================
            split_base_dir = os.path.join(self.configure["base_dir"], self.configure["split_dir"])
            with open(os.path.join(split_base_dir, "{}.txt".format(split)), "r") as f:
                self.data_list = f.read().split()
                if self.data_list[-1] == "":
                    self.data_list = self.data_list[:-1]
            self.data_info["image_dir"] = os.path.join(self.opt.base_dir, self.opt.image_dir)
            self.data_info["data_list"] = self.data_list
            # ================= create checkpoint files. to load next time. ======================
            torch.save(self.data_info, self.data_info_path)
            print("=> saved dataset information from {}".format(self.data_info_path))
        # Promote cached info entries (image_dir, data_list, ...) to attributes.
        for k, v in self.data_info.items():
            self.__setattr__(k, v)
        print("=> Total num of {}ing pairs: {}".format(self.split, self.__len__()))

    @staticmethod
    def _get_blob(src=None, coor=None):
        # Uniform sample container: image tensor plus label coordinates.
        return {"Source": src, "Coordinates": coor}

    def __len__(self):
        return len(self.data_list)

    def blend_btn(self, _img, img_size):
        """Paste a gray synthetic button at a random position on _img.

        Returns the modified image and the button's normalized
        [cx, cy, w, h] (all divided by the image height).
        """
        w, h = img_size
        side = h
        random_scale = 1 + (random.randint(0, 100) - 50) / 200  # scale in [0.75, 1.25]
        btn_side = int(w * .12 * random_scale)
        btn_size = (btn_side, btn_side)
        btn = Image.new("L", btn_size, 128)
        coor_r = random.randint(0, h - btn_side)
        coor_c = random.randint(0, w - btn_side)
        # Use the template button's alpha channel (faded by 20%) as paste mask.
        btn_alpha = Image.blend(Image.open(open(self.btn_path, "rb")).resize(btn_size).convert(
            'RGBA').split()[-1], Image.new("L", btn_size, 0), 0.2)
        _img.paste(btn, (coor_c, coor_r), mask=btn_alpha)
        return _img, {"Coordinates": np.array([(coor_c + btn_side / 2) / side, (coor_r + btn_side / 2) / side,
                                               btn_side / side, btn_side / side])}

    def _get_one(self, idx):
        # mapping to path.
        img_path = self.data_list[idx]
        img_path = os.path.join(self.image_dir, img_path)
        # Resize to 234x416 then pad right to a 416x416 square.
        image = Image.open(img_path, 'r').convert('RGB').resize((234, 416))
        image = ImageOps.expand(image, (0, 0, 416 - 234, 0), fill=(0, 0, 0))
        image, label = self.blend_btn(image, (234, 416))
        image = self.preprocess(image)
        assert image is not None, "{}, image not exists, img_path".format(img_path)
        return image, label

    def __getitem__(self, idx):
        src, cls = self._get_one(idx)
        blob = self._get_blob(src, cls)
        return blob
def gen_list():
    """Download images from a URL list with a process pool and write the
    resulting local file names into a per-split txt file.

    Usage: invoked with the split name ("train"/"val") as argv[1].
    """
    save_path_list = "/Users/tony/Develop/data/close_btn/"
    save_path = os.path.join(save_path_list, "img")
    # Shared dict: per-worker-pid count of completed downloads.
    gen_status = mp.Manager().dict()
    n_procs = 32
    queue = mp.Queue()
    results = mp.Queue()

    def request_download(path, image_url):
        # Fetch one image; name it by uuid + sniffed image type (png fallback).
        r = requests.get(image_url)
        ext = imghdr.what(None, r.content)
        save_name = uuid.uuid1().hex + "." + (ext if ext is not None else "png")
        save_path = os.path.join(path, save_name)
        with open(save_path, "wb") as f:
            f.write(r.content)
        return save_name

    def func(queue, result):
        # Worker loop: drain the URL queue until it is empty.
        # NOTE(review): queue.empty() on mp.Queue is unreliable (racy), and
        # any exception silently terminates the worker — TODO confirm intent.
        while not queue.empty():
            try:
                url = queue.get()
                local_data = request_download(save_path, url)
                result.put(local_data)
                pid = os.getpid()
                gen_status[pid] = 1 if not pid in gen_status.keys() else gen_status[pid] + 1
            except Exception as ex:
                # print(str(ex))
                break

    def mp_run(data_list, split, func):
        # Fill the work queue, spawn workers, and busy-poll progress until
        # every item is accounted for, then flush results to the split file.
        for data in data_list:
            queue.put(data)
        mp_pools = []
        for _ in range(n_procs):
            mp_t = mp.Process(target=func, args=(queue, results))
            mp_pools.append(mp_t)
            mp_t.start()
        my_bar = progbar(len(data_list), width=30)
        while True:
            sum_cnt = sum([gen_status[pid] for pid in gen_status.keys()])
            my_bar.update(sum_cnt)
            if sum_cnt == len(data_list):
                break
        with open(os.path.join(save_path_list, "{}.txt".format(split)), "w") as f:
            # f.write("\n".join(data_list))
            while not results.empty():
                f.write(results.get_nowait() + "\n")

    split = (os.sys.argv[1])
    image_url_list = "../configures/image_urls.txt"
    save_path_list = "/Users/tony/Develop/data/close_btn/"
    save_path = os.path.join(save_path_list, "img")
    with open(image_url_list, "r") as f:
        urls = f.read().split()
    # First half of the URLs is train, second half is val.
    train_list = urls[:int(len(urls) / 2)]
    val_list = urls[int(len(urls) / 2):]
    if split == "train":
        mp_run(train_list, split, func)
    else:
        mp_run(val_list, split, func)
def gen_labeled_data():
    """Blend synthetic close-buttons onto local images and emit YOLO-style
    label files (class cx cy w h, normalized) alongside the images.

    Runs a process pool over every file under `base_dir`.
    """
    btn_path = ""
    label_base_dir = "/mnt/cephfs_new_wj/lab_ad_idea/maoyiming/data/close_btn/labels"
    base_dir = "/mnt/cephfs_new_wj/lab_ad_idea/maoyiming/data/close_btn/image"
    # Shared dict: per-worker-pid count of processed images.
    gen_status = mp.Manager().dict()
    n_procs = 32
    queue = mp.Queue()

    def mp_run(data_list, func):
        # Fill the queue, spawn workers, busy-poll the progress bar until all
        # items are done.
        for data in data_list:
            queue.put(data)
        mp_pools = []
        for _ in range(n_procs):
            mp_t = mp.Process(target=func, args=(queue, label_base_dir))
            mp_pools.append(mp_t)
            mp_t.start()
        my_bar = progbar(len(data_list), width=30)
        while True:
            sum_cnt = sum([gen_status[pid] for pid in gen_status.keys()])
            my_bar.update(sum_cnt)
            if sum_cnt == len(data_list):
                break

    def func(queue, label_base_path):
        # Worker loop; any exception ends this worker (printed, then break).
        while not queue.empty():
            try:
                url = queue.get()
                do_one(str(os.path.join(base_dir, url)), label_base_path)
                pid = os.getpid()
                gen_status[pid] = 1 if not pid in gen_status.keys() else gen_status[pid] + 1
            except Exception as ex:
                print(str(ex))
                break

    def blend_btn(_img, area):
        # Paste one near-white button inside the normalized sub-area
        # [top, bottom, left, right] of _img; returns normalized [cx, cy, w, h].
        w, h = _img.size
        random_scale = 1 + (random.randint(0, 100) - 50) / 200  # scale in [0.75, 1.25]
        btn_side = int(w * .12 * random_scale)
        btn_size = (btn_side, btn_side)
        btn = Image.new("L", btn_size, random.randint(240, 255))
        coor_r = random.randint(int(area[0] * h), int(area[1] * h - btn_side))
        coor_c = random.randint(int(area[2] * w), int(area[3] * w - btn_side))
        # Pick one of the 22 button templates; use its faded alpha as mask.
        random_idx = random.randint(0, 21)
        btn_path = os.path.join("/mnt/cephfs_new_wj/lab_ad_idea/maoyiming/code/gans/datasets/btn", "{}.png".format(random_idx))
        btn_alpha = Image.blend(Image.open(open(btn_path, "rb")).resize(btn_size).convert(
            'RGBA').split()[-1], Image.new("L", btn_size, 0), 0.2)
        _img.paste(btn, (coor_c, coor_r), mask=btn_alpha)
        return _img, [(coor_c + btn_side / 2) / w, (coor_r + btn_side / 2) / h, btn_side / w, btn_side / h]

    def do_one(image_path, label_base_path):
        # Process one image: resize, randomly darken, paste 0-4 buttons (one
        # per quadrant with probability 0.5 each), save image + label file.
        print(image_path)
        image = Image.open(image_path, 'r').convert('RGB')
        image = image.resize((720, 1280))
        label_all = []
        # Quadrants as [top, bottom, left, right] fractions.
        pose_all = [[0, 0.5, 0, 0.5], [0.5, 1, 0, 0.5], [0, 0.5, 0.5, 1], [0.5, 1, 0.5, 1]]
        if random.random() < 0.333:
            image = image.point(lambda p: p * 0.80)
        for pos in pose_all:
            if random.random() < 0.5:
                image, coor = blend_btn(image, pos)
                label_all.append([0] + coor)
        image_blent = image
        # Write output under the sibling "images" directory.
        image_blent.save(image_path.replace("image", "images"))
        label_path = os.path.join(label_base_path, image_path.split("/")[-1].split(".")[0] + ".txt")
        label_str = ""
        if len(label_all) != 0:
            label_str = "\n".join([" ".join(map(str, label)) for label in label_all])
        # print(label_str)
        with open(label_path, "w") as f:
            f.write(label_str)
            f.close()

    path_list = os.listdir(base_dir)
    mp_run(path_list, func)
if __name__ == '__main__':
    # Entry point: regenerate the labeled dataset (gen_list is the older
    # download-only flow, kept disabled below).
    gen_labeled_data()
    # gen_list()
# btn_path = "/Users/tony/PycharmProjects/pytorch-train/datasets/btn.png"
# btn_temp = cv2.imread(btn_path, cv2.IMREAD_UNCHANGED)
# btn_temp = cv2.resize(btn_temp, (50, 50))[:, :, -1]
# # [:, :, -1]
#
# test_path = "/Users/tony/PycharmProjects/pytorch-train/datasets/test.jpeg"
# test_out = "/Users/tony/PycharmProjects/pytorch-train/datasets/test_out.jpeg"
#
# img = Image.open(open(test_path, 'rb'), "RGB")
# img.paste(btn_temp, (0, 0))
# img.save(test_out)
#
# print(btn_temp.shape)
# print("done")
# handle_data(train_list,"train")
# handle_data(val_list, "val")
|
main.py | from tkinter.ttk import Progressbar
from tkinter import scrolledtext
from tkinter import filedialog
from tkinter import ttk
from tkinter import *
from methods import *
import methods
import threading
# init window #
window = Tk()
window.title("Whatsend v0.3 - prerelease") #window name
window.resizable(False, False) #block window resizing
tab_control = ttk.Notebook(window) #create tabs inside window
# positioning objects #
#init first tab
tab1 = ttk.Frame(tab_control)
tab_control.add(tab1, text='Main')
#label
lbl1 = Label(tab1, text="Select phone numbers file")
lbl1.grid(column=0, row=0, sticky="w")
#file directory text box
filedir = Entry(tab1, width=30) #text entry
filedir.grid(column=0, row=1) #position of the objects
#choose file button
def choose_file():
window.filename = filedialog.askopenfilename(title = "Seleziona un file .txt", filetypes = (('text files', 'txt'),))
filedir.config(state=NORMAL)
filedir.delete(0, END)
filedir.insert(0, window.filename)
filedir.config(state=DISABLED)
filebtn = Button(tab1, text="Choose file", width=10, command=choose_file)
filebtn.grid(column=1, row=1)
filedir.config(state=DISABLED)
#space
space1 = Label(tab1)
space1.grid(column=0, row=2)
#label
lbl2 = Label(tab1, text="Write the message")
lbl2.grid(column=0, row=3, sticky="w")
#text area
txt = scrolledtext.ScrolledText(tab1, width=50, height=3)
txt.grid(column=0, row=4, columnspan=2)
#space
space2 = Label(tab1)
space2.grid(column=0, row=5)
#start button
def startStop():
if filedir.get() and len(txt.get("1.0", "end-1c")) > 0 and startbtn["text"] == "Start":
startbtn["text"] = "Stop"
x = threading.Thread(target=mainThread, args=(filedir.get(), txt.get("1.0", "end-1c")))
x.start()
methods.stop_threads = False
elif startbtn["text"] == "Stop":
startbtn["text"] = "Start"
methods.stop_threads = True
#todo: kill thread safely
startbtn = Button(tab1, text="Start", width=10, command=startStop)
startbtn.grid(column=0, row=6, columnspan=2)
#space
space3 = Label(tab1)
space3.grid(column=0, row=7)
bar = Progressbar(tab1, length=350)
bar.grid(column=0, row=8, columnspan=2)
bar['value'] = methods.progress
#tab1.update_idletasks()
#init second tab
tab2 = ttk.Frame(tab_control)
tab_control.add(tab2, text='About')
_license = scrolledtext.ScrolledText(tab2, width=50, height=20)
#txt.configure(background='#f1f1f1')
_license.insert(INSERT,
"""
MIT License
Copyright (c) 2021 Giovanni Almirante
""")
_license.grid(column=0, row=4, columnspan=2)
_license.configure(state ='disabled')
tab_control.pack(expand=1, fill='both')
# end of program #
window.mainloop() |
foo.py | # Python 3.3.3 and 2.7.6
# python fo.py
from threading import Thread
# Potentially useful thing:
# In Python you "import" a global variable, instead of "export"ing it when you declare it
# (This is probably an effort to make you feel bad about typing the word "global")
# Teaching exercise: two threads mutate the same global counter without any
# synchronization, so the final value is nondeterministic (a data race on
# the read-modify-write `i += 1` / `i -= 1`).
i = 0

def incrementingFunction():
    """Increment the shared global i one million times (unsynchronized)."""
    global i
    for x in range(1000000):
        i += 1

def decrementingFunction():
    """Decrement the shared global i one million times (unsynchronized)."""
    global i
    for k in range (1000000):
        i -= 1

def main():
    # `global i` so the final value is read from the module global.
    global i
    incrementing = Thread(target = incrementingFunction, args = (),)
    decrementing = Thread(target = decrementingFunction, args = (),)
    # Run both mutators concurrently, then wait for both to finish.
    incrementing.start()
    decrementing.start()
    incrementing.join()
    decrementing.join()
    # Rarely exactly 0: the unsynchronized updates can lose increments.
    print("The magic number is %d" % (i))

main()
|
weblinks.py | from flask import Flask, request
from threading import Thread
import re
import json
import pymongo
import os
# MongoDB Atlas credentials come from the environment (never hard-coded)
username = os.environ["DB_USERNAME"]
password = os.environ["DB_PASSWORD"]
# `database` first holds the connection string, then is rebound to the client
database = f"mongodb+srv://{username}:{password}@cluster0.buvu1.mongodb.net/database?retryWrites=true&w=majority"
database = pymongo.MongoClient(database)
app = Flask(__name__)
@app.route("/ping")
def ping_weblink():
    """Health-check endpoint: always answers with the literal "Pong"."""
    reply = "Pong"
    return reply
@app.route("/update/plot", methods=["POST"])
def update_plot_weblink():
    """Upsert one plot document from a "✦"-separated POST body.

    The raw body is decoded as UTF-8, Minecraft colour codes (section
    sign + one following char) are stripped, and the text is split on
    "✦" into 13 fields.  Field 12 must equal the SPECIAL_KEY environment
    variable or the request is rejected.  Storage is delete-then-insert
    (a poor man's upsert) and the stored document is echoed back.
    """
    text = re.sub("\u00A7.","",str(request.data, "utf-8"))
    text = text.split("✦")
    if text[12] != os.environ["SPECIAL_KEY"]:
        return "Invalid key"
    # local renamed from `json` so the stdlib json module is not shadowed
    payload = {
        "_id" : text[0],
        "name" : text[1],
        "owner" : text[2],
        "node" : text[3],
        "tags" : text[4],
        "autoClear" : text[5],
        "lastActive" : text[6],
        "whitelisted" : text[7],
        "playerCount" : text[8],
        "currentVotes" : text[9],
        "barrelLoc" : text[10],
        "icon" : text[11]
    }
    data = database.discStudiosBot.plotData.find_one({"_id":text[0]})
    if data is not None:
        database.discStudiosBot.plotData.delete_one({"_id":text[0]})
    database.discStudiosBot.plotData.insert_one(payload)
    return payload
def run():
    # serve the Flask app on all interfaces, port 8080 (blocking call)
    app.run(host='0.0.0.0', port=8080)

# run the web server on a background thread so the importing program continues
t = Thread(target=run)
t.start()
daysim_direct_exe.py | import os
import pandas as pd
import math
import multiprocessing
import shutil
import time
import csv
import ntpath
from OCC import BRepGProp, GProp, TopoDS, BRep
from OCC.StlAPI import StlAPI_Reader
from OCCUtils import Topology
from pyliburo import gml3dmodel
from interface2py3d import pyptlist_frm_occface
import pyliburo
from pyliburo import py2radiance
def make_unique(original_list):
    """Return a copy of *original_list* with duplicates removed, keeping
    first-occurrence order.

    Membership is tested with ``in`` (overall O(n^2)) on purpose: callers
    pass lists of [x, y, z] coordinate lists, which are unhashable, so a
    set-based dedupe is not applicable.
    """
    # plain loop instead of the original side-effect-only list comprehension
    unique_list = []
    for obj in original_list:
        if obj not in unique_list:
            unique_list.append(obj)
    return unique_list
def points_from_face(face):
    """Collect the unique [x, y, z] vertex coordinates of an OCC face.

    Walks every wire -> edge -> vertex of *face*, records each vertex's
    coordinates, and returns them deduplicated in first-seen order.
    """
    raw_coords = []
    for wire in Topology.Topo(face).wires():
        for edge in Topology.Topo(wire).edges():
            for vertex in Topology.Topo(edge).vertices():
                # query the geometric point once per vertex
                pnt = BRep.BRep_Tool().Pnt(vertex)
                raw_coords.append([pnt.X(), pnt.Y(), pnt.Z()])
    return list(make_unique(raw_coords))
def add_rad_mat(aresults_path, abui, ageometry_table):
    """Append one Radiance material definition per geometry row to the
    building's ``<abui>_material.rad`` file.

    The .rad file is temporarily renamed to .txt while appending (the
    original authors' workaround), then renamed back.

    :param aresults_path: results root folder
    :param abui: building name used in the path/file name
    :param ageometry_table: DataFrame indexed by geometry name with columns
        mat_name, mat_value_R/G/B, specularity, roughness, mat_type, mat_nr
    """
    # NOTE: the backslashes are literal characters inside the joined path;
    # they act as separators on Windows only (original behaviour kept).
    file_path = os.path.join(aresults_path, abui + '\\rad\\' + abui +
                             "_material")
    file_name_rad = file_path + ".rad"
    file_name_txt = file_path + ".txt"
    os.rename(file_name_rad, file_name_rad.replace(".rad", ".txt"))
    with open(file_name_txt, 'a') as write_file:
        for geo in ageometry_table.index.values:
            mat_name = ageometry_table['mat_name'][geo]
            mat_value_R = round(ageometry_table['mat_value_R'][geo], 4)
            mat_value_G = round(ageometry_table['mat_value_G'][geo], 4)
            mat_value_B = round(ageometry_table['mat_value_B'][geo], 4)
            specularity = round(ageometry_table['specularity'][geo], 4)
            roughness = round(ageometry_table['roughness'][geo], 4)
            mat_type = ageometry_table['mat_type'][geo]
            mat_nr = ageometry_table['mat_nr'][geo]
            # Radiance primitive: "void <type> <name> 0 0 <n> R G B spec rough"
            string = "void" + " " + str(mat_type) + " " + mat_name + " 0 0" + " " + str(mat_nr) + " " + str(mat_value_R) + " " + str(mat_value_G) + " " + str(mat_value_B) \
                     + " " + str(specularity) + " " + str(roughness)
            write_file.writelines('\n' + string + '\n')
        # redundant explicit close() removed -- the with-block closes the file
    os.rename(file_name_txt, file_name_rad.replace(".txt", ".rad"))
def percentage(task, now, total):
    # Print an in-place (carriage-return) ASCII progress bar for *task*.
    # Python 2 print statements -- this whole module is Python 2.
    percent = round((float(now)/float(total))*100, 0)
    division = 5
    number = int(round(percent/division, 0))
    # ">" for done, "_" for remaining, in 5%-wide cells (100/division cells)
    bar = number * ">" + (100 / division - number) * "_"
    if now == total:
        print "\r", str(task), bar, percent, "%",
    else:
        print "\r", str(task),bar, percent, "%",
def geometry2radiance(arad, ageometry_table, ainput_path):
    # Load every building STL named in ageometry_table and register each of
    # its faces as a RadSurface (with the row's material) on the Rad object.
    # parameters for the radiance
    # loop over all builings
    bcnt = 0
    for geo in ageometry_table.index.values:
        filepath = os.path.join(ainput_path, geo + ".stl")
        geo_solid = TopoDS.TopoDS_Solid()
        StlAPI_Reader().Read(geo_solid, str(filepath))
        face_list = pyliburo.py3dmodel.fetch.faces_frm_solid(geo_solid)
        bf_cnt = 0
        for face in face_list:
            bface_pts = pyptlist_frm_occface(face)
            # surface name encodes building index + face index
            srfname = "building_srf" + str(bcnt) + str(bf_cnt)
            srfmat = ageometry_table['mat_name'][geo]
            py2radiance.RadSurface(srfname, bface_pts, srfmat, arad)
            bf_cnt += 1
        bcnt += 1
    # NOTE(review): indentation was lost in this dump; the print is placed
    # after the loop (once per call) -- confirm against the original file.
    print 'building done'
def calc_sensors(aresults_path, abui, ainput_path, axdim, aydim):
    # Generate sensor points on every face of building *abui*'s STL and
    # write two CSVs: <abui>_sen_df.csv (one row per sensor point with
    # position/normal/orientation/tilt) and <abui>_fps_df.csv (face points).
    print abui
    sen_df = []
    fps_df = []
    # import stl file
    filepath = os.path.join(ainput_path, abui + ".stl")
    geo_solid = TopoDS.TopoDS_Solid()
    StlAPI_Reader().Read(geo_solid, str(filepath))
    # calculate geometries properties
    props = GProp.GProp_GProps()
    BRepGProp.brepgprop_VolumeProperties(geo_solid, props)
    # reverse geometry if volume is negative
    if props.Mass() < 0:
        bui_vol = (-props.Mass())
        geo_solid.Reverse()
    else:
        bui_vol = (props.Mass())
    # get all faces from geometry
    face_list = pyliburo.py3dmodel.fetch.faces_frm_solid(geo_solid)
    fac_int = 0
    for face in face_list:
        normal = pyliburo.py3dmodel.calculate.face_normal(face)
        # calculate pts of each face
        fps = points_from_face(face)
        fps_df.append([val for sublist in fps for val in sublist])
        # calculate sensor points of each face
        sensor_srfs, sensor_pts, sensor_dirs = \
            gml3dmodel.generate_sensor_surfaces(face, axdim, aydim)
        fac_area = pyliburo.py3dmodel.calculate.face_area(face)
        # generate dataframe with building, face and sensor ID
        sen_int = 0
        for sen_dir in sensor_dirs:
            # orientation/tilt in degrees derived from the face normal
            orientation = math.copysign(math.acos(normal[1]), normal[0]) * 180 / math.pi
            tilt = math.acos(normal[2]) * 180 / math.pi
            sen_df.append((fac_int, sen_int, fac_area, fac_area / len(sensor_dirs), sensor_pts[sen_int][0], sensor_pts[sen_int][1],
                           sensor_pts[sen_int][2], normal[0], normal[1], normal[2], orientation, tilt))
            sen_int += 1
        fac_int += 1
    sen_df = pd.DataFrame(sen_df, columns=['fac_int', 'sen_int', 'fac_area','sen_area', 'sen_x', 'sen_y',
                                           'sen_z', 'sen_dir_x', 'sen_dir_y', 'sen_dir_z', 'orientation', 'tilt'])
    sen_df.to_csv(os.path.join(aresults_path, abui + '_' + 'sen_df' + '.csv'), index=None, float_format="%.2f")
    fps_df = pd.DataFrame(fps_df, columns=['fp_0_0', 'fp_0_1', 'fp_0_2', 'fp_1_0', 'fp_1_1', 'fp_1_2', 'fp_2_0', 'fp_2_1', 'fp_2_2', ])
    fps_df.to_csv(os.path.join(aresults_path, abui + '_' + 'fps_df' + '.csv'), index=None, float_format="%.2f")
def execute_daysim(name, aresults_path, arad, aweatherfile_path, rad_params, ageometry_table, aground_reflectance, atime_step, astart_time, aend_time, acsv_path):
    # Run the full Daysim pipeline for one building: load its sensor points,
    # initialise a Daysim project, convert the weather file, optionally
    # substitute measured radiation data, then run gen_dc and ds_illum.
    sen_df = pd.read_csv(os.path.join(aresults_path, name + '_' + 'sen_df' + '.csv'))
    #sen_df = pd.read_csv(os.path.join(aresults_path, name +'.csv'))
    sen = sen_df[['sen_x', 'sen_y', 'sen_z']].values.tolist()
    sen_dir = sen_df[['sen_dir_x', 'sen_dir_y', 'sen_dir_z']].values.tolist()
    arad.set_sensor_points(sensor_normals=sen_dir, sensor_positions=sen)
    arad.create_sensor_input_file()
    # generate daysim result folders for all an_cores
    daysim_dir = os.path.join(aresults_path, name)
    arad.initialise_daysim(daysim_dir)
    # transform weather file
    arad.execute_epw2wea(aweatherfile_path, aground_reflectance)
    print 'wea complete'
    #further transform weather file from hourly to minute ds_shortterm
    if(atime_step != 60):
        execute_ds_shortterm(aweatherfile_path,arad.hea_file,atime_step)
        # NOTE(review): nesting reconstructed from a dump with stripped
        # indentation; measured-data substitution assumed to apply only in
        # sub-hourly mode -- confirm against the original file.
        if (acsv_path!=None):
            head_epw, tail_epw = ntpath.split(aweatherfile_path)
            wfilename_no_extension = tail_epw.replace(".epw", "")
            wea_name_time_step = wfilename_no_extension + "_" + str(atime_step) + "min.wea"
            measured_data2wea(astart_time, aend_time, os.path.join(arad.daysimdir_wea,wea_name_time_step ), acsv_path, atime_step)
    arad.execute_radfiles2daysim()
    print 'radfiles2daysim complete'
    add_rad_mat(aresults_path, name, ageometry_table)
    arad.write_radiance_parameters(rad_params['rad_ab'], rad_params['rad_ad'], rad_params['rad_as'],rad_params['rad_ar'],
                                   rad_params['rad_aa'], rad_params['rad_lr'],rad_params['rad_st'],rad_params['rad_sj'],
                                   rad_params['rad_lw'],rad_params['rad_dj'],rad_params['rad_ds'],rad_params['rad_dr'],
                                   rad_params['rad_dp'])
    #os.rename(os.path.join(arad.data_folder_path, abui + ".pts"), os.path.join(daysim_dir, 'pts', "sensor_points.pts"))
    arad.execute_gen_dc("w/m2")
    print 'gen_dc complete'
    arad.execute_ds_illum()
    print 'ds_illum complete'
    print name, 'done'
def execute_sum(results_path, bui):
    # Sum each row of the building's .ill results (columns 4+ are sensor
    # values) and write the per-timestep totals to <bui>.csv.
    res = pd.read_csv(os.path.join(results_path, bui, 'res', bui+'.ill'), sep=' ', header=None)
    # NOTE(review): DataFrame.ix was removed in pandas 1.0 and `sum` shadows
    # the builtin -- this only runs on the old py2/pandas stack.
    sum = res.ix[:, 4:].sum(axis=1)
    print[bui]
    sum.columns = [bui]
    sum.to_csv(os.path.join(results_path, bui, 'res', bui+'.csv'), index=None)
def calc_radiation(project_path, geometry_table_name, weatherfile_path, sen_list=None, sensor_geometries_name=None, ground_reflectance=0.2, time_resolution=60, measurement_start=None, measurement_end=None, measurement_path=None):
    # Top-level driver: build the Radiance scene from the geometry table,
    # compute (or copy) sensor points, then run the Daysim pipeline for each
    # building.  With sen_list=None sensor points are generated in parallel;
    # with sensor_geometries_name=None an existing sensor CSV is reused.
    print 'Daysim direct mode'
    # =============================== parameters =============================== #
    input_path = os.path.join(project_path, 'input')
    results_path = os.path.join(project_path, 'output')
    #params
    rad_params = {
        'rad_n': 2,
        'rad_af': 'file',
        'rad_ab': 0, #Because in this case only diret-direct irradiation is used
        'rad_ad': 512,
        'rad_as': 256,
        'rad_ar': 128,
        'rad_aa': 0.15,
        'rad_lr': 8,
        'rad_st': 0.15,
        'rad_sj': 0.75,
        'rad_lw': 0.002,
        'rad_dj': 0.7,
        'rad_ds': 0.15,
        'rad_dr': 3,
        'rad_dp': 512,
    }
    # =============================== Preface =============================== #
    rad = py2radiance.Rad(os.path.join(input_path, 'base.rad'), os.path.join(input_path, 'py2radiance_data'))
    # =============================== Import =============================== #
    geometry_table = pd.read_csv(os.path.join(input_path, geometry_table_name+".csv"), index_col='name')
    # =============================== Simulation =============================== #
    geometry2radiance(rad, geometry_table, input_path)
    print 'geometry2radiance complete'
    rad.create_rad_input_file()
    # calculate sensor points
    if sen_list == None:
        xdim = 1.0
        ydim = 1.0
        sensor_geometries = pd.read_csv(os.path.join(input_path, sensor_geometries_name + '.csv'), index_col='name')
        batch_names = sensor_geometries.index.values
        pool = multiprocessing.Pool() # use all available cores, otherwise specify the number you want as an argument
        for bui in batch_names:
            pool.apply_async(calc_sensors, args=(results_path, bui, input_path, xdim, ydim,))
        pool.close()
        pool.join()
    # load existing sensor points
    if sensor_geometries_name == None:
        batch_names = [sen_list]
        sensor_file_path = os.path.join(input_path, sen_list + '.csv')
        sensor_file_path_output = os.path.join(results_path, sen_list + '_sen_df.csv')
        shutil.copyfile(sensor_file_path, sensor_file_path_output)
    # execute daysim
    for bui in batch_names:
        execute_daysim(bui, results_path, rad, weatherfile_path, rad_params, geometry_table, ground_reflectance, time_resolution, measurement_start, measurement_end, measurement_path)
    # process.start()
    # processes = []
    # for bui in batch_names:
    #    process = multiprocessing.Process(target=execute_daysim, args=(bui, results_path, rad, weatherfile_path, rad_params, geometry_table, ground_reflectance, time_resolution, measurement_start, measurement_end, measurement_path))
    #    process.start()
    #    processes.append(process)
    # for process in processes:
    #    process.join()
    # calculate sums of each stl file
    # pool = multiprocessing.Pool()
    # print batch_names
    # for bui in batch_names:
    #    pool.apply_async(execute_sum, args=(results_path, bui,))
    # pool.close()
    # pool.join()
def one_hour_one_point(HOY, aoutput_path, afile_name, sensor_point=0):
    """Return one sensor's irradiance value for one hour of the year.

    :param HOY: hour of year, 1-based (row HOY-1 of the result file)
    :param aoutput_path: folder containing the result file
    :param afile_name: name of the space-separated Daysim result file
    :param sensor_point: 0-based sensor index; data columns start at
        column 4 (the first columns are timestamp metadata)
    :return: the value at the requested row/column
    """
    res = pd.read_csv(os.path.join(aoutput_path, afile_name), sep=' ', header=None)
    # .iloc replaces the long-removed DataFrame.ix; with header=None the
    # columns/index are RangeIndex, so positional and label lookup coincide.
    value = res.iloc[HOY - 1, sensor_point + 4]
    return value
def append_result(csv_path, value):
    # Append a single one-column row with str(value) to the CSV file.
    # NOTE(review): 'ab' + csv.writer is the Python 2 idiom; under Python 3
    # this would need open(csv_path, 'a', newline='') instead -- confirm the
    # target interpreter before porting.
    with open(csv_path, 'ab') as results_saved:
        writer = csv.writer(results_saved)
        writer.writerow([str(value)])
def execute_ds_shortterm(epw_filepath, hea_filepath, time_step):
    # Rewrite the Daysim header (.hea) file for a sub-hourly time step and
    # run the external ds_shortterm tool to interpolate the hourly .wea
    # weather file down to *time_step* minutes.
    head_epw, tail_epw = ntpath.split(epw_filepath)
    wfilename_no_extension = tail_epw.replace(".epw", "")
    wea_name_60 = wfilename_no_extension + "_60min.wea"
    wea_name_time_step = wfilename_no_extension + "_" + str(time_step) + "min.wea"
    head_hea, tail_hea = ntpath.split(hea_filepath)
    with open(hea_filepath, "r") as hea_file_read:
        lines = hea_file_read.read()
    # point the header at the new time step and at the project's wea folder
    lines = lines.replace('time_step 60', 'time_step ' + str(time_step))
    lines = lines.replace('wea_data_file' + ' ' + os.path.join(head_epw,wea_name_60), 'wea_data_file' + ' ' +os.path.join(os.path.join(head_hea,"wea"),wea_name_60 ))
    lines = lines.replace('wea_data_short_file wea\\' + str(wea_name_60),
                          'wea_data_short_file wea\\' + str(wea_name_time_step))
    print 'wea_data_short_file wea\\'
    with open(hea_filepath, "w") as hea_file_write:
        hea_file_write.write(lines)
        hea_file_write.close()  # redundant inside `with`, kept as-is
    # shell out to Daysim's ds_shortterm on the rewritten header
    command1 = "ds_shortterm" + " " + hea_filepath
    os.system(command1)
def wea_line(start, btime_step):
    """Return the 0-based data-row index in a .wea file for a timestamp.

    :param start: dict with keys 'month', 'day', 'hour', 'minute';
        'hour' counts hours already completed (wea convention)
    :param btime_step: time step of the wea file in minutes
    :return: 0-based row index of the timestamp
    """
    # cumulative days at the start of each month (non-leap year)
    cumDaysmonth = [0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365]
    hours = cumDaysmonth[start['month'] - 1] * 24
    hours = hours + 24 * (start['day'] - 1)  # -1 because the day hasnt finished yet
    hours = hours + start['hour']  # here the given hour is already over
    minutes = hours * 60 + start['minute']
    # explicit floor division keeps the original Python-2 integer semantics
    # when this runs under Python 3
    return minutes // btime_step - 1  # python starts with row 0
def measured_data2wea(start_time, end_time, wea_path, csv_path, time_step):
    # Overwrite the direct-normal irradiance rows of a .wea file with
    # measured data from a CSV, zeroing the diffuse column, for the interval
    # [start_time, end_time].  The wea header (first 6 lines) is preserved.
    #pay attention with summer time!
    #wea file only uses standard time which is winter time
    #set summer_time to 1 for data in summertime
    summertime = 1
    start_time['hour']=start_time['hour']-summertime
    line_start = wea_line(start_time, time_step)
    line_end = wea_line(end_time, time_step)
    with open(wea_path, 'r+') as weatherfile:
        with open(csv_path, 'r') as data_file:
            data = pd.read_csv(data_file, header=1, names = ['Time', 'Diffuse_horizontal','direct_normal'] )
            weatherfile_content = pd.read_csv(weatherfile, header = 5, sep=" ", names=['month', 'day', 'hour','direct irrad', 'indirect irrad'])
    line = line_start
    # the value is set at line+1 because when the minute x is averaged in the measurement data, this means all
    # seconds of hh:x:ss. In a weather file however, if we choose the minute x, this is the data of the xth
    # minute, which is one minute earlier
    # This allows to use start_time with the normal format (e.g. 17:34 for the minute between 34:00 and 35:00
    # NOTE(review): DataFrame.set_value was removed in pandas 1.0, and `line`
    # is NOT reset before the second loop, so the diffuse column is zeroed in
    # rows after the measured window -- looks like a bug; confirm intent.
    for rad in data.direct_normal:
        weatherfile_content.set_value(line+1 , 'direct irrad', round(rad,0))
        line +=1
    for rad in data.Diffuse_horizontal:
        weatherfile_content.set_value(line+1 , 'indirect irrad', round(0,0)) #here zero because we want to work only with direct radiation
        line +=1
    with open(wea_path, 'r') as weatherfile:
        weatherfile_all = weatherfile.readlines()
    # keep the 6 header lines, then append the modified data rows
    with open(wea_path, 'w' ) as weathernew:
        for i in range(0,6):
            weathernew.write(weatherfile_all[i])
    with open(wea_path, 'a' ) as weathernew:
        weatherfile_content.to_csv(wea_path, mode='a', sep=" ", header=False, index=False)
|
expo.py | """
GPS DEMONSTRATION
v.1
written by: Manuel Dionne
credit to: the internet
"""
import pyglet
from pyglet.gl import *
from pyglet.window import key
from pyglet.window import mouse
import primitives
import sys
import threading
import socket
from decimal import *
from threading import Thread
from utils import *
data = []  # latest decoded packet, shared with PrimWin (written by packethandler)
FPS = 60
address = ("", 9090)  # UDP listen address for incoming satellite packets
config = pyglet.gl.Config(sample_buffers=1, samples=4)  # enable 4x MSAA
# presumably demodulator settings passed to an external tool -- TODO confirm
baud = '60'
mark_freq = '20300'
space_freq = '20250'
confidence = '0.4'
calc_factor = Decimal(0.250)
getcontext().prec = 6  # Decimal precision for distance calculations
class PrimWin(pyglet.window.Window):
    """Fullscreen pyglet window demonstrating GPS trilateration.

    Draws a GPS sprite, three satellite sprites, distance circles and
    connecting lines over a grid.  Distances come either from UDP packets
    (module-global ``data``, filled by packethandler) or from sprite
    positions when drag mode is enabled.  Keys: L lines, C circles,
    D drag mode, F3 debug/FPS, = coord label, P screenshot, Esc quit.

    NOTE(review): this dump lost all indentation; block nesting below was
    reconstructed from the code's semantics -- confirm against the original.
    """
    def __init__(self):
        super(PrimWin, self).__init__(fullscreen=True,config=config, caption='GPS')
        glEnable(GL_BLEND)
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
        #initializing some variables for certain handlers
        self.linesvisible = False
        self.circlesvisible = False
        self.fpsvisible = False
        self.mousedown = False
        self.dragable = False
        self.coordlabelvisible = False
        #debug off
        self.debug = False
        #primitives initializing
        self.cone = primitives.Circle(425,625,width=100,color=(1,0,0,0.5))
        self.ctwo = primitives.Circle(925,725,width=100,color=(0,1,0,0.5))
        self.cthree = primitives.Circle(825,225,width=100,color=(0,0,1,0.5))
        self.lone = primitives.Line((0,0),(100,100),stroke=2,color=(0,0,0,1))
        self.ltwo = primitives.Line((0,0),(100,100),stroke=2,color=(0,0,0,1))
        self.lthree = primitives.Line((0,0),(100,100),stroke=2,color=(0,0,0,1))
        self.ldebug = primitives.Line((0,1065),(1920,1065),stroke=30,color=(0,0,0,0.5))
        #self.p = primitives.Pixel(10,10)
        #self.a = primitives.Arc(150,150,radius=100,color=(1.,0.,0.,1.),sweep=90,style=GLU_FILL)
        #self.P = primitives.Polygon([(0, 0), (50, 200), (80, 200),(60,100),(100,5)],color=(.3,0.2,0.5,.7))
        #Labels
        self.distlabelone = pyglet.text.Label('distance',font_name='Arial',font_size=20, x=0, y=10, color=(0,0,0,255))
        self.distlabeltwo = pyglet.text.Label('distance',font_name='Arial',font_size=20, x=0, y=10, color=(0,0,0,255))
        self.distlabelthree = pyglet.text.Label('distance',font_name='Arial',font_size=20, x=0, y=10, color=(0,0,0,255))
        self.coordlabel = pyglet.text.Label('coord',font_name='Arial',font_size=20, x=700, y=50, color=(0,0,0,255))
        self.debuglabel = pyglet.text.Label('debug',font_name='Lucida Console',font_size=20, x=10, y=1056, color=(255,255,255,255))
        #Images
        self.satimage = pyglet.resource.image('sat.png')
        self.gpsimage = pyglet.resource.image('gps.png')
        self.cursorimage = pyglet.resource.image('blank.png')
        self.grid = pyglet.resource.image('grid.png')
        #cursor
        #self.cursor = pyglet.window.ImageMouseCursor(self.cursorimage, 5, 5)
        #self.set_mouse_cursor(self.cursor)
        #batch and sprites
        self.batch = pyglet.graphics.Batch()
        # sprite 0 = the GPS receiver, sprites 1-3 = satellites
        self.sprites = [pyglet.sprite.Sprite(self.gpsimage, batch=self.batch),
                        pyglet.sprite.Sprite(self.satimage, batch=self.batch),
                        pyglet.sprite.Sprite(self.satimage, batch=self.batch),
                        pyglet.sprite.Sprite(self.satimage, batch=self.batch)
                        ]
        self.gridsprite = pyglet.sprite.Sprite(self.grid)
        # Setup debug framerate display:
        self.fps_display = pyglet.clock.ClockDisplay()
        # Schedule the update of this window, so it will advance in time at the
        # defined framerate. If we don't, the window will only update on events
        # like mouse motion.
        pyglet.clock.schedule_interval(self.update, 1.0/FPS)

    def on_draw(self):
        """Redraw grid, optional circles/lines/labels and all sprites."""
        # Window event
        setBackgroundColor(1,1,1)
        self.clear()
        #Make the background a grid
        self.gridsprite.draw()
        #render the circles if it's turned on
        if self.circlesvisible:
            self.cone.render()
            self.ctwo.render()
            self.cthree.render()
        #render lines if it's turned on
        if self.linesvisible:
            self.lone.render()
            self.ltwo.render()
            self.lthree.render()
            self.distlabelone.draw()
            self.distlabeltwo.draw()
            self.distlabelthree.draw()
        #render the coords
        if self.coordlabelvisible:
            self.coordlabel.draw()
        #debug box
        if self.debug:
            self.ldebug.render()
            self.debuglabel.draw()
        #self.p.render()
        #self.a.render()
        #self.P.render()
        #self.l.render()
        self.batch.draw()
        #set the default position of the satallites(we will change this later to be dynamic and maybe update their position) <-- made it dynamic :D
        if not self.dragable:
            if data:
                if data[3] == 1:
                    self.sprites[1].x = int(data[2][0]) - 25
                    self.sprites[1].y = int(data[2][1]) - 25
                    self.cone.x = int(data[2][0])
                    self.cone.y = int(data[2][1])
                if data[3] == 2:
                    self.sprites[2].x = int(data[2][0]) - 25
                    self.sprites[2].y = int(data[2][1]) - 25
                    self.ctwo.x = int(data[2][0])
                    self.ctwo.y = int(data[2][1])
            else:
                # no packet yet: park satellites 1/2 and circles at defaults
                self.sprites[1].x = 400
                self.sprites[1].y = 600
                self.sprites[2].x = 900
                self.sprites[2].y = 700
                self.cone.x = 425
                self.cone.y = 625
                self.ctwo.x = 925
                self.ctwo.y = 725
        # satellite 3 is always at a fixed position
        self.sprites[3].x = 800
        self.sprites[3].y = 200
        if self.fpsvisible:
            self.fps_display.draw()

    def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
        """Drag the GPS sprite with the left button when drag mode is on."""
        if self.dragable:
            if buttons & mouse.LEFT:
                if x > self.sprites[0].x and x < (self.sprites[0].x + 1000):
                    if y > self.sprites[0].y and y < (self.sprites[0].y + 1000):
                        self.debuglabel.text = "Mouse drag"
                        self.sprites[0].x = x - 25
                        self.sprites[0].y = y - 25

    def update(self, dt):
        """Scheduled each frame: refresh distances, circles and the GPS fix."""
        # Scheduled event
        #set the labels position
        self.distlabelone.x = 100
        self.distlabeltwo.x = 100
        self.distlabelthree.x = 100
        self.distlabelone.y = 100
        self.distlabeltwo.y = 75
        self.distlabelthree.y = 50
        #sat 3 is hardcoded
        self.distlabelthree.text = "Dist. a SAT3: %s"% str(getdistance(self.sprites[0].x,self.sprites[0].y,self.sprites[3].x,self.sprites[3].y))
        self.cthree.radius = getdistance(self.sprites[0].x,self.sprites[0].y,self.sprites[3].x,self.sprites[3].y)
        #packet handling
        print "----", self.dragable, data
        if not self.dragable:
            if data:
                print "yes"
                if data[3] == 1:
                    self.cone.radius = int(data[1])
                    self.distlabelone.text = "Dist. a SAT1: %s"% str(data[1])
                if data[3] == 2:
                    self.ctwo.radius = int(data[1])
                    self.distlabeltwo.text = "Dist. a SAT2: %s"% str(data[1])
        else:
            self.distlabelone.text = "Dist. a SAT1: %s"% str(getdistance(self.sprites[0].x,self.sprites[0].y,self.sprites[1].x,self.sprites[1].y))
            self.distlabeltwo.text = "Dist. a SAT2: %s"% str(getdistance(self.sprites[0].x,self.sprites[0].y,self.sprites[2].x,self.sprites[2].y))
            #set the size of the circles depending on were the gps is
            self.cone.radius = getdistance(self.sprites[0].x,self.sprites[0].y,self.sprites[1].x,self.sprites[1].y)
            self.ctwo.radius = getdistance(self.sprites[0].x,self.sprites[0].y,self.sprites[2].x,self.sprites[2].y)
        #set the coords
        if not self.dragable:
            SAT1circle = [self.sprites[1].x,self.sprites[1].y,self.cone.radius]
            SAT2circle = [self.sprites[2].x,self.sprites[2].y,self.ctwo.radius]
        else:
            SAT1circle = [self.sprites[1].x,self.sprites[1].y,getdistance(self.sprites[0].x,self.sprites[0].y,self.sprites[1].x,self.sprites[1].y)]
            SAT2circle = [self.sprites[2].x,self.sprites[2].y,getdistance(self.sprites[0].x,self.sprites[0].y,self.sprites[2].x,self.sprites[2].y)]
        #alone once again :(
        SAT3circle = [self.sprites[3].x,self.sprites[3].y,getdistance(self.sprites[0].x,self.sprites[0].y,self.sprites[3].x,self.sprites[3].y)]
        #set the a and b points for the lines
        self.lone.a2 = (self.sprites[0].x - 25, self.sprites[0].y - 25)
        self.ltwo.a2 = (self.sprites[0].x - 25, self.sprites[0].y -25)
        self.lthree.a2 = (self.sprites[0].x - 25, self.sprites[0].y - 25)
        self.lone.b2 = (self.sprites[1].x - 25, self.sprites[1].y - 25)
        self.ltwo.b2 = (self.sprites[2].x - 25, self.sprites[2].y - 25)
        self.lthree.b2 = (self.sprites[3].x - 25, self.sprites[3].y - 25)
        SAT1_SAT2 = findintersect(SAT1circle,SAT2circle)
        #print "sat1 to sat2", SAT1_SAT2
        SAT2_SAT3 = findintersect(SAT2circle,SAT3circle)
        #print "sat2 to sat3", SAT2_SAT3
        #set the coord str to whatever
        # pick whichever pair of circle intersections agrees between the
        # SAT1/SAT2 and SAT2/SAT3 solutions
        coords = "N/A"
        if(SAT1_SAT2[1] == SAT2_SAT3[1] and SAT1_SAT2[2] == SAT2_SAT3[2]):
            coords = "'{0}', '{1}'".format(int(SAT1_SAT2[1]), int(SAT1_SAT2[2]))
        if(SAT1_SAT2[3] == SAT2_SAT3[3] and SAT1_SAT2[4] == SAT2_SAT3[4]):
            coords = "'{0}', '{1}'".format(int(SAT1_SAT2[3]), int(SAT1_SAT2[4]))
        if(SAT1_SAT2[1] == SAT2_SAT3[3] and SAT1_SAT2[2] == SAT2_SAT3[4]):
            coords = "'{0}', '{1}'".format(int(SAT1_SAT2[1]), int(SAT1_SAT2[2]))
        if(SAT1_SAT2[3] == SAT2_SAT3[1] and SAT1_SAT2[4] == SAT2_SAT3[2]):
            coords = "'{0}', '{1}'".format(int(SAT1_SAT2[3]), int(SAT1_SAT2[4]))
        #and finally updating the label and setting the gps pos
        self.coordlabel.text = "Coord. du GPS: %s" % coords
        if not coords == "N/A":
            #yeah yeah, I know, code execution vunerability, whatever
            # SECURITY: exec on a derived string; kept as-is per the author
            exec("coords = [" + coords + "]")
            self.sprites[0].x = int(coords[0])
            self.sprites[0].y = int(coords[1])

    def on_key_press(self, symbol, modifiers):
        """Toggle display options / take screenshots / quit on Esc."""
        #keypress event
        if symbol == key.P:
            self.debuglabel.text = "Screenshot!"
            screenshot()
        if symbol == key.F3:
            if self.debug:
                self.debug = False
                self.fpsvisible = False
            else:
                self.debug = True
                self.fpsvisible = True
        if symbol == key.L:
            if self.linesvisible:
                self.debuglabel.text = "Hiding lines"
                self.linesvisible = False
            else:
                self.debuglabel.text = "Showing lines"
                self.linesvisible = True
        if symbol == key.C:
            if self.circlesvisible:
                self.debuglabel.text = "Hiding circles"
                self.circlesvisible = False
            else:
                self.debuglabel.text = "Showing circles"
                self.circlesvisible = True
        if symbol == key.D:
            if self.dragable:
                self.debuglabel.text = "Objects are no longer dragable"
                self.dragable = False
            else:
                self.debuglabel.text = "Objects are now dragable"
                self.dragable = True
        if symbol == key.EQUAL:
            # NOTE(review): the messages look swapped relative to the state
            # change ("Showing coords" hides them) -- confirm intent.
            if self.coordlabelvisible:
                self.debuglabel.text = "Showing coords"
                self.coordlabelvisible = False
            else:
                self.debuglabel.text = "Hiding coords"
                self.coordlabelvisible = True
        if symbol == key.ESCAPE:
            self.on_close()
#packet handling
def packethandler():
    # Background loop: receive one UDP datagram at a time on `address`,
    # exec() its text (which is expected to assign `packet`), and publish
    # the result in the module-global `data` read by PrimWin.
    while True:
        # a fresh socket per datagram (bind/recv/close each iteration)
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sock.bind(address)
        message, addr = sock.recvfrom(2048)
        sock.close()
        print message
        try:
            #even better! a REMOTE code execution vunerability!
            # SECURITY: exec of untrusted network input -- author-acknowledged
            exec(message)
        except Exception, e:
            print "GOT AN ERROR: ", e
        # `packet` is expected to have been defined by the exec'd message
        print packet
        global data
        data = packet
if __name__ == '__main__':
    PrimWin()
    # listen for UDP packets on a daemon-less background thread
    packetreception = Thread(target = packethandler).start()
    # NOTE(review): pyglet.app.run() is called immediately here (it blocks
    # until the app exits) and its None result is passed as `args`, which is
    # not a tuple -- probably intended as args=(pyglet.app.run(),); confirm.
    # Also Thread.start() returns None, so both names are always None.
    window = Thread(target = sys.exit, args = (pyglet.app.run())).start()
benchmark.py | #!/usr/bin/env python
# Python benchmark for gamq
import time
import socket
import threading
# Global variables
HostAddress = "localhost"  # overridden by readConfig() if the user enters a host
HostPort = 48879           # int default; NOTE(review): readConfig stores the typed port as a string
Protocol = ""              # "tcp" or "udp", set by readConfig()
AckMessages = False        # wait for PUBACK after each publish?
NumberOfMessages = 0       # how many messages to write and read
# Helper function to check if a number is valid
def isNumber(givenObject):
    """Return True if *givenObject* can be converted with int(), else False."""
    try:
        int(givenObject)
        return True
    except (ValueError, TypeError):
        # narrowed from a bare except: only conversion failures mean
        # "not a number"; anything else should still propagate
        return False
def getSocket(protocol):
    # Create a TCP or UDP socket and connect it to the configured
    # HostAddress/HostPort; exits the process on an unknown protocol.
    if protocol == "tcp":
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    elif protocol == "udp":
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    else:
        print "Invalid protocol: {}".format(protocol)
        exit(-1)
    s.connect((HostAddress, HostPort))
    return s
def writeThread():
    # Publish NumberOfMessages sequential integers to queue "abc" and time it.
    s = getSocket(Protocol)
    if AckMessages:
        s.sendall("setack on\n")
    startTime = time.clock()
    for i in range(0, int(NumberOfMessages), 1):
        # protocol: "pub <queue>\n<body>\n.\r\n"
        s.sendall("pub abc\n")
        s.sendall("{}\n".format(i))
        s.sendall(".\r\n")
        if AckMessages:
            response = s.recv(8)
            if response[:6] != "PUBACK":
                print "Error whilst publishing {}, got response: {}".format(i, response)
    endTime = time.clock()
    s.close()
    print "Took {} seconds to write {} messages".format((endTime - startTime), NumberOfMessages)
def readThread():
    # Subscribe to queue "abc" and read NumberOfMessages messages, timing it.
    s = getSocket("tcp")
    startTime = time.clock()
    s.sendall("sub abc\n")
    for i in range(0, int(NumberOfMessages), 1):
        response = ""
        # accumulate byte-by-byte until the ".\r\n" terminator arrives
        while response[-3:] != ".\r\n":
            response += s.recv(1)
        response = response.translate(None, ".\r\n")
        # NOTE(review): `int()` is always 0 -- almost certainly meant
        # int(response); as written this misreports every i != 0.
        if int() != int(i):
            print "Expected {}, got {}".format(i, response)
    endTime = time.clock()
    s.close()
    print "Took {} seconds to read {} messages".format((endTime - startTime), NumberOfMessages)
def readConfig():
    # Interactively fill the module-global benchmark settings; exits the
    # process on invalid input.
    global AckMessages, NumberOfMessages, HostAddress, HostPort, Protocol
    # Get benchmark parameters
    protocol = raw_input("Protocol to use (tcp/udp): ")
    if protocol not in ["tcp", "udp"]:
        print "Invalid protocol"
        exit(-1)
    else:
        Protocol = protocol
    numberOfMessages = raw_input("Number of messages to send: ")
    if not isNumber(numberOfMessages):
        print "Invalid number"
        exit(-1)
    else:
        NumberOfMessages = int(numberOfMessages)
    ackMessages = raw_input("Ack messages (y/n): ")
    AckMessages = (ackMessages == "y")
    hostAddress = raw_input("Host to connect to: ")
    if hostAddress == "":
        print "Defaulting to localhost"
    else:
        HostAddress = hostAddress
    hostPort = raw_input("Port to connect to: ")
    if hostPort == "":
        print "Defaulting to 48879"
    elif isNumber(hostPort):
        # NOTE(review): stored as a string, unlike the int default above --
        # socket.connect may reject it; probably should be int(hostPort).
        HostPort = hostPort
    else:
        print "Invalid number"
        exit(-1)
readConfig()
# NOTE(review): these rebinds shadow the writeThread/readThread functions
# with the Thread objects -- works once, but the functions are lost.
writeThread = threading.Thread(target=writeThread)
readThread = threading.Thread(target=readThread)
readThread.daemon = True
writeThread.daemon = True
writeThread.start()
readThread.start()
# keep the main thread alive until both daemon workers finish
while threading.active_count() > 1:
    time.sleep(1)
|
nvvl_eval.py | if __name__ == '__main__':
from torch.multiprocessing import set_start_method
set_start_method('spawn', force=True)
import argparse
import os
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from rbpn import Net as RBPN
from functools import reduce
import numpy as np
from imageio import imsave
import scipy.io as sio
import time
import cv2
import math
import pdb
from rbpn_loader import loader
from rbpn_eval import eval, save_vid
import nvvl
from flownet2.models import FlowNet2
from torch.multiprocessing import Process, Queue, Barrier
# Training settings
parser = argparse.ArgumentParser(description='PyTorch Super Res Example')
parser.add_argument('--upscale_factor', type=int, default=4, help="super resolution upscale factor")
parser.add_argument('--testBatchSize', type=int, default=1, help='testing batch size')
parser.add_argument('--gpu_mode', type=bool, default=True)
parser.add_argument('--seed', type=int, default=123, help='random seed to use. Default=123')
parser.add_argument('--gpus', default=1, type=int, help='number of gpu')
parser.add_argument('--data_dir', type=str, default='./Vid4')
parser.add_argument('--file_list', type=str, default='foliage.txt')
parser.add_argument('--vid_dir', type=str, default='./Vid4_video/new_city.mp4')
parser.add_argument('--other_dataset', type=bool, default=True, help="use other dataset than vimeo-90k")
parser.add_argument('--future_frame', type=bool, default=True, help="use future frame")
parser.add_argument('--nFrames', type=int, default=7)
parser.add_argument('--model_type', type=str, default='RBPN')
parser.add_argument('--residual', type=bool, default=False)
parser.add_argument('--output', default='Results/', help='Location to save checkpoint models')
parser.add_argument('--model', default='weights/RBPN_4x.pth', help='sr pretrained base model')
## FlowNet specific parser arguments ##
parser.add_argument('--rgb_max', type=float, default=255.0)
parser.add_argument('--fp16', action='store_true')
opt = parser.parse_args()
gpus_list=range(opt.gpus)
print(opt)
cuda = opt.gpu_mode
if cuda and not torch.cuda.is_available():
raise Exception("No GPU found, please run without --cuda")
torch.manual_seed(opt.seed)
if cuda:
torch.cuda.manual_seed(opt.seed)
bar_total = 4 # 1 main process + 1 eval + 1 flownet + 1 save_vid
sta_bar = Barrier(bar_total)
fin_bar = Barrier(bar_total)
frame_queue = Queue()
vid_frame_queue = Queue()
print('===> Building model ', opt.model_type)
if opt.model_type == 'RBPN':
model = RBPN(num_channels=3, base_filter=256, feat = 64, num_stages=3, n_resblock=5, nFrames=opt.nFrames, scale_factor=opt.upscale_factor)
if cuda:
model = torch.nn.DataParallel(model, device_ids=gpus_list)
model.load_state_dict(torch.load(opt.model, map_location=lambda storage, loc: storage))
print('Pre-trained SR model is loaded.')
print('===> Building FlowNet model ')
path = './flownet2/ckpt/FlowNet2_checkpoint.pth.tar'
flownet2 = FlowNet2(opt)
pretrained_dict = torch.load(path)['state_dict']
model_dict = flownet2.state_dict()
pretrained_dict = {k:v for k,
v in pretrained_dict.items() if k in model_dict}
model_dict.update(pretrained_dict)
flownet2.load_state_dict(model_dict)
print('Pre-trained FlowNet model is loaded.')
if cuda:
model = model.cuda(gpus_list[0])
flownet2 = flownet2.cuda(gpus_list[0])
eval_p = Process(target=eval, args=(model, flownet2, frame_queue, vid_frame_queue, opt, sta_bar, fin_bar))
loader_p = Process(target=loader, args=(opt.vid_dir, frame_queue, flownet2, 7, sta_bar, fin_bar))
save_vid_p = Process(target=save_vid, args=(vid_frame_queue, sta_bar, fin_bar))
for p in [eval_p, loader_p, save_vid_p]:
p.start()
sta_bar.wait()
fin_bar.wait()
for p in [eval_p, loader_p]:
p.join()
print("ALL PROCESSES ENDING PROPERLY!")
|
refseq_importerServer.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from refseq_importer.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
    """Path of the deployment config file from the KB_DEPLOYMENT_CONFIG
    environment variable, or None when it is unset."""
    return environ.get(DEPLOY)
def get_service_name():
    """Service name from the KB_SERVICE_NAME environment variable, or None
    when it is unset."""
    return environ.get(SERVICE)
def get_config():
    """Parse the deployment config file and return its service section.

    Returns None when no config file is configured in the environment.
    The section read is the configured service name, falling back to
    'refseq_importer'.
    """
    if not get_config_file():
        return None
    config = ConfigParser()
    config.read(get_config_file())
    section = get_service_name() or 'refseq_importer'
    return {name: value for name, value in config.items(section)}
# build the service configuration once at import time; the implementation
# import is deferred until after config loading (original ordering kept)
config = get_config()
from refseq_importer.refseq_importerImpl import refseq_importer  # noqa @IgnorePep8
impl_refseq_importer = refseq_importer(config)
class JSONObjectEncoder(json.JSONEncoder):
    """JSON encoder that additionally serializes sets, frozensets and any
    object exposing a toJSONable() method."""

    def default(self, obj):
        # Sets have no native JSON form; emit them as lists.
        if isinstance(obj, (set, frozenset)):
            return list(obj)
        # Objects may opt in to serialization via toJSONable().
        if hasattr(obj, 'toJSONable'):
            return obj.toJSONable()
        # Anything else: defer to the base class (raises TypeError).
        return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
    """JSON-RPC service that threads a per-call context (ctx) through to
    every registered method and serializes results with JSONObjectEncoder."""

    def call(self, ctx, jsondata):
        """
        Calls jsonrpc service's method and returns its return value in a JSON
        string or None if there is none.

        Arguments:
        ctx -- MethodContext for this call
        jsondata -- remote method call, already deserialized from JSON
        """
        result = self.call_py(ctx, jsondata)
        if result is not None:
            return json.dumps(result, cls=JSONObjectEncoder)
        return None

    def _call_method(self, ctx, request):
        """Calls given method with given params and returns it value."""
        method = self.method_data[request['method']]['method']
        params = request['params']
        result = None
        try:
            if isinstance(params, list):
                # Does it have enough arguments?
                # (the "- 1" accounts for the implicit ctx argument)
                if len(params) < self._man_args(method) - 1:
                    raise InvalidParamsError('not enough arguments')
                # Does it have too many arguments?
                if(not self._vargs(method) and len(params) >
                        self._max_args(method) - 1):
                    raise InvalidParamsError('too many arguments')

                result = method(ctx, *params)
            elif isinstance(params, dict):
                # Do not accept keyword arguments if the jsonrpc version is
                # not >=1.1.
                if request['jsonrpc'] < 11:
                    raise KeywordError

                result = method(ctx, **params)
            else:  # No params
                result = method(ctx)
        except JSONRPCError:
            # Already a proper JSON-RPC error; propagate unchanged.
            raise
        except Exception as e:
            # log.exception('method %s threw an exception' % request['method'])
            # Exception was raised inside the method: wrap it in a server
            # error carrying the traceback so the client sees a clean report.
            newerr = JSONServerError()
            newerr.trace = traceback.format_exc()
            if len(e.args) == 1:
                newerr.data = repr(e.args[0])
            else:
                newerr.data = repr(e.args)
            raise newerr
        return result

    def call_py(self, ctx, jsondata):
        """
        Calls jsonrpc service's method and returns its return value in python
        object format or None if there is none.

        This method is same as call() except the return value is a python
        object instead of JSON string. This method is mainly only useful for
        debugging purposes.
        """
        rdata = jsondata
        # we already deserialize the json string earlier in the server code, no
        # need to do it again
        #        try:
        #            rdata = json.loads(jsondata)
        #        except ValueError:
        #            raise ParseError

        # set some default values for error handling
        request = self._get_default_vals()

        if isinstance(rdata, dict) and rdata:
            # It's a single request.
            self._fill_request(request, rdata)
            respond = self._handle_request(ctx, request)

            # Don't respond to notifications
            if respond is None:
                return None

            return respond
        elif isinstance(rdata, list) and rdata:
            # It's a batch: validate all requests first, then execute them.
            requests = []
            responds = []

            for rdata_ in rdata:
                # set some default values for error handling
                request_ = self._get_default_vals()
                self._fill_request(request_, rdata_)
                requests.append(request_)

            for request_ in requests:
                respond = self._handle_request(ctx, request_)
                # Don't respond to notifications
                if respond is not None:
                    responds.append(respond)

            if responds:
                return responds

            # Nothing to respond.
            return None
        else:
            # empty dict, list or wrong type
            raise InvalidRequestError

    def _handle_request(self, ctx, request):
        """Handles given request and returns its response."""
        # Optional type validation, only for methods registered with types=.
        if 'types' in self.method_data[request['method']]:
            self._validate_params_types(request['method'], request['params'])

        result = self._call_method(ctx, request)

        # Do not respond to notifications.
        if request['id'] is None:
            return None

        respond = {}
        self._fill_ver(request['jsonrpc'], respond)
        respond['result'] = result
        respond['id'] = request['id']

        return respond
class MethodContext(dict):
    """Per-call context handed to every service method.

    Behaves as a dict of call metadata (client IP, user, token, module,
    method, call id, provenance) and also proxies logging to the server
    logger supplied at construction time.
    """

    def __init__(self, logger):
        self['client_ip'] = None
        self['user_id'] = None
        self['authenticated'] = None
        self['token'] = None
        self['module'] = None
        self['method'] = None
        self['call_id'] = None
        self['rpc_context'] = None
        self['provenance'] = None
        # Levels log_debug passes through unchanged; any other value is
        # validated as an int in 1..3 and offset onto the logger's scale.
        self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
        self._logger = logger

    def log_err(self, message):
        """Log message at error level."""
        self._log(log.ERR, message)

    def log_info(self, message):
        """Log message at info level."""
        self._log(log.INFO, message)

    def log_debug(self, message, level=1):
        """Log message at a debug level (int 1-3, or a recognized alias)."""
        if level in self._debug_levels:
            pass
        else:
            level = int(level)
            if level < 1 or level > 3:
                raise ValueError("Illegal log level: " + str(level))
            # Numeric debug levels 1..3 map onto logger levels 7..9.
            level = level + 6
        self._log(level, message)

    def set_log_level(self, level):
        """Set the underlying logger's level."""
        self._logger.set_log_level(level)

    def get_log_level(self):
        """Return the underlying logger's current level."""
        return self._logger.get_log_level()

    def clear_log_level(self):
        """Revert the underlying logger to its default level."""
        self._logger.clear_user_log_level()

    def _log(self, level, message):
        # Forward to the logger together with the per-call identity fields.
        self._logger.log_message(level, message, self['client_ip'],
                                 self['user_id'], self['module'],
                                 self['method'], self['call_id'])

    def provenance(self):
        """Return provenance for this call.

        If a callback server is configured via SDK_CALLBACK_URL, provenance
        is fetched from it over JSON-RPC; otherwise the locally recorded
        'provenance' entry is returned.

        Raises:
            ServerError: if the callback server reports or returns an error.
        """
        callbackURL = os.environ.get('SDK_CALLBACK_URL')
        if callbackURL:
            # OK, there's a callback server from which we can get provenance
            arg_hash = {'method': 'CallbackServer.get_provenance',
                        'params': [],
                        'version': '1.1',
                        'id': str(_random.random())[2:]
                        }
            body = json.dumps(arg_hash)
            response = _requests.post(callbackURL, data=body,
                                      timeout=60)
            response.encoding = 'utf-8'
            if response.status_code == 500:
                # The callback server may report errors either as JSON-RPC
                # error objects or as plain text.
                if ('content-type' in response.headers and
                        response.headers['content-type'] ==
                        'application/json'):
                    err = response.json()
                    if 'error' in err:
                        raise ServerError(**err['error'])
                    else:
                        raise ServerError('Unknown', 0, response.text)
                else:
                    raise ServerError('Unknown', 0, response.text)
            if not response.ok:
                response.raise_for_status()
            resp = response.json()
            if 'result' not in resp:
                raise ServerError('Unknown', 0,
                                  'An unknown server error occurred')
            return resp['result'][0]
        else:
            return self.get('provenance')
class ServerError(Exception):
    """Raised when a JSON-RPC call returns an error.

    Attributes:
        name: the name of the error.
        code: the numeric error code.
        message: a human readable error message.
        data: the server-side stacktrace (JSON-RPC 2.0 'data' or 1.1 'error').
    """

    def __init__(self, name, code, message, data=None, error=None):
        super(Exception, self).__init__(message)
        self.name = name
        self.code = code
        self.message = '' if not message else message
        # 'data' is the JSON-RPC 2.0 field name, 'error' the 1.1 one.
        self.data = data or error or ''

    def __str__(self):
        return '{}: {}. {}\n{}'.format(
            self.name, self.code, self.message, self.data)
def getIPAddress(environ):
    """Best-effort client IP extracted from the WSGI environ.

    X-Forwarded-For / X-Real-IP are honoured unless the service config sets
    'dont_trust_x_ip_headers' to 'true'; otherwise REMOTE_ADDR is used.
    """
    forwarded_for = environ.get('HTTP_X_FORWARDED_FOR')
    real_ip = environ.get('HTTP_X_REAL_IP')
    trust_x_headers = (config is None
                       or config.get('dont_trust_x_ip_headers') != 'true')
    if trust_x_headers:
        if forwarded_for:
            # The first entry in the chain is the originating client.
            return forwarded_for.split(',')[0].strip()
        if real_ip:
            return real_ip.strip()
    return environ.get('REMOTE_ADDR')
class Application(object):
    # Wrap the wsgi handler in a class definition so that we can
    # do some initialization and avoid regenerating stuff over
    # and over

    def logcallback(self):
        # Keep the server log pointed at the same file as the user log
        # whenever the user log rotates.
        self.serverlog.set_log_file(self.userlog.get_log_file())

    def log(self, level, context, message):
        """Write message to the server log with the call's identity fields."""
        self.serverlog.log_message(level, message, context['client_ip'],
                                   context['user_id'], context['module'],
                                   context['method'], context['call_id'])

    def __init__(self):
        """Set up logging, register RPC methods and build the auth client."""
        submod = get_service_name() or 'refseq_importer'
        self.userlog = log.log(
            submod, ip_address=True, authuser=True, module=True, method=True,
            call_id=True, changecallback=self.logcallback,
            config=get_config_file())
        self.serverlog = log.log(
            submod, ip_address=True, authuser=True, module=True, method=True,
            call_id=True, logfile=self.userlog.get_log_file())
        self.serverlog.set_log_level(6)
        self.rpc_service = JSONRPCServiceCustom()
        # Maps RPC method name -> 'required' / 'optional' / 'none'.
        self.method_authentication = dict()
        self.rpc_service.add(impl_refseq_importer.run_refseq_importer,
                             name='refseq_importer.run_refseq_importer',
                             types=[dict])
        self.method_authentication['refseq_importer.run_refseq_importer'] = 'required'  # noqa
        self.rpc_service.add(impl_refseq_importer.run_single_import,
                             name='refseq_importer.run_single_import',
                             types=[dict])
        self.method_authentication['refseq_importer.run_single_import'] = 'required'  # noqa
        self.rpc_service.add(impl_refseq_importer.status,
                             name='refseq_importer.status',
                             types=[dict])
        authurl = config.get(AUTH) if config else None
        self.auth_client = _KBaseAuth(authurl)

    def __call__(self, environ, start_response):
        """WSGI entry point: parse the JSON-RPC request, authenticate,
        dispatch, and return the JSON response body."""
        # Context object, equivalent to the perl impl CallContext
        ctx = MethodContext(self.userlog)
        ctx['client_ip'] = getIPAddress(environ)
        status = '500 Internal Server Error'

        try:
            body_size = int(environ.get('CONTENT_LENGTH', 0))
        except (ValueError):
            body_size = 0
        if environ['REQUEST_METHOD'] == 'OPTIONS':
            # we basically do nothing and just return headers (CORS preflight)
            status = '200 OK'
            rpc_result = ""
        else:
            request_body = environ['wsgi.input'].read(body_size)
            try:
                req = json.loads(request_body)
            except ValueError as ve:
                err = {'error': {'code': -32700,
                                 'name': "Parse error",
                                 'message': str(ve),
                                 }
                       }
                rpc_result = self.process_error(err, ctx, {'version': '1.1'})
            else:
                ctx['module'], ctx['method'] = req['method'].split('.')
                ctx['call_id'] = req['id']
                ctx['rpc_context'] = {
                    'call_stack': [{'time': self.now_in_utc(),
                                    'method': req['method']}
                                   ]
                }
                prov_action = {'service': ctx['module'],
                               'method': ctx['method'],
                               'method_params': req['params']
                               }
                ctx['provenance'] = [prov_action]
                try:
                    token = environ.get('HTTP_AUTHORIZATION')
                    # parse out the method being requested and check if it
                    # has an authentication requirement
                    method_name = req['method']
                    auth_req = self.method_authentication.get(
                        method_name, 'none')
                    if auth_req != 'none':
                        if token is None and auth_req == 'required':
                            err = JSONServerError()
                            err.data = (
                                'Authentication required for ' +
                                'refseq_importer ' +
                                'but no authentication header was passed')
                            raise err
                        elif token is None and auth_req == 'optional':
                            pass
                        else:
                            try:
                                user = self.auth_client.get_user(token)
                                ctx['user_id'] = user
                                ctx['authenticated'] = 1
                                ctx['token'] = token
                            except Exception as e:
                                # Invalid token is fatal only for 'required'.
                                if auth_req == 'required':
                                    err = JSONServerError()
                                    err.data = \
                                        "Token validation failed: %s" % e
                                    raise err
                    if (environ.get('HTTP_X_FORWARDED_FOR')):
                        self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
                                 environ.get('HTTP_X_FORWARDED_FOR'))
                    self.log(log.INFO, ctx, 'start method')
                    rpc_result = self.rpc_service.call(ctx, req)
                    self.log(log.INFO, ctx, 'end method')
                    status = '200 OK'
                except JSONRPCError as jre:
                    err = {'error': {'code': jre.code,
                                     'name': jre.message,
                                     'message': jre.data
                                     }
                           }
                    trace = jre.trace if hasattr(jre, 'trace') else None
                    rpc_result = self.process_error(err, ctx, req, trace)
                except Exception:
                    err = {'error': {'code': 0,
                                     'name': 'Unexpected Server Error',
                                     'message': 'An unexpected server error ' +
                                                'occurred',
                                     }
                           }
                    rpc_result = self.process_error(err, ctx, req,
                                                    traceback.format_exc())

        # print('Request method was %s\n' % environ['REQUEST_METHOD'])
        # print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
        # print('Request body was: %s' % request_body)
        # print('Result from the method call is:\n%s\n' % \
        #    pprint.pformat(rpc_result))

        if rpc_result:
            response_body = rpc_result
        else:
            response_body = ''

        response_headers = [
            ('Access-Control-Allow-Origin', '*'),
            ('Access-Control-Allow-Headers', environ.get(
                'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
            ('content-type', 'application/json'),
            ('content-length', str(len(response_body)))]
        start_response(status, response_headers)
        return [response_body.encode('utf8')]

    def process_error(self, error, context, request, trace=None):
        """Attach id/version info and the trace to an error dict and
        return it serialized as JSON."""
        if trace:
            self.log(log.ERR, context, trace.split('\n')[0:-1])
        if 'id' in request:
            error['id'] = request['id']
        # Place the trace in the field appropriate to the request's
        # JSON-RPC dialect (1.1 'error', 2.0 'data', or legacy 1.0).
        if 'version' in request:
            error['version'] = request['version']
            e = error['error'].get('error')
            if not e:
                error['error']['error'] = trace
        elif 'jsonrpc' in request:
            error['jsonrpc'] = request['jsonrpc']
            error['error']['data'] = trace
        else:
            error['version'] = '1.0'
            error['error']['error'] = trace
        return json.dumps(error)

    def now_in_utc(self):
        """Return the current local time in ISO format with UTC offset."""
        # noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
        dtnow = datetime.datetime.now()
        dtutcnow = datetime.datetime.utcnow()
        delta = dtnow - dtutcnow
        hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
                        60)
        return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
# Module-level WSGI application instance, shared by uwsgi, the simple
# server in start_server() and the async CLI entry point.
application = Application()

# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run a using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
    import uwsgi
    # Before we do anything with the application, see if the
    # configs specify patching all std routines to be asynch
    # *ONLY* use this if you are going to wrap the service in
    # a wsgi container that has enabled gevent, such as
    # uwsgi with the --gevent option
    if config is not None and config.get('gevent_monkeypatch_all', False):
        print("Monkeypatching std libraries for async")
        from gevent import monkey
        monkey.patch_all()
    uwsgi.applications = {'': application}
except ImportError:
    # Not available outside of wsgi, ignore
    pass
# Handle to the background server process started by start_server(newprocess=True).
_proc = None


def start_server(host='localhost', port=0, newprocess=False):
    '''
    By default, will start the server on localhost on a system assigned port
    in the main thread. Execution of the main thread will stay in the server
    main loop until interrupted. To run the server in a separate process, and
    thus allow the stop_server method to be called, set newprocess = True. This
    will also allow returning of the port number.'''
    global _proc
    if _proc:
        raise RuntimeError('server is already running')
    httpd = make_server(host, port, application)
    # port=0 asks the OS for a free port; read back the one assigned.
    port = httpd.server_address[1]
    print("Listening on port %s" % port)
    if newprocess:
        # Daemonized so the child cannot outlive the parent process.
        _proc = Process(target=httpd.serve_forever)
        _proc.daemon = True
        _proc.start()
    else:
        httpd.serve_forever()
    return port
def stop_server():
    """Stop a server previously started with start_server(newprocess=True).

    Raises:
        RuntimeError: if no server process is currently running (the
            original code raised an opaque AttributeError on None here,
            inconsistent with start_server's RuntimeError style).
    """
    global _proc
    if _proc is None:
        raise RuntimeError('server is not running')
    _proc.terminate()
    # Reap the terminated child so it does not linger as a zombie.
    _proc.join()
    _proc = None
def process_async_cli(input_file_path, output_file_path, token):
    """Execute a single JSON-RPC call read from a file (async job mode).

    Reads the request from input_file_path, runs it against the module's
    RPC service, and writes the JSON response to output_file_path.

    Returns 0 on success, 500 if the response contains an error.
    """
    exit_code = 0
    with open(input_file_path) as data_file:
        req = json.load(data_file)
    # Fill in missing JSON-RPC envelope fields with sane defaults.
    if 'version' not in req:
        req['version'] = '1.1'
    if 'id' not in req:
        req['id'] = str(_random.random())[2:]
    ctx = MethodContext(application.userlog)
    if token:
        user = application.auth_client.get_user(token)
        ctx['user_id'] = user
        ctx['authenticated'] = 1
        ctx['token'] = token
    if 'context' in req:
        ctx['rpc_context'] = req['context']
    ctx['CLI'] = 1
    ctx['module'], ctx['method'] = req['method'].split('.')
    prov_action = {'service': ctx['module'], 'method': ctx['method'],
                   'method_params': req['params']}
    ctx['provenance'] = [prov_action]
    resp = None
    try:
        resp = application.rpc_service.call_py(ctx, req)
    except JSONRPCError as jre:
        trace = jre.trace if hasattr(jre, 'trace') else None
        resp = {'id': req['id'],
                'version': req['version'],
                'error': {'code': jre.code,
                          'name': jre.message,
                          'message': jre.data,
                          'error': trace}
                }
    except Exception:
        trace = traceback.format_exc()
        resp = {'id': req['id'],
                'version': req['version'],
                'error': {'code': 0,
                          'name': 'Unexpected Server Error',
                          'message': 'An unexpected server error occurred',
                          'error': trace}
                }
    if 'error' in resp:
        exit_code = 500
    with open(output_file_path, "w") as f:
        f.write(json.dumps(resp, cls=JSONObjectEncoder))
    return exit_code
if __name__ == "__main__":
    # Async-job CLI mode: script.py <input.json> <output.json> [token|token-file]
    if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
            os.path.isfile(sys.argv[1])):
        token = None
        if len(sys.argv) == 4:
            # The third argument is either a file containing the token or
            # the token itself.
            if os.path.isfile(sys.argv[3]):
                with open(sys.argv[3]) as token_file:
                    token = token_file.read()
            else:
                token = sys.argv[3]
        sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
    # Otherwise run the standalone HTTP server.
    try:
        opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
    except GetoptError as err:
        # print help information and exit:
        print(str(err))  # will print something like "option -a not recognized"
        sys.exit(2)
    port = 9999
    host = 'localhost'
    for o, a in opts:
        if o == '--port':
            port = int(a)
        elif o == '--host':
            host = a
            print("Host set to %s" % host)
        else:
            assert False, "unhandled option"
    start_server(host=host, port=port)
#    print("Listening on port %s" % port)
#    httpd = make_server( host, port, application)
#
#    httpd.serve_forever()
|
zap_curve_auth.py | import os
import tempfile
import threading
import time
from signal import SIGINT, SIGTERM, signal
from agents import Agent, Message
class NotificationBroker(Agent):
    """Pub/sub broker agent with optional CURVE encryption and client auth."""

    def setup(
        self,
        name=None,
        pub_address=None,
        sub_address=None,
        private_key=None,
        client_certificates_path=None,
    ):
        # With a private key the broker runs as a CURVE server; without one
        # traffic is unencrypted.
        if private_key:
            options = self.curve_server_config(private_key)
        else:
            options = {}
        self.create_notification_broker(pub_address, sub_address, options=options)
        # Restrict connections to clients whose public keys are stored under
        # client_certificates_path, when one is given.
        if client_certificates_path:
            self.auth = self.start_authenticator(
                domain="*", certificates_path=client_certificates_path
            )
class Sender(Agent):
    """Agent that publishes an incrementing counter once per second."""

    def setup(
        self,
        name=None,
        pub_address=None,
        sub_address=None,
        private_key=None,
        public_key=None,
        server_public_key=None,
    ):
        # CURVE encryption is enabled only when the full key set is supplied.
        have_keys = private_key and public_key and server_public_key
        options = (
            self.curve_client_config(server_public_key, public_key, private_key)
            if have_keys
            else {}
        )
        self.counter = 0
        self.pub, self.sub = self.create_notification_client(
            pub_address, sub_address, options=options
        )
        # Publish from a managed background thread so shutdown can join it.
        publisher = threading.Thread(target=self.send_forever)
        self.threads.append(publisher)
        publisher.start()

    def send_forever(self):
        """Publish a Notification every second until the exit event fires."""
        while not self.exit_event.is_set():
            time.sleep(1)
            self.counter += 1
            msg = Message.Notification(payload=str(self.counter))
            self.log.info(f"publishing: {msg}")
            self.pub.send(msg.to_multipart())
class Listener(Agent):
    """Agent that logs every notification it receives."""

    def setup(
        self,
        name=None,
        pub_address=None,
        sub_address=None,
        private_key=None,
        public_key=None,
        server_public_key=None,
    ):
        # CURVE encryption is enabled only when the full key set is supplied.
        have_keys = private_key and public_key and server_public_key
        options = (
            self.curve_client_config(server_public_key, public_key, private_key)
            if have_keys
            else {}
        )
        self.pub, self.sub = self.create_notification_client(
            pub_address, sub_address, options=options
        )
        # Log each incoming notification as it arrives.
        self.sub.observable.subscribe(lambda x: self.log.info(f"received: {x}"))
if __name__ == "__main__":
    # Demo: a CURVE-encrypted broker, a sender, a trusted listener, and an
    # untrusted listener whose key is NOT in the broker's certificate dir.
    with tempfile.TemporaryDirectory() as trusted_keys_path, tempfile.TemporaryDirectory() as untrusted_keys_path:
        # create key pairs in corresponding directories
        Agent.create_curve_certificates(trusted_keys_path, "server")
        Agent.create_curve_certificates(trusted_keys_path, "listener")
        Agent.create_curve_certificates(untrusted_keys_path, "listener2")
        # load key pairs
        server_public_key, server_private_key = Agent.load_curve_certificate(
            os.path.join(trusted_keys_path, "server.key_secret")
        )
        listener_public_key, listener_private_key = Agent.load_curve_certificate(
            os.path.join(trusted_keys_path, "listener.key_secret")
        )
        listener2_public_key, listener2_private_key = Agent.load_curve_certificate(
            os.path.join(untrusted_keys_path, "listener2.key_secret")
        )
        broker = NotificationBroker(
            name="broker",
            pub_address="tcp://127.0.0.1:5000",
            sub_address="tcp://127.0.0.1:5001",
            private_key=server_private_key,
            client_certificates_path=trusted_keys_path,
        )
        sender = Sender(
            name="sender",
            pub_address="tcp://127.0.0.1:5000",
            sub_address="tcp://127.0.0.1:5001",
            private_key=server_private_key,
            public_key=server_public_key,
            server_public_key=server_public_key,
        )
        listener = Listener(
            name="listener",
            pub_address="tcp://127.0.0.1:5000",
            sub_address="tcp://127.0.0.1:5001",
            private_key=listener_private_key,
            public_key=listener_public_key,
            server_public_key=server_public_key,
        )
        listener2 = Listener(
            name="listener2",
            pub_address="tcp://127.0.0.1:5000",
            sub_address="tcp://127.0.0.1:5001",
            private_key=listener2_private_key,
            public_key=listener2_public_key,
            server_public_key=server_public_key,
        )

        # override shutdown signals so Ctrl-C / SIGTERM stop all agents
        def shutdown(signum, frame):
            listener.shutdown()
            listener2.shutdown()
            sender.shutdown()
            broker.shutdown()

        signal(SIGTERM, shutdown)
        signal(SIGINT, shutdown)
|
app_test.py | #!/usr/bin/python3
# Start a Qt application, and use an asynchronous thread to "click" on the GUI.
import time
import threading
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QLabel, QWidget, QHBoxLayout, QVBoxLayout, QPushButton, QApplication
from picamera2.previews.qt import QGlPicamera2
from picamera2 import Picamera2
def post_callback(request):
    # Runs after each completed camera request: render the capture metadata
    # into the side-panel label, one "key: value" per line.
    label.setText(''.join("{}: {}\n".format(k, v) for k, v in request.get_metadata().items()))


picam2 = Picamera2()
picam2.post_callback = post_callback
picam2.configure(picam2.preview_configuration(main={"size": (800, 600)}))
app = QApplication([])
def on_button_clicked():
    # Disable the button while a capture is in flight; it is re-enabled by
    # capture_done via the done signal.
    button.setEnabled(False)
    cfg = picam2.still_configuration()
    picam2.switch_mode_and_capture_file(cfg, "test.jpg", wait=False, signal_function=qpicamera2.signal_done)
def capture_done():
    # Capture finished (signalled through qpicamera2.done_signal); allow
    # another capture to be started.
    button.setEnabled(True)
# Build the GUI: camera preview on the left (80%), metadata label and
# capture button stacked on the right (20%).
qpicamera2 = QGlPicamera2(picam2, width=800, height=600, keep_ar=False)
button = QPushButton("Click to capture JPEG")
label = QLabel()
window = QWidget()
# Wire signals: capture completion re-enables the button, clicks start one.
qpicamera2.done_signal.connect(capture_done)
button.clicked.connect(on_button_clicked)
label.setFixedWidth(400)
label.setAlignment(QtCore.Qt.AlignTop)
layout_h = QHBoxLayout()
layout_v = QVBoxLayout()
layout_v.addWidget(label)
layout_v.addWidget(button)
layout_h.addWidget(qpicamera2, 80)
layout_h.addLayout(layout_v, 20)
window.setWindowTitle("Qt Picamera2 App")
window.resize(1200, 600)
window.setLayout(layout_h)
picam2.start()
window.show()
def test_func():
    # This function can run in another thread and "click" on the GUI:
    # it triggers two captures five seconds apart, then quits the app.
    time.sleep(5)
    button.clicked.emit()
    time.sleep(5)
    button.clicked.emit()
    time.sleep(5)
    app.quit()


# Daemon thread so it cannot keep the process alive after app.exec() returns.
thread = threading.Thread(target=test_func, daemon=True)
thread.start()
app.exec()
|
email.py | from flask_mail import Message
from flask import current_app
from app import mail
from threading import Thread
def send_async_email(app, msg):
    # Worker-thread target: mail.send needs an application context, which
    # threads do not inherit, so push one explicitly from the real app.
    with app.app_context():
        mail.send(msg)
def send_email(subject, sender, recipients, text_body):
    """Compose an email and dispatch it on a background thread.

    The real Flask app object is captured here (current_app is only a
    proxy bound to this request) so the worker thread can push its own
    application context.
    """
    message = Message(subject, sender=sender, recipients=recipients)
    message.body = text_body
    app = current_app._get_current_object()
    worker = Thread(target=send_async_email, args=(app, message))
    worker.start()
test_unix_events.py | """Tests for unix_events.py."""
import collections
import contextlib
import errno
import io
import os
import pathlib
import signal
import socket
import stat
import sys
import tempfile
import threading
import unittest
from unittest import mock
from test import support
from test.support import socket_helper
if sys.platform == 'win32':
raise unittest.SkipTest('UNIX only')
import asyncio
from asyncio import log
from asyncio import unix_events
from test.test_asyncio import utils as test_utils
def tearDownModule():
    # Reset the global event loop policy so this module leaves no asyncio
    # state behind for other test modules.
    asyncio.set_event_loop_policy(None)
MOCK_ANY = mock.ANY
def close_pipe_transport(transport):
    """Close a pipe transport's underlying pipe directly.

    transport.close() is deliberately avoided because these tests mock the
    event loop and the selector, so the normal close path would not work.
    """
    pipe = transport._pipe
    if pipe is None:
        return
    pipe.close()
    transport._pipe = None
@unittest.skipUnless(signal, 'Signals are not supported')
class SelectorEventLoopSignalTests(test_utils.TestCase):
    """Tests for signal handler management on SelectorEventLoop.

    Most tests patch asyncio.unix_events.signal so no real signal state is
    touched; they verify the bookkeeping in loop._signal_handlers and the
    calls made to signal.signal / signal.set_wakeup_fd.
    """

    def setUp(self):
        super().setUp()
        self.loop = asyncio.SelectorEventLoop()
        self.set_event_loop(self.loop)

    def test_check_signal(self):
        # Non-int and out-of-range signal numbers are rejected.
        self.assertRaises(
            TypeError, self.loop._check_signal, '1')
        self.assertRaises(
            ValueError, self.loop._check_signal, signal.NSIG + 1)

    def test_handle_signal_no_handler(self):
        # A signal with no registered handler is silently ignored.
        self.loop._handle_signal(signal.NSIG + 1)

    def test_handle_signal_cancelled_handler(self):
        # A cancelled handle triggers removal of the signal handler.
        h = asyncio.Handle(mock.Mock(), (),
                           loop=mock.Mock())
        h.cancel()
        self.loop._signal_handlers[signal.NSIG + 1] = h
        self.loop.remove_signal_handler = mock.Mock()
        self.loop._handle_signal(signal.NSIG + 1)
        self.loop.remove_signal_handler.assert_called_with(signal.NSIG + 1)

    @mock.patch('asyncio.unix_events.signal')
    def test_add_signal_handler_setup_error(self, m_signal):
        # Failure in set_wakeup_fd surfaces as RuntimeError.
        m_signal.NSIG = signal.NSIG
        m_signal.valid_signals = signal.valid_signals
        m_signal.set_wakeup_fd.side_effect = ValueError

        self.assertRaises(
            RuntimeError,
            self.loop.add_signal_handler,
            signal.SIGINT, lambda: True)

    @mock.patch('asyncio.unix_events.signal')
    def test_add_signal_handler_coroutine_error(self, m_signal):
        m_signal.NSIG = signal.NSIG

        async def simple_coroutine():
            pass

        # callback must not be a coroutine function
        coro_func = simple_coroutine
        coro_obj = coro_func()
        self.addCleanup(coro_obj.close)
        for func in (coro_func, coro_obj):
            self.assertRaisesRegex(
                TypeError, 'coroutines cannot be used with add_signal_handler',
                self.loop.add_signal_handler,
                signal.SIGINT, func)

    @mock.patch('asyncio.unix_events.signal')
    def test_add_signal_handler(self, m_signal):
        # Successful registration stores an asyncio.Handle for the signal.
        m_signal.NSIG = signal.NSIG
        m_signal.valid_signals = signal.valid_signals
        cb = lambda: True
        self.loop.add_signal_handler(signal.SIGHUP, cb)
        h = self.loop._signal_handlers.get(signal.SIGHUP)
        self.assertIsInstance(h, asyncio.Handle)
        self.assertEqual(h._callback, cb)

    @mock.patch('asyncio.unix_events.signal')
    def test_add_signal_handler_install_error(self, m_signal):
        # A non-EINVAL OSError from signal.signal propagates unchanged.
        m_signal.NSIG = signal.NSIG
        m_signal.valid_signals = signal.valid_signals

        def set_wakeup_fd(fd):
            if fd == -1:
                raise ValueError()
        m_signal.set_wakeup_fd = set_wakeup_fd

        class Err(OSError):
            errno = errno.EFAULT
        m_signal.signal.side_effect = Err

        self.assertRaises(
            Err,
            self.loop.add_signal_handler,
            signal.SIGINT, lambda: True)

    @mock.patch('asyncio.unix_events.signal')
    @mock.patch('asyncio.base_events.logger')
    def test_add_signal_handler_install_error2(self, m_logging, m_signal):
        # EINVAL becomes RuntimeError; an existing handler keeps the
        # wakeup fd installed (only one set_wakeup_fd call).
        m_signal.NSIG = signal.NSIG
        m_signal.valid_signals = signal.valid_signals

        class Err(OSError):
            errno = errno.EINVAL
        m_signal.signal.side_effect = Err

        self.loop._signal_handlers[signal.SIGHUP] = lambda: True
        self.assertRaises(
            RuntimeError,
            self.loop.add_signal_handler,
            signal.SIGINT, lambda: True)
        self.assertFalse(m_logging.info.called)
        self.assertEqual(1, m_signal.set_wakeup_fd.call_count)

    @mock.patch('asyncio.unix_events.signal')
    @mock.patch('asyncio.base_events.logger')
    def test_add_signal_handler_install_error3(self, m_logging, m_signal):
        # With no prior handlers the wakeup fd is also torn down again
        # (two set_wakeup_fd calls: install + rollback).
        class Err(OSError):
            errno = errno.EINVAL
        m_signal.signal.side_effect = Err
        m_signal.NSIG = signal.NSIG
        m_signal.valid_signals = signal.valid_signals

        self.assertRaises(
            RuntimeError,
            self.loop.add_signal_handler,
            signal.SIGINT, lambda: True)
        self.assertFalse(m_logging.info.called)
        self.assertEqual(2, m_signal.set_wakeup_fd.call_count)

    @mock.patch('asyncio.unix_events.signal')
    def test_remove_signal_handler(self, m_signal):
        # Removing the last handler restores SIG_DFL and the wakeup fd.
        m_signal.NSIG = signal.NSIG
        m_signal.valid_signals = signal.valid_signals
        self.loop.add_signal_handler(signal.SIGHUP, lambda: True)

        self.assertTrue(
            self.loop.remove_signal_handler(signal.SIGHUP))
        self.assertTrue(m_signal.set_wakeup_fd.called)
        self.assertTrue(m_signal.signal.called)
        self.assertEqual(
            (signal.SIGHUP, m_signal.SIG_DFL), m_signal.signal.call_args[0])

    @mock.patch('asyncio.unix_events.signal')
    def test_remove_signal_handler_2(self, m_signal):
        # SIGINT is restored to the default int handler, and the wakeup fd
        # stays installed while another handler remains registered.
        m_signal.NSIG = signal.NSIG
        m_signal.SIGINT = signal.SIGINT
        m_signal.valid_signals = signal.valid_signals
        self.loop.add_signal_handler(signal.SIGINT, lambda: True)
        self.loop._signal_handlers[signal.SIGHUP] = object()
        m_signal.set_wakeup_fd.reset_mock()

        self.assertTrue(
            self.loop.remove_signal_handler(signal.SIGINT))
        self.assertFalse(m_signal.set_wakeup_fd.called)
        self.assertTrue(m_signal.signal.called)
        self.assertEqual(
            (signal.SIGINT, m_signal.default_int_handler),
            m_signal.signal.call_args[0])

    @mock.patch('asyncio.unix_events.signal')
    @mock.patch('asyncio.base_events.logger')
    def test_remove_signal_handler_cleanup_error(self, m_logging, m_signal):
        # A failing set_wakeup_fd during cleanup is logged, not raised.
        m_signal.NSIG = signal.NSIG
        m_signal.valid_signals = signal.valid_signals
        self.loop.add_signal_handler(signal.SIGHUP, lambda: True)

        m_signal.set_wakeup_fd.side_effect = ValueError

        self.loop.remove_signal_handler(signal.SIGHUP)
        self.assertTrue(m_logging.info)

    @mock.patch('asyncio.unix_events.signal')
    def test_remove_signal_handler_error(self, m_signal):
        # A generic OSError from signal.signal propagates unchanged.
        m_signal.NSIG = signal.NSIG
        m_signal.valid_signals = signal.valid_signals
        self.loop.add_signal_handler(signal.SIGHUP, lambda: True)

        m_signal.signal.side_effect = OSError

        self.assertRaises(
            OSError, self.loop.remove_signal_handler, signal.SIGHUP)

    @mock.patch('asyncio.unix_events.signal')
    def test_remove_signal_handler_error2(self, m_signal):
        # EINVAL from signal.signal is converted to RuntimeError.
        m_signal.NSIG = signal.NSIG
        m_signal.valid_signals = signal.valid_signals
        self.loop.add_signal_handler(signal.SIGHUP, lambda: True)

        class Err(OSError):
            errno = errno.EINVAL
        m_signal.signal.side_effect = Err

        self.assertRaises(
            RuntimeError, self.loop.remove_signal_handler, signal.SIGHUP)

    @mock.patch('asyncio.unix_events.signal')
    def test_close(self, m_signal):
        # Closing the loop removes all handlers and resets the wakeup fd.
        m_signal.NSIG = signal.NSIG
        m_signal.valid_signals = signal.valid_signals

        self.loop.add_signal_handler(signal.SIGHUP, lambda: True)
        self.loop.add_signal_handler(signal.SIGCHLD, lambda: True)

        self.assertEqual(len(self.loop._signal_handlers), 2)

        m_signal.set_wakeup_fd.reset_mock()

        self.loop.close()

        self.assertEqual(len(self.loop._signal_handlers), 0)
        m_signal.set_wakeup_fd.assert_called_once_with(-1)

    @mock.patch('asyncio.unix_events.sys')
    @mock.patch('asyncio.unix_events.signal')
    def test_close_on_finalizing(self, m_signal, m_sys):
        # During interpreter shutdown, handler removal is skipped with a
        # ResourceWarning instead of touching the signal module.
        m_signal.NSIG = signal.NSIG
        m_signal.valid_signals = signal.valid_signals
        self.loop.add_signal_handler(signal.SIGHUP, lambda: True)

        self.assertEqual(len(self.loop._signal_handlers), 1)
        m_sys.is_finalizing.return_value = True
        m_signal.signal.reset_mock()

        with self.assertWarnsRegex(ResourceWarning,
                                   "skipping signal handlers removal"):
            self.loop.close()

        self.assertEqual(len(self.loop._signal_handlers), 0)
        self.assertFalse(m_signal.signal.called)
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'),
'UNIX Sockets are not supported')
class SelectorEventLoopUnixSocketTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = asyncio.SelectorEventLoop()
self.set_event_loop(self.loop)
@socket_helper.skip_unless_bind_unix_socket
def test_create_unix_server_existing_path_sock(self):
with test_utils.unix_socket_path() as path:
sock = socket.socket(socket.AF_UNIX)
sock.bind(path)
sock.listen(1)
sock.close()
coro = self.loop.create_unix_server(lambda: None, path)
srv = self.loop.run_until_complete(coro)
srv.close()
self.loop.run_until_complete(srv.wait_closed())
@socket_helper.skip_unless_bind_unix_socket
def test_create_unix_server_pathlib(self):
with test_utils.unix_socket_path() as path:
path = pathlib.Path(path)
srv_coro = self.loop.create_unix_server(lambda: None, path)
srv = self.loop.run_until_complete(srv_coro)
srv.close()
self.loop.run_until_complete(srv.wait_closed())
def test_create_unix_connection_pathlib(self):
with test_utils.unix_socket_path() as path:
path = pathlib.Path(path)
coro = self.loop.create_unix_connection(lambda: None, path)
with self.assertRaises(FileNotFoundError):
# If pathlib.Path wasn't supported, the exception would be
# different.
self.loop.run_until_complete(coro)
def test_create_unix_server_existing_path_nonsock(self):
with tempfile.NamedTemporaryFile() as file:
coro = self.loop.create_unix_server(lambda: None, file.name)
with self.assertRaisesRegex(OSError,
'Address.*is already in use'):
self.loop.run_until_complete(coro)
def test_create_unix_server_ssl_bool(self):
coro = self.loop.create_unix_server(lambda: None, path='spam',
ssl=True)
with self.assertRaisesRegex(TypeError,
'ssl argument must be an SSLContext'):
self.loop.run_until_complete(coro)
def test_create_unix_server_nopath_nosock(self):
coro = self.loop.create_unix_server(lambda: None, path=None)
with self.assertRaisesRegex(ValueError,
'path was not specified, and no sock'):
self.loop.run_until_complete(coro)
def test_create_unix_server_path_inetsock(self):
sock = socket.socket()
with sock:
coro = self.loop.create_unix_server(lambda: None, path=None,
sock=sock)
with self.assertRaisesRegex(ValueError,
'A UNIX Domain Stream.*was expected'):
self.loop.run_until_complete(coro)
def test_create_unix_server_path_dgram(self):
    """An AF_UNIX datagram socket must be rejected (stream required)."""
    with socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) as dgram_sock:
        server_coro = self.loop.create_unix_server(
            lambda: None, path=None, sock=dgram_sock)
        with self.assertRaisesRegex(ValueError,
                                    'A UNIX Domain Stream.*was expected'):
            self.loop.run_until_complete(server_coro)
@unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
                     'no socket.SOCK_NONBLOCK (linux only)')
@socket_helper.skip_unless_bind_unix_socket
def test_create_unix_server_path_stream_bittype(self):
    """SOCK_STREAM OR-ed with SOCK_NONBLOCK still counts as a stream socket."""
    listener = socket.socket(
        socket.AF_UNIX, socket.SOCK_STREAM | socket.SOCK_NONBLOCK)
    # NamedTemporaryFile is used only to obtain a unique path; the file
    # itself is removed on context exit, freeing the name for bind().
    with tempfile.NamedTemporaryFile() as file:
        fn = file.name
    try:
        with listener:
            listener.bind(fn)
            server = self.loop.run_until_complete(
                self.loop.create_unix_server(lambda: None, path=None,
                                             sock=listener))
            server.close()
            self.loop.run_until_complete(server.wait_closed())
    finally:
        os.unlink(fn)
def test_create_unix_server_ssl_timeout_with_plain_sock(self):
    """ssl_handshake_timeout without ssl must raise ValueError."""
    server_coro = self.loop.create_unix_server(
        lambda: None, path='spam', ssl_handshake_timeout=1)
    with self.assertRaisesRegex(
            ValueError,
            'ssl_handshake_timeout is only meaningful with ssl'):
        self.loop.run_until_complete(server_coro)
def test_create_unix_connection_path_inetsock(self):
    """An AF_INET socket must be rejected by create_unix_connection()."""
    with socket.socket() as inet_sock:
        connect_coro = self.loop.create_unix_connection(
            lambda: None, sock=inet_sock)
        with self.assertRaisesRegex(ValueError,
                                    'A UNIX Domain Stream.*was expected'):
            self.loop.run_until_complete(connect_coro)
@mock.patch('asyncio.unix_events.socket')
def test_create_unix_server_bind_error(self, m_socket):
    """The freshly created socket is closed on any bind() failure.

    Both an OSError and a MemoryError from bind() must lead to the
    socket being closed before the error propagates.
    """
    # Ensure that the socket is closed on any bind error
    sock = mock.Mock()
    m_socket.socket.return_value = sock

    sock.bind.side_effect = OSError
    coro = self.loop.create_unix_server(lambda: None, path="/test")
    with self.assertRaises(OSError):
        self.loop.run_until_complete(coro)
    self.assertTrue(sock.close.called)

    # Reset the recorded close() call so the next assertion is not
    # vacuously satisfied by the OSError case above.
    sock.close.reset_mock()

    sock.bind.side_effect = MemoryError
    coro = self.loop.create_unix_server(lambda: None, path="/test")
    with self.assertRaises(MemoryError):
        self.loop.run_until_complete(coro)
    self.assertTrue(sock.close.called)
def test_create_unix_connection_path_sock(self):
    """Passing both a path and a sock must raise ValueError."""
    connect_coro = self.loop.create_unix_connection(
        lambda: None, os.devnull, sock=object())
    with self.assertRaisesRegex(ValueError, 'path and sock can not be'):
        self.loop.run_until_complete(connect_coro)
def test_create_unix_connection_nopath_nosock(self):
    """Omitting both path and sock must raise ValueError."""
    connect_coro = self.loop.create_unix_connection(lambda: None, None)
    with self.assertRaisesRegex(ValueError,
                                'no path and sock were specified'):
        self.loop.run_until_complete(connect_coro)
def test_create_unix_connection_nossl_serverhost(self):
    """server_hostname without ssl must raise ValueError."""
    connect_coro = self.loop.create_unix_connection(
        lambda: None, os.devnull, server_hostname='spam')
    with self.assertRaisesRegex(ValueError,
                                'server_hostname is only meaningful'):
        self.loop.run_until_complete(connect_coro)
def test_create_unix_connection_ssl_noserverhost(self):
    """ssl without server_hostname must raise ValueError."""
    connect_coro = self.loop.create_unix_connection(
        lambda: None, os.devnull, ssl=True)
    with self.assertRaisesRegex(
            ValueError, 'you have to pass server_hostname when using ssl'):
        self.loop.run_until_complete(connect_coro)
def test_create_unix_connection_ssl_timeout_with_plain_sock(self):
    """ssl_handshake_timeout without ssl must raise ValueError."""
    connect_coro = self.loop.create_unix_connection(
        lambda: None, path='spam', ssl_handshake_timeout=1)
    with self.assertRaisesRegex(
            ValueError,
            'ssl_handshake_timeout is only meaningful with ssl'):
        self.loop.run_until_complete(connect_coro)
@unittest.skipUnless(hasattr(os, 'sendfile'),
                     'sendfile is not supported')
class SelectorEventLoopUnixSockSendfileTests(test_utils.TestCase):
    """Tests for the selector event loop's os.sendfile()-based socket path."""

    DATA = b"12345abcde" * 16 * 1024  # 160 KiB

    class MyProto(asyncio.Protocol):
        # Receiving side: buffers all payload and exposes futures the tests
        # can await for "connected" (_ready) and "closed" (fut).

        def __init__(self, loop):
            self.started = False
            self.closed = False
            self.data = bytearray()
            self.fut = loop.create_future()
            self.transport = None
            self._ready = loop.create_future()

        def connection_made(self, transport):
            self.started = True
            self.transport = transport
            self._ready.set_result(None)

        def data_received(self, data):
            self.data.extend(data)

        def connection_lost(self, exc):
            self.closed = True
            self.fut.set_result(None)

        async def wait_closed(self):
            await self.fut

    @classmethod
    def setUpClass(cls):
        # Write the shared payload file once for the whole class.
        with open(support.TESTFN, 'wb') as fp:
            fp.write(cls.DATA)
        super().setUpClass()

    @classmethod
    def tearDownClass(cls):
        support.unlink(support.TESTFN)
        super().tearDownClass()

    def setUp(self):
        self.loop = asyncio.new_event_loop()
        self.set_event_loop(self.loop)
        self.file = open(support.TESTFN, 'rb')
        self.addCleanup(self.file.close)
        super().setUp()

    def make_socket(self, cleanup=True):
        # Non-blocking TCP socket with deliberately tiny buffers so a
        # single sendfile() call cannot move the whole payload.
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.setblocking(False)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1024)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1024)
        if cleanup:
            self.addCleanup(sock.close)
        return sock

    def run_loop(self, coro):
        return self.loop.run_until_complete(coro)

    def prepare(self):
        # Build a connected (client socket, server-side protocol) pair and
        # register cleanup that tears the transport and server down.
        sock = self.make_socket()
        proto = self.MyProto(self.loop)
        port = socket_helper.find_unused_port()
        srv_sock = self.make_socket(cleanup=False)
        srv_sock.bind((socket_helper.HOST, port))
        server = self.run_loop(self.loop.create_server(
            lambda: proto, sock=srv_sock))
        self.run_loop(self.loop.sock_connect(sock, (socket_helper.HOST, port)))
        self.run_loop(proto._ready)

        def cleanup():
            proto.transport.close()
            self.run_loop(proto.wait_closed())
            server.close()
            self.run_loop(server.wait_closed())

        self.addCleanup(cleanup)

        return sock, proto

    def test_sock_sendfile_not_available(self):
        sock, proto = self.prepare()
        # spec=[] makes the patched os module expose no attributes, so
        # os.sendfile is "missing".
        with mock.patch('asyncio.unix_events.os', spec=[]):
            with self.assertRaisesRegex(asyncio.SendfileNotAvailableError,
                                        "os[.]sendfile[(][)] is not available"):
                self.run_loop(self.loop._sock_sendfile_native(sock, self.file,
                                                              0, None))
        self.assertEqual(self.file.tell(), 0)

    def test_sock_sendfile_not_a_file(self):
        sock, proto = self.prepare()
        f = object()
        with self.assertRaisesRegex(asyncio.SendfileNotAvailableError,
                                    "not a regular file"):
            self.run_loop(self.loop._sock_sendfile_native(sock, f,
                                                          0, None))
        self.assertEqual(self.file.tell(), 0)

    def test_sock_sendfile_iobuffer(self):
        sock, proto = self.prepare()
        f = io.BytesIO()
        with self.assertRaisesRegex(asyncio.SendfileNotAvailableError,
                                    "not a regular file"):
            self.run_loop(self.loop._sock_sendfile_native(sock, f,
                                                          0, None))
        self.assertEqual(self.file.tell(), 0)

    def test_sock_sendfile_not_regular_file(self):
        sock, proto = self.prepare()
        f = mock.Mock()
        f.fileno.return_value = -1
        with self.assertRaisesRegex(asyncio.SendfileNotAvailableError,
                                    "not a regular file"):
            self.run_loop(self.loop._sock_sendfile_native(sock, f,
                                                          0, None))
        self.assertEqual(self.file.tell(), 0)

    def test_sock_sendfile_cancel1(self):
        # Cancelling before completion must deregister the socket from
        # the selector.
        sock, proto = self.prepare()
        fut = self.loop.create_future()
        fileno = self.file.fileno()
        self.loop._sock_sendfile_native_impl(fut, None, sock, fileno,
                                             0, None, len(self.DATA), 0)
        fut.cancel()
        with contextlib.suppress(asyncio.CancelledError):
            self.run_loop(fut)
        with self.assertRaises(KeyError):
            self.loop._selector.get_key(sock)

    def test_sock_sendfile_cancel2(self):
        # A second _impl invocation after cancellation must notice the
        # cancelled future and remove the writer registration.
        sock, proto = self.prepare()
        fut = self.loop.create_future()
        fileno = self.file.fileno()
        self.loop._sock_sendfile_native_impl(fut, None, sock, fileno,
                                             0, None, len(self.DATA), 0)
        fut.cancel()
        self.loop._sock_sendfile_native_impl(fut, sock.fileno(), sock, fileno,
                                             0, None, len(self.DATA), 0)
        with self.assertRaises(KeyError):
            self.loop._selector.get_key(sock)

    def test_sock_sendfile_blocking_error(self):
        # EWOULDBLOCK: the socket must be registered for writability and
        # a done-callback attached to the future.
        sock, proto = self.prepare()
        fileno = self.file.fileno()
        fut = mock.Mock()
        fut.cancelled.return_value = False
        with mock.patch('os.sendfile', side_effect=BlockingIOError()):
            self.loop._sock_sendfile_native_impl(fut, None, sock, fileno,
                                                 0, None, len(self.DATA), 0)
        key = self.loop._selector.get_key(sock)
        self.assertIsNotNone(key)
        fut.add_done_callback.assert_called_once_with(mock.ANY)

    def test_sock_sendfile_os_error_first_call(self):
        # OSError on the very first call means sendfile is unusable for
        # this pair: SendfileNotAvailableError, nothing consumed.
        sock, proto = self.prepare()
        fileno = self.file.fileno()
        fut = self.loop.create_future()
        with mock.patch('os.sendfile', side_effect=OSError()):
            self.loop._sock_sendfile_native_impl(fut, None, sock, fileno,
                                                 0, None, len(self.DATA), 0)
        with self.assertRaises(KeyError):
            self.loop._selector.get_key(sock)
        exc = fut.exception()
        self.assertIsInstance(exc, asyncio.SendfileNotAvailableError)
        self.assertEqual(0, self.file.tell())

    def test_sock_sendfile_os_error_next_call(self):
        # OSError after some data went out is propagated as-is; the file
        # position reflects the bytes already sent.
        sock, proto = self.prepare()
        fileno = self.file.fileno()
        fut = self.loop.create_future()
        err = OSError()
        with mock.patch('os.sendfile', side_effect=err):
            self.loop._sock_sendfile_native_impl(fut, sock.fileno(),
                                                 sock, fileno,
                                                 1000, None, len(self.DATA),
                                                 1000)
        with self.assertRaises(KeyError):
            self.loop._selector.get_key(sock)
        exc = fut.exception()
        self.assertIs(exc, err)
        self.assertEqual(1000, self.file.tell())

    def test_sock_sendfile_exception(self):
        sock, proto = self.prepare()
        fileno = self.file.fileno()
        fut = self.loop.create_future()
        err = asyncio.SendfileNotAvailableError()
        with mock.patch('os.sendfile', side_effect=err):
            self.loop._sock_sendfile_native_impl(fut, sock.fileno(),
                                                 sock, fileno,
                                                 1000, None, len(self.DATA),
                                                 1000)
        with self.assertRaises(KeyError):
            self.loop._selector.get_key(sock)
        exc = fut.exception()
        self.assertIs(exc, err)
        self.assertEqual(1000, self.file.tell())
class UnixReadPipeTransportTests(test_utils.TestCase):
    """Unit tests for _UnixReadPipeTransport against a mocked pipe (fd 5)."""

    def setUp(self):
        super().setUp()
        self.loop = self.new_test_loop()
        self.protocol = test_utils.make_test_protocol(asyncio.Protocol)
        self.pipe = mock.Mock(spec_set=io.RawIOBase)
        self.pipe.fileno.return_value = 5

        # os.set_blocking is patched out so the fake fd is never touched.
        blocking_patcher = mock.patch('os.set_blocking')
        blocking_patcher.start()
        self.addCleanup(blocking_patcher.stop)

        # os.fstat reports a FIFO so the transport accepts the mock pipe.
        fstat_patcher = mock.patch('os.fstat')
        m_fstat = fstat_patcher.start()
        st = mock.Mock()
        st.st_mode = stat.S_IFIFO
        m_fstat.return_value = st
        self.addCleanup(fstat_patcher.stop)

    def read_pipe_transport(self, waiter=None):
        # Build a transport over the mocked pipe; closed automatically.
        transport = unix_events._UnixReadPipeTransport(self.loop, self.pipe,
                                                       self.protocol,
                                                       waiter=waiter)
        self.addCleanup(close_pipe_transport, transport)
        return transport

    def test_ctor(self):
        waiter = self.loop.create_future()
        tr = self.read_pipe_transport(waiter=waiter)
        self.loop.run_until_complete(waiter)

        self.protocol.connection_made.assert_called_with(tr)
        self.loop.assert_reader(5, tr._read_ready)
        self.assertIsNone(waiter.result())

    @mock.patch('os.read')
    def test__read_ready(self, m_read):
        tr = self.read_pipe_transport()
        m_read.return_value = b'data'
        tr._read_ready()

        m_read.assert_called_with(5, tr.max_size)
        self.protocol.data_received.assert_called_with(b'data')

    @mock.patch('os.read')
    def test__read_ready_eof(self, m_read):
        # b'' from os.read means EOF: reader removed, protocol notified.
        tr = self.read_pipe_transport()
        m_read.return_value = b''
        tr._read_ready()

        m_read.assert_called_with(5, tr.max_size)
        self.assertFalse(self.loop.readers)
        test_utils.run_briefly(self.loop)
        self.protocol.eof_received.assert_called_with()
        self.protocol.connection_lost.assert_called_with(None)

    @mock.patch('os.read')
    def test__read_ready_blocked(self, m_read):
        # EWOULDBLOCK is swallowed: no data delivered, no close.
        tr = self.read_pipe_transport()
        m_read.side_effect = BlockingIOError
        tr._read_ready()

        m_read.assert_called_with(5, tr.max_size)
        test_utils.run_briefly(self.loop)
        self.assertFalse(self.protocol.data_received.called)

    @mock.patch('asyncio.log.logger.error')
    @mock.patch('os.read')
    def test__read_ready_error(self, m_read, m_logexc):
        # Any other OSError is fatal: the transport closes and logs.
        tr = self.read_pipe_transport()
        err = OSError()
        m_read.side_effect = err
        tr._close = mock.Mock()
        tr._read_ready()

        m_read.assert_called_with(5, tr.max_size)
        tr._close.assert_called_with(err)
        m_logexc.assert_called_with(
            test_utils.MockPattern(
                'Fatal read error on pipe transport'
                '\nprotocol:.*\ntransport:.*'),
            exc_info=(OSError, MOCK_ANY, MOCK_ANY))

    @mock.patch('os.read')
    def test_pause_reading(self, m_read):
        tr = self.read_pipe_transport()
        m = mock.Mock()
        self.loop.add_reader(5, m)
        tr.pause_reading()
        self.assertFalse(self.loop.readers)

    @mock.patch('os.read')
    def test_resume_reading(self, m_read):
        tr = self.read_pipe_transport()
        tr.pause_reading()
        tr.resume_reading()
        self.loop.assert_reader(5, tr._read_ready)

    @mock.patch('os.read')
    def test_close(self, m_read):
        tr = self.read_pipe_transport()
        tr._close = mock.Mock()
        tr.close()
        tr._close.assert_called_with(None)

    @mock.patch('os.read')
    def test_close_already_closing(self, m_read):
        tr = self.read_pipe_transport()
        tr._closing = True
        tr._close = mock.Mock()
        tr.close()
        self.assertFalse(tr._close.called)

    @mock.patch('os.read')
    def test__close(self, m_read):
        tr = self.read_pipe_transport()
        err = object()
        tr._close(err)
        self.assertTrue(tr.is_closing())
        self.assertFalse(self.loop.readers)
        test_utils.run_briefly(self.loop)
        self.protocol.connection_lost.assert_called_with(err)

    def test__call_connection_lost(self):
        tr = self.read_pipe_transport()
        self.assertIsNotNone(tr._protocol)
        self.assertIsNotNone(tr._loop)

        err = None
        tr._call_connection_lost(err)
        self.protocol.connection_lost.assert_called_with(err)
        self.pipe.close.assert_called_with()

        # References are dropped afterwards to break reference cycles.
        self.assertIsNone(tr._protocol)
        self.assertIsNone(tr._loop)

    def test__call_connection_lost_with_err(self):
        tr = self.read_pipe_transport()
        self.assertIsNotNone(tr._protocol)
        self.assertIsNotNone(tr._loop)

        err = OSError()
        tr._call_connection_lost(err)
        self.protocol.connection_lost.assert_called_with(err)
        self.pipe.close.assert_called_with()

        self.assertIsNone(tr._protocol)
        self.assertIsNone(tr._loop)

    def test_pause_reading_on_closed_pipe(self):
        # pause_reading() on a closed transport must not raise.
        tr = self.read_pipe_transport()
        tr.close()
        test_utils.run_briefly(self.loop)
        self.assertIsNone(tr._loop)
        tr.pause_reading()

    def test_pause_reading_on_paused_pipe(self):
        tr = self.read_pipe_transport()
        tr.pause_reading()
        # the second call should do nothing
        tr.pause_reading()

    def test_resume_reading_on_closed_pipe(self):
        # resume_reading() on a closed transport must not raise.
        tr = self.read_pipe_transport()
        tr.close()
        test_utils.run_briefly(self.loop)
        self.assertIsNone(tr._loop)
        tr.resume_reading()

    def test_resume_reading_on_paused_pipe(self):
        tr = self.read_pipe_transport()
        # the pipe is not paused
        # resuming should do nothing
        tr.resume_reading()
class UnixWritePipeTransportTests(test_utils.TestCase):
    """Unit tests for _UnixWritePipeTransport against a mocked pipe (fd 5)."""

    def setUp(self):
        super().setUp()
        self.loop = self.new_test_loop()
        self.protocol = test_utils.make_test_protocol(asyncio.BaseProtocol)
        self.pipe = mock.Mock(spec_set=io.RawIOBase)
        self.pipe.fileno.return_value = 5

        # os.set_blocking is patched out so the fake fd is never touched.
        blocking_patcher = mock.patch('os.set_blocking')
        blocking_patcher.start()
        self.addCleanup(blocking_patcher.stop)

        # os.fstat reports a socket so the transport accepts the mock pipe.
        fstat_patcher = mock.patch('os.fstat')
        m_fstat = fstat_patcher.start()
        st = mock.Mock()
        st.st_mode = stat.S_IFSOCK
        m_fstat.return_value = st
        self.addCleanup(fstat_patcher.stop)

    def write_pipe_transport(self, waiter=None):
        # Build a transport over the mocked pipe; closed automatically.
        transport = unix_events._UnixWritePipeTransport(self.loop, self.pipe,
                                                        self.protocol,
                                                        waiter=waiter)
        self.addCleanup(close_pipe_transport, transport)
        return transport

    def test_ctor(self):
        waiter = self.loop.create_future()
        tr = self.write_pipe_transport(waiter=waiter)
        self.loop.run_until_complete(waiter)

        self.protocol.connection_made.assert_called_with(tr)
        self.loop.assert_reader(5, tr._read_ready)
        # assertIsNone for consistency with UnixReadPipeTransportTests
        # (was assertEqual(None, ...)).
        self.assertIsNone(waiter.result())

    def test_can_write_eof(self):
        tr = self.write_pipe_transport()
        self.assertTrue(tr.can_write_eof())

    @mock.patch('os.write')
    def test_write(self, m_write):
        tr = self.write_pipe_transport()
        m_write.return_value = 4
        tr.write(b'data')
        m_write.assert_called_with(5, b'data')
        self.assertFalse(self.loop.writers)
        self.assertEqual(bytearray(), tr._buffer)

    @mock.patch('os.write')
    def test_write_no_data(self, m_write):
        tr = self.write_pipe_transport()
        tr.write(b'')
        self.assertFalse(m_write.called)
        self.assertFalse(self.loop.writers)
        self.assertEqual(bytearray(b''), tr._buffer)

    @mock.patch('os.write')
    def test_write_partial(self, m_write):
        # Only 2 of 4 bytes go out: the rest is buffered and a writer
        # callback is registered.
        tr = self.write_pipe_transport()
        m_write.return_value = 2
        tr.write(b'data')
        self.loop.assert_writer(5, tr._write_ready)
        self.assertEqual(bytearray(b'ta'), tr._buffer)

    @mock.patch('os.write')
    def test_write_buffer(self, m_write):
        # With data already buffered, write() only appends; no syscall.
        tr = self.write_pipe_transport()
        self.loop.add_writer(5, tr._write_ready)
        tr._buffer = bytearray(b'previous')
        tr.write(b'data')
        self.assertFalse(m_write.called)
        self.loop.assert_writer(5, tr._write_ready)
        self.assertEqual(bytearray(b'previousdata'), tr._buffer)

    @mock.patch('os.write')
    def test_write_again(self, m_write):
        # EWOULDBLOCK: everything is buffered for later.
        tr = self.write_pipe_transport()
        m_write.side_effect = BlockingIOError()
        tr.write(b'data')
        m_write.assert_called_with(5, bytearray(b'data'))
        self.loop.assert_writer(5, tr._write_ready)
        self.assertEqual(bytearray(b'data'), tr._buffer)

    @mock.patch('asyncio.unix_events.logger')
    @mock.patch('os.write')
    def test_write_err(self, m_write, m_log):
        # OSError is fatal; further writes only bump _conn_lost, and
        # repeated writes eventually log a warning.
        tr = self.write_pipe_transport()
        err = OSError()
        m_write.side_effect = err
        tr._fatal_error = mock.Mock()
        tr.write(b'data')
        m_write.assert_called_with(5, b'data')
        self.assertFalse(self.loop.writers)
        self.assertEqual(bytearray(), tr._buffer)
        tr._fatal_error.assert_called_with(
            err,
            'Fatal write error on pipe transport')
        self.assertEqual(1, tr._conn_lost)

        tr.write(b'data')
        self.assertEqual(2, tr._conn_lost)
        tr.write(b'data')
        tr.write(b'data')
        tr.write(b'data')
        tr.write(b'data')
        # This is a bit overspecified. :-(
        m_log.warning.assert_called_with(
            'pipe closed by peer or os.write(pipe, data) raised exception.')
        tr.close()

    @mock.patch('os.write')
    def test_write_close(self, m_write):
        tr = self.write_pipe_transport()
        tr._read_ready()  # pipe was closed by peer

        tr.write(b'data')
        self.assertEqual(tr._conn_lost, 1)
        tr.write(b'data')
        self.assertEqual(tr._conn_lost, 2)

    def test__read_ready(self):
        # Readability on a write pipe means the peer closed it.
        tr = self.write_pipe_transport()
        tr._read_ready()
        self.assertFalse(self.loop.readers)
        self.assertFalse(self.loop.writers)
        self.assertTrue(tr.is_closing())
        test_utils.run_briefly(self.loop)
        self.protocol.connection_lost.assert_called_with(None)

    @mock.patch('os.write')
    def test__write_ready(self, m_write):
        tr = self.write_pipe_transport()
        self.loop.add_writer(5, tr._write_ready)
        tr._buffer = bytearray(b'data')
        m_write.return_value = 4
        tr._write_ready()
        self.assertFalse(self.loop.writers)
        self.assertEqual(bytearray(), tr._buffer)

    @mock.patch('os.write')
    def test__write_ready_partial(self, m_write):
        tr = self.write_pipe_transport()
        self.loop.add_writer(5, tr._write_ready)
        tr._buffer = bytearray(b'data')
        m_write.return_value = 3
        tr._write_ready()
        self.loop.assert_writer(5, tr._write_ready)
        self.assertEqual(bytearray(b'a'), tr._buffer)

    @mock.patch('os.write')
    def test__write_ready_again(self, m_write):
        tr = self.write_pipe_transport()
        self.loop.add_writer(5, tr._write_ready)
        tr._buffer = bytearray(b'data')
        m_write.side_effect = BlockingIOError()
        tr._write_ready()
        m_write.assert_called_with(5, bytearray(b'data'))
        self.loop.assert_writer(5, tr._write_ready)
        self.assertEqual(bytearray(b'data'), tr._buffer)

    @mock.patch('os.write')
    def test__write_ready_empty(self, m_write):
        # A zero-byte write leaves everything in place for a retry.
        tr = self.write_pipe_transport()
        self.loop.add_writer(5, tr._write_ready)
        tr._buffer = bytearray(b'data')
        m_write.return_value = 0
        tr._write_ready()
        m_write.assert_called_with(5, bytearray(b'data'))
        self.loop.assert_writer(5, tr._write_ready)
        self.assertEqual(bytearray(b'data'), tr._buffer)

    @mock.patch('asyncio.log.logger.error')
    @mock.patch('os.write')
    def test__write_ready_err(self, m_write, m_logexc):
        # OSError in the writer callback closes the transport without
        # logging an error (the exception goes to connection_lost).
        tr = self.write_pipe_transport()
        self.loop.add_writer(5, tr._write_ready)
        tr._buffer = bytearray(b'data')
        m_write.side_effect = err = OSError()
        tr._write_ready()
        self.assertFalse(self.loop.writers)
        self.assertFalse(self.loop.readers)
        self.assertEqual(bytearray(), tr._buffer)
        self.assertTrue(tr.is_closing())
        m_logexc.assert_not_called()
        self.assertEqual(1, tr._conn_lost)
        test_utils.run_briefly(self.loop)
        self.protocol.connection_lost.assert_called_with(err)

    @mock.patch('os.write')
    def test__write_ready_closing(self, m_write):
        # Flushing the last buffered bytes on a closing transport also
        # closes the underlying pipe.
        tr = self.write_pipe_transport()
        self.loop.add_writer(5, tr._write_ready)
        tr._closing = True
        tr._buffer = bytearray(b'data')
        m_write.return_value = 4
        tr._write_ready()
        self.assertFalse(self.loop.writers)
        self.assertFalse(self.loop.readers)
        self.assertEqual(bytearray(), tr._buffer)
        self.protocol.connection_lost.assert_called_with(None)
        self.pipe.close.assert_called_with()

    @mock.patch('os.write')
    def test_abort(self, m_write):
        # abort() discards the buffer and closes immediately.
        tr = self.write_pipe_transport()
        self.loop.add_writer(5, tr._write_ready)
        self.loop.add_reader(5, tr._read_ready)
        tr._buffer = [b'da', b'ta']
        tr.abort()
        self.assertFalse(m_write.called)
        self.assertFalse(self.loop.readers)
        self.assertFalse(self.loop.writers)
        self.assertEqual([], tr._buffer)
        self.assertTrue(tr.is_closing())
        test_utils.run_briefly(self.loop)
        self.protocol.connection_lost.assert_called_with(None)

    def test__call_connection_lost(self):
        tr = self.write_pipe_transport()
        self.assertIsNotNone(tr._protocol)
        self.assertIsNotNone(tr._loop)

        err = None
        tr._call_connection_lost(err)
        self.protocol.connection_lost.assert_called_with(err)
        self.pipe.close.assert_called_with()

        # References are dropped afterwards to break reference cycles.
        self.assertIsNone(tr._protocol)
        self.assertIsNone(tr._loop)

    def test__call_connection_lost_with_err(self):
        tr = self.write_pipe_transport()
        self.assertIsNotNone(tr._protocol)
        self.assertIsNotNone(tr._loop)

        err = OSError()
        tr._call_connection_lost(err)
        self.protocol.connection_lost.assert_called_with(err)
        self.pipe.close.assert_called_with()

        self.assertIsNone(tr._protocol)
        self.assertIsNone(tr._loop)

    def test_close(self):
        tr = self.write_pipe_transport()
        tr.write_eof = mock.Mock()
        tr.close()
        tr.write_eof.assert_called_with()

        # closing the transport twice must not fail
        tr.close()

    def test_close_closing(self):
        tr = self.write_pipe_transport()
        tr.write_eof = mock.Mock()
        tr._closing = True
        tr.close()
        self.assertFalse(tr.write_eof.called)

    def test_write_eof(self):
        tr = self.write_pipe_transport()
        tr.write_eof()
        self.assertTrue(tr.is_closing())
        self.assertFalse(self.loop.readers)
        test_utils.run_briefly(self.loop)
        self.protocol.connection_lost.assert_called_with(None)

    def test_write_eof_pending(self):
        # With data still buffered, write_eof() marks closing but defers
        # connection_lost until the buffer drains.
        tr = self.write_pipe_transport()
        tr._buffer = [b'data']
        tr.write_eof()
        self.assertTrue(tr.is_closing())
        self.assertFalse(self.protocol.connection_lost.called)
class AbstractChildWatcherTests(unittest.TestCase):
    """Every AbstractChildWatcher entry point must raise NotImplementedError."""

    def test_not_implemented(self):
        f = mock.Mock()
        watcher = asyncio.AbstractChildWatcher()
        for invoke in (
            lambda: watcher.add_child_handler(f, f),
            lambda: watcher.remove_child_handler(f),
            lambda: watcher.attach_loop(f),
            lambda: watcher.close(),
            lambda: watcher.is_active(),
            lambda: watcher.__enter__(),
            lambda: watcher.__exit__(f, f, f),
        ):
            self.assertRaises(NotImplementedError, invoke)
class BaseChildWatcherTests(unittest.TestCase):
    """BaseChildWatcher leaves _do_waitpid abstract."""

    def test_not_implemented(self):
        watcher = unix_events.BaseChildWatcher()
        arg = mock.Mock()
        self.assertRaises(NotImplementedError, watcher._do_waitpid, arg)
# Bundle of the patched os.wait* mocks handed to each @waitpid_mocks test.
WaitPidMocks = collections.namedtuple(
    "WaitPidMocks",
    "waitpid WIFEXITED WIFSIGNALED WEXITSTATUS WTERMSIG",
)
class ChildWatcherTestsMixin:
    """Shared child-watcher tests; subclasses supply create_watcher()."""

    # Suppress log warnings emitted when unexpected zombies are reaped.
    ignore_warnings = mock.patch.object(log.logger, "warning")
def setUp(self):
    super().setUp()
    self.loop = self.new_test_loop()
    self.running = False  # simulated "children still alive" flag, read by waitpid()
    self.zombies = {}  # pid -> encoded wait status, consumed by waitpid()

    # Patch add_signal_handler so attaching the watcher does not install
    # a real SIGCHLD handler on the test loop.
    with mock.patch.object(
            self.loop, "add_signal_handler") as self.m_add_signal_handler:
        self.watcher = self.create_watcher()
        self.watcher.attach_loop(self.loop)
def waitpid(self, pid, flags):
    """Fake os.waitpid() driven by self.zombies and self.running."""
    # SafeChildWatcher never waits on pid -1; it polls each pid directly.
    if isinstance(self.watcher, asyncio.SafeChildWatcher) or pid != -1:
        self.assertGreater(pid, 0)
    try:
        if pid < 0:
            return self.zombies.popitem()
        else:
            return pid, self.zombies.pop(pid)
    except KeyError:
        pass
    if self.running:
        return 0, 0  # no zombie to reap, child still running
    else:
        raise ChildProcessError()
def add_zombie(self, pid, returncode):
    # Encode like the fake wait statuses below: exited statuses are offset
    # by 32768 (see WIFEXITED/WEXITSTATUS); negative returncodes land in
    # the "signaled" range handled by WIFSIGNALED/WTERMSIG.
    self.zombies[pid] = returncode + 32768
def WIFEXITED(self, status):
    """Fake os.WIFEXITED: statuses at or above 32768 encode a normal exit."""
    return not (status < 32768)
def WIFSIGNALED(self, status):
    """Fake os.WIFSIGNALED: the signaled range is (32700, 32768) exclusive."""
    return status > 32700 and status < 32768
def WEXITSTATUS(self, status):
    # Only valid for "exited" statuses; recovers the original returncode.
    self.assertTrue(self.WIFEXITED(status))
    return status - 32768
def WTERMSIG(self, status):
    # Only valid for "signaled" statuses; recovers the signal number.
    self.assertTrue(self.WIFSIGNALED(status))
    return 32768 - status
def test_create_watcher(self):
    # Attaching the watcher must register exactly one SIGCHLD handler.
    self.m_add_signal_handler.assert_called_once_with(
        signal.SIGCHLD, self.watcher._sig_chld)
def waitpid_mocks(func):
    """Decorator: run *func* with the os.wait* helpers patched to the
    fakes above, passing the resulting mocks as a WaitPidMocks bundle."""
    def wrapped_func(self):
        def patch(target, wrapper):
            # wraps= keeps the fake behaviour while recording calls.
            return mock.patch(target, wraps=wrapper,
                              new_callable=mock.Mock)

        with patch('os.WTERMSIG', self.WTERMSIG) as m_WTERMSIG, \
             patch('os.WEXITSTATUS', self.WEXITSTATUS) as m_WEXITSTATUS, \
             patch('os.WIFSIGNALED', self.WIFSIGNALED) as m_WIFSIGNALED, \
             patch('os.WIFEXITED', self.WIFEXITED) as m_WIFEXITED, \
             patch('os.waitpid', self.waitpid) as m_waitpid:
            func(self, WaitPidMocks(m_waitpid,
                                    m_WIFEXITED, m_WIFSIGNALED,
                                    m_WEXITSTATUS, m_WTERMSIG,
                                    ))
    return wrapped_func
@waitpid_mocks
def test_sigchld(self, m):
    """Full lifecycle of one watched child: running -> exited -> reaped."""
    # register a child
    callback = mock.Mock()

    with self.watcher:
        self.running = True
        self.watcher.add_child_handler(42, callback, 9, 10, 14)

    self.assertFalse(callback.called)
    self.assertFalse(m.WIFEXITED.called)
    self.assertFalse(m.WIFSIGNALED.called)
    self.assertFalse(m.WEXITSTATUS.called)
    self.assertFalse(m.WTERMSIG.called)

    # child is running
    self.watcher._sig_chld()

    self.assertFalse(callback.called)
    self.assertFalse(m.WIFEXITED.called)
    self.assertFalse(m.WIFSIGNALED.called)
    self.assertFalse(m.WEXITSTATUS.called)
    self.assertFalse(m.WTERMSIG.called)

    # child terminates (returncode 12)
    self.running = False
    self.add_zombie(42, 12)
    self.watcher._sig_chld()

    self.assertTrue(m.WIFEXITED.called)
    self.assertTrue(m.WEXITSTATUS.called)
    self.assertFalse(m.WTERMSIG.called)
    callback.assert_called_once_with(42, 12, 9, 10, 14)

    m.WIFSIGNALED.reset_mock()
    m.WIFEXITED.reset_mock()
    m.WEXITSTATUS.reset_mock()
    callback.reset_mock()

    # ensure that the child is effectively reaped
    self.add_zombie(42, 13)
    with self.ignore_warnings:
        self.watcher._sig_chld()

    self.assertFalse(callback.called)
    self.assertFalse(m.WTERMSIG.called)

    m.WIFSIGNALED.reset_mock()
    m.WIFEXITED.reset_mock()
    m.WEXITSTATUS.reset_mock()

    # sigchld called again
    self.zombies.clear()
    self.watcher._sig_chld()

    self.assertFalse(callback.called)
    self.assertFalse(m.WIFEXITED.called)
    self.assertFalse(m.WIFSIGNALED.called)
    self.assertFalse(m.WEXITSTATUS.called)
    self.assertFalse(m.WTERMSIG.called)
@waitpid_mocks
def test_sigchld_two_children(self, m):
    """Two watched children terminating at different times are reported
    independently to their own callbacks."""
    callback1 = mock.Mock()
    callback2 = mock.Mock()

    # register child 1
    with self.watcher:
        self.running = True
        self.watcher.add_child_handler(43, callback1, 7, 8)

    self.assertFalse(callback1.called)
    self.assertFalse(callback2.called)
    self.assertFalse(m.WIFEXITED.called)
    self.assertFalse(m.WIFSIGNALED.called)
    self.assertFalse(m.WEXITSTATUS.called)
    self.assertFalse(m.WTERMSIG.called)

    # register child 2
    with self.watcher:
        self.watcher.add_child_handler(44, callback2, 147, 18)

    self.assertFalse(callback1.called)
    self.assertFalse(callback2.called)
    self.assertFalse(m.WIFEXITED.called)
    self.assertFalse(m.WIFSIGNALED.called)
    self.assertFalse(m.WEXITSTATUS.called)
    self.assertFalse(m.WTERMSIG.called)

    # children are running
    self.watcher._sig_chld()

    self.assertFalse(callback1.called)
    self.assertFalse(callback2.called)
    self.assertFalse(m.WIFEXITED.called)
    self.assertFalse(m.WIFSIGNALED.called)
    self.assertFalse(m.WEXITSTATUS.called)
    self.assertFalse(m.WTERMSIG.called)

    # child 1 terminates (signal 3)
    self.add_zombie(43, -3)
    self.watcher._sig_chld()

    callback1.assert_called_once_with(43, -3, 7, 8)
    self.assertFalse(callback2.called)
    self.assertTrue(m.WIFSIGNALED.called)
    self.assertFalse(m.WEXITSTATUS.called)
    self.assertTrue(m.WTERMSIG.called)

    m.WIFSIGNALED.reset_mock()
    m.WIFEXITED.reset_mock()
    m.WTERMSIG.reset_mock()
    callback1.reset_mock()

    # child 2 still running
    self.watcher._sig_chld()

    self.assertFalse(callback1.called)
    self.assertFalse(callback2.called)
    self.assertFalse(m.WIFEXITED.called)
    self.assertFalse(m.WIFSIGNALED.called)
    self.assertFalse(m.WEXITSTATUS.called)
    self.assertFalse(m.WTERMSIG.called)

    # child 2 terminates (code 108)
    self.add_zombie(44, 108)
    self.running = False
    self.watcher._sig_chld()

    callback2.assert_called_once_with(44, 108, 147, 18)
    self.assertFalse(callback1.called)
    self.assertTrue(m.WIFEXITED.called)
    self.assertTrue(m.WEXITSTATUS.called)
    self.assertFalse(m.WTERMSIG.called)

    m.WIFSIGNALED.reset_mock()
    m.WIFEXITED.reset_mock()
    m.WEXITSTATUS.reset_mock()
    callback2.reset_mock()

    # ensure that the children are effectively reaped
    self.add_zombie(43, 14)
    self.add_zombie(44, 15)
    with self.ignore_warnings:
        self.watcher._sig_chld()

    self.assertFalse(callback1.called)
    self.assertFalse(callback2.called)
    self.assertFalse(m.WTERMSIG.called)

    m.WIFSIGNALED.reset_mock()
    m.WIFEXITED.reset_mock()
    m.WEXITSTATUS.reset_mock()

    # sigchld called again
    self.zombies.clear()
    self.watcher._sig_chld()

    self.assertFalse(callback1.called)
    self.assertFalse(callback2.called)
    self.assertFalse(m.WIFEXITED.called)
    self.assertFalse(m.WIFSIGNALED.called)
    self.assertFalse(m.WEXITSTATUS.called)
    self.assertFalse(m.WTERMSIG.called)
@waitpid_mocks
def test_sigchld_two_children_terminating_together(self, m):
    """Two children terminating before one _sig_chld() call: both
    callbacks fire from the single invocation."""
    callback1 = mock.Mock()
    callback2 = mock.Mock()

    # register child 1
    with self.watcher:
        self.running = True
        self.watcher.add_child_handler(45, callback1, 17, 8)

    self.assertFalse(callback1.called)
    self.assertFalse(callback2.called)
    self.assertFalse(m.WIFEXITED.called)
    self.assertFalse(m.WIFSIGNALED.called)
    self.assertFalse(m.WEXITSTATUS.called)
    self.assertFalse(m.WTERMSIG.called)

    # register child 2
    with self.watcher:
        self.watcher.add_child_handler(46, callback2, 1147, 18)

    self.assertFalse(callback1.called)
    self.assertFalse(callback2.called)
    self.assertFalse(m.WIFEXITED.called)
    self.assertFalse(m.WIFSIGNALED.called)
    self.assertFalse(m.WEXITSTATUS.called)
    self.assertFalse(m.WTERMSIG.called)

    # children are running
    self.watcher._sig_chld()

    self.assertFalse(callback1.called)
    self.assertFalse(callback2.called)
    self.assertFalse(m.WIFEXITED.called)
    self.assertFalse(m.WIFSIGNALED.called)
    self.assertFalse(m.WEXITSTATUS.called)
    self.assertFalse(m.WTERMSIG.called)

    # child 1 terminates (code 78)
    # child 2 terminates (signal 5)
    self.add_zombie(45, 78)
    self.add_zombie(46, -5)
    self.running = False
    self.watcher._sig_chld()

    callback1.assert_called_once_with(45, 78, 17, 8)
    callback2.assert_called_once_with(46, -5, 1147, 18)
    self.assertTrue(m.WIFSIGNALED.called)
    self.assertTrue(m.WIFEXITED.called)
    self.assertTrue(m.WEXITSTATUS.called)
    self.assertTrue(m.WTERMSIG.called)

    m.WIFSIGNALED.reset_mock()
    m.WIFEXITED.reset_mock()
    m.WTERMSIG.reset_mock()
    m.WEXITSTATUS.reset_mock()
    callback1.reset_mock()
    callback2.reset_mock()

    # ensure that the children are effectively reaped
    self.add_zombie(45, 14)
    self.add_zombie(46, 15)
    with self.ignore_warnings:
        self.watcher._sig_chld()

    self.assertFalse(callback1.called)
    self.assertFalse(callback2.called)
    self.assertFalse(m.WTERMSIG.called)
@waitpid_mocks
def test_sigchld_race_condition(self, m):
    """A child that dies before registration is reported at add time."""
    # register a child
    callback = mock.Mock()

    with self.watcher:
        # child terminates before being registered
        self.add_zombie(50, 4)
        self.watcher._sig_chld()

        self.watcher.add_child_handler(50, callback, 1, 12)

    callback.assert_called_once_with(50, 4, 1, 12)
    callback.reset_mock()

    # ensure that the child is effectively reaped
    self.add_zombie(50, -1)
    with self.ignore_warnings:
        self.watcher._sig_chld()

    self.assertFalse(callback.called)
@waitpid_mocks
def test_sigchld_replace_handler(self, m):
    """Registering a second handler for the same pid replaces the first;
    only the replacement callback is invoked on termination."""
    callback1 = mock.Mock()
    callback2 = mock.Mock()

    # register a child
    with self.watcher:
        self.running = True
        self.watcher.add_child_handler(51, callback1, 19)

    self.assertFalse(callback1.called)
    self.assertFalse(callback2.called)
    self.assertFalse(m.WIFEXITED.called)
    self.assertFalse(m.WIFSIGNALED.called)
    self.assertFalse(m.WEXITSTATUS.called)
    self.assertFalse(m.WTERMSIG.called)

    # register the same child again
    with self.watcher:
        self.watcher.add_child_handler(51, callback2, 21)

    self.assertFalse(callback1.called)
    self.assertFalse(callback2.called)
    self.assertFalse(m.WIFEXITED.called)
    self.assertFalse(m.WIFSIGNALED.called)
    self.assertFalse(m.WEXITSTATUS.called)
    self.assertFalse(m.WTERMSIG.called)

    # child terminates (signal 8)
    self.running = False
    self.add_zombie(51, -8)
    self.watcher._sig_chld()

    callback2.assert_called_once_with(51, -8, 21)
    self.assertFalse(callback1.called)
    self.assertTrue(m.WIFSIGNALED.called)
    self.assertFalse(m.WEXITSTATUS.called)
    self.assertTrue(m.WTERMSIG.called)

    m.WIFSIGNALED.reset_mock()
    m.WIFEXITED.reset_mock()
    m.WTERMSIG.reset_mock()
    callback2.reset_mock()

    # ensure that the child is effectively reaped
    self.add_zombie(51, 13)
    with self.ignore_warnings:
        self.watcher._sig_chld()

    self.assertFalse(callback1.called)
    self.assertFalse(callback2.called)
    self.assertFalse(m.WTERMSIG.called)
@waitpid_mocks
def test_sigchld_remove_handler(self, m):
    """After remove_child_handler(), the child's death triggers no callback."""
    callback = mock.Mock()

    # register a child
    with self.watcher:
        self.running = True
        self.watcher.add_child_handler(52, callback, 1984)

    self.assertFalse(callback.called)
    self.assertFalse(m.WIFEXITED.called)
    self.assertFalse(m.WIFSIGNALED.called)
    self.assertFalse(m.WEXITSTATUS.called)
    self.assertFalse(m.WTERMSIG.called)

    # unregister the child
    self.watcher.remove_child_handler(52)

    self.assertFalse(callback.called)
    self.assertFalse(m.WIFEXITED.called)
    self.assertFalse(m.WIFSIGNALED.called)
    self.assertFalse(m.WEXITSTATUS.called)
    self.assertFalse(m.WTERMSIG.called)

    # child terminates (code 99)
    self.running = False
    self.add_zombie(52, 99)
    with self.ignore_warnings:
        self.watcher._sig_chld()

    self.assertFalse(callback.called)
@waitpid_mocks
def test_sigchld_unknown_status(self, m):
    """A status that is neither WIFEXITED nor WIFSIGNALED is passed through
    to the callback unchanged."""
    callback = mock.Mock()

    # register a child
    with self.watcher:
        self.running = True
        self.watcher.add_child_handler(53, callback, -19)

    self.assertFalse(callback.called)
    self.assertFalse(m.WIFEXITED.called)
    self.assertFalse(m.WIFSIGNALED.called)
    self.assertFalse(m.WEXITSTATUS.called)
    self.assertFalse(m.WTERMSIG.called)

    # terminate with unknown status (writes the zombie table directly so the
    # mocked WIFEXITED/WIFSIGNALED both report False for this status)
    self.zombies[53] = 1178
    self.running = False
    self.watcher._sig_chld()

    callback.assert_called_once_with(53, 1178, -19)
    self.assertTrue(m.WIFEXITED.called)
    self.assertTrue(m.WIFSIGNALED.called)
    self.assertFalse(m.WEXITSTATUS.called)
    self.assertFalse(m.WTERMSIG.called)

    callback.reset_mock()
    m.WIFEXITED.reset_mock()
    m.WIFSIGNALED.reset_mock()

    # ensure that the child is effectively reaped
    self.add_zombie(53, 101)
    with self.ignore_warnings:
        self.watcher._sig_chld()

    self.assertFalse(callback.called)
@waitpid_mocks
def test_remove_child_handler(self, m):
    """remove_child_handler() returns True on first removal, False on
    repeats, and removed pids never fire their callbacks."""
    callback1 = mock.Mock()
    callback2 = mock.Mock()
    callback3 = mock.Mock()

    # register children
    with self.watcher:
        self.running = True
        self.watcher.add_child_handler(54, callback1, 1)
        self.watcher.add_child_handler(55, callback2, 2)
        self.watcher.add_child_handler(56, callback3, 3)

    # remove child handler 1
    self.assertTrue(self.watcher.remove_child_handler(54))

    # remove child handler 2 multiple times
    self.assertTrue(self.watcher.remove_child_handler(55))
    self.assertFalse(self.watcher.remove_child_handler(55))
    self.assertFalse(self.watcher.remove_child_handler(55))

    # all children terminate
    self.add_zombie(54, 0)
    self.add_zombie(55, 1)
    self.add_zombie(56, 2)
    self.running = False
    with self.ignore_warnings:
        self.watcher._sig_chld()

    # only the still-registered pid 56 is reported
    self.assertFalse(callback1.called)
    self.assertFalse(callback2.called)
    callback3.assert_called_once_with(56, 2, 3)
@waitpid_mocks
def test_sigchld_unhandled_exception(self, m):
    """An exception raised by waitpid inside _sig_chld is logged via
    log.logger.error and not propagated to the signal handler's caller."""
    callback = mock.Mock()

    # register a child
    with self.watcher:
        self.running = True
        self.watcher.add_child_handler(57, callback)

    # raise an exception
    m.waitpid.side_effect = ValueError

    with mock.patch.object(log.logger,
                           'error') as m_error:
        self.assertEqual(self.watcher._sig_chld(), None)
        self.assertTrue(m_error.called)
@waitpid_mocks
def test_sigchld_child_reaped_elsewhere(self, m):
    """If something else reaps the child first, SafeChildWatcher reports the
    conventional unknown status 255; FastChildWatcher cannot recover."""
    # register a child
    callback = mock.Mock()

    with self.watcher:
        self.running = True
        self.watcher.add_child_handler(58, callback)

    self.assertFalse(callback.called)
    self.assertFalse(m.WIFEXITED.called)
    self.assertFalse(m.WIFSIGNALED.called)
    self.assertFalse(m.WEXITSTATUS.called)
    self.assertFalse(m.WTERMSIG.called)

    # child terminates
    self.running = False
    self.add_zombie(58, 4)

    # waitpid is called elsewhere
    os.waitpid(58, os.WNOHANG)

    m.waitpid.reset_mock()

    # sigchld
    with self.ignore_warnings:
        self.watcher._sig_chld()

    if isinstance(self.watcher, asyncio.FastChildWatcher):
        # here the FastChildWatcher enters a deadlock
        # (there is no way to prevent it)
        self.assertFalse(callback.called)
    else:
        callback.assert_called_once_with(58, 255)
@waitpid_mocks
def test_sigchld_unknown_pid_during_registration(self, m):
    """Reaping an unknown pid during registration must not break the
    delivery of statuses for pids that do get registered."""
    # register two children
    callback1 = mock.Mock()
    callback2 = mock.Mock()

    with self.ignore_warnings, self.watcher:
        self.running = True
        # child 1 terminates
        self.add_zombie(591, 7)
        # an unknown child terminates
        self.add_zombie(593, 17)

        self.watcher._sig_chld()

        self.watcher.add_child_handler(591, callback1)
        self.watcher.add_child_handler(592, callback2)

    # pid 591 was already reaped, so its callback fires at registration;
    # pid 592 never terminated, so its callback must not fire
    callback1.assert_called_once_with(591, 7)
    self.assertFalse(callback2.called)
@waitpid_mocks
def test_set_loop(self, m):
    """attach_loop() moves the SIGCHLD handler from the old loop to the
    new one, and existing registrations keep working afterwards."""
    # register a child
    callback = mock.Mock()

    with self.watcher:
        self.running = True
        self.watcher.add_child_handler(60, callback)

    # attach a new loop
    old_loop = self.loop
    self.loop = self.new_test_loop()
    patch = mock.patch.object

    with patch(old_loop, "remove_signal_handler") as m_old_remove, \
         patch(self.loop, "add_signal_handler") as m_new_add:

        self.watcher.attach_loop(self.loop)

        m_old_remove.assert_called_once_with(
            signal.SIGCHLD)
        m_new_add.assert_called_once_with(
            signal.SIGCHLD, self.watcher._sig_chld)

    # child terminates
    self.running = False
    self.add_zombie(60, 9)
    self.watcher._sig_chld()

    callback.assert_called_once_with(60, 9)
@waitpid_mocks
def test_set_loop_race_condition(self, m):
    """Children that die while no loop is attached (SIGCHLD uncaught) are
    reported as soon as a new loop is attached."""
    # register 3 children
    callback1 = mock.Mock()
    callback2 = mock.Mock()
    callback3 = mock.Mock()

    with self.watcher:
        self.running = True
        self.watcher.add_child_handler(61, callback1)
        self.watcher.add_child_handler(62, callback2)
        self.watcher.add_child_handler(622, callback3)

    # detach the loop
    old_loop = self.loop
    self.loop = None

    with mock.patch.object(
            old_loop, "remove_signal_handler") as m_remove_signal_handler:
        with self.assertWarnsRegex(
                RuntimeWarning, 'A loop is being detached'):
            self.watcher.attach_loop(None)

        m_remove_signal_handler.assert_called_once_with(
            signal.SIGCHLD)

    # child 1 & 2 terminate
    self.add_zombie(61, 11)
    self.add_zombie(62, -5)

    # SIGCHLD was not caught
    self.assertFalse(callback1.called)
    self.assertFalse(callback2.called)
    self.assertFalse(callback3.called)

    # attach a new loop
    self.loop = self.new_test_loop()

    with mock.patch.object(
            self.loop, "add_signal_handler") as m_add_signal_handler:

        # attaching triggers an immediate scan that reaps the missed children
        self.watcher.attach_loop(self.loop)

        m_add_signal_handler.assert_called_once_with(
            signal.SIGCHLD, self.watcher._sig_chld)
        callback1.assert_called_once_with(61, 11)  # race condition!
        callback2.assert_called_once_with(62, -5)  # race condition!
        self.assertFalse(callback3.called)

    callback1.reset_mock()
    callback2.reset_mock()

    # child 3 terminates
    self.running = False
    self.add_zombie(622, 19)
    self.watcher._sig_chld()

    self.assertFalse(callback1.called)
    self.assertFalse(callback2.called)
    callback3.assert_called_once_with(622, 19)
@waitpid_mocks
def test_close(self, m):
    """close() removes the SIGCHLD handler from the loop and clears the
    watcher's internal callback (and, for FastChildWatcher, zombie) state."""
    # register two children
    callback1 = mock.Mock()

    with self.watcher:
        self.running = True
        # child 1 terminates
        self.add_zombie(63, 9)
        # other child terminates
        self.add_zombie(65, 18)
        self.watcher._sig_chld()

        self.watcher.add_child_handler(63, callback1)
        self.watcher.add_child_handler(64, callback1)

        self.assertEqual(len(self.watcher._callbacks), 1)
        if isinstance(self.watcher, asyncio.FastChildWatcher):
            self.assertEqual(len(self.watcher._zombies), 1)

        with mock.patch.object(
                self.loop,
                "remove_signal_handler") as m_remove_signal_handler:
            self.watcher.close()

            m_remove_signal_handler.assert_called_once_with(
                signal.SIGCHLD)
            self.assertFalse(self.watcher._callbacks)
            if isinstance(self.watcher, asyncio.FastChildWatcher):
                self.assertFalse(self.watcher._zombies)
class SafeChildWatcherTests(ChildWatcherTestsMixin, test_utils.TestCase):
    """Run the shared child-watcher test suite against SafeChildWatcher."""

    def create_watcher(self):
        # Factory hook consumed by ChildWatcherTestsMixin.setUp.
        return asyncio.SafeChildWatcher()
class FastChildWatcherTests(ChildWatcherTestsMixin, test_utils.TestCase):
    """Run the shared child-watcher test suite against FastChildWatcher."""

    def create_watcher(self):
        # Factory hook consumed by ChildWatcherTestsMixin.setUp.
        return asyncio.FastChildWatcher()
class PolicyTests(unittest.TestCase):
    """Tests for the default event loop policy's child-watcher handling."""

    def create_policy(self):
        # Factory hook so subclasses could substitute another policy.
        return asyncio.DefaultEventLoopPolicy()

    def test_get_default_child_watcher(self):
        """get_child_watcher() lazily creates a ThreadedChildWatcher and caches it."""
        policy = self.create_policy()
        self.assertIsNone(policy._watcher)
        watcher = policy.get_child_watcher()
        self.assertIsInstance(watcher, asyncio.ThreadedChildWatcher)
        self.assertIs(policy._watcher, watcher)
        self.assertIs(watcher, policy.get_child_watcher())

    def test_get_child_watcher_after_set(self):
        """set_child_watcher() replaces the policy's watcher for later lookups."""
        policy = self.create_policy()
        watcher = asyncio.FastChildWatcher()
        policy.set_child_watcher(watcher)
        self.assertIs(policy._watcher, watcher)
        self.assertIs(watcher, policy.get_child_watcher())

    def test_get_child_watcher_thread(self):
        """A watcher fetched from a non-main thread has no loop attached."""
        def f():
            policy.set_event_loop(policy.new_event_loop())

            self.assertIsInstance(policy.get_event_loop(),
                                  asyncio.AbstractEventLoop)
            watcher = policy.get_child_watcher()

            self.assertIsInstance(watcher, asyncio.SafeChildWatcher)
            # only the main thread attaches a loop to the watcher
            self.assertIsNone(watcher._loop)

            policy.get_event_loop().close()

        policy = self.create_policy()
        policy.set_child_watcher(asyncio.SafeChildWatcher())

        th = threading.Thread(target=f)
        th.start()
        th.join()

    def test_child_watcher_replace_mainloop_existing(self):
        """set_event_loop() re-attaches an existing watcher to the new loop."""
        policy = self.create_policy()
        loop = policy.get_event_loop()

        # Explicitly setup SafeChildWatcher,
        # default ThreadedChildWatcher has no _loop property
        watcher = asyncio.SafeChildWatcher()
        policy.set_child_watcher(watcher)
        watcher.attach_loop(loop)

        self.assertIs(watcher._loop, loop)

        new_loop = policy.new_event_loop()
        policy.set_event_loop(new_loop)

        self.assertIs(watcher._loop, new_loop)

        # setting no loop detaches the watcher as well
        policy.set_event_loop(None)

        self.assertIs(watcher._loop, None)

        loop.close()
        new_loop.close()
class TestFunctional(unittest.TestCase):
    """Functional tests for reader/writer registration argument validation."""

    def setUp(self):
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.loop)

    def tearDown(self):
        self.loop.close()
        asyncio.set_event_loop(None)

    def test_add_reader_invalid_argument(self):
        """Objects without a usable fileno are rejected with ValueError."""
        def assert_raises():
            return self.assertRaisesRegex(ValueError, r'Invalid file object')

        cb = lambda: None

        with assert_raises():
            self.loop.add_reader(object(), cb)
        with assert_raises():
            self.loop.add_writer(object(), cb)

        with assert_raises():
            self.loop.remove_reader(object())
        with assert_raises():
            self.loop.remove_writer(object())

    def test_add_reader_or_writer_transport_fd(self):
        """A file descriptor owned by a transport cannot be (un)registered
        directly, whether passed as a socket object or a raw fd."""
        def assert_raises():
            return self.assertRaisesRegex(
                RuntimeError,
                r'File descriptor .* is used by transport')

        async def runner():
            tr, pr = await self.loop.create_connection(
                lambda: asyncio.Protocol(), sock=rsock)

            try:
                cb = lambda: None

                with assert_raises():
                    self.loop.add_reader(rsock, cb)
                with assert_raises():
                    self.loop.add_reader(rsock.fileno(), cb)

                with assert_raises():
                    self.loop.remove_reader(rsock)
                with assert_raises():
                    self.loop.remove_reader(rsock.fileno())

                with assert_raises():
                    self.loop.add_writer(rsock, cb)
                with assert_raises():
                    self.loop.add_writer(rsock.fileno(), cb)

                with assert_raises():
                    self.loop.remove_writer(rsock)
                with assert_raises():
                    self.loop.remove_writer(rsock.fileno())
            finally:
                tr.close()

        rsock, wsock = socket.socketpair()
        try:
            self.loop.run_until_complete(runner())
        finally:
            rsock.close()
            wsock.close()
# Allow running this test module directly (outside the regrtest runner).
if __name__ == '__main__':
    unittest.main()
|
plugin.py | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# --------------------------------------------------------------------------
import atexit
import gzip
import json
import multiprocessing as mp
import os
import sys
import tempfile
import threading
import time
from collections import OrderedDict
from queue import Queue
import werkzeug
from tensorboard import errors
from tensorboard.plugins import base_plugin
from werkzeug import wrappers
from . import consts, io, utils
from .profiler import RunLoader
from .run import DistributedRunProfile, Run, RunProfile
logger = utils.get_logger()
class TorchProfilerPlugin(base_plugin.TBPlugin):
    """TensorBoard plugin for Torch Profiler.

    Background threads watch ``logdir`` for profiler run directories and load
    them asynchronously; the parsed runs are served through the HTTP routes
    returned by `get_plugin_apps`.
    """

    plugin_name = consts.PLUGIN_NAME
    # Sent with every response to stop browsers from MIME-sniffing.
    headers = [('X-Content-Type-Options', 'nosniff')]

    def __init__(self, context):
        """Instantiates TorchProfilerPlugin.

        Args:
            context: A base_plugin.TBContext instance.
        """
        super(TorchProfilerPlugin, self).__init__(context)
        start_method = os.getenv('TORCH_PROFILER_START_METHOD')
        if start_method:
            mp.set_start_method(start_method, force=True)
        self.logdir = io.abspath(context.logdir.rstrip('/'))

        # None until the first scan decides; is_active() blocks on the event.
        self._is_active = None
        self._is_active_initialized_event = threading.Event()

        # run name -> Run, kept sorted by name; guarded by _runs_lock.
        self._runs = OrderedDict()
        self._runs_lock = threading.Lock()

        self._cache = io.Cache()
        self._queue = Queue()
        # trace file path -> temp file holding the trace merged with GPU metrics.
        self._gpu_metrics_file_dict = {}
        monitor_runs = threading.Thread(target=self._monitor_runs, name="monitor_runs", daemon=True)
        monitor_runs.start()

        receive_runs = threading.Thread(target=self._receive_runs, name="receive_runs", daemon=True)
        receive_runs.start()

        def clean():
            # Close the cache and delete the GPU-metrics temp files on exit.
            logger.debug("starting cleanup...")
            self._cache.__exit__(*sys.exc_info())
            for temp_file in self._gpu_metrics_file_dict.values():
                logger.info("remove temporary file %s with gpu metrics" % temp_file)
                os.remove(temp_file)

        atexit.register(clean)

    def is_active(self):
        """Returns whether there is relevant data for the plugin to process.

        Blocks until the monitor thread has decided whether profiler data
        exists under logdir.
        """
        self._is_active_initialized_event.wait()
        return self._is_active

    def get_plugin_apps(self):
        """Maps URL routes to their WSGI handlers."""
        return {
            "/index.js": self.static_file_route,
            "/index.html": self.static_file_route,
            "/trace_viewer_full.html": self.static_file_route,
            "/trace_embedding.html": self.static_file_route,
            "/runs": self.runs_route,
            "/views": self.views_route,
            "/workers": self.workers_route,
            "/spans": self.spans_route,
            "/overview": self.overview_route,
            "/operation": self.operation_pie_route,
            "/operation/table": self.operation_table_route,
            "/operation/stack": self.operation_stack_route,
            "/kernel": self.kernel_pie_route,
            "/kernel/table": self.kernel_table_route,
            "/trace": self.trace_route,
            "/distributed/gpuinfo": self.dist_gpu_info_route,
            "/distributed/overlap": self.comm_overlap_route,
            "/distributed/waittime": self.comm_wait_route,
            "/distributed/commops": self.comm_ops_route,
            "/memory": self.memory_route,
        }

    def frontend_metadata(self):
        return base_plugin.FrontendMetadata(es_module_path="/index.js")

    @wrappers.Request.application
    def runs_route(self, request):
        """Lists the names of all loaded runs."""
        with self._runs_lock:
            names = list(self._runs.keys())
        return self.respond_as_json(names)

    @wrappers.Request.application
    def views_route(self, request):
        """Lists the display names of the views available for a run."""
        name = request.args.get("run")
        self._validate(run=name)
        run = self._get_run(name)
        # BUG FIX: this check was missing, so an unknown run name raised
        # AttributeError (500) instead of the NotFoundError (404) every
        # other route returns.
        self._check_run(run, name)
        views_list = [view.display_name for view in run.views]
        return self.respond_as_json(views_list)

    @wrappers.Request.application
    def workers_route(self, request):
        """Lists the workers that contribute data to the given view."""
        name = request.args.get("run")
        view = request.args.get("view")
        self._validate(run=name, view=view)
        run = self._get_run(name)
        self._check_run(run, name)
        # BUG FIX: get_workers was previously called twice, with the first
        # result assigned to an unused local.
        return self.respond_as_json(run.get_workers(view))

    @wrappers.Request.application
    def spans_route(self, request):
        """Lists the profiling spans recorded for a worker."""
        name = request.args.get("run")
        worker = request.args.get("worker")
        self._validate(run=name, worker=worker)
        run = self._get_run(name)
        self._check_run(run, name)
        return self.respond_as_json(run.get_spans(worker))

    @wrappers.Request.application
    def overview_route(self, request):
        """Returns the overview page data for one worker/span of a run."""
        name = request.args.get("run")
        worker = request.args.get("worker")
        span = request.args.get("span")
        self._validate(run=name, worker=worker)
        profile = self._get_profile(name, worker, span)
        self._check_normal_profile(profile, name, worker)
        run = self._get_run(name)
        data = profile.overview
        # GPU involvement is inferred from any runtime/kernel/memcpy activity.
        is_gpu_used = profile.has_runtime or profile.has_kernel or profile.has_memcpy_or_memset
        normal_workers = [worker for worker in run.workers if worker != 'All']
        data["environments"] = [{"title": "Number of Worker(s)", "value": str(len(normal_workers))},
                                {"title": "Device Type", "value": "GPU" if is_gpu_used else "CPU"}]
        if len(profile.gpu_ids) > 0:
            gpu_metrics_data, gpu_metrics_tooltip = profile.get_gpu_metrics_data_tooltip()
            data["gpu_metrics"] = {"title": "GPU Summary",
                                   "data": gpu_metrics_data,
                                   "tooltip": gpu_metrics_tooltip}

        return self.respond_as_json(data)

    @wrappers.Request.application
    def operation_pie_route(self, request):
        """Returns operation pie-chart data, optionally grouped by input shape."""
        profile = self._get_profile_for_request(request)

        group_by = request.args.get("group_by")
        if group_by == "OperationAndInputShape":
            return self.respond_as_json(profile.operation_pie_by_name_input)
        else:
            return self.respond_as_json(profile.operation_pie_by_name)

    @wrappers.Request.application
    def operation_table_route(self, request):
        """Returns operation table data, optionally grouped by input shape."""
        profile = self._get_profile_for_request(request)

        group_by = request.args.get("group_by")
        if group_by == "OperationAndInputShape":
            return self.respond_as_json(profile.operation_table_by_name_input)
        else:
            return self.respond_as_json(profile.operation_table_by_name)

    @wrappers.Request.application
    def operation_stack_route(self, request):
        """Returns the call-stack breakdown for a single operation."""
        profile = self._get_profile_for_request(request)

        op_name = request.args.get("op_name")
        self._validate(op_name=op_name)
        group_by = request.args.get("group_by")
        input_shape = request.args.get("input_shape")
        if group_by == "OperationAndInputShape":
            # Composite key format matches the one built by the profile loader.
            return self.respond_as_json(profile.operation_stack_by_name_input[str(op_name)+"###"+str(input_shape)])
        else:
            return self.respond_as_json(profile.operation_stack_by_name[str(op_name)])

    @wrappers.Request.application
    def kernel_pie_route(self, request):
        """Returns kernel pie-chart data."""
        profile = self._get_profile_for_request(request)
        return self.respond_as_json(profile.kernel_pie)

    @wrappers.Request.application
    def kernel_table_route(self, request):
        """Returns kernel table data, grouped by kernel or by kernel+op."""
        profile = self._get_profile_for_request(request)

        group_by = request.args.get("group_by")
        if group_by == "Kernel":
            return self.respond_as_json(profile.kernel_table)
        else:
            return self.respond_as_json(profile.kernel_op_table)

    @wrappers.Request.application
    def trace_route(self, request):
        """Serves the (gzip-encoded) chrome trace, merging GPU metrics in
        on first access and caching the merged file for later requests."""
        profile = self._get_profile_for_request(request)

        if not profile.has_kernel:  # Pure CPU.
            raw_data = self._cache.read(profile.trace_file_path)
            if not profile.trace_file_path.endswith('.gz'):
                raw_data = gzip.compress(raw_data, 1)
        else:
            file_with_gpu_metrics = self._gpu_metrics_file_dict.get(profile.trace_file_path)
            if file_with_gpu_metrics:
                raw_data = io.read(file_with_gpu_metrics)
            else:
                raw_data = self._cache.read(profile.trace_file_path)
                if profile.trace_file_path.endswith('.gz'):
                    raw_data = gzip.decompress(raw_data)
                raw_data = profile.append_gpu_metrics(raw_data)

                # write the data to temp file
                fp = tempfile.NamedTemporaryFile('w+b', suffix='.json.gz', delete=False)
                fp.close()
                # Already compressed, no need to gzip.open
                with open(fp.name, mode='wb') as file:
                    file.write(raw_data)
                self._gpu_metrics_file_dict[profile.trace_file_path] = fp.name

        headers = [('Content-Encoding', 'gzip')]
        headers.extend(TorchProfilerPlugin.headers)
        return werkzeug.Response(raw_data, content_type="application/json", headers=headers)

    @wrappers.Request.application
    def dist_gpu_info_route(self, request):
        """Returns GPU info for the distributed view."""
        profile = self._get_profile_for_request(request, True)
        return self.respond_as_json(profile.gpu_info)

    @wrappers.Request.application
    def comm_overlap_route(self, request):
        """Returns computation/communication overlap per step."""
        profile = self._get_profile_for_request(request, True)
        return self.respond_as_json(profile.steps_to_overlap)

    @wrappers.Request.application
    def comm_wait_route(self, request):
        """Returns communication wait time per step."""
        profile = self._get_profile_for_request(request, True)
        return self.respond_as_json(profile.steps_to_wait)

    @wrappers.Request.application
    def comm_ops_route(self, request):
        """Returns the communication-operations breakdown."""
        profile = self._get_profile_for_request(request, True)
        return self.respond_as_json(profile.comm_ops)

    @wrappers.Request.application
    def memory_route(self, request):
        """Returns the memory view data."""
        profile = self._get_profile_for_request(request)
        return self.respond_as_json(profile.memory_view)

    @wrappers.Request.application
    def static_file_route(self, request):
        """Serves the bundled frontend assets from the ``static`` directory."""
        filename = os.path.basename(request.path)
        extension = os.path.splitext(filename)[1]
        if extension == '.html':
            mimetype = 'text/html'
        elif extension == '.css':
            mimetype = 'text/css'
        elif extension == '.js':
            mimetype = 'application/javascript'
        else:
            mimetype = 'application/octet-stream'
        filepath = os.path.join(os.path.dirname(__file__), 'static', filename)
        try:
            with open(filepath, 'rb') as infile:
                contents = infile.read()
        except IOError:
            raise errors.NotFoundError("404 Not Found")
        return werkzeug.Response(
            contents, content_type=mimetype, headers=TorchProfilerPlugin.headers
        )

    @staticmethod
    def respond_as_json(obj):
        """Serializes obj to JSON and wraps it in a werkzeug Response."""
        content = json.dumps(obj)
        return werkzeug.Response(content, content_type="application/json", headers=TorchProfilerPlugin.headers)

    def _monitor_runs(self):
        """Daemon loop: periodically scan logdir and spawn loaders for new runs."""
        logger.info("Monitor runs begin")

        try:
            touched = set()
            while True:
                try:
                    logger.debug("Scan run dir")
                    run_dirs = self._get_run_dirs()

                    # Assume no deletion on run directories, trigger async load if find a new run
                    for run_dir in run_dirs:
                        # Set _is_active quickly based on file pattern match, don't wait for data loading
                        if not self._is_active:
                            self._is_active = True
                            self._is_active_initialized_event.set()

                        if run_dir not in touched:
                            touched.add(run_dir)
                            logger.info("Find run directory %s", run_dir)
                            # Use threading to avoid UI stall and reduce data parsing time
                            t = threading.Thread(target=self._load_run, args=(run_dir,))
                            t.start()
                except Exception as ex:
                    logger.warning("Failed to scan runs. Exception=%s", ex, exc_info=True)

                time.sleep(consts.MONITOR_RUN_REFRESH_INTERNAL_IN_SECONDS)
        except Exception:
            # Narrowed from a bare except; anything fatal here kills the
            # daemon thread, so log it with the traceback.
            logger.exception("Failed to start monitor_runs")

    def _receive_runs(self):
        """Daemon loop: drain loaded runs from the queue into self._runs."""
        while True:
            run = self._queue.get()
            if run is None:
                continue

            logger.info("Add run %s", run.name)
            with self._runs_lock:
                is_new = run.name not in self._runs
                self._runs[run.name] = run
                if is_new:
                    # Keep runs sorted by name for a stable UI listing.
                    self._runs = OrderedDict(sorted(self._runs.items()))

            # Update is_active
            if not self._is_active:
                self._is_active = True
                self._is_active_initialized_event.set()

    def _get_run_dirs(self):
        """Scan logdir, find PyTorch Profiler run directories.
        A directory is considered to be a run if it contains 1 or more *.pt.trace.json[.gz].
        E.g. there are 2 runs: run1, run2
                /run1
                    /[worker1].pt.trace.json.gz
                    /[worker2].pt.trace.json.gz
                /run2
                    /[worker1].pt.trace.json
        """
        for root, _, files in io.walk(self.logdir):
            for file in files:
                if utils.is_chrome_trace_file(file):
                    yield root
                    break

    def _load_run(self, run_dir):
        """Loads one run directory synchronously and enqueues the result."""
        # Hoisted out of the try so the except clause can always log a name
        # (previously a failure before this assignment left it unbound).
        name = self._get_run_name(run_dir)
        try:
            logger.info("Load run %s", name)
            # Currently, assume run data is immutable, so just load once
            loader = RunLoader(name, run_dir, self._cache)
            run = loader.load()
            logger.info("Run %s loaded", name)
            self._queue.put(run)
        except Exception as ex:
            # BUG FIX: the name/exception arguments were swapped, producing
            # garbled log messages.
            logger.warning("Failed to load run %s. Exception=%s", name, ex, exc_info=True)

    def _get_run(self, name) -> Run:
        """Returns the run by name, or None if it is not loaded."""
        with self._runs_lock:
            return self._runs.get(name, None)

    def _get_run_name(self, run_dir):
        """Derives a display name: the path of run_dir relative to logdir."""
        logdir = io.abspath(self.logdir)
        if run_dir == logdir:
            name = io.basename(run_dir)
        else:
            name = io.relpath(run_dir, logdir)
        return name

    def _get_profile_for_request(self, request, distributed=False):
        """Resolves the profile addressed by a request's query parameters.

        Distributed requests always use the synthetic 'All' worker.
        """
        name = request.args.get("run")
        span = request.args.get("span")
        if distributed:
            self._validate(run=name)
            profile = self._get_profile(name, 'All', span)
            self._check_distributed_profile(profile, name)
        else:
            worker = request.args.get("worker")
            self._validate(run=name, worker=worker)
            profile = self._get_profile(name, worker, span)
            self._check_normal_profile(profile, name, worker)

        return profile

    def _get_profile(self, name, worker, span):
        """Returns the profile for (run, worker, span) or raises NotFoundError."""
        run = self._get_run(name)
        self._check_run(run, name)
        profile = run.get_profile(worker, span)
        if profile is None:
            raise errors.NotFoundError("could not find the profile for %s/%s " % (name, worker))
        return profile

    def _check_run(self, run, name):
        """Raises NotFoundError if the run is not loaded."""
        if run is None:
            raise errors.NotFoundError("could not find the run for %s" % (name))

    def _check_normal_profile(self, profile, name, worker):
        """Raises InvalidArgumentError unless profile is a RunProfile."""
        if not isinstance(profile, RunProfile):
            raise errors.InvalidArgumentError("Get an unexpected profile type %s for %s/%s" % (type(profile), name, worker))

    def _check_distributed_profile(self, profile, name):
        """Raises InvalidArgumentError unless profile is a DistributedRunProfile."""
        if not isinstance(profile, DistributedRunProfile):
            # BUG FIX: the format string had three placeholders ("%s/%s") but
            # only two arguments, so raising this error itself crashed with a
            # TypeError.
            raise errors.InvalidArgumentError("Get an unexpected distributed profile type %s for %s" % (type(profile), name))

    def _validate(self, **kwargs):
        """Raises InvalidArgumentError for any query parameter that is missing."""
        for name, v in kwargs.items():
            if v is None:
                raise errors.InvalidArgumentError("Must specify %s in request url" % (name))
|
test_dataloader.py | # Owner(s): ["module: dataloader"]
import math
import sys
import errno
import multiprocessing
import os
import ctypes
import faulthandler
import torch
import gc
import time
import signal
import unittest
import itertools
import warnings
import tempfile
from torch import multiprocessing as mp
from torch.utils.data import (
ChainDataset,
ConcatDataset,
DataLoader,
DataLoader2,
Dataset,
IterableDataset,
Subset,
TensorDataset,
communication,
_utils
)
from torch.utils.data._utils import MP_STATUS_CHECK_INTERVAL
from torch.utils.data.dataset import random_split
from torch.utils.data.datapipes.iter import IterableWrapper
from torch._utils import ExceptionWrapper
from torch.testing._internal.common_utils import (TestCase, run_tests, TEST_NUMPY, IS_WINDOWS,
IS_IN_CI, NO_MULTIPROCESSING_SPAWN, skipIfRocm, slowTest,
load_tests, TEST_WITH_ASAN, TEST_WITH_TSAN, IS_SANDCASTLE)
# Optional dependency probes and module-level test configuration.
try:
    import psutil
    HAS_PSUTIL = True
except ImportError:
    HAS_PSUTIL = False
    err_msg = ("psutil not found. Some critical data loader tests relying on it "
               "(e.g., TestDataLoader.test_proper_exit) will not run.")
    if IS_IN_CI:
        # In CI the psutil-backed tests are mandatory, so fail loudly.
        raise ImportError(err_msg) from None
    else:
        warnings.warn(err_msg)


try:
    import dill
    # XXX: By default, dill writes the Pickler dispatch table to inject its
    # own logic there. This globally affects the behavior of the standard library
    # pickler for any user who transitively depends on this module!
    # Undo this extension to avoid altering the behavior of the pickler globally.
    dill.extend(use_dill=False)
    HAS_DILL = True
except ImportError:
    HAS_DILL = False

# Decorator for tests that require dill for serialization.
skipIfNoDill = unittest.skipIf(not HAS_DILL, "no dill")


# load_tests from torch.testing._internal.common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests

# We cannot import TEST_CUDA from torch.testing._internal.common_cuda here, because if we do that,
# the TEST_CUDNN line from torch.testing._internal.common_cuda will be executed multiple times
# as well during the execution of this test suite, and it will cause
# CUDA OOM error on Windows.
TEST_CUDA = torch.cuda.is_available()

if TEST_CUDA:
    # Jetson boards are identified by device name; some tests need relaxed
    # expectations on them.
    dev_name = torch.cuda.get_device_name(torch.cuda.current_device()).lower()
    IS_JETSON = 'xavier' in dev_name or 'nano' in dev_name or 'jetson' in dev_name or 'tegra' in dev_name
else:
    IS_JETSON = False

if not NO_MULTIPROCESSING_SPAWN:
    # We want to use `spawn` if able because some of our tests check that the
    # data loader terminates gracefully. To prevent hanging in the testing
    # process, such data loaders are run in a separate subprocess.
    #
    # We also want to test the `pin_memory=True` configuration, thus `spawn` is
    # required to launch such processes and they initialize the CUDA context.
    #
    # Mixing different start method is a recipe for disaster (e.g., using a fork
    # `mp.Event` with a spawn `mp.Process` segfaults). So we set this globally
    # to avoid bugs.
    #
    # Get a multiprocessing context because some test / third party library will
    # set start_method when imported, and setting again triggers `RuntimeError`.
    mp = mp.get_context(method='spawn')


# 60s of timeout?
# Yes, in environments where physical CPU resources are shared, e.g., CI, the
# time for a inter-process communication can be highly varying.  With 15~17s of
# timeout, we have observed flakiness in some CI builds (see
# pytorch/pytorch#14501, pytorch/pytorch#16608).  We follow the CPython
# multiprocessing setup and set the timeout to 60s here:
#
# https://github.com/python/cpython/blob/e8113f51a8bdf33188ee30a1c038a298329e7bfa/Lib/test/_test_multiprocessing.py#L73
JOIN_TIMEOUT = 60.0  # seconds

# None lets DataLoader pick the platform's default start method.
supported_multiprocessing_contexts = [None] + list(torch.multiprocessing.get_all_start_methods())
@unittest.skipIf(
    TEST_WITH_TSAN,
    "Fails with TSAN with the following error: starting new threads after multi-threaded "
    "fork is not supported. Dying (set die_after_fork=0 to override)")
class TestDatasetRandomSplit(TestCase):
    """Unit tests for torch.utils.data.dataset.random_split and Subset slicing."""

    def test_lengths_must_equal_dataset_size(self):
        """random_split rejects lengths that don't sum to len(dataset)."""
        with self.assertRaises(ValueError):
            random_split([1, 2, 3, 4], [1, 2])

    def test_splits_have_correct_size(self):
        splits = random_split([1, 2, 3, 4, 5, 6], [2, 4])
        self.assertEqual(len(splits), 2)
        self.assertEqual(len(splits[0]), 2)
        self.assertEqual(len(splits[1]), 4)

    def test_splits_are_mutually_exclusive(self):
        """Every element of the input lands in exactly one split."""
        data = [5, 2, 3, 4, 1, 6]
        splits = random_split(data, [2, 4])
        all_values = []
        all_values.extend(list(splits[0]))
        all_values.extend(list(splits[1]))
        data.sort()
        all_values.sort()
        self.assertListEqual(data, all_values)

    def test_splits_indexing_type(self):
        r"""Indices generated by random_split
          should be of integer type
        """
        class CustomDataset():
            def __init__(self, test_object, custom_list):
                self.data = custom_list
                self.test_object = test_object

            def __getitem__(self, key):
                # The assertion runs on every DataLoader access below.
                self.test_object.assertEqual(type(key), type(0))
                return self.data[key]

            def __len__(self):
                return len(self.data)

        x = [1, 2, 3, 4, 5]
        dataset = CustomDataset(self, x)
        dataset = random_split(dataset, [5])[0]
        data_loader = DataLoader(dataset)
        for batch in data_loader:
            pass

    def test_splits_reproducibility(self):
        """Identical generator seeds yield identical splits."""
        self.assertEqual(
            [list(x) for x in random_split(range(10), [3, 7], generator=torch.Generator().manual_seed(1))],
            [[5, 6, 1], [2, 0, 8, 9, 3, 7, 4]],
        )
        self.assertEqual(
            random_split(range(100), [60, 40], generator=torch.Generator().manual_seed(42)),
            random_split(range(100), [60, 40], generator=torch.Generator().manual_seed(42)),
        )

    def test_splits_generator(self):
        """random_split consumes the default RNG only when no generator is given."""
        # A random_split without a specific generator should affect the default one
        state = torch.get_rng_state()
        a = torch.rand(10)
        torch.set_rng_state(state)
        random_split(range(10), [5, 5])
        b = torch.rand(10)
        self.assertNotEqual(a, b)

        # A random_split with a specific generator should not affect the default one
        state = torch.get_rng_state()
        a = torch.rand(10)
        torch.set_rng_state(state)
        random_split(range(10), [5, 5], generator=torch.Generator().manual_seed(42))
        b = torch.rand(10)
        self.assertEqual(a, b)

    def test_slicing_of_subset_of_dataset(self):
        """Slicing a Subset behaves like slicing the underlying dataset."""
        # Testing slicing a subset initialized with a dataset
        dataset = TensorDataset(torch.tensor([1, 2, 3, 4, 5]))
        subset_of_dataset = Subset(dataset, [0, 1, 2, 3, 4])
        self.assertEqual(subset_of_dataset[:], dataset[:])
        self.assertEqual(subset_of_dataset[1:2], dataset[1:2])
        self.assertEqual(subset_of_dataset[0:-1:2], dataset[0:-1:2])
        # Testing slicing of subset from random split
        subset1, subset2 = random_split(dataset, [3, 2])
        self.assertEqual(subset1[:], dataset[subset1.indices[:]])
        self.assertEqual(subset1[0:2], dataset[subset1.indices[0:2]])
        self.assertEqual(subset1[0:-1:2], dataset[subset1.indices[0:-1:2]])

    def test_slicing_of_subset_of_subset(self):
        """Slicing composes correctly through nested Subsets."""
        # Testing slicing a subset initialized with a subset
        dataset = TensorDataset(torch.tensor([1, 2, 3, 4, 5]))
        subset_of_dataset = Subset(dataset, [0, 1, 2, 3, 4])
        subset_of_subset = Subset(subset_of_dataset, [0, 1, 2, 3, 4])
        self.assertEqual(subset_of_subset[:], dataset[:])
        self.assertEqual(subset_of_subset[0:2], dataset[0:2])
        self.assertEqual(subset_of_subset[0:-1:2], dataset[0:-1:2])
        # Testing slicing of subset of subset from random split
        subset1, subset2 = random_split(dataset, [4, 1])
        subset_of_subset1, subset_of_subset2 = random_split(subset1, [3, 1])
        idx = [subset1.indices[i] for i in subset_of_subset1.indices]
        self.assertEqual(subset_of_subset1[:], dataset[idx[:]])
        self.assertEqual(subset_of_subset1[0:2], dataset[idx[0:2]])
        self.assertEqual(subset_of_subset1[0:-1:2], dataset[idx[0:-1:2]])
class CUDACountingDataset(Dataset):
    """Map-style dataset of length ``n`` whose i-th item is a CUDA tensor
    holding the value ``i`` (requires a CUDA device at access time)."""

    def __init__(self, n):
        super(CUDACountingDataset, self).__init__()
        self.n = n

    def __len__(self):
        return self.n

    def __getitem__(self, i):
        return torch.as_tensor(i, device='cuda')
class CountingDataset(Dataset):
    """Map-style dataset of length ``n`` whose i-th item is simply ``i``."""

    def __init__(self, n):
        super(CountingDataset, self).__init__()
        self.n = n

    def __len__(self):
        return self.n

    def __getitem__(self, i):
        return i
class CountingIterableDataset(IterableDataset):
    """Iterable-style dataset that yields 0..n-1 and also reports its length."""

    def __init__(self, n):
        super(CountingIterableDataset, self).__init__()
        self.n = n

    def __len__(self):
        return self.n

    def __iter__(self):
        return iter(range(self.n))
@unittest.skipIf(
    TEST_WITH_TSAN,
    "Fails with TSAN with the following error: starting new threads after multi-threaded "
    "fork is not supported. Dying (set die_after_fork=0 to override)")
class TestTensorDataset(TestCase):
    """Unit tests for torch.utils.data.TensorDataset."""

    def test_len(self):
        dataset = TensorDataset(torch.randn(15, 10, 2, 3, 4, 5), torch.randperm(15))
        self.assertEqual(len(dataset), 15)

    def test_getitem(self):
        inputs = torch.randn(15, 10, 2, 3, 4, 5)
        targets = torch.randn(15, 10)
        dataset = TensorDataset(inputs, targets)
        for idx in range(15):
            self.assertEqual(inputs[idx], dataset[idx][0])
            self.assertEqual(targets[idx], dataset[idx][1])

    def test_getitem_1d(self):
        inputs = torch.randn(15)
        targets = torch.randn(15)
        dataset = TensorDataset(inputs, targets)
        for idx in range(15):
            self.assertEqual(inputs[idx], dataset[idx][0])
            self.assertEqual(targets[idx], dataset[idx][1])

    def test_single_tensor(self):
        values = torch.randn(5, 10)
        dataset = TensorDataset(values)
        self.assertEqual(len(dataset), 5)
        for idx in range(5):
            self.assertEqual(values[idx], dataset[idx][0])

    def test_many_tensors(self):
        tensors = (torch.randn(5, 10, 2, 3, 4, 5), torch.randn(5, 10),
                   torch.randn(5, 10, 2, 5), torch.randn(5, 10, 3, 7))
        dataset = TensorDataset(*tensors)
        self.assertEqual(len(dataset), 5)
        for idx in range(5):
            for pos, tensor in enumerate(tensors):
                self.assertEqual(tensor[idx], dataset[idx][pos])
@unittest.skipIf(
    TEST_WITH_TSAN,
    "Fails with TSAN with the following error: starting new threads after multi-threaded "
    "fork is not supported. Dying (set die_after_fork=0 to override)")
class TestConcatDataset(TestCase):
    """Unit tests for torch.utils.data.ConcatDataset."""

    def test_concat_two_singletons(self):
        concatenated = ConcatDataset([[0], [1]])
        self.assertEqual(2, len(concatenated))
        self.assertEqual(0, concatenated[0])
        self.assertEqual(1, concatenated[1])

    def test_concat_two_non_singletons(self):
        concatenated = ConcatDataset([list(range(5)), list(range(5, 10))])
        self.assertEqual(10, len(concatenated))
        self.assertEqual(0, concatenated[0])
        self.assertEqual(5, concatenated[5])

    def test_concat_two_non_singletons_with_empty(self):
        # Adding an empty dataset somewhere is correctly handled
        concatenated = ConcatDataset([list(range(5)), [], list(range(5, 10))])
        self.assertEqual(10, len(concatenated))
        self.assertEqual(0, concatenated[0])
        self.assertEqual(5, concatenated[5])

    def test_concat_raises_index_error(self):
        concatenated = ConcatDataset([list(range(5)), list(range(5, 10))])
        with self.assertRaises(IndexError):
            # this one goes to 11
            concatenated[11]

    def test_add_dataset(self):
        # Dataset.__add__ concatenates; global index k*7 maps to part k's item 0.
        parts = [TensorDataset(torch.rand(7, 3, 28, 28), torch.rand(7))
                 for _ in range(3)]
        combined = parts[0] + parts[1] + parts[2]
        self.assertEqual(21, len(combined))
        for k, part in enumerate(parts):
            self.assertEqual(0, (part[0][0] - combined[7 * k][0]).abs().sum())

    def test_iterable_dataset_err(self):
        # ConcatDataset must reject IterableDataset constituents anywhere.
        map_ds = TensorDataset(torch.rand(7, 3, 28, 28), torch.rand(7))
        small_it = CountingIterableDataset(5)
        big_it = CountingIterableDataset(10)
        for datasets in ([map_ds, big_it, small_it], [big_it], [small_it, map_ds]):
            with self.assertRaisesRegex(AssertionError, "does not support IterableDataset"):
                ConcatDataset(datasets)
# takes in dummy var so this can also be used as a `worker_init_fn`
def set_faulthander_if_available(_=None):
    """Enable faulthandler on stderr; also register SIGUSR1 stack dumps where possible."""
    faulthandler.enable(sys.__stderr__)
    if IS_WINDOWS:
        # windows does not have faulthandler.register
        return
    # chain=False prevents the default behavior of killing the process
    faulthandler.register(signal.SIGUSR1, file=sys.__stderr__, chain=False)

set_faulthander_if_available()
# Process `pid` must have called `set_faulthander_if_available`
def print_traces_of_all_threads(pid):
    """Ask process ``pid`` to dump all of its thread stacks to its stderr."""
    # Prefer the non-fatal SIGUSR1 handler registered by
    # set_faulthander_if_available; on Windows fall back to the handler
    # installed by faulthandler.enable(), at the cost of killing the process.
    signum = signal.SIGSEGV if IS_WINDOWS else signal.SIGUSR1
    os.kill(pid, signum)
    # wait in parent process to give subprocess some time to print
    time.sleep(5)
# The following `ErrorTrackingProcess` stores the first encountered exception in
# its `.exception` attribute.
# Inspired by https://stackoverflow.com/a/33599967
class ErrorTrackingProcess(mp.Process):

    # Why no *args?
    # py2 doesn't support def fn(x, *args, key=val, **kwargs)
    # Setting disable_stderr=False may generate a lot of unrelated error outputs
    # but could be helpful for debugging.
    def __init__(self, disable_stderr=True, **kwargs):
        """Process that ships the first exception it hits back to the parent.

        Args:
            disable_stderr: if True, the child's stderr is redirected to
                os.devnull so expected errors do not pollute test output.
        """
        super(ErrorTrackingProcess, self).__init__(**kwargs)
        # One-way pipe: the child sends either None (clean run) or an
        # ExceptionWrapper; the parent reads it lazily via `exception`.
        self._pconn, self._cconn = mp.Pipe()
        self._exception = None
        self.disable_stderr = disable_stderr

    def run(self):
        """Child-side entry: run the target, report any exception through the
        pipe, then re-raise it so the exit code is nonzero."""
        set_faulthander_if_available()
        if self.disable_stderr:
            # Disable polluting stderr with errors that are supposed to happen.
            with open(os.devnull, 'w') as devnull:
                os.dup2(devnull.fileno(), sys.stderr.fileno())
        try:
            super(ErrorTrackingProcess, self).run()
            self._cconn.send(None)
        except Exception:
            self._cconn.send(ExceptionWrapper(sys.exc_info()))
            raise

    def print_traces_of_all_threads(self):
        """Dump the child's thread stacks to its (non-disabled) stderr."""
        assert self.is_alive(), "can only use print_traces_of_all_threads if the process is alive"
        assert not self.disable_stderr, "do not disable stderr if you use print_traces_of_all_threads"
        # On platforms without `SIGUSR1`, `set_faulthander_if_available` sets
        # `faulthandler.enable()`, and `print_traces_of_all_threads` may kill
        # the process. So let's poll the exception first
        _ = self.exception
        print_traces_of_all_threads(self.pid)

    @property
    def exception(self):
        """First exception raised in the child (re-instantiated from its type
        and message), or None if it ran cleanly so far. Non-blocking."""
        if self._pconn.poll():
            self._exception = self._pconn.recv()
        if self._exception is None:
            return None
        else:
            # Rebuild an instance of the original exception type from the
            # ExceptionWrapper's stored type and message.
            return self._exception.exc_type(self._exception.exc_msg)

    # ESRCH means that os.kill can't finds alive proc
    def send_signal(self, signum, ignore_ESRCH=False):
        """Send `signum` to the child, optionally ignoring already-dead procs."""
        try:
            os.kill(self.pid, signum)
        except OSError as e:
            if not ignore_ESRCH or e.errno != errno.ESRCH:
                raise
class ErrorDataset(Dataset):
    """Dataset with a length but deliberately no ``__getitem__`` override, so
    indexing falls through to the ``Dataset`` base class (used to test that
    fetch errors propagate out of the DataLoader)."""

    def __init__(self, size):
        self.size = size

    def __len__(self):
        return self.size
class SegfaultDataset(Dataset):
    """Dataset whose ``__getitem__`` reads address 0 via ctypes, crashing the
    fetching process with a segfault."""

    def __init__(self, size):
        self.size = size

    def __len__(self):
        return self.size

    def __getitem__(self, idx):
        # ctypes.string_at(0) dereferences a NULL pointer.
        return ctypes.string_at(0)
class SleepDataset(Dataset):
    """Dataset that sleeps once, on the first item it serves, then returns
    indices (used to trigger DataLoader timeouts)."""

    def __init__(self, size, sleep_sec):
        self.size = size
        self.sleep_sec = sleep_sec
        self.sleeped = False  # flips to True after the one-time sleep

    def __len__(self):
        return self.size

    def __getitem__(self, idx):
        if not self.sleeped:
            self.sleeped = True
            time.sleep(self.sleep_sec)
        return idx
class SeedDataset(Dataset):
    """Dataset whose every item is the fetching process's torch initial seed
    (used to verify per-worker seeding)."""

    def __init__(self, size):
        self.size = size

    def __len__(self):
        return self.size

    def __getitem__(self, idx):
        # Index is irrelevant; the value identifies the fetching process's RNG seed.
        return torch.initial_seed()
class WorkerSpecificIterableDataset(IterableDataset):
    """Iterable dataset where worker ``i`` yields ``range(sizes_for_all_workers[i])``."""

    def __init__(self, sizes_for_all_workers):
        self.sizes_for_all_workers = sizes_for_all_workers

    def __len__(self):
        # Total item count across all workers.
        return sum(self.sizes_for_all_workers)

    def __iter__(self):
        # Must run inside a DataLoader worker: each worker iterates only its
        # own entry of sizes_for_all_workers.
        info = torch.utils.data.get_worker_info()
        assert info is not None
        return iter(range(self.sizes_for_all_workers[info.id]))
# Inspired by https://stackoverflow.com/a/26703365
# If all workers will call `sync_once`, they will be blocked until all workers
# reach the call (i.e., acting like a barrier).
# This can be used to ensure that each worker at least processes one data.
class SynchronizedDataset(Dataset):

    def __init__(self, size, batch_size, num_workers):
        # Need enough items that every worker fetches at least one batch;
        # otherwise the barrier in sync_once could never be released.
        assert size >= num_workers * batch_size
        self.count = mp.Value('i', 0, lock=True)  # arrivals at the barrier
        self.barrier = mp.Semaphore(0)            # opened by the last arrival
        self.num_workers = num_workers
        self.size = size

    def sync_once(self):
        """Block until all `num_workers` processes have called this once."""
        with self.count.get_lock():
            self.count.value += 1
            if self.count.value == self.num_workers:
                # Last arrival releases the single token...
                self.barrier.release()
        # ...and each waiter passes it along so everyone gets through.
        self.barrier.acquire()
        self.barrier.release()

    def __getitem__(self, idx):
        # Abstract: subclasses supply the actual item logic.
        raise NotImplementedError

    def __len__(self):
        return self.size
class EmptyTensorDataset(torch.utils.data.Dataset):
    # Dataset of `len` items, each a fresh 0-element tensor (index ignored).
    # NOTE: the parameter names `len` and `any` shadow builtins, but they are
    # part of the constructor/indexing interface, so they are kept as-is.
    def __init__(self, len):
        self.len = len

    def __len__(self):
        return self.len

    def __getitem__(self, any):
        return torch.empty(0)
class SynchronizedSeedDataset(SynchronizedDataset):
    # Every worker barriers on its first fetch (so each worker serves at least
    # one item), then each item is the fetching process's torch initial seed.
    def __getitem__(self, idx):
        self.sync_once()
        return torch.initial_seed()
def _test_timeout(persistent_workers):
    """Child-process target: workers sleep 3s on their first fetch but the
    loader only waits 1s, so the first `next` must time out."""
    loader = DataLoader(SleepDataset(10, 3), batch_size=2, num_workers=2,
                        timeout=1, persistent_workers=persistent_workers)
    _ = next(iter(loader))
def _test_timeout_pin_memory(persistent_workers):
    """Child-process target: same timeout scenario, with pin_memory enabled."""
    loader = DataLoader(SleepDataset(10, 3), batch_size=2, num_workers=2,
                        timeout=1, pin_memory=True,
                        persistent_workers=persistent_workers)
    _ = next(iter(loader))
def _test_large_sampler_indices(persistent_workers):
    # See
    #   test_large_sampler_indices
    #   https://github.com/pytorch/pytorch/issues/48666
    # Huge dataset + big batches make the worker index-queue putters block;
    # erroring out while holding the iterator checks clean shutdown anyway.
    dataloader = torch.utils.data.DataLoader(
        EmptyTensorDataset(10000000),
        batch_size=40960,
        persistent_workers=persistent_workers,
        num_workers=1)

    it = iter(dataloader)

    for x in it:
        assert x.numel() == 0
        # Fail on the very first batch, while workers are still pushing.
        raise RuntimeError('My Error')
def disable_stderr(worker_id):
    r"""
    Avoids printing "ERROR: Unexpected segmentation fault encountered in worker."
    from workers. Since worker signal handler prints with low-level write(),
    this has to be done on OS level via dup.

    This is used as worker_init_fn for test_segfault.
    """
    sys.stderr.flush()  # flush library buffers that dup2 knows nothing about
    # Closing devnull when the with-block exits is fine: dup2 copies the
    # descriptor onto stderr's fd, and that duplicate stays open afterwards.
    with open(os.devnull, 'w') as devnull:
        os.dup2(devnull.fileno(), sys.stderr.fileno())
def _test_segfault():
    """Child-process target: each worker segfaults on its first fetch; stderr
    is silenced so the expected crash message does not pollute test output."""
    loader = DataLoader(SegfaultDataset(10), batch_size=2, num_workers=2,
                        worker_init_fn=disable_stderr)
    _ = next(iter(loader))
def _test_no_segfault():
    """Child-process target: fork a worker after set_num_threads in the parent
    and make sure the forked worker does not segfault (see issue #54752)."""
    dataset = [1, 2, 3]
    # Always invoke set_num_threads in this (parent) process, with at least 4
    # threads, before the fork below.
    torch.set_num_threads(max(torch.get_num_threads(), 4))
    mp_ctx = torch.multiprocessing.get_context(method='fork')
    dataloader = DataLoader(dataset, num_workers=1, worker_init_fn=disable_stderr,
                            multiprocessing_context=mp_ctx)
    _ = next(iter(dataloader))
class TestProperExitDataset(Dataset):
    """Map-style dataset that can be told (via an Event) to raise in the last
    DataLoader worker only."""

    def __init__(self, size, error_event):
        self.size = size
        self.error_event = error_event

    def __len__(self):
        return self.size

    def __getitem__(self, idx):
        info = torch.utils.data.get_worker_info()
        # Short-circuits before touching `info` when error_event is None.
        should_error = (
            self.error_event is not None
            and self.error_event.is_set()
            and info.id == info.num_workers - 1  # only error in the last worker
        )
        if should_error:
            raise RuntimeError('Worker error')
        return torch.tensor([idx])
class TestProperExitIterableDataset(IterableDataset):
    """Iterable dataset yielding ``size`` constant items; can be told (via an
    Event) to raise in the last DataLoader worker only."""

    def __init__(self, size, error_event):
        self.error_event = error_event
        self.size = size
        self.remaining = size  # counts down as items are produced

    def __len__(self):
        return self.size

    def __iter__(self):
        # The dataset is its own (single-use) iterator.
        return self

    def __next__(self):
        info = torch.utils.data.get_worker_info()
        if self.error_event is not None and self.error_event.is_set() \
                and info.id == info.num_workers - 1:
            # only error in the last worker
            raise RuntimeError('Worker error')
        self.remaining -= 1
        if self.remaining < 0:
            raise StopIteration
        return torch.tensor(-1000)

    next = __next__  # py2 compatibility
# See TestDataLoader.test_proper_exit for usage
def _test_proper_exit(is_iterable_dataset, use_workers, pin_memory, exit_method,
                      hold_iter_reference, loader_setup_event, tester_setup_event,
                      persistent_workers):
    """Drive a DataLoader into a scripted failure so the parent test can verify
    that loader and worker processes shut down properly.

    exit_method is one of 'loader_error', 'loader_kill', 'worker_error',
    'worker_kill' (worker_* require use_workers). loader_setup_event /
    tester_setup_event synchronize the handshake with the parent test.
    """
    num_workers = 2 if use_workers else 0

    if exit_method == 'worker_error' or exit_method == 'worker_kill':
        assert use_workers is True

    if exit_method == 'worker_error':
        worker_error_event = mp.Event()
    else:
        worker_error_event = None

    if is_iterable_dataset:
        ds = TestProperExitIterableDataset(7, worker_error_event)
    else:
        ds = TestProperExitDataset(12, worker_error_event)

    loader = DataLoader(ds, batch_size=1, shuffle=False,
                        num_workers=num_workers, pin_memory=pin_memory,
                        worker_init_fn=set_faulthander_if_available,
                        persistent_workers=persistent_workers)

    error_it = 2  # iteration index at which the scripted failure fires

    if use_workers:
        # Ensure there is enough data left at `error_it` that workers are
        # still busy producing when the failure happens.
        # 2 is the magical per-worker prefetch number...
        # FIXME: change this after the number becomes configurable.
        if is_iterable_dataset:
            assert len(ds) * num_workers > (error_it + 2 + 1)
        else:
            assert len(loader) > (error_it + 2 + 1) * num_workers
    else:
        if is_iterable_dataset:
            assert len(ds) > error_it + 1
        else:
            assert len(loader) > error_it + 1

    it = iter(loader)
    if use_workers:
        workers = it._workers

    def kill_pid(pid):
        # Hard-kill `pid` and wait until it is really gone.
        psutil_p = psutil.Process(pid)
        psutil_p.kill()
        psutil_p.wait(JOIN_TIMEOUT)
        assert not psutil_p.is_running()

    for i, _ in enumerate(it):
        if i == 0:
            # First-iteration handshake with the parent test.
            if not hold_iter_reference:
                del it
                del loader
            loader_setup_event.set()
            tester_setup_event.wait()
            # ensure that the workers are still alive
            if use_workers:
                for w in workers:
                    assert w.is_alive()
            if worker_error_event is not None:
                worker_error_event.set()

        if i == error_it:
            if exit_method == 'loader_error':
                raise RuntimeError('Loader error')
            elif exit_method == 'loader_kill':
                kill_pid(os.getpid())
            elif exit_method == 'worker_kill':
                kill_pid(workers[-1].pid)  # kill last worker

    if not hold_iter_reference:
        # Tries to trigger the __del__ clean-up rather than the automatic
        # exiting of daemonic children. Technically it should be automatically
        # triggered, but I don't want to rely on the implementation detail of
        # Python gc.
        gc.collect()
class TestWorkerInfoDataset(SynchronizedDataset):
    # Returns `self.value`, which is only set on the worker-side dataset copy
    # by _test_worker_info_init_fn; the main-process copy never has it, so
    # indexing in the main process raises AttributeError.
    def __getitem__(self, idx):
        self.sync_once()
        return torch.tensor(self.value)
# Should be used as worker_init_fn with TestWorkerInfoDataset.
# See _test_get_worker_info below for usage.
def _test_worker_info_init_fn(worker_id):
    """Sanity-check torch.utils.data.get_worker_info() inside a worker, then
    stamp the worker's dataset copy with [worker_id, pid] for the parent."""
    worker_info = torch.utils.data.get_worker_info()
    assert worker_id == worker_info.id, "worker_init_fn and worker_info should have consistent id"
    assert worker_id < worker_info.num_workers, "worker_init_fn and worker_info should have valid id"
    assert worker_info.seed == torch.initial_seed(), "worker_init_fn and worker_info should have consistent seed"
    dataset = worker_info.dataset
    # The worker holds its own copy of the dataset, not the parent's object.
    assert isinstance(dataset, TestWorkerInfoDataset), "worker_info should have correct dataset copy"
    assert not hasattr(dataset, 'value'), "worker_info should have correct dataset copy"

    # test that WorkerInfo attributes are read-only
    try:
        worker_info.id = 3999
    except RuntimeError as e:
        assert str(e) == "Cannot assign attributes to WorkerInfo objects"
    try:
        worker_info.a = 3
    except RuntimeError as e:
        assert str(e) == "Cannot assign attributes to WorkerInfo objects"
    for k in ['id', 'num_workers', 'seed', 'dataset']:
        assert "{}=".format(k) in repr(worker_info)

    # Only the worker-side copy gets `value`; the parent later verifies its
    # own copy stayed untouched.
    dataset.value = [worker_id, os.getpid()]
def _test_get_worker_info():
    """End-to-end check of torch.utils.data.get_worker_info() semantics
    (runs in a child process; see test usage of _test_worker_info_init_fn)."""
    # get_worker_info returns None in main proc
    assert torch.utils.data.get_worker_info() is None
    num_workers = 2
    batch_size = 2
    dataset = TestWorkerInfoDataset(6, batch_size, num_workers)
    dataloader = DataLoader(dataset, batch_size=batch_size,
                            num_workers=num_workers,
                            worker_init_fn=_test_worker_info_init_fn)
    it = iter(dataloader)
    data = []
    for d in it:
        data.append(d)
    worker_pids = [w.pid for w in it._workers]
    data = torch.cat(data, 0)
    for d in data:
        # each `d` is a [worker_id, worker_pid] pair, which is set in
        # _test_worker_info_init_fn
        assert d[1] == worker_pids[d[0]]
    # get_worker_info returns None in main proc after data loading
    assert torch.utils.data.get_worker_info() is None

    # main proc dataset was never assigned this attribute
    assert not hasattr(dataset, 'value')
    # Indexing the parent-side copy must therefore fail on `self.value`.
    try:
        _ = dataset[0]
    except AttributeError:
        return
    raise RuntimeError('Expected AttributeError')
# test custom init function
def init_fn(worker_id):
    # Give every worker the same fixed torch seed.
    torch.manual_seed(12345)
# used with test_error_in_init
class ErrorIterableDataset(IterableDataset):
    # Fails as soon as iteration starts, before any item is produced.
    def __iter__(self):
        raise RuntimeError("Error in __iter__")
# used with test_error_in_init
def error_worker_init_fn(_):
    # Fails during worker startup, before any data is loaded.
    raise RuntimeError("Error in worker_init_fn")
class BulkLoadingDataset(Dataset):
    """Dataset indexed by whole batches: ``__getitem__`` takes a list/tuple of
    indices and returns them as a single tensor."""

    def __init__(self, length):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, indices):
        # Only batch (sequence) indexing is supported.
        assert isinstance(indices, (list, tuple))
        return torch.as_tensor(indices)
class BulkLoadingSampler(torch.utils.data.Sampler):
    """Sampler yielding whole batches (lists of indices) in shuffled order,
    for use with BulkLoadingDataset and batch_size=None."""

    def __init__(self, dataset, batch_size):
        self.dataset = dataset
        self.batch_size = batch_size

    def __len__(self):
        # Number of batches, rounding the final partial batch up.
        return int(math.ceil(len(self.dataset) / float(self.batch_size)))

    def __iter__(self):
        perm = torch.randperm(len(self.dataset))
        for chunk in perm.split(self.batch_size):
            yield chunk.tolist()
# `list` subclass used to check that DataLoader collation preserves the
# caller's container subtype.
class CustomList(list):
    pass
# `dict` subclass used to check that DataLoader collation preserves the
# caller's container subtype.
class CustomDict(dict):
    pass
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)")
@unittest.skipIf(
TEST_WITH_ASAN,
"DataLoader tests hang in ASAN, see: https://github.com/pytorch/pytorch/issues/66223")
class TestDataLoader(TestCase):
def setUp(self):
    """Build a 100-sample (data, label) TensorDataset shared by the tests."""
    super(TestDataLoader, self).setUp()
    self.data = torch.randn(100, 2, 3, 5)
    # 100 labels: a permutation of 0..49, repeated twice.
    self.labels = torch.randperm(50).repeat(2)
    self.dataset = TensorDataset(self.data, self.labels)
    # Subclasses flip this to rerun the suite with persistent workers.
    self.persistent_workers = False
def _get_data_loader(self, dataset, **kwargs):
    """DataLoader factory that injects the suite-wide persistent_workers
    setting, forcing it off when there are no workers (DataLoader rejects
    persistent_workers=True with num_workers=0)."""
    persistent = kwargs.get('persistent_workers', self.persistent_workers)
    if kwargs.get('num_workers', 0) == 0:
        persistent = False
    kwargs['persistent_workers'] = persistent
    return DataLoader(dataset, **kwargs)
def _test_sequential(self, loader):
    """Check that `loader` yields self.dataset's samples in original order,
    for both unbatched (batch_size=None) and batched loaders."""
    batch_size = loader.batch_size
    if batch_size is None:
        last_idx = None
        for idx, (sample, target) in enumerate(loader):
            self.assertEqual(sample, self.data[idx])
            self.assertEqual(target, self.labels[idx])
            last_idx = idx
        # Every sample must have been produced.
        self.assertEqual(last_idx, len(self.dataset) - 1)
    else:
        last_batch = None
        for i, (sample, target) in enumerate(loader):
            start = i * batch_size
            self.assertEqual(sample, self.data[start:start + batch_size])
            self.assertEqual(target, self.labels[start:start + batch_size])
            last_batch = i
        # Every batch (including a final partial one) must have been produced.
        self.assertEqual(last_batch, math.floor((len(self.dataset) - 1) / batch_size))
def _test_shuffle(self, loader):
    """Check that `loader` yields every (data, label) pair exactly once,
    matching pairs by searching self.data for each yielded sample."""
    # Per-index counters of how often each sample/label has been seen.
    found_data = {i: 0 for i in range(self.data.size(0))}
    found_labels = {i: 0 for i in range(self.labels.size(0))}
    batch_size = loader.batch_size
    if batch_size is None:
        for i, (batch_samples, batch_targets) in enumerate(loader):
            sample, target = (batch_samples, batch_targets)
            for data_point_idx, data_point in enumerate(self.data):
                if data_point.eq(sample).all():
                    self.assertFalse(found_data[data_point_idx])
                    found_data[data_point_idx] += 1
                    break
            # NOTE: relies on `data_point_idx` leaking out of the inner loop —
            # the break above leaves it at the matching index.
            self.assertEqual(target, self.labels[data_point_idx])
            found_labels[data_point_idx] += 1
            self.assertEqual(sum(found_data.values()), (i + 1))
            self.assertEqual(sum(found_labels.values()), (i + 1))
        self.assertEqual(i, (len(self.dataset) - 1))
    else:
        for i, (batch_samples, batch_targets) in enumerate(loader):
            for sample, target in zip(batch_samples, batch_targets):
                for data_point_idx, data_point in enumerate(self.data):
                    if data_point.eq(sample).all():
                        self.assertFalse(found_data[data_point_idx])
                        found_data[data_point_idx] += 1
                        break
                # Same loop-variable-leak trick as in the unbatched branch.
                self.assertEqual(target, self.labels[data_point_idx])
                found_labels[data_point_idx] += 1
            self.assertEqual(sum(found_data.values()), (i + 1) * batch_size)
            self.assertEqual(sum(found_labels.values()), (i + 1) * batch_size)
        self.assertEqual(i, math.floor((len(self.dataset) - 1) / batch_size))
def _test_error(self, loader):
    """Iterate `loader` to exhaustion, counting NotImplementedErrors; expect
    exactly one error per batch of the underlying dataset."""
    it = iter(loader)
    errors = 0
    while True:
        try:
            next(it)
        except NotImplementedError:
            errors += 1
        except StopIteration:
            expected = math.ceil(float(len(loader.dataset)) / loader.batch_size)
            self.assertEqual(errors, expected)
            return
def test_error_in_init(self):
    # An exception raised in IterableDataset.__iter__ must surface to the
    # caller, for both single-process and worker loading.
    for num_workers in (0, 2):
        loader = self._get_data_loader(ErrorIterableDataset(), num_workers=num_workers)
        with self.assertRaisesRegex(RuntimeError, 'Error in __iter__'):
            list(iter(loader))
    # Likewise for an exception raised during worker initialization.
    loader = self._get_data_loader(self.dataset, num_workers=2,
                                   worker_init_fn=error_worker_init_fn)
    with self.assertRaisesRegex(RuntimeError, 'Error in worker_init_fn'):
        list(iter(loader))
def test_typing(self):
    """Dataset and DataLoader are generic; parameterizing them must not raise."""
    from typing import List
    # Make sure there is no TypeError
    class SomeDatasetClass(Dataset[List[torch.Tensor]]):
        pass

    def _create_dataloader(is_train: bool) -> DataLoader[List[torch.Tensor]]:
        pass
@unittest.skipIf(IS_SANDCASTLE, "subprocess doesn't work in FB internal CI")
@unittest.skipIf(IS_WINDOWS, "No 'resource' module on Windows")
def test_fd_limit_exceeded(self):
    # See NOTE [ DataLoader on Linux and open files limit ]
    # Lower RLIMIT_NOFILE in a subprocess and verify the DataLoader's error
    # message points users at `ulimit -n` / torch's set_sharing_strategy.
    import subprocess
    subprocess.check_output([sys.executable, '-c', """\
import torch
import resource
from torch.utils.data import DataLoader, IterableDataset

class RandomDataset(IterableDataset):
    def __init__(self, len, size):
        super(RandomDataset).__init__()
        self.len = len
        self.size = size

    def __iter__(self):
        return self

    def __next__(self):
        if self.len <= 0:
            raise StopIteration
        self.len -= 1
        return torch.randn(self.size)

try:
    keep_fds_alive = []
    resource.setrlimit(resource.RLIMIT_NOFILE, (100, 100))
    for random_t in DataLoader(RandomDataset(200, (2,2)), multiprocessing_context="fork",
                               num_workers=1):
        random_t.max(dim=0)
        keep_fds_alive.append(random_t)
except RuntimeError as e:
    assert "ulimit -n" in str(e)
    assert "set_sharing_strategy" in str(e)
"""])
def test_invalid_assign_after_init(self):
    # DataLoader forbids reassigning structural attributes once constructed.
    dl = self._get_data_loader(self.dataset)
    for attr in ('batch_size', 'sampler', 'batch_sampler', 'drop_last', 'dataset'):
        with self.assertRaises(ValueError):
            setattr(dl, attr, {})
def test_sequential_nonbatch(self):
    # batch_size=None disables auto-batching: samples come back one by one.
    self._test_sequential(self._get_data_loader(self.dataset, batch_size=None))
def test_sequential_batch(self):
    # Default batch size, then an explicit batch size of 2.
    self._test_sequential(self._get_data_loader(self.dataset))
    self._test_sequential(self._get_data_loader(self.dataset, batch_size=2))
def test_bulk_loading_nobatch(self):
    # With batch_size=None plus a sampler yielding index *lists*, the dataset
    # itself performs the batching (one __getitem__ call per batch).
    n = 35
    bs = 4
    dataset = BulkLoadingDataset(n)
    sampler = BulkLoadingSampler(dataset, batch_size=4)
    for num_workers in (0, 4):
        dl = self._get_data_loader(dataset, num_workers=num_workers, batch_size=None,
                                   sampler=sampler, pin_memory=TEST_CUDA)
        self.assertFalse(dl._auto_collation)
        samples = list(dl)
        self.assertEqual(samples[0].is_pinned(), TEST_CUDA)
        # Together the batches must cover every index exactly once.
        self.assertEqual(set(torch.cat(samples, 0).tolist()), set(range(n)))
def test_growing_dataset(self):
    # len(DataLoader) is computed lazily, so growing the underlying list
    # after construction must be reflected in both loaders.
    dataset = [torch.ones(4) for _ in range(4)]
    sequential_loader = self._get_data_loader(dataset, shuffle=False)
    shuffled_loader = self._get_data_loader(dataset, shuffle=True)
    dataset.append(torch.ones(4))
    self.assertEqual(len(sequential_loader), 5)
    self.assertEqual(len(shuffled_loader), 5)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_sequential_pin_memory(self):
    # Every tensor yielded by a pin_memory loader must be page-locked.
    loader = self._get_data_loader(self.dataset, batch_size=2, pin_memory=True)
    for sample, target in loader:
        self.assertTrue(sample.is_pinned())
        self.assertTrue(target.is_pinned())
def test_multiple_dataloaders(self):
    # Two live multi-worker loaders must be able to interleave fetches,
    # under every supported multiprocessing start method.
    for multiprocessing_context in supported_multiprocessing_contexts:
        it1 = iter(self._get_data_loader(self.dataset, num_workers=1))
        it2 = iter(self._get_data_loader(self.dataset, num_workers=2,
                                         multiprocessing_context=multiprocessing_context))
        next(it1)
        next(it1)
        next(it2)
        next(it2)
        next(it1)
        next(it2)
def test_segfault(self):
    # A worker that dies from SIGSEGV must surface as a RuntimeError naming
    # the killed pid (or OSError / access violation on Windows), and the
    # loader process must exit with a nonzero code.
    p = ErrorTrackingProcess(target=_test_segfault)
    p.start()
    p.join(JOIN_TIMEOUT)
    try:
        self.assertFalse(p.is_alive())
        self.assertNotEqual(p.exitcode, 0)
        if IS_WINDOWS:
            self.assertIsInstance(p.exception, OSError)
            self.assertRegex(str(p.exception), r'access violation reading ')
        else:
            self.assertIsInstance(p.exception, RuntimeError)
            self.assertRegex(str(p.exception), r'DataLoader worker \(pid \d+\) is killed by signal: ')
    finally:
        p.terminate()
# Tests if the child process forked by the DataLoader segfaults due to having more than 3 threads
# in the parent process after at least one set_num_threads invocation in the parent process.
# After forking, set_num_threads(1) in the child process entails handling some inherited data-structures
# of the Caffe2 thread-pool of the parent process, culminating in a segfault.
# Reference: https://github.com/pytorch/pytorch/issues/54752
@unittest.skipIf(IS_WINDOWS, "Needs fork")
def test_no_segfault(self):
    p = ErrorTrackingProcess(target=_test_no_segfault)
    p.start()
    p.join(JOIN_TIMEOUT)
    try:
        self.assertFalse(p.is_alive())
        # Any worker-killed-by-signal error here means the forked child segfaulted.
        if p.exception:
            self.assertIsInstance(p.exception, RuntimeError)
            self.assertRegex(str(p.exception), r'DataLoader worker \(pid \d+\) is killed by signal: ')
            self.fail("Segfault occurred in worker process after fork")
    finally:
        p.terminate()
def test_timeout(self):
    """A worker sleeping past `timeout` must raise a descriptive RuntimeError."""
    if TEST_CUDA and not NO_MULTIPROCESSING_SPAWN:
        # This test runs in a subprocess, which can only initialize CUDA with spawn.
        # _test_timeout_pin_memory with pin_memory=True initializes CUDA when the iterator is
        # constructed.
        targets = (_test_timeout, _test_timeout_pin_memory)
    else:
        targets = (_test_timeout,)
    for target in targets:
        p = ErrorTrackingProcess(target=target, args=(self.persistent_workers,))
        p.start()
        p.join(JOIN_TIMEOUT)
        try:
            self.assertFalse(p.is_alive())
            self.assertNotEqual(p.exitcode, 0)
            self.assertIsInstance(p.exception, RuntimeError)
            self.assertRegex(str(p.exception), r'DataLoader timed out after \d+ seconds')
        finally:
            p.terminate()
def test_large_sampler_indices(self):
    # Test that the data loader cleanly exit when the process errors
    #   1. having an reference to the iterator
    #   2. using a sampler that yields big elements s.t. _index_queues putters block
    #
    # More context: https://github.com/pytorch/pytorch/issues/48666
    p = ErrorTrackingProcess(target=_test_large_sampler_indices, args=(self.persistent_workers,))
    p.start()
    p.join(JOIN_TIMEOUT)
    try:
        self.assertFalse(p.is_alive())
        self.assertNotEqual(p.exitcode, 0)
        # The scripted error must be the one that surfaces — not a hang or
        # a shutdown-related failure.
        self.assertIsInstance(p.exception, RuntimeError)
        self.assertRegex(str(p.exception), r'My Error')
    finally:
        p.terminate()
def test_invalid_ctor_args_combinations(self):
    """Every invalid DataLoader constructor combination raises with a clear message."""
    # general
    with self.assertRaisesRegex(ValueError, "num_workers option should be non-negative"):
        self._get_data_loader(self.dataset, num_workers=-1)
    with self.assertRaisesRegex(ValueError, "timeout option should be non-negative"):
        self._get_data_loader(self.dataset, timeout=-1)

    # disable auto-batching
    with self.assertRaisesRegex(ValueError,
                                "batch_size=None option disables auto-batching and is mutually exclusive"):
        self._get_data_loader(self.dataset, batch_size=None, drop_last=True)

    # multiprocessing_context is only meaningful with workers, and must be a
    # valid start-method name or context object.
    valid_ctx = list(torch.multiprocessing.get_all_start_methods())[-1]
    with self.assertRaisesRegex(ValueError, r"multi-process loading \(num_workers > 0\), but got"):
        self._get_data_loader(self.dataset, num_workers=0, multiprocessing_context=valid_ctx)
    with self.assertRaisesRegex(ValueError, "should specify a valid start method in"):
        self._get_data_loader(self.dataset, num_workers=1, multiprocessing_context='bad')
    with self.assertRaisesRegex(TypeError, "multiprocessing_context option should be a valid context "):
        self._get_data_loader(self.dataset, num_workers=1, multiprocessing_context=object())

    # map-style
    sampler = torch.utils.data.SequentialSampler(self.dataset)
    batch_sampler = torch.utils.data.BatchSampler(sampler, 3, False)
    with self.assertRaisesRegex(ValueError, "sampler option is mutually exclusive with shuffle"):
        self._get_data_loader(self.dataset, batch_size=11, sampler=sampler, shuffle=True)
    with self.assertRaisesRegex(ValueError, "sampler option is mutually exclusive with shuffle"):
        self._get_data_loader(self.dataset, batch_sampler=batch_sampler, sampler=sampler, shuffle=True)
    with self.assertRaisesRegex(ValueError, "sampler option is mutually exclusive with shuffle"):
        # shuffle=3 is truthy, so it still conflicts with sampler
        self._get_data_loader(self.dataset, batch_sampler=batch_sampler, sampler=sampler, shuffle=3)
    with self.assertRaisesRegex(ValueError, "batch_sampler option is mutually exclusive with"):
        self._get_data_loader(self.dataset, batch_size=11, batch_sampler=batch_sampler)
    with self.assertRaisesRegex(ValueError, "batch_sampler option is mutually exclusive with"):
        self._get_data_loader(self.dataset, shuffle=True, batch_sampler=batch_sampler)
    with self.assertRaisesRegex(ValueError, "batch_sampler option is mutually exclusive with"):
        self._get_data_loader(self.dataset, drop_last=True, batch_sampler=batch_sampler)
    with self.assertRaisesRegex(ValueError, "batch_sampler option is mutually exclusive with"):
        self._get_data_loader(self.dataset, drop_last=3, batch_sampler=batch_sampler)

    # iterable-style
    dataset = CountingIterableDataset(20)
    with self.assertRaisesRegex(ValueError, "DataLoader with IterableDataset: expected unspecified shuffle"):
        self._get_data_loader(dataset, shuffle=True)
    with self.assertRaisesRegex(ValueError, "DataLoader with IterableDataset: expected unspecified shuffle"):
        self._get_data_loader(dataset, shuffle=3)
    with self.assertRaisesRegex(ValueError, "DataLoader with IterableDataset: expected unspecified sampler"):
        self._get_data_loader(dataset, sampler=torch.utils.data.SequentialSampler(dataset))
    with self.assertRaisesRegex(ValueError, "DataLoader with IterableDataset: expected unspecified sampler"):
        self._get_data_loader(dataset, sampler=3)
    with self.assertRaisesRegex(ValueError, "DataLoader with IterableDataset: expected unspecified batch_sampler"):
        self._get_data_loader(dataset, batch_sampler=torch.utils.data.BatchSampler(
            torch.utils.data.SequentialSampler(dataset), 3, False))
    with self.assertRaisesRegex(ValueError, "DataLoader with IterableDataset: expected unspecified batch_sampler"):
        self._get_data_loader(dataset, batch_sampler=3)
def test_builtin_collection_conversion(self):
    """Converting a DataLoader with list()/tuple() yields plain elements when
    auto-batching is off, and batched tensors when it is on, for both dataset
    styles and worker counts 0/1."""
    def check(dataset, coll_ty, num_workers):
        # no auto-batching
        fetched = coll_ty(self._get_data_loader(dataset, batch_size=None,
                                                num_workers=num_workers))
        self.assertEqual(fetched, coll_ty(range(20)))
        # auto-batching
        fetched = coll_ty(self._get_data_loader(dataset, batch_size=2,
                                                num_workers=num_workers))
        self.assertEqual(fetched,
                         coll_ty(torch.tensor([i, i + 1]) for i in range(0, 20, 2)))

    for coll_ty in (list, tuple):
        for num_workers in (0, 1):
            # map-style dataset
            check(CountingDataset(20), coll_ty, num_workers)
            # this IterableDataset isn't configured for each worker, so for
            # the equality test below to be valid, we cannot have more than 1 workers.
            assert num_workers in [0, 1], "invalid test"
            # iterable-style dataset
            check(CountingIterableDataset(20), coll_ty, num_workers)
def test_iterable_style_dataset(self):
    """End-to-end checks for IterableDataset support: single-process and
    multiprocessing loading, with and without auto-batching and drop_last,
    plus the length warning and graceful worker shutdown."""
    # [no auto-batching] single process loading
    dataset = CountingIterableDataset(20)
    dataloader = self._get_data_loader(dataset, batch_size=None)
    fetched = list(dataloader)
    self.assertEqual(len(fetched), 20)
    for i, d in enumerate(fetched):
        # non-batched should not convert ints into tensors
        self.assertIsInstance(d, int)
        self.assertEqual(d, i)
    # DataLoader should match len of the iterable-style dataset (if implemented)
    self.assertEqual(len(dataloader), len(dataset))

    # [no auto-batching] multiprocessing loading
    num_workers = 3
    sizes_for_all_workers = [0, 4, 20]
    expected = sorted(sum((list(range(s)) for s in sizes_for_all_workers), []))
    assert len(sizes_for_all_workers) == num_workers, 'invalid test case'
    for prefetch_factor in [2, 3, 4]:
        dataset = WorkerSpecificIterableDataset(sizes_for_all_workers)
        dataloader = self._get_data_loader(dataset, num_workers=num_workers, batch_size=None,
                                           worker_init_fn=set_faulthander_if_available,
                                           prefetch_factor=prefetch_factor)
        dataloader_iter = iter(dataloader)
        # worker arrival order is nondeterministic, so compare sorted output
        fetched = sorted(dataloader_iter)
        for a, b in zip(fetched, expected):
            # non-batched should not convert ints into tensors
            self.assertIsInstance(a, int)
            self.assertEqual(a, b)
        # DataLoader should match len of the iterable-style dataset (if implemented)
        self.assertEqual(len(dataloader), len(dataset))

        # When loading more than len(dataset) data, after accessing len(dataloader),
        # we should get a warning. See NOTE [ IterableDataset and __len__ ].
        dataset = CountingIterableDataset(20)
        dataloader = self._get_data_loader(dataset, num_workers=num_workers,
                                           worker_init_fn=set_faulthander_if_available,
                                           prefetch_factor=prefetch_factor)
        it = iter(dataloader)
        for _ in range(40):
            self.assertNotWarn(lambda: next(it), "Should not warn before accessing len(dataloader)")
        self.assertEqual(len(dataloader), len(dataset))
        self.assertEqual(len(dataloader), 20)
        it = iter(dataloader)
        for _ in range(20):
            self.assertNotWarn(lambda: next(it), "Should not warn before exceeding length")
        for _ in range(3):
            with self.assertWarnsRegex(
                    UserWarning,
                    r"but [0-9]+ samples have been fetched\. For multiprocessing data-loading, this",
                    msg="Should always warn after exceeding length"):
                next(it)

        # [no auto-batching] test that workers exit gracefully
        workers = dataloader_iter._workers
        del dataloader_iter
        del dataloader
        try:
            for w in workers:
                w.join(JOIN_TIMEOUT)
                self.assertFalse(w.is_alive())
                self.assertEqual(w.exitcode, 0)
        finally:
            # ensure no worker outlives the test even on failure
            for w in workers:
                w.terminate()

    # [auto-batching] single process loading
    dataset = CountingIterableDataset(20)
    fetched = list(self._get_data_loader(dataset, batch_size=7))
    self.assertEqual(len(fetched), 3)
    self.assertEqual(fetched[0].tolist(), list(range(7)))
    self.assertEqual(fetched[1].tolist(), list(range(7, 14)))
    self.assertEqual(fetched[2].tolist(), list(range(14, 20)))

    # [auto-batching] multiprocessing loading
    num_workers = 3
    sizes_for_all_workers = [0, 4, 20]
    expected = sorted(sum((list(range(s)) for s in sizes_for_all_workers), []))
    assert len(sizes_for_all_workers) == num_workers, 'invalid test case'
    for prefetch_factor in [2, 3, 4]:
        dataset = WorkerSpecificIterableDataset(sizes_for_all_workers)
        # worker 0 should return 0 batches
        # worker 1 should return 1 batches
        # worker 2 should return 3 batches
        dataloader = self._get_data_loader(dataset, num_workers=num_workers, batch_size=7, prefetch_factor=prefetch_factor)
        dataloader_iter = iter(dataloader)
        fetched = list(dataloader_iter)
        self.assertEqual(len(fetched), 4)
        # batch arrival order is nondeterministic, so compare as a set
        fetched = set(tuple(t.tolist()) for t in fetched)
        self.assertEqual(fetched, {tuple(range(4)), tuple(range(7)), tuple(range(7, 14)), tuple(range(14, 20))})

        # [auto-batching] test that workers exit gracefully
        workers = dataloader_iter._workers
        del dataloader_iter
        del dataloader
        try:
            for w in workers:
                w.join(JOIN_TIMEOUT)
                self.assertFalse(w.is_alive())
                self.assertEqual(w.exitcode, 0)
        finally:
            for w in workers:
                w.terminate()

    # [auto-batching & drop_last] single process loading
    dataset = CountingIterableDataset(20)
    fetched = list(self._get_data_loader(dataset, batch_size=7, drop_last=True))
    self.assertEqual(len(fetched), 2)
    self.assertEqual(fetched[0].tolist(), list(range(7)))
    self.assertEqual(fetched[1].tolist(), list(range(7, 14)))

    # [auto-batching & drop_last] multiprocessing loading
    num_workers = 3
    sizes_for_all_workers = [0, 4, 20]
    expected = sorted(sum((list(range(s)) for s in sizes_for_all_workers), []))
    assert len(sizes_for_all_workers) == num_workers, 'invalid test case'
    for prefetch_factor in [2, 3, 4]:
        dataset = WorkerSpecificIterableDataset(sizes_for_all_workers)
        # worker 0 should return 0 batches
        # worker 1 should return 1 batches
        # worker 2 should return 3 batches
        dataloader = self._get_data_loader(dataset, num_workers=num_workers, batch_size=7, drop_last=True,
                                           worker_init_fn=set_faulthander_if_available,
                                           prefetch_factor=prefetch_factor)
        dataloader_iter = iter(dataloader)
        fetched = list(dataloader_iter)
        # drop_last is applied per worker, so the two partial batches vanish
        self.assertEqual(len(fetched), 2)
        fetched = set(tuple(t.tolist()) for t in fetched)
        self.assertEqual(fetched, {tuple(range(7)), tuple(range(7, 14))})

        # [auto-batching & drop_last] test that workers exit gracefully
        workers = dataloader_iter._workers
        del dataloader_iter
        del dataloader
        try:
            for w in workers:
                w.join(JOIN_TIMEOUT)
                self.assertFalse(w.is_alive())
                self.assertEqual(w.exitcode, 0)
        finally:
            for w in workers:
                w.terminate()
def test_chain_iterable_style_dataset(self):
    """Chaining two IterableDatasets (via `+` or ChainDataset) yields the
    elements of the first followed by the second; chaining a map-style
    dataset must be rejected."""
    first = CountingIterableDataset(20)
    second = CountingIterableDataset(15)
    want = [*range(20), *range(15)]
    for workers in (0, 1):
        for chained in (first + second, ChainDataset([first, second])):
            got = list(self._get_data_loader(chained, num_workers=workers))
            self.assertEqual(len(got), len(want))
            for expected_value, item in zip(want, got):
                self.assertIsInstance(item, torch.Tensor)
                self.assertEqual(expected_value, item)
    # map-style datasets cannot participate in chaining
    with self.assertRaisesRegex(AssertionError, "ChainDataset only supports IterableDataset"):
        list(iter(first + self.dataset))
    with self.assertRaisesRegex(AssertionError, "ChainDataset only supports IterableDataset"):
        list(iter(ChainDataset([first, self.dataset])))
def test_multiprocessing_contexts(self):
    """Every supported multiprocessing context (both its string name and the
    `mp.get_context` object form) must yield identical batches from a
    3-worker DataLoader."""
    reference = [
        torch.arange(3),
        torch.arange(3, 6),
        torch.arange(6, 9),
        torch.arange(9, 11),
    ]
    counting_ds_n = 11
    dl_common_args = dict(num_workers=3, batch_size=3, pin_memory=(not TEST_CUDA))
    for ctx in supported_multiprocessing_contexts:
        # windows and jetson devices don't support sharing cuda tensor; ROCm does not yet fully support IPC
        if ctx in ['spawn', 'forkserver'] and TEST_CUDA and not IS_WINDOWS and not IS_JETSON:
            ds_cls = CUDACountingDataset
        else:
            ds_cls = CountingDataset
        self.assertEqual(
            reference, list(self._get_data_loader(ds_cls(counting_ds_n), multiprocessing_context=ctx, **dl_common_args)))
        if ctx is not None:
            # test ctx object
            ctx = mp.get_context(ctx)
            self.assertEqual(
                reference, list(self._get_data_loader(ds_cls(counting_ds_n), multiprocessing_context=ctx, **dl_common_args)))
def test_worker_seed(self):
    """Each of the 6 workers must report a distinct seed: one sample per
    worker, collected into a set whose size equals the worker count."""
    num_workers = 6
    batch_size = 1
    dataset = SynchronizedSeedDataset(num_workers, batch_size, num_workers)
    loader = self._get_data_loader(dataset, batch_size=batch_size, num_workers=num_workers)
    seeds = {batch[0] for batch in loader}
    self.assertEqual(len(seeds), num_workers)
def test_worker_seed_reproducibility(self):
    """With a fixed `generator` seed, two independently constructed
    DataLoaders must hand out the same set of worker seeds."""
    def get_dataloader():
        # note: closes over dataset/batch_size/num_workers defined below
        return DataLoader(dataset, batch_size=batch_size, num_workers=num_workers, generator=torch.Generator().manual_seed(42))

    num_workers = 6
    batch_size = 1
    dataset = SynchronizedSeedDataset(num_workers, batch_size, num_workers)
    self.assertEqual(set(int(batch) for batch in get_dataloader()), set(int(batch) for batch in get_dataloader()))
def test_worker_init_fn(self):
    """`worker_init_fn` must run in each worker before any loading; `init_fn`
    (defined elsewhere in this file) is expected to make every sample of
    SeedDataset come back as 12345."""
    dataset = SeedDataset(4)
    dataloader = self._get_data_loader(dataset, batch_size=2, num_workers=2,
                                       worker_init_fn=init_fn)
    for batch in dataloader:
        self.assertEqual(12345, batch[0])
        self.assertEqual(12345, batch[1])
def test_get_worker_info(self):
    """Run `_test_get_worker_info` in a subprocess and require a clean exit;
    the actual `get_worker_info()` assertions live in that helper."""
    p = ErrorTrackingProcess(target=_test_get_worker_info)
    p.start()
    p.join(JOIN_TIMEOUT)
    try:
        self.assertFalse(p.is_alive())
        self.assertEqual(p.exitcode, 0)
    finally:
        # make sure the subprocess never outlives the test
        p.terminate()
def test_shuffle(self):
    # shuffle=True, default batching; _test_shuffle checks the output is a
    # permutation of the dataset.
    self._test_shuffle(self._get_data_loader(self.dataset, shuffle=True))
def test_shuffle_batch_none(self):
    # shuffle with auto-batching disabled (batch_size=None)
    self._test_shuffle(DataLoader(self.dataset, batch_size=None, shuffle=True))
def test_shuffle_batch(self):
    # shuffle combined with auto-batching (batch_size=2)
    self._test_shuffle(self._get_data_loader(self.dataset, batch_size=2, shuffle=True))
def test_shuffle_reproducibility(self):
    """Two shuffled loaders built with the same generator seed must produce
    identical sequences, both in-process and with workers."""
    for workers in (0, 2):
        def build():
            return DataLoader(self.dataset, shuffle=True, num_workers=workers,
                              generator=torch.Generator().manual_seed(42))
        self.assertEqual(list(build()), list(build()))
def test_sequential_workers(self):
    # sequential order must be preserved even with 4 worker processes
    self._test_sequential(self._get_data_loader(self.dataset, num_workers=4))
def test_seqential_batch_workers(self):
    # NOTE: 'seqential' typo kept — renaming would change the test id.
    # sequential order with auto-batching and 4 workers
    self._test_sequential(self._get_data_loader(self.dataset, batch_size=2, num_workers=4))
def test_seqential_batch_workers_prefetch(self):
    # NOTE: 'seqential' typo kept — renaming would change the test id.
    # sequential order with batching, workers, and a non-default prefetch_factor
    self._test_sequential(DataLoader(self.dataset, batch_size=2, num_workers=4, prefetch_factor=3))
def test_shuffle_workers(self):
    # shuffling with 4 worker processes
    self._test_shuffle(self._get_data_loader(self.dataset, shuffle=True, num_workers=4))
def test_shuffle_batch_workers(self):
    # shuffling with auto-batching and 4 worker processes
    self._test_shuffle(self._get_data_loader(self.dataset, batch_size=2, shuffle=True, num_workers=4))
def test_shuffle_batch_workers_prefetch(self):
    # shuffling with batching, workers, and a non-default prefetch_factor
    self._test_shuffle(DataLoader(self.dataset, batch_size=2, shuffle=True, num_workers=4, prefetch_factor=3))
def test_random_sampler(self):
    """Sanity-check RandomSampler with and without replacement, plus its
    argument validation."""
    from collections import Counter
    from torch.utils.data import RandomSampler

    def sample_stat(sampler, num_samples):
        # Returns (number of indices drawn more than once, min index drawn,
        # max index drawn, total number of draws).
        counts = Counter(sampler)
        count_repeated = sum(val > 1 for val in counts.values())
        return (count_repeated, min(counts.keys()), max(counts.keys()), sum(counts.values()))

    # test sample with replacement
    n = len(self.dataset) + 1  # ensure at least one sample is drawn more than once
    sampler_with_replacement = RandomSampler(self.dataset, replacement=True, num_samples=n)
    count_repeated, minval, maxval, count_total = sample_stat(sampler_with_replacement, n)
    # by pigeonhole, n > len(dataset) draws with replacement must repeat
    self.assertTrue(count_repeated > 0)
    self.assertTrue(minval >= 0)
    self.assertTrue(maxval < len(self.dataset))
    self.assertTrue(count_total == n)

    # test sample without replacement
    sampler_without_replacement = RandomSampler(self.dataset)
    count_repeated, minval, maxval, count_total = sample_stat(sampler_without_replacement, len(self.dataset))
    self.assertTrue(count_repeated == 0)
    self.assertTrue(minval == 0)
    self.assertTrue(maxval == len(self.dataset) - 1)
    self.assertTrue(count_total == len(self.dataset))

    # raise error when replacement=False and num_samples is not None
    self.assertRaises(ValueError, lambda: RandomSampler(self.dataset, num_samples=len(self.dataset)))
    self.assertRaises(ValueError, lambda: RandomSampler(self.dataset, num_samples=0))

    # raise error when replacement is non-boolean
    with self.assertRaisesRegex(TypeError, "replacement should be a boolean value, but got replacement=0"):
        RandomSampler(self.dataset, replacement=0)
def test_random_sampler_len_with_replacement(self):
    """With replacement and an explicit num_samples, the sampler's reported
    length, its iteration count, and DataLoader lengths must all agree."""
    from torch.utils.data import RandomSampler
    # add 5 extra samples
    num_samples = len(self.dataset) + 5
    sampler = RandomSampler(self.dataset,
                            replacement=True,
                            num_samples=num_samples)
    # test len method
    self.assertEqual(num_samples, len(sampler))

    # test with iteration
    count_num_samples = sum(1 for _ in sampler)
    self.assertEqual(num_samples, count_num_samples)

    # test with dataloader, batch_size = 1
    batch_size = 1
    count_num_samples_in_data_loader = len(self._get_data_loader(
        self.dataset, batch_size=batch_size, sampler=sampler))
    self.assertEqual(num_samples, count_num_samples_in_data_loader)

    # test with dataloader, batch_size = 6
    batch_size = 6
    count_num_samples_in_data_loader = len(self._get_data_loader(
        self.dataset, batch_size=batch_size, sampler=sampler))
    # loader length rounds up: the last batch is partial
    self.assertEqual(int(math.ceil(float(num_samples) / batch_size)),
                     count_num_samples_in_data_loader)
def test_distributed_sampler_invalid_rank(self):
    """DistributedSampler must reject ranks outside [0, num_replicas)."""
    from torch.utils.data.distributed import DistributedSampler
    dataset = torch.IntTensor(range(10))
    for bad_rank in (3, -1):  # rank == num_replicas, and a negative rank
        with self.assertRaisesRegex(ValueError, "Invalid rank"):
            DistributedSampler(dataset, 3, bad_rank)
def test_duplicating_data_with_drop_last(self):
    """With drop_last=True, the shards produced by DistributedSampler across
    all ranks must not contain duplicated samples."""
    from torch.utils.data.distributed import DistributedSampler
    num_processes = 4
    num_batches = 9
    data_set = torch.IntTensor(range(num_batches))
    scanned_data = torch.IntTensor([])
    for i in range(num_processes):
        # simulate rank i of a num_processes-wide job
        s = DistributedSampler(data_set, num_processes, i)
        d_loader = self._get_data_loader(data_set, batch_size=int(num_batches / num_processes), drop_last=True, sampler=s)
        for data in d_loader:
            scanned_data = torch.cat((scanned_data, data), 0)
    # no duplicates: unique() must not shrink the collected tensor
    self.assertEqual(scanned_data.size(), scanned_data.unique().size())
def test_sampler_reproducibility(self):
    """Samplers must be reproducible both via an explicit `generator` seed
    and via the global `torch.manual_seed`, including when two iterators
    over the same sampler are advanced in lockstep."""
    from torch.utils.data import RandomSampler, WeightedRandomSampler, SubsetRandomSampler

    weights = [0.1, 0.9, 0.4, 0.7, 3.0, 0.6]
    for fn in (
        lambda: RandomSampler(self.dataset, num_samples=5, replacement=True, generator=torch.Generator().manual_seed(42)),
        lambda: RandomSampler(self.dataset, replacement=False, generator=torch.Generator().manual_seed(42)),
        lambda: WeightedRandomSampler(weights, num_samples=5, replacement=True, generator=torch.Generator().manual_seed(42)),
        lambda: WeightedRandomSampler(weights, num_samples=5, replacement=False, generator=torch.Generator().manual_seed(42)),
        lambda: SubsetRandomSampler(range(10), generator=torch.Generator().manual_seed(42)),
    ):
        # two freshly-seeded samplers yield identical sequences
        self.assertEqual(list(fn()), list(fn()))

    for sampler in (
        RandomSampler(self.dataset, num_samples=5, replacement=True),
        RandomSampler(self.dataset, replacement=False),
        WeightedRandomSampler(weights, num_samples=5, replacement=True),
        WeightedRandomSampler(weights, num_samples=5, replacement=False),
        SubsetRandomSampler(range(10)),
    ):
        # global seeding: two full passes must match across re-seeds
        torch.manual_seed(0)
        l1 = list(sampler) + list(sampler)

        torch.manual_seed(0)
        l2 = list(sampler) + list(sampler)
        self.assertEqual(l1, l2)

        # interleaved iterators: seeding right before the first next() of
        # each must make both iterators emit the same sequence
        its = (iter(sampler), iter(sampler))
        ls = ([], [])
        for idx in range(len(sampler)):
            for i in range(2):
                if idx == 0:
                    torch.manual_seed(0)
                ls[i].append(next(its[i]))
        self.assertEqual(ls[0], ls[1])
def _test_sampler(self, **kwargs):
    """Shared helper: drive a DataLoader using a plain iterable of indices
    (2..11) as `sampler` and check each size-2 batch against self.data;
    `kwargs` forwards loader options such as num_workers."""
    indices = range(2, 12)  # using a regular iterable
    dl = self._get_data_loader(self.dataset, sampler=indices, batch_size=2, **kwargs)
    self.assertEqual(len(dl), 5)
    for i, (input, _target) in enumerate(dl):
        self.assertEqual(len(input), 2)
        self.assertEqual(input, self.data[i * 2 + 2:i * 2 + 4])
def test_sampler(self):
    """Run the shared sampler checks in-process, with 4 workers, and (where
    spawn is available) with the 'spawn' multiprocessing context."""
    self._test_sampler()
    self._test_sampler(num_workers=4)
    if not NO_MULTIPROCESSING_SPAWN:
        # Bug fix: this previously called self._test_batch_sampler (a
        # copy-paste from test_batch_sampler below), so the plain-sampler
        # path was never exercised under the spawn context.
        self._test_sampler(num_workers=4, multiprocessing_context='spawn')
def _test_batch_sampler(self, **kwargs):
    """Shared helper: drive a DataLoader with a plain iterable of index
    tuples as `batch_sampler`, alternating batch sizes 2 and 3, and check
    each batch's contents; `kwargs` forwards loader options."""
    # [(0, 1), (2, 3, 4), (5, 6), (7, 8, 9), ...]
    batches = []  # using a regular iterable
    for i in range(0, 20, 5):
        batches.append(tuple(range(i, i + 2)))
        batches.append(tuple(range(i + 2, i + 5)))

    dl = self._get_data_loader(self.dataset, batch_sampler=batches, **kwargs)
    self.assertEqual(len(dl), 8)
    for i, (input, _target) in enumerate(dl):
        if i % 2 == 0:
            # even batches carry 2 elements
            offset = i * 5 // 2
            self.assertEqual(len(input), 2)
            self.assertEqual(input, self.data[offset:offset + 2])
        else:
            # odd batches carry 3 elements
            offset = i * 5 // 2
            self.assertEqual(len(input), 3)
            self.assertEqual(input, self.data[offset:offset + 3])
def test_batch_sampler(self):
    """Run the shared batch-sampler checks in-process, with 4 workers, and
    (where spawn is available) with the 'spawn' multiprocessing context."""
    self._test_batch_sampler()
    self._test_batch_sampler(num_workers=4)
    if not NO_MULTIPROCESSING_SPAWN:
        self._test_batch_sampler(num_workers=4, multiprocessing_context='spawn')
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_shuffle_pin_memory(self):
    """With pin_memory=True, every batch tensor must land in pinned memory."""
    loader = self._get_data_loader(self.dataset, batch_size=2, shuffle=True, num_workers=4, pin_memory=True)
    for input, target in loader:
        self.assertTrue(input.is_pinned())
        self.assertTrue(target.is_pinned())
@unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
def test_numpy(self):
    """NumPy float64 samples must collate into a DoubleTensor with the
    batch dimension prepended."""
    import numpy as np

    class TestDataset(torch.utils.data.Dataset):
        def __getitem__(self, i):
            return np.ones((2, 3, 4)) * i

        def __len__(self):
            return 1000

    loader = self._get_data_loader(TestDataset(), batch_size=12)
    batch = next(iter(loader))
    self.assertIsInstance(batch, torch.DoubleTensor)
    self.assertEqual(batch.size(), torch.Size([12, 2, 3, 4]))
@unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
def test_numpy_gen_state(self):
    """`_generate_state` must reproduce NumPy's SeedSequence-derived worker
    RNG states for a table of known (worker_id, base_seed) pairs."""
    from torch.utils.data._utils.worker import _generate_state
    # Using NumPy generated states as the reference to test `_generate_state`
    # having the same result.
    # Test case: ((worker_id, base_seed), expected_state)
    test_cases = [
        ((4, 13434589827475259383), (2884386318, 1088094898, 3523808998, 3860348662)),
        ((1, 15014285634777110771), (1934848465, 763213760, 2959016433, 179751970)),
        ((10, 978296274032934101), (1759791917, 3550927336, 1225977135, 1036538043)),
        ((12, 11868770762134256968), (3974661794, 3331131333, 3630387033, 2885815368)),
        ((9, 15378787925219019706), (3815056996, 3162224466, 2735102421, 3190253477)),
        ((5, 9055612723125076328), (3522565701, 3368424109, 959377806, 621878693)),
        ((15, 14617792358407278405), (3402479508, 1588702753, 1169536393, 3675067356)),
        ((9, 17363320784006640087), (957989458, 2518334477, 1421725660, 3086155459)),
        ((12, 480002904169484764), (2732851467, 1762620729, 4055801988, 1277640511)),
        ((15, 16803975943592702950), (3479415043, 4022359553, 295994005, 3358606349)),
        ((9, 11704776406047813044), (1968928009, 710113752, 2442656196, 1587420279)),
        ((10, 16357891985431864516), (1271733898, 4197047399, 3727213786, 2338547348)),
        ((2, 17423369006318065007), (544294336, 1911284083, 3299147734, 3231058347)),
        ((2, 2889492011444113593), (3721591783, 2595811276, 2212881745, 977682627)),
        ((0, 8979703111668486195), (4276723937, 2556068849, 2962827292, 233130238)),
        ((6, 6269787272229682235), (2548857855, 1216457374, 1012973562, 2999759647))
    ]
    for (worker_id, base_seed), exp in test_cases:
        self.assertEqual(exp, _generate_state(base_seed, worker_id))
def test_error(self):
    # dataset __getitem__ errors must propagate from single-process loading
    self._test_error(self._get_data_loader(ErrorDataset(100), batch_size=2, shuffle=True))
def test_error_workers(self):
    # dataset __getitem__ errors must propagate from worker processes too
    self._test_error(self._get_data_loader(ErrorDataset(41), batch_size=2, shuffle=True, num_workers=4))
@unittest.skipIf(IS_WINDOWS, "FIXME: stuck test")
def test_partial_workers(self):
    r"""Check that workers exit even if the iterator is not exhausted."""
    if TEST_CUDA:
        pin_memory_configs = (True, False)
    else:
        pin_memory_configs = (False,)

    for pin_memory in pin_memory_configs:
        loader = iter(self._get_data_loader(self.dataset, batch_size=2, num_workers=4, pin_memory=pin_memory))
        workers = loader._workers
        if pin_memory:
            pin_memory_thread = loader._pin_memory_thread
        # abandon the iterator after 11 batches (i runs 0..10)
        for i, _ in enumerate(loader):
            if i == 10:
                break
        assert i == 10
        # dropping the iterator must trigger worker shutdown
        del loader
        for w in workers:
            w.join(JOIN_TIMEOUT)
            self.assertFalse(w.is_alive(), 'subprocess not terminated')
        if pin_memory:
            pin_memory_thread.join(JOIN_TIMEOUT)
            self.assertFalse(pin_memory_thread.is_alive())
# Takes 2.5min to finish, see https://github.com/pytorch/pytorch/issues/46065
@skipIfRocm
@unittest.skipIf(not HAS_PSUTIL, "psutil not found")
@slowTest
def test_proper_exit(self):
    # Exhaustively checks that the loader process and all its workers
    # terminate, for every combination of dataset style, worker usage,
    # pin_memory, iterator-reference holding, and induced failure mode.
    (r'''There might be ConnectionResetError or leaked semaphore warning '''
     r'''(due to dirty process exit), but they are all safe to ignore''')

    # TODO: test the case where the pin_memory_thread triggers an
    #       error/fatal signal. I haven't found out how to properly do that.

    for is_iterable_dataset, use_workers, pin_memory, hold_iter_reference in \
            itertools.product([True, False], repeat=4):
        # `hold_iter_reference` specifies whether we hold a reference to the
        # iterator. This is interesting because Python3 error traces holds a
        # reference to the frames, which hold references to all the local
        # variables including the iterator, and then the iterator dtor may
        # not be called before process end. It is important to see that the
        # processes still exit in both cases.

        if pin_memory and (not TEST_CUDA or NO_MULTIPROCESSING_SPAWN or IS_WINDOWS):
            # This test runs in a subprocess, which can only initialize CUDA with spawn.
            # DataLoader with pin_memory=True initializes CUDA when its iterator is constructed.
            # For windows, pin_memory sometimes causes CUDA oom.
            continue

        # `exit_method` controls the way the loader process ends.
        # - `*_kill` means that `*` is killed by OS.
        # - `*_error` means that `*` raises an error.
        # - `None` means that no error happens.
        # In all cases, all processes should end properly.
        if use_workers:
            exit_methods = [None, 'loader_error', 'loader_kill', 'worker_error', 'worker_kill']
            persistent_workers = self.persistent_workers
        else:
            exit_methods = [None, 'loader_error', 'loader_kill']
            persistent_workers = False

        for exit_method in exit_methods:
            if exit_method == 'worker_kill':
                # FIXME: This sometimes hangs. See #16608.
                continue

            # human-readable description of this parameter combination,
            # used in every failure message below
            desc = []
            desc.append('is_iterable_dataset={}'.format(is_iterable_dataset))
            desc.append('use_workers={}'.format(use_workers))
            desc.append('pin_memory={}'.format(pin_memory))
            desc.append('hold_iter_reference={}'.format(hold_iter_reference))
            desc.append('exit_method={}'.format(exit_method))
            desc = 'test_proper_exit with ' + ', '.join(desc)

            # Event that the loader process uses to signal testing process
            # that various things are setup, including that the worker pids
            # are specified in `worker_pids` array.
            loader_setup_event = mp.Event()

            # Event that this process has finished setting up, and the
            # loader process can now proceed to trigger error events or
            # finish normally.
            tester_setup_event = mp.Event()

            loader_p = ErrorTrackingProcess(target=_test_proper_exit,
                                            args=(is_iterable_dataset, use_workers, pin_memory,
                                                  exit_method, hold_iter_reference,
                                                  loader_setup_event, tester_setup_event,
                                                  persistent_workers),
                                            disable_stderr=False)
            loader_p.start()
            loader_psutil_p = psutil.Process(loader_p.pid)

            # Wait for loader process to set everything up, e.g., starting
            # workers.
            loader_setup_event.wait(timeout=JOIN_TIMEOUT)
            if not loader_setup_event.is_set():
                fail_msg = desc + ': loader process failed to setup within given time'
                if loader_p.exception is not None:
                    fail_msg += ', and had exception {}'.format(loader_p.exception)
                elif not loader_p.is_alive():
                    fail_msg += ', and exited with code {} but had no exception'.format(loader_p.exitcode)
                else:
                    fail_msg += ', and is still alive.'
                if loader_p.is_alive():
                    # this may kill the process, needs to run after the above lines
                    loader_p.print_traces_of_all_threads()
                self.fail(fail_msg)

            # We are certain that the workers have started now.
            worker_psutil_ps = loader_psutil_p.children()

            def fail(reason):
                # assemble a detailed diagnostic (process/worker state dumps)
                # before failing the test
                report_psutil_attrs = ['pid', 'name', 'cpu_times', 'io_counters',
                                       'memory_full_info', 'num_ctx_switches',
                                       'open_files', 'threads', 'status',
                                       'nice', 'ionice']
                if reason is None:
                    err_msg = desc
                else:
                    err_msg = '{}: {}'.format(desc, reason)
                err_msg += '\nLoader info:\n\t'
                if loader_psutil_p.is_running():
                    err_msg += str(loader_psutil_p.as_dict(attrs=report_psutil_attrs))
                    # this may kill the process, needs to run after the above line
                    loader_p.print_traces_of_all_threads()
                else:
                    err_msg += 'exited with code {}'.format(loader_p.exitcode)
                if use_workers:
                    err_msg += '\nWorker(s) info:'
                    for idx, worker_psutil_p in enumerate(worker_psutil_ps):
                        err_msg += '\n\tWorker {}:\n\t\t'.format(idx)
                        if worker_psutil_p.is_running():
                            err_msg += str(worker_psutil_p.as_dict(attrs=report_psutil_attrs))
                            # this may kill the process, needs to run after the above line
                            print_traces_of_all_threads(worker_psutil_p.pid)
                        else:
                            err_msg += 'exited with unknown code'
                self.fail(err_msg)

            tester_setup_event.set()

            try:
                loader_p.join(JOIN_TIMEOUT + MP_STATUS_CHECK_INTERVAL)
                if loader_p.is_alive():
                    fail_reason = 'loader process did not terminate'
                    if loader_p.exception is not None:
                        fail(fail_reason + ', and had exception {}'.format(loader_p.exception))
                    else:
                        fail(fail_reason + ', and had no exception')
                _, alive = psutil.wait_procs(worker_psutil_ps, timeout=(MP_STATUS_CHECK_INTERVAL + JOIN_TIMEOUT))
                if len(alive) > 0:
                    fail('worker process (pid(s) {}) did not terminate'.format(
                        ', '.join(str(p.pid) for p in alive)))
                if exit_method is None:
                    if loader_p.exitcode != 0:
                        fail('loader process had nonzero exitcode {}'.format(loader_p.exitcode))
                else:
                    if loader_p.exitcode == 0:
                        fail('loader process had zero exitcode')
                    if exit_method == 'loader_error':
                        if not isinstance(loader_p.exception, RuntimeError) or \
                                'Loader error' not in str(loader_p.exception):
                            fail('loader process did not raise expected exception, but had {}'.format(
                                loader_p.exception))
                    elif exit_method == 'worker_kill':
                        if isinstance(loader_p.exception, RuntimeError):
                            if 'DataLoader worker (pid' not in str(loader_p.exception):
                                fail('loader process did not raise expected exception, but had {}'.format(
                                    loader_p.exception))
                        elif isinstance(loader_p.exception, ConnectionRefusedError):
                            # Sometimes, when the worker is being killed and is freeing its
                            # resources, the unpickling in loader process will be met an
                            # a `ConnectionRefusedError` as it can not open a socket to receive
                            # resource. In such cases, the worker may not have fully exited,
                            # and the loader can't know this via `is_alive` check or `SIGCHLD`
                            # handler. So we permit this as an allowed error as well.
                            # After all, we are happy as long as it terminates.
                            pass
                        else:
                            fail('loader process did not raise expected exception, but had {}'.format(
                                loader_p.exception))
                    elif exit_method == 'worker_error':
                        if not isinstance(loader_p.exception, RuntimeError) or \
                                'Worker error' not in str(loader_p.exception):
                            fail('loader process did not raise expected exception, but had {}'.format(
                                loader_p.exception))
            finally:
                loader_p.terminate()
def test_len(self):
    """len() of the dataset/loader must agree with the number of items the
    iterator actually produces (batch_size 2 -> 50 batches, 3 -> 34)."""
    def assert_length(iterable, expected_len):
        # reported length ...
        self.assertEqual(len(iterable), expected_len)
        # ... and observed length from a full pass
        self.assertEqual(sum(1 for _ in iterable), expected_len)

    assert_length(self.dataset, 100)
    assert_length(self._get_data_loader(self.dataset, batch_size=2), 50)
    assert_length(self._get_data_loader(self.dataset, batch_size=3), 34)
def test_iterabledataset_len(self):
    """For an IterableDataset that implements __len__ (here, 10), DataLoader
    reports ceil(len / batch_size), or floor(len / batch_size) when
    drop_last=True."""
    class IterableDataset(torch.utils.data.IterableDataset):
        def __len__(self):
            return 10

        def __iter__(self):
            return iter(range(10))

    cases = [
        (1, False, 10),
        (1, True, 10),
        (2, False, 5),
        (2, True, 5),
        (3, False, 4),
        (3, True, 3),
    ]
    for batch_size, drop_last, expected_len in cases:
        iterable_loader = DataLoader(IterableDataset(), batch_size=batch_size, drop_last=drop_last)
        self.assertEqual(len(iterable_loader), expected_len)
@unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
def test_numpy_scalars(self):
    """Each NumPy scalar dtype must collate into the matching torch tensor
    type."""
    import numpy as np

    class ScalarDataset(torch.utils.data.Dataset):
        def __init__(self, dtype):
            self.dtype = dtype

        def __getitem__(self, i):
            # a zero-valued scalar of the requested dtype
            return self.dtype()

        def __len__(self):
            return 4

    # numpy dtype -> expected collated tensor type
    dtypes = {
        np.float64: torch.DoubleTensor,
        np.float32: torch.FloatTensor,
        np.float16: torch.HalfTensor,
        np.int64: torch.LongTensor,
        np.int32: torch.IntTensor,
        np.int16: torch.ShortTensor,
        np.int8: torch.CharTensor,
        np.uint8: torch.ByteTensor,
    }
    for dt, tt in dtypes.items():
        dset = ScalarDataset(dt)
        loader = self._get_data_loader(dset, batch_size=2)
        batch = next(iter(loader))
        self.assertIsInstance(batch, tt)
def test_default_convert_mapping_keep_type(self):
    # default_convert must preserve custom Mapping subclasses
    data = CustomDict({"a": 1, "b": 2})
    converted = _utils.collate.default_convert(data)
    self.assertEqual(converted, data)
def test_default_convert_sequence_keep_type(self):
    # default_convert must preserve custom Sequence subclasses
    data = CustomList([1, 2, 3])
    converted = _utils.collate.default_convert(data)
    self.assertEqual(converted, data)
def test_default_convert_sequence_dont_keep_type(self):
    # range is not re-constructible from elements, so it falls back to list
    data = range(2)
    converted = _utils.collate.default_convert(data)
    self.assertEqual(converted, [0, 1])
def test_default_collate_dtype(self):
    """default_collate must map Python scalars to tensors of the expected
    dtype (int -> int64, float -> float64, bool -> bool) and leave strings
    untouched."""
    arr = [1, 2, -1]
    collated = _utils.collate.default_collate(arr)
    self.assertEqual(collated, torch.tensor(arr))
    self.assertEqual(collated.dtype, torch.int64)

    arr = [1.1, 2.3, -0.9]
    collated = _utils.collate.default_collate(arr)
    # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
    self.assertEqualIgnoreType(collated, torch.tensor(arr))
    self.assertEqual(collated.dtype, torch.float64)

    arr = [True, False]
    collated = _utils.collate.default_collate(arr)
    self.assertEqual(collated, torch.tensor(arr))
    self.assertEqual(collated.dtype, torch.bool)

    # Should be a no-op
    arr = ['a', 'b', 'c']
    self.assertEqual(arr, _utils.collate.default_collate(arr))
def test_default_collate_mapping_keep_type(self):
    # collating a batch of custom Mappings must return the same Mapping type
    batch = [CustomDict({"a": 1, "b": 2}), CustomDict({"a": 3, "b": 4})]
    collated = _utils.collate.default_collate(batch)

    expected = CustomDict({"a": torch.tensor([1, 3]), "b": torch.tensor([2, 4])})
    self.assertEqual(collated, expected)
def test_default_collate_sequence_keep_type(self):
    # collating a batch of custom Sequences must return the same Sequence
    # type, transposed element-wise
    batch = [CustomList([1, 2, 3]), CustomList([4, 5, 6])]
    collated = _utils.collate.default_collate(batch)

    expected = CustomList([
        torch.tensor([1, 4]),
        torch.tensor([2, 5]),
        torch.tensor([3, 6]),
    ])
    self.assertEqual(collated, expected)
def test_default_collate_sequence_dont_keep_type(self):
    # ranges cannot be rebuilt from elements, so the result is a plain list
    batch = [range(2), range(2)]
    collated = _utils.collate.default_collate(batch)

    self.assertEqual(collated, [torch.tensor([0, 0]), torch.tensor([1, 1])])
@unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
def test_default_collate_bad_numpy_types(self):
    """1-D string arrays pass through unchanged; nested string arrays and
    object arrays must raise TypeError."""
    import numpy as np

    # Should be a no-op
    arr = np.array(['a', 'b', 'c'])
    self.assertEqual(arr, _utils.collate.default_collate(arr))

    arr = np.array([[['a', 'b', 'c']]])
    self.assertRaises(TypeError, lambda: _utils.collate.default_collate(arr))

    arr = np.array([object(), object(), object()])
    self.assertRaises(TypeError, lambda: _utils.collate.default_collate(arr))

    arr = np.array([[[object(), object(), object()]]])
    self.assertRaises(TypeError, lambda: _utils.collate.default_collate(arr))
@unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
def test_default_collate_numpy_memmap(self):
    # collating rows of a read-only numpy memmap must produce a regular
    # tensor holding the same data
    import numpy as np

    with tempfile.TemporaryFile() as f:
        arr = np.array([[0, 1], [2, 3], [4, 5], [6, 7]])
        arr_memmap = np.memmap(f, dtype=arr.dtype, mode='w+', shape=arr.shape)
        arr_memmap[:] = arr[:]
        arr_new = np.memmap(f, dtype=arr.dtype, mode='r', shape=arr.shape)
        tensor = _utils.collate.default_collate(list(arr_new))

    self.assertTrue((tensor == tensor.new_tensor([[0, 1], [2, 3], [4, 5], [6, 7]])).all().item())
def test_default_collate_bad_sequence_type(self):
    # elements of unequal length cannot be collated, in either order
    batch = [['X'], ['X', 'X']]
    self.assertRaises(RuntimeError, lambda: _utils.collate.default_collate(batch))
    self.assertRaises(RuntimeError, lambda: _utils.collate.default_collate(batch[::-1]))
@unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
def test_default_collate_shared_tensor(self):
    """default_collate must move results into shared memory only when it
    believes it runs inside a worker process."""
    import numpy as np
    t_in = torch.zeros(1)
    n_in = np.zeros(1)

    self.assertEqual(t_in.is_shared(), False)

    # outside a worker: no shared memory
    self.assertEqual(_utils.collate.default_collate([t_in]).is_shared(), False)
    self.assertEqual(_utils.collate.default_collate([n_in]).is_shared(), False)

    # FIXME: fix the following hack that makes `default_collate` believe
    #        that it is in a worker process (since it tests
    #        `get_worker_info() != None`), even though it is not.
    old = _utils.worker._worker_info
    try:
        _utils.worker._worker_info = 'x'
        self.assertEqual(_utils.collate.default_collate([t_in]).is_shared(), True)
        self.assertEqual(_utils.collate.default_collate([n_in]).is_shared(), True)
    finally:
        # always restore the real worker info
        _utils.worker._worker_info = old
def test_excessive_thread_creation_warning(self):
    # requesting an absurd worker count (1000) must emit a UserWarning
    with self.assertWarnsRegex(
        UserWarning,
            r"excessive worker creation might get DataLoader running slow or even freeze"):
        dataloader = DataLoader(self.dataset, batch_size=2, num_workers=1000)
@unittest.skipIf(
    TEST_WITH_TSAN,
    "Fails with TSAN with the following error: starting new threads after multi-threaded "
    "fork is not supported. Dying (set die_after_fork=0 to override)")
class TestDataLoader2(TestCase):
    """DataLoader2 must produce the same output as DataLoader for the same
    datapipe, in both process- and thread-parallel modes."""

    @skipIfNoDill
    def test_basics(self):
        # TODO(VitalyFedyunin): This test will start breaking if we remove guaranteed order
        # of traversing workers
        dp = IterableWrapper(list(range(1000)))
        dl = DataLoader(dp, batch_size=3, collate_fn=lambda x: x, num_workers=2)
        dl2 = DataLoader2(dp, batch_size=3, collate_fn=lambda x: x, num_workers=2)
        dl2_threading = DataLoader2(dp, batch_size=3, collate_fn=lambda x: x, num_workers=2, parallelism_mode='thread')
        self.assertEqual(list(dl), list(dl2))
        self.assertEqual(list(dl), list(dl2_threading))
@unittest.skipIf(
    TEST_WITH_TSAN,
    "Fails with TSAN with the following error: starting new threads after multi-threaded "
    "fork is not supported. Dying (set die_after_fork=0 to override)")
class TestDataLoader2_EventLoop(TestCase):
    """Exercises the request/response event loop used by DataLoader2."""
    @skipIfNoDill
    def test_basic_threading(self):
        def clean_me(process, req_queue, res_queue):
            # ask the event loop to terminate and wait for its acknowledgement
            req_queue.put(communication.messages.TerminateRequest())
            _ = res_queue.get()
            process.join()
        it = list(range(100))
        numbers_dp = IterableWrapper(it)
        (process, req_queue, res_queue, _thread_local_datapipe) = communication.eventloop.SpawnThreadForDataPipeline(numbers_dp)
        process.start()
        # drive the remote datapipe through the queue-based protocol client
        local_datapipe = communication.iter.QueueWrapper(
            communication.protocol.IterDataPipeQueueProtocolClient(req_queue, res_queue))
        actual = list(local_datapipe)
        clean_me(process, req_queue, res_queue)
        self.assertEqual(list(range(100)), actual)
class StringDataset(Dataset):
    """Yields (character, index) pairs drawn from the fixed string '12345'."""

    def __init__(self):
        self.s = '12345'

    def __getitem__(self, ndx):
        return self.s[ndx], ndx

    def __len__(self):
        return len(self.s)
@unittest.skipIf(
    TEST_WITH_TSAN,
    "Fails with TSAN with the following error: starting new threads after multi-threaded "
    "fork is not supported. Dying (set die_after_fork=0 to override)")
class TestStringDataLoader(TestCase):
    """Loading non-tensor (string) elements alongside pinned tensors."""
    def setUp(self):
        super(TestStringDataLoader, self).setUp()
        self.dataset = StringDataset()

    @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
    def test_shuffle_pin_memory(self):
        loader = DataLoader(self.dataset, batch_size=2, shuffle=True, num_workers=4, pin_memory=True)
        for (s, n) in loader:
            # strings pass through collation untouched; only tensors are pinned
            self.assertIsInstance(s[0], str)
            self.assertTrue(n.is_pinned())
class DictDataset(Dataset):
    """Four samples, each a dict holding a tensor and a nested dict."""

    def __len__(self):
        return 4

    def __getitem__(self, ndx):
        filled = torch.empty(4, 2).fill_(ndx)
        nested = {'a_number': ndx}
        return {'a_tensor': filled, 'another_dict': nested}
@unittest.skipIf(
    TEST_WITH_TSAN,
    "Fails with TSAN with the following error: starting new threads after multi-threaded "
    "fork is not supported. Dying (set die_after_fork=0 to override)")
class TestDictDataLoader(TestCase):
    """Collation of dict-typed samples, including nested dicts."""
    def setUp(self):
        super(TestDictDataLoader, self).setUp()
        self.dataset = DictDataset()

    def test_sequential_batch(self):
        # run once without and once with persistent workers
        for persistent_workers in (False, True):
            if persistent_workers:
                loader = DataLoader(self.dataset, batch_size=2, shuffle=False,
                                    persistent_workers=persistent_workers, num_workers=1)
            else:
                loader = DataLoader(self.dataset, batch_size=2, shuffle=False,
                                    persistent_workers=persistent_workers)
            batch_size = loader.batch_size
            for i, sample in enumerate(loader):
                idx = i * batch_size  # first dataset index inside this batch
                self.assertEqual(set(sample.keys()), {'a_tensor', 'another_dict'})
                self.assertEqual(set(sample['another_dict'].keys()), {'a_number'})
                t = sample['a_tensor']
                self.assertEqual(t.size(), torch.Size([batch_size, 4, 2]))
                self.assertTrue((t[0] == idx).all())
                self.assertTrue((t[1] == idx + 1).all())
                n = sample['another_dict']['a_number']
                self.assertEqual(n.size(), torch.Size([batch_size]))
                self.assertEqual(n[0], idx)
                self.assertEqual(n[1], idx + 1)

    @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
    def test_pin_memory(self):
        # pinning must recurse into nested dict values
        loader = DataLoader(self.dataset, batch_size=2, pin_memory=True)
        for sample in loader:
            self.assertTrue(sample['a_tensor'].is_pinned())
            self.assertTrue(sample['another_dict']['a_number'].is_pinned())
class DummyDataset(torch.utils.data.Dataset):
    """Dataset over range(10) that asserts its `start` attribute is unchanged.

    Used to verify that persistent workers keep their first snapshot of the
    dataset alive for the whole dataloader lifetime.
    """

    def __init__(self):
        self.data = list(range(10))

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        key = idx.tolist() if torch.is_tensor(idx) else idx
        # Persistent workers keep the dataset snapshot taken when they were
        # first spawned, so `start` must still hold its initial value of 0
        # no matter what the parent process assigned afterwards.
        assert self.start == 0
        return self.data[key]
@unittest.skipIf(
    TEST_WITH_TSAN,
    "Fails with TSAN with the following error: starting new threads after multi-threaded "
    "fork is not supported. Dying (set die_after_fork=0 to override)")
@unittest.skipIf(
    TEST_WITH_ASAN, "DataLoader tests hang in ASAN, see: https://github.com/pytorch/pytorch/issues/66223")
class TestDataLoaderPersistentWorkers(TestDataLoader):
    """Re-runs the whole TestDataLoader suite with persistent_workers=True,
    plus checks specific to long-lived workers."""
    def setUp(self):
        super(TestDataLoaderPersistentWorkers, self).setUp()
        self.persistent_workers = True

    @unittest.skipIf(IS_SANDCASTLE, "subprocess doesn't work in FB internal CI")
    @unittest.skipIf(IS_WINDOWS, "No 'resource' module on Windows")
    def test_fd_limit_exceeded(self):
        # See NOTE [ DataLoader on Linux and open files limit ]
        # Run in a subprocess so lowering RLIMIT_NOFILE cannot affect this process.
        import subprocess
        subprocess.check_output([sys.executable, '-c', """\
import torch
import resource
from torch.utils.data import DataLoader, IterableDataset
class RandomDataset(IterableDataset):
    def __init__(self, len, size):
        super(RandomDataset).__init__()
        self.len = len
        self.size = size
    def __iter__(self):
        return self
    def __next__(self):
        if self.len <= 0:
            raise StopIteration
        self.len -= 1
        return torch.randn(self.size)
try:
    keep_fds_alive = []
    resource.setrlimit(resource.RLIMIT_NOFILE, (100, 100))
    for random_t in DataLoader(RandomDataset(200, (2,2)), multiprocessing_context="fork",
                               num_workers=1, persistent_workers=True):
        random_t.max(dim=0)
        keep_fds_alive.append(random_t)
except RuntimeError as e:
    assert "ulimit -n" in str(e)
    assert "set_sharing_strategy" in str(e)
"""])

    def test_dataset_not_reset(self):
        dataset = DummyDataset()
        pin_memory_configs = [False]
        if TEST_CUDA:
            pin_memory_configs.append(True)
        for pin_memory in pin_memory_configs:
            dataloader = self._get_data_loader(dataset, num_workers=2, pin_memory=pin_memory)
            dataset.start = 0
            for i in range(10):
                for x in dataloader:
                    pass
                # Changing the start value here doesn't have any effect in the dataset
                # cached by the workers. since they are not recreated between epochs
                # and can cache values safely
                dataset.start = i
class NamedTupleDataset(Dataset):
    """Samples are namedtuples containing a nested namedtuple, a string
    label and a random tensor."""
    from collections import namedtuple
    Batch = namedtuple('Batch', ['data', 'label', 'random_tensor'])
    Data = namedtuple('Data', ['positive', 'negative'])

    def __len__(self):
        return 4

    def __getitem__(self, ndx):
        pair = self.Data(positive=ndx, negative=-ndx)
        return self.Batch(data=pair, label=str(ndx), random_tensor=torch.randn(3))
@unittest.skipIf(
    TEST_WITH_TSAN,
    "Fails with TSAN with the following error: starting new threads after multi-threaded "
    "fork is not supported. Dying (set die_after_fork=0 to override)")
class TestNamedTupleDataLoader(TestCase):
    """Collation must preserve (nested) namedtuple structure."""
    def setUp(self):
        super(TestNamedTupleDataLoader, self).setUp()
        self.dataset = NamedTupleDataset()

    def test_dataloader_with_namedtuple(self):
        # auto-collation
        loader = DataLoader(self.dataset, batch_size=2, pin_memory=TEST_CUDA)
        for batch in loader:
            self.assertIsInstance(batch, NamedTupleDataset.Batch)
            self.assertEqual(batch.random_tensor.is_pinned(), TEST_CUDA)
            self.assertIsInstance(batch.data, NamedTupleDataset.Data)
            # ints are collated into tensors (and pinned when requested)
            self.assertIsInstance(batch.data.positive, torch.Tensor)
            self.assertEqual(batch.data.positive.is_pinned(), TEST_CUDA)
        # no auto-collation
        loader = DataLoader(self.dataset, batch_size=None, pin_memory=TEST_CUDA)
        for batch in loader:
            self.assertIsInstance(batch, NamedTupleDataset.Batch)
            self.assertEqual(batch.random_tensor.is_pinned(), TEST_CUDA)
            self.assertIsInstance(batch.data, NamedTupleDataset.Data)
            # without collation the ints stay plain Python ints
            self.assertNotIsInstance(batch.data.positive, torch.Tensor)
class SimpleCustomBatch(object):
    """Custom batch type stacking (input, target) pairs, with pin support."""

    def __init__(self, data):
        columns = list(zip(*data))
        self.inp = torch.stack(columns[0], 0)
        self.tgt = torch.stack(columns[1], 0)

    def pin_memory(self):
        """Pin both tensors and return self (the DataLoader pin contract)."""
        self.inp = self.inp.pin_memory()
        self.tgt = self.tgt.pin_memory()
        return self

    def is_pinned(self):
        """True only when both tensors are in pinned memory."""
        return self.inp.is_pinned() and self.tgt.is_pinned()
# Workaround for https://github.com/pytorch/pytorch/issues/50661
# Classes from `__main__` can not be correctly unpickled from spawned module
# See https://docs.python.org/3/library/multiprocessing.html#multiprocessing-programming
# Re-import this very file under its module name so spawned workers can
# unpickle classes defined here instead of resolving them via `__main__`.
self_module = __import__(os.path.splitext(os.path.basename(__file__))[0])
def collate_wrapper(batch):
    """Collate into SimpleCustomBatch, referenced through self_module so the
    resulting object is picklable by spawned workers."""
    return self_module.SimpleCustomBatch(batch)
def collate_into_packed_sequence(batch):
    """Collate samples into a time-major PackedSequence with random lengths."""
    inputs = [sample[0] for sample in batch]
    data = torch.stack(inputs, 1)  # (t, b)
    t, b = data.size()
    # one random valid length per sequence; enforce_sorted=False lets
    # pack_padded_sequence sort internally
    lengths = torch.randint(1, t, size=(b,), dtype=torch.int64)
    return torch.nn.utils.rnn.pack_padded_sequence(data, lengths, enforce_sorted=False)
def collate_into_packed_sequence_batch_first(batch):
    """Collate samples into a batch-first PackedSequence with random lengths."""
    inputs = [sample[0] for sample in batch]
    data = torch.stack(inputs, 0)  # (b, t)
    b, t = data.size()
    # one random valid length per sequence; sorting handled internally
    lengths = torch.randint(1, t, size=(b,), dtype=torch.int64)
    return torch.nn.utils.rnn.pack_padded_sequence(data, lengths, batch_first=True, enforce_sorted=False)
@unittest.skipIf(
    TEST_WITH_TSAN,
    "Fails with TSAN with the following error: starting new threads after multi-threaded "
    "fork is not supported. Dying (set die_after_fork=0 to override)")
class TestCustomPinFn(TestCase):
    """pin_memory must defer to a batch type's own pin_memory() method."""
    def setUp(self):
        super(TestCustomPinFn, self).setUp()
        inps = torch.arange(10 * 5, dtype=torch.float32).view(10, 5)
        tgts = torch.arange(10 * 5, dtype=torch.float32).view(10, 5)
        self.dataset = TensorDataset(inps, tgts)

    @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
    def test_custom_batch_pin(self):
        # (collate_fn producing a custom batch type, expected element class)
        test_cases = [
            (collate_wrapper, self_module.SimpleCustomBatch),
            (collate_into_packed_sequence, torch.nn.utils.rnn.PackedSequence),
            (collate_into_packed_sequence_batch_first, torch.nn.utils.rnn.PackedSequence),
        ]
        for collate_fn, elem_cls in test_cases:
            loader = DataLoader(self.dataset, batch_size=2, collate_fn=collate_fn,
                                pin_memory=True)
            for sample in loader:
                self.assertIsInstance(sample, elem_cls)
                self.assertTrue(sample.is_pinned())

    @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
    def test_custom_batch_pin_worker(self):
        # same as above, but pinning happens on the worker round-trip path
        test_cases = [
            (collate_wrapper, self_module.SimpleCustomBatch),
            (collate_into_packed_sequence, torch.nn.utils.rnn.PackedSequence),
            (collate_into_packed_sequence_batch_first, torch.nn.utils.rnn.PackedSequence),
        ]
        for collate_fn, elem_cls in test_cases:
            loader = DataLoader(self.dataset, batch_size=2, collate_fn=collate_fn,
                                pin_memory=True, num_workers=1)
            for sample in loader:
                self.assertIsInstance(sample, elem_cls)
                self.assertTrue(sample.is_pinned())
class TestWorkerQueueDataset(Dataset):
    """Dataset that tags each item with the id of the worker serving it.

    worker_init_fn stores the worker id on the per-worker dataset copy, so
    __getitem__ can report which worker produced the element.
    """

    def __init__(self, data):
        self.data = data
        self.worker_id = None  # assigned per-worker via worker_init_fn

    def worker_init_fn(self, worker_id):
        self.worker_id = worker_id

    def __len__(self):
        return len(self.data)

    def __getitem__(self, item):
        return self.worker_id, self.data[item]
@unittest.skipIf(
    TEST_WITH_TSAN,
    "Fails with TSAN with the following error: starting new threads after multi-threaded "
    "fork is not supported. Dying (set die_after_fork=0 to override)")
@unittest.skipIf(
    TEST_WITH_ASAN,
    "Flaky with ASAN, see https://github.com/pytorch/pytorch/issues/65727")
class TestIndividualWorkerQueue(TestCase):
    """Batches must be handed to workers strictly round-robin."""
    def setUp(self):
        super(TestIndividualWorkerQueue, self).setUp()
        self.dataset = TestWorkerQueueDataset(list(range(128)))

    def _run_ind_worker_queue_test(self, batch_size, num_workers):
        loader = DataLoader(
            self.dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers,
            timeout=5, worker_init_fn=self.dataset.worker_init_fn
        )
        current_worker_idx = 0
        for i, (worker_ids, sample) in enumerate(loader):
            # every element of a batch must come from the same expected worker
            self.assertEqual(worker_ids.tolist(), [current_worker_idx] * batch_size)
            self.assertEqual(sample.tolist(), list(range(i * batch_size, (i + 1) * batch_size)))
            current_worker_idx += 1
            if current_worker_idx == num_workers:
                current_worker_idx = 0

    def test_ind_worker_queue(self):
        # determine a sensible worker-count upper bound for this machine
        max_num_workers = None
        if hasattr(os, 'sched_getaffinity'):
            try:
                max_num_workers = len(os.sched_getaffinity(0))
            except Exception:
                pass
        if max_num_workers is None:
            cpu_count = os.cpu_count()
            if cpu_count is not None:
                # Use half number of CPUs
                max_num_workers = cpu_count // 2
        if max_num_workers is None:
            max_num_workers = 1
        for batch_size in (8, 16, 32, 64):
            for num_workers in range(0, min(6, max_num_workers)):
                self._run_ind_worker_queue_test(batch_size=batch_size, num_workers=num_workers + 1)
class SetAffinityDataset(IterableDataset):
    """Yields the worker process's CPU affinity set, one CPU id at a time."""
    def __iter__(self):
        # touch the RNG first so any fork/affinity interaction is exercised
        torch.randperm(1)
        after = os.sched_getaffinity(0)
        return iter(after)
def worker_set_affinity(_):
    """worker_init_fn: pin the worker process to the machine's last CPU."""
    os.sched_setaffinity(0, [multiprocessing.cpu_count() - 1])
@unittest.skipIf(
    not hasattr(os, 'sched_setaffinity'),
    "os.sched_setaffinity is not available")
class TestSetAffinity(TestCase):
    """Affinity changes made in worker_init_fn must be visible in workers."""
    def test_set_affinity_in_worker_init(self):
        dataset = SetAffinityDataset()
        dataloader = torch.utils.data.DataLoader(
            dataset, num_workers=2, worker_init_fn=worker_set_affinity)
        for sample in dataloader:
            # each yielded CPU id must be the one the init fn pinned to
            self.assertEqual(sample, [multiprocessing.cpu_count() - 1])
class ConvDataset(Dataset):
    """Single-sample dataset whose item is produced by a conv1d call.

    The constructor deliberately runs one convolution in the parent process:
    this reproduces the fork-after-conv crash from pytorch issue #53565.
    """

    def __init__(self):
        self.x = torch.ones(1, 1, 24000)
        # Call convolution on parent process
        self[0]

    def __len__(self):
        return 1

    def __getitem__(self, index):
        kernel = torch.ones(1, 1, 2)
        return torch.nn.functional.conv1d(self.x, kernel)
@unittest.skipIf(IS_WINDOWS, "Needs fork")
class TestConvAfterFork(TestCase):
    # Tests crash reported in https://github.com/pytorch/pytorch/issues/53565
    def test_conv_after_fork(self):
        # the worker item gains an extra leading batch dim from collation
        loader = DataLoader(ConvDataset(), num_workers=1)
        for x in loader:
            self.assertEqual(x.shape, (1, 1, 1, 23999))
if __name__ == '__main__':
    # defer to the shared test runner imported at the top of the file
    run_tests()
|
test_arrow.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# ==============================================================================
"""Tests for ArrowDataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import os
import sys
import socket
import tempfile
import threading
import unittest
# pyarrow dropped support for Python 3.4; skip the pyarrow-backed tests there.
_have_pyarrow = not (sys.version_info[0] == 3 and sys.version_info[1] == 4)
if _have_pyarrow:
    import pyarrow as pa
    from pyarrow.feather import write_feather
_pyarrow_requirement_message = None if _have_pyarrow else "pyarrow is not supported with Python 3.4"
import tensorflow
tensorflow.compat.v1.disable_eager_execution()  # these tests use the TF1 session API
from tensorflow import dtypes
from tensorflow import errors
from tensorflow.compat.v1 import data
import tensorflow_io.arrow as arrow_io
from tensorflow import test
# (data, output_types, output_shapes) triple describing expected dataset contents.
TruthData = namedtuple("TruthData", ["data", "output_types", "output_shapes"])
class ArrowDatasetTest(test.TestCase):
    """Tests for ArrowDataset, ArrowFeatherDataset and ArrowStreamDataset."""

    @classmethod
    def setUpClass(cls):
        # One list of column values per scalar dtype below (same order).
        cls.scalar_data = [
            [True, False, True, True],
            [1, 2, -3, 4],
            [1, 2, -3, 4],
            [1, 2, -3, 4],
            [1, 2, -3, 4],
            [1, 2, 3, 4],
            [1, 2, 3, 4],
            [1, 2, 3, 4],
            [1, 2, 3, 4],
            [1.1, 2.2, 3.3, 4.4],
            [1.1, 2.2, 3.3, 4.4],
        ]
        cls.scalar_dtypes = (
            dtypes.bool,
            dtypes.int8,
            dtypes.int16,
            dtypes.int32,
            dtypes.int64,
            dtypes.uint8,
            dtypes.uint16,
            dtypes.uint32,
            dtypes.uint64,
            dtypes.float32,
            dtypes.float64
        )
        cls.scalar_shapes = tuple(
            [tensorflow.TensorShape([]) for _ in cls.scalar_dtypes])
        # Variable-length list columns (ragged rows), one per list dtype below.
        cls.list_data = [
            [[1, 1], [2, 2], [3, 3], [4, 4]],
            [[1], [2, 2], [3, 3, 3], [4, 4, 4]],
            [[1, 1], [2, 2], [3, 3], [4, 4]],
            [[1.1, 1.1], [2.2, 2.2], [3.3, 3.3], [4.4, 4.4]],
            [[1.1], [2.2, 2.2], [3.3, 3.3, 3.3], [4.4, 4.4, 4.4]],
            [[1.1, 1.1], [2.2, 2.2], [3.3, 3.3], [4.4, 4.4]],
        ]
        cls.list_dtypes = (
            dtypes.int32,
            dtypes.int32,
            dtypes.int64,
            dtypes.float32,
            dtypes.float32,
            dtypes.float64
        )
        cls.list_shapes = tuple(
            [tensorflow.TensorShape([None]) for _ in cls.list_dtypes])

    def run_test_case(self, dataset, truth_data):
        """Iterate *dataset* and compare every element against *truth_data*."""
        iterator = data.make_one_shot_iterator(dataset)
        next_element = iterator.get_next()

        def is_float(dtype):
            return dtype in [dtypes.float16, dtypes.float32, dtypes.float64]

        with self.test_session() as sess:
            for row in range(len(truth_data.data[0])):
                value = sess.run(next_element)
                for i, col in enumerate(dataset.columns):
                    if truth_data.output_shapes[col].ndims == 0:
                        # scalar column: single value (approximate for floats)
                        if is_float(truth_data.output_types[col]):
                            self.assertAlmostEqual(value[i], truth_data.data[col][row], 4)
                        else:
                            self.assertEqual(value[i], truth_data.data[col][row])
                    elif truth_data.output_shapes[col].ndims == 1:
                        # list column: compare element-wise
                        if is_float(truth_data.output_types[col]):
                            for j, v in enumerate(value[i]):
                                self.assertAlmostEqual(v, truth_data.data[col][row][j], 4)
                        else:
                            self.assertListEqual(value[i].tolist(), truth_data.data[col][row])

    def get_arrow_type(self, dt, is_list):
        """Map a tf dtype (optionally wrapped in a list) to a pyarrow type."""
        if dt == dtypes.bool:
            arrow_type = pa.bool_()
        elif dt == dtypes.int8:
            arrow_type = pa.int8()
        elif dt == dtypes.int16:
            arrow_type = pa.int16()
        elif dt == dtypes.int32:
            arrow_type = pa.int32()
        elif dt == dtypes.int64:
            arrow_type = pa.int64()
        elif dt == dtypes.uint8:
            arrow_type = pa.uint8()
        elif dt == dtypes.uint16:
            arrow_type = pa.uint16()
        elif dt == dtypes.uint32:
            arrow_type = pa.uint32()
        elif dt == dtypes.uint64:
            arrow_type = pa.uint64()
        elif dt == dtypes.float16:
            arrow_type = pa.float16()
        elif dt == dtypes.float32:
            arrow_type = pa.float32()
        elif dt == dtypes.float64:
            arrow_type = pa.float64()
        else:
            raise TypeError("Unsupported dtype for Arrow" + str(dt))
        if is_list:
            arrow_type = pa.list_(arrow_type)
        return arrow_type

    def make_record_batch(self, truth_data):
        """Build a pyarrow RecordBatch whose columns match *truth_data*."""
        arrays = [pa.array(truth_data.data[col],
                           type=self.get_arrow_type(truth_data.output_types[col],
                                                    truth_data.output_shapes[col].ndims == 1))
                  for col in range(len(truth_data.output_types))]
        names = ["%s_[%s]" % (i, a.type) for i, a in enumerate(arrays)]
        return pa.RecordBatch.from_arrays(arrays, names)

    @unittest.skipIf(not _have_pyarrow, _pyarrow_requirement_message)
    def testArrowDataset(self):
        truth_data = TruthData(
            self.scalar_data + self.list_data,
            self.scalar_dtypes + self.list_dtypes,
            self.scalar_shapes + self.list_shapes)
        batch = self.make_record_batch(truth_data)
        # test all columns selected
        dataset = arrow_io.ArrowDataset(
            batch,
            list(range(len(truth_data.output_types))),
            truth_data.output_types,
            truth_data.output_shapes)
        self.run_test_case(dataset, truth_data)
        # test column selection
        columns = (1, 3, len(truth_data.output_types) - 1)
        dataset = arrow_io.ArrowDataset(
            batch,
            columns,
            tuple([truth_data.output_types[c] for c in columns]),
            tuple([truth_data.output_shapes[c] for c in columns]))
        self.run_test_case(dataset, truth_data)
        # test construction from pd.DataFrame
        df = batch.to_pandas()
        dataset = arrow_io.ArrowDataset.from_pandas(
            df, preserve_index=False)
        self.run_test_case(dataset, truth_data)

    @unittest.skipIf(not _have_pyarrow, _pyarrow_requirement_message)
    def testFromPandasPreserveIndex(self):
        data = [
            [1.0, 2.0, 3.0],
            [0.2, 0.4, 0.8],
        ]
        truth_data = TruthData(
            data,
            (dtypes.float32, dtypes.float32),
            (tensorflow.TensorShape([]), tensorflow.TensorShape([])))
        batch = self.make_record_batch(truth_data)
        df = batch.to_pandas()
        dataset = arrow_io.ArrowDataset.from_pandas(
            df, preserve_index=True)
        # Add index column to test data to check results
        truth_data_with_index = TruthData(
            truth_data.data + [range(len(truth_data.data[0]))],
            truth_data.output_types + (dtypes.int64,),
            truth_data.output_shapes + (tensorflow.TensorShape([]),))
        self.run_test_case(dataset, truth_data_with_index)
        # Test preserve_index again, selecting second column only
        # NOTE: need to select TruthData because `df` gets selected also
        truth_data_selected_with_index = TruthData(
            truth_data_with_index.data[1:],
            truth_data_with_index.output_types[1:],
            truth_data_with_index.output_shapes[1:])
        dataset = arrow_io.ArrowDataset.from_pandas(
            df, columns=(1,), preserve_index=True)
        self.run_test_case(dataset, truth_data_selected_with_index)

    @unittest.skipIf(not _have_pyarrow, _pyarrow_requirement_message)
    def testArrowFeatherDataset(self):
        # Feather files currently do not support columns of list types
        truth_data = TruthData(self.scalar_data, self.scalar_dtypes,
                               self.scalar_shapes)
        batch = self.make_record_batch(truth_data)
        df = batch.to_pandas()
        # Create a tempfile that is deleted after tests run
        with tempfile.NamedTemporaryFile(delete=False) as f:
            write_feather(df, f)
        # test single file
        dataset = arrow_io.ArrowFeatherDataset(
            f.name,
            list(range(len(truth_data.output_types))),
            truth_data.output_types,
            truth_data.output_shapes)
        self.run_test_case(dataset, truth_data)
        # test multiple files
        dataset = arrow_io.ArrowFeatherDataset(
            [f.name, f.name],
            list(range(len(truth_data.output_types))),
            truth_data.output_types,
            truth_data.output_shapes)
        truth_data_doubled = TruthData(
            [d * 2 for d in truth_data.data],
            truth_data.output_types,
            truth_data.output_shapes)
        self.run_test_case(dataset, truth_data_doubled)
        # test construction from schema
        dataset = arrow_io.ArrowFeatherDataset.from_schema(
            f.name, batch.schema)
        self.run_test_case(dataset, truth_data)
        os.unlink(f.name)

    @unittest.skipIf(not _have_pyarrow, _pyarrow_requirement_message)
    def testArrowSocketDataset(self):
        truth_data = TruthData(
            self.scalar_data + self.list_data,
            self.scalar_dtypes + self.list_dtypes,
            self.scalar_shapes + self.list_shapes)
        batch = self.make_record_batch(truth_data)
        # Listen on an OS-assigned port; the dataset connects as the client.
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.bind(('127.0.0.1', 0))
        sock.listen(1)
        host_addr, port = sock.getsockname()
        host = "%s:%s" % (host_addr, port)

        def run_server(num_batches):
            # serve the same record batch num_batches times, then close
            conn, _ = sock.accept()
            outfile = conn.makefile(mode='wb')
            writer = pa.RecordBatchStreamWriter(outfile, batch.schema)
            for _ in range(num_batches):
                writer.write_batch(batch)
            writer.close()
            outfile.close()
            conn.close()
            sock.close()

        # test with multiple batches, construct from schema
        num_batches = 2
        server = threading.Thread(target=run_server, args=(num_batches,))
        server.start()
        dataset = arrow_io.ArrowStreamDataset.from_schema(
            host, batch.schema)
        truth_data_mult = TruthData(
            [d * num_batches for d in truth_data.data],
            truth_data.output_types,
            truth_data.output_shapes)
        self.run_test_case(dataset, truth_data_mult)
        server.join()

    @unittest.skipIf(not _have_pyarrow, _pyarrow_requirement_message)
    def testBoolArrayType(self):
        # NOTE: need to test this seperately because to_pandas fails with
        # ArrowNotImplementedError:
        # Not implemented type for list in DataFrameBlock: bool
        # see https://issues.apache.org/jira/browse/ARROW-4370
        truth_data = TruthData(
            [[[False, False], [False, True], [True, False], [True, True]]],
            (dtypes.bool,),
            (tensorflow.TensorShape([None]),))
        batch = self.make_record_batch(truth_data)
        dataset = arrow_io.ArrowDataset(
            batch,
            (0,),
            truth_data.output_types,
            truth_data.output_shapes)
        self.run_test_case(dataset, truth_data)

    @unittest.skipIf(not _have_pyarrow, _pyarrow_requirement_message)
    def testIncorrectColumnType(self):
        truth_data = TruthData(self.scalar_data, self.scalar_dtypes,
                               self.scalar_shapes)
        batch = self.make_record_batch(truth_data)
        # declare every column as int32 -> the kernel must reject the mismatch
        dataset = arrow_io.ArrowDataset(
            batch,
            list(range(len(truth_data.output_types))),
            tuple([dtypes.int32 for _ in truth_data.output_types]),
            truth_data.output_shapes)
        with self.assertRaisesRegexp(errors.OpError, 'Arrow type mismatch'):
            self.run_test_case(dataset, truth_data)
if __name__ == "__main__":
    # tf.test entry point
    test.main()
|
radio.py | import vlc
import threading, json, os
class Radio:
    """Plays internet radio stations through VLC.

    Station definitions are loaded from a per-language JSON file living next
    to this module (``radio_station_<language>.json``).
    """

    def __init__(self, config):
        self.radio_station_url = ''
        self.radio_station_name = ''
        self.backgroundthread = None
        self.config = config
        self.language = config["config"]['setup']['language']
        self.messages = config["language"]
        self.noErrors = True
        self.instance = None
        self.player = None
        self.radio_station_list = None
        self.init_radio()

    def init_radio(self):
        """Load the station list and create the VLC player instance."""
        try:
            # use a context manager so the file is closed even on JSON errors
            path = os.path.dirname(__file__) + '/radio_station_' + self.language + '.json'
            with open(path) as radio_stations_list_file:
                self.radio_station_list = json.load(radio_stations_list_file)
        except (OSError, ValueError):
            # missing or malformed station file: remember the failure so that
            # play_radio() refuses to start a broken stream
            self.noErrors = False
        self.instance = vlc.Instance('--input-repeat=-1', '--fullscreen')
        self.player = self.instance.media_player_new()

    def play_radio_station(self, radio_station):
        """Look up *radio_station* by (partial) name and start playing it.

        Returns a localized status message string.
        """
        # to prevent that a thread is still running, stop the radio first
        self.stop_radio()
        if len(self.radio_station_list) == 0:
            self.noErrors = False
            return self.messages["Radio"]["error_radio_station_list"]
        # BUG FIX: the previous `while i < len(...) - 1` loop never examined
        # the final entry, so the last station could not be selected.
        for station in self.radio_station_list:
            radio_station_url = station['url']
            # leading space so find() > 0 only matches inside the name,
            # preserving the original matching behavior
            radio_station_name = ' ' + station['name'].lower()
            if radio_station_name.find(radio_station.lower()) > 0:
                self.radio_station_url = radio_station_url
                self.radio_station_name = radio_station_name
                self.play_radio()
                return self.messages["Radio"]["playing"].format(self.radio_station_name)
        return self.messages["Radio"]["error_radio_station_not_found"]

    def play_radio(self):
        """Start streaming in a background thread (only if setup succeeded)."""
        if self.noErrors:
            self.backgroundthread = threading.Thread(target=self.playingRadio)
            self.backgroundthread.start()

    def stop_radio(self):
        """Stop playback and forget the current station. Returns ''."""
        # it might be that there is no radio running (or no player yet)
        try:
            self.player.stop()
        except Exception:
            print("radio not started yet")
        self.radio_station_url = None
        self.radio_station_name = None
        self.backgroundthread = None
        return ''

    def playingRadio(self):
        """Thread target: feed the current (or fallback) stream URL to VLC."""
        if self.radio_station_url:
            radio_station_url = self.radio_station_url
        else:
            # fallback default station when none was selected
            radio_station_url = 'http://stream.sunshine-live.de/house/mp3-192'
        media = self.instance.media_new(radio_station_url)
        self.player.set_media(media)
        self.player.play()
|
server.py | import socket
import threading
import json
class ThreadedServer:
    """A TCP echo server handling each client on its own thread."""

    def __init__(self, config):
        """Bind a reusable listening socket to config["host"]:config["port"]."""
        self.host = config["host"]
        self.port = config["port"]
        self.timeout = config["timeout"]
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # allow quick restarts without waiting for TIME_WAIT to expire
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.bind((self.host, self.port))
        print("Starting a server on %s:%s" % (self.host, self.port))

    def listen(self):
        """Accept clients forever, spawning one handler thread per connection."""
        self.sock.listen()
        while True:
            client, address = self.sock.accept()
            client.settimeout(self.timeout)
            threading.Thread(target=self.listenToClient, args=(client, address)).start()

    def listenToClient(self, client, address):
        """Echo received data back to the client until it disconnects.

        Returns False once the connection has been closed.
        """
        size = 1024
        while True:
            try:
                data = client.recv(size)
                if data:
                    print("Data received: %s" % data)
                    client.send(data)
                else:
                    # recv() returning b'' means the peer closed the connection.
                    # BUG FIX: the original raised the undefined name `error`
                    # here and relied on a bare except to swallow the NameError.
                    raise ConnectionError('Client disconnected')
            except OSError:
                # covers ConnectionError, socket.timeout and other socket errors
                client.close()
                return False
if __name__ == "__main__":
    # Load the JSON config (host/port/timeout) and serve forever.
    with open("config.cfg") as config_file:
        config = json.load(config_file);
    ThreadedServer(config).listen()
|
L_CameraTrack.py | # import the necessary packages
import cv2
import imutils
from threading import Thread, Lock
import time
# Async webcam
class VideoCaptureAsync:
    """Asynchronous webcam reader: a background thread keeps pulling frames
    so read() always returns the most recent one without blocking on I/O."""
    def __init__(self, src = 0, width = 960, height = 540) :
        self.stream = cv2.VideoCapture(src)
        # NOTE(review): return value unused; presumably a short settle delay — confirm
        c = cv2.waitKey(10)
        self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
        # prime grabbed/frame so read() works even before start() is called
        (self.grabbed, self.frame) = self.stream.read()
        self.started = False
        # guards grabbed/frame, which are shared with the update thread
        self.read_lock = Lock()
    def start(self):
        """Start the background capture thread (no-op if already running)."""
        if self.started:
            print("already started!!")
            return None
        self.started = True
        self.thread = Thread(target=self.update, args=())
        self.thread.start()
        return self
    def update(self):
        """Thread body: continuously overwrite the latest frame under the lock."""
        while self.started :
            (grabbed, frame) = self.stream.read()
            self.read_lock.acquire()
            self.grabbed, self.frame = grabbed, frame
            self.read_lock.release()
    def read(self):
        """Return (grabbed, frame copy); frame is None when the grab failed."""
        with self.read_lock:
            frame = None
            grabbed = self.grabbed
            if grabbed == True:
                # copy so callers can't race with the update thread
                frame = self.frame.copy()
            return grabbed, frame
    def stop(self):
        """Stop the capture thread and wait for it to finish."""
        self.started = False
        self.thread.join()
    def __exit__(self, exc_type, exc_value, traceback) :
        # release the camera handle when used as a context manager
        self.stream.release()
# calculate position
def func_CalcXY(frame, objLower, objUpper):
    """Return the (x, y) centroid of the largest blob within an HSV range.

    frame: BGR image; objLower/objUpper: HSV bounds for cv2.inRange.
    Returns None when no contour (or only degenerate ones) is found.
    """
    blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
    # Create mask, then clean it up with erode/dilate passes
    mask = cv2.inRange(hsv, objLower, objUpper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    # Find contour
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    center = None
    if len(cnts) > 0:
        # find the largest contour in the mask, then use it to compute centroid
        c = max(cnts, key=cv2.contourArea)
        M = cv2.moments(c)
        # BUG FIX: a zero-area contour has m00 == 0 and previously raised
        # ZeroDivisionError; treat it as "no object found" instead.
        if M["m00"] != 0:
            center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
            # print(center)  # uncomment for console position
    return center
# return mask view
def func_Mask(frame, objLower, objUpper):
    """Return the cleaned binary mask of pixels inside [objLower, objUpper].

    Same preprocessing as func_CalcXY, but returns the mask itself (for
    debugging/visualisation) instead of a centroid.
    """
    blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
    # Create mask, then clean it up with erode/dilate passes
    mask = cv2.inRange(hsv, objLower, objUpper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    # NOTE: the original version also found contours and computed a centroid
    # here, but that result was discarded (only the mask is returned) and the
    # dead computation could raise ZeroDivisionError on zero-area contours.
    return mask
pabotlib.py | # Copyright 2014->future! Mikko Korpela
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from robot.errors import RobotError
try:
import configparser # type: ignore
except:
import ConfigParser as configparser # type: ignore
# Support Python 2
import threading
import time
from typing import Any, Dict, List, Optional, Set, Tuple
from robot.api import logger
from robot.libraries.BuiltIn import BuiltIn
from robot.libraries.Remote import Remote
from robot.running import TestLibrary
from .robotremoteserver import RobotRemoteServer
# Names of the pabot-controlled variables injected into each parallel execution.
PABOT_LAST_LEVEL = "PABOTLASTLEVEL"
PABOT_QUEUE_INDEX = "PABOTQUEUEINDEX"
PABOT_LAST_EXECUTION_IN_POOL = "PABOTISLASTEXECUTIONINPOOL"
# Parallel-value key tracking the smallest queue index still executing.
PABOT_MIN_QUEUE_INDEX_EXECUTING_PARALLEL_VALUE = "pabot_min_queue_index_executing"
class _PabotLib(object):
_TAGS_KEY = "tags"
def __init__(self, resourcefile=None):  # type: (Optional[str]) -> None
    """Initialize shared pabot state.

    :param resourcefile: optional .ini file whose sections define value sets.
    """
    # lock name -> (owner caller_id, re-entrant acquisition count)
    self._locks = {}  # type: Dict[str, Tuple[str, int]]
    # caller_id -> the value set dict it currently has reserved
    self._owner_to_values = {}  # type: Dict[str, Dict[str, object]]
    # values shared between all parallel executions
    self._parallel_values = {}  # type: Dict[str, object]
    # library name -> (port, server, serving thread) for started remote libraries
    self._remote_libraries = (
        {}
    )  # type: Dict[str, Tuple[int, RobotRemoteServer, threading.Thread]]
    self._values = self._parse_values(resourcefile)
    self._added_suites = []  # type: List[Tuple[str, List[str]]]
    self._ignored_executions = set()  # type: Set[str]
def _parse_values(
self, resourcefile
): # type: (Optional[str]) -> Dict[str, Dict[str, Any]]
vals = {} # type: Dict[str, Dict[str, Any]]
if resourcefile is None:
return vals
conf = configparser.ConfigParser()
conf.read(resourcefile)
for section in conf.sections():
vals[section] = dict(
(k, conf.get(section, k)) for k in conf.options(section)
)
for section in vals:
if self._TAGS_KEY in vals[section]:
vals[section][self._TAGS_KEY] = [
t.strip() for t in vals[section][self._TAGS_KEY].split(",")
]
else:
vals[section][self._TAGS_KEY] = []
return vals
def set_parallel_value_for_key(self, key, value):  # type: (str, object) -> None
    """Store a value shared across all parallel executions under *key*."""
    self._parallel_values[key] = value
def get_parallel_value_for_key(self, key):  # type: (str) -> object
    """Return the shared value for *key*, or '' when it was never set."""
    return self._parallel_values.get(key, "")
def acquire_lock(self, name, caller_id): # type: (str, str) -> bool
if name in self._locks and caller_id != self._locks[name][0]:
return False
if name not in self._locks:
self._locks[name] = (caller_id, 0)
self._locks[name] = (caller_id, self._locks[name][1] + 1)
return True
def release_lock(self, name, caller_id): # type: (str, str) -> None
assert self._locks[name][0] == caller_id
self._locks[name] = (caller_id, self._locks[name][1] - 1)
if self._locks[name][1] == 0:
del self._locks[name]
def release_locks(self, caller_id):
# type: (str) -> None
for key in list(self._locks.keys()):
if self._locks[key][0] == caller_id:
self._locks[key] = (caller_id, self._locks[key][1] - 1)
if self._locks[key][1] == 0:
del self._locks[key]
def acquire_value_set(self, caller_id, *tags):
if not self._values:
raise AssertionError(
"Value set cannot be aquired. It was never imported or all are disabled. Use --resourcefile option to import."
)
# CAN ONLY RESERVE ONE VALUE SET AT A TIME
if (
caller_id in self._owner_to_values
and self._owner_to_values[caller_id] is not None
):
raise ValueError("Caller has already reserved a value set.")
matching = False
for valueset_key in self._values:
if all(tag in self._values[valueset_key][self._TAGS_KEY] for tag in tags):
matching = True
if self._values[valueset_key] not in self._owner_to_values.values():
self._owner_to_values[caller_id] = self._values[valueset_key]
return (valueset_key, self._values[valueset_key])
if not matching:
raise ValueError("No value set matching given tags exists.")
# This return value is for situations where no set could be reserved
# and the caller needs to wait until one is free.
return (None, None)
def release_value_set(self, caller_id): # type: (str) -> None
if caller_id not in self._owner_to_values:
return
del self._owner_to_values[caller_id]
def disable_value_set(self, setname, caller_id): # type: (str, str) -> None
del self._owner_to_values[caller_id]
del self._values[setname]
def get_value_from_set(self, key, caller_id): # type: (str, str) -> object
if caller_id not in self._owner_to_values:
raise AssertionError("No value set reserved for caller process")
if key not in self._owner_to_values[caller_id]:
raise AssertionError('No value for key "%s"' % key)
return self._owner_to_values[caller_id][key]
def import_shared_library(self, name): # type: (str) -> int
if name in self._remote_libraries:
return self._remote_libraries[name][0]
imported = TestLibrary(name)
server = RobotRemoteServer(
imported.get_instance(), port=0, serve=False, allow_stop=True
)
server_thread = threading.Thread(target=server.serve)
server_thread.start()
time.sleep(1)
port = server.server_port
self._remote_libraries[name] = (port, server, server_thread)
return port
def add_suite_to_execution_queue(
self, suitename, variables
): # type: (str, List[str]) -> None
self._added_suites.append((suitename, variables or []))
def get_added_suites(self): # type: () -> List[Tuple[str, List[str]]]
added_suites = self._added_suites
self._added_suites = []
return added_suites
def ignore_execution(self, caller_id): # type: (str) -> None
self._ignored_executions.add(caller_id)
def is_ignored_execution(self, caller_id): # type: (str) -> bool
return caller_id in self._ignored_executions
def stop_remote_libraries(self):
for name in self._remote_libraries:
self._remote_libraries[name][1].stop_remote_server()
for name in self._remote_libraries:
self._remote_libraries[name][2].join()
class PabotLib(_PabotLib):
    """Robot Framework keyword library for coordinating pabot processes.

    Acts as a client to the shared PabotLib remote server when pabot is
    started with ``--pabotlib`` (via ``${PABOTLIBURI}``); otherwise the
    keywords fall back to the local in-process _PabotLib implementation.
    The library also registers itself as a listener (API v2) to track
    the current suite/test/keyword position, which is used to build
    unique lock names for run_setup_only_once and friends.
    """

    __version__ = 0.67
    ROBOT_LIBRARY_SCOPE = "GLOBAL"
    ROBOT_LISTENER_API_VERSION = 2
    # Poll interval (seconds) used while waiting in setup/teardown keywords.
    _pollingSeconds_SetupTeardown = 0.3
    # Poll interval (seconds) used while waiting for locks / value sets.
    _pollingSeconds = 0.1
    # When False, suppress debug/trace logging emitted while polling.
    _polling_logging = True

    def __init__(self):
        _PabotLib.__init__(self)
        self.__remotelib = None  # lazily created Remote proxy (see _remotelib)
        self.__my_id = None  # lazily read ${CALLER_ID} (see _my_id)
        self._valueset = None  # values of the currently reserved value set
        self._setname = None  # name of the currently reserved value set
        self.ROBOT_LIBRARY_LISTENER = self
        # Stack of position strings maintained by the listener callbacks;
        # the top of the stack is exposed as the _path property.
        self._position = []  # type: List[str]
        self._row_index = 0

    def _start(self, name, attributes):
        # Listener: entering a suite/test -- push its long name.
        self._position.append(attributes["longname"])

    def _end(self, name, attributes):
        # Listener: leaving a suite/test -- pop the frame, but keep the
        # parent's name when this was the outermost frame.
        self._position = (
            self._position[:-1]
            if len(self._position) > 1
            else [attributes["longname"][: -len(name) - 1]]
        )

    def _start_keyword(self, name, attributes):
        # Track keyword nesting as dotted row indices (e.g. "0.1.2") so
        # that _path is unique for every executed keyword position.
        if not (self._position):
            self._position = ["0", "0." + str(self._row_index)]
        else:
            self._position.append(self._position[-1] + "." + str(self._row_index))
        self._row_index = 0

    def _end_keyword(self, name, attributes):
        # Recompute the sibling row index from the popped position so the
        # next keyword at the same level gets the following index.
        if not (self._position):
            self._row_index = 1
            self._position = ["0"]
            return
        splitted = self._position[-1].split(".")
        self._row_index = int(splitted[-1]) if len(splitted) > 1 else 0
        self._row_index += 1
        self._position = (
            self._position[:-1]
            if len(self._position) > 1
            else [str(int(splitted[0]) + 1)]
        )

    # Suites and tests share the same enter/leave listener handling.
    _start_suite = _start_test = _start
    _end_suite = _end_test = _end

    def _close(self):
        # Listener hook: free everything this process still holds when
        # the library is closed.
        try:
            self.release_locks()
            self.release_value_set()
        except RuntimeError as err:
            # This is just last line of defence
            # Ignore connection errors if library server already closed
            logger.console(
                "pabot.PabotLib#_close: threw an exception: is --pabotlib flag used? ErrorDetails: {0}".format(
                    repr(err)
                ),
                stream="stderr",
            )
            pass

    @property
    def _path(self):
        # Current unique execution position ("" before anything started).
        if len(self._position) < 1:
            return ""
        return self._position[-1]

    @property
    def _my_id(self):
        # Caller id of this pabot process, read once from ${CALLER_ID}.
        if self.__my_id is None:
            my_id = BuiltIn().get_variable_value("${CALLER_ID}")
            logger.debug("Caller ID is %r" % my_id)
            self.__my_id = my_id if my_id else None
        return self.__my_id

    @property
    def _remotelib(self):
        # Remote proxy to the PabotLib server, created once from
        # ${PABOTLIBURI}; None when running without --pabotlib.
        if self.__remotelib is None:
            uri = BuiltIn().get_variable_value("${PABOTLIBURI}")
            logger.debug("PabotLib URI %r" % uri)
            self.__remotelib = Remote(uri) if uri else None
        return self.__remotelib

    def set_polling_seconds(self, secs):
        """
        Determine the amount of seconds to wait between checking for free locks. Default: 0.1 (100ms)
        """
        PabotLib._pollingSeconds = secs

    def set_polling_seconds_setupteardown(self, secs):
        """
        Determine the amount of seconds to wait between checking for free locks during setup and teardown. Default: 0.3 (300ms)
        """
        PabotLib._pollingSeconds_SetupTeardown = secs

    def set_polling_logging(self, enable):
        """
        Enable or disable logging inside of polling. Logging inside of polling can be disabled (enable=False) to reduce log file size.
        """
        # Robot may pass the value as a string; treat "true" (any case)
        # as True and anything else as False.
        if isinstance(enable, str):
            enable = enable.lower() == "true"
        PabotLib._polling_logging = bool(enable)

    def run_setup_only_once(self, keyword, *args):
        """
        Runs a keyword only once at the first possible moment when
        an execution has gone through this step.
        [https://pabot.org/PabotLib.html?ref=log#run-setup-only-once|Open online docs.]
        """
        # One lock / parallel value per unique execution position.
        lock_name = "pabot_setup_%s" % self._path
        try:
            self.acquire_lock(lock_name)
            passed = self.get_parallel_value_for_key(lock_name)
            if passed != "":
                # Another process already ran the setup.
                if passed == "FAILED":
                    raise AssertionError("Setup failed in other process")
                logger.info("Setup skipped in this item")
                return
            BuiltIn().run_keyword(keyword, *args)
            self.set_parallel_value_for_key(lock_name, "PASSED")
        except:
            # Record the failure so other processes fail fast too.
            self.set_parallel_value_for_key(lock_name, "FAILED")
            raise
        finally:
            self.release_lock(lock_name)

    def run_only_once(self, keyword, *args):
        """
        Runs a keyword only once in one of the parallel processes. Optional arguments of the keyword needs to be serializeable in order to
        create an unique lockname.
        Sample request sequence [keyword, keyword 'x', keyword, keyword 5, keyword 'x', keyword 5]
        results in execution of [keyword, keyword 'x', keyword 5]
        [https://pabot.org/PabotLib.html?ref=log#run-only-once|Open online docs.]
        """
        # Keyword name plus repr of the args forms the unique lock name.
        lock_name = "pabot_run_only_once_%s_%s" % (keyword, str(args))
        try:
            self.acquire_lock(lock_name)
            passed = self.get_parallel_value_for_key(lock_name)
            if passed != "":
                if passed == "FAILED":
                    raise AssertionError("Keyword failed in other process")
                logger.info("Skipped in this item")
                return
            BuiltIn().run_keyword(keyword, *args)
            self.set_parallel_value_for_key(lock_name, "PASSED")
        except:
            self.set_parallel_value_for_key(lock_name, "FAILED")
            raise
        finally:
            self.release_lock(lock_name)

    def run_teardown_only_once(self, keyword, *args):
        """
        Runs a keyword only once after all executions have gone throught this step in the last possible moment.
        [https://pabot.org/PabotLib.html?ref=log#run-teardown-only-once|Open online docs.]
        """
        last_level = BuiltIn().get_variable_value("${%s}" % PABOT_LAST_LEVEL)
        if last_level is None:
            # Not running under pabot -- just execute the keyword.
            BuiltIn().run_keyword(keyword, *args)
            return
        logger.trace('Current path "%s" and last level "%s"' % (self._path, last_level))
        if not self._path.startswith(last_level):
            logger.info("Teardown skipped in this item")
            return
        queue_index = int(
            BuiltIn().get_variable_value("${%s}" % PABOT_QUEUE_INDEX) or 0
        )
        logger.trace("Queue index (%d)" % queue_index)
        if self._remotelib:
            # Wait until no execution with a lower queue index is still
            # running, i.e. this process is truly the last one here.
            while (
                self.get_parallel_value_for_key(
                    PABOT_MIN_QUEUE_INDEX_EXECUTING_PARALLEL_VALUE
                )
                < queue_index
            ):
                if PabotLib._polling_logging:
                    logger.trace(
                        self.get_parallel_value_for_key(
                            PABOT_MIN_QUEUE_INDEX_EXECUTING_PARALLEL_VALUE
                        )
                    )
                time.sleep(PabotLib._pollingSeconds_SetupTeardown)
        logger.trace("Teardown conditions met. Executing keyword.")
        BuiltIn().run_keyword(keyword, *args)

    def run_on_last_process(self, keyword):
        """
        Runs a keyword only on last process used by pabot.
        [https://pabot.org/PabotLib.html?ref=log#run-on-last-process|Open online docs.]
        """
        is_last = (
            int(
                BuiltIn().get_variable_value("${%s}" % PABOT_LAST_EXECUTION_IN_POOL)
                or 1
            )
            == 1
        )
        if not is_last:
            logger.info("Skipped in this item")
            return
        queue_index = int(
            BuiltIn().get_variable_value("${%s}" % PABOT_QUEUE_INDEX) or 0
        )
        if queue_index > 0 and self._remotelib:
            # Wait for the signal that only the last executions remain.
            while self.get_parallel_value_for_key("pabot_only_last_executing") != 1:
                time.sleep(PabotLib._pollingSeconds_SetupTeardown)
        BuiltIn().run_keyword(keyword)

    def set_parallel_value_for_key(self, key, value):
        """
        Set a globally available key and value that can be accessed
        from all the pabot processes.
        [https://pabot.org/PabotLib.html?ref=log#set-parallel-value-for-key|Open online docs.]
        """
        self._run_with_lib("set_parallel_value_for_key", key, value)

    def _run_with_lib(self, keyword, *args):
        # Prefer the remote PabotLib server; fall back to the local
        # in-process implementation when --pabotlib is not in use.
        if self._remotelib:
            try:
                return self._remotelib.run_keyword(keyword, args, {})
            except RuntimeError as err:
                logger.error(
                    "RuntimeError catched in remotelib keyword execution. Maybe there is no connection - is pabot called with --pabotlib option? ErrorDetails: {0}".format(
                        repr(err)
                    )
                )
                # Drop the proxy so a later call can try to reconnect.
                self.__remotelib = None
                raise
        return getattr(_PabotLib, keyword)(self, *args)

    def add_suite_to_execution_queue(self, suitename, *variables):
        """Queue *suitename* for execution with optional variables."""
        self._run_with_lib("add_suite_to_execution_queue", suitename, variables)

    def get_parallel_value_for_key(self, key):
        """
        Get the value for a key. If there is no value for the key then empty
        string is returned.
        [https://pabot.org/PabotLib.html?ref=log#get-parallel-value-for-key|Open online docs.]
        """
        return self._run_with_lib("get_parallel_value_for_key", key)

    def acquire_lock(self, name):
        """
        Wait for a lock with name.
        [https://pabot.org/PabotLib.html?ref=log#acquire-lock|Open online docs.]
        """
        if self._remotelib:
            try:
                # Poll the server until the lock is granted.
                while not self._remotelib.run_keyword(
                    "acquire_lock", [name, self._my_id], {}
                ):
                    time.sleep(PabotLib._pollingSeconds)
                    if PabotLib._polling_logging:
                        logger.debug("waiting for lock to release")
                return True
            except RuntimeError as err:
                logger.error(
                    "RuntimeError catched in remote acquire_lock execution. Maybe there is no connection - is pabot called with --pabotlib option? ErrorDetails: {0}".format(
                        repr(err)
                    )
                )
                self.__remotelib = None
                raise
        return _PabotLib.acquire_lock(self, name, self._my_id)

    def release_lock(self, name):
        """
        Release a lock with name.
        [https://pabot.org/PabotLib.html?ref=log#release-lock|Open online docs.]
        """
        self._run_with_lib("release_lock", name, self._my_id)

    def release_locks(self):
        """
        Release all locks called by instance.
        [https://pabot.org/PabotLib.html?ref=log#release-locks|Open online docs.]
        """
        self._run_with_lib("release_locks", self._my_id)

    def acquire_value_set(self, *tags):
        """
        Reserve a set of values for this execution.
        [https://pabot.org/PabotLib.html?ref=log#acquire-value-set|Open online docs.]
        """
        setname = self._acquire_value_set(*tags)
        if setname is None:
            raise ValueError("Could not aquire a value set")
        return setname

    def _acquire_value_set(self, *tags):
        # Poll the remote server until a matching value set is free;
        # without a remote server, reserve directly from the local state.
        if self._remotelib:
            try:
                while True:
                    self._setname, self._valueset = self._remotelib.run_keyword(
                        "acquire_value_set", [self._my_id] + list(tags), {}
                    )
                    if self._setname:
                        logger.info('Value set "%s" acquired' % self._setname)
                        return self._setname
                    time.sleep(PabotLib._pollingSeconds)
                    if PabotLib._polling_logging:
                        logger.debug("waiting for a value set")
            except RuntimeError as err:
                logger.error(
                    "RuntimeError catched in remote _acquire_value_set execution. Maybe there is no connection - is pabot called with --pabotlib option? ErrorDetails: {0}".format(
                        repr(err)
                    )
                )
                self.__remotelib = None
                raise
        self._setname, self._valueset = _PabotLib.acquire_value_set(
            self, self._my_id, *tags
        )
        return self._setname

    def get_value_from_set(self, key):
        """
        Get a value from previously reserved value set.
        [https://pabot.org/PabotLib.html?ref=log#get-value-from-set|Open online docs.]
        """
        if self._valueset is None:
            raise AssertionError("No value set reserved for caller process")
        # Resource-file option names are lower-cased by configparser, so
        # look up the key case-insensitively.
        key = key.lower()
        if key not in self._valueset:
            raise AssertionError('No value for key "%s"' % key)
        return self._valueset[key]

    def ignore_execution(self):
        """Mark this execution as ignored and abort it immediately."""
        self._run_with_lib("ignore_execution", self._my_id)
        # Raise a non-continuable error so Robot stops this execution.
        error = RobotError("Ignore")
        error.ROBOT_EXIT_ON_FAILURE = True
        error.ROBOT_CONTINUE_ON_FAILURE = False
        raise error

    def release_value_set(self):
        """
        Release a reserved value set so that other executions can use it also.
        [https://pabot.org/PabotLib.html?ref=log#release-value-set|Open online docs.]
        """
        self._valueset = None
        self._setname = None
        self._run_with_lib("release_value_set", self._my_id)

    def disable_value_set(self):
        """
        Disable a reserved value set.
        [https://pabot.org/PabotLib.html?ref=log#disable-value-set|Open online docs.]
        """
        self._valueset = None
        self._run_with_lib("disable_value_set", self._setname, self._my_id)
        self._setname = None
# Module import will give a bad error message in log file
# Workaround: expose PabotLib also as pabotlib
pabotlib = PabotLib

if __name__ == "__main__":
    import sys

    # Serve the shared library state over XML-RPC:
    # argv[1] = resource file, argv[2] = host, argv[3] = port.
    RobotRemoteServer(
        _PabotLib(sys.argv[1]), host=sys.argv[2], port=sys.argv[3], allow_stop=True
    )
|
standalone.py | """Support for standalone client challenge solvers. """
import collections
import functools
import http.client as http_client
import http.server as BaseHTTPServer
import logging
import socket
import socketserver
import threading
from typing import List
from acme import challenges
from acme import crypto_util
logger = logging.getLogger(__name__)
class TLSServer(socketserver.TCPServer):
    """Generic TLS Server.

    A TCPServer whose listening socket is wrapped with TLS at bind time;
    the certificate is chosen per connection via SNI (_cert_selection).
    """

    def __init__(self, *args, **kwargs):
        # Keyword-only options (popped before TCPServer sees kwargs):
        #   ipv6 (bool): bind an AF_INET6 socket instead of AF_INET
        #   certs (dict): SNI server name -> certificate
        #   method: SSL method, defaults to crypto_util._DEFAULT_SSL_METHOD
        #   allow_reuse_address (bool): defaults to True
        self.ipv6 = kwargs.pop("ipv6", False)
        if self.ipv6:
            self.address_family = socket.AF_INET6
        else:
            self.address_family = socket.AF_INET
        self.certs = kwargs.pop("certs", {})
        self.method = kwargs.pop(
            "method", crypto_util._DEFAULT_SSL_METHOD)
        self.allow_reuse_address = kwargs.pop("allow_reuse_address", True)
        socketserver.TCPServer.__init__(self, *args, **kwargs)

    def _wrap_sock(self):
        # Replace the plain socket with an SSL wrapper that selects the
        # certificate (and, if a subclass defines it, the ALPN protocol)
        # per incoming connection.
        self.socket = crypto_util.SSLSocket(
            self.socket, cert_selection=self._cert_selection,
            alpn_selection=getattr(self, '_alpn_selection', None),
            method=self.method)

    def _cert_selection(self, connection):  # pragma: no cover
        """Callback selecting certificate for connection."""
        server_name = connection.get_servername()
        return self.certs.get(server_name, None)

    def server_bind(self):
        # Wrap with TLS before binding so every accepted connection is TLS.
        self._wrap_sock()
        return socketserver.TCPServer.server_bind(self)
class ACMEServerMixin:
    """ACME server common settings mixin.

    Shared by the challenge-solver servers below to give them a common
    server version banner and address reuse behaviour.
    """
    # TODO: c.f. #858
    server_version = "ACME client standalone challenge solver"
    allow_reuse_address = True
class BaseDualNetworkedServers:
    """Base class for a pair of IPv6 and IPv4 servers that tries to do everything
    it's asked for both servers, but where failures in one server don't
    affect the other.

    If two servers are instantiated, they will serve on the same port.
    """

    def __init__(self, ServerClass, server_address, *remaining_args, **kwargs):
        """Instantiate ServerClass once for IPv6 and once for IPv4.

        Raises socket.error when neither address family could be bound.
        """
        port = server_address[1]
        self.threads: List[threading.Thread] = []
        self.servers: List[ACMEServerMixin] = []
        # Must try True first.
        # Ubuntu, for example, will fail to bind to IPv4 if we've already bound
        # to IPv6. But that's ok, since it will accept IPv4 connections on the IPv6
        # socket. On the other hand, FreeBSD will successfully bind to IPv4 on the
        # same port, which means that server will accept the IPv4 connections.
        # If Python is compiled without IPv6, we'll error out but (probably) successfully
        # create the IPv4 server.
        for ip_version in [True, False]:
            try:
                kwargs["ipv6"] = ip_version
                new_address = (server_address[0],) + (port,) + server_address[2:]
                new_args = (new_address,) + remaining_args
                server = ServerClass(*new_args, **kwargs)
                logger.debug(
                    "Successfully bound to %s:%s using %s", new_address[0],
                    new_address[1], "IPv6" if ip_version else "IPv4")
            except socket.error:
                if self.servers:
                    # Already bound using IPv6.
                    logger.debug(
                        "Certbot wasn't able to bind to %s:%s using %s, this "
                        "is often expected due to the dual stack nature of "
                        "IPv6 socket implementations.",
                        new_address[0], new_address[1],
                        "IPv6" if ip_version else "IPv4")
                else:
                    logger.debug(
                        "Failed to bind to %s:%s using %s", new_address[0],
                        new_address[1], "IPv6" if ip_version else "IPv4")
            else:
                self.servers.append(server)
                # If two servers are set up and port 0 was passed in, ensure we always
                # bind to the same port for both servers.
                port = server.socket.getsockname()[1]
        if not self.servers:
            raise socket.error("Could not bind to IPv4 or IPv6.")

    def serve_forever(self):
        """Wraps socketserver.TCPServer.serve_forever"""
        # Each server gets its own thread so both families serve at once.
        for server in self.servers:
            thread = threading.Thread(
                target=server.serve_forever)
            thread.start()
            self.threads.append(thread)

    def getsocknames(self):
        """Wraps socketserver.TCPServer.socket.getsockname"""
        return [server.socket.getsockname() for server in self.servers]

    def shutdown_and_server_close(self):
        """Wraps socketserver.TCPServer.shutdown, socketserver.TCPServer.server_close, and
        threading.Thread.join"""
        for server in self.servers:
            server.shutdown()
            server.server_close()
        for thread in self.threads:
            thread.join()
        self.threads = []
class TLSALPN01Server(TLSServer, ACMEServerMixin):
    """TLSALPN01 Server.

    Serves the TLS-ALPN-01 challenge: presents a per-name challenge
    certificate and only completes the handshake when the client
    negotiated the "acme-tls/1" ALPN protocol.
    """

    ACME_TLS_1_PROTOCOL = b"acme-tls/1"

    def __init__(self, server_address, certs, challenge_certs, ipv6=False):
        TLSServer.__init__(
            self, server_address, _BaseRequestHandlerWithLogging, certs=certs,
            ipv6=ipv6)
        # server name -> challenge certificate used by _cert_selection
        self.challenge_certs = challenge_certs

    def _cert_selection(self, connection):
        # TODO: We would like to serve challenge cert only if asked for it via
        # ALPN. To do this, we need to retrieve the list of protos from client
        # hello, but this is currently impossible with openssl [0], and ALPN
        # negotiation is done after cert selection.
        # Therefore, currently we always return challenge cert, and terminate
        # handshake in alpn_selection() if ALPN protos are not what we expect.
        # [0] https://github.com/openssl/openssl/issues/4952
        server_name = connection.get_servername()
        logger.debug("Serving challenge cert for server name %s", server_name)
        return self.challenge_certs.get(server_name, None)

    def _alpn_selection(self, _connection, alpn_protos):
        """Callback to select alpn protocol."""
        if len(alpn_protos) == 1 and alpn_protos[0] == self.ACME_TLS_1_PROTOCOL:
            logger.debug("Agreed on %s ALPN", self.ACME_TLS_1_PROTOCOL)
            return self.ACME_TLS_1_PROTOCOL
        logger.debug("Cannot agree on ALPN proto. Got: %s", str(alpn_protos))
        # Explicitly close the connection now, by returning an empty string.
        # See https://www.pyopenssl.org/en/stable/api/ssl.html#OpenSSL.SSL.Context.set_alpn_select_callback # pylint: disable=line-too-long
        return b""
class HTTPServer(BaseHTTPServer.HTTPServer):
    """Generic HTTP Server.

    Accepts an extra ``ipv6`` keyword argument that selects AF_INET6
    instead of the default AF_INET before delegating to the stdlib
    HTTPServer constructor.
    """

    def __init__(self, *args, **kwargs):
        self.ipv6 = kwargs.pop("ipv6", False)
        self.address_family = (
            socket.AF_INET6 if self.ipv6 else socket.AF_INET)
        BaseHTTPServer.HTTPServer.__init__(self, *args, **kwargs)
class HTTP01Server(HTTPServer, ACMEServerMixin):
    """HTTP01 Server.

    HTTP server whose request handler serves the provisioned HTTP-01
    challenge resources (see HTTP01RequestHandler).
    """

    def __init__(self, server_address, resources, ipv6=False, timeout=30):
        HTTPServer.__init__(
            self, server_address, HTTP01RequestHandler.partial_init(
                simple_http_resources=resources, timeout=timeout), ipv6=ipv6)
class HTTP01DualNetworkedServers(BaseDualNetworkedServers):
    """HTTP01Server Wrapper. Tries everything for both. Failures for one don't
    affect the other."""

    def __init__(self, *args, **kwargs):
        BaseDualNetworkedServers.__init__(self, HTTP01Server, *args, **kwargs)
class HTTP01RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """HTTP01 challenge handler.

    Adheres to the stdlib's `socketserver.BaseRequestHandler` interface.

    :ivar set simple_http_resources: A set of `HTTP01Resource`
        objects. TODO: better name?
    """

    HTTP01Resource = collections.namedtuple(
        "HTTP01Resource", "chall response validation")

    def __init__(self, *args, **kwargs):
        # Resources and timeout are injected via partial_init below.
        self.simple_http_resources = kwargs.pop("simple_http_resources", set())
        self.timeout = kwargs.pop('timeout', 30)
        BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args, **kwargs)

    def log_message(self, format, *args):  # pylint: disable=redefined-builtin
        """Log arbitrary message."""
        logger.debug("%s - - %s", self.client_address[0], format % args)

    def handle(self):
        """Handle request."""
        self.log_message("Incoming request")
        BaseHTTPServer.BaseHTTPRequestHandler.handle(self)

    def do_GET(self):  # pylint: disable=invalid-name,missing-function-docstring
        # Dispatch: index page, challenge path, or 404.
        if self.path == "/":
            self.handle_index()
        elif self.path.startswith("/" + challenges.HTTP01.URI_ROOT_PATH):
            self.handle_simple_http_resource()
        else:
            self.handle_404()

    def handle_index(self):
        """Handle index page."""
        self.send_response(200)
        self.send_header("Content-Type", "text/html")
        self.end_headers()
        self.wfile.write(self.server.server_version.encode())

    def handle_404(self):
        """Handler 404 Not Found errors."""
        self.send_response(http_client.NOT_FOUND, message="Not Found")
        self.send_header("Content-type", "text/html")
        self.end_headers()
        self.wfile.write(b"404")

    def handle_simple_http_resource(self):
        """Handle HTTP01 provisioned resources."""
        for resource in self.simple_http_resources:
            if resource.chall.path == self.path:
                self.log_message("Serving HTTP01 with token %r",
                                 resource.chall.encode("token"))
                self.send_response(http_client.OK)
                self.end_headers()
                self.wfile.write(resource.validation.encode())
                return
        else:  # pylint: disable=useless-else-on-loop
            # Loop fell through without returning: nothing matched.
            self.log_message("No resources to serve")
        self.log_message("%s does not correspond to any resource. ignoring",
                         self.path)

    @classmethod
    def partial_init(cls, simple_http_resources, timeout):
        """Partially initialize this handler.

        This is useful because `socketserver.BaseServer` takes
        uninitialized handler and initializes it with the current
        request.
        """
        return functools.partial(
            cls, simple_http_resources=simple_http_resources,
            timeout=timeout)
class _BaseRequestHandlerWithLogging(socketserver.BaseRequestHandler):
    """BaseRequestHandler with logging.

    Used by TLSALPN01Server, where the TLS handshake itself is the
    challenge and no application-level response is needed.
    """

    def log_message(self, format, *args):  # pylint: disable=redefined-builtin
        """Log arbitrary message."""
        logger.debug("%s - - %s", self.client_address[0], format % args)

    def handle(self):
        """Handle request."""
        self.log_message("Incoming request")
        socketserver.BaseRequestHandler.handle(self)
|
conftest.py | import os
import random
import shutil
import threading
from textwrap import dedent
import pytest
import stbt_rig
try:
# Needed for timeout argument to wait on Python 2.7
import subprocess32 as subprocess
except ImportError:
import subprocess
@pytest.fixture(scope="function", name="tmpdir")
def fixture_tmpdir():
    """Yield a self-cleaning temporary directory, restoring the original
    working directory afterwards (tests may chdir into the tmpdir)."""
    with stbt_rig.named_temporary_directory(
            prefix="stbt-rig-selftest-", ignore_errors=True) as d:
        origdir = os.path.abspath(os.curdir)
        try:
            yield d
        finally:
            # Return to the original cwd so the tmpdir can be removed.
            os.chdir(origdir)
@pytest.fixture(scope="function", name="test_pack")
def fixture_test_pack(tmpdir):  # pylint: disable=unused-argument
    """Create a git test-pack in tmpdir, chdir into it and return a
    stbt_rig.TestPack pointing at it."""
    setup_test_pack(tmpdir)
    os.chdir("%s/test-pack" % tmpdir)
    return stbt_rig.TestPack()
def setup_test_pack(tmpdir, portal_url="https://example.stb-tester.com"):
    """Create a git "upstream" bare repo and a "test-pack" clone in tmpdir.

    The clone contains a minimal stb-tester test pack (.stbt.conf pointing
    at *portal_url*, a passing test, a file with a deliberate syntax
    error, and a copy of stbt_rig.py), committed and pushed to the
    upstream's "mybranch".
    """
    u = os.path.join(tmpdir, "upstream")

    def tp(path=""):
        # Path inside the test-pack clone.
        return os.path.join(tmpdir, "test-pack", path)

    os.mkdir(u)
    subprocess.check_call(['git', 'init', '--bare'], cwd=u)
    subprocess.check_call(['git', 'clone', 'upstream', 'test-pack'], cwd=tmpdir)
    # Local git identity so commits work on machines without one set up.
    subprocess.check_call(
        ['git', 'config', 'user.email', 'stbt-rig@stb-tester.com'], cwd=tp())
    subprocess.check_call(
        ['git', 'config', 'user.name', 'stbt-rig tests'], cwd=tp())
    with open(tp(".stbt.conf"), "w") as f:
        f.write(dedent("""\
            [test_pack]
            stbt_version = 32
            python_version = 3
            portal_url = %s
            """ % portal_url))
    with open(tp(".gitignore"), "w") as f:
        f.write("token\n__pycache__\n")
    with open(tp("moo"), 'w') as f:
        f.write("Hello!\n")
    os.mkdir(tp("tests"))
    with open(tp("tests/test.py"), 'w') as f:
        f.write("def test_my_tests():\n    pass\n")
    # Deliberately broken file (syntax error + non-ASCII bytes) used to
    # exercise error reporting.
    with open(tp("tests/syntax_error.py"), 'wb') as f:
        f.write(
            b'# codec: utf-8\n\n'
            b'def test_its_a_test():\n\n'
            b'syntax error\n\n'
            b'I am \xf0\x9f\x98\x80')
    shutil.copyfile(_find_file("stbt_rig.py"), tp("stbt_rig.py"))
    os.chmod(tp("stbt_rig.py"), 0o0755)
    subprocess.check_call(
        ['git', 'add', '.stbt.conf', 'moo', '.gitignore', 'stbt_rig.py',
         'tests/syntax_error.py', 'tests/test.py'], cwd=tp())
    subprocess.check_call(['git', 'commit', '-m', 'Test'], cwd=tp())
    subprocess.check_call(
        ['git', 'push', '-u', 'origin', 'master:mybranch'], cwd=tp())
class PortalMock(object):
    """In-process mock of the stb-tester portal REST API.

    Runs a flask app in a background thread (see __enter__/__exit__) and
    serves canned responses for the endpoints stbt_rig talks to. All
    requests must carry the auth header "token this is my token".
    """

    def __init__(self):
        import flask

        self.app = flask.Flask(__name__)
        self.expectations = []  # queued expected run_tests payloads
        self.thread = None  # server thread while the mock is running
        self.socket = None
        self.address = None  # (host, port) once serving
        self.last_user_agent = None  # User-Agent of the last request
        self.nodes = ["stb-tester-00044b5af1d3", "stb-tester-00044b5aff8a"]

        # Canned job result served by the /results endpoints.
        RESULTS = [{
            "result": "pass",
            "triage_url": ("https://example.stb-tester.com/app/#/result/"
                           "/mynode/6Pfq/167/2018-10-10T13:13:20"),
            "result_id": "/mynode/6Pfq/167/2018-10-10T13:13:20",
            "artifacts": {
                "combined.log": {
                    "size": len(b'Downloaded \'combined.log\''),
                    "md5": "a31802f438fa89d98d77796cadc5be14",
                },
                "screenshot.png": {
                    "size": len(b'Downloaded \'screenshot.png\''),
                    'md5': "4a2ae485dcf5cf9f391cb5ac65128385",
                },
            }
        }]

        @self.app.before_request
        def _check_auth():
            # Reject unauthenticated requests; record the User-Agent.
            if flask.request.path.startswith('/unauthorised.git'):
                # Used for testing git username prompt behaviour
                response = flask.make_response("Unauthorized", 401,
                    {'WWW-Authenticate': 'Basic realm="Login Required"'})
                return response
            if (flask.request.headers.get('Authorization') !=
                    "token this is my token"):
                return ("Forbidden", 403)
            self.last_user_agent = flask.request.headers.get('User-Agent')
            return None

        @self.app.route("/ready")
        def _ready():
            return "Ready"

        @self.app.route('/api/v2/user')
        def _get_user():
            return flask.jsonify({"login": "tester"})

        @self.app.route('/api/v2/jobs/mynode/6Pfq/167')
        def _get_job():
            return flask.jsonify({'status': 'exited'})

        @self.app.route('/api/v2/jobs/mynode/6Pfq/167/await_completion')
        def _await_completion():
            # Randomly return 202 (still running) to exercise polling.
            return "{}", random.choice([200, 202, 202, 202])

        @self.app.route('/api/v2/results')
        def _get_results():
            assert flask.request.args['filter'] == 'job:/mynode/6Pfq/167'
            # The list endpoint omits the artifacts detail.
            out = [dict(x) for x in RESULTS]
            for x in out:
                del x['artifacts']
            return flask.jsonify(out)

        @self.app.route('/api/v2/results.xml')
        def _get_results_xml():
            assert flask.request.args['filter'] == 'job:/mynode/6Pfq/167'
            assert flask.request.args['include_tz'] == 'true'
            return PortalMock.RESULTS_XML

        @self.app.route('/api/v2/results/<path:result_id>')
        def _get_results_details(result_id):
            return flask.jsonify([
                x for x in RESULTS if x['result_id'] == '/' + result_id][0])

        @self.app.route(
            '/api/v2/results/mynode/6Pfq/167/2018-10-10T13:13:20/artifacts'
            '/<path:path>')
        def _get_artifact(path):
            return "Downloaded %r" % path, 200

        @self.app.route(
            "/api/v2/results/mynode/6Pfq/167/2018-10-10T13:13:20/stbt.log")
        def _get_stbt_log():
            return "The log output\n"

        @self.app.route('/api/v2/run_tests', methods=['POST'])
        def _post_run_tests():
            return flask.jsonify(self.on_run_tests(flask.request.json))

        @self.app.route('/api/_private/workgroup')
        def _get_private_workgroup():
            return flask.jsonify([{"id": node} for node in self.nodes])

        @self.app.route("/shutdown", methods=['POST'])
        def _shutdown():
            # Werkzeug-specific hook used by __exit__ to stop the server.
            func = flask.request.environ.get('werkzeug.server.shutdown')
            if func is None:
                raise RuntimeError('Not running with the Werkzeug Server')
            func()
            return ""

    def __enter__(self):
        # Start serving on an ephemeral localhost port in a daemon thread.
        from werkzeug.serving import make_server
        from werkzeug.debug import DebuggedApplication
        server = make_server('localhost', 0, DebuggedApplication(self.app))
        self.address = server.socket.getsockname()
        self.thread = threading.Thread(target=server.serve_forever)
        self.thread.daemon = True
        self.thread.start()
        return self

    def __exit__(self, *_):
        # Stop the server via /shutdown, then verify every expected
        # run_tests call actually happened.
        import requests
        requests.post("%s/shutdown" % self.url,
                      headers={'Authorization': "token this is my token"})
        self.thread.join()
        self.thread = None
        self.socket = None
        assert not self.expectations

    @property
    def url(self):
        # Base URL of the running mock server.
        return "http://%s:%i" % self.address

    def expect_run_tests(self, **kwargs):
        """Queue an expected payload for the next POST /run_tests."""
        self.expectations.append(kwargs)

    def on_run_tests(self, j):
        # Assert the request matches the next queued expectation.
        expected = self.expectations.pop(0)
        for k, v in expected.items():
            assert j[k] == v
        return {'job_uid': '/mynode/6Pfq/167'}

    # Canned JUnit-style XML served by /api/v2/results.xml.
    RESULTS_XML = (
        '<testsuite disabled="0" errors="0" failures="0" '
        'name="test" skipped="0" tests="1" time="3.270815" '
        'timestamp="2019-06-12T15:26:35+00:00">'
        '<testcase classname="tests/test.py" name="test_my_tests" '
        'time="3.270815"/>'
        '</testsuite>')
@pytest.fixture()
def portal_mock():
    """Yield a running PortalMock; stopped and verified on teardown."""
    with PortalMock() as m:
        yield m
def main():
    """Run the portal mock interactively for manual testing.

    Creates a test pack pointing at the mock, prints where everything
    lives and then sleeps until interrupted.
    """
    import time
    with PortalMock() as m, stbt_rig.named_temporary_directory(
            prefix="stbt-rig-selftest-", ignore_errors=True) as tmpdir:
        setup_test_pack(tmpdir, m.url)
        print("Test pack at %s/test-pack\nListening on %s" % (tmpdir, m.url))
        time.sleep(10000000)
def _find_file(path, root=os.path.dirname(os.path.abspath(__file__))):
return os.path.join(root, path)
# Allow running this conftest directly to start the mock portal.
if __name__ == "__main__":
    main()
|
manager.py | #!/usr/bin/env python3
import datetime
import os
import signal
import subprocess
import sys
import traceback
from multiprocessing import Process
import cereal.messaging as messaging
import selfdrive.crash as crash
from common.basedir import BASEDIR
from common.params import Params, ParamKeyType
from common.text_window import TextWindow
from selfdrive.boardd.set_time import set_time
from selfdrive.hardware import HARDWARE, PC
from selfdrive.manager.helpers import unblock_stdout
from selfdrive.manager.process import ensure_running, launcher
from selfdrive.manager.process_config import managed_processes
from selfdrive.athena.registration import register, UNREGISTERED_DONGLE_ID
from selfdrive.swaglog import cloudlog, add_file_handler
from selfdrive.version import dirty, get_git_commit, version, origin, branch, commit, \
terms_version, training_version, comma_remote, \
get_git_branch, get_git_remote
from selfdrive.hardware.eon.apk import system
sys.path.append(os.path.join(BASEDIR, "pyextra"))
def manager_init():
    """One-time startup initialisation.

    Syncs system time, seeds default params, records version info,
    registers the device (raises on failure) and configures crash
    reporting / cloud logging.
    """
    # update system time from panda
    set_time(cloudlog)

    params = Params()
    params.clear_all(ParamKeyType.CLEAR_ON_MANAGER_START)

    # Params seeded only when they have no value yet (see loop below).
    default_params = [
        ("CompletedTrainingVersion", "0"),
        ("HasAcceptedTerms", "0"),
        ("OpenpilotEnabledToggle", "1"),
        ("IsMetric", "0"),

        # HKG
        ("UseClusterSpeed", "1"),
        ("LongControlEnabled", "0"),
        ("MadModeEnabled", "1"),
        ("IsLdwsCar", "0"),
        ("LaneChangeEnabled", "0"),
        ("AutoLaneChangeEnabled", "0"),
        ("SccSmootherSlowOnCurves", "0"),
        ("SccSmootherSyncGasPressed", "0"),
        ("ShowDebugUI", "0"),
        ("CustomLeadMark", "0"),
        ("UseSMDPSHarness", "0"),
        ("hotspot_on_boot", "1"),
        ("c_wifi_offroad", "1"),
        ("SSCOD", "0"),
        ("RVL", "0"),
        ("FuseWithStockScc", "1"),
        ("CustomLeadMark", "0")
    ]
    if not PC:
        default_params.append(("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8')))

    if params.get_bool("RecordFrontLock"):
        params.put_bool("RecordFront", True)

    # set unset params
    for k, v in default_params:
        if params.get(k) is None:
            params.put(k, v)

    # is this dashcam?
    if os.getenv("PASSIVE") is not None:
        params.put_bool("Passive", bool(int(os.getenv("PASSIVE"))))

    if params.get("Passive") is None:
        raise Exception("Passive must be set to continue")

    os.umask(0)  # Make sure we can create files with 777 permissions

    # Create folders needed for msgq
    try:
        os.mkdir("/dev/shm")
    except FileExistsError:
        pass
    except PermissionError:
        print("WARNING: failed to make /dev/shm")

    # set version params
    params.put("Version", version)
    params.put("TermsVersion", terms_version)
    params.put("TrainingVersion", training_version)
    params.put("GitCommit", get_git_commit(default=""))
    params.put("GitBranch", get_git_branch(default=""))
    params.put("GitRemote", get_git_remote(default=""))

    # set dongle id
    reg_res = register(show_spinner=True)
    if reg_res:
        dongle_id = reg_res
    else:
        serial = params.get("HardwareSerial")
        raise Exception(f"Registration failed for device {serial}")
    os.environ['DONGLE_ID'] = dongle_id  # Needed for swaglog

    if not dirty:
        os.environ['CLEAN'] = '1'

    cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty,
                         device=HARDWARE.get_device_type())

    # Crash reporting only for comma-remote builds, unless disabled.
    if comma_remote and not (os.getenv("NOLOG") or os.getenv("NOCRASH") or PC):
        crash.init()
    crash.bind_user(id=dongle_id)
    crash.bind_extra(dirty=dirty, origin=origin, branch=branch, commit=commit,
                     device=HARDWARE.get_device_type())
def manager_prepare():
    """Run the prepare step on every managed process."""
    for proc in managed_processes.values():
        proc.prepare()
def manager_cleanup():
    """Stop every managed process, then log completion."""
    for proc in managed_processes.values():
        proc.stop()
    cloudlog.info("everything is dead")
def manager_thread():
    """Main supervisor loop.

    Launches side services, then repeatedly reconciles the set of running
    managed processes against device state until an uninstall is requested.
    """
    Process(name="shutdownd", target=launcher, args=("selfdrive.shutdownd",)).start()
    # NOTE(review): Android service starts — presumably no-ops off-device; confirm
    system("am startservice com.neokii.optool/.MainService")
    system("am startservice com.neokii.openpilot/.MainService")
    cloudlog.info("manager start")
    cloudlog.info({"environ": os.environ})
    # save boot log
    subprocess.call("./bootlog", cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
    params = Params()
    ignore = []  # process names that must never be (re)started this run
    if params.get("DongleId", encoding='utf8') == UNREGISTERED_DONGLE_ID:
        ignore += ["manage_athenad", "uploader"]
    if os.getenv("NOBOARD") is not None:
        ignore.append("pandad")
    if os.getenv("BLOCK") is not None:
        ignore += os.getenv("BLOCK").split(",")
    # initial reconcile with nothing started
    ensure_running(managed_processes.values(), started=False, not_run=ignore)
    started_prev = False
    sm = messaging.SubMaster(['deviceState'])
    pm = messaging.PubMaster(['managerState'])
    while True:
        sm.update()
        not_run = ignore[:]
        # stop logging when disk space is critically low
        if sm['deviceState'].freeSpacePercent < 5:
            not_run.append("loggerd")
        started = sm['deviceState'].started
        driverview = params.get_bool("IsDriverViewEnabled")
        ensure_running(managed_processes.values(), started, driverview, not_run)
        # trigger an update after going offroad
        if started_prev and not started and 'updated' in managed_processes:
            os.sync()
            managed_processes['updated'].signal(signal.SIGHUP)
        started_prev = started
        # ANSI-colored status line: green = alive, red = dead
        running_list = ["%s%s\u001b[0m" % ("\u001b[32m" if p.proc.is_alive() else "\u001b[31m", p.name)
                        for p in managed_processes.values() if p.proc]
        cloudlog.debug(' '.join(running_list))
        # send managerState
        msg = messaging.new_message('managerState')
        msg.managerState.processes = [p.get_process_state_msg() for p in managed_processes.values()]
        pm.send('managerState', msg)
        # TODO: let UI handle this
        # Exit main loop when uninstall is needed
        if params.get_bool("DoUninstall"):
            break
def main():
    """Entry point: init, prepare, then run the manager loop.

    With PREPAREONLY set in the environment, only the prepare phase runs.
    """
    prepare_only = os.getenv("PREPAREONLY") is not None
    manager_init()
    # Start UI early so prepare can happen in the background
    if not prepare_only:
        managed_processes['ui'].start()
    manager_prepare()
    if prepare_only:
        return
    # SystemExit on sigterm
    signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
    try:
        manager_thread()
    except Exception:
        traceback.print_exc()
        crash.capture_exception()
    finally:
        # always stop managed processes, even on crash
        manager_cleanup()
    # manager_thread exits its loop when DoUninstall is set
    if Params().get_bool("DoUninstall"):
        cloudlog.warning("uninstalling")
        HARDWARE.uninstall()
if __name__ == "__main__":
    unblock_stdout()
    try:
        main()
    except Exception:
        # log the failure and display it on-screen before re-raising
        add_file_handler(cloudlog)
        cloudlog.exception("Manager failed to start")
        # Show last 3 lines of traceback
        error = traceback.format_exc(-3)
        error = "Manager failed to start\n\n" + error
        with TextWindow(error) as t:
            t.wait_for_exit()
        raise
    # manual exit because we are forked
    sys.exit(0)
|
doa 2.py | import pickle
import marshal
import os
import socket
import subprocess
import sys
import tempfile
import threading
class PythonFileRunner(object):
    """A class for running python project files"""

    def __init__(self, pycore, file_, args=None, stdin=None,
                 stdout=None, analyze_data=None):
        self.pycore = pycore
        self.file = file_
        # callback fed with objects received from the child; None disables receiving
        self.analyze_data = analyze_data
        # callables notified once the child has finished sending data
        self.observers = []
        self.args = args
        self.stdin = stdin
        self.stdout = stdout

    def run(self):
        """Execute the process"""
        env = dict(os.environ)
        file_path = self.file.real_path
        # child's PYTHONPATH = project source folders + configured python path
        path_folders = self.pycore.get_source_folders() + \
            self.pycore.get_python_path_folders()
        env['PYTHONPATH'] = os.pathsep.join(folder.real_path
                                            for folder in path_folders)
        runmod_path = self.pycore.find_module('rope.base.oi.runmod').real_path
        self.receiver = None
        self._init_data_receiving()
        send_info = '-'
        if self.receiver:
            send_info = self.receiver.get_send_info()
        args = [sys.executable, runmod_path, send_info,
                self.pycore.project.address, self.file.real_path]
        if self.analyze_data is None:
            # no analysis: strip runmod and its two arguments, run the file directly
            del args[1:4]
        if self.args is not None:
            args.extend(self.args)
        self.process = subprocess.Popen(
            executable=sys.executable, args=args, env=env,
            cwd=os.path.split(file_path)[0], stdin=self.stdin,
            stdout=self.stdout, stderr=self.stdout, close_fds=os.name != 'nt')

    def _init_data_receiving(self):
        # Start a daemon thread that consumes analysis data from the child.
        if self.analyze_data is None:
            return
        # Disabling FIFO data transfer due to blocking when running
        # unittests in the GUI.
        # XXX: Handle FIFO data transfer for `rope.ui.testview`
        if True or os.name == 'nt':
            self.receiver = _SocketReceiver()
        else:
            self.receiver = _FIFOReceiver()
        self.receiving_thread = threading.Thread(target=self._receive_information)
        self.receiving_thread.setDaemon(True)
        self.receiving_thread.start()

    def _receive_information(self):
        # Pump received objects into analyze_data, then notify observers.
        #temp = open('/dev/shm/info', 'w')
        for data in self.receiver.receive_data():
            self.analyze_data(data)
            #temp.write(str(data) + '\n')
        #temp.close()
        for observer in self.observers:
            observer()

    def wait_process(self):
        """Wait for the process to finish"""
        self.process.wait()
        if self.analyze_data:
            self.receiving_thread.join()

    def kill_process(self):
        """Stop the process"""
        if self.process.poll() is not None:
            # already exited; nothing to kill
            return
        try:
            if hasattr(self.process, 'terminate'):
                self.process.terminate()
            elif os.name != 'nt':
                os.kill(self.process.pid, 9)
            else:
                # fallback for old Windows Pythons without Popen.terminate
                import ctypes
                handle = int(self.process._handle)
                ctypes.windll.kernel32.TerminateProcess(handle, -1)
        except OSError:
            pass

    def add_finishing_observer(self, observer):
        """Notify this observer when execution finishes"""
        self.observers.append(observer)
class _MessageReceiver(object):
    """Abstract interface for receiving analysis data from the child process."""

    def receive_data(self):
        """Yield data objects sent by the child (abstract)."""
        pass

    def get_send_info(self):
        """Return a string telling the child where to send data (abstract)."""
        pass
class _SocketReceiver(_MessageReceiver):
    """Receives pickled objects from the child over a local TCP socket."""

    def __init__(self):
        self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.data_port = 3037
        # probe ports upward until one binds; NOTE(review): if every port up
        # to 3999 is taken, listen() is called on an unbound socket
        while self.data_port < 4000:
            try:
                self.server_socket.bind(('', self.data_port))
                break
            except socket.error as e:
                self.data_port += 1
        self.server_socket.listen(1)

    def get_send_info(self):
        """Return the port number the child should connect to."""
        return str(self.data_port)

    def receive_data(self):
        """Accept a single connection and yield unpickled objects until EOF."""
        conn, addr = self.server_socket.accept()
        # only one client expected; stop listening immediately
        self.server_socket.close()
        my_file = conn.makefile('rb')
        while True:
            try:
                yield pickle.load(my_file)
            except EOFError:
                break
        my_file.close()
        conn.close()
class _FIFOReceiver(_MessageReceiver):
    """Receives marshalled objects from the child through a named pipe."""

    def __init__(self):
        # XXX: this is insecure and might cause race conditions
        self.file_name = self._get_file_name()
        os.mkfifo(self.file_name)

    def _get_file_name(self):
        # First unused path of the form <tmp>/__rope_0000, __rope_0001, ...
        prefix = tempfile.gettempdir() + '/__rope_'
        i = 0
        while os.path.exists(prefix + str(i).rjust(4, '0')):
            i += 1
        return prefix + str(i).rjust(4, '0')

    def get_send_info(self):
        """Return the FIFO path the child should write to."""
        return self.file_name

    def receive_data(self):
        """Yield unmarshalled objects from the FIFO until EOF, then clean up."""
        my_file = open(self.file_name, 'rb')
        while True:
            try:
                yield marshal.load(my_file)
            except EOFError:
                break
        my_file.close()
        os.remove(self.file_name)
|
server.py | import threading
import server_logic
# from logic import constrictor
# from logic import royale
# from logic import squad
# from logic import standard
# from logic.wrapped import wrapped
from logic.human.wrapped import wrapped
from threading import Thread
from classes.GameData import GameData
from flask import request
from flask import Flask
import time
import logging
import os
from dotenv import load_dotenv
load_dotenv()
#os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
#os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
app = Flask(__name__)
@app.get("/")
def handle_info():
    """Battlesnake registration endpoint (GET /).

    Called when the snake is registered on play.battlesnake.com; the returned
    dict controls appearance and author attribution. See
    https://docs.battlesnake.com/references/personalization
    """
    print("INFO")
    appearance = {
        "apiversion": "1",
        "author": "",  # TODO: Your Battlesnake Username
        "color": "#ff00ff",  # TODO: Personalize
        "head": "default",  # TODO: Personalize
        "tail": "default",  # TODO: Personalize
    }
    return appearance
@app.post("/start")
def handle_start():
    """Game-start notification (POST /start).

    request.json describes the game about to be played.
    """
    payload = request.get_json()
    gamedata = GameData(payload)  # build game state wrapper for this match
    print(f"{payload['game']['id']} START")
    return "ok"
@app.post("/move")
def handle_move():
    """Per-turn move endpoint (POST /move).

    Delegates to the wrapped-mode logic and returns one of "up", "down",
    "left", or "right".
    """
    print("start move prediction")
    payload = request.get_json()
    gamedata = GameData(payload)
    started_at = time.time()
    move = wrapped.handle_move(gamedata)
    finished_at = time.time()
    print(f"move turn : {gamedata.get_turn()} to {move}")
    print(f"execution duration : {finished_at - started_at}")
    print("-----------")
    return {"move": move}
@app.post("/end")
def end():
    """Game-over notification (POST /end); informational only."""
    payload = request.get_json()
    gamedata = GameData(payload)  # kept for parity with other handlers
    print(f"{payload['game']['id']} END")
    print("----------------------------")
    print("----------------------------")
    return "ok"
@app.after_request
def identify_server(response):
    """Stamp every response with a Server header identifying this snake."""
    server_name = "BattlesnakeOfficial/starter-snake-python"
    response.headers["Server"] = server_name
    return response
def keep_alive():
    """Run the Flask server on a background thread (persistent mode)."""
    # fix: message previously misspelled as 'presistent'
    print("start persistent mode")
    server = Thread(target=run)
    server.start()
def run():
    """Start Flask bound to all interfaces on $PORT (default 8080)."""
    port = int(os.environ.get("PORT", "8080"))
    app.run(host="0.0.0.0", port=port)
if __name__ == "__main__":
    # quiet werkzeug's per-request logging
    logging.getLogger("werkzeug").setLevel(logging.ERROR)
    print("Starting Battlesnake Server...")
    is_local = os.getenv('IS_LOCAL') == "True"
    if is_local:
        # local development: run in the foreground with the debugger
        port = int(os.environ.get("PORT", "8080"))
        app.run(host="0.0.0.0", port=port, debug=True)
    else:
        # hosted: run the server on a background thread
        keep_alive()
|
other.py | #!/usr/bin/env python2.7
from flask import Flask, render_template, send_file, redirect
from flask.ext.socketio import SocketIO, send, emit
import threading
import time
import os, shutil, re
from lib import *
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app)
@socketio.on('connect', namespace='/mir')
def handle_message():
    # Log each websocket connection on the /mir namespace.
    log.logger.debug('user connected')
    #socketio.emit('newnumber', {'number': 'online'}, namespace='/mir')
@socketio.on('disconnect', namespace='/mir')
def handle_disconnect():
    """Log websocket disconnects on the /mir namespace."""
    # fix: message previously misspelled as 'user diconnected'
    log.logger.debug('user disconnected')
def get_query_status():
    """Return the query worker's status string, or a not-running notice."""
    status = mq.get('status:query', q)
    return 'query process not running!' if status is None else status
def get_master_status():
    """Return the master-file worker's status string, or a not-running notice."""
    status = mq.get('status:master_file', q)
    return 'master_file process not running!' if status is None else status
def check_downloads():
    """Report whether a zip download is waiting in the queue (HTML snippet)."""
    if mq.qsize('data:zip', q) < 1:
        return 'no downloads ready'
    return "<a href='/download'>download ready.</a>"
def cleanup(master_file):
    """Delete every entry in master_file's directory except master_file itself."""
    master_dir = os.path.dirname(master_file)
    keep = master_file.split("/")[-1]
    entries = os.listdir(master_dir)
    entries.remove(keep)
    for name in entries:
        path = os.path.join(master_dir, name)
        log.logger.debug("cleanup of %s" % path)
        if os.path.isfile(path):
            os.remove(path)
        else:
            shutil.rmtree(path)
def check_errors():
    """Pop queued query errors and push them to connected /mir clients."""
    error_list = mq.pop('error:query', q)
    if error_list is not None:
        socketio.emit('errors', {'data': "</br>".join(error_list.split(","))}, namespace='/mir')
def run_download():
    # Serve the next ready zip from the queue, or the "not ready" page.
    if mq.qsize('data:zip',q) < 1:
        return render_template('download.html')
    else:
        filelink = mq.pop('data:zip',q)
        log.logger.debug("found file %s" % filelink)
        # queue stores paths relative to the app's working directory
        cwd = os.path.dirname(os.path.realpath("__file__"))
        fullpath = os.path.join(cwd,filelink)
        # remove leftover siblings before handing the file out
        cleanup(fullpath)
        return send_file(fullpath,
                         mimetype='application/zip',
                         attachment_filename=fullpath.split("/")[-1],
                         as_attachment=True)
def start_query_process():
    """Launch setter.py in the background unless it is already running."""
    # `ps | grep` pipeline exits 0 when a matching process exists
    if os.system("ps aux | grep -v grep | grep setter.py ") == 0:
        log.logger.debug("Process setter.py is running")
        return "Process is running"
    else:
        log.logger.debug("Starting setter.py")
        os.system("/usr/bin/python2.7 ./setter.py > /dev/null &")
        return "Launched query process"
def start_master_process():
    """Launch get_master.py in the background unless it is already running."""
    # `ps | grep` pipeline exits 0 when a matching process exists
    if os.system("ps aux | grep -v grep | grep get_master.py ") == 0:
        log.logger.debug("Process get_master.py is running")
        return "Process is running"
    else:
        log.logger.debug("Starting get_master.py")
        os.system("/usr/bin/python2.7 ./get_master.py > /dev/null &")
        return "Launched master file process"
def ping_thread():
    """Background loop: every second (re)start the worker processes and push
    their status to /mir websocket clients."""
    count = 0  # NOTE(review): unused
    while True:
        time.sleep(1)
        #qstatus = get_query_status()
        #if qstatus == 'query process not running!':
        qstatus = start_query_process()
        #mstatus = get_master_status()
        #if mstatus == 'master_file process not running!':
        mstatus = start_master_process()
        check_errors()
        dstatus = check_downloads()
        socketio.emit('querystatus', {'data': qstatus}, namespace='/mir')
        socketio.emit('filefind', {'data': mstatus}, namespace='/mir')
        socketio.emit('downloads', {'data': dstatus}, namespace='/mir')
@app.route('/')
def index():
    """Render the main page."""
    return render_template('index.html')
@app.route('/download')
def do_download():
    """HTTP endpoint delegating to run_download()."""
    return run_download()
@socketio.on('getfiles', namespace='/mir')
def send_list(message):
    # Accept a user-supplied list and queue it for the query worker.
    # NOTE(review): this denylist is NOT a safe defense against SQL
    # injection; the consumer should use parameterized queries instead.
    not_allowed = ['%'," AND ","CREATE","TABLE","DATABASE","INT","WHERE","DELETE"]
    if not message['data']:
        log.logger.warning("got no input")
    elif any(x in message['data'].upper() for x in not_allowed):
        log.logger.warning("input contains not allowed characters %s" % message['data'])
    else:
        log.logger.info("got input of %s" % message['data'])
        mq.put('data:regno_list',q,message['data'])
    #
    #emit('my respone', {'data': 'pet'})
if __name__ == '__main__':
#setup redis connection
q = mq.redis_connect()
# run_download()
# exit()
#setup threaing
t = threading.Thread(target=ping_thread)
t.daemon = True
t.start()
# start socket webapp
socketio.run(app,host='0.0.0.0')
|
test.py | #! /usr/bin/env python3
#
# Copyright 2019-2020 Garmin Ltd. or its subsidiaries
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import configparser
import grp
import os
import pty
import pwd
import re
import resource
import shutil
import subprocess
import sys
import tempfile
import threading
import unittest
PYREX_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
sys.path.append(PYREX_ROOT)
import pyrex # NOQA
# Environment variable naming the prebuilt image tag under test (if any)
TEST_PREBUILT_TAG_ENV_VAR = "TEST_PREBUILT_TAG"


def skipIfPrebuilt(func):
    """Decorator: skip the wrapped test when $TEST_PREBUILT_TAG is set,
    i.e. when running against prebuilt images."""
    import functools

    # fix: without functools.wraps the wrapper hid the test's real
    # name/docstring from unittest reporting and test selection
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        if os.environ.get(TEST_PREBUILT_TAG_ENV_VAR, ""):
            self.skipTest("Test does not apply to prebuilt images")
        return func(self, *args, **kwargs)
    return wrapper


# Images already built during this test session (avoids rebuilding)
built_images = set()
class PyrexTest(object):
    """Mixin with common setup and helpers for running commands inside a
    Pyrex container.

    Concrete subclasses supply `pokyver`, `test_image`, and `provider`, and
    mix in unittest.TestCase (for addCleanup/assert* machinery).
    """

    @property
    def pokyroot(self):
        # Root of the poky checkout for the version under test
        return os.path.join(PYREX_ROOT, "poky", self.pokyver)

    def setUp(self):
        self.build_dir = os.path.join(PYREX_ROOT, "build", "%d" % os.getpid())

        def cleanup_build():
            if os.path.isdir(self.build_dir):
                shutil.rmtree(self.build_dir)

        self.addCleanup(cleanup_build)
        cleanup_build()
        os.makedirs(self.build_dir)

        self.pyrex_conf = os.path.join(self.build_dir, "pyrex.ini")
        conf = self.get_config()
        conf.write_conf()

        if not os.environ.get(TEST_PREBUILT_TAG_ENV_VAR, ""):
            self.prebuild_image()

        def cleanup_env():
            os.environ.clear()
            os.environ.update(self.old_environ)

        # OE requires that "python" be python2, not python3
        self.bin_dir = os.path.join(self.build_dir, "bin")
        self.old_environ = os.environ.copy()
        # fix: cleanup_env was defined but never registered, so environment
        # mutations leaked between tests
        self.addCleanup(cleanup_env)
        os.makedirs(self.bin_dir)
        os.symlink("/usr/bin/python2", os.path.join(self.bin_dir, "python"))
        os.environ["PATH"] = self.bin_dir + ":" + os.environ["PATH"]
        os.environ["PYREX_BUILD_QUIET"] = "0"
        os.environ["PYREX_OEINIT"] = os.path.join(self.pokyroot, "oe-init-build-env")
        os.environ["PYREX_CONFIG_BIND"] = PYREX_ROOT
        # start from a clean slate for variables the tests manipulate
        for var in ("SSH_AUTH_SOCK", "BB_ENV_EXTRAWHITE"):
            if var in os.environ:
                del os.environ[var]
        # per-thread scratch directory so parallel tests don't collide
        self.thread_dir = os.path.join(
            self.build_dir, "%d.%d" % (os.getpid(), threading.get_ident())
        )
        os.makedirs(self.thread_dir)

    def prebuild_image(self):
        """Build the container image once per (image, provider) pair."""
        global built_images
        image = ":".join((self.test_image, self.provider))
        if image not in built_images:
            self.assertSubprocess(
                [
                    os.path.join(PYREX_ROOT, "ci", "build_image.py"),
                    "--provider",
                    self.provider,
                    self.test_image,
                ]
            )
            built_images.add(image)

    def get_config(self, *, defaults=False):
        """Return the pyrex config for this test, adjusted for CI.

        With defaults=True the shipped default config is used instead of any
        previously written test config.
        """
        class Config(configparser.RawConfigParser):
            def write_conf(self):
                write_config_helper(self)

        def write_config_helper(conf):
            with open(self.pyrex_conf, "w") as f:
                conf.write(f)

        config = Config()
        if os.path.exists(self.pyrex_conf) and not defaults:
            config.read(self.pyrex_conf)
        else:
            config.read_string(pyrex.read_default_config(True))
        # Setup the config suitable for testing
        config["config"]["image"] = self.test_image
        config["config"]["engine"] = self.provider
        config["config"]["buildlocal"] = "0"
        tag = os.environ.get(TEST_PREBUILT_TAG_ENV_VAR, "")
        if tag:
            config["config"]["pyrextag"] = tag
        else:
            config["config"]["pyrextag"] = "ci-test"
            config["config"]["registry"] = ""
        config["run"]["bind"] += " " + self.build_dir
        config["imagebuild"]["buildcommand"] = "%s --provider=%s %s" % (
            os.path.join(PYREX_ROOT, "ci", "build_image.py"),
            self.provider,
            self.test_image,
        )
        return config

    def assertSubprocess(
        self, *args, pretty_command=None, capture=False, returncode=0, **kwargs
    ):
        """Run a subprocess and assert its exit code.

        With capture=True the combined stdout/stderr is returned (stripped);
        otherwise output is streamed to our stdout and None is returned.
        """
        if capture:
            try:
                output = subprocess.check_output(
                    *args, stderr=subprocess.STDOUT, **kwargs
                )
            except subprocess.CalledProcessError as e:
                ret = e.returncode
                output = e.output
            else:
                ret = 0
            self.assertEqual(
                ret,
                returncode,
                msg="%s: %s"
                % (pretty_command or " ".join(*args), output.decode("utf-8")),
            )
            return output.decode("utf-8").rstrip()
        else:
            with subprocess.Popen(
                *args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, **kwargs
            ) as proc:
                # stream output live until the process exits
                while True:
                    out = proc.stdout.readline().decode("utf-8")
                    if not out and proc.poll() is not None:
                        break
                    if out:
                        sys.stdout.write(out)
                ret = proc.poll()
            self.assertEqual(
                ret, returncode, msg="%s failed" % (pretty_command or " ".join(*args))
            )
            return None

    def _write_host_command(
        self,
        args,
        *,
        quiet_init=False,
        cwd=PYREX_ROOT,
        builddir=None,
        bitbakedir="",
        init_env=None
    ):
        """Write a shell script that sources the pyrex init script and then
        runs `args` chained with &&. Returns (script path, script text)."""
        # fix: avoid mutable default argument for init_env
        if init_env is None:
            init_env = {}
        if builddir is None:
            builddir = self.build_dir
        command = ['export %s="%s"\n' % (k, v) for k, v in init_env.items()]
        command.extend(
            [
                "PYREXCONFFILE=%s\n" % self.pyrex_conf,
                ". %s/pyrex-init-build-env%s %s %s && "
                % (
                    self.pokyroot,
                    " > /dev/null 2>&1" if quiet_init else "",
                    builddir,
                    bitbakedir,
                ),
                "(",
                " && ".join(list(args)),
                ")",
            ]
        )
        command = "".join(command)
        cmd_file = os.path.join(self.thread_dir, "command")
        with open(cmd_file, "w") as f:
            f.write(command)
        return cmd_file, command

    def _write_container_command(self, args):
        """Write `args` (chained with &&) to a script meant to run in-container."""
        cmd_file = os.path.join(self.thread_dir, "container_command")
        with open(cmd_file, "w") as f:
            f.write(" && ".join(args))
        return cmd_file

    def assertPyrexHostCommand(
        self,
        *args,
        quiet_init=False,
        cwd=PYREX_ROOT,
        builddir=None,
        bitbakedir="",
        init_env=None,
        **kwargs
    ):
        """Run `args` on the host after sourcing the pyrex environment."""
        # fix: avoid mutable default argument for init_env
        if init_env is None:
            init_env = {}
        cmd_file, command = self._write_host_command(
            args,
            quiet_init=quiet_init,
            cwd=cwd,
            builddir=builddir,
            bitbakedir=bitbakedir,
            init_env=init_env,
        )
        return self.assertSubprocess(
            [os.environ.get("SHELL", "/bin/bash"), cmd_file],
            pretty_command=command,
            cwd=cwd,
            **kwargs
        )

    def assertPyrexContainerShellCommand(self, *args, **kwargs):
        """Run `args` inside the container via pyrex-shell."""
        cmd_file = self._write_container_command(args)
        return self.assertPyrexHostCommand("pyrex-shell %s" % cmd_file, **kwargs)

    def assertPyrexContainerCommand(self, cmd, **kwargs):
        """Run a single command inside the container via pyrex-run."""
        return self.assertPyrexHostCommand("pyrex-run %s" % cmd, **kwargs)

    def assertPyrexContainerShellPTY(
        self, *args, returncode=0, env=None, quiet_init=False, bitbakedir=""
    ):
        """Run `args` in the container under a pseudo-terminal; returns the
        captured terminal output."""
        container_cmd_file = self._write_container_command(args)
        host_cmd_file, _ = self._write_host_command(
            ["pyrex-shell %s" % container_cmd_file],
            quiet_init=quiet_init,
            bitbakedir=bitbakedir,
        )
        stdout = []

        def master_read(fd):
            while True:
                data = os.read(fd, 1024)
                if not data:
                    return data
                stdout.append(data)

        old_env = None
        try:
            if env:
                old_env = os.environ.copy()
                os.environ.clear()
                os.environ.update(env)
            status = pty.spawn(["/bin/bash", host_cmd_file], master_read)
        finally:
            if old_env is not None:
                os.environ.clear()
                os.environ.update(old_env)
        self.assertFalse(
            os.WIFSIGNALED(status),
            msg="%s died from a signal: %s" % (" ".join(args), os.WTERMSIG(status)),
        )
        self.assertTrue(
            os.WIFEXITED(status), msg="%s exited abnormally" % " ".join(args)
        )
        self.assertEqual(
            os.WEXITSTATUS(status), returncode, msg="%s failed" % " ".join(args)
        )
        return (b"".join(stdout)).decode("utf-8").rstrip()
class PyrexImageType_base(PyrexTest):
"""
Base image tests. All images that derive from a -base image should derive
from this class
"""
def test_init(self):
    """Sourcing the pyrex environment and running `true` succeeds."""
    self.assertPyrexHostCommand("true")
def test_pyrex_shell(self):
    """pyrex-shell propagates the command's exit code."""
    self.assertPyrexContainerShellCommand("exit 3", returncode=3)
def test_pyrex_run(self):
    """pyrex-run propagates a failing command's exit code."""
    self.assertPyrexContainerCommand("/bin/false", returncode=1)
def test_in_container(self):
    """Commands really run in a container: /proc-derived state inside must
    differ from the host's."""
    def capture_pyrex_state(*args, **kwargs):
        capture_file = os.path.join(self.thread_dir, "pyrex_capture")
        if self.provider == "podman":
            # PID 1's cmdline differs inside a podman container
            self.assertPyrexContainerShellCommand(
                "cp --no-preserve=all /proc/1/cmdline %s" % capture_file,
                *args,
                **kwargs
            )
            with open(capture_file, "rb") as f:
                return f.read()
        else:
            # otherwise compare cgroup membership
            self.assertPyrexContainerShellCommand(
                "cat /proc/self/cgroup > %s" % capture_file, *args, **kwargs
            )
            with open(capture_file, "r") as f:
                return f.read()

    def capture_local_state():
        if self.provider == "podman":
            with open("/proc/1/cmdline", "rb") as f:
                return f.read()
        else:
            with open("/proc/self/cgroup", "r") as f:
                return f.read()

    local_state = capture_local_state()
    pyrex_state = capture_pyrex_state()
    self.assertNotEqual(local_state, pyrex_state)
def test_quiet_build(self):
    """Commands still work with PYREX_BUILD_QUIET=1."""
    env = os.environ.copy()
    env["PYREX_BUILD_QUIET"] = "1"
    self.assertPyrexHostCommand("true", env=env)
def test_bad_provider(self):
    # Prevent container build from working
    os.symlink("/bin/false", os.path.join(self.bin_dir, self.provider))
    # Verify that attempting to run build pyrex without a valid container
    # provider shows the installation instructions
    output = self.assertPyrexHostCommand("true", returncode=1, capture=True)
    self.assertIn("Unable to run", output)
def test_ownership(self):
    # Test that files created in the container are the same UID/GID as the
    # user running outside
    test_file = os.path.join(self.thread_dir, "ownertest")
    if os.path.exists(test_file):
        os.unlink(test_file)
    self.assertPyrexContainerShellCommand(
        'echo "$(id -un):$(id -gn)" > %s' % test_file
    )
    s = os.stat(test_file)
    self.assertEqual(s.st_uid, os.getuid())
    self.assertEqual(s.st_gid, os.getgid())
    # user/group names must match too, not just numeric ids
    with open(test_file, "r") as f:
        (username, groupname) = f.read().rstrip().split(":")
    self.assertEqual(username, pwd.getpwuid(os.getuid()).pw_name)
    self.assertEqual(groupname, grp.getgrgid(os.getgid()).gr_name)
def test_owner_env(self):
    # This test is primarily designed to ensure that everything is passed
    # correctly through 'pyrex run'
    if self.provider == "podman":
        self.skipTest("Rootless podman cannot change to another user")
    conf = self.get_config()
    # Note: These config variables are intended for testing use only
    conf["run"]["uid"] = "1337"
    conf["run"]["username"] = "theuser"
    conf["run"]["groups"] = "7331:thegroup 7332:othergroup"
    conf["run"]["initcommand"] = ""
    conf.write_conf()
    # Make a fifo that the container can write into. We can't just write a
    # file because it won't be owned by running user and thus can't be
    # cleaned up
    old_umask = os.umask(0)
    self.addCleanup(os.umask, old_umask)
    fifo = os.path.join(self.thread_dir, "fifo")
    os.mkfifo(fifo)
    self.addCleanup(os.remove, fifo)
    os.umask(old_umask)
    output = []

    def read_fifo():
        nonlocal output
        with open(fifo, "r") as f:
            output = f.readline().rstrip().split(":")

    thread = threading.Thread(target=read_fifo)
    thread.start()
    try:
        self.assertPyrexContainerShellCommand(
            'echo "$(id -u):$(id -g):$(id -un):$(id -gn):$USER:$GROUP:$(id -G):$(id -Gn)" > %s'
            % fifo
        )
    finally:
        thread.join()
    # fields: uid, gid, user name, group name, $USER, $GROUP, gids, group names
    self.assertEqual(output[0], "1337")
    self.assertEqual(output[1], "7331")
    self.assertEqual(output[2], "theuser")
    self.assertEqual(output[3], "thegroup")
    self.assertEqual(output[4], "theuser")
    self.assertEqual(output[5], "thegroup")
    self.assertEqual(output[6], "7331 7332")
    self.assertEqual(output[7], "thegroup othergroup")
def test_rlimit_nofile(self):
    """The container sees its NOFILE soft limit raised to the hard limit
    even when the host's soft limit is lower."""
    if self.provider != "podman":
        self.skipTest("Only podman needs rlimit changes")
    (soft, hard) = resource.getrlimit(resource.RLIMIT_NOFILE)
    try:
        # fix: use integer division — `hard / 2` is a float in Python 3 and
        # resource.setrlimit() requires integer limits
        resource.setrlimit(resource.RLIMIT_NOFILE, (hard // 2, hard))
        s = self.assertPyrexContainerShellCommand(
            "ulimit -n && ulimit -Hn", capture=True, quiet_init=True
        )
        self.assertEqual(tuple(int(lim) for lim in s.split()), (hard, hard))
    finally:
        # always restore the original limits
        resource.setrlimit(resource.RLIMIT_NOFILE, (soft, hard))
def test_bind_from_PYREX_BIND(self):
    """Directories listed in $PYREX_BIND are bind-mounted into the container."""
    temp_dir = tempfile.mkdtemp("-pyrex")
    self.addCleanup(shutil.rmtree, temp_dir)
    temp_file = os.path.join(temp_dir, "data")
    env = os.environ.copy()
    env["PYREX_BIND"] = temp_dir
    self.assertPyrexContainerShellCommand("echo 123 > %s" % temp_file, env=env)
    # file written inside the container must be visible on the host
    with open(temp_file, "r") as f:
        self.assertEqual(f.read(), "123\n")
def test_duplicate_binds(self):
    """Listing the same bind path twice is tolerated."""
    scratch = tempfile.mkdtemp("-pyrex")
    self.addCleanup(shutil.rmtree, scratch)
    config = self.get_config()
    config["run"]["bind"] += " %s %s" % (scratch, scratch)
    config.write_conf()
    self.assertPyrexContainerShellCommand("true")
def test_missing_bind(self):
    """A nonexistent bind source is reported as an error."""
    scratch = tempfile.mkdtemp("-pyrex")
    self.addCleanup(shutil.rmtree, scratch)
    absent = os.path.join(scratch, "does-not-exist")
    config = self.get_config()
    config["run"]["bind"] += " %s" % absent
    config.write_conf()
    output = self.assertPyrexContainerShellCommand(
        "test -e %s" % absent, capture=True, returncode=1
    )
    self.assertRegex(output, r"Error: bind source path \S+ does not exist")
def test_optional_bind(self):
    """A missing bind marked ',optional' is silently skipped."""
    scratch = tempfile.mkdtemp("-pyrex")
    self.addCleanup(shutil.rmtree, scratch)
    absent = os.path.join(scratch, "does-not-exist")
    config = self.get_config()
    config["run"]["bind"] += " %s,optional" % absent
    config.write_conf()
    self.assertPyrexContainerShellCommand("test ! -e %s" % absent)
def test_readonly_bind(self):
    """Binds marked ',readonly' reject writes; plain binds allow them."""
    temp_dir = tempfile.mkdtemp("-pyrex")
    self.addCleanup(shutil.rmtree, temp_dir)
    temp_file = "%s/test.txt" % temp_dir
    with open(temp_file, "w") as f:
        f.write("foo\n")
    # plain bind: write from the container succeeds
    conf = self.get_config(defaults=True)
    conf["run"]["bind"] += " %s" % temp_dir
    conf.write_conf()
    self.assertPyrexContainerShellCommand("echo bar1 > %s" % temp_file)
    with open(temp_file, "r") as f:
        self.assertEqual(f.read(), "bar1\n", "Temporary file was overwritten")
    # readonly bind: the same write must fail and leave the file untouched
    conf = self.get_config(defaults=True)
    conf["run"]["bind"] += " %s,readonly" % temp_dir
    conf.write_conf()
    self.assertPyrexContainerShellCommand(
        "echo bar2 > %s" % temp_file, returncode=1
    )
    with open(temp_file, "r") as f:
        self.assertEqual(f.read(), "bar1\n", "Temporary file was overwritten")
def test_bad_bind_option(self):
    """An unknown bind option is reported as an error."""
    scratch = tempfile.mkdtemp("-pyrex")
    self.addCleanup(shutil.rmtree, scratch)
    absent = os.path.join(scratch, "does-not-exist")
    config = self.get_config()
    config["run"]["bind"] += " %s,bad-option" % absent
    config.write_conf()
    output = self.assertPyrexContainerShellCommand(
        "test ! -e %s" % absent, capture=True, returncode=1
    )
    self.assertIn("Error: bad option(s) 'bad-option' for bind", output)
def test_bad_confversion(self):
    # Verify that a bad config is an error
    conf = self.get_config()
    conf["config"]["confversion"] = "0"
    conf.write_conf()
    self.assertPyrexHostCommand("true", returncode=1)
def test_conftemplate_ignored(self):
    # Write out a template with a bad version in an alternate location. It
    # should be ignored
    temp_dir = tempfile.mkdtemp("-pyrex")
    self.addCleanup(shutil.rmtree, temp_dir)
    conftemplate = os.path.join(temp_dir, "pyrex.ini.sample")
    conf = self.get_config(defaults=True)
    conf["config"]["confversion"] = "0"
    with open(conftemplate, "w") as f:
        conf.write(f)
    # command must still succeed — the stray template is never read
    self.assertPyrexHostCommand("true")
@skipIfPrebuilt
def test_local_build(self):
    """Building the image locally (buildlocal=1) works."""
    conf = self.get_config()
    conf["config"]["buildlocal"] = "1"
    conf.write_conf()
    self.assertPyrexHostCommand("true")
def test_version(self):
    """pyrex.VERSION matches the project's version regex."""
    self.assertRegex(
        pyrex.VERSION,
        pyrex.VERSION_REGEX,
        msg="Version '%s' is invalid" % pyrex.VERSION,
    )
def test_version_tag(self):
    """If a release tag is present ($RELEASE_TAG or a git tag on HEAD), it
    must equal v<VERSION> and match the tag regex."""
    tag = None
    if os.environ.get("RELEASE_TAG"):
        tag = os.environ["RELEASE_TAG"]
    else:
        try:
            tags = (
                subprocess.check_output(
                    ["git", "-C", PYREX_ROOT, "tag", "-l", "--points-at", "HEAD"]
                )
                .decode("utf-8")
                .splitlines()
            )
            if tags:
                tag = tags[0]
        except subprocess.CalledProcessError:
            # not a git checkout (or git failed) — treated as "no tag"
            pass
    if not tag:
        self.skipTest("No tag found")
    self.assertEqual("v%s" % pyrex.VERSION, tag)
    self.assertRegex(tag, pyrex.VERSION_TAG_REGEX, msg="Tag '%s' is invalid" % tag)
@skipIfPrebuilt
def test_tag_overwrite(self):
    # Test that trying to build the image with a release-like tag fails
    # (and doesn't build the image)
    conf = self.get_config()
    conf["config"]["pyrextag"] = "v1.2.3-ci-test"
    conf["config"]["buildlocal"] = "1"
    conf.write_conf()
    self.assertPyrexHostCommand("true", returncode=1)
    # no image with that tag may exist afterwards
    output = self.assertSubprocess(
        [self.provider, "images", "-q", conf["config"]["tag"]], capture=True
    ).strip()
    self.assertEqual(output, "", msg="Tagged image found!")
def test_pty(self):
    """Exit codes propagate when running under a pseudo-terminal."""
    self.assertPyrexContainerShellPTY("true")
    self.assertPyrexContainerShellPTY("false", returncode=1)
def test_invalid_term(self):
    # Tests that an invalid terminal is correctly detected.
    bad_term = "this-is-not-a-valid-term"
    env = os.environ.copy()
    env["TERM"] = bad_term
    output = self.assertPyrexContainerShellPTY("true", env=env)
    self.assertIn('$TERM has an unrecognized value of "%s"' % bad_term, output)
    # the bogus terminfo entry must not exist inside the container either
    self.assertPyrexContainerShellPTY(
        "/usr/bin/infocmp %s > /dev/null" % bad_term,
        env=env,
        returncode=1,
        quiet_init=True,
    )
def test_required_terms(self):
    # Tests that a minimum set of terminals are supported
    REQUIRED_TERMS = ("dumb", "vt100", "xterm", "xterm-256color")
    env = os.environ.copy()
    for t in REQUIRED_TERMS:
        with self.subTest(term=t):
            env["TERM"] = t
            # $TERM passes through a PTY session unchanged
            output = self.assertPyrexContainerShellPTY(
                "echo $TERM", env=env, quiet_init=True
            )
            self.assertEqual(output, t, msg="Bad $TERM found in container!")
            # terminfo entry exists and no warning is printed
            output = self.assertPyrexContainerShellPTY(
                "/usr/bin/infocmp %s > /dev/null" % t, env=env
            )
            self.assertNotIn("$TERM has an unrecognized value", output)
            # same check without a PTY
            output = self.assertPyrexContainerShellCommand(
                "echo $TERM", env=env, quiet_init=True, capture=True
            )
            self.assertEqual(output, t, msg="Bad $TERM found in container!")
def test_tini(self):
    """The tini init binary is available in the container."""
    self.assertPyrexContainerCommand("tini --version")
def test_guest_image(self):
    # This test makes sure that the image being tested is the image we
    # actually expect to be testing
    # Split out the image name, version, and type
    (image_name, image_version, _) = self.test_image.split("-")
    # Capture the LSB release information.
    dist_id_str = self.assertPyrexContainerCommand(
        "lsb_release -i", quiet_init=True, capture=True
    )
    release_str = self.assertPyrexContainerCommand(
        "lsb_release -r", quiet_init=True, capture=True
    )
    self.assertRegex(
        dist_id_str.lower(), r"^distributor id:\s+" + re.escape(image_name)
    )
    self.assertRegex(
        release_str.lower(), r"^release:\s+" + re.escape(image_version) + r"(\.|$)"
    )
def test_default_ini_image(self):
    # Tests that the default image specified in pyrex.ini is valid
    config = pyrex.Config()
    config.read_string(pyrex.read_default_config(True))
    self.assertIn(config["config"]["image"], (image for (image, _) in TEST_IMAGES))
def test_envvars(self):
    """Only variables listed in run.envvars are forwarded to the container."""
    conf = self.get_config()
    conf["run"]["envvars"] += " TEST_ENV"
    conf.write_conf()

    # thread id makes the value unique for concurrently-running tests
    test_string = "set_by_test.%d" % threading.get_ident()
    env = os.environ.copy()
    env["TEST_ENV"] = test_string
    s = self.assertPyrexContainerShellCommand(
        "echo $TEST_ENV", env=env, quiet_init=True, capture=True
    )
    self.assertEqual(s, test_string)

    # a variable that was not whitelisted must not leak through
    s = self.assertPyrexContainerShellCommand(
        "echo $TEST_ENV2", env=env, quiet_init=True, capture=True
    )
    self.assertEqual(s, "")
def test_custom_startup_script(self):
    """The custom startup script controls init output and exit code."""
    conf = self.get_config()
    conf["run"]["envvars"] += " PYREX_TEST_STARTUP_SCRIPT"
    conf.write_conf()

    env = os.environ.copy()
    # a non-zero value makes the startup script exit with that code
    env["PYREX_TEST_STARTUP_SCRIPT"] = "3"
    self.assertPyrexContainerShellCommand(
        "echo $PYREX_TEST_STARTUP_SCRIPT", env=env, quiet_init=True, returncode=3
    )

    # zero means the script runs, prints its banner, then the command runs
    env["PYREX_TEST_STARTUP_SCRIPT"] = "0"
    s = self.assertPyrexContainerShellCommand(
        "echo $PYREX_TEST_STARTUP_SCRIPT", env=env, quiet_init=True, capture=True
    )
    self.assertEqual(s, "Startup script test\n0")
def test_users(self):
    """The container's passwd database has exactly root plus the invoking user."""
    users = set(
        self.assertPyrexContainerShellCommand(
            "getent passwd | cut -f1 -d:", quiet_init=True, capture=True
        ).split()
    )
    self.assertEqual(users, {"root", pwd.getpwuid(os.getuid()).pw_name})
def test_groups(self):
    """The container mirrors root plus all of the invoking user's groups."""
    groups = set(
        self.assertPyrexContainerShellCommand(
            "getent group | cut -f1 -d:", quiet_init=True, capture=True
        ).split()
    )

    # expected: root, the user's primary group, and every supplementary group
    my_groups = {"root", grp.getgrgid(os.getgid()).gr_name}
    for gid in os.getgroups():
        my_groups.add(grp.getgrgid(gid).gr_name)

    self.assertEqual(groups, my_groups)
def test_bb_env_extrawhite(self):
    """Variables named in $BB_ENV_EXTRAWHITE pass through to the container."""
    env = os.environ.copy()
    env["BB_ENV_EXTRAWHITE"] = "TEST_BB_EXTRA"
    env["TEST_BB_EXTRA"] = "Hello"

    # the whitelist variable itself is visible inside the container
    s = set(
        self.assertPyrexContainerShellCommand(
            "echo $BB_ENV_EXTRAWHITE", env=env, quiet_init=True, capture=True
        ).split()
    )
    self.assertIn(env["BB_ENV_EXTRAWHITE"], s)

    # and so is the variable it whitelists
    s = self.assertPyrexContainerShellCommand(
        "echo $TEST_BB_EXTRA", env=env, quiet_init=True, capture=True
    )
    self.assertEqual(s, env["TEST_BB_EXTRA"])
def test_ssh_auth_sock(self):
    """$SSH_AUTH_SOCK is bind-mounted when it exists, dropped when it doesn't."""
    # An existing socket path must appear as the same dev/inode inside
    with tempfile.NamedTemporaryFile() as auth_file:
        env = os.environ.copy()
        env["SSH_AUTH_SOCK"] = auth_file.name

        auth_file_stat = os.stat(auth_file.name)

        s = self.assertPyrexContainerShellCommand(
            "stat --format='%d %i' $SSH_AUTH_SOCK",
            env=env,
            quiet_init=True,
            capture=True,
        )
        self.assertEqual(
            s, "%d %d" % (auth_file_stat.st_dev, auth_file_stat.st_ino)
        )

    # A missing socket only warns, and the variable is unset inside
    auth_sock_path = os.path.join(self.build_dir, "does-not-exist")
    env = os.environ.copy()
    env["SSH_AUTH_SOCK"] = auth_sock_path

    s = self.assertPyrexContainerShellCommand("true", env=env, capture=True)
    self.assertRegex(s, r"Warning: SSH_AUTH_SOCK \S+ does not exist")

    s = self.assertPyrexContainerShellCommand(
        "echo $SSH_AUTH_SOCK", env=env, quiet_init=True, capture=True
    )
    self.assertEqual(s, "")
@skipIfPrebuilt
def test_rebuild(self):
    """pyrex-rebuild must succeed (skipped when using prebuilt images)."""
    self.assertPyrexHostCommand("pyrex-rebuild")
def test_pyrex_config(self):
    """pyrex-config resolves ${section:key} and bare ${key} references."""
    conf = self.get_config()
    conf.add_section("ci")
    conf["ci"]["foo"] = "ABC"
    conf["ci"]["bar"] = "${ci:foo}DEF"  # fully-qualified reference
    conf["ci"]["baz"] = "${bar}GHI"  # same-section shorthand reference
    conf.write_conf()

    s = self.assertPyrexHostCommand(
        "pyrex-config get ci:foo", quiet_init=True, capture=True
    )
    self.assertEqual(s, "ABC")

    s = self.assertPyrexHostCommand(
        "pyrex-config get ci:bar", quiet_init=True, capture=True
    )
    self.assertEqual(s, "ABCDEF")

    s = self.assertPyrexHostCommand(
        "pyrex-config get ci:baz", quiet_init=True, capture=True
    )
    self.assertEqual(s, "ABCDEFGHI")
def test_pyrex_mkconfig(self):
    """pyrex.py mkconfig writes the default config and refuses to overwrite."""
    out_file = os.path.join(self.build_dir, "temp-pyrex.ini")
    cmd = [os.path.join(PYREX_ROOT, "pyrex.py"), "mkconfig"]

    # writing to a file echoes the file name
    output = self.assertSubprocess(
        cmd + [out_file], capture=True, cwd=self.build_dir
    )
    self.assertEqual(output, out_file)

    # without an argument the default config is printed on stdout
    output = self.assertSubprocess(cmd, capture=True)
    self.assertEqual(output, pyrex.read_default_config(False).rstrip())

    # the written file matches the stdout form
    with open(out_file, "r") as f:
        self.assertEqual(f.read().rstrip(), output)

    # an existing output file must not be clobbered
    self.assertSubprocess(cmd + [out_file], cwd=self.build_dir, returncode=1)
def test_user_commands(self):
    """Commands in config.commands get container shims; '!' disables the shim."""
    conf = self.get_config()
    conf["config"]["commands"] = "/bin/true !/bin/false"
    conf.write_conf()

    self.assertPyrexHostCommand("/bin/true")
    self.assertPyrexHostCommand("/bin/false", returncode=1)

    prefix = []
    # zsh resolves true/false as builtins, which would bypass the shims
    if "zsh" in os.environ.get("SHELL", ""):
        prefix = ["disable true false"]

    # shimmed command resolves to a symlink to the pyrex exec shim
    true_path = self.assertPyrexHostCommand(
        *(prefix + ["which true"]), capture=True, quiet_init=True
    )
    true_link_path = os.readlink(true_path)
    self.assertEqual(os.path.basename(true_link_path), "exec-shim-pyrex")

    # a '!'-prefixed command stays a plain link to the host binary
    false_path = self.assertPyrexHostCommand(
        *(prefix + ["which false"]), capture=True, quiet_init=True
    )
    false_link_path = os.readlink(false_path)
    self.assertEqual(os.path.basename(false_link_path), "false")
def test_lets_encrypt_root_ca(self):
    """HTTPS against a Let's Encrypt-signed site must work in the image."""
    # Tests that root Let's Encrypt certficiate still works. The older X3
    # certificate expired in September 2021 and a bug in older versions of
    # OpenSSL prevents clients from seeing the new one
    self.assertPyrexContainerShellCommand("curl https://letsencrypt.org/")
class PyrexImageType_oe(PyrexImageType_base):
    """
    Tests images designed for building OpenEmbedded
    """

    def test_bitbake_parse(self):
        """Sanity check that bitbake can parse recipes inside the container."""
        self.assertPyrexHostCommand("bitbake -p")

    def test_bitbake_parse_altpath_arg(self):
        """An out-of-tree bitbake (given as argument) needs an explicit bind."""
        # The new bitbake directory is out of the normally bound tree (passed
        # as an argument)
        with tempfile.TemporaryDirectory() as tmpdir:
            bitbakedir = os.path.join(tmpdir, "bitbake")
            shutil.copytree(os.path.join(self.pokyroot, "bitbake"), bitbakedir)

            # If the bitbake directory is not bound, capture should fail with
            # an error
            d = self.assertPyrexHostCommand(
                "bitbake -p", bitbakedir=bitbakedir, returncode=1, capture=True
            )
            self.assertIn("ERROR: %s not bound in container" % bitbakedir, d)

            # Binding the build directory in the conf file will allow bitbake
            # to be found
            conf = self.get_config()
            conf["run"]["bind"] = bitbakedir
            conf.write_conf()

            d = self.assertPyrexContainerCommand(
                "which bitbake", bitbakedir=bitbakedir, quiet_init=True, capture=True
            )
            self.assertEqual(d, os.path.join(bitbakedir, "bin", "bitbake"))

            self.assertPyrexHostCommand("bitbake -p", bitbakedir=bitbakedir)

    def test_bitbake_parse_altpath_env(self):
        """Same as the argument variant, but bitbake comes from $BITBAKEDIR."""
        # The new bitbake directory is out of the normally bound tree (passed
        # as an argument)
        with tempfile.TemporaryDirectory() as tmpdir:
            bitbakedir = os.path.join(tmpdir, "bitbake")
            shutil.copytree(os.path.join(self.pokyroot, "bitbake"), bitbakedir)

            env = {"BITBAKEDIR": bitbakedir}

            # If the bitbake directory is not bound, capture should fail with
            # an error
            d = self.assertPyrexHostCommand(
                "bitbake -p", returncode=1, capture=True, init_env=env
            )
            self.assertIn("ERROR: %s not bound in container" % bitbakedir, d)

            # Binding the build directory in the conf file will allow bitbake
            # to be found
            conf = self.get_config()
            conf["run"]["bind"] = bitbakedir
            conf.write_conf()

            d = self.assertPyrexContainerCommand(
                "which bitbake", quiet_init=True, capture=True, init_env=env
            )
            self.assertEqual(d, os.path.join(bitbakedir, "bin", "bitbake"))

            self.assertPyrexHostCommand("bitbake -p", init_env=env)

    def test_builddir_alt_env(self):
        """A build directory supplied via $BDIR works when it is bound."""
        with tempfile.TemporaryDirectory() as builddir:
            # Binding the build directory in the conf file will allow building
            # to continue
            conf = self.get_config()
            conf["run"]["bind"] = builddir
            conf.write_conf()

            env = {"BDIR": builddir}
            self.assertPyrexHostCommand("true", builddir="", init_env=env)

    def test_unbound_builddir(self):
        """An unbound build directory fails until it is added to run.bind."""
        with tempfile.TemporaryDirectory() as builddir:
            # If the build directory is not bound, capture should fail with an
            # error
            d = self.assertPyrexHostCommand(
                "true", builddir=builddir, returncode=1, capture=True
            )
            self.assertIn("ERROR: %s not bound in container" % builddir, d)

            # Binding the build directory in the conf file will allow building
            # to continue
            conf = self.get_config()
            conf["run"]["bind"] = builddir
            conf.write_conf()

            self.assertPyrexHostCommand("true", builddir=builddir)

    def test_icecc(self):
        """icecc (distributed compilation) must be present in -oe images."""
        self.assertPyrexContainerCommand("icecc --version")

    def test_templateconf_abs(self):
        """$TEMPLATECONF as an absolute path is honored during init."""
        template_dir = os.path.join(self.thread_dir, "template")
        os.makedirs(template_dir)
        self.assertTrue(os.path.isabs(template_dir))

        shutil.copyfile(
            os.path.join(self.pokyroot, "meta-poky/conf/local.conf.sample"),
            os.path.join(template_dir, "local.conf.sample"),
        )

        shutil.copyfile(
            os.path.join(self.pokyroot, "meta-poky/conf/bblayers.conf.sample"),
            os.path.join(template_dir, "bblayers.conf.sample"),
        )

        # forwarding an env var proves init ran with the custom template
        test_string = "set_by_test.%d" % threading.get_ident()

        conf = self.get_config()
        conf["run"]["envvars"] += " TEST_ENV"
        conf.write_conf()

        env = os.environ.copy()
        env["TEMPLATECONF"] = template_dir
        env["TEST_ENV"] = test_string

        s = self.assertPyrexContainerShellCommand(
            "echo $TEST_ENV", env=env, quiet_init=True, capture=True
        )
        self.assertEqual(s, test_string)

    def test_templateconf_rel(self):
        """$TEMPLATECONF relative to the poky root is honored during init."""
        template_dir = os.path.join(self.thread_dir, "template")
        os.makedirs(template_dir)
        self.assertTrue(os.path.isabs(template_dir))

        shutil.copyfile(
            os.path.join(self.pokyroot, "meta-poky/conf/local.conf.sample"),
            os.path.join(template_dir, "local.conf.sample"),
        )

        shutil.copyfile(
            os.path.join(self.pokyroot, "meta-poky/conf/bblayers.conf.sample"),
            os.path.join(template_dir, "bblayers.conf.sample"),
        )

        test_string = "set_by_test.%d" % threading.get_ident()

        conf = self.get_config()
        conf["run"]["envvars"] += " TEST_ENV"
        conf.write_conf()

        env = os.environ.copy()
        env["TEMPLATECONF"] = os.path.relpath(template_dir, self.pokyroot)
        env["TEST_ENV"] = test_string

        s = self.assertPyrexContainerShellCommand(
            "echo $TEST_ENV", env=env, quiet_init=True, capture=True
        )
        self.assertEqual(s, test_string)

    def test_top_dir(self):
        """bitbake's TOPDIR inside pyrex matches bitbake run outside pyrex."""
        # Verify that the TOPDIR reported by bitbake in pyrex is the same as
        # the one reported by bitbake outside of pyrex
        cwd = os.path.join(self.build_dir, "oe-build")
        try:
            os.makedirs(cwd)
        except OSError:
            pass
        builddir = os.path.join(cwd, "build")

        oe_topdir = self.assertSubprocess(
            [
                "/bin/bash",
                "-c",
                ". %s/oe-init-build-env > /dev/null && (bitbake -e | grep ^TOPDIR=)"
                % os.path.relpath(self.pokyroot, cwd),
            ],
            capture=True,
            cwd=cwd,
        )
        # remove the build dir so the pyrex run starts from the same state
        shutil.rmtree(builddir)

        pyrex_topdir = self.assertPyrexHostCommand(
            "bitbake -e | grep ^TOPDIR=",
            quiet_init=True,
            capture=True,
            cwd=cwd,
            builddir="build",
        )
        shutil.rmtree(builddir)

        self.assertEqual(oe_topdir, pyrex_topdir)

    def test_env_capture(self):
        """Variables set by the OE init script are imported from the capture."""
        extra_white = set(
            self.assertPyrexHostCommand(
                "echo $BB_ENV_EXTRAWHITE", quiet_init=True, capture=True
            ).split()
        )

        # The exact values aren't relevant, only that they are correctly
        # imported from the capture
        self.assertIn("MACHINE", extra_white)
        self.assertIn("DISTRO", extra_white)

        builddir = self.assertPyrexHostCommand(
            "echo $BUILDDIR", quiet_init=True, capture=True
        )
        self.assertEqual(builddir, self.build_dir)

    def test_bb_env_extrawhite_parse(self):
        """A $BB_ENV_EXTRAWHITE variable is visible to bitbake's datastore."""
        env = os.environ.copy()
        env["BB_ENV_EXTRAWHITE"] = "TEST_BB_EXTRA"
        env["TEST_BB_EXTRA"] = "foo"

        s = self.assertPyrexHostCommand(
            "bitbake -e | grep ^TEST_BB_EXTRA=", env=env, quiet_init=True, capture=True
        )
        self.assertEqual(s, 'TEST_BB_EXTRA="foo"')
class PyrexImageType_oetest(PyrexImageType_oe):
    """
    Tests images designed for building OpenEmbedded Test image
    """

    def test_wine(self):
        """wine must be installed in the -oetest images."""
        self.assertPyrexContainerCommand("wine --version")
# Container runtimes a test-case class is generated for (see add_image_tests)
PROVIDERS = ("docker", "podman")

# (image name, poky release used when exercising that image); the image name's
# trailing "-<type>" selects the PyrexImageType_<type> base class
TEST_IMAGES = (
    ("ubuntu-14.04-base", "2.6"),
    ("ubuntu-16.04-base", "2.6"),
    ("ubuntu-18.04-base", "2.6"),
    ("ubuntu-20.04-base", "3.1"),
    ("ubuntu-14.04-oe", "2.6"),
    ("ubuntu-16.04-oe", "2.6"),
    ("ubuntu-18.04-oe", "2.6"),
    ("ubuntu-20.04-oe", "3.1"),
    ("ubuntu-18.04-oetest", "2.6"),
    ("ubuntu-20.04-oetest", "3.1"),
)
def add_image_tests():
    """Generate one concrete TestCase class per (provider, image) pair.

    Each generated class mixes unittest.TestCase into the
    PyrexImageType_* base matching the image's type suffix and is
    installed on this module so unittest discovery picks it up.
    """
    module = sys.modules[__name__]
    for provider in PROVIDERS:
        for image, pokyver in TEST_IMAGES:
            _, _, image_type = image.split("-")
            base_cls = getattr(module, "PyrexImageType_" + image_type)
            cls_name = "PyrexImage_%s_%s" % (provider, re.sub(r"\W", "_", image))
            attrs = {
                "test_image": image,
                "provider": provider,
                "pokyver": pokyver,
            }
            setattr(module, cls_name, type(cls_name, (base_cls, unittest.TestCase), attrs))
# materialize the generated per-image test classes at import time
add_image_tests()

if __name__ == "__main__":
    unittest.main()
|
helper.py | import asyncio
import functools
import json
import math
import os
import random
import re
import sys
import threading
import time
import uuid
import warnings
from argparse import ArgumentParser, Namespace
from contextlib import contextmanager
from datetime import datetime
from itertools import islice
from pathlib import Path
from types import SimpleNamespace
from typing import (
Tuple,
Optional,
Iterator,
Any,
Union,
List,
Dict,
Set,
Sequence,
Iterable,
)
# Public API of this helper module; `from ... import *` exposes only these.
__all__ = [
    'batch_iterator',
    'parse_arg',
    'random_port',
    'random_identity',
    'random_uuid',
    'expand_env_var',
    'colored',
    'ArgNamespace',
    'is_valid_local_config_source',
    'cached_property',
    'typename',
    'get_public_ip',
    'get_internal_ip',
    'convert_tuple_to_list',
    'run_async',
    'deprecated_alias',
    'countdown',
]
def deprecated_alias(**aliases):
    """
    Usage, kwargs with key as the deprecated arg name and value be a tuple, (new_name, deprecate_level).

    With level 0 means warning, level 1 means exception.

    For example:
        .. highlight:: python
        .. code-block:: python

            @deprecated_alias(input_fn=('inputs', 0), buffer=('input_fn', 0), callback=('on_done', 1), output_fn=('on_done', 1))

    :param aliases: maps aliases to new arguments
    :return: wrapper
    """
    from .excepts import NotSupportedError

    def _translate(func_name: str, kwargs, aliases):
        """Warn or raise for every deprecated argument present in *kwargs*.

        :param func_name: Name of the function.
        :param kwargs: key word arguments from the function which is decorated.
        :param aliases: kwargs with key as the deprecated arg name and value be a tuple, (new_name, deprecate_level).
        """
        for old_name, spec in aliases.items():
            # each mapping target must be a (new_name, level) pair
            if not isinstance(spec, tuple):
                raise ValueError(
                    f'{spec} must be a tuple, with first element as the new name, '
                    f'second element as the deprecated level: 0 as warning, 1 as exception'
                )
            if old_name not in kwargs:
                continue
            new_name, dep_level = spec
            # supplying both the old and new spelling is ambiguous
            if new_name in kwargs:
                raise NotSupportedError(
                    f'{func_name} received both {old_name} and {new_name}'
                )
            if dep_level == 0:
                # soft deprecation: warn, then transparently rename
                warnings.warn(
                    f'`{old_name}` is renamed to `{new_name}` in `{func_name}()`, the usage of `{old_name}` is '
                    f'deprecated and will be removed in the next version.',
                    DeprecationWarning,
                )
                kwargs[new_name] = kwargs.pop(old_name)
            elif dep_level == 1:
                # hard deprecation: the old spelling is no longer accepted
                raise NotSupportedError(f'{old_name} has been renamed to `{new_name}`')

    def deco(f):
        """Wrap *f* so deprecated kwargs are translated before the call.

        :param f: function the decorator is used for
        :return: wrapper
        """

        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            """Translate deprecated kwargs, then delegate to the wrapped function.

            :param args: wrapper arguments
            :param kwargs: wrapper key word arguments
            :return: result of renamed function.
            """
            _translate(f.__name__, kwargs, aliases)
            return f(*args, **kwargs)

        return wrapper

    return deco
def get_readable_size(num_bytes: Union[int, float]) -> str:
    """
    Transform the bytes into readable value with different units (e.g. 1 KB, 20 MB, 30.1 GB).

    :param num_bytes: Number of bytes.
    :return: Human readable string representation.
    """
    num_bytes = int(num_bytes)
    if num_bytes < 1024:
        return f'{num_bytes} Bytes'
    # walk the unit table; GB is the terminal unit for anything larger
    for unit, scale in (('KB', 1024), ('MB', 1024 ** 2), ('GB', 1024 ** 3)):
        if num_bytes < scale * 1024 or unit == 'GB':
            return f'{num_bytes / scale:.1f} {unit}'
def batch_iterator(
    data: Iterable[Any],
    batch_size: int,
    axis: int = 0,
) -> Iterator[Any]:
    """
    Yield *data* in chunks of at most *batch_size* items.

    For example:
        .. highlight:: python
        .. code-block:: python

            for req in batch_iterator(data, batch_size, split_over_axis):
                # Do something with batch

    :param data: Data source.
    :param batch_size: Size of one batch.
    :param axis: Determine which axis to iterate for np.ndarray data.
    :yield: data
    :return: An Iterator of batch data.
    """
    import numpy as np

    # a non-positive batch size means "no batching": emit everything at once
    if not batch_size or batch_size <= 0:
        yield data
        return

    if isinstance(data, np.ndarray):
        total = data.shape[axis]
        if batch_size >= total:
            yield data
            return
        # slice along the requested axis, leaving every other axis intact
        selector = [slice(None)] * data.ndim
        start = 0
        while start < total:
            selector[axis] = slice(start, min(total, start + batch_size))
            yield data[tuple(selector)]
            start += batch_size
    elif isinstance(data, Sequence):
        if batch_size >= len(data):
            yield data
            return
        for offset in range(0, len(data), batch_size):
            yield data[offset : offset + batch_size]
    elif isinstance(data, Iterable):
        # as iterator, there is no way to know the length of it;
        # iter(callable, sentinel) stops once islice drains the source
        for chunk in iter(lambda: tuple(islice(data, batch_size)), ()):
            yield chunk
    else:
        raise TypeError(f'unsupported type: {type(data)}')
def parse_arg(v: str) -> Optional[Union[bool, int, str, list, float]]:
    """
    Parse the arguments from string to `Union[bool, int, str, list, float]`.

    Quoted strings are unquoted, `[a, b]` becomes a list of parsed elements,
    numeric strings become int/float, `true`/`false` (any case) become bools,
    an empty string becomes ``None``; anything else is returned unchanged.

    :param v: The string of arguments
    :return: The parsed arguments list.
    """
    m = re.match(r'^[\'"](.*)[\'"]$', v)
    if m:
        return m.group(1)

    if v.startswith('[') and v.endswith(']'):
        # strip all brackets, then split on commas (flat lists only)
        inner = v.replace('[', '').replace(']', '').strip()
        # BUG FIX: ''.split(',') yields [''], so the empty-list branch was
        # unreachable and '[]' used to parse as [None]; now it yields [].
        if not inner:
            return []
        return [parse_arg(vv.strip()) for vv in inner.split(',')]

    try:
        return int(v)  # parse int parameter
    except ValueError:
        pass
    try:
        return float(v)  # parse float parameter
    except ValueError:
        pass

    if len(v) == 0:
        # ignore it when the parameter is empty
        return None
    if v.lower() == 'true':  # parse boolean parameter
        return True
    if v.lower() == 'false':
        return False
    return v
def countdown(t: int, reason: str = 'I am blocking this thread') -> None:
    """
    Display the countdown in console.

    For example:
        .. highlight:: python
        .. code-block:: python

            countdown(10, reason=colored('re-fetch access token', 'cyan', attrs=['bold', 'reverse']))

    :param t: Countdown time.
    :param reason: A string message of reason for this Countdown.
    """
    out = sys.stdout
    try:
        out.write('\n')
        out.flush()
        # redraw the same console line once per second until time is up
        while t > 0:
            t -= 1
            out.write(f'\r⏳ {colored("%3d" % t, "yellow")}s left: {reason}')
            out.flush()
            time.sleep(1)
        out.write('\n')
        out.flush()
    except KeyboardInterrupt:
        # Ctrl-C aborts the wait early
        out.write('no more patience? good bye!')
# adjective / noun word pools drawn from by `random_name`
_random_names = (
    (
        'first', 'great', 'local', 'small', 'right', 'large', 'young', 'early',
        'major', 'clear', 'black', 'whole', 'third', 'white', 'short', 'human',
        'royal', 'wrong', 'legal', 'final', 'close', 'total', 'prime', 'happy',
        'sorry', 'basic', 'aware', 'ready', 'green', 'heavy', 'extra', 'civil',
        'chief', 'usual', 'front', 'fresh', 'joint', 'alone', 'rural', 'light',
        'equal', 'quiet', 'quick', 'daily', 'urban', 'upper', 'moral', 'vital',
        'empty', 'brief',
    ),
    (
        'world', 'house', 'place', 'group', 'party', 'money', 'point', 'state',
        'night', 'water', 'thing', 'order', 'power', 'court', 'level', 'child',
        'south', 'staff', 'woman', 'north', 'sense', 'death', 'range', 'table',
        'trade', 'study', 'other', 'price', 'class', 'union', 'value', 'paper',
        'right', 'voice', 'stage', 'light', 'march', 'board', 'month', 'music',
        'field', 'award', 'issue', 'basis', 'front', 'heart', 'force', 'model',
        'space', 'peter',
    ),
)
def random_name() -> str:
    """
    Generate a random two-word name such as ``happy_world``.

    :return: A Random name.
    """
    adjective, noun = (random.choice(pool) for pool in _random_names)
    return f'{adjective}_{noun}'
def random_port() -> Optional[int]:
    """
    Get a random available port number from '49153' to '65535'.

    With ``JINA_RANDOM_PORTS`` set, ports are probed in shuffled order within
    ``[JINA_RANDOM_PORT_MIN, JINA_RANDOM_PORT_MAX]``; otherwise the OS picks
    an ephemeral port (bind to port 0).

    :return: A random port.
    :raises OSError: if no port in the configured range is available.
    """
    import threading
    import multiprocessing
    from contextlib import closing
    import socket

    def _get_port(port=0):
        # Try to bind the port; return the bound port number, or None if the
        # bind failed (port in use / not permitted).
        # NOTE(review): these locks are created fresh on every call, so they
        # do not actually serialize concurrent callers; kept for compatibility.
        with multiprocessing.Lock():
            with threading.Lock():
                with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
                    try:
                        # BUG FIX: SO_REUSEADDR must be set *before* bind() —
                        # setting it afterwards has no effect on the bind.
                        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                        s.bind(('', port))
                        return s.getsockname()[1]
                    except OSError:
                        pass

    _port = None
    if 'JINA_RANDOM_PORTS' in os.environ:
        min_port = int(os.environ.get('JINA_RANDOM_PORT_MIN', '49153'))
        max_port = int(os.environ.get('JINA_RANDOM_PORT_MAX', '65535'))
        all_ports = list(range(min_port, max_port + 1))
        random.shuffle(all_ports)
        for _port in all_ports:
            if _get_port(_port) is not None:
                break
        else:
            # the for-loop exhausted every candidate without breaking
            raise OSError(
                f'Couldn\'t find an available port in [{min_port}, {max_port}].'
            )
    else:
        _port = _get_port()

    return int(_port)
def random_identity(use_uuid1: bool = False) -> str:
    """
    Generate random UUID string.

    ..note::
        A MAC address or time-based ordering (UUID1) can afford increased database performance, since it's less work
        to sort numbers closer-together than those distributed randomly (UUID4) (see here).

        A second related issue, is that using UUID1 can be useful in debugging, even if origin data is lost or not
        explicitly stored.

    :param use_uuid1: use UUID1 instead of UUID4. This is the default Document ID generator.
    :return: A random UUID.
    """
    # inlined form of random_uuid(): uuid1 is time/MAC based, uuid4 is random
    return str(uuid.uuid1() if use_uuid1 else uuid.uuid4())
def random_uuid(use_uuid1: bool = False) -> uuid.UUID:
    """
    Get a random UUID.

    :param use_uuid1: Use UUID1 if True, else use UUID4.
    :return: A random UUID.
    """
    factory = uuid.uuid1 if use_uuid1 else uuid.uuid4
    return factory()
def expand_env_var(v: str) -> Optional[Union[bool, int, str, list, float]]:
    """
    Expand the environment variables.

    :param v: String of environment variables.
    :return: Parsed environment variables.
    """
    # non-strings pass through untouched; strings get $VAR expansion + parsing
    if not isinstance(v, str):
        return v
    return parse_arg(os.path.expandvars(v))
def expand_dict(
    d: Dict, expand_fn=expand_env_var, resolve_cycle_ref=True
) -> Dict[str, Any]:
    """
    Expand variables from YAML file, in place.

    String values containing ``{root...}`` / ``{this...}`` placeholders or
    ``$VAR`` references are substituted; everything else is left untouched.

    :param d: Target Dict.
    :param expand_fn: Parsed environment variables.
    :param resolve_cycle_ref: Defines if cyclic references should be resolved.
    :return: Expanded variables.
    """
    expand_map = SimpleNamespace()
    # matches `{...}` placeholders and `$NAME`-style environment references
    pat = re.compile(r'{.+}|\$[a-zA-Z0-9_]*\b')

    def _scan(sub_d: Union[Dict, List], p):
        # First pass: mirror the dict/list structure into nested
        # SimpleNamespace objects so `{root.a.b}` / `{this.x}` lookups can be
        # resolved through str.format attribute access.
        if isinstance(sub_d, dict):
            for k, v in sub_d.items():
                if isinstance(v, dict):
                    p.__dict__[k] = SimpleNamespace()
                    _scan(v, p.__dict__[k])
                elif isinstance(v, list):
                    p.__dict__[k] = list()
                    _scan(v, p.__dict__[k])
                else:
                    p.__dict__[k] = v
        elif isinstance(sub_d, list):
            for idx, v in enumerate(sub_d):
                if isinstance(v, dict):
                    p.append(SimpleNamespace())
                    _scan(v, p[idx])
                elif isinstance(v, list):
                    p.append(list())
                    _scan(v, p[idx])
                else:
                    p.append(v)

    def _replace(sub_d: Union[Dict, List], p):
        # Second pass: walk the original structure, substituting any string
        # value that contains a placeholder pattern.
        if isinstance(sub_d, Dict):
            for k, v in sub_d.items():
                if isinstance(v, (dict, list)):
                    _replace(v, p.__dict__[k])
                else:
                    if isinstance(v, str) and pat.findall(v):
                        sub_d[k] = _sub(v, p)
        elif isinstance(sub_d, List):
            for idx, v in enumerate(sub_d):
                if isinstance(v, (dict, list)):
                    _replace(v, p[idx])
                else:
                    if isinstance(v, str) and pat.findall(v):
                        sub_d[idx] = _sub(v, p)

    def _sub(v, p):
        # Resolve `{root...}` / `{this...}` references first (unknown keys are
        # left as-is), then expand `$VAR` references via expand_fn.
        if resolve_cycle_ref:
            try:
                v = v.format(root=expand_map, this=p)
            except KeyError:
                pass
        return expand_fn(v)

    _scan(d, expand_map)  # build the lookup namespace
    _replace(d, expand_map)  # then substitute values in place

    return d
# ANSI SGR codes for text attributes used by `colored`
_ATTRIBUTES = {
    'bold': 1,
    'dark': 2,
    'underline': 4,
    'blink': 5,
    'reverse': 7,
    'concealed': 8,
}

# ANSI SGR codes for background colors
_HIGHLIGHTS = {
    'on_grey': 40,
    'on_red': 41,
    'on_green': 42,
    'on_yellow': 43,
    'on_blue': 44,
    'on_magenta': 45,
    'on_cyan': 46,
    'on_white': 47,
}

# ANSI SGR codes for foreground colors
_COLORS = {
    'grey': 30,
    'red': 31,
    'green': 32,
    'yellow': 33,
    'blue': 34,
    'magenta': 35,
    'cyan': 36,
    'white': 37,
}

# escape sequence that resets all terminal attributes
_RESET = '\033[0m'

# On Windows, running `color` enables ANSI escape processing in the console
# (import-time side effect)
if os.name == 'nt':
    os.system('color')
def colored(
    text: str,
    color: Optional[str] = None,
    on_color: Optional[str] = None,
    attrs: Optional[Union[str, list]] = None,
) -> str:
    """
    Wrap *text* in ANSI escape codes for terminal coloring.

    Disabled entirely (text returned unchanged) when the environment
    variable ``JINA_LOG_NO_COLOR`` is set.

    :param text: The target text.
    :param color: Foreground color name; one of the keys of ``_COLORS``
        ('grey', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white').
    :param on_color: Background color name; one of the keys of
        ``_HIGHLIGHTS`` ('on_grey', 'on_red', 'on_green', 'on_yellow',
        'on_blue', 'on_magenta', 'on_cyan', 'on_white').
    :param attrs: Attribute name or list of names; keys of ``_ATTRIBUTES``
        ('bold', 'dark', 'underline', 'blink', 'reverse', 'concealed').
    :return: Colored text.
    """
    if 'JINA_LOG_NO_COLOR' in os.environ:
        return text

    fmt_str = '\033[%dm%s'
    if color:
        text = fmt_str % (_COLORS[color], text)
    if on_color:
        text = fmt_str % (_HIGHLIGHTS[on_color], text)
    if attrs:
        # accept a single attribute name as a convenience
        if isinstance(attrs, str):
            attrs = [attrs]
        if isinstance(attrs, list):
            for attr in attrs:
                text = fmt_str % (_ATTRIBUTES[attr], text)
    # always terminate with a reset so styling does not bleed onward
    return text + _RESET
class ArgNamespace:
    """Helper function for argparse.Namespace object."""

    @staticmethod
    def kwargs2list(kwargs: Dict) -> List[str]:
        """
        Convert dict to an argparse-friendly list.

        :param kwargs: dictionary of key-values to be converted
        :return: argument list
        """
        args = []
        from .executors import BaseExecutor

        for k, v in kwargs.items():
            # argparse flags use dashes, not underscores
            k = k.replace('_', '-')
            if v is not None:
                if isinstance(v, bool):
                    # True becomes a bare flag; False is simply omitted
                    if v:
                        args.append(f'--{k}')
                elif isinstance(v, list):  # for nargs
                    args.extend([f'--{k}', *(str(vv) for vv in v)])
                elif isinstance(v, dict):
                    # dicts are serialized as JSON strings
                    args.extend([f'--{k}', json.dumps(v)])
                elif isinstance(v, type) and issubclass(v, BaseExecutor):
                    # executor classes are passed by class name
                    args.extend([f'--{k}', v.__name__])
                else:
                    args.extend([f'--{k}', str(v)])
        return args

    @staticmethod
    def kwargs2namespace(
        kwargs: Dict[str, Union[str, int, bool]], parser: ArgumentParser
    ) -> Namespace:
        """
        Convert dict to a namespace.

        :param kwargs: dictionary of key-values to be converted
        :param parser: the parser for building kwargs into a namespace
        :return: argument list
        """
        args = ArgNamespace.kwargs2list(kwargs)
        try:
            # NOTE: unknown arguments are silently discarded here
            p_args, unknown_args = parser.parse_known_args(args)
        except SystemExit:
            # argparse calls sys.exit() on failure; surface a ValueError instead
            raise ValueError(
                f'bad arguments "{args}" with parser {parser}, '
                'you may want to double check your args '
            )
        return p_args

    @staticmethod
    def get_non_defaults_args(
        args: Namespace, parser: ArgumentParser, taboo: Optional[Set[str]] = None
    ) -> Dict:
        """
        Get non-default args in a dict.

        :param args: the namespace to parse
        :param parser: the parser for referring the default values
        :param taboo: exclude keys in the final result
        :return: non defaults
        """
        if taboo is None:
            taboo = set()
        non_defaults = {}
        # parsing an empty argv yields the parser's defaults for comparison
        _defaults = vars(parser.parse_args([]))
        for k, v in vars(args).items():
            if k in _defaults and k not in taboo and _defaults[k] != v:
                non_defaults[k] = v
        return non_defaults

    @staticmethod
    def flatten_to_dict(
        args: Union[Dict[str, 'Namespace'], 'Namespace']
    ) -> Dict[str, Any]:
        """Convert argparse.Namespace to dict to be uploaded via REST.

        :param args: namespace or dict or namespace to dict.
        :return: pea args
        """
        # NOTE(review): inputs that are neither Namespace nor dict fall
        # through and implicitly return None — confirm callers expect this.
        if isinstance(args, Namespace):
            return vars(args)
        elif isinstance(args, dict):
            pea_args = {}
            for k, v in args.items():
                if isinstance(v, Namespace):
                    pea_args[k] = vars(v)
                elif isinstance(v, list):
                    pea_args[k] = [vars(_) for _ in v]
                else:
                    pea_args[k] = v
            return pea_args
def is_valid_local_config_source(path: str) -> bool:
    # TODO: this function must be refactored before 1.0 (Han 12.22)
    """
    Check if the path is valid.

    :param path: Local file path.
    :return: True if the path is valid else False.
    """
    try:
        from .jaml import parse_config_source

        # parse_config_source raises FileNotFoundError for unusable sources
        parse_config_source(path)
        return True
    except FileNotFoundError:
        return False
def get_full_version() -> Optional[Tuple[Dict, Dict]]:
    """
    Get the version of libraries used in Jina and environment variables.

    :return: Version information and environment variables, or ``None``
        if gathering the information failed (the error is logged).
    """
    from . import __version__, __proto_version__, __jina_env__, __resources_path__
    from google.protobuf.internal import api_implementation
    import os, grpc, zmq, numpy, google.protobuf, yaml
    from grpc import _grpcio_metadata
    import platform
    from jina.logging.predefined import default_logger

    try:
        info = {
            'jina': __version__,
            'jina-proto': __proto_version__,
            'jina-vcs-tag': os.environ.get('JINA_VCS_VERSION', '(unset)'),
            'libzmq': zmq.zmq_version(),
            # BUG FIX: this previously reported numpy.__version__ as the
            # pyzmq version; zmq.pyzmq_version() is the actual pyzmq release.
            'pyzmq': zmq.pyzmq_version(),
            'protobuf': google.protobuf.__version__,
            'proto-backend': api_implementation._default_implementation_type,
            'grpcio': getattr(grpc, '__version__', _grpcio_metadata.__version__),
            'pyyaml': yaml.__version__,
            'python': platform.python_version(),
            'platform': platform.system(),
            'platform-release': platform.release(),
            'platform-version': platform.version(),
            'architecture': platform.machine(),
            'processor': platform.processor(),
            'jina-resources': __resources_path__,
        }
        env_info = {k: os.getenv(k, '(unset)') for k in __jina_env__}
        full_version = info, env_info
    except Exception as e:
        # best effort: never let version probing crash the caller
        default_logger.error(str(e))
        full_version = None

    return full_version
def format_full_version_info(info: Dict, env_info: Dict) -> str:
    """
    Format the version information.

    :param info: Version information of Jina libraries.
    :param env_info: The Jina environment variables.
    :return: Formatted version information.
    """
    # version lines are prefixed with '-', environment lines with '*'
    version_lines = (f'- {k:30s}{v}' for k, v in info.items())
    env_lines = (f'* {k:30s}{v}' for k, v in env_info.items())
    return '\n'.join(version_lines) + '\n' + '\n'.join(env_lines)
def _use_uvloop():
    """Install uvloop as the asyncio event-loop policy when it is available."""
    from .importer import ImportExtensions

    # required=False: silently fall back to builtin asyncio if uvloop is absent
    with ImportExtensions(
        required=False,
        help_text='Jina uses uvloop to manage events and sockets, '
        'it often yields better performance than builtin asyncio',
    ):
        import uvloop

        asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
def get_or_reuse_loop():
    """
    Get a new eventloop or reuse the current opened eventloop.

    :return: A new eventloop or reuse the current opened eventloop.
    """
    try:
        loop = asyncio.get_running_loop()
        # a closed loop cannot be reused; fall through to create a new one
        if loop.is_closed():
            raise RuntimeError
    except RuntimeError:
        # install uvloop's loop policy unless explicitly disabled
        if 'JINA_DISABLE_UVLOOP' not in os.environ:
            _use_uvloop()
        # no running event loop
        # create a new loop
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
    return loop
def typename(obj):
    """
    Get the typename of object.

    :param obj: Target object.
    :return: Typename of the obj.
    """
    # accept either an instance or a type itself
    cls = obj if isinstance(obj, type) else obj.__class__
    try:
        return f'{cls.__module__}.{cls.__name__}'
    except AttributeError:
        return str(cls)
class cached_property:
    """Descriptor that caches a property's value in the instance __dict__."""

    def __init__(self, func):
        """
        Create the :class:`cached_property`.

        :param func: Cached function.
        """
        self.func = func

    def __get__(self, obj, cls):
        # NOTE: a cached value of None is treated as a miss and recomputed
        key = f'CACHED_{self.func.__name__}'
        hit = obj.__dict__.get(key, None)
        if hit is not None:
            return hit
        obj.__dict__[key] = result = self.func(obj)
        return result

    def __delete__(self, obj):
        # drop the cached value, closing it first if it supports close()
        key = f'CACHED_{self.func.__name__}'
        cached = obj.__dict__.get(key, None)
        if cached is not None:
            if hasattr(cached, 'close'):
                cached.close()
            del obj.__dict__[key]
def get_now_timestamp():
    """
    Get the datetime.

    :return: The datetime in int format.
    """
    return int(datetime.timestamp(datetime.now()))
def get_readable_time(*args, **kwargs):
    """
    Get the datetime in human readable format (e.g. 115 days and 17 hours and 46 minutes and 40 seconds).

    For example:
        .. highlight:: python
        .. code-block:: python

            get_readable_time(seconds=1000)

    :param args: arguments for datetime.timedelta
    :param kwargs: key word arguments for datetime.timedelta
    :return: Datetime in human readable format.
    """
    import datetime

    remaining = float(datetime.timedelta(*args, **kwargs).total_seconds())
    parts = []
    # peel off whole units from largest to smallest, skipping zero counts
    for unit, span in (('day', 86400), ('hour', 3600), ('minute', 60), ('second', 1)):
        if remaining / span >= 1 or span == 1:
            if span > 1:
                count = int(math.floor(remaining / span))
                remaining -= count * span
            else:
                count = int(remaining)
            suffix = '' if count == 1 else 's'
            parts.append(f'{count} {unit}{suffix}')
    return ' and '.join(parts)
def get_internal_ip():
    """
    Return the private IP address of the gateway for connecting from other machine in the same network.

    :return: Private IP address.
    """
    import socket

    ip = '127.0.0.1'
    try:
        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
            # UDP connect sends no packet; it only selects a local interface
            sock.connect(('10.255.255.255', 1))
            ip = sock.getsockname()[0]
    except Exception:
        # fall back to the loopback address on any failure
        pass
    return ip
def get_public_ip():
    """
    Return the public IP address of the gateway for connecting from other machine in the public network.

    Probes several well-known IP echo services concurrently and returns the
    first non-empty answer, or ``None`` if every probe failed or timed out.

    :return: Public IP address.
    """
    import urllib.request

    timeout = 0.2
    results = []

    def _get_ip(url):
        try:
            with urllib.request.urlopen(url, timeout=timeout) as fp:
                results.append(fp.read().decode('utf8'))
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt inside the worker thread.
            # Best-effort probing: a failing service is simply ignored.
            pass

    ip_server_list = [
        'https://api.ipify.org',
        'https://ident.me',
        'https://ipinfo.io/ip',
    ]

    # fan out one probe per service so the slowest does not serialize the rest
    threads = []
    for url in ip_server_list:
        t = threading.Thread(target=_get_ip, args=(url,))
        threads.append(t)
        t.start()

    for t in threads:
        t.join(timeout)

    # first non-empty response wins; falls through to implicit None otherwise
    for r in results:
        if r:
            return r
def convert_tuple_to_list(d: Dict):
    """
    Convert all the tuple type values from a dict to list, recursively and in place.

    :param d: Dict type of data.
    """
    for key, value in d.items():
        if isinstance(value, tuple):
            d[key] = list(value)
        elif isinstance(value, dict):
            # recurse into nested dicts; other value types are left untouched
            convert_tuple_to_list(value)
def is_jupyter() -> bool:  # pragma: no cover
    """
    Detect whether we are executing inside a Jupyter notebook.

    Relies on the ``get_ipython`` function that IPython injects into the
    global namespace; the shell class name distinguishes notebook kernels
    from terminal IPython.

    :return: True if run in a Jupyter notebook else False.
    """
    try:
        shell = get_ipython().__class__.__name__  # noqa: F821
    except NameError:
        # plain Python interpreter: IPython not present at all
        return False
    # ZMQInteractiveShell -> Jupyter notebook/qtconsole, Shell -> Google
    # colab; TerminalInteractiveShell (or anything else) is not a notebook
    return shell in ('ZMQInteractiveShell', 'Shell')
def run_async(func, *args, **kwargs):
    """Generalized asyncio.run for jupyter notebook.

    When running inside jupyter, an eventloop already exists, can't be
    stopped and can't be killed; directly calling asyncio.run will fail, as
    that function cannot be called when another asyncio event loop is
    running in the same thread. In that case the coroutine is executed on a
    fresh thread with its own event loop instead.

    .. see_also:
        https://stackoverflow.com/questions/55409641/asyncio-run-cannot-be-called-from-a-running-event-loop

    :param func: function to run
    :param args: parameters
    :param kwargs: key-value parameters
    :return: asyncio.run(func)
    """
    try:
        running_loop = asyncio.get_running_loop()
    except RuntimeError:
        running_loop = None

    if running_loop is None or not running_loop.is_running():
        # no live event loop in this thread: the plain path just works
        return asyncio.run(func(*args, **kwargs))

    if not is_jupyter():
        raise RuntimeError(
            'you have an eventloop running but not using Jupyter/ipython, '
            'this may mean you are using Jina with other integration? if so, then you '
            'may want to use Clien/Flow(asyncio=True). If not, then '
            'please report this issue here: https://github.com/jina-ai/jina'
        )

    class _RunThread(threading.Thread):
        """Thread that runs `func` on its own dedicated event loop."""

        def run(self):
            self.result = asyncio.run(func(*args, **kwargs))

    worker = _RunThread()
    worker.start()
    worker.join()
    try:
        return worker.result
    except AttributeError:
        from .excepts import BadClient

        raise BadClient(
            'something wrong when running the eventloop, result can not be retrieved'
        )
def slugify(value):
    """
    Strip *value*, replace spaces with underscores, and drop every character
    that is not alphanumeric, ``-``, ``_`` or ``.``.

    :param value: Original string.
    :return: Processed string.
    """
    underscored = str(value).strip().replace(' ', '_')
    return re.sub(r'(?u)[^-\w.]', '', underscored)
def is_yaml_filepath(val) -> bool:
    """
    Check if the path points to a YAML file (``.yml`` or ``.yaml``).

    :param val: Path of target file.
    :return: True if the file is YAML else False.
    """
    # the dot before `ya?ml` is now escaped: the previous pattern used a bare
    # `.` (any character), so e.g. 'configXyaml' was wrongly accepted; the
    # redundant `\_`/`\.` escapes inside the class are also dropped
    r = r'^[/\w\-.]+\.ya?ml$'
    return re.match(r, val.strip()) is not None
def download_mermaid_url(mermaid_url, output) -> None:
    """
    Download the jpg image from mermaid_url.

    :param mermaid_url: The URL of the image.
    :param output: A filename specifying the name of the image to be created, the suffix svg/jpg determines the file type of the output image.
    """
    from urllib.request import Request, urlopen

    try:
        req = Request(mermaid_url, headers={'User-Agent': 'Mozilla/5.0'})
        # context managers ensure both the HTTP response and the output file
        # are closed even on partial failure (the response was leaked before)
        with urlopen(req) as resp, open(output, 'wb') as fp:
            fp.write(resp.read())
    except Exception:
        # narrowed from a bare `except:`; download stays best-effort
        from jina.logging.predefined import default_logger

        default_logger.error(
            'can not download image, please check your graph and the network connections'
        )
def find_request_binding(target):
    """Find `@requests` decorated methods in a class.

    :param target: the target class to check
    :return: a dictionary with key as request type and value as method name
    """
    import ast
    import inspect

    from . import __default_endpoint__

    res = {}

    def visit_function_def(node):
        for e in node.decorator_list:
            req_name = ''
            # guard e.func with isinstance: for attribute-style decorators
            # such as `@mod.deco(...)`, e.func is an ast.Attribute and the
            # previous unconditional `e.func.id` raised AttributeError
            if (
                isinstance(e, ast.Call)
                and isinstance(e.func, ast.Name)
                and e.func.id == 'requests'
            ):
                req_name = e.keywords[0].value.s
            elif isinstance(e, ast.Name) and e.id == 'requests':
                # bare `@requests` binds the default endpoint
                req_name = __default_endpoint__
            if req_name:
                if req_name in res:
                    raise ValueError(
                        f'you already bind `{res[req_name]}` with `{req_name}` request'
                    )
                else:
                    res[req_name] = node.name

    V = ast.NodeVisitor()
    V.visit_FunctionDef = visit_function_def
    V.visit(compile(inspect.getsource(target), '?', 'exec', ast.PyCF_ONLY_AST))
    return res
def dunder_get(_dict: Any, key: str) -> Any:
    """Returns value for a specified dunderkey

    A "dunderkey" is just a fieldname that may or may not contain
    double underscores (dunderscores!) for referencing nested keys in
    a dict. eg::

         >>> data = {'a': {'b': 1}}
         >>> dunder_get(data, 'a__b')
         1

    key 'b' can be referrenced as 'a__b'

    :param _dict : (dict, list, struct or object) which we want to index into
    :param key : (str) that represents a first level or nested key in the dict
    :return: (mixed) value corresponding to the key
    """
    part1, _, part2 = key.partition('__')
    try:
        part1 = int(part1)  # parse int parameter
    except ValueError:
        pass

    # protobuf Struct support is optional: plain dicts, lists and objects
    # work even when google.protobuf is not installed (previously this was
    # an unconditional per-call import and a hard dependency)
    try:
        from google.protobuf.struct_pb2 import Struct

        mapping_types = (dict, Struct)
    except ImportError:
        mapping_types = (dict,)

    if isinstance(part1, int):
        # numeric segment: positional index into a sequence
        result = _dict[part1]
    elif isinstance(_dict, mapping_types):
        # missing keys yield None rather than raising
        result = _dict[part1] if part1 in _dict else None
    else:
        result = getattr(_dict, part1)

    return dunder_get(result, part2) if part2 else result
if False:
from fastapi import FastAPI
def extend_rest_interface(app: 'FastAPI') -> 'FastAPI':
    """Extend Jina built-in FastAPI instance with customized APIs, routing, etc.

    :param app: the built-in FastAPI instance given by Jina
    :return: the extended FastAPI instance

    .. highlight:: python
    .. code-block:: python

        def extend_rest_interface(app: 'FastAPI'):
            @app.get('/extension1')
            async def root():
                return {"message": "Hello World"}
            return app
    """
    # default hook is a no-op: users override it to register extra routes
    return app
|
_channel.py | # Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Invocation-side implementation of gRPC Python."""
import sys
import threading
import time
import logging
import grpc
from grpc import _common
from grpc import _grpcio_metadata
from grpc._cython import cygrpc
from grpc.framework.foundation import callable_util
# User-agent string advertised to servers, e.g. 'grpc-python/1.x.y'.
_USER_AGENT = 'grpc-python/{}'.format(_grpcio_metadata.__version__)

# No special flags for any batch operation.
_EMPTY_FLAGS = 0
# Timespec used as the deadline of calls that have no deadline.
_INFINITE_FUTURE = cygrpc.Timespec(float('+inf'))

# For each call arity, the set of batch-operation completions initially
# outstanding ("due") on a freshly started call of that kind; an RPC is
# finished when its `due` set drains to empty.
_UNARY_UNARY_INITIAL_DUE = (cygrpc.OperationType.send_initial_metadata,
                            cygrpc.OperationType.send_message,
                            cygrpc.OperationType.send_close_from_client,
                            cygrpc.OperationType.receive_initial_metadata,
                            cygrpc.OperationType.receive_message,
                            cygrpc.OperationType.receive_status_on_client,)
_UNARY_STREAM_INITIAL_DUE = (cygrpc.OperationType.send_initial_metadata,
                             cygrpc.OperationType.send_message,
                             cygrpc.OperationType.send_close_from_client,
                             cygrpc.OperationType.receive_initial_metadata,
                             cygrpc.OperationType.receive_status_on_client,)
_STREAM_UNARY_INITIAL_DUE = (cygrpc.OperationType.send_initial_metadata,
                             cygrpc.OperationType.receive_initial_metadata,
                             cygrpc.OperationType.receive_message,
                             cygrpc.OperationType.receive_status_on_client,)
_STREAM_STREAM_INITIAL_DUE = (cygrpc.OperationType.send_initial_metadata,
                              cygrpc.OperationType.receive_initial_metadata,
                              cygrpc.OperationType.receive_status_on_client,)

# Logged when a connectivity-subscription callback raises.
_CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE = (
    'Exception calling channel subscription callback!')
def _deadline(timeout):
    """Translate an optional timeout in seconds into (deadline, Timespec).

    Returns ``(None, _INFINITE_FUTURE)`` when *timeout* is None, otherwise
    the absolute wall-clock deadline plus its cygrpc.Timespec equivalent.
    """
    if timeout is None:
        return None, _INFINITE_FUTURE
    deadline = time.time() + timeout
    return deadline, cygrpc.Timespec(deadline)
def _unknown_code_details(unknown_cygrpc_code, details):
return 'Server sent unknown code {} and details "{}"'.format(
unknown_cygrpc_code, details)
def _wait_once_until(condition, until):
    """Wait on *condition* once, up to absolute time *until* (None = forever).

    Raises grpc.FutureTimeoutError when *until* has already passed.
    """
    if until is None:
        condition.wait()
        return
    remaining = until - time.time()
    if remaining < 0:
        raise grpc.FutureTimeoutError()
    condition.wait(timeout=remaining)
# %d is interpolated with the cygrpc.CallError value.
_INTERNAL_CALL_ERROR_MESSAGE_FORMAT = (
    'Internal gRPC call error %d. ' +
    'Please report to https://github.com/grpc/grpc/issues')
def _check_call_error(call_error, metadata):
    """Raise ValueError when starting a batch returned a call error."""
    if call_error == cygrpc.CallError.invalid_metadata:
        raise ValueError('metadata was invalid: %s' % metadata)
    if call_error != cygrpc.CallError.ok:
        raise ValueError(_INTERNAL_CALL_ERROR_MESSAGE_FORMAT % call_error)
def _call_error_set_RPCstate(state, call_error, metadata):
    """Abort *state* with INTERNAL status describing a cygrpc call error."""
    if call_error == cygrpc.CallError.invalid_metadata:
        details = 'metadata was invalid: %s' % metadata
    else:
        details = _INTERNAL_CALL_ERROR_MESSAGE_FORMAT % call_error
    _abort(state, grpc.StatusCode.INTERNAL, details)
class _RPCState(object):
    """Mutable, condition-guarded record of a single RPC's progress."""

    def __init__(self, due, initial_metadata, trailing_metadata, code, details):
        # Guards every other attribute; all reads and writes happen under it.
        self.condition = threading.Condition()
        # The cygrpc.OperationType objects representing events due from the RPC's
        # completion queue.
        self.due = set(due)
        self.initial_metadata = initial_metadata
        # Most recently received (and not yet consumed) response message.
        self.response = None
        self.trailing_metadata = trailing_metadata
        # Terminal grpc.StatusCode, or None while the RPC is in flight.
        self.code = code
        self.details = details
        # The semantics of grpc.Future.cancel and grpc.Future.cancelled are
        # slightly wonky, so they have to be tracked separately from the rest of the
        # result of the RPC. This field tracks whether cancellation was requested
        # prior to termination of the RPC.
        self.cancelled = False
        # Callables invoked once, at termination; set to None afterwards.
        self.callbacks = []
def _abort(state, code, details):
if state.code is None:
state.code = code
state.details = details
if state.initial_metadata is None:
state.initial_metadata = ()
state.trailing_metadata = ()
def _handle_event(event, state, response_deserializer):
    """Fold one completion-queue event into *state* (caller holds the lock).

    Removes each completed operation from ``state.due``, stores received
    metadata/messages/status, and returns the termination callbacks to run
    (empty list when the RPC has not terminated yet).
    """
    callbacks = []
    for batch_operation in event.batch_operations:
        operation_type = batch_operation.type()
        state.due.remove(operation_type)
        if operation_type == cygrpc.OperationType.receive_initial_metadata:
            state.initial_metadata = batch_operation.initial_metadata()
        elif operation_type == cygrpc.OperationType.receive_message:
            serialized_response = batch_operation.message()
            if serialized_response is not None:
                response = _common.deserialize(serialized_response,
                                               response_deserializer)
                if response is None:
                    details = 'Exception deserializing response!'
                    _abort(state, grpc.StatusCode.INTERNAL, details)
                else:
                    state.response = response
        elif operation_type == cygrpc.OperationType.receive_status_on_client:
            state.trailing_metadata = batch_operation.trailing_metadata()
            if state.code is None:
                code = _common.CYGRPC_STATUS_CODE_TO_STATUS_CODE.get(
                    batch_operation.code())
                if code is None:
                    state.code = grpc.StatusCode.UNKNOWN
                    # pass the raw wire code: the previous code passed
                    # `code`, which is provably None in this branch, so the
                    # message always read 'unknown code None'
                    state.details = _unknown_code_details(
                        batch_operation.code(), batch_operation.details())
                else:
                    state.code = code
                    state.details = batch_operation.details()
            # receiving status terminates the RPC: hand the registered
            # callbacks to the caller and mark the list consumed
            callbacks.extend(state.callbacks)
            state.callbacks = None
    return callbacks
def _event_handler(state, call, response_deserializer):
def handle_event(event):
with state.condition:
callbacks = _handle_event(event, state, response_deserializer)
state.condition.notify_all()
done = not state.due
for callback in callbacks:
callback()
return call if done else None
return handle_event
def _consume_request_iterator(request_iterator, state, call,
                              request_serializer):
    """Drain *request_iterator* on a background thread, sending each message.

    Serializes every request, starts a send_message batch for it, and waits
    under ``state.condition`` for that send to complete before pulling the
    next request (one in-flight send at a time); sends close_from_client
    once the iterator is exhausted. Serialization failure or an exception
    from the iterator cancels the call.
    """
    # Send-side events need no deserializer.
    event_handler = _event_handler(state, call, None)

    def consume_request_iterator():
        while True:
            try:
                request = next(request_iterator)
            except StopIteration:
                break
            except Exception:  # pylint: disable=broad-except
                logging.exception("Exception iterating requests!")
                call.cancel()
                _abort(state, grpc.StatusCode.UNKNOWN,
                       "Exception iterating requests!")
                return
            serialized_request = _common.serialize(request, request_serializer)
            with state.condition:
                # Stop silently if the RPC already terminated or was cancelled.
                if state.code is None and not state.cancelled:
                    if serialized_request is None:
                        call.cancel()
                        details = 'Exception serializing request!'
                        _abort(state, grpc.StatusCode.INTERNAL, details)
                        return
                    else:
                        operations = (cygrpc.SendMessageOperation(
                            serialized_request, _EMPTY_FLAGS),)
                        call.start_client_batch(operations, event_handler)
                        state.due.add(cygrpc.OperationType.send_message)
                        # Block until this send completes (or the RPC dies)
                        # before moving on to the next request.
                        while True:
                            state.condition.wait()
                            if state.code is None:
                                if cygrpc.OperationType.send_message not in state.due:
                                    break
                            else:
                                return
                else:
                    return
        # Iterator exhausted: half-close the sending side.
        with state.condition:
            if state.code is None:
                operations = (
                    cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS),)
                call.start_client_batch(operations, event_handler)
                state.due.add(cygrpc.OperationType.send_close_from_client)

    def stop_consumption_thread(timeout):  # pylint: disable=unused-argument
        # Cleanup hook run at interpreter shutdown: cancel a live RPC.
        with state.condition:
            if state.code is None:
                call.cancel()
                state.cancelled = True
                _abort(state, grpc.StatusCode.CANCELLED, 'Cancelled!')
                state.condition.notify_all()

    consumption_thread = _common.CleanupThread(
        stop_consumption_thread, target=consume_request_iterator)
    consumption_thread.start()
class _Rendezvous(grpc.RpcError, grpc.Future, grpc.Call):
    """Client-side view of one RPC: simultaneously an exception (RpcError),
    an asynchronous result (Future) and a call handle (Call).

    All mutable state lives in the shared _RPCState; every accessor
    synchronizes on ``self._state.condition``.
    """

    def __init__(self, state, call, response_deserializer, deadline):
        super(_Rendezvous, self).__init__()
        self._state = state
        self._call = call
        self._response_deserializer = response_deserializer
        # Absolute wall-clock deadline (seconds since the epoch) or None.
        self._deadline = deadline

    def cancel(self):
        """Request cancellation; always returns False (grpc.Future API)."""
        with self._state.condition:
            if self._state.code is None:
                self._call.cancel()
                self._state.cancelled = True
                _abort(self._state, grpc.StatusCode.CANCELLED, 'Cancelled!')
                self._state.condition.notify_all()
            return False

    def cancelled(self):
        with self._state.condition:
            return self._state.cancelled

    def running(self):
        with self._state.condition:
            return self._state.code is None

    def done(self):
        with self._state.condition:
            return self._state.code is not None

    def result(self, timeout=None):
        """Block until termination, then return the response or raise."""
        until = None if timeout is None else time.time() + timeout
        with self._state.condition:
            while True:
                if self._state.code is None:
                    _wait_once_until(self._state.condition, until)
                elif self._state.code is grpc.StatusCode.OK:
                    return self._state.response
                elif self._state.cancelled:
                    raise grpc.FutureCancelledError()
                else:
                    # self *is* the RpcError describing the failure
                    raise self

    def exception(self, timeout=None):
        """Block until termination; return the exception (self) or None."""
        until = None if timeout is None else time.time() + timeout
        with self._state.condition:
            while True:
                if self._state.code is None:
                    _wait_once_until(self._state.condition, until)
                elif self._state.code is grpc.StatusCode.OK:
                    return None
                elif self._state.cancelled:
                    raise grpc.FutureCancelledError()
                else:
                    return self

    def traceback(self, timeout=None):
        """Block until termination; return the failure traceback or None."""
        until = None if timeout is None else time.time() + timeout
        with self._state.condition:
            while True:
                if self._state.code is None:
                    _wait_once_until(self._state.condition, until)
                elif self._state.code is grpc.StatusCode.OK:
                    return None
                elif self._state.cancelled:
                    raise grpc.FutureCancelledError()
                else:
                    # raise/catch self to materialize a traceback object
                    try:
                        raise self
                    except grpc.RpcError:
                        return sys.exc_info()[2]

    def add_done_callback(self, fn):
        with self._state.condition:
            if self._state.code is None:
                # still in flight: run at termination
                self._state.callbacks.append(lambda: fn(self))
                return
        # already terminated: invoke immediately, outside the lock
        fn(self)

    def _next(self):
        """Fetch the next streamed response message, blocking as needed."""
        with self._state.condition:
            if self._state.code is None:
                event_handler = _event_handler(self._state, self._call,
                                               self._response_deserializer)
                self._call.start_client_batch(
                    (cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),),
                    event_handler)
                self._state.due.add(cygrpc.OperationType.receive_message)
            elif self._state.code is grpc.StatusCode.OK:
                raise StopIteration()
            else:
                raise self
            while True:
                self._state.condition.wait()
                if self._state.response is not None:
                    response = self._state.response
                    self._state.response = None
                    return response
                elif cygrpc.OperationType.receive_message not in self._state.due:
                    # receive finished without a message: the stream is over
                    if self._state.code is grpc.StatusCode.OK:
                        raise StopIteration()
                    elif self._state.code is not None:
                        raise self

    def __iter__(self):
        return self

    def __next__(self):
        return self._next()

    def next(self):
        # Python 2 iterator protocol.
        return self._next()

    def is_active(self):
        with self._state.condition:
            return self._state.code is None

    def time_remaining(self):
        """Seconds until the deadline, or None when there is no deadline."""
        if self._deadline is None:
            return None
        else:
            return max(self._deadline - time.time(), 0)

    def add_callback(self, callback):
        """Register *callback* for termination; False if already terminated."""
        with self._state.condition:
            if self._state.callbacks is None:
                return False
            else:
                self._state.callbacks.append(callback)
                return True

    def initial_metadata(self):
        with self._state.condition:
            while self._state.initial_metadata is None:
                self._state.condition.wait()
            return self._state.initial_metadata

    def trailing_metadata(self):
        with self._state.condition:
            while self._state.trailing_metadata is None:
                self._state.condition.wait()
            return self._state.trailing_metadata

    def code(self):
        with self._state.condition:
            while self._state.code is None:
                self._state.condition.wait()
            return self._state.code

    def details(self):
        with self._state.condition:
            while self._state.details is None:
                self._state.condition.wait()
            return _common.decode(self._state.details)

    def _repr(self):
        with self._state.condition:
            if self._state.code is None:
                return '<_Rendezvous object of in-flight RPC>'
            else:
                return '<_Rendezvous of RPC that terminated with ({}, {})>'.format(
                    self._state.code, _common.decode(self._state.details))

    def __repr__(self):
        return self._repr()

    def __str__(self):
        return self._repr()

    def __del__(self):
        # Garbage collection of a live rendezvous cancels the RPC.
        with self._state.condition:
            if self._state.code is None:
                self._call.cancel()
                self._state.cancelled = True
                self._state.code = grpc.StatusCode.CANCELLED
                self._state.condition.notify_all()
def _start_unary_request(request, timeout, request_serializer):
    """Serialize *request* and compute deadlines for a unary-request RPC.

    Returns ``(deadline, deadline_timespec, serialized_request, rendezvous)``
    where exactly one of the last two is None: on serialization failure a
    pre-terminated _Rendezvous carrying INTERNAL status takes the place of
    the serialized payload.
    """
    deadline, deadline_timespec = _deadline(timeout)
    serialized_request = _common.serialize(request, request_serializer)
    if serialized_request is not None:
        return deadline, deadline_timespec, serialized_request, None
    state = _RPCState((), (), (), grpc.StatusCode.INTERNAL,
                      'Exception serializing request!')
    rendezvous = _Rendezvous(state, None, None, deadline)
    return deadline, deadline_timespec, None, rendezvous
def _end_unary_response_blocking(state, call, with_call, deadline):
    """Return the unary response (optionally with its Call) or raise on error."""
    if state.code is not grpc.StatusCode.OK:
        raise _Rendezvous(state, None, None, deadline)
    if with_call:
        rendezvous = _Rendezvous(state, call, None, deadline)
        return state.response, rendezvous
    return state.response
class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable):
    """Invoker for unary-request/unary-response methods on a channel."""

    def __init__(self, channel, managed_call, method, request_serializer,
                 response_deserializer):
        self._channel = channel
        # Factory from _channel_managed_call_management for async calls.
        self._managed_call = managed_call
        self._method = method
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer

    def _prepare(self, request, timeout, metadata):
        """Serialize the request and assemble the full operation batch.

        Returns (state, operations, deadline, deadline_timespec, rendezvous)
        where rendezvous is non-None only on serialization failure.
        """
        deadline, deadline_timespec, serialized_request, rendezvous = (
            _start_unary_request(request, timeout, self._request_serializer))
        if serialized_request is None:
            return None, None, None, None, rendezvous
        else:
            state = _RPCState(_UNARY_UNARY_INITIAL_DUE, None, None, None, None)
            operations = (
                cygrpc.SendInitialMetadataOperation(metadata, _EMPTY_FLAGS),
                cygrpc.SendMessageOperation(serialized_request, _EMPTY_FLAGS),
                cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS),
                cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),
                cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),
                cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),)
            return state, operations, deadline, deadline_timespec, None

    def _blocking(self, request, timeout, metadata, credentials):
        """Run the RPC synchronously on a private completion queue."""
        state, operations, deadline, deadline_timespec, rendezvous = self._prepare(
            request, timeout, metadata)
        if rendezvous:
            raise rendezvous
        else:
            completion_queue = cygrpc.CompletionQueue()
            call = self._channel.create_call(None, 0, completion_queue,
                                             self._method, None,
                                             deadline_timespec)
            if credentials is not None:
                call.set_credentials(credentials._credentials)
            call_error = call.start_client_batch(operations, None)
            _check_call_error(call_error, metadata)
            # A single poll suffices: everything runs in one batch.
            _handle_event(completion_queue.poll(), state,
                          self._response_deserializer)
            return state, call, deadline

    def __call__(self, request, timeout=None, metadata=None, credentials=None):
        """Invoke the RPC and return the response (blocking)."""
        state, call, deadline = self._blocking(request, timeout, metadata,
                                               credentials)
        return _end_unary_response_blocking(state, call, False, deadline)

    def with_call(self, request, timeout=None, metadata=None, credentials=None):
        """Invoke the RPC and return (response, call) (blocking)."""
        state, call, deadline = self._blocking(request, timeout, metadata,
                                               credentials)
        return _end_unary_response_blocking(state, call, True, deadline)

    def future(self, request, timeout=None, metadata=None, credentials=None):
        """Start the RPC asynchronously and return its _Rendezvous future."""
        state, operations, deadline, deadline_timespec, rendezvous = self._prepare(
            request, timeout, metadata)
        if rendezvous:
            return rendezvous
        else:
            call, drive_call = self._managed_call(None, 0, self._method, None,
                                                  deadline_timespec)
            if credentials is not None:
                call.set_credentials(credentials._credentials)
            event_handler = _event_handler(state, call,
                                           self._response_deserializer)
            with state.condition:
                call_error = call.start_client_batch(operations, event_handler)
                if call_error != cygrpc.CallError.ok:
                    _call_error_set_RPCstate(state, call_error, metadata)
                    return _Rendezvous(state, None, None, deadline)
                # operations were accepted: the managed call must be driven
                drive_call()
            return _Rendezvous(state, call, self._response_deserializer,
                               deadline)
class _UnaryStreamMultiCallable(grpc.UnaryStreamMultiCallable):
    """Invoker for unary-request/stream-response methods on a channel."""

    def __init__(self, channel, managed_call, method, request_serializer,
                 response_deserializer):
        self._channel = channel
        # Factory from _channel_managed_call_management for managed calls.
        self._managed_call = managed_call
        self._method = method
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer

    def __call__(self, request, timeout=None, metadata=None, credentials=None):
        """Start the RPC; iterate the returned _Rendezvous for responses."""
        deadline, deadline_timespec, serialized_request, rendezvous = (
            _start_unary_request(request, timeout, self._request_serializer))
        if serialized_request is None:
            raise rendezvous
        else:
            state = _RPCState(_UNARY_STREAM_INITIAL_DUE, None, None, None, None)
            call, drive_call = self._managed_call(None, 0, self._method, None,
                                                  deadline_timespec)
            if credentials is not None:
                call.set_credentials(credentials._credentials)
            event_handler = _event_handler(state, call,
                                           self._response_deserializer)
            with state.condition:
                # Initial metadata is received as its own batch so that
                # iteration can begin before the stream terminates.
                call.start_client_batch(
                    (cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),),
                    event_handler)
                operations = (
                    cygrpc.SendInitialMetadataOperation(
                        metadata, _EMPTY_FLAGS), cygrpc.SendMessageOperation(
                            serialized_request, _EMPTY_FLAGS),
                    cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS),
                    cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),)
                call_error = call.start_client_batch(operations, event_handler)
                if call_error != cygrpc.CallError.ok:
                    _call_error_set_RPCstate(state, call_error, metadata)
                    return _Rendezvous(state, None, None, deadline)
                # operations were accepted: the managed call must be driven
                drive_call()
            return _Rendezvous(state, call, self._response_deserializer,
                               deadline)
class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable):
    """Invoker for stream-request/unary-response methods on a channel."""

    def __init__(self, channel, managed_call, method, request_serializer,
                 response_deserializer):
        self._channel = channel
        # Factory from _channel_managed_call_management for async calls.
        self._managed_call = managed_call
        self._method = method
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer

    def _blocking(self, request_iterator, timeout, metadata, credentials):
        """Run the RPC synchronously, feeding requests from the iterator."""
        deadline, deadline_timespec = _deadline(timeout)
        state = _RPCState(_STREAM_UNARY_INITIAL_DUE, None, None, None, None)
        completion_queue = cygrpc.CompletionQueue()
        call = self._channel.create_call(None, 0, completion_queue,
                                         self._method, None, deadline_timespec)
        if credentials is not None:
            call.set_credentials(credentials._credentials)
        with state.condition:
            call.start_client_batch(
                (cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),), None)
            operations = (
                cygrpc.SendInitialMetadataOperation(metadata, _EMPTY_FLAGS),
                cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),
                cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),)
            call_error = call.start_client_batch(operations, None)
            _check_call_error(call_error, metadata)
        # Requests are sent by a background thread while we poll here.
        _consume_request_iterator(request_iterator, state, call,
                                  self._request_serializer)
        while True:
            event = completion_queue.poll()
            with state.condition:
                _handle_event(event, state, self._response_deserializer)
                state.condition.notify_all()
                if not state.due:
                    break
        return state, call, deadline

    def __call__(self,
                 request_iterator,
                 timeout=None,
                 metadata=None,
                 credentials=None):
        """Invoke the RPC and return the single response (blocking)."""
        state, call, deadline = self._blocking(request_iterator, timeout,
                                               metadata, credentials)
        return _end_unary_response_blocking(state, call, False, deadline)

    def with_call(self,
                  request_iterator,
                  timeout=None,
                  metadata=None,
                  credentials=None):
        """Invoke the RPC and return (response, call) (blocking)."""
        state, call, deadline = self._blocking(request_iterator, timeout,
                                               metadata, credentials)
        return _end_unary_response_blocking(state, call, True, deadline)

    def future(self,
               request_iterator,
               timeout=None,
               metadata=None,
               credentials=None):
        """Start the RPC asynchronously and return its _Rendezvous future."""
        deadline, deadline_timespec = _deadline(timeout)
        state = _RPCState(_STREAM_UNARY_INITIAL_DUE, None, None, None, None)
        call, drive_call = self._managed_call(None, 0, self._method, None,
                                              deadline_timespec)
        if credentials is not None:
            call.set_credentials(credentials._credentials)
        event_handler = _event_handler(state, call, self._response_deserializer)
        with state.condition:
            call.start_client_batch(
                (cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),),
                event_handler)
            operations = (
                cygrpc.SendInitialMetadataOperation(metadata, _EMPTY_FLAGS),
                cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),
                cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),)
            call_error = call.start_client_batch(operations, event_handler)
            if call_error != cygrpc.CallError.ok:
                _call_error_set_RPCstate(state, call_error, metadata)
                return _Rendezvous(state, None, None, deadline)
            # operations were accepted: the managed call must be driven
            drive_call()
        _consume_request_iterator(request_iterator, state, call,
                                  self._request_serializer)
        return _Rendezvous(state, call, self._response_deserializer, deadline)
class _StreamStreamMultiCallable(grpc.StreamStreamMultiCallable):
    """Invoker for stream-request/stream-response methods on a channel."""

    def __init__(self, channel, managed_call, method, request_serializer,
                 response_deserializer):
        self._channel = channel
        # Factory from _channel_managed_call_management for managed calls.
        self._managed_call = managed_call
        self._method = method
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer

    def __call__(self,
                 request_iterator,
                 timeout=None,
                 metadata=None,
                 credentials=None):
        """Start the RPC: requests come from the iterator, responses are
        read by iterating the returned _Rendezvous."""
        deadline, deadline_timespec = _deadline(timeout)
        state = _RPCState(_STREAM_STREAM_INITIAL_DUE, None, None, None, None)
        call, drive_call = self._managed_call(None, 0, self._method, None,
                                              deadline_timespec)
        if credentials is not None:
            call.set_credentials(credentials._credentials)
        event_handler = _event_handler(state, call, self._response_deserializer)
        with state.condition:
            call.start_client_batch(
                (cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),),
                event_handler)
            operations = (
                cygrpc.SendInitialMetadataOperation(metadata, _EMPTY_FLAGS),
                cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),)
            call_error = call.start_client_batch(operations, event_handler)
            if call_error != cygrpc.CallError.ok:
                _call_error_set_RPCstate(state, call_error, metadata)
                return _Rendezvous(state, None, None, deadline)
            # operations were accepted: the managed call must be driven
            drive_call()
        _consume_request_iterator(request_iterator, state, call,
                                  self._request_serializer)
        return _Rendezvous(state, call, self._response_deserializer, deadline)
class _ChannelCallState(object):
    """Tracks the calls managed on a channel's shared completion queue."""

    def __init__(self, channel):
        self.lock = threading.Lock()
        self.channel = channel
        self.completion_queue = cygrpc.CompletionQueue()
        # Set of in-flight managed calls, or None while no spin thread runs.
        self.managed_calls = None
def _run_channel_spin_thread(state):
    """Start the thread that drains the channel's shared completion queue.

    Each event's tag is an event handler; invoking it returns the finished
    cygrpc.Call (to be dropped from the managed set) or None. The thread
    exits when the last managed call completes.
    """

    def channel_spin():
        while True:
            event = state.completion_queue.poll()
            completed_call = event.tag(event)
            if completed_call is not None:
                with state.lock:
                    state.managed_calls.remove(completed_call)
                    if not state.managed_calls:
                        # last call finished: reset so the next managed call
                        # spawns a fresh spin thread
                        state.managed_calls = None
                        return

    def stop_channel_spin(timeout):  # pylint: disable=unused-argument
        # Cleanup hook: cancel everything still in flight so the spin
        # thread can drain and exit.
        with state.lock:
            if state.managed_calls is not None:
                for call in state.managed_calls:
                    call.cancel()

    channel_spin_thread = _common.CleanupThread(
        stop_channel_spin, target=channel_spin)
    channel_spin_thread.start()
def _channel_managed_call_management(state):
    """Return a factory creating managed calls on *state*'s shared queue."""

    def create(parent, flags, method, host, deadline):
        """Creates a managed cygrpc.Call and a function to call to drive it.

        If operations are successfully added to the returned cygrpc.Call, the
        returned function must be called. If operations are not successfully added
        to the returned cygrpc.Call, the returned function must not be called.

        Args:
          parent: A cygrpc.Call to be used as the parent of the created call.
          flags: An integer bitfield of call flags.
          method: The RPC method.
          host: A host string for the created call.
          deadline: A cygrpc.Timespec to be the deadline of the created call.

        Returns:
          A cygrpc.Call with which to conduct an RPC and a function to call if
            operations are successfully started on the call.
        """
        call = state.channel.create_call(parent, flags, state.completion_queue,
                                         method, host, deadline)

        def drive():
            # Register the call; the first managed call also starts the spin
            # thread that services the shared completion queue.
            with state.lock:
                if state.managed_calls is None:
                    state.managed_calls = set((call,))
                    _run_channel_spin_thread(state)
                else:
                    state.managed_calls.add(call)

        return call, drive

    return create
class _ChannelConnectivityState(object):
    """Connectivity-watching state shared by subscribe/poll/deliver helpers."""

    def __init__(self, channel):
        self.lock = threading.RLock()
        self.channel = channel
        # True while a polling thread is watching connectivity.
        self.polling = False
        # Last observed connectivity value, or None while not polling.
        self.connectivity = None
        # Pending request to actively attempt a connection on next poll.
        self.try_to_connect = False
        # List of [callback, connectivity-last-delivered-to-it] pairs.
        self.callbacks_and_connectivities = []
        # True while a delivery thread is dispatching callbacks.
        self.delivering = False
def _deliveries(state):
callbacks_needing_update = []
for callback_and_connectivity in state.callbacks_and_connectivities:
callback, callback_connectivity, = callback_and_connectivity
if callback_connectivity is not state.connectivity:
callbacks_needing_update.append(callback)
callback_and_connectivity[1] = state.connectivity
return callbacks_needing_update
def _deliver(state, initial_connectivity, initial_callbacks):
    """Deliver connectivity updates to callbacks until none are stale.

    Runs on its own thread; keeps dispatching while new staleness appears,
    then clears ``state.delivering`` and exits.
    """
    connectivity = initial_connectivity
    callbacks = initial_callbacks
    while True:
        # Callbacks run outside the lock; exceptions are logged, not raised.
        for callback in callbacks:
            callable_util.call_logging_exceptions(
                callback, _CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE,
                connectivity)
        with state.lock:
            callbacks = _deliveries(state)
            if callbacks:
                connectivity = state.connectivity
            else:
                state.delivering = False
                return
def _spawn_delivery(state, callbacks):
    """Start a thread delivering *callbacks* and mark delivery in progress."""
    worker = threading.Thread(
        target=_deliver, args=(state, state.connectivity, callbacks,))
    worker.start()
    state.delivering = True
# NOTE(https://github.com/grpc/grpc/issues/3064): We'd rather not poll.
def _poll_connectivity(state, channel, initial_try_to_connect):
    """Poll channel connectivity on a dedicated thread, fanning out changes.

    Publishes the first observed state to all current subscribers, then loops
    on watch_connectivity_state with a 0.2s period, spawning a delivery
    thread whenever the state changes. Exits when no subscribers remain and
    no connect attempt is pending.
    """
    try_to_connect = initial_try_to_connect
    connectivity = channel.check_connectivity_state(try_to_connect)
    with state.lock:
        state.connectivity = (
            _common.CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[
                connectivity])
        callbacks = tuple(callback
                          for callback, unused_but_known_to_be_none_connectivity
                          in state.callbacks_and_connectivities)
        for callback_and_connectivity in state.callbacks_and_connectivities:
            callback_and_connectivity[1] = state.connectivity
        if callbacks:
            _spawn_delivery(state, callbacks)
    completion_queue = cygrpc.CompletionQueue()
    while True:
        channel.watch_connectivity_state(connectivity,
                                         cygrpc.Timespec(time.time() + 0.2),
                                         completion_queue, None)
        event = completion_queue.poll()
        with state.lock:
            if not state.callbacks_and_connectivities and not state.try_to_connect:
                # nobody listening and no pending connect: stop polling
                state.polling = False
                state.connectivity = None
                break
            try_to_connect = state.try_to_connect
            state.try_to_connect = False
        if event.success or try_to_connect:
            connectivity = channel.check_connectivity_state(try_to_connect)
            with state.lock:
                state.connectivity = (
                    _common.CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[
                        connectivity])
                if not state.delivering:
                    # NOTE(nathaniel): The field is only ever used as a
                    # sequence so it's fine that both lists and tuples are
                    # assigned to it.
                    callbacks = _deliveries(state)  # pylint: disable=redefined-variable-type
                    if callbacks:
                        _spawn_delivery(state, callbacks)
def _moot(state):
with state.lock:
del state.callbacks_and_connectivities[:]
def _subscribe(state, callback, try_to_connect):
    """Register *callback* for connectivity updates, starting machinery lazily.

    The first subscriber starts the polling thread; later subscribers either
    get an immediate delivery (when the state is known and no delivery is
    running) or are picked up by the active poller/deliverer.
    """
    with state.lock:
        if not state.callbacks_and_connectivities and not state.polling:
            # First subscriber: start polling. The cleanup hook (_moot)
            # drops subscriptions so the polling thread can exit at shutdown.
            polling_thread = _common.CleanupThread(
                lambda timeout: _moot(state),
                target=_poll_connectivity,
                args=(state, state.channel, bool(try_to_connect)))
            polling_thread.start()
            state.polling = True
            state.callbacks_and_connectivities.append([callback, None])
        elif not state.delivering and state.connectivity is not None:
            # Known state, no delivery running: notify this callback now.
            _spawn_delivery(state, (callback,))
            state.try_to_connect |= bool(try_to_connect)
            state.callbacks_and_connectivities.append(
                [callback, state.connectivity])
        else:
            # Poller/deliverer already active: they will pick this one up.
            state.try_to_connect |= bool(try_to_connect)
            state.callbacks_and_connectivities.append([callback, None])
def _unsubscribe(state, callback):
with state.lock:
for index, (subscribed_callback, unused_connectivity
) in enumerate(state.callbacks_and_connectivities):
if callback == subscribed_callback:
state.callbacks_and_connectivities.pop(index)
break
def _options(options):
    """Return *options* as a list with the gRPC Python user-agent channel
    argument appended."""
    augmented = list(options)
    augmented.append(
        (cygrpc.ChannelArgKey.primary_user_agent_string, _USER_AGENT))
    return augmented
class Channel(grpc.Channel):
    """A cygrpc.Channel-backed implementation of grpc.Channel."""

    def __init__(self, target, options, credentials):
        """Constructor.

        Args:
          target: The target to which to connect.
          options: Configuration options for the channel.
          credentials: A cygrpc.ChannelCredentials or None.
        """
        channel_args = _common.channel_args(_options(options))
        self._channel = cygrpc.Channel(
            _common.encode(target), channel_args, credentials)
        self._call_state = _ChannelCallState(self._channel)
        self._connectivity_state = _ChannelConnectivityState(self._channel)
        # TODO(https://github.com/grpc/grpc/issues/9884)
        # Temporary work around UNAVAILABLE issues
        # Remove this once c-core has retry support
        _subscribe(self._connectivity_state, lambda *args: None, None)

    def subscribe(self, callback, try_to_connect=None):
        """Register *callback* for connectivity-state updates."""
        _subscribe(self._connectivity_state, callback, try_to_connect)

    def unsubscribe(self, callback):
        """Stop delivering connectivity-state updates to *callback*."""
        _unsubscribe(self._connectivity_state, callback)

    def unary_unary(self, method, request_serializer=None,
                    response_deserializer=None):
        """Create a callable for a unary-request, unary-response method."""
        management = _channel_managed_call_management(self._call_state)
        return _UnaryUnaryMultiCallable(
            self._channel, management, _common.encode(method),
            request_serializer, response_deserializer)

    def unary_stream(self, method, request_serializer=None,
                     response_deserializer=None):
        """Create a callable for a unary-request, stream-response method."""
        management = _channel_managed_call_management(self._call_state)
        return _UnaryStreamMultiCallable(
            self._channel, management, _common.encode(method),
            request_serializer, response_deserializer)

    def stream_unary(self, method, request_serializer=None,
                     response_deserializer=None):
        """Create a callable for a stream-request, unary-response method."""
        management = _channel_managed_call_management(self._call_state)
        return _StreamUnaryMultiCallable(
            self._channel, management, _common.encode(method),
            request_serializer, response_deserializer)

    def stream_stream(self, method, request_serializer=None,
                      response_deserializer=None):
        """Create a callable for a stream-request, stream-response method."""
        management = _channel_managed_call_management(self._call_state)
        return _StreamStreamMultiCallable(
            self._channel, management, _common.encode(method),
            request_serializer, response_deserializer)

    def __del__(self):
        # Drop all subscriptions so the polling thread can terminate.
        _moot(self._connectivity_state)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.